
[PULL,12/15] migration/block: rename BLOCK_SIZE macro

Message ID 20200228092420.103757-13-quintela@redhat.com
State New
Series [PULL,01/15] multifd: Add multifd-compression parameter

Commit Message

Juan Quintela Feb. 28, 2020, 9:24 a.m. UTC
From: Stefan Hajnoczi <stefanha@redhat.com>

Both <linux/fs.h> and <sys/mount.h> define BLOCK_SIZE macros.  Avoid
using that name in migration/block.c.

I noticed this when including <liburing.h> (Linux io_uring) from
"block/aio.h" and compilation failed.  Although patches adding that
include haven't been sent yet, it makes sense to rename the macro now in
case someone else stumbles on it in the meantime.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
---
 migration/block.c | 39 ++++++++++++++++++++-------------------
 1 file changed, 20 insertions(+), 19 deletions(-)
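
As a minimal illustration of the clash described above (an assumed
reconstruction, not part of the patch): <linux/fs.h> defines BLOCK_SIZE
as (1 << BLOCK_SIZE_BITS), i.e. 1024, so once that header and a local
macro of the same name are both visible, the preprocessor sees a
redefinition, which -Werror builds (QEMU's default for developer
builds) turn into the compile failure mentioned above:

    /* Hypothetical minimal reproducer of the name clash. */
    #include <linux/fs.h>            /* already defines BLOCK_SIZE as 1024 */

    #define BLOCK_SIZE (1 << 20)     /* warning: "BLOCK_SIZE" redefined */

    int main(void)
    {
        return BLOCK_SIZE == (1 << 20) ? 0 : 1;
    }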

Comments

Peter Maydell May 12, 2022, 4:22 p.m. UTC | #1
On Fri, 28 Feb 2020 at 09:30, Juan Quintela <quintela@redhat.com> wrote:
>
> From: Stefan Hajnoczi <stefanha@redhat.com>
>
> Both <linux/fs.h> and <sys/mount.h> define BLOCK_SIZE macros.  Avoid
> using that name in migration/block.c.
>
> I noticed this when including <liburing.h> (Linux io_uring) from
> "block/aio.h" and compilation failed.  Although patches adding that
> include haven't been sent yet, it makes sense to rename the macro now in
> case someone else stumbles on it in the meantime.

A rather old change, and it didn't even introduce the code that
Coverity is complaining about, but this seems as good a point as
any to hang the email off of...

BLK_MIG_BLOCK_SIZE doesn't have a ULL suffix, so it's a 32-bit int,
and Coverity complains about places where we multiply some block
count by it and then use the result in a 64-bit context, e.g. here:

> @@ -770,7 +771,7 @@ static int block_save_iterate(QEMUFile *f, void *opaque)
>
>      /* control the rate of transfer */
>      blk_mig_lock();
> -    while (block_mig_state.read_done * BLOCK_SIZE <
> +    while (block_mig_state.read_done * BLK_MIG_BLOCK_SIZE <
>             qemu_file_get_rate_limit(f) &&
>             block_mig_state.submitted < MAX_PARALLEL_IO &&
>             (block_mig_state.submitted + block_mig_state.read_done) <

and here:

> @@ -874,13 +875,13 @@ static void block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
>      qemu_mutex_unlock_iothread();
>
>      blk_mig_lock();
> -    pending += block_mig_state.submitted * BLOCK_SIZE +
> -               block_mig_state.read_done * BLOCK_SIZE;
> +    pending += block_mig_state.submitted * BLK_MIG_BLOCK_SIZE +
> +               block_mig_state.read_done * BLK_MIG_BLOCK_SIZE;
>      blk_mig_unlock();

Putting in a suitable cast to ensure the multiply is done at
64 bits would satisfy Coverity.
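
For instance, a minimal standalone sketch (hypothetical values, not
the actual migration code) of why the cast matters:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BLK_MIG_BLOCK_SIZE (1 << 20)    /* plain int, so 32-bit */

    int main(void)
    {
        int read_done = 8192;               /* hypothetical counter value */

        /* read_done * BLK_MIG_BLOCK_SIZE would multiply in 32 bits and
         * overflow before being widened -- the pattern Coverity flags.
         * Casting one operand first makes the multiply itself 64-bit: */
        uint64_t total = (uint64_t)read_done * BLK_MIG_BLOCK_SIZE;

        printf("%" PRIu64 "\n", total);     /* prints 8589934592 */
        return 0;
    }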

These are CID 1487136 and CID 1487175.

thanks
-- PMM

Patch

diff --git a/migration/block.c b/migration/block.c
index c90288ed29..737b6499f9 100644
--- a/migration/block.c
+++ b/migration/block.c
@@ -27,8 +27,8 @@ 
 #include "migration/vmstate.h"
 #include "sysemu/block-backend.h"
 
-#define BLOCK_SIZE                       (1 << 20)
-#define BDRV_SECTORS_PER_DIRTY_CHUNK     (BLOCK_SIZE >> BDRV_SECTOR_BITS)
+#define BLK_MIG_BLOCK_SIZE           (1 << 20)
+#define BDRV_SECTORS_PER_DIRTY_CHUNK (BLK_MIG_BLOCK_SIZE >> BDRV_SECTOR_BITS)
 
 #define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
 #define BLK_MIG_FLAG_EOS                0x02
@@ -133,7 +133,7 @@  static void blk_send(QEMUFile *f, BlkMigBlock * blk)
     uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;
 
     if (block_mig_state.zero_blocks &&
-        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
+        buffer_is_zero(blk->buf, BLK_MIG_BLOCK_SIZE)) {
         flags |= BLK_MIG_FLAG_ZERO_BLOCK;
     }
 
@@ -154,7 +154,7 @@  static void blk_send(QEMUFile *f, BlkMigBlock * blk)
         return;
     }
 
-    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
+    qemu_put_buffer(f, blk->buf, BLK_MIG_BLOCK_SIZE);
 }
 
 int blk_mig_active(void)
@@ -309,7 +309,7 @@  static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
     }
 
     blk = g_new(BlkMigBlock, 1);
-    blk->buf = g_malloc(BLOCK_SIZE);
+    blk->buf = g_malloc(BLK_MIG_BLOCK_SIZE);
     blk->bmds = bmds;
     blk->sector = cur_sector;
     blk->nr_sectors = nr_sectors;
@@ -350,7 +350,8 @@  static int set_dirty_tracking(void)
 
     QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
         bmds->dirty_bitmap = bdrv_create_dirty_bitmap(blk_bs(bmds->blk),
-                                                      BLOCK_SIZE, NULL, NULL);
+                                                      BLK_MIG_BLOCK_SIZE,
+                                                      NULL, NULL);
         if (!bmds->dirty_bitmap) {
             ret = -errno;
             goto fail;
@@ -548,7 +549,7 @@  static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
             bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);
 
             blk = g_new(BlkMigBlock, 1);
-            blk->buf = g_malloc(BLOCK_SIZE);
+            blk->buf = g_malloc(BLK_MIG_BLOCK_SIZE);
             blk->bmds = bmds;
             blk->sector = sector;
             blk->nr_sectors = nr_sectors;
@@ -770,7 +771,7 @@  static int block_save_iterate(QEMUFile *f, void *opaque)
 
     /* control the rate of transfer */
     blk_mig_lock();
-    while (block_mig_state.read_done * BLOCK_SIZE <
+    while (block_mig_state.read_done * BLK_MIG_BLOCK_SIZE <
            qemu_file_get_rate_limit(f) &&
            block_mig_state.submitted < MAX_PARALLEL_IO &&
            (block_mig_state.submitted + block_mig_state.read_done) <
@@ -874,13 +875,13 @@  static void block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
     qemu_mutex_unlock_iothread();
 
     blk_mig_lock();
-    pending += block_mig_state.submitted * BLOCK_SIZE +
-               block_mig_state.read_done * BLOCK_SIZE;
+    pending += block_mig_state.submitted * BLK_MIG_BLOCK_SIZE +
+               block_mig_state.read_done * BLK_MIG_BLOCK_SIZE;
     blk_mig_unlock();
 
     /* Report at least one block pending during bulk phase */
     if (pending <= max_size && !block_mig_state.bulk_completed) {
-        pending = max_size + BLOCK_SIZE;
+        pending = max_size + BLK_MIG_BLOCK_SIZE;
     }
 
     DPRINTF("Enter save live pending  %" PRIu64 "\n", pending);
@@ -901,7 +902,7 @@  static int block_load(QEMUFile *f, void *opaque, int version_id)
     int nr_sectors;
     int ret;
     BlockDriverInfo bdi;
-    int cluster_size = BLOCK_SIZE;
+    int cluster_size = BLK_MIG_BLOCK_SIZE;
 
     do {
         addr = qemu_get_be64(f);
@@ -939,11 +940,11 @@  static int block_load(QEMUFile *f, void *opaque, int version_id)
 
                 ret = bdrv_get_info(blk_bs(blk), &bdi);
                 if (ret == 0 && bdi.cluster_size > 0 &&
-                    bdi.cluster_size <= BLOCK_SIZE &&
-                    BLOCK_SIZE % bdi.cluster_size == 0) {
+                    bdi.cluster_size <= BLK_MIG_BLOCK_SIZE &&
+                    BLK_MIG_BLOCK_SIZE % bdi.cluster_size == 0) {
                     cluster_size = bdi.cluster_size;
                 } else {
-                    cluster_size = BLOCK_SIZE;
+                    cluster_size = BLK_MIG_BLOCK_SIZE;
                 }
             }
 
@@ -962,14 +963,14 @@  static int block_load(QEMUFile *f, void *opaque, int version_id)
                 int64_t cur_addr;
                 uint8_t *cur_buf;
 
-                buf = g_malloc(BLOCK_SIZE);
-                qemu_get_buffer(f, buf, BLOCK_SIZE);
-                for (i = 0; i < BLOCK_SIZE / cluster_size; i++) {
+                buf = g_malloc(BLK_MIG_BLOCK_SIZE);
+                qemu_get_buffer(f, buf, BLK_MIG_BLOCK_SIZE);
+                for (i = 0; i < BLK_MIG_BLOCK_SIZE / cluster_size; i++) {
                     cur_addr = addr * BDRV_SECTOR_SIZE + i * cluster_size;
                     cur_buf = buf + i * cluster_size;
 
                     if ((!block_mig_state.zero_blocks ||
-                        cluster_size < BLOCK_SIZE) &&
+                        cluster_size < BLK_MIG_BLOCK_SIZE) &&
                         buffer_is_zero(cur_buf, cluster_size)) {
                         ret = blk_pwrite_zeroes(blk, cur_addr,
                                                 cluster_size,