[v2,12/16] migration/rdma: Remove QEMUFile parameter when not used

Message ID 20230515195709.63843-13-quintela@redhat.com
State New
Series Migration: More migration atomic counters

Commit Message

Juan Quintela May 15, 2023, 7:57 p.m. UTC
Signed-off-by: Juan Quintela <quintela@redhat.com>
---
 migration/rdma.c | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)
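
Every hunk in this patch applies the same mechanical transformation: a QEMUFile *f parameter that the function body never reads is dropped from the signature, and each caller stops passing it. As a minimal sketch of the pattern (hypothetical write_one()/do_write() names, not code from this patch):

    /* Illustrative sketch only -- not QEMU code. */

    /* Before: 'f' is threaded through, but the body never reads it. */
    static int write_one(QEMUFile *f, RDMAContext *rdma, uint64_t addr)
    {
        return do_write(rdma, addr);    /* 'f' is dead weight */
    }

    /* After: the unused parameter is gone; callers pass one argument less. */
    static int write_one(RDMAContext *rdma, uint64_t addr)
    {
        return do_write(rdma, addr);
    }

The only non-trivial part of such a cleanup is confirming that nothing further down the call chain still needs the parameter; here the chain qemu_rdma_write() -> qemu_rdma_write_flush() -> qemu_rdma_write_one() is converted together, so nothing is left dangling.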

Comments

Leonardo Bras May 25, 2023, 7:21 a.m. UTC | #1
On Mon, 2023-05-15 at 21:57 +0200, Juan Quintela wrote:
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
>  migration/rdma.c | 23 +++++++++++------------
>  1 file changed, 11 insertions(+), 12 deletions(-)
> 
> diff --git a/migration/rdma.c b/migration/rdma.c
> index 074456f9df..416dec00a2 100644
> --- a/migration/rdma.c
> +++ b/migration/rdma.c
> @@ -2027,7 +2027,7 @@ static int qemu_rdma_exchange_recv(RDMAContext *rdma, RDMAControlHeader *head,
>   * If we're using dynamic registration on the dest-side, we have to
>   * send a registration command first.
>   */
> -static int qemu_rdma_write_one(QEMUFile *f, RDMAContext *rdma,
> +static int qemu_rdma_write_one(RDMAContext *rdma,
>                                 int current_index, uint64_t current_addr,
>                                 uint64_t length)
>  {
> @@ -2263,7 +2263,7 @@ retry:
>   * We support sending out multiple chunks at the same time.
>   * Not all of them need to get signaled in the completion queue.
>   */
> -static int qemu_rdma_write_flush(QEMUFile *f, RDMAContext *rdma)
> +static int qemu_rdma_write_flush(RDMAContext *rdma)
>  {
>      int ret;
>  
> @@ -2271,7 +2271,7 @@ static int qemu_rdma_write_flush(QEMUFile *f, RDMAContext *rdma)
>          return 0;
>      }
>  
> -    ret = qemu_rdma_write_one(f, rdma,
> +    ret = qemu_rdma_write_one(rdma,
>              rdma->current_index, rdma->current_addr, rdma->current_length);
>  
>      if (ret < 0) {
> @@ -2344,7 +2344,7 @@ static inline int qemu_rdma_buffer_mergable(RDMAContext *rdma,
>   *    and only require that a batch gets acknowledged in the completion
>   *    queue instead of each individual chunk.
>   */
> -static int qemu_rdma_write(QEMUFile *f, RDMAContext *rdma,
> +static int qemu_rdma_write(RDMAContext *rdma,
>                             uint64_t block_offset, uint64_t offset,
>                             uint64_t len)
>  {
> @@ -2355,7 +2355,7 @@ static int qemu_rdma_write(QEMUFile *f, RDMAContext *rdma,
>  
>      /* If we cannot merge it, we flush the current buffer first. */
>      if (!qemu_rdma_buffer_mergable(rdma, current_addr, len)) {
> -        ret = qemu_rdma_write_flush(f, rdma);
> +        ret = qemu_rdma_write_flush(rdma);
>          if (ret) {
>              return ret;
>          }
> @@ -2377,7 +2377,7 @@ static int qemu_rdma_write(QEMUFile *f, RDMAContext *rdma,
>  
>      /* flush it if buffer is too large */
>      if (rdma->current_length >= RDMA_MERGE_MAX) {
> -        return qemu_rdma_write_flush(f, rdma);
> +        return qemu_rdma_write_flush(rdma);
>      }
>  
>      return 0;
> @@ -2798,7 +2798,6 @@ static ssize_t qio_channel_rdma_writev(QIOChannel *ioc,
>                                         Error **errp)
>  {
>      QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
> -    QEMUFile *f = rioc->file;
>      RDMAContext *rdma;
>      int ret;
>      ssize_t done = 0;
> @@ -2819,7 +2818,7 @@ static ssize_t qio_channel_rdma_writev(QIOChannel *ioc,
>       * Push out any writes that
>       * we're queued up for VM's ram.
>       */
> -    ret = qemu_rdma_write_flush(f, rdma);
> +    ret = qemu_rdma_write_flush(rdma);
>      if (ret < 0) {
>          rdma->error_state = ret;
>          error_setg(errp, "qemu_rdma_write_flush returned %d", ret);
> @@ -2958,11 +2957,11 @@ static ssize_t qio_channel_rdma_readv(QIOChannel *ioc,
>  /*
>   * Block until all the outstanding chunks have been delivered by the hardware.
>   */
> -static int qemu_rdma_drain_cq(QEMUFile *f, RDMAContext *rdma)
> +static int qemu_rdma_drain_cq(RDMAContext *rdma)
>  {
>      int ret;
>  
> -    if (qemu_rdma_write_flush(f, rdma) < 0) {
> +    if (qemu_rdma_write_flush(rdma) < 0) {
>          return -EIO;
>      }
>  
> @@ -3272,7 +3271,7 @@ static size_t qemu_rdma_save_page(QEMUFile *f,
>       * is full, or the page doesn't belong to the current chunk,
>       * an actual RDMA write will occur and a new chunk will be formed.
>       */
> -    ret = qemu_rdma_write(f, rdma, block_offset, offset, size);
> +    ret = qemu_rdma_write(rdma, block_offset, offset, size);
>      if (ret < 0) {
>          error_report("rdma migration: write error! %d", ret);
>          goto err;
> @@ -3927,7 +3926,7 @@ static int qemu_rdma_registration_stop(QEMUFile *f,
>      CHECK_ERROR_STATE();
>  
>      qemu_fflush(f);
> -    ret = qemu_rdma_drain_cq(f, rdma);
> +    ret = qemu_rdma_drain_cq(rdma);
>  
>      if (ret < 0) {
>          goto err;

Reviewed-by: Leonardo Bras <leobras@redhat.com>

Patch

diff --git a/migration/rdma.c b/migration/rdma.c
index 074456f9df..416dec00a2 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -2027,7 +2027,7 @@ static int qemu_rdma_exchange_recv(RDMAContext *rdma, RDMAControlHeader *head,
  * If we're using dynamic registration on the dest-side, we have to
  * send a registration command first.
  */
-static int qemu_rdma_write_one(QEMUFile *f, RDMAContext *rdma,
+static int qemu_rdma_write_one(RDMAContext *rdma,
                                int current_index, uint64_t current_addr,
                                uint64_t length)
 {
@@ -2263,7 +2263,7 @@ retry:
  * We support sending out multiple chunks at the same time.
  * Not all of them need to get signaled in the completion queue.
  */
-static int qemu_rdma_write_flush(QEMUFile *f, RDMAContext *rdma)
+static int qemu_rdma_write_flush(RDMAContext *rdma)
 {
     int ret;
 
@@ -2271,7 +2271,7 @@ static int qemu_rdma_write_flush(QEMUFile *f, RDMAContext *rdma)
         return 0;
     }
 
-    ret = qemu_rdma_write_one(f, rdma,
+    ret = qemu_rdma_write_one(rdma,
             rdma->current_index, rdma->current_addr, rdma->current_length);
 
     if (ret < 0) {
@@ -2344,7 +2344,7 @@ static inline int qemu_rdma_buffer_mergable(RDMAContext *rdma,
  *    and only require that a batch gets acknowledged in the completion
  *    queue instead of each individual chunk.
  */
-static int qemu_rdma_write(QEMUFile *f, RDMAContext *rdma,
+static int qemu_rdma_write(RDMAContext *rdma,
                            uint64_t block_offset, uint64_t offset,
                            uint64_t len)
 {
@@ -2355,7 +2355,7 @@ static int qemu_rdma_write(QEMUFile *f, RDMAContext *rdma,
 
     /* If we cannot merge it, we flush the current buffer first. */
     if (!qemu_rdma_buffer_mergable(rdma, current_addr, len)) {
-        ret = qemu_rdma_write_flush(f, rdma);
+        ret = qemu_rdma_write_flush(rdma);
         if (ret) {
             return ret;
         }
@@ -2377,7 +2377,7 @@ static int qemu_rdma_write(QEMUFile *f, RDMAContext *rdma,
 
     /* flush it if buffer is too large */
     if (rdma->current_length >= RDMA_MERGE_MAX) {
-        return qemu_rdma_write_flush(f, rdma);
+        return qemu_rdma_write_flush(rdma);
     }
 
     return 0;
@@ -2798,7 +2798,6 @@ static ssize_t qio_channel_rdma_writev(QIOChannel *ioc,
                                        Error **errp)
 {
     QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
-    QEMUFile *f = rioc->file;
     RDMAContext *rdma;
     int ret;
     ssize_t done = 0;
@@ -2819,7 +2818,7 @@ static ssize_t qio_channel_rdma_writev(QIOChannel *ioc,
      * Push out any writes that
      * we're queued up for VM's ram.
      */
-    ret = qemu_rdma_write_flush(f, rdma);
+    ret = qemu_rdma_write_flush(rdma);
     if (ret < 0) {
         rdma->error_state = ret;
         error_setg(errp, "qemu_rdma_write_flush returned %d", ret);
@@ -2958,11 +2957,11 @@ static ssize_t qio_channel_rdma_readv(QIOChannel *ioc,
 /*
  * Block until all the outstanding chunks have been delivered by the hardware.
  */
-static int qemu_rdma_drain_cq(QEMUFile *f, RDMAContext *rdma)
+static int qemu_rdma_drain_cq(RDMAContext *rdma)
 {
     int ret;
 
-    if (qemu_rdma_write_flush(f, rdma) < 0) {
+    if (qemu_rdma_write_flush(rdma) < 0) {
         return -EIO;
     }
 
@@ -3272,7 +3271,7 @@ static size_t qemu_rdma_save_page(QEMUFile *f,
      * is full, or the page doesn't belong to the current chunk,
      * an actual RDMA write will occur and a new chunk will be formed.
      */
-    ret = qemu_rdma_write(f, rdma, block_offset, offset, size);
+    ret = qemu_rdma_write(rdma, block_offset, offset, size);
     if (ret < 0) {
         error_report("rdma migration: write error! %d", ret);
         goto err;
@@ -3927,7 +3926,7 @@ static int qemu_rdma_registration_stop(QEMUFile *f,
     CHECK_ERROR_STATE();
 
     qemu_fflush(f);
-    ret = qemu_rdma_drain_cq(f, rdma);
+    ret = qemu_rdma_drain_cq(rdma);
 
     if (ret < 0) {
         goto err;
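
With the patch applied, the internal RDMA write path threads only the RDMAContext; the QEMUFile survives just at the entry points that genuinely use it (qemu_rdma_save_page() and qemu_rdma_registration_stop(), which still flushes f before draining the completion queue), while qio_channel_rdma_writev() drops its local QEMUFile entirely. Collected from the hunks above, the converted prototypes are:

    static int qemu_rdma_write_one(RDMAContext *rdma, int current_index,
                                   uint64_t current_addr, uint64_t length);
    static int qemu_rdma_write_flush(RDMAContext *rdma);
    static int qemu_rdma_write(RDMAContext *rdma, uint64_t block_offset,
                               uint64_t offset, uint64_t len);
    static int qemu_rdma_drain_cq(RDMAContext *rdma);

Parameters like these can also be found mechanically: GCC and Clang diagnose them under -Wunused-parameter (part of -Wextra), although projects with many fixed callback signatures often cannot enable that warning tree-wide.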