
[v12,13/19] multi-process: add proxy communication functions

Message ID: 75c61eaf356343f207ad8d4e244cd3f9a36de792.1606853298.git.jag.raman@oracle.com
State: New
Series: Initial support for multi-process Qemu

Commit Message

Jag Raman Dec. 1, 2020, 8:22 p.m. UTC
From: Elena Ufimtseva <elena.ufimtseva@oracle.com>

Signed-off-by: Elena Ufimtseva <elena.ufimtseva@oracle.com>
Signed-off-by: Jagannathan Raman <jag.raman@oracle.com>
Signed-off-by: John G Johnson <john.g.johnson@oracle.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/hw/remote/mpqemu-link.h |  4 ++++
 hw/remote/mpqemu-link.c         | 38 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 42 insertions(+)

Comments

Marc-André Lureau Dec. 8, 2020, 12:39 p.m. UTC | #1
Hi

On Wed, Dec 2, 2020 at 12:23 AM Jagannathan Raman <jag.raman@oracle.com>
wrote:

> From: Elena Ufimtseva <elena.ufimtseva@oracle.com>
>
> Signed-off-by: Elena Ufimtseva <elena.ufimtseva@oracle.com>
> Signed-off-by: Jagannathan Raman <jag.raman@oracle.com>
> Signed-off-by: John G Johnson <john.g.johnson@oracle.com>
> Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
> ---
>  include/hw/remote/mpqemu-link.h |  4 ++++
>  hw/remote/mpqemu-link.c         | 38 ++++++++++++++++++++++++++++++++++++++
>  2 files changed, 42 insertions(+)
>
> diff --git a/include/hw/remote/mpqemu-link.h b/include/hw/remote/mpqemu-link.h
> index 070ac77..cee9468 100644
> --- a/include/hw/remote/mpqemu-link.h
> +++ b/include/hw/remote/mpqemu-link.h
> @@ -15,6 +15,8 @@
>  #include "qemu/thread.h"
>  #include "io/channel.h"
>  #include "exec/hwaddr.h"
> +#include "io/channel-socket.h"
> +#include "hw/remote/proxy.h"
>
>  #define REMOTE_MAX_FDS 8
>
> @@ -65,6 +67,8 @@ typedef struct {
>      int num_fds;
>  } MPQemuMsg;
>
> +uint64_t mpqemu_msg_send_and_await_reply(MPQemuMsg *msg, PCIProxyDev *pdev,
> +                                         Error **errp);
>  void mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp);
>  void mpqemu_msg_recv(MPQemuMsg *msg, QIOChannel *ioc, Error **errp);
>
> diff --git a/hw/remote/mpqemu-link.c b/hw/remote/mpqemu-link.c
> index bbd9df3..18c8a54 100644
> --- a/hw/remote/mpqemu-link.c
> +++ b/hw/remote/mpqemu-link.c
> @@ -17,6 +17,7 @@
>  #include "qemu/iov.h"
>  #include "qemu/error-report.h"
>  #include "qemu/main-loop.h"
> +#include "io/channel.h"
>
>  /*
>   * Send message over the ioc QIOChannel.
> @@ -219,6 +220,43 @@ fail:
>      }
>  }
>
> +/*
> + * Called from VCPU thread in non-coroutine context.
>

You could check that precondition in code early on, presumably.

The comment could use some further description, like "Send @msg and wait
for a RET_MSG reply. Returns the associated u64 message code, or UINT64_MAX
on error."
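
For instance, something along these lines (untested; the qemu_in_coroutine()
check for the precondition mentioned above is my suggestion, not part of the
patch):

/*
 * Send @msg to the remote process and wait for a RET_MSG reply.
 * Called from a vCPU thread in non-coroutine context.
 *
 * Returns the 64-bit value carried by the reply, or UINT64_MAX on error.
 */

with, as the first statement of the function body:

    assert(!qemu_in_coroutine());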

> + */
> +uint64_t mpqemu_msg_send_and_await_reply(MPQemuMsg *msg, PCIProxyDev *pdev,
> +                                         Error **errp)
> +{
> +    MPQemuMsg msg_reply = {0};
> +    uint64_t ret = UINT64_MAX;
> +    Error *local_err = NULL;
> +
>

Should work with ERRP_GUARD instead
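
i.e. roughly (untested sketch):

    ERRP_GUARD();   /* *errp is now guaranteed non-NULL and safe to test */
    ...
    mpqemu_msg_send(msg, pdev->ioc, errp);
    if (*errp) {
        goto exit_send;
    }

which drops the local_err/error_propagate() boilerplate.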

> +    qemu_mutex_lock(&pdev->io_mutex);
>

Should work with QEMU_LOCK_GUARD
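
i.e. (untested):

    QEMU_LOCK_GUARD(&pdev->io_mutex);   /* unlocked automatically on all return paths */

which would also let the error paths simply return instead of jumping to
exit_send.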

> +    mpqemu_msg_send(msg, pdev->ioc, &local_err);
> +    if (local_err) {
> +        error_propagate(errp, local_err);
> +        goto exit_send;
> +    }
> +
>

By making mpqemu_msg_send() return true on success, this should then become
simply

if (!mpqemu_msg_send(msg, pdev->ioc, errp))
  return ret;
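
(and presumably the same treatment for mpqemu_msg_recv(); the prototypes
would then look roughly like this, which is my reading of the suggestion
rather than anything in this patch:)

bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp);
bool mpqemu_msg_recv(MPQemuMsg *msg, QIOChannel *ioc, Error **errp);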

> +    mpqemu_msg_recv(&msg_reply, pdev->ioc, &local_err);
> +    if (local_err) {
> +        error_propagate(errp, local_err);
> +        goto exit_send;
> +    }
> +
> +    if (!mpqemu_msg_valid(&msg_reply) || msg_reply.cmd != RET_MSG) {
> +        error_setg(errp, "ERROR: Invalid reply received for command %d",
> +                         msg->cmd);
> +        goto exit_send;
> +    } else {
>

That else is unneeded.

The function with automatic cleanups should be simpler.
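
Putting these together, a rough, untested sketch of how the function could
look with QEMU_LOCK_GUARD and bool-returning send/recv (at which point the
local Error */ERRP_GUARD dance is not needed at all):

uint64_t mpqemu_msg_send_and_await_reply(MPQemuMsg *msg, PCIProxyDev *pdev,
                                         Error **errp)
{
    MPQemuMsg msg_reply = {0};
    uint64_t ret = UINT64_MAX;

    /* Dropped automatically when the function returns. */
    QEMU_LOCK_GUARD(&pdev->io_mutex);

    if (!mpqemu_msg_send(msg, pdev->ioc, errp)) {
        return ret;
    }

    if (!mpqemu_msg_recv(&msg_reply, pdev->ioc, errp)) {
        return ret;
    }

    if (!mpqemu_msg_valid(&msg_reply) || msg_reply.cmd != RET_MSG) {
        error_setg(errp, "ERROR: Invalid reply received for command %d",
                   msg->cmd);
        return ret;
    }

    return msg_reply.data.u64;
}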

> +        ret = msg_reply.data.u64;
> +    }
> +
> + exit_send:
> +    qemu_mutex_unlock(&pdev->io_mutex);
> +
> +    return ret;
> +}
> +
>  bool mpqemu_msg_valid(MPQemuMsg *msg)
>  {
>      if (msg->cmd >= MPQEMU_CMD_MAX && msg->cmd < 0) {
> --
> 1.8.3.1
>
>

Patch

diff --git a/include/hw/remote/mpqemu-link.h b/include/hw/remote/mpqemu-link.h
index 070ac77..cee9468 100644
--- a/include/hw/remote/mpqemu-link.h
+++ b/include/hw/remote/mpqemu-link.h
@@ -15,6 +15,8 @@ 
 #include "qemu/thread.h"
 #include "io/channel.h"
 #include "exec/hwaddr.h"
+#include "io/channel-socket.h"
+#include "hw/remote/proxy.h"
 
 #define REMOTE_MAX_FDS 8
 
@@ -65,6 +67,8 @@  typedef struct {
     int num_fds;
 } MPQemuMsg;
 
+uint64_t mpqemu_msg_send_and_await_reply(MPQemuMsg *msg, PCIProxyDev *pdev,
+                                         Error **errp);
 void mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp);
 void mpqemu_msg_recv(MPQemuMsg *msg, QIOChannel *ioc, Error **errp);
 
diff --git a/hw/remote/mpqemu-link.c b/hw/remote/mpqemu-link.c
index bbd9df3..18c8a54 100644
--- a/hw/remote/mpqemu-link.c
+++ b/hw/remote/mpqemu-link.c
@@ -17,6 +17,7 @@ 
 #include "qemu/iov.h"
 #include "qemu/error-report.h"
 #include "qemu/main-loop.h"
+#include "io/channel.h"
 
 /*
  * Send message over the ioc QIOChannel.
@@ -219,6 +220,43 @@  fail:
     }
 }
 
+/*
+ * Called from VCPU thread in non-coroutine context.
+ */
+uint64_t mpqemu_msg_send_and_await_reply(MPQemuMsg *msg, PCIProxyDev *pdev,
+                                         Error **errp)
+{
+    MPQemuMsg msg_reply = {0};
+    uint64_t ret = UINT64_MAX;
+    Error *local_err = NULL;
+
+    qemu_mutex_lock(&pdev->io_mutex);
+    mpqemu_msg_send(msg, pdev->ioc, &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        goto exit_send;
+    }
+
+    mpqemu_msg_recv(&msg_reply, pdev->ioc, &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        goto exit_send;
+    }
+
+    if (!mpqemu_msg_valid(&msg_reply) || msg_reply.cmd != RET_MSG) {
+        error_setg(errp, "ERROR: Invalid reply received for command %d",
+                         msg->cmd);
+        goto exit_send;
+    } else {
+        ret = msg_reply.data.u64;
+    }
+
+ exit_send:
+    qemu_mutex_unlock(&pdev->io_mutex);
+
+    return ret;
+}
+
 bool mpqemu_msg_valid(MPQemuMsg *msg)
 {
     if (msg->cmd >= MPQEMU_CMD_MAX && msg->cmd < 0) {