[v13,11/13] virtio-gpu: Handle resource blob commands

Message ID 20240527030233.3775514-12-dmitry.osipenko@collabora.com
State New
Series Support blob memory and venus on qemu

Commit Message

Dmitry Osipenko May 27, 2024, 3:02 a.m. UTC
From: Antonio Caggiano <antonio.caggiano@collabora.com>

Support BLOB resource creation, mapping and unmapping by calling the
new stable virglrenderer 0.10 interface. This is enabled only when the
interface is available and the blob config is set, e.g.
-device virtio-vga-gl,blob=true

Signed-off-by: Antonio Caggiano <antonio.caggiano@collabora.com>
Signed-off-by: Xenia Ragiadakou <xenia.ragiadakou@amd.com>
Signed-off-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
---
 hw/display/virtio-gpu-virgl.c  | 320 ++++++++++++++++++++++++++++++++-
 hw/display/virtio-gpu.c        |   4 +-
 include/hw/virtio/virtio-gpu.h |   2 +
 3 files changed, 322 insertions(+), 4 deletions(-)

Comments

Akihiko Odaki June 2, 2024, 5:45 a.m. UTC | #1
On 2024/05/27 12:02, Dmitry Osipenko wrote:
> From: Antonio Caggiano <antonio.caggiano@collabora.com>
> 
> Support BLOB resource creation, mapping and unmapping by calling the
> new stable virglrenderer 0.10 interface. This is enabled only when the
> interface is available and the blob config is set, e.g.
> -device virtio-vga-gl,blob=true
> 
> Signed-off-by: Antonio Caggiano <antonio.caggiano@collabora.com>
> Signed-off-by: Xenia Ragiadakou <xenia.ragiadakou@amd.com>
> Signed-off-by: Huang Rui <ray.huang@amd.com>
> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
> ---
>   hw/display/virtio-gpu-virgl.c  | 320 ++++++++++++++++++++++++++++++++-
>   hw/display/virtio-gpu.c        |   4 +-
>   include/hw/virtio/virtio-gpu.h |   2 +
>   3 files changed, 322 insertions(+), 4 deletions(-)
> 
> diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c
> index 7f45b4fa5fd7..0c73d9ba65f9 100644
> --- a/hw/display/virtio-gpu-virgl.c
> +++ b/hw/display/virtio-gpu-virgl.c
> @@ -26,6 +26,7 @@
>   
>   struct virtio_gpu_virgl_resource {
>       struct virtio_gpu_simple_resource base;
> +    MemoryRegion *mr;
>   };
>   
>   static struct virtio_gpu_virgl_resource *
> @@ -49,6 +50,159 @@ virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
>   }
>   #endif
>   
> +#ifdef HAVE_VIRGL_RESOURCE_BLOB
> +typedef enum {
> +    HOSTMEM_MR_MAPPED,
> +    HOSTMEM_MR_UNMAPPING,
> +    HOSTMEM_MR_FINISH_UNMAPPING,
> +} HostmemMRState;
> +
> +struct virtio_gpu_virgl_hostmem_region {
> +    MemoryRegion mr;
> +    struct VirtIOGPU *g;
> +    HostmemMRState state;
> +};
> +
> +static struct virtio_gpu_virgl_hostmem_region *
> +to_hostmem_region(MemoryRegion *mr)
> +{
> +    return container_of(mr, struct virtio_gpu_virgl_hostmem_region, mr);
> +}
> +
> +static void virtio_gpu_virgl_resume_cmdq_bh(void *opaque)
> +{
> +    VirtIOGPU *g = opaque;
> +
> +    virtio_gpu_process_cmdq(g);
> +}
> +
> +static void virtio_gpu_virgl_hostmem_region_free(void *obj)
> +{
> +    MemoryRegion *mr = MEMORY_REGION(obj);
> +    struct virtio_gpu_virgl_hostmem_region *vmr;
> +    VirtIOGPUBase *b;
> +    VirtIOGPUGL *gl;
> +
> +    vmr = to_hostmem_region(mr);
> +    vmr->state = HOSTMEM_MR_FINISH_UNMAPPING;
> +
> +    b = VIRTIO_GPU_BASE(vmr->g);
> +    b->renderer_blocked--;
> +
> +    /*
> +     * memory_region_unref() is executed from the RCU thread context, while
> +     * virglrenderer works only on the main-loop thread that holds the GL
> +     * context.
> +     */
> +    gl = VIRTIO_GPU_GL(vmr->g);
> +    qemu_bh_schedule(gl->cmdq_resume_bh);
> +}
> +
> +static int
> +virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g,
> +                                   struct virtio_gpu_virgl_resource *res,
> +                                   uint64_t offset)
> +{
> +    struct virtio_gpu_virgl_hostmem_region *vmr;
> +    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
> +    MemoryRegion *mr;
> +    uint64_t size;
> +    void *data;
> +    int ret;
> +
> +    if (!virtio_gpu_hostmem_enabled(b->conf)) {
> +        qemu_log_mask(LOG_GUEST_ERROR, "%s: hostmem disabled\n", __func__);
> +        return -EOPNOTSUPP;
> +    }
> +
> +    ret = virgl_renderer_resource_map(res->base.resource_id, &data, &size);
> +    if (ret) {
> +        qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map virgl resource: %s\n",
> +                      __func__, strerror(-ret));
> +        return ret;
> +    }
> +
> +    vmr = g_new0(struct virtio_gpu_virgl_hostmem_region, 1);
> +    vmr->g = g;
> +
> +    mr = &vmr->mr;
> +    memory_region_init_ram_ptr(mr, OBJECT(mr), "blob", size, data);
> +    memory_region_add_subregion(&b->hostmem, offset, mr);
> +    memory_region_set_enabled(mr, true);
> +
> +    /*
> +     * MR could outlive the resource if MR's reference is held outside of
> +     * virtio-gpu. To prevent unmapping the resource while the MR is alive,
> +     * and thus making the data pointer invalid, we block virtio-gpu
> +     * command processing until the MR is fully unreferenced and freed.
> +     */
> +    OBJECT(mr)->free = virtio_gpu_virgl_hostmem_region_free;
> +
> +    res->mr = mr;
> +
> +    return 0;
> +}
> +
> +static int
> +virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g,
> +                                     struct virtio_gpu_virgl_resource *res,
> +                                     bool *cmd_suspended)
> +{
> +    struct virtio_gpu_virgl_hostmem_region *vmr;
> +    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
> +    MemoryRegion *mr = res->mr;
> +    int ret;
> +
> +    if (!mr) {
> +        return 0;
> +    }
> +
> +    vmr = to_hostmem_region(res->mr);
> +
> +    /*
> +     * Perform async unmapping in 3 steps:
> +     *
> +     * 1. Begin async unmapping with memory_region_del_subregion()
> +     *    and suspend/block cmd processing.
> +     * 2. Wait for res->mr to be freed and cmd processing resumed
> +     *    asynchronously by virtio_gpu_virgl_hostmem_region_free().
> +     * 3. Finish the unmapping with final virgl_renderer_resource_unmap().
> +     */
> +
> +    switch (vmr->state) {
> +    case HOSTMEM_MR_MAPPED:
> +        vmr->state = HOSTMEM_MR_UNMAPPING;
> +
> +        *cmd_suspended = true;
> +
> +        /* renderer will be unblocked once MR is freed */
> +        b->renderer_blocked++;
> +
> +        /* memory region owns the res->mr object and frees it by itself */
> +        memory_region_set_enabled(mr, false);
> +        memory_region_del_subregion(&b->hostmem, mr);
> +        object_unparent(OBJECT(mr));
> +        break;
> +    case HOSTMEM_MR_FINISH_UNMAPPING:
> +        ret = virgl_renderer_resource_unmap(res->base.resource_id);
> +        if (ret) {
> +            qemu_log_mask(LOG_GUEST_ERROR,
> +                          "%s: failed to unmap virgl resource: %s\n",
> +                          __func__, strerror(-ret));
> +            return ret;
> +        }
> +        res->mr = NULL;
> +        g_free(vmr);
> +        break;
> +    case HOSTMEM_MR_UNMAPPING:
> +        *cmd_suspended = true;

This code path should be unreachable since the command processing is 
blocked while unmapping.

> +        break;
> +    }
> +
> +    return 0;
> +}
> +#endif /* HAVE_VIRGL_RESOURCE_BLOB */
> +
>   static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
>                                            struct virtio_gpu_ctrl_command *cmd)
>   {
> @@ -146,7 +300,8 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
>   }
>   
>   static void virgl_cmd_resource_unref(VirtIOGPU *g,
> -                                     struct virtio_gpu_ctrl_command *cmd)
> +                                     struct virtio_gpu_ctrl_command *cmd,
> +                                     bool *cmd_suspended)
>   {
>       struct virtio_gpu_resource_unref unref;
>       struct virtio_gpu_virgl_resource *res;
> @@ -164,6 +319,16 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g,
>           return;
>       }
>   
> +#ifdef HAVE_VIRGL_RESOURCE_BLOB
> +    if (virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended)) {
> +        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
> +        return;
> +    }
> +    if (*cmd_suspended) {
> +        return;
> +    }
> +#endif
> +
>       virgl_renderer_resource_detach_iov(unref.resource_id,
>                                          &res_iovs,
>                                          &num_iovs);
> @@ -514,6 +679,133 @@ static void virgl_cmd_get_capset(VirtIOGPU *g,
>   }
>   
>   #ifdef HAVE_VIRGL_RESOURCE_BLOB
> +static void virgl_cmd_resource_create_blob(VirtIOGPU *g,
> +                                           struct virtio_gpu_ctrl_command *cmd)
> +{
> +    struct virgl_renderer_resource_create_blob_args virgl_args = { 0 };
> +    struct virtio_gpu_resource_create_blob cblob;
> +    struct virtio_gpu_virgl_resource *res;
> +    int ret;
> +
> +    if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
> +        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
> +        return;
> +    }
> +
> +    VIRTIO_GPU_FILL_CMD(cblob);
> +    virtio_gpu_create_blob_bswap(&cblob);
> +    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);
> +
> +    if (cblob.resource_id == 0) {
> +        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
> +                      __func__);
> +        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
> +        return;
> +    }
> +
> +    res = virtio_gpu_virgl_find_resource(g, cblob.resource_id);
> +    if (res) {
> +        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
> +                      __func__, cblob.resource_id);
> +        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
> +        return;
> +    }
> +
> +    res = g_new0(struct virtio_gpu_virgl_resource, 1);
> +    res->base.resource_id = cblob.resource_id;
> +    res->base.blob_size = cblob.size;
> +    res->base.dmabuf_fd = -1;
> +
> +    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
> +        ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
> +                                            cmd, &res->base.addrs,
> +                                            &res->base.iov, &res->base.iov_cnt);
> +        if (!ret) {
> +            g_free(res);

As noted for an earlier version:
 > Use g_autofree instead of writing duplicate g_free() calls. See
 > docs/devel/style.rst for details.
Dmitry Osipenko June 3, 2024, 5:32 a.m. UTC | #2
On 6/2/24 08:45, Akihiko Odaki wrote:
...
>> +    case HOSTMEM_MR_FINISH_UNMAPPING:
>> +        ret = virgl_renderer_resource_unmap(res->base.resource_id);
>> +        if (ret) {
>> +            qemu_log_mask(LOG_GUEST_ERROR,
>> +                          "%s: failed to unmap virgl resource: %s\n",
>> +                          __func__, strerror(-ret));
>> +            return ret;
>> +        }
>> +        res->mr = NULL;
>> +        g_free(vmr);
>> +        break;
>> +    case HOSTMEM_MR_UNMAPPING:
>> +        *cmd_suspended = true;
> 
> This code path should be unreachable since the command processing is
> blocked while unmapping.

Will change to abort()

>> +    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
>> +        ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
>> +                                            cmd, &res->base.addrs,
>> +                                            &res->base.iov, &res->base.iov_cnt);
>> +        if (!ret) {
>> +            g_free(res);
> 
> As noted for an earlier version:
>> Use g_autofree instead of writing duplicate g_free() calls. See
>> docs/devel/style.rst for details.

The g_autofree isn't appropriate for this code. It's intended for a
temporary variable that should be freed on all code paths. That is not
the case here: the res variable isn't temporary and must not be freed
on success.
Akihiko Odaki June 3, 2024, 5:43 a.m. UTC | #3
On 2024/06/03 14:32, Dmitry Osipenko wrote:
> On 6/2/24 08:45, Akihiko Odaki wrote:
> ...
>>> +    case HOSTMEM_MR_FINISH_UNMAPPING:
>>> +        ret = virgl_renderer_resource_unmap(res->base.resource_id);
>>> +        if (ret) {
>>> +            qemu_log_mask(LOG_GUEST_ERROR,
>>> +                          "%s: failed to unmap virgl resource: %s\n",
>>> +                          __func__, strerror(-ret));
>>> +            return ret;
>>> +        }
>>> +        res->mr = NULL;
>>> +        g_free(vmr);
>>> +        break;
>>> +    case HOSTMEM_MR_UNMAPPING:
>>> +        *cmd_suspended = true;
>>
>> This code path should be unreachable since the command processing is
>> blocked while unmapping.
> 
> Will change to abort()

I don't think an abort() call is needed here. You can just do what you do 
for HOSTMEM_MR_MAPPED; the reference counter will automatically catch 
the double-free condition and abort.
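
A minimal sketch of that suggestion (an illustration, not the merged
code), assuming the variables from virtio_gpu_virgl_unmap_resource_blob()
quoted above: fold HOSTMEM_MR_UNMAPPING into the HOSTMEM_MR_MAPPED
handling, so that a hypothetical re-entry ends up calling
object_unparent() on an already-unparented MemoryRegion, which the
object reference counting catches with an assertion:

    switch (vmr->state) {
    case HOSTMEM_MR_MAPPED:
    case HOSTMEM_MR_UNMAPPING:
        vmr->state = HOSTMEM_MR_UNMAPPING;
        *cmd_suspended = true;

        /* renderer will be unblocked once MR is freed */
        b->renderer_blocked++;

        memory_region_set_enabled(mr, false);
        memory_region_del_subregion(&b->hostmem, mr);
        object_unparent(OBJECT(mr));
        break;
    case HOSTMEM_MR_FINISH_UNMAPPING:
        /* unchanged from the patch */
        break;
    }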

> 
>>> +    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
>>> +        ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
>>> +                                            cmd, &res->base.addrs,
>>> +                                            &res->base.iov, &res->base.iov_cnt);
>>> +        if (!ret) {
>>> +            g_free(res);
>>
>> As noted for an earlier version:
>>> Use g_autofree instead of writing duplicate g_free() calls. See
>>> docs/devel/style.rst for details.
> 
> The g_autofree isn't appropriate for this code. It's intended for a
> temporary variable that should be freed on all code paths. That is not
> the case here: the res variable isn't temporary and must not be freed
> on success.

You can assign NULL to res after QTAILQ_INSERT_HEAD(). See 
rutabaga_cmd_resource_create_blob() for an example.

Usually g_steal_pointer() would be used in such a situation, but 
unfortunately it is not possible in this case due to how the 
QTAILQ_INSERT_HEAD() macro is implemented.
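
A rough sketch of that pattern applied to virgl_cmd_resource_create_blob()
(illustrative only; the elided parts are unchanged from the patch):

    static void virgl_cmd_resource_create_blob(VirtIOGPU *g,
                                               struct virtio_gpu_ctrl_command *cmd)
    {
        g_autofree struct virtio_gpu_virgl_resource *res = NULL;
        ...
        res = g_new0(struct virtio_gpu_virgl_resource, 1);
        ...
        ret = virgl_renderer_resource_create_blob(&virgl_args);
        if (ret) {
            ...
            return; /* g_autofree releases res on every early return */
        }

        QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);
        res = NULL; /* reslist owns res now; g_autofree becomes a no-op */
    }

The explicit NULL assignment stands in for g_steal_pointer(&res), which
cannot be passed into the macro because QTAILQ_INSERT_HEAD() evaluates
its element argument more than once.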

Patch

diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c
index 7f45b4fa5fd7..0c73d9ba65f9 100644
--- a/hw/display/virtio-gpu-virgl.c
+++ b/hw/display/virtio-gpu-virgl.c
@@ -26,6 +26,7 @@ 
 
 struct virtio_gpu_virgl_resource {
     struct virtio_gpu_simple_resource base;
+    MemoryRegion *mr;
 };
 
 static struct virtio_gpu_virgl_resource *
@@ -49,6 +50,159 @@ virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
 }
 #endif
 
+#ifdef HAVE_VIRGL_RESOURCE_BLOB
+typedef enum {
+    HOSTMEM_MR_MAPPED,
+    HOSTMEM_MR_UNMAPPING,
+    HOSTMEM_MR_FINISH_UNMAPPING,
+} HostmemMRState;
+
+struct virtio_gpu_virgl_hostmem_region {
+    MemoryRegion mr;
+    struct VirtIOGPU *g;
+    HostmemMRState state;
+};
+
+static struct virtio_gpu_virgl_hostmem_region *
+to_hostmem_region(MemoryRegion *mr)
+{
+    return container_of(mr, struct virtio_gpu_virgl_hostmem_region, mr);
+}
+
+static void virtio_gpu_virgl_resume_cmdq_bh(void *opaque)
+{
+    VirtIOGPU *g = opaque;
+
+    virtio_gpu_process_cmdq(g);
+}
+
+static void virtio_gpu_virgl_hostmem_region_free(void *obj)
+{
+    MemoryRegion *mr = MEMORY_REGION(obj);
+    struct virtio_gpu_virgl_hostmem_region *vmr;
+    VirtIOGPUBase *b;
+    VirtIOGPUGL *gl;
+
+    vmr = to_hostmem_region(mr);
+    vmr->state = HOSTMEM_MR_FINISH_UNMAPPING;
+
+    b = VIRTIO_GPU_BASE(vmr->g);
+    b->renderer_blocked--;
+
+    /*
+     * memory_region_unref() is executed from the RCU thread context, while
+     * virglrenderer works only on the main-loop thread that holds the GL
+     * context.
+     */
+    gl = VIRTIO_GPU_GL(vmr->g);
+    qemu_bh_schedule(gl->cmdq_resume_bh);
+}
+
+static int
+virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g,
+                                   struct virtio_gpu_virgl_resource *res,
+                                   uint64_t offset)
+{
+    struct virtio_gpu_virgl_hostmem_region *vmr;
+    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
+    MemoryRegion *mr;
+    uint64_t size;
+    void *data;
+    int ret;
+
+    if (!virtio_gpu_hostmem_enabled(b->conf)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: hostmem disabled\n", __func__);
+        return -EOPNOTSUPP;
+    }
+
+    ret = virgl_renderer_resource_map(res->base.resource_id, &data, &size);
+    if (ret) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map virgl resource: %s\n",
+                      __func__, strerror(-ret));
+        return ret;
+    }
+
+    vmr = g_new0(struct virtio_gpu_virgl_hostmem_region, 1);
+    vmr->g = g;
+
+    mr = &vmr->mr;
+    memory_region_init_ram_ptr(mr, OBJECT(mr), "blob", size, data);
+    memory_region_add_subregion(&b->hostmem, offset, mr);
+    memory_region_set_enabled(mr, true);
+
+    /*
+     * MR could outlive the resource if MR's reference is held outside of
+     * virtio-gpu. To prevent unmapping the resource while the MR is alive,
+     * and thus making the data pointer invalid, we block virtio-gpu
+     * command processing until the MR is fully unreferenced and freed.
+     */
+    OBJECT(mr)->free = virtio_gpu_virgl_hostmem_region_free;
+
+    res->mr = mr;
+
+    return 0;
+}
+
+static int
+virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g,
+                                     struct virtio_gpu_virgl_resource *res,
+                                     bool *cmd_suspended)
+{
+    struct virtio_gpu_virgl_hostmem_region *vmr;
+    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
+    MemoryRegion *mr = res->mr;
+    int ret;
+
+    if (!mr) {
+        return 0;
+    }
+
+    vmr = to_hostmem_region(res->mr);
+
+    /*
+     * Perform async unmapping in 3 steps:
+     *
+     * 1. Begin async unmapping with memory_region_del_subregion()
+     *    and suspend/block cmd processing.
+     * 2. Wait for res->mr to be freed and cmd processing resumed
+     *    asynchronously by virtio_gpu_virgl_hostmem_region_free().
+     * 3. Finish the unmapping with final virgl_renderer_resource_unmap().
+     */
+
+    switch (vmr->state) {
+    case HOSTMEM_MR_MAPPED:
+        vmr->state = HOSTMEM_MR_UNMAPPING;
+
+        *cmd_suspended = true;
+
+        /* renderer will be unblocked once MR is freed */
+        b->renderer_blocked++;
+
+        /* memory region owns the res->mr object and frees it by itself */
+        memory_region_set_enabled(mr, false);
+        memory_region_del_subregion(&b->hostmem, mr);
+        object_unparent(OBJECT(mr));
+        break;
+    case HOSTMEM_MR_FINISH_UNMAPPING:
+        ret = virgl_renderer_resource_unmap(res->base.resource_id);
+        if (ret) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "%s: failed to unmap virgl resource: %s\n",
+                          __func__, strerror(-ret));
+            return ret;
+        }
+        res->mr = NULL;
+        g_free(vmr);
+        break;
+    case HOSTMEM_MR_UNMAPPING:
+        *cmd_suspended = true;
+        break;
+    }
+
+    return 0;
+}
+#endif /* HAVE_VIRGL_RESOURCE_BLOB */
+
 static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
 {
@@ -146,7 +300,8 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
 }
 
 static void virgl_cmd_resource_unref(VirtIOGPU *g,
-                                     struct virtio_gpu_ctrl_command *cmd)
+                                     struct virtio_gpu_ctrl_command *cmd,
+                                     bool *cmd_suspended)
 {
     struct virtio_gpu_resource_unref unref;
     struct virtio_gpu_virgl_resource *res;
@@ -164,6 +319,16 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g,
         return;
     }
 
+#ifdef HAVE_VIRGL_RESOURCE_BLOB
+    if (virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended)) {
+        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+        return;
+    }
+    if (*cmd_suspended) {
+        return;
+    }
+#endif
+
     virgl_renderer_resource_detach_iov(unref.resource_id,
                                        &res_iovs,
                                        &num_iovs);
@@ -514,6 +679,133 @@ static void virgl_cmd_get_capset(VirtIOGPU *g,
 }
 
 #ifdef HAVE_VIRGL_RESOURCE_BLOB
+static void virgl_cmd_resource_create_blob(VirtIOGPU *g,
+                                           struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virgl_renderer_resource_create_blob_args virgl_args = { 0 };
+    struct virtio_gpu_resource_create_blob cblob;
+    struct virtio_gpu_virgl_resource *res;
+    int ret;
+
+    if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+        return;
+    }
+
+    VIRTIO_GPU_FILL_CMD(cblob);
+    virtio_gpu_create_blob_bswap(&cblob);
+    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);
+
+    if (cblob.resource_id == 0) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
+                      __func__);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
+    }
+
+    res = virtio_gpu_virgl_find_resource(g, cblob.resource_id);
+    if (res) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
+                      __func__, cblob.resource_id);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
+    }
+
+    res = g_new0(struct virtio_gpu_virgl_resource, 1);
+    res->base.resource_id = cblob.resource_id;
+    res->base.blob_size = cblob.size;
+    res->base.dmabuf_fd = -1;
+
+    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
+        ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
+                                            cmd, &res->base.addrs,
+                                            &res->base.iov, &res->base.iov_cnt);
+        if (!ret) {
+            g_free(res);
+            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+            return;
+        }
+    }
+
+    virgl_args.res_handle = cblob.resource_id;
+    virgl_args.ctx_id = cblob.hdr.ctx_id;
+    virgl_args.blob_mem = cblob.blob_mem;
+    virgl_args.blob_id = cblob.blob_id;
+    virgl_args.blob_flags = cblob.blob_flags;
+    virgl_args.size = cblob.size;
+    virgl_args.iovecs = res->base.iov;
+    virgl_args.num_iovs = res->base.iov_cnt;
+
+    ret = virgl_renderer_resource_create_blob(&virgl_args);
+    if (ret) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n",
+                      __func__, strerror(-ret));
+        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+        virtio_gpu_cleanup_mapping(g, &res->base);
+        g_free(res);
+        return;
+    }
+
+    QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);
+}
+
+static void virgl_cmd_resource_map_blob(VirtIOGPU *g,
+                                        struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_resource_map_blob mblob;
+    struct virtio_gpu_virgl_resource *res;
+    struct virtio_gpu_resp_map_info resp;
+    int ret;
+
+    VIRTIO_GPU_FILL_CMD(mblob);
+    virtio_gpu_map_blob_bswap(&mblob);
+
+    res = virtio_gpu_virgl_find_resource(g, mblob.resource_id);
+    if (!res) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
+                      __func__, mblob.resource_id);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
+    }
+
+    ret = virtio_gpu_virgl_map_resource_blob(g, res, mblob.offset);
+    if (ret) {
+        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+        return;
+    }
+
+    memset(&resp, 0, sizeof(resp));
+    resp.hdr.type = VIRTIO_GPU_RESP_OK_MAP_INFO;
+    virgl_renderer_resource_get_map_info(mblob.resource_id, &resp.map_info);
+    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
+}
+
+static void virgl_cmd_resource_unmap_blob(VirtIOGPU *g,
+                                          struct virtio_gpu_ctrl_command *cmd,
+                                          bool *cmd_suspended)
+{
+    struct virtio_gpu_resource_unmap_blob ublob;
+    struct virtio_gpu_virgl_resource *res;
+    int ret;
+
+    VIRTIO_GPU_FILL_CMD(ublob);
+    virtio_gpu_unmap_blob_bswap(&ublob);
+
+    res = virtio_gpu_virgl_find_resource(g, ublob.resource_id);
+    if (!res) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
+                      __func__, ublob.resource_id);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
+    }
+
+    ret = virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended);
+    if (ret) {
+        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+        return;
+    }
+}
+
 static void virgl_cmd_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
 {
@@ -616,6 +908,8 @@ static void virgl_cmd_set_scanout_blob(VirtIOGPU *g,
 void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                       struct virtio_gpu_ctrl_command *cmd)
 {
+    bool cmd_suspended = false;
+
     VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
 
     virgl_renderer_force_ctx_0();
@@ -657,7 +951,7 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
         virgl_cmd_resource_flush(g, cmd);
         break;
     case VIRTIO_GPU_CMD_RESOURCE_UNREF:
-        virgl_cmd_resource_unref(g, cmd);
+        virgl_cmd_resource_unref(g, cmd, &cmd_suspended);
         break;
     case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
         /* TODO add security */
@@ -680,6 +974,15 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
         virtio_gpu_get_edid(g, cmd);
         break;
 #ifdef HAVE_VIRGL_RESOURCE_BLOB
+    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
+        virgl_cmd_resource_create_blob(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB:
+        virgl_cmd_resource_map_blob(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB:
+        virgl_cmd_resource_unmap_blob(g, cmd, &cmd_suspended);
+        break;
     case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
         virgl_cmd_set_scanout_blob(g, cmd);
         break;
@@ -689,7 +992,7 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
         break;
     }
 
-    if (cmd->finished) {
+    if (cmd_suspended || cmd->finished) {
         return;
     }
     if (cmd->error) {
@@ -858,6 +1161,13 @@ int virtio_gpu_virgl_init(VirtIOGPU *g)
         timer_mod(gl->print_stats,
                   qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
     }
+
+#ifdef HAVE_VIRGL_RESOURCE_BLOB
+    gl->cmdq_resume_bh = aio_bh_new(qemu_get_aio_context(),
+                                    virtio_gpu_virgl_resume_cmdq_bh,
+                                    g);
+#endif
+
     return 0;
 }
 
@@ -875,6 +1185,10 @@ void virtio_gpu_virgl_deinit(VirtIOGPU *g)
 {
     VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);
 
+#ifdef HAVE_VIRGL_RESOURCE_BLOB
+    qemu_bh_delete(gl->cmdq_resume_bh);
+#endif
+
     if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
         timer_free(gl->print_stats);
     }
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index 8e05a2d0c7c5..d423bc9a7bf5 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -1496,10 +1496,12 @@ void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
             return;
         }
 
+#ifndef HAVE_VIRGL_RESOURCE_BLOB
         if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
-            error_setg(errp, "blobs and virgl are not compatible (yet)");
+            error_setg(errp, "old virglrenderer, blob resources unsupported");
             return;
         }
+#endif
     }
 
     if (!virtio_gpu_base_device_realize(qdev,
diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h
index 0bfdfd91db46..368f96a813c9 100644
--- a/include/hw/virtio/virtio-gpu.h
+++ b/include/hw/virtio/virtio-gpu.h
@@ -239,6 +239,8 @@ struct VirtIOGPUGL {
 
     QEMUTimer *fence_poll;
     QEMUTimer *print_stats;
+
+    QEMUBH *cmdq_resume_bh;
 };
 
 struct VhostUserGPU {