
[6/8] virtio-gpu: v0.1 of the virtio based GPU code.

Message ID 1384926761-9962-7-git-send-email-airlied@gmail.com
State New

Commit Message

Dave Airlie Nov. 20, 2013, 5:52 a.m. UTC
From: Dave Airlie <airlied@redhat.com>

This is the basic virtio-gpu, which is:

multi-head capable,
ARGB cursor capable,
unaccelerated.

Signed-off-by: Dave Airlie <airlied@redhat.com>
---
 default-configs/x86_64-softmmu.mak |   1 +
 hw/display/Makefile.objs           |   2 +
 hw/display/virtgpu_hw.h            | 225 ++++++++++++++
 hw/display/virtio-gpu.c            | 606 +++++++++++++++++++++++++++++++++++++
 hw/virtio/virtio-pci.c             |  49 +++
 hw/virtio/virtio-pci.h             |  15 +
 include/hw/pci/pci.h               |   1 +
 include/hw/virtio/virtio-gpu.h     |  90 ++++++
 8 files changed, 989 insertions(+)
 create mode 100644 hw/display/virtgpu_hw.h
 create mode 100644 hw/display/virtio-gpu.c
 create mode 100644 include/hw/virtio/virtio-gpu.h

Comments

Gerd Hoffmann Nov. 20, 2013, 11:26 a.m. UTC | #1
On Wed, 2013-11-20 at 15:52 +1000, Dave Airlie wrote:
> From: Dave Airlie <airlied@redhat.com>
> 
> This is the basic virtio-gpu, which is:
> 
> multi-head capable,
> ARGB cursor capable,
> unaccelerated.

I'd like to see an overview of the design of the virtual hardware.
What is the purpose of the virtio commands?
What are the steps a guest is supposed to take to get something displayed
on the screen?

> +    res = calloc(1, sizeof(struct virtgpu_simple_resource));
> +    if (!res)
> +	return;

qemu uses glib memory allocation functions (i.e. g_malloc0 for this
one).  Also no need to check for NULL.

cheers,
  Gerd
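
A minimal sketch of the allocation change being suggested here, using the
resource-create handler from the patch below as the example: the glib
allocators (g_malloc0()/g_new0()) abort on allocation failure rather than
returning NULL, so the error path disappears entirely.

    static void virtgpu_resource_create_2d(VirtIOGPU *g,
                                           struct virtgpu_resource_create_2d *c2d)
    {
        pixman_format_code_t pformat;
        struct virtgpu_simple_resource *res;

        /* glib allocator: zero-initialised, aborts on OOM, no NULL check needed */
        res = g_new0(struct virtgpu_simple_resource, 1);

        res->width = c2d->width;
        res->height = c2d->height;
        res->format = c2d->format;
        res->resource_id = c2d->resource_id;

        pformat = get_pixman_format(c2d->format);
        res->image = pixman_image_create_bits(pformat, c2d->width, c2d->height,
                                              NULL, 0);

        QLIST_INSERT_HEAD(&g->reslist, res, next);
    }

The matching teardown in virtgpu_resource_unref() would then use g_free(res)
instead of free(res).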
diff mbox

Patch

diff --git a/default-configs/x86_64-softmmu.mak b/default-configs/x86_64-softmmu.mak
index 31bddce..1a00b78 100644
--- a/default-configs/x86_64-softmmu.mak
+++ b/default-configs/x86_64-softmmu.mak
@@ -9,6 +9,7 @@  CONFIG_VGA_PCI=y
 CONFIG_VGA_ISA=y
 CONFIG_VGA_CIRRUS=y
 CONFIG_VMWARE_VGA=y
+CONFIG_VIRTIO_GPU=y
 CONFIG_VMMOUSE=y
 CONFIG_SERIAL=y
 CONFIG_PARALLEL=y
diff --git a/hw/display/Makefile.objs b/hw/display/Makefile.objs
index 540df82..10e4066 100644
--- a/hw/display/Makefile.objs
+++ b/hw/display/Makefile.objs
@@ -32,3 +32,5 @@  obj-$(CONFIG_TCX) += tcx.o
 obj-$(CONFIG_VGA) += vga.o
 
 common-obj-$(CONFIG_QXL) += qxl.o qxl-logger.o qxl-render.o
+
+obj-$(CONFIG_VIRTIO_GPU) += virtio-gpu.o
diff --git a/hw/display/virtgpu_hw.h b/hw/display/virtgpu_hw.h
new file mode 100644
index 0000000..81223de
--- /dev/null
+++ b/hw/display/virtgpu_hw.h
@@ -0,0 +1,225 @@ 
+#ifndef VIRTGPU_HW_H
+#define VIRTGPU_HW_H
+
+#define VIRTGPU_CMD_HAS_RESP (1 << 31)
+#define VIRTGPU_CMD_3D_ONLY  (1 << 30)
+enum virtgpu_ctrl_cmd {
+	VIRTGPU_CMD_NOP,
+	VIRTGPU_CMD_GET_DISPLAY_INFO = (1 | VIRTGPU_CMD_HAS_RESP),
+	VIRTGPU_CMD_GET_CAPS = (2 | VIRTGPU_CMD_HAS_RESP),
+	VIRTGPU_CMD_RESOURCE_CREATE_2D = 3,
+	VIRTGPU_CMD_RESOURCE_UNREF = 4,
+	VIRTGPU_CMD_SET_SCANOUT = 5,
+	VIRTGPU_CMD_RESOURCE_FLUSH = 6,
+	VIRTGPU_CMD_TRANSFER_SEND_2D = 7,
+	VIRTGPU_CMD_RESOURCE_ATTACH_BACKING = 8,
+	VIRTGPU_CMD_RESOURCE_INVAL_BACKING = 9,
+       
+	VIRTGPU_CMD_CTX_CREATE = (10 | VIRTGPU_CMD_3D_ONLY),
+	VIRTGPU_CMD_CTX_DESTROY = (11 | VIRTGPU_CMD_3D_ONLY),
+	VIRTGPU_CMD_CTX_ATTACH_RESOURCE = (12 | VIRTGPU_CMD_3D_ONLY),
+	VIRTGPU_CMD_CTX_DETACH_RESOURCE = (13 | VIRTGPU_CMD_3D_ONLY),
+
+	VIRTGPU_CMD_RESOURCE_CREATE_3D = (14 | VIRTGPU_CMD_3D_ONLY),
+
+	VIRTGPU_CMD_TRANSFER_SEND_3D = (15 | VIRTGPU_CMD_3D_ONLY),
+	VIRTGPU_CMD_TRANSFER_RECV_3D = (16 | VIRTGPU_CMD_3D_ONLY),
+
+	VIRTGPU_CMD_SUBMIT_3D = (17 | VIRTGPU_CMD_3D_ONLY),
+};
+
+enum virtgpu_ctrl_event {
+	VIRTGPU_EVENT_NOP,
+	VIRTGPU_EVENT_ERROR,
+	VIRTGPU_EVENT_DISPLAY_CHANGE,
+};
+
+/* data passed in the cursor vq */
+struct virtgpu_hw_cursor_page {
+	uint32_t cursor_x, cursor_y;
+	uint32_t cursor_hot_x, cursor_hot_y;
+	uint32_t cursor_id;
+};
+
+struct virtgpu_resource_unref {
+	uint32_t resource_id;
+};
+
+/* create a simple 2d resource with a format */
+struct virtgpu_resource_create_2d {
+	uint32_t resource_id;
+	uint32_t format;
+	uint32_t width;
+	uint32_t height;
+};
+
+struct virtgpu_set_scanout {
+	uint32_t scanout_id;
+	uint32_t resource_id;
+	uint32_t width;
+	uint32_t height;
+	uint32_t x;
+	uint32_t y;
+};
+
+struct virtgpu_resource_flush {
+	uint32_t resource_id;
+	uint32_t width;
+	uint32_t height;
+	uint32_t x;
+	uint32_t y;
+};
+
+/* simple transfer send */
+struct virtgpu_transfer_send_2d {
+	uint32_t resource_id;
+	uint32_t offset;
+	uint32_t width;
+	uint32_t height;
+	uint32_t x;
+	uint32_t y;
+};
+
+struct virtgpu_mem_entry {
+	uint64_t addr;
+	uint32_t length;
+	uint32_t pad;
+};
+
+struct virtgpu_resource_attach_backing {
+	uint32_t resource_id;
+	uint32_t nr_entries;
+};
+
+struct virtgpu_resource_inval_backing {
+	uint32_t resource_id;
+};
+
+#define VIRTGPU_MAX_SCANOUTS 16
+struct virtgpu_display_info {
+	uint32_t num_scanouts;
+	struct {
+		uint32_t enabled;
+		uint32_t width;
+		uint32_t height;
+		uint32_t x;
+		uint32_t y;
+		uint32_t flags;
+	} pmodes[VIRTGPU_MAX_SCANOUTS];
+};
+
+
+/* 3d related */
+struct virtgpu_box {
+	uint32_t x, y, z;
+	uint32_t w, h, d;
+};
+
+struct virtgpu_transfer_send_3d {
+	uint64_t data;
+	uint32_t resource_id;
+	uint32_t level;
+	struct virtgpu_box box;
+	uint32_t stride;
+	uint32_t layer_stride;
+	uint32_t ctx_id;
+};
+
+struct virtgpu_transfer_recv_3d {
+	uint64_t data;
+	uint32_t resource_id;
+	uint32_t level;
+	struct virtgpu_box box;
+	uint32_t stride;
+	uint32_t layer_stride;
+	uint32_t ctx_id;
+};
+
+#define VIRTGPU_RESOURCE_FLAG_Y_0_TOP (1 << 0)
+struct virtgpu_resource_create_3d {
+	uint32_t resource_id;
+	uint32_t target;
+	uint32_t format;
+	uint32_t bind;
+	uint32_t width;
+	uint32_t height;
+	uint32_t depth;
+	uint32_t array_size;
+	uint32_t last_level;
+	uint32_t nr_samples;
+	uint32_t flags;
+};
+
+struct virtgpu_ctx_create {
+	uint32_t ctx_id;
+	uint32_t nlen;
+	char debug_name[64];
+};
+
+struct virtgpu_ctx_destroy {
+	uint32_t ctx_id;
+};
+
+struct virtgpu_ctx_resource {
+	uint32_t resource_id;
+	uint32_t ctx_id;
+};
+
+struct virtgpu_cmd_submit {
+	uint64_t phy_addr;
+	uint32_t size;
+	uint32_t ctx_id;
+};
+
+struct virtgpu_command {
+	uint32_t type;
+	uint32_t flags;
+	uint64_t rsvd;
+	union virtgpu_cmds {
+		struct virtgpu_resource_create_2d resource_create_2d;
+		struct virtgpu_resource_unref resource_unref;
+		struct virtgpu_resource_flush resource_flush;
+		struct virtgpu_set_scanout set_scanout;
+		struct virtgpu_transfer_send_2d transfer_send_2d;
+		struct virtgpu_resource_attach_backing resource_attach_backing;
+		struct virtgpu_resource_inval_backing resource_inval_backing;
+
+		struct virtgpu_cmd_submit cmd_submit;
+		struct virtgpu_ctx_create ctx_create;
+		struct virtgpu_ctx_destroy ctx_destroy;
+		struct virtgpu_ctx_resource ctx_resource;
+		struct virtgpu_resource_create_3d resource_create_3d;
+		struct virtgpu_transfer_send_3d transfer_send_3d;
+		struct virtgpu_transfer_recv_3d transfer_recv_3d;
+	} u;
+};
+
+struct virtgpu_response {
+	uint32_t type;
+	uint32_t flags;
+	union virtgpu_resps {
+		struct virtgpu_display_info display_info;
+	} u;
+};
+
+struct virtgpu_event {
+	uint32_t type;
+	uint32_t err_code;
+	union virtgpu_events {
+		struct virtgpu_display_info display_info;
+	} u;
+};
+
+/* simple formats for fbcon/X use */
+enum virtgpu_formats {
+   VIRGL_FORMAT_B8G8R8A8_UNORM          = 1,
+   VIRGL_FORMAT_B8G8R8X8_UNORM          = 2,
+   VIRGL_FORMAT_A8R8G8B8_UNORM          = 3,
+   VIRGL_FORMAT_X8R8G8B8_UNORM          = 4,
+
+   VIRGL_FORMAT_B5G5R5A1_UNORM          = 5,
+
+   VIRGL_FORMAT_R8_UNORM                = 64,
+};
+
+#endif
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
new file mode 100644
index 0000000..392672d
--- /dev/null
+++ b/hw/display/virtio-gpu.c
@@ -0,0 +1,606 @@ 
+/*
+ * Virtio video device
+ *
+ * Copyright Red Hat
+ *
+ * Authors:
+ *  Dave Airlie
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/iov.h"
+#include "ui/console.h"
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/virtio-gpu.h"
+#include "hw/virtio/virtio-bus.h"
+
+#include "virtgpu_hw.h"
+
+static struct virtgpu_simple_resource *virtgpu_find_resource(VirtIOGPU *g,
+							  uint32_t resource_id);
+
+static void update_cursor_data(VirtIOGPU *g, struct virtgpu_simple_resource *res)
+{
+    uint32_t pixels;
+    int i;
+    pixels = g->current_cursor->width * g->current_cursor->height;
+    for (i = 0; i < pixels; i++) {
+	uint32_t value = ((uint32_t *)pixman_image_get_data(res->image))[i];
+	g->current_cursor->data[i] = value;
+    }
+    dpy_cursor_define(g->con[0], g->current_cursor);
+}
+
+static void update_cursor(VirtIOGPU *g, struct virtgpu_hw_cursor_page *cursor)
+{	
+    if (g->current_cursor_id != cursor->cursor_id) {
+	if (cursor->cursor_id > 0) {
+	    struct virtgpu_simple_resource *res;
+
+	    res = virtgpu_find_resource(g, cursor->cursor_id);
+	    if (res) {
+		if (!g->current_cursor)
+		    g->current_cursor = cursor_alloc(res->width, res->height);
+		g->current_cursor->hot_x = cursor->cursor_hot_x;
+		g->current_cursor->hot_y = cursor->cursor_hot_y;
+		
+		update_cursor_data(g, res);
+	    }
+	}
+	g->current_cursor_id = cursor->cursor_id;
+    }
+    
+    dpy_mouse_set(g->con[0], cursor->cursor_x, cursor->cursor_y, cursor->cursor_id ? 1 : 0);
+}
+
+static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
+{
+	
+}
+
+static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
+{
+}
+
+static uint32_t virtio_gpu_get_features(VirtIODevice *vdev, uint32_t features)
+{
+    return features;
+}
+
+static void virtio_gpu_set_features(VirtIODevice *vdev, uint32_t features)
+{
+
+}
+
+static struct virtgpu_simple_resource *virtgpu_find_resource(VirtIOGPU *g,
+							  uint32_t resource_id)
+{
+    struct virtgpu_simple_resource *res;
+
+    QLIST_FOREACH(res, &g->reslist, next) {
+	if (res->resource_id == resource_id)
+	    return res;
+    }
+    return NULL;
+}
+
+static void virtgpu_fill_display_info(VirtIOGPU *g,
+				      struct virtgpu_display_info *dpy_info)
+{
+    int i;
+    memset(dpy_info, 0, sizeof(*dpy_info));
+    dpy_info->num_scanouts = g->conf.max_outputs;
+    for (i = 0; i < g->conf.max_outputs; i++) {
+	if (g->enabled_output_bitmask & (1 << i)) {
+	    dpy_info->pmodes[i].enabled = 1;
+	    dpy_info->pmodes[i].width = g->req_state[i].width;
+	    dpy_info->pmodes[i].height = g->req_state[i].height;
+	}
+    }
+}
+
+static void virtgpu_get_display_info(VirtIOGPU *g,
+				     struct iovec *iov,
+				     unsigned int iov_cnt)
+{
+    struct virtgpu_display_info dpy_info;
+
+    virtgpu_fill_display_info(g, &dpy_info);
+
+    iov_from_buf(iov, iov_cnt, 0, &dpy_info, sizeof(dpy_info));
+}
+
+static pixman_format_code_t get_pixman_format(uint32_t virtgpu_format)
+{
+    switch (virtgpu_format) {
+    case VIRGL_FORMAT_B8G8R8X8_UNORM:
+	return PIXMAN_x8r8g8b8;
+    case VIRGL_FORMAT_B8G8R8A8_UNORM:
+	return PIXMAN_a8r8g8b8;
+    default:
+	assert(0);
+	break;
+    }
+    return 0;
+}
+
+static void virtgpu_resource_create_2d(VirtIOGPU *g,
+				       struct virtgpu_resource_create_2d *c2d)
+{
+    pixman_format_code_t pformat;
+    struct virtgpu_simple_resource *res;
+
+    res = calloc(1, sizeof(struct virtgpu_simple_resource));
+    if (!res)
+	return;
+
+    res->width = c2d->width;
+    res->height = c2d->height;
+    res->format = c2d->format;
+    res->resource_id = c2d->resource_id;
+
+    pformat = get_pixman_format(c2d->format);
+    res->image = pixman_image_create_bits(pformat,
+					  c2d->width,
+					  c2d->height,
+					  NULL, 0);
+
+    QLIST_INSERT_HEAD(&g->reslist, res, next);
+}
+
+static void virtgpu_resource_unref(VirtIOGPU *g,
+				   uint32_t resource_id)
+{
+    struct virtgpu_simple_resource *res = virtgpu_find_resource(g, resource_id);
+    
+    if (!res)
+	return;
+
+    pixman_image_unref(res->image);
+    QLIST_REMOVE(res, next);
+    free(res);
+}
+
+static void virtgpu_transfer_send_2d(VirtIOGPU *g,
+				     struct virtgpu_transfer_send_2d *t2d)
+{
+    struct virtgpu_simple_resource *res = virtgpu_find_resource(g, t2d->resource_id);
+    int h;
+    uint32_t src_offset, dst_offset, stride;
+    int bpp;
+    pixman_format_code_t format;
+    if (!res)
+	return;
+
+    if (!res->iov)
+	return;
+    
+    format = pixman_image_get_format(res->image);
+    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
+    stride = pixman_image_get_stride(res->image);
+
+    if (t2d->offset || t2d->x || t2d->y || t2d->width != pixman_image_get_width(res->image)) {
+	for (h = 0; h < t2d->height; h++) {
+	    src_offset = t2d->offset + stride * h;
+	    dst_offset = (t2d->y + h) * stride + (t2d->x * bpp);
+
+	    iov_to_buf(res->iov, res->iov_cnt, src_offset, (uint8_t *)pixman_image_get_data(res->image) + dst_offset, t2d->width * bpp);
+	}
+    } else {
+        iov_to_buf(res->iov, res->iov_cnt, 0, pixman_image_get_data(res->image), pixman_image_get_stride(res->image) * pixman_image_get_height(res->image));
+    }
+}
+
+static void virtgpu_resource_flush(VirtIOGPU *g,
+				   struct virtgpu_resource_flush *rf)
+{
+    struct virtgpu_simple_resource *res = virtgpu_find_resource(g, rf->resource_id); 
+    int i;
+    pixman_region16_t flush_region;
+
+    pixman_region_init_rect(&flush_region,
+			    rf->x, rf->y, rf->width, rf->height);
+    for (i = 0; i < VIRTGPU_MAX_SCANOUT; i++) {
+	struct virtgpu_scanout *scanout;
+	pixman_region16_t region, finalregion;
+	pixman_box16_t *extents;
+
+	if (!(res->scanout_bitmask & (1 << i)))
+	    continue;
+	scanout = &g->scanout[i];
+
+	pixman_region_init(&finalregion);
+	pixman_region_init_rect(&region,
+				scanout->x, scanout->y, scanout->width, scanout->height);
+
+	pixman_region_intersect(&finalregion, &flush_region, &region);
+	pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
+	extents = pixman_region_extents(&finalregion);
+	/* work out the area we need to update for each console */
+	dpy_gfx_update(g->con[i], extents->x1, extents->y1, extents->x2 - extents->x1,
+		       extents->y2 - extents->y1);
+
+	pixman_region_fini(&region);
+	pixman_region_fini(&finalregion);
+    }
+    pixman_region_fini(&flush_region);
+}
+
+static void virtgpu_set_scanout(VirtIOGPU *g,
+				struct virtgpu_set_scanout *ss)
+{
+    struct virtgpu_simple_resource *res = virtgpu_find_resource(g, ss->resource_id); 
+    uint32_t offset;
+    int bpp;
+    pixman_format_code_t format;
+
+    g->enable = 1;
+    if (ss->resource_id == 0) {
+	if (g->scanout[ss->scanout_id].resource_id) {
+	    res = virtgpu_find_resource(g, g->scanout[ss->scanout_id].resource_id);
+	    if (res)
+		res->scanout_bitmask &= ~(1 << ss->scanout_id);
+	}
+	if (ss->scanout_id == 0)
+	    return;
+	dpy_gfx_replace_surface(g->con[ss->scanout_id], NULL);
+	g->scanout[ss->scanout_id].ds = NULL;
+	g->scanout[ss->scanout_id].width = 0;
+	g->scanout[ss->scanout_id].height = 0;
+
+	dpy_notify_state(g->con[ss->scanout_id], 0, 0, 0, 0);
+	return;
+    }
+    /* create a surface for this scanout */
+
+    if (ss->scanout_id > 4) {
+	fprintf(stderr,"set scanout for non-0 surface %d\n", ss->scanout_id);
+	return;
+    }
+
+    format = pixman_image_get_format(res->image);
+    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
+    offset = (ss->x * bpp) + ss->y * pixman_image_get_stride(res->image);
+
+    dpy_notify_state(g->con[ss->scanout_id], ss->x, ss->y, ss->width, ss->height);
+    if (!g->scanout[ss->scanout_id].ds ||
+	surface_data(g->scanout[ss->scanout_id].ds) != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
+	g->scanout[ss->scanout_id].width != ss->width ||
+	g->scanout[ss->scanout_id].height != ss->height) {
+	/* realloc the surface ptr */
+	g->scanout[ss->scanout_id].ds = qemu_create_displaysurface_from(ss->width, ss->height, 32, pixman_image_get_stride(res->image), (uint8_t *)pixman_image_get_data(res->image) + offset, FALSE);
+	if (!g->scanout[ss->scanout_id].ds)
+	    return;
+
+	dpy_gfx_replace_surface(g->con[ss->scanout_id], g->scanout[ss->scanout_id].ds);
+    }
+
+    res->scanout_bitmask |= (1 << ss->scanout_id);
+
+
+    g->scanout[ss->scanout_id].resource_id = ss->resource_id;
+    g->scanout[ss->scanout_id].x = ss->x;
+    g->scanout[ss->scanout_id].y = ss->y;
+    g->scanout[ss->scanout_id].width = ss->width;
+    g->scanout[ss->scanout_id].height = ss->height;
+}
+
+static void virtgpu_resource_attach_backing(VirtIOGPU *g,
+					    struct virtgpu_resource_attach_backing *att_rb,
+					    struct iovec *iov,
+					    unsigned int iov_cnt)
+{
+    uint32_t gsize = iov_size(iov, iov_cnt);
+    struct iovec *res_iovs;
+    int i;
+    struct virtgpu_simple_resource *res = virtgpu_find_resource(g, att_rb->resource_id);
+    void *data;
+    if (!res)
+	return;
+    
+    res_iovs = malloc(att_rb->nr_entries * sizeof(struct iovec));
+    if (!res_iovs)
+	return;
+
+    if (iov_cnt > 1) {
+	data = malloc(gsize);
+	iov_to_buf(iov, iov_cnt, 0, data, gsize);
+    } else
+	data = iov[0].iov_base;
+
+    for (i = 0; i < att_rb->nr_entries; i++) {
+	struct virtgpu_mem_entry *ent = ((struct virtgpu_mem_entry *)data) + i;
+	hwaddr len;
+	res_iovs[i].iov_len = ent->length;
+
+	len = ent->length;
+	res_iovs[i].iov_base = cpu_physical_memory_map(ent->addr, &len, 1);
+	if (!res_iovs[i].iov_base || len != ent->length) {
+	    fprintf(stderr, "virtgp: trying to map MMIO memory");
+	    exit(1);
+	}
+    }
+ 
+    res->iov = res_iovs;
+    res->iov_cnt = att_rb->nr_entries;
+
+    if (iov_cnt > 1)
+	free(data);
+}
+
+static void virtgpu_resource_inval_backing(VirtIOGPU *g,
+					   uint32_t resource_id)
+{
+    int i;
+    struct virtgpu_simple_resource *res = virtgpu_find_resource(g, resource_id);
+
+    if (!res)
+	return;
+    if (!res->iov)
+	return;
+
+    for (i = 0; i < res->iov_cnt; i++) {
+	cpu_physical_memory_unmap(res->iov[i].iov_base, res->iov[i].iov_len, 1, res->iov[i].iov_len);
+    }
+    free(res->iov);
+    res->iov_cnt = 0;
+    res->iov = NULL;
+}
+
+static void virtio_gpu_process_cmd(VirtIOGPU *g,
+				   struct virtgpu_command *cmd,
+				   struct iovec *iov,
+				   unsigned int iov_cnt)
+{
+    switch (cmd->type) {
+    case VIRTGPU_CMD_GET_DISPLAY_INFO:
+	if (iov_cnt < 1)
+	    return;
+	virtgpu_get_display_info(g, iov, iov_cnt);
+	break;
+    case VIRTGPU_CMD_RESOURCE_CREATE_2D:
+	virtgpu_resource_create_2d(g, &cmd->u.resource_create_2d);
+	break;
+    case VIRTGPU_CMD_RESOURCE_UNREF:
+	virtgpu_resource_unref(g, cmd->u.resource_unref.resource_id);
+	break;
+    case VIRTGPU_CMD_RESOURCE_FLUSH:
+	virtgpu_resource_flush(g, &cmd->u.resource_flush);
+	break;
+    case VIRTGPU_CMD_TRANSFER_SEND_2D:
+	virtgpu_transfer_send_2d(g, &cmd->u.transfer_send_2d);
+	break;
+    case VIRTGPU_CMD_SET_SCANOUT:
+	virtgpu_set_scanout(g, &cmd->u.set_scanout);
+	break;
+    case VIRTGPU_CMD_RESOURCE_ATTACH_BACKING:
+	virtgpu_resource_attach_backing(g, &cmd->u.resource_attach_backing, iov, iov_cnt);
+	break;
+    case VIRTGPU_CMD_RESOURCE_INVAL_BACKING:
+	virtgpu_resource_inval_backing(g, cmd->u.resource_inval_backing.resource_id);
+	break;
+    case VIRTGPU_CMD_GET_CAPS:
+    case VIRTGPU_CMD_NOP:
+	break;
+    default:
+	fprintf(stderr,"got bad command from host %d\n", cmd->type);
+	break;
+    }
+}
+
+static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
+{ 
+    VirtIOGPU *g = VIRTIO_GPU(vdev);
+    qemu_bh_schedule(g->ctrl_bh);
+}
+
+static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
+{ 
+    VirtIOGPU *g = VIRTIO_GPU(vdev);
+    qemu_bh_schedule(g->cursor_bh);
+}
+
+static void virtio_gpu_handle_event_cb(VirtIODevice *vdev, VirtQueue *vq)
+{
+}
+
+
+static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
+{
+    VirtIOGPU *g = VIRTIO_GPU(vdev);
+    struct iovec *iov;
+    VirtQueueElement elem;
+    struct virtgpu_command cmd;
+    size_t s;
+    unsigned int iov_cnt;
+
+    if (!virtio_queue_ready(vq))
+      return;
+    while (virtqueue_pop(vq, &elem)) {
+
+	iov = elem.out_sg;
+	iov_cnt = elem.out_num;
+
+	s = iov_to_buf(iov, iov_cnt, 0, &cmd, sizeof(cmd));
+	if (s != sizeof(cmd))
+	    fprintf(stderr,"error\n");
+	else {
+	    if (elem.in_num > 0)
+		virtio_gpu_process_cmd(g, &cmd, elem.in_sg, elem.in_num);
+	    else
+		virtio_gpu_process_cmd(g, &cmd, &iov[1], iov_cnt - 1);
+	}
+	virtqueue_push(vq, &elem, 0);
+	virtio_notify(vdev, vq);
+    }
+}
+
+static void virtio_gpu_ctrl_bh(void *opaque)
+{
+    VirtIOGPU *g = opaque;
+    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
+}
+
+static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
+{
+    VirtIOGPU *g = VIRTIO_GPU(vdev);
+    VirtQueueElement elem;
+    size_t s;
+    struct virtgpu_hw_cursor_page cursor_info;
+
+    if (!virtio_queue_ready(vq))
+      return;
+    while (virtqueue_pop(vq, &elem)) {
+
+	s = iov_to_buf(elem.out_sg, elem.out_num, 0, &cursor_info, sizeof(cursor_info));
+	if (s != sizeof(cursor_info))
+	    fprintf(stderr,"error\n");
+	else
+	    update_cursor(g, &cursor_info);
+	virtqueue_push(vq, &elem, 0);
+	virtio_notify(vdev, vq);
+    }
+}
+
+static void virtio_gpu_cursor_bh(void *opaque)
+{
+    VirtIOGPU *g = opaque;
+    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
+}
+
+static int virtio_gpu_send_event(VirtIOGPU *g, struct virtgpu_event *event)
+{
+    VirtQueueElement elem;
+
+    if (!virtio_queue_ready(g->event_vq))
+	return -1;
+
+    if (!virtqueue_pop(g->event_vq, &elem))
+	return -1;
+
+    iov_from_buf(elem.in_sg, elem.in_num, 0, event,
+		 sizeof(struct virtgpu_event));
+
+    virtqueue_push(g->event_vq, &elem, sizeof(struct virtgpu_event));
+    virtio_notify(&g->parent_obj, g->event_vq);
+    return 0;
+}
+
+static void virtio_gpu_invalidate_display(void *opaque)
+{
+}
+
+static void virtio_gpu_update_display(void *opaque)
+{
+}
+
+static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
+{
+}
+
+static void virtio_gpu_notify_state(void *opaque, int idx, int x, int y, uint32_t width, uint32_t height)
+{
+    VirtIOGPU *g = opaque;   
+    struct virtgpu_event event;
+
+    if (idx > g->conf.max_outputs)
+	return;
+
+    g->req_state[idx].x = x;
+    g->req_state[idx].y = y;
+    g->req_state[idx].width = width;
+    g->req_state[idx].height = height;
+
+    if (width && height)
+	g->enabled_output_bitmask |= (1 << idx);
+    else
+	g->enabled_output_bitmask &= ~(1 << idx);
+
+    /* send event to guest */
+    event.type = VIRTGPU_EVENT_DISPLAY_CHANGE;
+    event.err_code = 0;
+    virtgpu_fill_display_info(g, &event.u.display_info);
+    virtio_gpu_send_event(g, &event);
+}
+
+static const GraphicHwOps virtio_gpu_ops = {
+    .invalidate = virtio_gpu_invalidate_display,
+    .gfx_update = virtio_gpu_update_display,
+    .text_update = virtio_gpu_text_update,
+    .notify_state = virtio_gpu_notify_state,
+};
+
+static int virtio_gpu_device_init(VirtIODevice *vdev)
+{
+    DeviceState *qdev = DEVICE(vdev);
+    VirtIOGPU *g = VIRTIO_GPU(vdev);
+    int i;
+
+    g->config_size = 0;
+    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
+		g->config_size);
+    
+    for (i = 0; i < g->conf.max_outputs; i++) {
+	if (i == 0) {
+	    g->req_state[0].width = 1024;
+	    g->req_state[0].height = 768;
+	}
+        g->con[i] = graphic_console_init(DEVICE(vdev), &virtio_gpu_ops, g);
+	if (i > 0)
+	    dpy_gfx_replace_surface(g->con[i], NULL);
+    }
+    g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
+    g->cursor_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_cursor_cb);
+
+    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
+    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
+
+    g->event_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_event_cb);
+
+    g->enabled_output_bitmask = 1;
+    g->qdev = qdev;
+    return 0;
+}
+
+static void virtio_gpu_instance_init(Object *obj)
+{
+
+}
+
+static Property virtio_gpu_properties[] = {
+    DEFINE_VIRTIO_GPU_FEATURES(VirtIOGPU, conf),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_gpu_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+
+    vdc->init = virtio_gpu_device_init;
+    vdc->get_config = virtio_gpu_get_config;
+    vdc->set_config = virtio_gpu_set_config;
+    vdc->get_features = virtio_gpu_get_features;
+    vdc->set_features = virtio_gpu_set_features;
+    
+    //    dc->reset = virtio_gpu_reset_handler;
+    dc->props = virtio_gpu_properties;
+}
+
+static const TypeInfo virtio_gpu_info = {
+    .name = TYPE_VIRTIO_GPU,
+    .parent = TYPE_VIRTIO_DEVICE,
+    .instance_size = sizeof(VirtIOGPU),
+    .instance_init = virtio_gpu_instance_init,
+    .class_init = virtio_gpu_class_init,
+};
+
+static void virtio_register_types(void)
+{
+    type_register_static(&virtio_gpu_info);
+}
+
+type_init(virtio_register_types)
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 7647be8..4b0e0e0 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -1502,6 +1502,54 @@  static const TypeInfo virtio_rng_pci_info = {
     .class_init    = virtio_rng_pci_class_init,
 };
 
+static Property virtio_gpu_pci_properties[] = {
+    DEFINE_VIRTIO_GPU_PCI_FEATURES(VirtIOPCIProxy),
+    DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static int virtio_gpu_pci_init(VirtIOPCIProxy *vpci_dev)
+{
+    VirtIOGPUPCI *vgpu = VIRTIO_GPU_PCI(vpci_dev);
+    DeviceState *vdev = DEVICE(&vgpu->vdev);
+
+    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
+    if (qdev_init(vdev) < 0) {
+	return -1;
+    }
+    return 0;
+}
+
+static void virtio_gpu_pci_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
+    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+
+    set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
+    dc->props = virtio_gpu_pci_properties;
+    k->init = virtio_gpu_pci_init;
+    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
+    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_GPU;
+    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
+    pcidev_k->class_id = PCI_CLASS_DISPLAY_OTHER;
+}
+
+static void virtio_gpu_initfn(Object *obj)
+{
+    VirtIOGPUPCI *dev = VIRTIO_GPU_PCI(obj);
+    object_initialize(&dev->vdev, sizeof(dev->vdev), TYPE_VIRTIO_GPU);
+    object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
+}
+
+static const TypeInfo virtio_gpu_pci_info = {
+    .name = TYPE_VIRTIO_GPU_PCI,
+    .parent = TYPE_VIRTIO_PCI,
+    .instance_size = sizeof(VirtIOGPUPCI),
+    .instance_init = virtio_gpu_initfn,
+    .class_init = virtio_gpu_pci_class_init,
+};
+
 /* virtio-pci-bus */
 
 static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
@@ -1558,6 +1606,7 @@  static void virtio_pci_register_types(void)
 #ifdef CONFIG_VHOST_SCSI
     type_register_static(&vhost_scsi_pci_info);
 #endif
+    type_register_static(&virtio_gpu_pci_info);
 }
 
 type_init(virtio_pci_register_types)
diff --git a/hw/virtio/virtio-pci.h b/hw/virtio/virtio-pci.h
index 917bcc5..856046d 100644
--- a/hw/virtio/virtio-pci.h
+++ b/hw/virtio/virtio-pci.h
@@ -24,6 +24,7 @@ 
 #include "hw/virtio/virtio-balloon.h"
 #include "hw/virtio/virtio-bus.h"
 #include "hw/virtio/virtio-9p.h"
+#include "hw/virtio/virtio-gpu.h"
 #ifdef CONFIG_VIRTFS
 #include "hw/9pfs/virtio-9p.h"
 #endif
@@ -39,6 +40,7 @@  typedef struct VirtIOSerialPCI VirtIOSerialPCI;
 typedef struct VirtIONetPCI VirtIONetPCI;
 typedef struct VHostSCSIPCI VHostSCSIPCI;
 typedef struct VirtIORngPCI VirtIORngPCI;
+typedef struct VirtIOGPUPCI VirtIOGPUPCI;
 
 /* virtio-pci-bus */
 
@@ -200,6 +202,19 @@  struct VirtIORngPCI {
     VirtIORNG vdev;
 };
 
+/*
+ * virtio-gpu-pci: This extends VirtioPCIProxy.
+ */
+#define TYPE_VIRTIO_GPU_PCI "virtio-gpu-pci"
+#define VIRTIO_GPU_PCI(obj) \
+        OBJECT_CHECK(VirtIOGPUPCI, (obj), TYPE_VIRTIO_GPU_PCI)
+
+struct VirtIOGPUPCI {
+    VirtIOPCIProxy parent_obj;
+    VirtIOGPU vdev;
+};
+
+
 /* Virtio ABI version, if we increment this, we break the guest driver. */
 #define VIRTIO_PCI_ABI_VERSION          0
 
diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
index b783e68..fee5d6b 100644
--- a/include/hw/pci/pci.h
+++ b/include/hw/pci/pci.h
@@ -80,6 +80,7 @@ 
 #define PCI_DEVICE_ID_VIRTIO_SCSI        0x1004
 #define PCI_DEVICE_ID_VIRTIO_RNG         0x1005
 #define PCI_DEVICE_ID_VIRTIO_9P          0x1009
+#define PCI_DEVICE_ID_VIRTIO_GPU         0x1010
 
 #define PCI_VENDOR_ID_REDHAT             0x1b36
 #define PCI_DEVICE_ID_REDHAT_BRIDGE      0x0001
diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h
new file mode 100644
index 0000000..c757457
--- /dev/null
+++ b/include/hw/virtio/virtio-gpu.h
@@ -0,0 +1,90 @@ 
+#ifndef _QEMU_VIRTIO_VGA_H
+#define _QEMU_VIRTIO_VGA_H
+
+#include "qemu/queue.h"
+#include "ui/qemu-pixman.h"
+#include "ui/console.h"
+#include "hw/virtio/virtio.h"
+#include "hw/pci/pci.h"
+
+#define TYPE_VIRTIO_GPU "virtio-gpu-device"
+#define VIRTIO_GPU(obj)					\
+        OBJECT_CHECK(VirtIOGPU, (obj), TYPE_VIRTIO_GPU)
+
+#define VIRTIO_ID_GPU 16
+
+#define VIRTGPU_MAX_RES 16
+
+#define VIRTGPU_MAX_SCANOUT 4
+
+struct virtgpu_simple_resource {
+    uint32_t resource_id;
+    uint32_t width;
+    uint32_t height;
+    uint32_t format;
+    struct iovec *iov;
+    unsigned int iov_cnt;
+    uint32_t scanout_bitmask;
+    pixman_image_t *image;
+    QLIST_ENTRY(virtgpu_simple_resource) next;
+};
+
+struct virtgpu_scanout {
+    DisplaySurface *ds;
+    uint32_t width, height;
+    int x, y;
+    int invalidate;
+    uint32_t resource_id;
+};
+
+struct virtgpu_requested_state {
+    uint32_t width, height;
+    int x, y;
+};
+
+struct virtio_gpu_conf {
+    uint32_t max_outputs;
+};
+
+typedef struct VirtIOGPU {
+    VirtIODevice parent_obj;
+
+    /* qemu console for this GPU */
+    QemuConsole *con[VIRTGPU_MAX_SCANOUT];
+
+    QEMUBH *ctrl_bh;
+    QEMUBH *cursor_bh;
+    VirtQueue *ctrl_vq;
+    VirtQueue *cursor_vq;
+    VirtQueue *event_vq;
+
+    int enable;
+
+    uint32_t current_cursor_id;
+    uint32_t current_cursor_hotspot_info;
+    uint32_t current_cursor_x, current_cursor_y;
+
+    int config_size;
+    DeviceState *qdev;
+
+    QLIST_HEAD(, virtgpu_simple_resource) reslist;
+
+    struct virtgpu_scanout scanout[VIRTGPU_MAX_SCANOUT];
+    struct virtgpu_requested_state req_state[VIRTGPU_MAX_SCANOUT];
+    QEMUCursor *current_cursor;
+
+    struct virtio_gpu_conf conf;
+    int enabled_output_bitmask;
+} VirtIOGPU;
+
+/* to share between PCI and VGA */
+#define DEFINE_VIRTIO_GPU_PCI_FEATURES(_state)                    \
+    DEFINE_PROP_BIT("ioeventfd", _state, flags, \
+                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false), \
+    DEFINE_PROP_UINT32("vectors", _state, nvectors, 4)
+
+#define DEFINE_VIRTIO_GPU_FEATURES(_state, _conf_field)	\
+    DEFINE_PROP_UINT32("max_outputs", _state, _conf_field.max_outputs, 2)
+    
+#endif
+
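
On the question of what a guest has to do to get something on screen: the 2D
handlers in virtio-gpu.c above imply roughly the command sequence sketched
below on the control queue, typically after a VIRTGPU_CMD_GET_DISPLAY_INFO to
learn the scanout sizes. This is inferred from the patch, not a documented
guest ABI; the resource id and mode values are arbitrary examples.

    /* 1. VIRTGPU_CMD_RESOURCE_CREATE_2D      - host allocates a pixman image
     * 2. VIRTGPU_CMD_RESOURCE_ATTACH_BACKING - guest pages backing the resource
     * 3. VIRTGPU_CMD_SET_SCANOUT             - bind the resource to a scanout
     * 4. VIRTGPU_CMD_TRANSFER_SEND_2D        - copy backing pages into the image
     * 5. VIRTGPU_CMD_RESOURCE_FLUSH          - push the dirty rect to the display
     */
    struct virtgpu_command cmd = {
        .type = VIRTGPU_CMD_RESOURCE_CREATE_2D,
        .u.resource_create_2d = {
            .resource_id = 1,
            .format      = VIRGL_FORMAT_B8G8R8X8_UNORM,
            .width       = 1024,
            .height      = 768,
        },
    };
    /* queue 'cmd' on the ctrl vq; then ATTACH_BACKING with an array of
     * struct virtgpu_mem_entry, SET_SCANOUT (scanout_id 0), TRANSFER_SEND_2D
     * for the dirty region, and finally RESOURCE_FLUSH for the same region. */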