diff mbox

[v2] xen_disk: add discard support

Message ID 1391702379-20042-1-git-send-email-olaf@aepfle.de
State New
Headers show

Commit Message

Olaf Hering Feb. 6, 2014, 3:59 p.m. UTC
Implement discard support for xen_disk. It makes use of the existing
discard code in qemu.

The discard support is enabled unconditionally. The tool stack may
provide a property "discard-enable" in the backend node to optionally
disable discard support. This is helpful in case the backing file was
intentionally created non-sparse to avoid fragmentation.

Signed-off-by: Olaf Hering <olaf@aepfle.de>
---
v2:
depends on "xen_disk: fix io accounting"
remove call to bdrv_acct_start

 hw/block/xen_blkif.h | 12 ++++++++++++
 hw/block/xen_disk.c  | 33 +++++++++++++++++++++++++++++++++
 2 files changed, 45 insertions(+)

Comments

Stefano Stabellini Feb. 20, 2014, 6:06 p.m. UTC | #1
On Thu, 6 Feb 2014, Olaf Hering wrote:
> @@ -253,6 +254,8 @@ static int ioreq_parse(struct ioreq *ioreq)
>      case BLKIF_OP_WRITE:
>          ioreq->prot = PROT_READ; /* from memory */
>          break;
> +    case BLKIF_OP_DISCARD:
> +        return 0;
>      default:
>          xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
>                        ioreq->req.operation);

Unfortunately I didn't realize before that older Xen releases don't
define BLKIF_OP_DISCARD, therefore this patch would cause QEMU build
failures against Xen 4.1 for example.
Take a look at include/hw/xen/xen_common.h to see how compatibility
with older Xen versions is usually achieved in QEMU.
Olaf Hering Feb. 20, 2014, 6:14 p.m. UTC | #2
On Thu, Feb 20, Stefano Stabellini wrote:

> On Thu, 6 Feb 2014, Olaf Hering wrote:
> > @@ -253,6 +254,8 @@ static int ioreq_parse(struct ioreq *ioreq)
> >      case BLKIF_OP_WRITE:
> >          ioreq->prot = PROT_READ; /* from memory */
> >          break;
> > +    case BLKIF_OP_DISCARD:
> > +        return 0;
> >      default:
> >          xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
> >                        ioreq->req.operation);
> 
> Unfortunately I didn't realize before that older Xen releases don't
> define BLKIF_OP_DISCARD, therefore this patch would cause QEMU build
> failures against Xen 4.1 for example.

Why would that matter?
Is new qemu seriously supposed to work with stale Xen releases?
But I will have a look how to solve this.

Olaf
Stefano Stabellini Feb. 20, 2014, 6:17 p.m. UTC | #3
On Thu, 20 Feb 2014, Olaf Hering wrote:
> On Thu, Feb 20, Stefano Stabellini wrote:
> 
> > On Thu, 6 Feb 2014, Olaf Hering wrote:
> > > @@ -253,6 +254,8 @@ static int ioreq_parse(struct ioreq *ioreq)
> > >      case BLKIF_OP_WRITE:
> > >          ioreq->prot = PROT_READ; /* from memory */
> > >          break;
> > > +    case BLKIF_OP_DISCARD:
> > > +        return 0;
> > >      default:
> > >          xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
> > >                        ioreq->req.operation);
> > 
> > Unfortunately I didn't realize before that older Xen releases don't
> > define BLKIF_OP_DISCARD, therefore this patch would cause QEMU build
> > failures against Xen 4.1 for example.
> 
> Why would that matter?
> Is new qemu seriously supposed to work with stale Xen releases?
> But I will have a look how to solve this.

It needs to build at least.
diff mbox

Patch

diff --git a/hw/block/xen_blkif.h b/hw/block/xen_blkif.h
index c0f4136..711b692 100644
--- a/hw/block/xen_blkif.h
+++ b/hw/block/xen_blkif.h
@@ -79,6 +79,12 @@  static inline void blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_reque
 	dst->handle = src->handle;
 	dst->id = src->id;
 	dst->sector_number = src->sector_number;
+	if (src->operation == BLKIF_OP_DISCARD) {
+		struct blkif_request_discard *s = (void *)src;
+		struct blkif_request_discard *d = (void *)dst;
+		d->nr_sectors = s->nr_sectors;
+		return;
+	}
 	if (n > src->nr_segments)
 		n = src->nr_segments;
 	for (i = 0; i < n; i++)
@@ -94,6 +100,12 @@  static inline void blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_reque
 	dst->handle = src->handle;
 	dst->id = src->id;
 	dst->sector_number = src->sector_number;
+	if (src->operation == BLKIF_OP_DISCARD) {
+		struct blkif_request_discard *s = (void *)src;
+		struct blkif_request_discard *d = (void *)dst;
+		d->nr_sectors = s->nr_sectors;
+		return;
+	}
 	if (n > src->nr_segments)
 		n = src->nr_segments;
 	for (i = 0; i < n; i++)
diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index 7f0f14a..841b016 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -114,6 +114,7 @@  struct XenBlkDev {
     int                 requests_finished;
 
     /* Persistent grants extension */
+    gboolean            feature_discard;
     gboolean            feature_persistent;
     GTree               *persistent_gnts;
     unsigned int        persistent_gnt_count;
@@ -253,6 +254,8 @@  static int ioreq_parse(struct ioreq *ioreq)
     case BLKIF_OP_WRITE:
         ioreq->prot = PROT_READ; /* from memory */
         break;
+    case BLKIF_OP_DISCARD:
+        return 0;
     default:
         xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                       ioreq->req.operation);
@@ -492,6 +495,7 @@  static void qemu_aio_complete(void *opaque, int ret)
     case BLKIF_OP_READ:
         bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
         break;
+    case BLKIF_OP_DISCARD:
     default:
         break;
     }
@@ -532,6 +536,15 @@  static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
                         &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                         qemu_aio_complete, ioreq);
         break;
+    case BLKIF_OP_DISCARD:
+    {
+        struct blkif_request_discard *discard_req = (void *)&ioreq->req;
+        ioreq->aio_inflight++;
+        bdrv_aio_discard(blkdev->bs,
+                        discard_req->sector_number, discard_req->nr_sectors,
+                        qemu_aio_complete, ioreq);
+        break;
+    }
     default:
         /* unknown operation (shouldn't happen -- parse catches this) */
         goto err;
@@ -710,6 +723,21 @@  static void blk_alloc(struct XenDevice *xendev)
     }
 }
 
+static void blk_parse_discard(struct XenBlkDev *blkdev)
+{
+    int enable;
+
+    blkdev->feature_discard = true;
+
+    if (xenstore_read_be_int(&blkdev->xendev, "discard-enable", &enable) == 0) {
+        blkdev->feature_discard = !!enable;
+    }
+
+    if (blkdev->feature_discard) {
+        xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
+    }
+}
+
 static int blk_init(struct XenDevice *xendev)
 {
     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
@@ -777,6 +805,8 @@  static int blk_init(struct XenDevice *xendev)
     xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
     xenstore_write_be_int(&blkdev->xendev, "info", info);
 
+    blk_parse_discard(blkdev);
+
     g_free(directiosafe);
     return 0;
 
@@ -812,6 +842,9 @@  static int blk_connect(struct XenDevice *xendev)
         qflags |= BDRV_O_RDWR;
         readonly = false;
     }
+    if (blkdev->feature_discard) {
+        qflags |= BDRV_O_UNMAP;
+    }
 
     /* init qemu block driver */
     index = (blkdev->xendev.dev - 202 * 256) / 16;