
Break up the single NBD lock into one per NBD device

Message ID 1317080052-6052-1-git-send-email-hkchu@google.com
State Not Applicable, archived
Delegated to: David Miller

Commit Message

Jerry Chu Sept. 26, 2011, 11:34 p.m. UTC
From: Jerry Chu <hkchu@google.com>

This patch breaks up the single NBD lock into one per
disk. The single NBD lock has become a serious performance
bottleneck when multiple NBD disks are being used.

The original comment on why a single lock may be ok no
longer holds for today's much faster NICs.

Signed-off-by: H.K. Jerry Chu <hkchu@google.com>
---
 drivers/block/nbd.c |   22 +++++++++-------------
 1 files changed, 9 insertions(+), 13 deletions(-)

Comments

David Miller Oct. 6, 2011, 7:37 p.m. UTC | #1
From: "H.K. Jerry Chu" <hkchu@google.com>
Date: Mon, 26 Sep 2011 16:34:12 -0700

> From: Jerry Chu <hkchu@google.com>
> 
> This patch breaks up the single NBD lock into one per
> disk. The single NBD lock has become a serious performance
> bottleneck when multiple NBD disks are being used.
> 
> The original comment on why a single lock may be ok no
> longer holds for today's much faster NICs.
> 
> Signed-off-by: H.K. Jerry Chu <hkchu@google.com>

Acked-by: David S. Miller <davem@davemloft.net>

Even though this is a "networking" block device, I think this change
should go through Jens Axboe's block layer tree.

Thanks.
Eric Dumazet Oct. 6, 2011, 7:53 p.m. UTC | #2
On Monday, 26 September 2011 at 16:34 -0700, H.K. Jerry Chu wrote:
> From: Jerry Chu <hkchu@google.com>
> 
> This patch breaks up the single NBD lock into one per
> disk. The single NBD lock has become a serious performance
> bottleneck when multiple NBD disks are being used.
> 
> The original comment on why a single lock may be ok no
> longer holds for today's much faster NICs.
> 
> Signed-off-by: H.K. Jerry Chu <hkchu@google.com>
> ---
>  drivers/block/nbd.c |   22 +++++++++-------------
>  1 files changed, 9 insertions(+), 13 deletions(-)
> 
> diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
> index f533f33..355e15c 100644
> --- a/drivers/block/nbd.c
> +++ b/drivers/block/nbd.c
> @@ -58,20 +58,9 @@ static unsigned int debugflags;
>  
>  static unsigned int nbds_max = 16;
>  static struct nbd_device *nbd_dev;
> +static spinlock_t *nbd_locks;

static spinlock_t *nbd_locks __read_mostly;

>  static int max_part;
>  
> -/*
> - * Use just one lock (or at most 1 per NIC). Two arguments for this:
> - * 1. Each NIC is essentially a synchronization point for all servers
> - *    accessed through that NIC so there's no need to have more locks
> - *    than NICs anyway.
> - * 2. More locks lead to more "Dirty cache line bouncing" which will slow
> - *    down each lock to the point where they're actually slower than just
> - *    a single lock.
> - * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
> - */
> -static DEFINE_SPINLOCK(nbd_lock);
> -
>  #ifndef NDEBUG
>  static const char *ioctl_cmd_to_ascii(int cmd)
>  {
> @@ -753,6 +742,12 @@ static int __init nbd_init(void)
>  	if (!nbd_dev)
>  		return -ENOMEM;
>  
> +	nbd_locks = kcalloc(nbds_max, sizeof(*nbd_locks), GFP_KERNEL);
> +	if (!nbd_locks) {
> +		kfree(nbd_dev);
> +		return -ENOMEM;
> +	}
> +

	Please add a loop to initialize the spinlocks, to help LOCKDEP:

	for (i = 0; i < nbds_max; i++)
		spin_lock_init(&nbd_locks[i]);

>  	part_shift = 0;
>  	if (max_part > 0) {
>  		part_shift = fls(max_part);
> @@ -784,7 +779,7 @@ static int __init nbd_init(void)
>  		 * every gendisk to have its very own request_queue struct.
>  		 * These structs are big so we dynamically allocate them.
>  		 */
> -		disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);
> +		disk->queue = blk_init_queue(do_nbd_request, &nbd_locks[i]);
>  		if (!disk->queue) {
>  			put_disk(disk);
>  			goto out;
> @@ -832,6 +827,7 @@ out:
>  		put_disk(nbd_dev[i].disk);
>  	}
>  	kfree(nbd_dev);
> +	kfree(nbd_locks);
>  	return err;
>  }
>  


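Folding both of Eric's suggestions into the posted patch, nbd_init() would set up the locks roughly like this (a sketch for illustration, combining the review feedback above; not a posted revision):

	static spinlock_t *nbd_locks __read_mostly;	/* written once at init, read thereafter */
	...
		/* One lock per device replaces the old shared nbd_lock. */
		nbd_locks = kcalloc(nbds_max, sizeof(*nbd_locks), GFP_KERNEL);
		if (!nbd_locks) {
			kfree(nbd_dev);
			return -ENOMEM;
		}

		/* Explicitly initialize each spinlock so lockdep sees a valid lock class. */
		for (i = 0; i < nbds_max; i++)
			spin_lock_init(&nbd_locks[i]);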

Patch

diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index f533f33..355e15c 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -58,20 +58,9 @@ static unsigned int debugflags;
 
 static unsigned int nbds_max = 16;
 static struct nbd_device *nbd_dev;
+static spinlock_t *nbd_locks;
 static int max_part;
 
-/*
- * Use just one lock (or at most 1 per NIC). Two arguments for this:
- * 1. Each NIC is essentially a synchronization point for all servers
- *    accessed through that NIC so there's no need to have more locks
- *    than NICs anyway.
- * 2. More locks lead to more "Dirty cache line bouncing" which will slow
- *    down each lock to the point where they're actually slower than just
- *    a single lock.
- * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
- */
-static DEFINE_SPINLOCK(nbd_lock);
-
 #ifndef NDEBUG
 static const char *ioctl_cmd_to_ascii(int cmd)
 {
@@ -753,6 +742,12 @@ static int __init nbd_init(void)
 	if (!nbd_dev)
 		return -ENOMEM;
 
+	nbd_locks = kcalloc(nbds_max, sizeof(*nbd_locks), GFP_KERNEL);
+	if (!nbd_locks) {
+		kfree(nbd_dev);
+		return -ENOMEM;
+	}
+
 	part_shift = 0;
 	if (max_part > 0) {
 		part_shift = fls(max_part);
@@ -784,7 +779,7 @@ static int __init nbd_init(void)
 		 * every gendisk to have its very own request_queue struct.
 		 * These structs are big so we dynamically allocate them.
 		 */
-		disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);
+		disk->queue = blk_init_queue(do_nbd_request, &nbd_locks[i]);
 		if (!disk->queue) {
 			put_disk(disk);
 			goto out;
@@ -832,6 +827,7 @@ out:
 		put_disk(nbd_dev[i].disk);
 	}
 	kfree(nbd_dev);
+	kfree(nbd_locks);
 	return err;
 }
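
For context, the spinlock passed to blk_init_queue() becomes that queue's queue_lock, which the block layer holds while calling the request function. With the hunk above, each disk's do_nbd_request() therefore runs under its own lock:

	/* Each gendisk gets a private queue lock instead of the shared nbd_lock. */
	disk->queue = blk_init_queue(do_nbd_request, &nbd_locks[i]);

so requests for different NBD devices no longer contend on a single spinlock, which is the bottleneck the commit message describes.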