
[net-next,V2,3/3] virtio-net: rx busy polling support

Message ID 1405491707-22706-4-git-send-email-jasowang@redhat.com
State Changes Requested, archived
Delegated to: David Miller

Commit Message

Jason Wang July 16, 2014, 6:21 a.m. UTC
Add basic support for rx busy polling.

Test was done between a kvm guest and an external host. Two hosts were
connected through 40gb mlx4 cards. With both busy_poll and busy_read
set to 50 in the guest, 1 byte netperf tcp_rr shows a 116% improvement:
the transaction rate increased from 9151.94 to 19787.37.

Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Vlad Yasevich <vyasevic@redhat.com>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 drivers/net/virtio_net.c | 190 ++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 187 insertions(+), 3 deletions(-)

Comments

Varka Bhadram July 16, 2014, 8:38 a.m. UTC | #1
On 07/16/2014 11:51 AM, Jason Wang wrote:
> Add basic support for rx busy polling.
>
> Test was done between a kvm guest and an external host. Two hosts were
> connected through 40gb mlx4 cards. With both busy_poll and busy_read
> set to 50 in the guest, 1 byte netperf tcp_rr shows a 116% improvement:
> the transaction rate increased from 9151.94 to 19787.37.
>
> Cc: Rusty Russell <rusty@rustcorp.com.au>
> Cc: Michael S. Tsirkin <mst@redhat.com>
> Cc: Vlad Yasevich <vyasevic@redhat.com>
> Cc: Eric Dumazet <eric.dumazet@gmail.com>
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
>   drivers/net/virtio_net.c | 190 ++++++++++++++++++++++++++++++++++++++++++++++-
>   1 file changed, 187 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index e417d93..4830713 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -27,6 +27,7 @@
>   #include <linux/slab.h>
>   #include <linux/cpu.h>
>   #include <linux/average.h>
> +#include <net/busy_poll.h>
>   
>   static int napi_weight = NAPI_POLL_WEIGHT;
>   module_param(napi_weight, int, 0444);
> @@ -94,8 +95,143 @@ struct receive_queue {
>   
>   	/* Name of this receive queue: input.$index */
>   	char name[40];
> +
> +#ifdef CONFIG_NET_RX_BUSY_POLL
> +	unsigned int state;
> +#define VIRTNET_RQ_STATE_IDLE        0
> +#define VIRTNET_RQ_STATE_NAPI	     1    /* NAPI or refill owns this RQ */
> +#define VIRTNET_RQ_STATE_POLL	     2    /* poll owns this RQ */
> +#define VIRTNET_RQ_STATE_DISABLED    4    /* RQ is disabled */
> +#define VIRTNET_RQ_OWNED (VIRTNET_RQ_STATE_NAPI | VIRTNET_RQ_STATE_POLL)
> +#define VIRTNET_RQ_LOCKED (VIRTNET_RQ_OWNED | VIRTNET_RQ_STATE_DISABLED)
> +#define VIRTNET_RQ_STATE_NAPI_YIELD  8    /* NAPI or refill yielded this RQ */
> +#define VIRTNET_RQ_STATE_POLL_YIELD  16   /* poll yielded this RQ */
> +	spinlock_t lock;
> +#endif  /* CONFIG_NET_RX_BUSY_POLL */
>   };
>   
> +#ifdef CONFIG_NET_RX_BUSY_POLL
> +static inline void virtnet_rq_init_lock(struct receive_queue *rq)
> +{
> +
> +	spin_lock_init(&rq->lock);
> +	rq->state = VIRTNET_RQ_STATE_IDLE;
> +}
> +
> +/* called from the device poll routine or refill routine to get ownership of a
> + * receive queue.
> + */
> +static inline bool virtnet_rq_lock_napi_refill(struct receive_queue *rq)
> +{
> +	int rc = true;
> +

bool instead of int...?

> +	spin_lock(&rq->lock);
> +	if (rq->state & VIRTNET_RQ_LOCKED) {
> +		WARN_ON(rq->state & VIRTNET_RQ_STATE_NAPI);
> +		rq->state |= VIRTNET_RQ_STATE_NAPI_YIELD;
> +		rc = false;
> +	} else
> +		/* we don't care if someone yielded */
> +		rq->state = VIRTNET_RQ_STATE_NAPI;
> +	spin_unlock(&rq->lock);

Lock for rq->state ...?

If yes:
spin_lock(&rq->lock);
if (rq->state & VIRTNET_RQ_LOCKED) {
	rq->state |= VIRTNET_RQ_STATE_NAPI_YIELD;
	spin_unlock(&rq->lock);
	WARN_ON(rq->state & VIRTNET_RQ_STATE_NAPI);
	rc = false;
} else {
	/* we don't care if someone yielded */
	rq->state = VIRTNET_RQ_STATE_NAPI;
	spin_unlock(&rq->lock);
}

> +	return rc;
> +}
> +
> +/* returns true is someone tried to get the rq while napi or refill had it */
> +static inline bool virtnet_rq_unlock_napi_refill(struct receive_queue *rq)
> +{
> +	int rc = false;
> +
> +	spin_lock(&rq->lock);
> +	WARN_ON(rq->state & (VIRTNET_RQ_STATE_POLL |
> +			     VIRTNET_RQ_STATE_NAPI_YIELD));
> +
> +	if (rq->state & VIRTNET_RQ_STATE_POLL_YIELD)
> +		rc = true;
> +	/* will reset state to idle, unless RQ is disabled */
> +	rq->state &= VIRTNET_RQ_STATE_DISABLED;
> +	spin_unlock(&rq->lock);
> +	return rc;
> +}
> +
> +/* called from virtnet_low_latency_recv() */
> +static inline bool virtnet_rq_lock_poll(struct receive_queue *rq)
> +{
> +	int rc = true;
> +
> +	spin_lock_bh(&rq->lock);
> +	if ((rq->state & VIRTNET_RQ_LOCKED)) {
> +		rq->state |= VIRTNET_RQ_STATE_POLL_YIELD;
> +		rc = false;
> +	} else
> +		/* preserve yield marks */
> +		rq->state |= VIRTNET_RQ_STATE_POLL;
> +	spin_unlock_bh(&rq->lock);
> +	return rc;
> +}
> +
> +/* returns true if someone tried to get the receive queue while it was locked */
> +static inline bool virtnet_rq_unlock_poll(struct receive_queue *rq)
> +{
> +	int rc = false;
> +
> +	spin_lock_bh(&rq->lock);
> +	WARN_ON(rq->state & (VIRTNET_RQ_STATE_NAPI));
> +
> +	if (rq->state & VIRTNET_RQ_STATE_POLL_YIELD)
> +		rc = true;
> +	/* will reset state to idle, unless RQ is disabled */
> +	rq->state &= VIRTNET_RQ_STATE_DISABLED;
> +	spin_unlock_bh(&rq->lock);
> +	return rc;
> +}
> +
> +/* return false if RQ is currently owned */
> +static inline bool virtnet_rq_disable(struct receive_queue *rq)
> +{
> +	int rc = true;
> +
> +	spin_lock_bh(&rq->lock);
> +	if (rq->state & VIRTNET_RQ_OWNED)
> +		rc = false;
> +	rq->state |= VIRTNET_RQ_STATE_DISABLED;
> +	spin_unlock_bh(&rq->lock);
> +
> +	return rc;
> +}
> +
> +#else /* CONFIG_NET_RX_BUSY_POLL */
> +static inline void virtnet_rq_init_lock(struct receive_queue *rq)
> +{
> +}
> +
> +static inline bool virtnet_rq_lock_napi_refill(struct receive_queue *rq)
> +{
> +	return true;
> +}
> +
> +static inline bool virtnet_rq_unlock_napi_refill(struct receive_queue *rq)
> +{
> +	return false;
> +}
> +
> +static inline bool virtnet_rq_lock_poll(struct receive_queue *rq)
> +{
> +	return false;
> +}
> +
> +static inline bool virtnet_rq_unlock_poll(struct receive_queue *rq)
> +{
> +	return false;
> +}
> +
> +static inline bool virtnet_rq_disable(struct receive_queue *rq)
> +{
> +	return true;
> +}
> +
> +#endif /* CONFIG_NET_RX_BUSY_POLL */
> +
>   struct virtnet_info {
>   	struct virtio_device *vdev;
>   	struct virtqueue *cvq;
> @@ -521,6 +657,8 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
>   		skb_shinfo(skb)->gso_segs = 0;
>   	}
>   
> +	skb_mark_napi_id(skb, &rq->napi);
> +
>   	netif_receive_skb(skb);
>   	return;
>   
> @@ -714,7 +852,12 @@ static void refill_work(struct work_struct *work)
>   		struct receive_queue *rq = &vi->rq[i];
>   
>   		napi_disable(&rq->napi);
> +		if (!virtnet_rq_lock_napi_refill(rq)) {
> +			virtnet_napi_enable(rq);
> +			continue;
> +		}
>   		still_empty = !try_fill_recv(rq, GFP_KERNEL);
> +		virtnet_rq_unlock_napi_refill(rq);
>   		virtnet_napi_enable(rq);
>   
>   		/* In theory, this can happen: if we don't get any buffers in
> @@ -752,8 +895,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
>   	unsigned int r, received = 0;
>   
>   again:
> +	if (!virtnet_rq_lock_napi_refill(rq))
> +		return budget;
> +
>   	received += virtnet_receive(rq, budget);
>   
> +	virtnet_rq_unlock_napi_refill(rq);
> +
>   	/* Out of packets? */
>   	if (received < budget) {
>   		r = virtqueue_enable_cb_prepare(rq->vq);
> @@ -770,20 +918,50 @@ again:
>   	return received;
>   }
>   
> +#ifdef CONFIG_NET_RX_BUSY_POLL
> +/* must be called with local_bh_disable()d */
> +static int virtnet_low_latency_recv(struct napi_struct *napi)
> +{
> +	struct receive_queue *rq =
> +		container_of(napi, struct receive_queue, napi);
> +	struct virtnet_info *vi = rq->vq->vdev->priv;
> +	int received;
> +
> +	if (!(vi->status & VIRTIO_NET_S_LINK_UP))
> +		return LL_FLUSH_FAILED;
> +
> +	if (!virtnet_rq_lock_poll(rq))
> +		return LL_FLUSH_BUSY;
> +
> +	received = virtnet_receive(rq, 4);
> +
> +	virtnet_rq_unlock_poll(rq);
> +
> +	return received;
> +}
> +#endif	/* CONFIG_NET_RX_BUSY_POLL */
> +
>   static void virtnet_napi_enable_all(struct virtnet_info *vi)
>   {
>   	int i;
>   
> -	for (i = 0; i < vi->max_queue_pairs; i++)
> +	for (i = 0; i < vi->max_queue_pairs; i++) {
> +		virtnet_rq_init_lock(&vi->rq[i]);
>   		virtnet_napi_enable(&vi->rq[i]);
> +	}
>   }
>   
>   static void virtnet_napi_disable_all(struct virtnet_info *vi)
>   {
>   	int i;
>   
> -	for (i = 0; i < vi->max_queue_pairs; i++)
> +	for (i = 0; i < vi->max_queue_pairs; i++) {
>   		napi_disable(&vi->rq[i].napi);
> +		while (!virtnet_rq_disable(&vi->rq[i])) {
> +			pr_info("RQ %d locked\n", i);
> +			usleep_range(1000, 20000);
> +		}
> +	}
>   }
>   
>   static int virtnet_open(struct net_device *dev)
> @@ -1372,6 +1550,9 @@ static const struct net_device_ops virtnet_netdev = {
>   #ifdef CONFIG_NET_POLL_CONTROLLER
>   	.ndo_poll_controller = virtnet_netpoll,
>   #endif
> +#ifdef CONFIG_NET_RX_BUSY_POLL
> +	.ndo_busy_poll		= virtnet_low_latency_recv,
> +#endif
>   };
>   
>   static void virtnet_config_changed_work(struct work_struct *work)
> @@ -1577,6 +1758,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
>   		vi->rq[i].pages = NULL;
>   		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
>   			       napi_weight);
> +		napi_hash_add(&vi->rq[i].napi);
>   
>   		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
>   		ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT);
> @@ -1880,8 +2062,10 @@ static int virtnet_freeze(struct virtio_device *vdev)
>   
>   	if (netif_running(vi->dev)) {
>   		virtnet_napi_disable_all(vi);
> -		for (i = 0; i < vi->max_queue_pairs; i++)
> +		for (i = 0; i < vi->max_queue_pairs; i++) {
> +			napi_hash_del(&vi->rq[i].napi);
>   			netif_napi_del(&vi->rq[i].napi);
> +		}
>   	}
>   
>   	remove_vq_common(vi);
Jason Wang July 17, 2014, 2:55 a.m. UTC | #2
On 07/16/2014 04:38 PM, Varka Bhadram wrote:
> On 07/16/2014 11:51 AM, Jason Wang wrote:
>> Add basic support for rx busy polling.
>>
>> Test was done between a kvm guest and an external host. Two hosts were
>> connected through 40gb mlx4 cards. With both busy_poll and busy_read
>> set to 50 in the guest, 1 byte netperf tcp_rr shows a 116% improvement:
>> the transaction rate increased from 9151.94 to 19787.37.
>>
>> Cc: Rusty Russell <rusty@rustcorp.com.au>
>> Cc: Michael S. Tsirkin <mst@redhat.com>
>> Cc: Vlad Yasevich <vyasevic@redhat.com>
>> Cc: Eric Dumazet <eric.dumazet@gmail.com>
>> Signed-off-by: Jason Wang <jasowang@redhat.com>
>> ---
>>   drivers/net/virtio_net.c | 190
>> ++++++++++++++++++++++++++++++++++++++++++++++-
>>   1 file changed, 187 insertions(+), 3 deletions(-)
>>
>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>> index e417d93..4830713 100644
>> --- a/drivers/net/virtio_net.c
>> +++ b/drivers/net/virtio_net.c
>> @@ -27,6 +27,7 @@
>>   #include <linux/slab.h>
>>   #include <linux/cpu.h>
>>   #include <linux/average.h>
>> +#include <net/busy_poll.h>
>>     static int napi_weight = NAPI_POLL_WEIGHT;
>>   module_param(napi_weight, int, 0444);
>> @@ -94,8 +95,143 @@ struct receive_queue {
>>         /* Name of this receive queue: input.$index */
>>       char name[40];
>> +
>> +#ifdef CONFIG_NET_RX_BUSY_POLL
>> +    unsigned int state;
>> +#define VIRTNET_RQ_STATE_IDLE        0
>> +#define VIRTNET_RQ_STATE_NAPI         1    /* NAPI or refill owns
>> this RQ */
>> +#define VIRTNET_RQ_STATE_POLL         2    /* poll owns this RQ */
>> +#define VIRTNET_RQ_STATE_DISABLED    4    /* RQ is disabled */
>> +#define VIRTNET_RQ_OWNED (VIRTNET_RQ_STATE_NAPI |
>> VIRTNET_RQ_STATE_POLL)
>> +#define VIRTNET_RQ_LOCKED (VIRTNET_RQ_OWNED |
>> VIRTNET_RQ_STATE_DISABLED)
>> +#define VIRTNET_RQ_STATE_NAPI_YIELD  8    /* NAPI or refill yielded
>> this RQ */
>> +#define VIRTNET_RQ_STATE_POLL_YIELD  16   /* poll yielded this RQ */
>> +    spinlock_t lock;
>> +#endif  /* CONFIG_NET_RX_BUSY_POLL */
>>   };
>>   +#ifdef CONFIG_NET_RX_BUSY_POLL
>> +static inline void virtnet_rq_init_lock(struct receive_queue *rq)
>> +{
>> +
>> +    spin_lock_init(&rq->lock);
>> +    rq->state = VIRTNET_RQ_STATE_IDLE;
>> +}
>> +
>> +/* called from the device poll routine or refill routine to get
>> ownership of a
>> + * receive queue.
>> + */
>> +static inline bool virtnet_rq_lock_napi_refill(struct receive_queue
>> *rq)
>> +{
>> +    int rc = true;
>> +
>
> bool instead of int...?

Yes, that would be better.
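For clarity, this is all the agreed change amounts to (a trivial sketch of
the same helper, not a reposted patch):

static inline bool virtnet_rq_lock_napi_refill(struct receive_queue *rq)
{
	bool rc = true;	/* was "int rc = true;" in the posted patch */

	spin_lock(&rq->lock);
	if (rq->state & VIRTNET_RQ_LOCKED) {
		WARN_ON(rq->state & VIRTNET_RQ_STATE_NAPI);
		rq->state |= VIRTNET_RQ_STATE_NAPI_YIELD;
		rc = false;
	} else {
		/* we don't care if someone yielded */
		rq->state = VIRTNET_RQ_STATE_NAPI;
	}
	spin_unlock(&rq->lock);
	return rc;
}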
>
>> +    spin_lock(&rq->lock);
>> +    if (rq->state & VIRTNET_RQ_LOCKED) {
>> +        WARN_ON(rq->state & VIRTNET_RQ_STATE_NAPI);
>> +        rq->state |= VIRTNET_RQ_STATE_NAPI_YIELD;
>> +        rc = false;
>> +    } else
>> +        /* we don't care if someone yielded */
>> +        rq->state = VIRTNET_RQ_STATE_NAPI;
>> +    spin_unlock(&rq->lock);
>
> Lock for rq->state ...?
>
> If yes:
> spin_lock(&rq->lock);
> if (rq->state & VIRTNET_RQ_LOCKED) {
>     rq->state |= VIRTNET_RQ_STATE_NAPI_YIELD;
>     spin_unlock(&rq->lock);
>     WARN_ON(rq->state & VIRTNET_RQ_STATE_NAPI);
>     rc = false;
> } else {
>     /* we don't care if someone yielded */
>     rq->state = VIRTNET_RQ_STATE_NAPI;
>     spin_unlock(&rq->lock);
> } 

I didn't see any difference. Is this used to catch driver bugs
earlier? Btw, several other rx busy polling capable drivers do the same
thing.
Varka Bhadram July 17, 2014, 3:27 a.m. UTC | #3
On Thursday 17 July 2014 08:25 AM, Jason Wang wrote:
> On 07/16/2014 04:38 PM, Varka Bhadram wrote:
>> On 07/16/2014 11:51 AM, Jason Wang wrote:
>>> Add basic support for rx busy polling.
>>>
>>> Test was done between a kvm guest and an external host. Two hosts were
>>> connected through 40gb mlx4 cards. With both busy_poll and busy_read
>>> set to 50 in the guest, 1 byte netperf tcp_rr shows a 116% improvement:
>>> the transaction rate increased from 9151.94 to 19787.37.
>>>
>>> Cc: Rusty Russell <rusty@rustcorp.com.au>
>>> Cc: Michael S. Tsirkin <mst@redhat.com>
>>> Cc: Vlad Yasevich <vyasevic@redhat.com>
>>> Cc: Eric Dumazet <eric.dumazet@gmail.com>
>>> Signed-off-by: Jason Wang <jasowang@redhat.com>
>>> ---
>>>    drivers/net/virtio_net.c | 190
>>> ++++++++++++++++++++++++++++++++++++++++++++++-
>>>    1 file changed, 187 insertions(+), 3 deletions(-)
>>>
>>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>>> index e417d93..4830713 100644
>>> --- a/drivers/net/virtio_net.c
>>> +++ b/drivers/net/virtio_net.c
>>> @@ -27,6 +27,7 @@
>>>    #include <linux/slab.h>
>>>    #include <linux/cpu.h>
>>>    #include <linux/average.h>
>>> +#include <net/busy_poll.h>
>>>      static int napi_weight = NAPI_POLL_WEIGHT;
>>>    module_param(napi_weight, int, 0444);
>>> @@ -94,8 +95,143 @@ struct receive_queue {
>>>          /* Name of this receive queue: input.$index */
>>>        char name[40];
>>> +
>>> +#ifdef CONFIG_NET_RX_BUSY_POLL
>>> +    unsigned int state;
>>> +#define VIRTNET_RQ_STATE_IDLE        0
>>> +#define VIRTNET_RQ_STATE_NAPI         1    /* NAPI or refill owns
>>> this RQ */
>>> +#define VIRTNET_RQ_STATE_POLL         2    /* poll owns this RQ */
>>> +#define VIRTNET_RQ_STATE_DISABLED    4    /* RQ is disabled */
>>> +#define VIRTNET_RQ_OWNED (VIRTNET_RQ_STATE_NAPI |
>>> VIRTNET_RQ_STATE_POLL)
>>> +#define VIRTNET_RQ_LOCKED (VIRTNET_RQ_OWNED |
>>> VIRTNET_RQ_STATE_DISABLED)
>>> +#define VIRTNET_RQ_STATE_NAPI_YIELD  8    /* NAPI or refill yielded
>>> this RQ */
>>> +#define VIRTNET_RQ_STATE_POLL_YIELD  16   /* poll yielded this RQ */
>>> +    spinlock_t lock;
>>> +#endif  /* CONFIG_NET_RX_BUSY_POLL */
>>>    };
>>>    +#ifdef CONFIG_NET_RX_BUSY_POLL
>>> +static inline void virtnet_rq_init_lock(struct receive_queue *rq)
>>> +{
>>> +
>>> +    spin_lock_init(&rq->lock);
>>> +    rq->state = VIRTNET_RQ_STATE_IDLE;
>>> +}
>>> +
>>> +/* called from the device poll routine or refill routine to get
>>> ownership of a
>>> + * receive queue.
>>> + */
>>> +static inline bool virtnet_rq_lock_napi_refill(struct receive_queue
>>> *rq)
>>> +{
>>> +    int rc = true;
>>> +
>> bool instead of int...?
> Yes, that would be better.
>>> +    spin_lock(&rq->lock);
>>> +    if (rq->state & VIRTNET_RQ_LOCKED) {
>>> +        WARN_ON(rq->state & VIRTNET_RQ_STATE_NAPI);
>>> +        rq->state |= VIRTNET_RQ_STATE_NAPI_YIELD;
>>> +        rc = false;
>>> +    } else
>>> +        /* we don't care if someone yielded */
>>> +        rq->state = VIRTNET_RQ_STATE_NAPI;
>>> +    spin_unlock(&rq->lock);
>> Lock for rq->state ...?
>>
>> If yes:
>> spin_lock(&rq->lock);
>> if (rq->state & VIRTNET_RQ_LOCKED) {
>>      rq->state |= VIRTNET_RQ_STATE_NAPI_YIELD;
>>      spin_unlock(&rq->lock);
>>      WARN_ON(rq->state & VIRTNET_RQ_STATE_NAPI);
>>      rc = false;
>> } else {
>>      /* we don't care if someone yielded */
>>      rq->state = VIRTNET_RQ_STATE_NAPI;
>>      spin_unlock(&rq->lock);
>> }
> I didn't see any difference. Is this used to catch driver bugs
> earlier? Btw, several other rx busy polling capable drivers do the same
> thing.

We need not include WARN_ON() & rc = false in the critical section.
Jason Wang July 17, 2014, 4:43 a.m. UTC | #4
On 07/17/2014 11:27 AM, Varka Bhadram wrote:
>
> On Thursday 17 July 2014 08:25 AM, Jason Wang wrote:
>> On 07/16/2014 04:38 PM, Varka Bhadram wrote:
>>> On 07/16/2014 11:51 AM, Jason Wang wrote:
>>>> Add basic support for rx busy polling.
>>>>
>>>> Test was done between a kvm guest and an external host. Two hosts were
>>>> connected through 40gb mlx4 cards. With both busy_poll and busy_read
>>>> set to 50 in the guest, 1 byte netperf tcp_rr shows a 116% improvement:
>>>> the transaction rate increased from 9151.94 to 19787.37.
>>>>
>>>> Cc: Rusty Russell <rusty@rustcorp.com.au>
>>>> Cc: Michael S. Tsirkin <mst@redhat.com>
>>>> Cc: Vlad Yasevich <vyasevic@redhat.com>
>>>> Cc: Eric Dumazet <eric.dumazet@gmail.com>
>>>> Signed-off-by: Jason Wang <jasowang@redhat.com>
>>>> ---
>>>>    drivers/net/virtio_net.c | 190
>>>> ++++++++++++++++++++++++++++++++++++++++++++++-
>>>>    1 file changed, 187 insertions(+), 3 deletions(-)
>>>>
>>>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>>>> index e417d93..4830713 100644
>>>> --- a/drivers/net/virtio_net.c
>>>> +++ b/drivers/net/virtio_net.c
>>>> @@ -27,6 +27,7 @@
>>>>    #include <linux/slab.h>
>>>>    #include <linux/cpu.h>
>>>>    #include <linux/average.h>
>>>> +#include <net/busy_poll.h>
>>>>      static int napi_weight = NAPI_POLL_WEIGHT;
>>>>    module_param(napi_weight, int, 0444);
>>>> @@ -94,8 +95,143 @@ struct receive_queue {
>>>>          /* Name of this receive queue: input.$index */
>>>>        char name[40];
>>>> +
>>>> +#ifdef CONFIG_NET_RX_BUSY_POLL
>>>> +    unsigned int state;
>>>> +#define VIRTNET_RQ_STATE_IDLE        0
>>>> +#define VIRTNET_RQ_STATE_NAPI         1    /* NAPI or refill owns
>>>> this RQ */
>>>> +#define VIRTNET_RQ_STATE_POLL         2    /* poll owns this RQ */
>>>> +#define VIRTNET_RQ_STATE_DISABLED    4    /* RQ is disabled */
>>>> +#define VIRTNET_RQ_OWNED (VIRTNET_RQ_STATE_NAPI |
>>>> VIRTNET_RQ_STATE_POLL)
>>>> +#define VIRTNET_RQ_LOCKED (VIRTNET_RQ_OWNED |
>>>> VIRTNET_RQ_STATE_DISABLED)
>>>> +#define VIRTNET_RQ_STATE_NAPI_YIELD  8    /* NAPI or refill yielded
>>>> this RQ */
>>>> +#define VIRTNET_RQ_STATE_POLL_YIELD  16   /* poll yielded this RQ */
>>>> +    spinlock_t lock;
>>>> +#endif  /* CONFIG_NET_RX_BUSY_POLL */
>>>>    };
>>>>    +#ifdef CONFIG_NET_RX_BUSY_POLL
>>>> +static inline void virtnet_rq_init_lock(struct receive_queue *rq)
>>>> +{
>>>> +
>>>> +    spin_lock_init(&rq->lock);
>>>> +    rq->state = VIRTNET_RQ_STATE_IDLE;
>>>> +}
>>>> +
>>>> +/* called from the device poll routine or refill routine to get
>>>> ownership of a
>>>> + * receive queue.
>>>> + */
>>>> +static inline bool virtnet_rq_lock_napi_refill(struct receive_queue
>>>> *rq)
>>>> +{
>>>> +    int rc = true;
>>>> +
>>> bool instead of int...?
>> Yes, that would be better.
>>>> +    spin_lock(&rq->lock);
>>>> +    if (rq->state & VIRTNET_RQ_LOCKED) {
>>>> +        WARN_ON(rq->state & VIRTNET_RQ_STATE_NAPI);
>>>> +        rq->state |= VIRTNET_RQ_STATE_NAPI_YIELD;
>>>> +        rc = false;
>>>> +    } else
>>>> +        /* we don't care if someone yielded */
>>>> +        rq->state = VIRTNET_RQ_STATE_NAPI;
>>>> +    spin_unlock(&rq->lock);
>>> Lock for rq->state ...?
>>>
>>> If yes:
>>> spin_lock(&rq->lock);
>>> if (rq->state & VIRTNET_RQ_LOCKED) {
>>>      rq->state |= VIRTNET_RQ_STATE_NAPI_YIELD;
>>>      spin_unlock(&rq->lock);
>>>      WARN_ON(rq->state & VIRTNET_RQ_STATE_NAPI);
>>>      rc = false;
>>> } else {
>>>      /* we don't care if someone yielded */
>>>      rq->state = VIRTNET_RQ_STATE_NAPI;
>>>      spin_unlock(&rq->lock);
>>> }
>> I didn't see any difference. Is this used to catch driver bugs
>> earlier? Btw, several other rx busy polling capable drivers do the same
>> thing.
>
> We need not include WARN_ON() & rc = false in the critical section.
>

Ok, but unless there's a bug in the driver itself, WARN_ON() should be
just a condition check for a branch, so there should be no noticeable
difference.

Also, we should not check rq->state outside the protection of the lock.
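
For reference, a minimal sketch (not part of the patch) of how the
WARN_ON() could move out of the critical section without reading rq->state
unlocked: snapshot the state while the lock is held and warn on the
snapshot afterwards.

static inline bool virtnet_rq_lock_napi_refill(struct receive_queue *rq)
{
	bool rc = true;
	unsigned int state;

	spin_lock(&rq->lock);
	state = rq->state;	/* snapshot under the lock */
	if (state & VIRTNET_RQ_LOCKED) {
		rq->state |= VIRTNET_RQ_STATE_NAPI_YIELD;
		rc = false;
	} else {
		/* we don't care if someone yielded */
		rq->state = VIRTNET_RQ_STATE_NAPI;
	}
	spin_unlock(&rq->lock);

	WARN_ON(!rc && (state & VIRTNET_RQ_STATE_NAPI));
	return rc;
}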
Varka Bhadram July 17, 2014, 4:54 a.m. UTC | #5
On Thursday 17 July 2014 10:13 AM, Jason Wang wrote:
> On 07/17/2014 11:27 AM, Varka Bhadram wrote:
>> On Thursday 17 July 2014 08:25 AM, Jason Wang wrote:
>>> On 07/16/2014 04:38 PM, Varka Bhadram wrote:
>>>> On 07/16/2014 11:51 AM, Jason Wang wrote:
>>>>> Add basic support for rx busy polling.
>>>>>
>>>>> Test was done between a kvm guest and an external host. Two hosts were
>>>>> connected through 40gb mlx4 cards. With both busy_poll and busy_read
>>>>> set to 50 in the guest, 1 byte netperf tcp_rr shows a 116% improvement:
>>>>> the transaction rate increased from 9151.94 to 19787.37.
>>>>>
>>>>> Cc: Rusty Russell <rusty@rustcorp.com.au>
>>>>> Cc: Michael S. Tsirkin <mst@redhat.com>
>>>>> Cc: Vlad Yasevich <vyasevic@redhat.com>
>>>>> Cc: Eric Dumazet <eric.dumazet@gmail.com>
>>>>> Signed-off-by: Jason Wang <jasowang@redhat.com>
>>>>> ---
>>>>>     drivers/net/virtio_net.c | 190
>>>>> ++++++++++++++++++++++++++++++++++++++++++++++-
>>>>>     1 file changed, 187 insertions(+), 3 deletions(-)
>>>>>
>>>>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>>>>> index e417d93..4830713 100644
>>>>> --- a/drivers/net/virtio_net.c
>>>>> +++ b/drivers/net/virtio_net.c
>>>>> @@ -27,6 +27,7 @@
>>>>>     #include <linux/slab.h>
>>>>>     #include <linux/cpu.h>
>>>>>     #include <linux/average.h>
>>>>> +#include <net/busy_poll.h>
>>>>>       static int napi_weight = NAPI_POLL_WEIGHT;
>>>>>     module_param(napi_weight, int, 0444);
>>>>> @@ -94,8 +95,143 @@ struct receive_queue {
>>>>>           /* Name of this receive queue: input.$index */
>>>>>         char name[40];
>>>>> +
>>>>> +#ifdef CONFIG_NET_RX_BUSY_POLL
>>>>> +    unsigned int state;
>>>>> +#define VIRTNET_RQ_STATE_IDLE        0
>>>>> +#define VIRTNET_RQ_STATE_NAPI         1    /* NAPI or refill owns
>>>>> this RQ */
>>>>> +#define VIRTNET_RQ_STATE_POLL         2    /* poll owns this RQ */
>>>>> +#define VIRTNET_RQ_STATE_DISABLED    4    /* RQ is disabled */
>>>>> +#define VIRTNET_RQ_OWNED (VIRTNET_RQ_STATE_NAPI |
>>>>> VIRTNET_RQ_STATE_POLL)
>>>>> +#define VIRTNET_RQ_LOCKED (VIRTNET_RQ_OWNED |
>>>>> VIRTNET_RQ_STATE_DISABLED)
>>>>> +#define VIRTNET_RQ_STATE_NAPI_YIELD  8    /* NAPI or refill yielded
>>>>> this RQ */
>>>>> +#define VIRTNET_RQ_STATE_POLL_YIELD  16   /* poll yielded this RQ */
>>>>> +    spinlock_t lock;
>>>>> +#endif  /* CONFIG_NET_RX_BUSY_POLL */
>>>>>     };
>>>>>     +#ifdef CONFIG_NET_RX_BUSY_POLL
>>>>> +static inline void virtnet_rq_init_lock(struct receive_queue *rq)
>>>>> +{
>>>>> +
>>>>> +    spin_lock_init(&rq->lock);
>>>>> +    rq->state = VIRTNET_RQ_STATE_IDLE;
>>>>> +}
>>>>> +
>>>>> +/* called from the device poll routine or refill routine to get
>>>>> ownership of a
>>>>> + * receive queue.
>>>>> + */
>>>>> +static inline bool virtnet_rq_lock_napi_refill(struct receive_queue
>>>>> *rq)
>>>>> +{
>>>>> +    int rc = true;
>>>>> +
>>>> bool instead of int...?
>>> Yes, that would be better.
>>>>> +    spin_lock(&rq->lock);
>>>>> +    if (rq->state & VIRTNET_RQ_LOCKED) {
>>>>> +        WARN_ON(rq->state & VIRTNET_RQ_STATE_NAPI);
>>>>> +        rq->state |= VIRTNET_RQ_STATE_NAPI_YIELD;
>>>>> +        rc = false;
>>>>> +    } else
>>>>> +        /* we don't care if someone yielded */
>>>>> +        rq->state = VIRTNET_RQ_STATE_NAPI;
>>>>> +    spin_unlock(&rq->lock);
>>>> Lock for rq->state ...?
>>>>
>>>> If yes:
>>>> spin_lock(&rq->lock);
>>>> if (rq->state & VIRTNET_RQ_LOCKED) {
>>>>       rq->state |= VIRTNET_RQ_STATE_NAPI_YIELD;
>>>>       spin_unlock(&rq->lock);
>>>>       WARN_ON(rq->state & VIRTNET_RQ_STATE_NAPI);
>>>>       rc = false;
>>>> } else {
>>>>       /* we don't care if someone yielded */
>>>>       rq->state = VIRTNET_RQ_STATE_NAPI;
>>>>       spin_unlock(&rq->lock);
>>>> }
>>> I didn't see any difference. Is this used to catch driver bugs
>>> earlier? Btw, several other rx busy polling capable drivers do the same
>>> thing.
>> We need not include WARN_ON() & rc = false in the critical section.
>>
> Ok, but unless there's a bug in the driver itself, WARN_ON() should be
> just a condition check for a branch, so there should be no noticeable
> difference.
>
> Also, we should not check rq->state outside the protection of the lock.

Ok, I agree with you. But 'rc' can be set outside the protection of the lock.
Michael S. Tsirkin July 20, 2014, 8:31 p.m. UTC | #6
On Wed, Jul 16, 2014 at 02:21:47PM +0800, Jason Wang wrote:
> Add basic support for rx busy polling.
> 
> Test was done between a kvm guest and an external host. Two hosts were
> connected through 40gb mlx4 cards. With both busy_poll and busy_read
> set to 50 in the guest, 1 byte netperf tcp_rr shows a 116% improvement:
> the transaction rate increased from 9151.94 to 19787.37.

Pls include data about non-polling tests: any effect on
cpu utilization there?
There could be, as we are adding locking.

> 
> Cc: Rusty Russell <rusty@rustcorp.com.au>
> Cc: Michael S. Tsirkin <mst@redhat.com>
> Cc: Vlad Yasevich <vyasevic@redhat.com>
> Cc: Eric Dumazet <eric.dumazet@gmail.com>
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
>  drivers/net/virtio_net.c | 190 ++++++++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 187 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index e417d93..4830713 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -27,6 +27,7 @@
>  #include <linux/slab.h>
>  #include <linux/cpu.h>
>  #include <linux/average.h>
> +#include <net/busy_poll.h>
>  
>  static int napi_weight = NAPI_POLL_WEIGHT;
>  module_param(napi_weight, int, 0444);
> @@ -94,8 +95,143 @@ struct receive_queue {
>  
>  	/* Name of this receive queue: input.$index */
>  	char name[40];
> +
> +#ifdef CONFIG_NET_RX_BUSY_POLL
> +	unsigned int state;
> +#define VIRTNET_RQ_STATE_IDLE        0
> +#define VIRTNET_RQ_STATE_NAPI	     1    /* NAPI or refill owns this RQ */
> +#define VIRTNET_RQ_STATE_POLL	     2    /* poll owns this RQ */
> +#define VIRTNET_RQ_STATE_DISABLED    4    /* RQ is disabled */
> +#define VIRTNET_RQ_OWNED (VIRTNET_RQ_STATE_NAPI | VIRTNET_RQ_STATE_POLL)
> +#define VIRTNET_RQ_LOCKED (VIRTNET_RQ_OWNED | VIRTNET_RQ_STATE_DISABLED)
> +#define VIRTNET_RQ_STATE_NAPI_YIELD  8    /* NAPI or refill yielded this RQ */
> +#define VIRTNET_RQ_STATE_POLL_YIELD  16   /* poll yielded this RQ */
> +	spinlock_t lock;
> +#endif  /* CONFIG_NET_RX_BUSY_POLL */

do we have to have a new state? no way to reuse the napi state
for this? two lock/unlock operations for a poll seem
excessive.

>  };
>  
> +#ifdef CONFIG_NET_RX_BUSY_POLL
> +static inline void virtnet_rq_init_lock(struct receive_queue *rq)
> +{
> +
> +	spin_lock_init(&rq->lock);
> +	rq->state = VIRTNET_RQ_STATE_IDLE;
> +}
> +
> +/* called from the device poll routine or refill routine to get ownership of a
> + * receive queue.
> + */
> +static inline bool virtnet_rq_lock_napi_refill(struct receive_queue *rq)
> +{
> +	int rc = true;
> +
> +	spin_lock(&rq->lock);
> +	if (rq->state & VIRTNET_RQ_LOCKED) {
> +		WARN_ON(rq->state & VIRTNET_RQ_STATE_NAPI);
> +		rq->state |= VIRTNET_RQ_STATE_NAPI_YIELD;
> +		rc = false;
> +	} else
> +		/* we don't care if someone yielded */
> +		rq->state = VIRTNET_RQ_STATE_NAPI;
> +	spin_unlock(&rq->lock);
> +	return rc;
> +}
> +
> +/* returns true is someone tried to get the rq while napi or refill had it */

s/is/if/

> +static inline bool virtnet_rq_unlock_napi_refill(struct receive_queue *rq)
> +{
> +	int rc = false;
> +
> +	spin_lock(&rq->lock);
> +	WARN_ON(rq->state & (VIRTNET_RQ_STATE_POLL |
> +			     VIRTNET_RQ_STATE_NAPI_YIELD));
> +
> +	if (rq->state & VIRTNET_RQ_STATE_POLL_YIELD)
> +		rc = true;
> +	/* will reset state to idle, unless RQ is disabled */
> +	rq->state &= VIRTNET_RQ_STATE_DISABLED;
> +	spin_unlock(&rq->lock);
> +	return rc;
> +}
> +
> +/* called from virtnet_low_latency_recv() */
> +static inline bool virtnet_rq_lock_poll(struct receive_queue *rq)
> +{
> +	int rc = true;
> +
> +	spin_lock_bh(&rq->lock);
> +	if ((rq->state & VIRTNET_RQ_LOCKED)) {
> +		rq->state |= VIRTNET_RQ_STATE_POLL_YIELD;
> +		rc = false;
> +	} else
> +		/* preserve yield marks */
> +		rq->state |= VIRTNET_RQ_STATE_POLL;
> +	spin_unlock_bh(&rq->lock);
> +	return rc;
> +}
> +
> +/* returns true if someone tried to get the receive queue while it was locked */
> +static inline bool virtnet_rq_unlock_poll(struct receive_queue *rq)
> +{
> +	int rc = false;
> +
> +	spin_lock_bh(&rq->lock);
> +	WARN_ON(rq->state & (VIRTNET_RQ_STATE_NAPI));
> +
> +	if (rq->state & VIRTNET_RQ_STATE_POLL_YIELD)
> +		rc = true;
> +	/* will reset state to idle, unless RQ is disabled */
> +	rq->state &= VIRTNET_RQ_STATE_DISABLED;
> +	spin_unlock_bh(&rq->lock);
> +	return rc;
> +}
> +
> +/* return false if RQ is currently owned */
> +static inline bool virtnet_rq_disable(struct receive_queue *rq)
> +{
> +	int rc = true;
> +
> +	spin_lock_bh(&rq->lock);
> +	if (rq->state & VIRTNET_RQ_OWNED)
> +		rc = false;
> +	rq->state |= VIRTNET_RQ_STATE_DISABLED;
> +	spin_unlock_bh(&rq->lock);
> +
> +	return rc;
> +}
> +
> +#else /* CONFIG_NET_RX_BUSY_POLL */
> +static inline void virtnet_rq_init_lock(struct receive_queue *rq)
> +{
> +}
> +
> +static inline bool virtnet_rq_lock_napi_refill(struct receive_queue *rq)
> +{
> +	return true;
> +}
> +
> +static inline bool virtnet_rq_unlock_napi_refill(struct receive_queue *rq)
> +{
> +	return false;
> +}
> +
> +static inline bool virtnet_rq_lock_poll(struct receive_queue *rq)
> +{
> +	return false;
> +}
> +
> +static inline bool virtnet_rq_unlock_poll(struct receive_queue *rq)
> +{
> +	return false;
> +}
> +
> +static inline bool virtnet_rq_disable(struct receive_queue *rq)
> +{
> +	return true;
> +}
> +
> +#endif /* CONFIG_NET_RX_BUSY_POLL */
> +
>  struct virtnet_info {
>  	struct virtio_device *vdev;
>  	struct virtqueue *cvq;
> @@ -521,6 +657,8 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
>  		skb_shinfo(skb)->gso_segs = 0;
>  	}
>  
> +	skb_mark_napi_id(skb, &rq->napi);
> +
>  	netif_receive_skb(skb);
>  	return;
>  
> @@ -714,7 +852,12 @@ static void refill_work(struct work_struct *work)
>  		struct receive_queue *rq = &vi->rq[i];
>  
>  		napi_disable(&rq->napi);
> +		if (!virtnet_rq_lock_napi_refill(rq)) {
> +			virtnet_napi_enable(rq);
> +			continue;
> +		}
>  		still_empty = !try_fill_recv(rq, GFP_KERNEL);
> +		virtnet_rq_unlock_napi_refill(rq);
>  		virtnet_napi_enable(rq);
>  
>  		/* In theory, this can happen: if we don't get any buffers in
> @@ -752,8 +895,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
>  	unsigned int r, received = 0;
>  
>  again:
> +	if (!virtnet_rq_lock_napi_refill(rq))
> +		return budget;
> +
>  	received += virtnet_receive(rq, budget);
>  
> +	virtnet_rq_unlock_napi_refill(rq);
> +
>  	/* Out of packets? */
>  	if (received < budget) {
>  		r = virtqueue_enable_cb_prepare(rq->vq);
> @@ -770,20 +918,50 @@ again:
>  	return received;
>  }
>  
> +#ifdef CONFIG_NET_RX_BUSY_POLL
> +/* must be called with local_bh_disable()d */
> +static int virtnet_low_latency_recv(struct napi_struct *napi)

let's call it busy poll :)

> +{
> +	struct receive_queue *rq =
> +		container_of(napi, struct receive_queue, napi);
> +	struct virtnet_info *vi = rq->vq->vdev->priv;
> +	int received;
> +
> +	if (!(vi->status & VIRTIO_NET_S_LINK_UP))
> +		return LL_FLUSH_FAILED;
> +
> +	if (!virtnet_rq_lock_poll(rq))
> +		return LL_FLUSH_BUSY;
> +
> +	received = virtnet_receive(rq, 4);

Hmm why 4 exactly?

> +
> +	virtnet_rq_unlock_poll(rq);
> +
> +	return received;
> +}
> +#endif	/* CONFIG_NET_RX_BUSY_POLL */
> +
>  static void virtnet_napi_enable_all(struct virtnet_info *vi)
>  {
>  	int i;
>  
> -	for (i = 0; i < vi->max_queue_pairs; i++)
> +	for (i = 0; i < vi->max_queue_pairs; i++) {
> +		virtnet_rq_init_lock(&vi->rq[i]);
>  		virtnet_napi_enable(&vi->rq[i]);
> +	}
>  }
>  
>  static void virtnet_napi_disable_all(struct virtnet_info *vi)
>  {
>  	int i;
>  
> -	for (i = 0; i < vi->max_queue_pairs; i++)
> +	for (i = 0; i < vi->max_queue_pairs; i++) {
>  		napi_disable(&vi->rq[i].napi);
> +		while (!virtnet_rq_disable(&vi->rq[i])) {
> +			pr_info("RQ %d locked\n", i);
> +			usleep_range(1000, 20000);

What's going on here, exactly?

> +		}
> +	}
>  }
>  
>  static int virtnet_open(struct net_device *dev)
> @@ -1372,6 +1550,9 @@ static const struct net_device_ops virtnet_netdev = {
>  #ifdef CONFIG_NET_POLL_CONTROLLER
>  	.ndo_poll_controller = virtnet_netpoll,
>  #endif
> +#ifdef CONFIG_NET_RX_BUSY_POLL
> +	.ndo_busy_poll		= virtnet_low_latency_recv,
> +#endif
>  };
>  
>  static void virtnet_config_changed_work(struct work_struct *work)
> @@ -1577,6 +1758,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
>  		vi->rq[i].pages = NULL;
>  		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
>  			       napi_weight);
> +		napi_hash_add(&vi->rq[i].napi);
>  
>  		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
>  		ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT);
> @@ -1880,8 +2062,10 @@ static int virtnet_freeze(struct virtio_device *vdev)
>  
>  	if (netif_running(vi->dev)) {
>  		virtnet_napi_disable_all(vi);
> -		for (i = 0; i < vi->max_queue_pairs; i++)
> +		for (i = 0; i < vi->max_queue_pairs; i++) {
> +			napi_hash_del(&vi->rq[i].napi);
>  			netif_napi_del(&vi->rq[i].napi);
> +		}
>  	}
>  
>  	remove_vq_common(vi);
> -- 
> 1.8.3.1
Jason Wang July 21, 2014, 3:13 a.m. UTC | #7
On 07/21/2014 04:31 AM, Michael S. Tsirkin wrote:
> On Wed, Jul 16, 2014 at 02:21:47PM +0800, Jason Wang wrote:
>> Add basic support for rx busy polling.
>>
>> Test was done between a kvm guest and an external host. Two hosts were
>> connected through 40gb mlx4 cards. With both busy_poll and busy_read
>> set to 50 in the guest, 1 byte netperf tcp_rr shows a 116% improvement:
>> the transaction rate increased from 9151.94 to 19787.37.
> Pls include data about non-polling tests: any effect on
> cpu utilization there?
> There could be, as we are adding locking.

I will do some tests on this.
>
>> Cc: Rusty Russell <rusty@rustcorp.com.au>
>> Cc: Michael S. Tsirkin <mst@redhat.com>
>> Cc: Vlad Yasevich <vyasevic@redhat.com>
>> Cc: Eric Dumazet <eric.dumazet@gmail.com>
>> Signed-off-by: Jason Wang <jasowang@redhat.com>
>> ---
>>  drivers/net/virtio_net.c | 190 ++++++++++++++++++++++++++++++++++++++++++++++-
>>  1 file changed, 187 insertions(+), 3 deletions(-)
>>
>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>> index e417d93..4830713 100644
>> --- a/drivers/net/virtio_net.c
>> +++ b/drivers/net/virtio_net.c
>> @@ -27,6 +27,7 @@
>>  #include <linux/slab.h>
>>  #include <linux/cpu.h>
>>  #include <linux/average.h>
>> +#include <net/busy_poll.h>
>>  
>>  static int napi_weight = NAPI_POLL_WEIGHT;
>>  module_param(napi_weight, int, 0444);
>> @@ -94,8 +95,143 @@ struct receive_queue {
>>  
>>  	/* Name of this receive queue: input.$index */
>>  	char name[40];
>> +
>> +#ifdef CONFIG_NET_RX_BUSY_POLL
>> +	unsigned int state;
>> +#define VIRTNET_RQ_STATE_IDLE        0
>> +#define VIRTNET_RQ_STATE_NAPI	     1    /* NAPI or refill owns this RQ */
>> +#define VIRTNET_RQ_STATE_POLL	     2    /* poll owns this RQ */
>> +#define VIRTNET_RQ_STATE_DISABLED    4    /* RQ is disabled */
>> +#define VIRTNET_RQ_OWNED (VIRTNET_RQ_STATE_NAPI | VIRTNET_RQ_STATE_POLL)
>> +#define VIRTNET_RQ_LOCKED (VIRTNET_RQ_OWNED | VIRTNET_RQ_STATE_DISABLED)
>> +#define VIRTNET_RQ_STATE_NAPI_YIELD  8    /* NAPI or refill yielded this RQ */
>> +#define VIRTNET_RQ_STATE_POLL_YIELD  16   /* poll yielded this RQ */
>> +	spinlock_t lock;
>> +#endif  /* CONFIG_NET_RX_BUSY_POLL */
> do we have to have a new state? no way to reuse the napi state
> for this? two lock/unlock operations for a poll seem
> excessive.

I tried this way and it works. The only use I can think of for introducing
those states is to detect the yield and do some optimizations afterwards. But
only a few drivers (bnx2x) use the yield flag.

I think I can switch to using the NAPI state since we don't do such
optimization in virtio-net.
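
A rough sketch of what reusing the NAPI state could look like (hypothetical
helper names, just to illustrate the idea): ownership becomes an atomic
test_and_set_bit() on the existing NAPI_STATE_SCHED bit instead of a
driver-private spinlock plus state word.

static inline bool virtnet_rq_trylock(struct napi_struct *napi)
{
	/* true only if we won the SCHED bit, i.e. nobody else owns the RQ */
	return !test_and_set_bit(NAPI_STATE_SCHED, &napi->state);
}

static inline void virtnet_rq_unlock(struct napi_struct *napi)
{
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &napi->state);
}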
>
>>  };
>>  
>> +#ifdef CONFIG_NET_RX_BUSY_POLL
>> +static inline void virtnet_rq_init_lock(struct receive_queue *rq)
>> +{
>> +
>> +	spin_lock_init(&rq->lock);
>> +	rq->state = VIRTNET_RQ_STATE_IDLE;
>> +}
>> +
>> +/* called from the device poll routine or refill routine to get ownership of a
>> + * receive queue.
>> + */
>> +static inline bool virtnet_rq_lock_napi_refill(struct receive_queue *rq)
>> +{
>> +	int rc = true;
>> +
>> +	spin_lock(&rq->lock);
>> +	if (rq->state & VIRTNET_RQ_LOCKED) {
>> +		WARN_ON(rq->state & VIRTNET_RQ_STATE_NAPI);
>> +		rq->state |= VIRTNET_RQ_STATE_NAPI_YIELD;
>> +		rc = false;
>> +	} else
>> +		/* we don't care if someone yielded */
>> +		rq->state = VIRTNET_RQ_STATE_NAPI;
>> +	spin_unlock(&rq->lock);
>> +	return rc;
>> +}
>> +
>> +/* returns true is someone tried to get the rq while napi or refill had it */
> s/is/if/
>
>> +static inline bool virtnet_rq_unlock_napi_refill(struct receive_queue *rq)
>> +{
>> +	int rc = false;
>> +
>> +	spin_lock(&rq->lock);
>> +	WARN_ON(rq->state & (VIRTNET_RQ_STATE_POLL |
>> +			     VIRTNET_RQ_STATE_NAPI_YIELD));
>> +
>> +	if (rq->state & VIRTNET_RQ_STATE_POLL_YIELD)
>> +		rc = true;
>> +	/* will reset state to idle, unless RQ is disabled */
>> +	rq->state &= VIRTNET_RQ_STATE_DISABLED;
>> +	spin_unlock(&rq->lock);
>> +	return rc;
>> +}
>> +
>> +/* called from virtnet_low_latency_recv() */
>> +static inline bool virtnet_rq_lock_poll(struct receive_queue *rq)
>> +{
>> +	int rc = true;
>> +
>> +	spin_lock_bh(&rq->lock);
>> +	if ((rq->state & VIRTNET_RQ_LOCKED)) {
>> +		rq->state |= VIRTNET_RQ_STATE_POLL_YIELD;
>> +		rc = false;
>> +	} else
>> +		/* preserve yield marks */
>> +		rq->state |= VIRTNET_RQ_STATE_POLL;
>> +	spin_unlock_bh(&rq->lock);
>> +	return rc;
>> +}
>> +
>> +/* returns true if someone tried to get the receive queue while it was locked */
>> +static inline bool virtnet_rq_unlock_poll(struct receive_queue *rq)
>> +{
>> +	int rc = false;
>> +
>> +	spin_lock_bh(&rq->lock);
>> +	WARN_ON(rq->state & (VIRTNET_RQ_STATE_NAPI));
>> +
>> +	if (rq->state & VIRTNET_RQ_STATE_POLL_YIELD)
>> +		rc = true;
>> +	/* will reset state to idle, unless RQ is disabled */
>> +	rq->state &= VIRTNET_RQ_STATE_DISABLED;
>> +	spin_unlock_bh(&rq->lock);
>> +	return rc;
>> +}
>> +
>> +/* return false if RQ is currently owned */
>> +static inline bool virtnet_rq_disable(struct receive_queue *rq)
>> +{
>> +	int rc = true;
>> +
>> +	spin_lock_bh(&rq->lock);
>> +	if (rq->state & VIRTNET_RQ_OWNED)
>> +		rc = false;
>> +	rq->state |= VIRTNET_RQ_STATE_DISABLED;
>> +	spin_unlock_bh(&rq->lock);
>> +
>> +	return rc;
>> +}
>> +
>> +#else /* CONFIG_NET_RX_BUSY_POLL */
>> +static inline void virtnet_rq_init_lock(struct receive_queue *rq)
>> +{
>> +}
>> +
>> +static inline bool virtnet_rq_lock_napi_refill(struct receive_queue *rq)
>> +{
>> +	return true;
>> +}
>> +
>> +static inline bool virtnet_rq_unlock_napi_refill(struct receive_queue *rq)
>> +{
>> +	return false;
>> +}
>> +
>> +static inline bool virtnet_rq_lock_poll(struct receive_queue *rq)
>> +{
>> +	return false;
>> +}
>> +
>> +static inline bool virtnet_rq_unlock_poll(struct receive_queue *rq)
>> +{
>> +	return false;
>> +}
>> +
>> +static inline bool virtnet_rq_disable(struct receive_queue *rq)
>> +{
>> +	return true;
>> +}
>> +
>> +#endif /* CONFIG_NET_RX_BUSY_POLL */
>> +
>>  struct virtnet_info {
>>  	struct virtio_device *vdev;
>>  	struct virtqueue *cvq;
>> @@ -521,6 +657,8 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
>>  		skb_shinfo(skb)->gso_segs = 0;
>>  	}
>>  
>> +	skb_mark_napi_id(skb, &rq->napi);
>> +
>>  	netif_receive_skb(skb);
>>  	return;
>>  
>> @@ -714,7 +852,12 @@ static void refill_work(struct work_struct *work)
>>  		struct receive_queue *rq = &vi->rq[i];
>>  
>>  		napi_disable(&rq->napi);
>> +		if (!virtnet_rq_lock_napi_refill(rq)) {
>> +			virtnet_napi_enable(rq);
>> +			continue;
>> +		}
>>  		still_empty = !try_fill_recv(rq, GFP_KERNEL);
>> +		virtnet_rq_unlock_napi_refill(rq);
>>  		virtnet_napi_enable(rq);
>>  
>>  		/* In theory, this can happen: if we don't get any buffers in
>> @@ -752,8 +895,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
>>  	unsigned int r, received = 0;
>>  
>>  again:
>> +	if (!virtnet_rq_lock_napi_refill(rq))
>> +		return budget;
>> +
>>  	received += virtnet_receive(rq, budget);
>>  
>> +	virtnet_rq_unlock_napi_refill(rq);
>> +
>>  	/* Out of packets? */
>>  	if (received < budget) {
>>  		r = virtqueue_enable_cb_prepare(rq->vq);
>> @@ -770,20 +918,50 @@ again:
>>  	return received;
>>  }
>>  
>> +#ifdef CONFIG_NET_RX_BUSY_POLL
>> +/* must be called with local_bh_disable()d */
>> +static int virtnet_low_latency_recv(struct napi_struct *napi)
> let's call it busy poll :)

Ok.
>> +{
>> +	struct receive_queue *rq =
>> +		container_of(napi, struct receive_queue, napi);
>> +	struct virtnet_info *vi = rq->vq->vdev->priv;
>> +	int received;
>> +
>> +	if (!(vi->status & VIRTIO_NET_S_LINK_UP))
>> +		return LL_FLUSH_FAILED;
>> +
>> +	if (!virtnet_rq_lock_poll(rq))
>> +		return LL_FLUSH_BUSY;
>> +
>> +	received = virtnet_receive(rq, 4);
> Hmm why 4 exactly?

I think the reason is we need a quota here to prevent the busy polling
method from starving other threads. 4 is just copied from the existing
implementation (ixgbe).
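
If the quota survives into the next version, it may be worth naming it so
it documents itself (VIRTNET_BUSY_POLL_BUDGET is a made-up name, just a
sketch):

/* packets processed per busy-poll pass, kept small to avoid starvation */
#define VIRTNET_BUSY_POLL_BUDGET 4

	received = virtnet_receive(rq, VIRTNET_BUSY_POLL_BUDGET);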
>> +
>> +	virtnet_rq_unlock_poll(rq);
>> +
>> +	return received;
>> +}
>> +#endif	/* CONFIG_NET_RX_BUSY_POLL */
>> +
>>  static void virtnet_napi_enable_all(struct virtnet_info *vi)
>>  {
>>  	int i;
>>  
>> -	for (i = 0; i < vi->max_queue_pairs; i++)
>> +	for (i = 0; i < vi->max_queue_pairs; i++) {
>> +		virtnet_rq_init_lock(&vi->rq[i]);
>>  		virtnet_napi_enable(&vi->rq[i]);
>> +	}
>>  }
>>  
>>  static void virtnet_napi_disable_all(struct virtnet_info *vi)
>>  {
>>  	int i;
>>  
>> -	for (i = 0; i < vi->max_queue_pairs; i++)
>> +	for (i = 0; i < vi->max_queue_pairs; i++) {
>>  		napi_disable(&vi->rq[i].napi);
>> +		while (!virtnet_rq_disable(&vi->rq[i])) {
>> +			pr_info("RQ %d locked\n", i);
>> +			usleep_range(1000, 20000);
> What's going on here, exactly?

It is used to wait for any in-flight busy polling to finish.
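
In other words (an annotated copy of the loop above, same logic with the
comments spelled out): virtnet_rq_disable() marks the RQ disabled under
rq->lock but returns false while NAPI or a busy poller still owns it; the
owner's unlock path preserves the DISABLED bit, so a later iteration
succeeds once the poller drops the queue.

while (!virtnet_rq_disable(&vi->rq[i])) {
	/* a busy poller still owns the RQ; back off and retry */
	pr_info("RQ %d locked\n", i);
	usleep_range(1000, 20000);	/* sleep 1-20 ms between attempts */
}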
>> +		}
>> +	}
>>  }
>>  
>>  static int virtnet_open(struct net_device *dev)
>> @@ -1372,6 +1550,9 @@ static const struct net_device_ops virtnet_netdev = {
>>  #ifdef CONFIG_NET_POLL_CONTROLLER
>>  	.ndo_poll_controller = virtnet_netpoll,
>>  #endif
>> +#ifdef CONFIG_NET_RX_BUSY_POLL
>> +	.ndo_busy_poll		= virtnet_low_latency_recv,
>> +#endif
>>  };
>>  
>>  static void virtnet_config_changed_work(struct work_struct *work)
>> @@ -1577,6 +1758,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
>>  		vi->rq[i].pages = NULL;
>>  		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
>>  			       napi_weight);
>> +		napi_hash_add(&vi->rq[i].napi);
>>  
>>  		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
>>  		ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT);
>> @@ -1880,8 +2062,10 @@ static int virtnet_freeze(struct virtio_device *vdev)
>>  
>>  	if (netif_running(vi->dev)) {
>>  		virtnet_napi_disable_all(vi);
>> -		for (i = 0; i < vi->max_queue_pairs; i++)
>> +		for (i = 0; i < vi->max_queue_pairs; i++) {
>> +			napi_hash_del(&vi->rq[i].napi);
>>  			netif_napi_del(&vi->rq[i].napi);
>> +		}
>>  	}
>>  
>>  	remove_vq_common(vi);
>> -- 
>> 1.8.3.1


Patch

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index e417d93..4830713 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -27,6 +27,7 @@ 
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/average.h>
+#include <net/busy_poll.h>
 
 static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
@@ -94,8 +95,143 @@  struct receive_queue {
 
 	/* Name of this receive queue: input.$index */
 	char name[40];
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	unsigned int state;
+#define VIRTNET_RQ_STATE_IDLE        0
+#define VIRTNET_RQ_STATE_NAPI	     1    /* NAPI or refill owns this RQ */
+#define VIRTNET_RQ_STATE_POLL	     2    /* poll owns this RQ */
+#define VIRTNET_RQ_STATE_DISABLED    4    /* RQ is disabled */
+#define VIRTNET_RQ_OWNED (VIRTNET_RQ_STATE_NAPI | VIRTNET_RQ_STATE_POLL)
+#define VIRTNET_RQ_LOCKED (VIRTNET_RQ_OWNED | VIRTNET_RQ_STATE_DISABLED)
+#define VIRTNET_RQ_STATE_NAPI_YIELD  8    /* NAPI or refill yielded this RQ */
+#define VIRTNET_RQ_STATE_POLL_YIELD  16   /* poll yielded this RQ */
+	spinlock_t lock;
+#endif  /* CONFIG_NET_RX_BUSY_POLL */
 };
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void virtnet_rq_init_lock(struct receive_queue *rq)
+{
+
+	spin_lock_init(&rq->lock);
+	rq->state = VIRTNET_RQ_STATE_IDLE;
+}
+
+/* called from the device poll routine or refill routine to get ownership of a
+ * receive queue.
+ */
+static inline bool virtnet_rq_lock_napi_refill(struct receive_queue *rq)
+{
+	int rc = true;
+
+	spin_lock(&rq->lock);
+	if (rq->state & VIRTNET_RQ_LOCKED) {
+		WARN_ON(rq->state & VIRTNET_RQ_STATE_NAPI);
+		rq->state |= VIRTNET_RQ_STATE_NAPI_YIELD;
+		rc = false;
+	} else
+		/* we don't care if someone yielded */
+		rq->state = VIRTNET_RQ_STATE_NAPI;
+	spin_unlock(&rq->lock);
+	return rc;
+}
+
+/* returns true is someone tried to get the rq while napi or refill had it */
+static inline bool virtnet_rq_unlock_napi_refill(struct receive_queue *rq)
+{
+	int rc = false;
+
+	spin_lock(&rq->lock);
+	WARN_ON(rq->state & (VIRTNET_RQ_STATE_POLL |
+			     VIRTNET_RQ_STATE_NAPI_YIELD));
+
+	if (rq->state & VIRTNET_RQ_STATE_POLL_YIELD)
+		rc = true;
+	/* will reset state to idle, unless RQ is disabled */
+	rq->state &= VIRTNET_RQ_STATE_DISABLED;
+	spin_unlock(&rq->lock);
+	return rc;
+}
+
+/* called from virtnet_low_latency_recv() */
+static inline bool virtnet_rq_lock_poll(struct receive_queue *rq)
+{
+	int rc = true;
+
+	spin_lock_bh(&rq->lock);
+	if ((rq->state & VIRTNET_RQ_LOCKED)) {
+		rq->state |= VIRTNET_RQ_STATE_POLL_YIELD;
+		rc = false;
+	} else
+		/* preserve yield marks */
+		rq->state |= VIRTNET_RQ_STATE_POLL;
+	spin_unlock_bh(&rq->lock);
+	return rc;
+}
+
+/* returns true if someone tried to get the receive queue while it was locked */
+static inline bool virtnet_rq_unlock_poll(struct receive_queue *rq)
+{
+	int rc = false;
+
+	spin_lock_bh(&rq->lock);
+	WARN_ON(rq->state & (VIRTNET_RQ_STATE_NAPI));
+
+	if (rq->state & VIRTNET_RQ_STATE_POLL_YIELD)
+		rc = true;
+	/* will reset state to idle, unless RQ is disabled */
+	rq->state &= VIRTNET_RQ_STATE_DISABLED;
+	spin_unlock_bh(&rq->lock);
+	return rc;
+}
+
+/* return false if RQ is currently owned */
+static inline bool virtnet_rq_disable(struct receive_queue *rq)
+{
+	int rc = true;
+
+	spin_lock_bh(&rq->lock);
+	if (rq->state & VIRTNET_RQ_OWNED)
+		rc = false;
+	rq->state |= VIRTNET_RQ_STATE_DISABLED;
+	spin_unlock_bh(&rq->lock);
+
+	return rc;
+}
+
+#else /* CONFIG_NET_RX_BUSY_POLL */
+static inline void virtnet_rq_init_lock(struct receive_queue *rq)
+{
+}
+
+static inline bool virtnet_rq_lock_napi_refill(struct receive_queue *rq)
+{
+	return true;
+}
+
+static inline bool virtnet_rq_unlock_napi_refill(struct receive_queue *rq)
+{
+	return false;
+}
+
+static inline bool virtnet_rq_lock_poll(struct receive_queue *rq)
+{
+	return false;
+}
+
+static inline bool virtnet_rq_unlock_poll(struct receive_queue *rq)
+{
+	return false;
+}
+
+static inline bool virtnet_rq_disable(struct receive_queue *rq)
+{
+	return true;
+}
+
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 struct virtnet_info {
 	struct virtio_device *vdev;
 	struct virtqueue *cvq;
@@ -521,6 +657,8 @@  static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 		skb_shinfo(skb)->gso_segs = 0;
 	}
 
+	skb_mark_napi_id(skb, &rq->napi);
+
 	netif_receive_skb(skb);
 	return;
 
@@ -714,7 +852,12 @@  static void refill_work(struct work_struct *work)
 		struct receive_queue *rq = &vi->rq[i];
 
 		napi_disable(&rq->napi);
+		if (!virtnet_rq_lock_napi_refill(rq)) {
+			virtnet_napi_enable(rq);
+			continue;
+		}
 		still_empty = !try_fill_recv(rq, GFP_KERNEL);
+		virtnet_rq_unlock_napi_refill(rq);
 		virtnet_napi_enable(rq);
 
 		/* In theory, this can happen: if we don't get any buffers in
@@ -752,8 +895,13 @@  static int virtnet_poll(struct napi_struct *napi, int budget)
 	unsigned int r, received = 0;
 
 again:
+	if (!virtnet_rq_lock_napi_refill(rq))
+		return budget;
+
 	received += virtnet_receive(rq, budget);
 
+	virtnet_rq_unlock_napi_refill(rq);
+
 	/* Out of packets? */
 	if (received < budget) {
 		r = virtqueue_enable_cb_prepare(rq->vq);
@@ -770,20 +918,50 @@  again:
 	return received;
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+/* must be called with local_bh_disable()d */
+static int virtnet_low_latency_recv(struct napi_struct *napi)
+{
+	struct receive_queue *rq =
+		container_of(napi, struct receive_queue, napi);
+	struct virtnet_info *vi = rq->vq->vdev->priv;
+	int received;
+
+	if (!(vi->status & VIRTIO_NET_S_LINK_UP))
+		return LL_FLUSH_FAILED;
+
+	if (!virtnet_rq_lock_poll(rq))
+		return LL_FLUSH_BUSY;
+
+	received = virtnet_receive(rq, 4);
+
+	virtnet_rq_unlock_poll(rq);
+
+	return received;
+}
+#endif	/* CONFIG_NET_RX_BUSY_POLL */
+
 static void virtnet_napi_enable_all(struct virtnet_info *vi)
 {
 	int i;
 
-	for (i = 0; i < vi->max_queue_pairs; i++)
+	for (i = 0; i < vi->max_queue_pairs; i++) {
+		virtnet_rq_init_lock(&vi->rq[i]);
 		virtnet_napi_enable(&vi->rq[i]);
+	}
 }
 
 static void virtnet_napi_disable_all(struct virtnet_info *vi)
 {
 	int i;
 
-	for (i = 0; i < vi->max_queue_pairs; i++)
+	for (i = 0; i < vi->max_queue_pairs; i++) {
 		napi_disable(&vi->rq[i].napi);
+		while (!virtnet_rq_disable(&vi->rq[i])) {
+			pr_info("RQ %d locked\n", i);
+			usleep_range(1000, 20000);
+		}
+	}
 }
 
 static int virtnet_open(struct net_device *dev)
@@ -1372,6 +1550,9 @@  static const struct net_device_ops virtnet_netdev = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = virtnet_netpoll,
 #endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	.ndo_busy_poll		= virtnet_low_latency_recv,
+#endif
 };
 
 static void virtnet_config_changed_work(struct work_struct *work)
@@ -1577,6 +1758,7 @@  static int virtnet_alloc_queues(struct virtnet_info *vi)
 		vi->rq[i].pages = NULL;
 		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
 			       napi_weight);
+		napi_hash_add(&vi->rq[i].napi);
 
 		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
 		ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT);
@@ -1880,8 +2062,10 @@  static int virtnet_freeze(struct virtio_device *vdev)
 
 	if (netif_running(vi->dev)) {
 		virtnet_napi_disable_all(vi);
-		for (i = 0; i < vi->max_queue_pairs; i++)
+		for (i = 0; i < vi->max_queue_pairs; i++) {
+			napi_hash_del(&vi->rq[i].napi);
 			netif_napi_del(&vi->rq[i].napi);
+		}
 	}
 
 	remove_vq_common(vi);