
[net-next,v2,2/5] virtio-net: transmit napi

Message ID 20170418202108.61920-3-willemdebruijn.kernel@gmail.com
State Changes Requested, archived
Delegated to: David Miller

Commit Message

Willem de Bruijn April 18, 2017, 8:21 p.m. UTC
From: Willem de Bruijn <willemb@google.com>

Convert virtio-net to a standard napi tx completion path. This enables
better TCP pacing using TCP small queues and increases single stream
throughput.

The virtio-net driver currently cleans tx descriptors on transmission
of new packets in ndo_start_xmit. Latency depends on new traffic, so
it is unbounded. To avoid deadlock when a socket reaches its snd limit,
packets are orphaned on transmission. This breaks socket backpressure,
including TSQ.

Napi increases the number of interrupts generated compared to the
current model, which keeps interrupts disabled as long as the ring
has enough free descriptors. Keep tx napi optional and disabled for
now. Follow-on patches will reduce the interrupt cost.

Signed-off-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 drivers/net/virtio_net.c | 77 +++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 67 insertions(+), 10 deletions(-)

Comments

Jason Wang April 20, 2017, 6:12 a.m. UTC | #1
On 2017年04月19日 04:21, Willem de Bruijn wrote:
> From: Willem de Bruijn <willemb@google.com>
>
> Convert virtio-net to a standard napi tx completion path. This enables
> better TCP pacing using TCP small queues and increases single stream
> throughput.
>
> The virtio-net driver currently cleans tx descriptors on transmission
> of new packets in ndo_start_xmit. Latency depends on new traffic, so
> is unbounded. To avoid deadlock when a socket reaches its snd limit,
> packets are orphaned on tranmission. This breaks socket backpressure,
> including TSQ.
>
> Napi increases the number of interrupts generated compared to the
> current model, which keeps interrupts disabled as long as the ring
> has enough free descriptors. Keep tx napi optional and disabled for
> now. Follow-on patches will reduce the interrupt cost.
>
> Signed-off-by: Willem de Bruijn <willemb@google.com>
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
>   drivers/net/virtio_net.c | 77 +++++++++++++++++++++++++++++++++++++++++-------
>   1 file changed, 67 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index b9c1df29892c..c173e85dc7b8 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -33,9 +33,10 @@
>   static int napi_weight = NAPI_POLL_WEIGHT;
>   module_param(napi_weight, int, 0444);
>   
> -static bool csum = true, gso = true;
> +static bool csum = true, gso = true, napi_tx;
>   module_param(csum, bool, 0444);
>   module_param(gso, bool, 0444);
> +module_param(napi_tx, bool, 0644);
>   
>   /* FIXME: MTU in config. */
>   #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
> @@ -86,6 +87,8 @@ struct send_queue {
>   
>   	/* Name of the send queue: output.$index */
>   	char name[40];
> +
> +	struct napi_struct napi;
>   };
>   
>   /* Internal representation of a receive virtqueue */
> @@ -262,12 +265,16 @@ static void virtqueue_napi_complete(struct napi_struct *napi,
>   static void skb_xmit_done(struct virtqueue *vq)
>   {
>   	struct virtnet_info *vi = vq->vdev->priv;
> +	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
>   
>   	/* Suppress further interrupts. */
>   	virtqueue_disable_cb(vq);
>   
> -	/* We were probably waiting for more output buffers. */
> -	netif_wake_subqueue(vi->dev, vq2txq(vq));
> +	if (napi->weight)
> +		virtqueue_napi_schedule(napi, vq);
> +	else
> +		/* We were probably waiting for more output buffers. */
> +		netif_wake_subqueue(vi->dev, vq2txq(vq));
>   }
>   
>   static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx)
> @@ -972,6 +979,21 @@ static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
>   	local_bh_enable();
>   }
>   
> +static void virtnet_napi_tx_enable(struct virtnet_info *vi,
> +				   struct virtqueue *vq,
> +				   struct napi_struct *napi)
> +{
> +	if (!napi->weight)
> +		return;
> +
> +	if (!vi->affinity_hint_set) {
> +		napi->weight = 0;
> +		return;
> +	}
> +
> +	return virtnet_napi_enable(vq, napi);
> +}
> +
>   static void refill_work(struct work_struct *work)
>   {
>   	struct virtnet_info *vi =
> @@ -1046,6 +1068,7 @@ static int virtnet_open(struct net_device *dev)
>   			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
>   				schedule_delayed_work(&vi->refill, 0);
>   		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
> +		virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
>   	}
>   
>   	return 0;
> @@ -1081,6 +1104,25 @@ static void free_old_xmit_skbs(struct send_queue *sq)
>   	u64_stats_update_end(&stats->tx_syncp);
>   }
>   
> +static int virtnet_poll_tx(struct napi_struct *napi, int budget)
> +{
> +	struct send_queue *sq = container_of(napi, struct send_queue, napi);
> +	struct virtnet_info *vi = sq->vq->vdev->priv;
> +	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
> +
> +	if (__netif_tx_trylock(txq)) {
> +		free_old_xmit_skbs(sq);
> +		__netif_tx_unlock(txq);
> +	}
> +
> +	virtqueue_napi_complete(napi, sq->vq, 0);
> +
> +	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
> +		netif_tx_wake_queue(txq);
> +
> +	return 0;
> +}
> +
>   static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
>   {
>   	struct virtio_net_hdr_mrg_rxbuf *hdr;
> @@ -1130,9 +1172,11 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
>   	int err;
>   	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
>   	bool kick = !skb->xmit_more;
> +	bool use_napi = sq->napi.weight;
>   
>   	/* Free up any pending old buffers before queueing new ones. */
> -	free_old_xmit_skbs(sq);
> +	if (!use_napi)
> +		free_old_xmit_skbs(sq);

I'm not sure this is best or even correct. Considering we clean xmit
packets speculatively in virtnet_poll_tx(), we need to call
free_old_xmit_skbs() unconditionally here. This can also help to reduce
the possibility of napi rescheduling in virtnet_poll_tx().

Thanks
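
Concretely, the suggestion is to drop the use_napi guard around the cleanup
in start_xmit(). A minimal sketch of that fragment, reusing the helpers from
the posted patch (illustrative only, not the posted code):

	/* Free up any pending old buffers before queueing new ones,
	 * regardless of napi mode: virtnet_poll_tx() only trylocks the
	 * tx queue, so it may have skipped cleaning under contention.
	 */
	free_old_xmit_skbs(sq);
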
Jason Wang April 20, 2017, 6:27 a.m. UTC | #2
On 2017年04月19日 04:21, Willem de Bruijn wrote:
> +static void virtnet_napi_tx_enable(struct virtnet_info *vi,
> +				   struct virtqueue *vq,
> +				   struct napi_struct *napi)
> +{
> +	if (!napi->weight)
> +		return;
> +
> +	if (!vi->affinity_hint_set) {
> +		napi->weight = 0;
> +		return;
> +	}
> +
> +	return virtnet_napi_enable(vq, napi);
> +}
> +
>   static void refill_work(struct work_struct *work)

Maybe I was wrong, but according to Michael's comment it looks like he
wants to check affinity_hint_set just for speculative tx polling on rx
napi instead of disabling it entirely.

And I'm not convinced this is really needed: the driver only provides an
affinity hint rather than actual affinity, so it's not guaranteed that tx
and rx interrupts are on the same vcpus.

Thanks
Willem de Bruijn April 20, 2017, 1:58 p.m. UTC | #3
On Thu, Apr 20, 2017 at 2:27 AM, Jason Wang <jasowang@redhat.com> wrote:
>
>
> On 2017年04月19日 04:21, Willem de Bruijn wrote:
>>
>> +static void virtnet_napi_tx_enable(struct virtnet_info *vi,
>> +                                  struct virtqueue *vq,
>> +                                  struct napi_struct *napi)
>> +{
>> +       if (!napi->weight)
>> +               return;
>> +
>> +       if (!vi->affinity_hint_set) {
>> +               napi->weight = 0;
>> +               return;
>> +       }
>> +
>> +       return virtnet_napi_enable(vq, napi);
>> +}
>> +
>>   static void refill_work(struct work_struct *work)
>
>
> Maybe I was wrong, but according to Michael's comment it looks like he want
> check affinity_hint_set just for speculative tx polling on rx napi instead
> of disabling it at all.
>
> And I'm not convinced this is really needed, driver only provide affinity
> hint instead of affinity, so it's not guaranteed that tx and rx interrupt
> are in the same vcpus.

You're right. I made the restriction broader than the request, to really err
on the side of caution for the initial merge of napi tx. And enabling
the optimization is always a win over keeping it off, even without irq
affinity.

The cycle cost is significant without affinity regardless of whether the
optimization is used. Though this is not limited to napi-tx, it is more
pronounced in that mode than without napi.

1x TCP_RR for affinity configuration {process, rx_irq, tx_irq}:

upstream:

1,1,1: 28985 Mbps, 278 Gcyc
1,0,2: 30067 Mbps, 402 Gcyc

napi tx:

1,1,1: 34492 Mbps, 269 Gcyc
1,0,2: 36527 Mbps, 537 Gcyc (!)
1,0,1: 36269 Mbps, 394 Gcyc
1,0,0: 34674 Mbps, 402 Gcyc

This is a particularly strong example. It is also representative
of most RR tests. It is less pronounced in other streaming tests.
10x TCP_RR, for instance:

upstream:

1,1,1: 42267 Mbps, 301 Gcyc
1,0,2: 40663 Mbps, 445 Gcyc

napi tx:

1,1,1: 42420 Mbps, 303 Gcyc
1,0,2:  42267 Mbps, 431 Gcyc

These numbers were obtained with the virtqueue_enable_cb_delayed
optimization after xmit_skb, btw. It turns out that moving that before
increases 1x TCP_RR further to ~39 Gbps, at the cost of reducing
100x TCP_RR a bit.
Willem de Bruijn April 20, 2017, 4:02 p.m. UTC | #4
>>   static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
>>   {
>>         struct virtio_net_hdr_mrg_rxbuf *hdr;
>> @@ -1130,9 +1172,11 @@ static netdev_tx_t start_xmit(struct sk_buff *skb,
>> struct net_device *dev)
>>         int err;
>>         struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
>>         bool kick = !skb->xmit_more;
>> +       bool use_napi = sq->napi.weight;
>>         /* Free up any pending old buffers before queueing new ones. */
>> -       free_old_xmit_skbs(sq);
>> +       if (!use_napi)
>> +               free_old_xmit_skbs(sq);
>
>
> I'm not sure this is best or even correct. Consider we clean xmit packets
> speculatively in virtnet_poll_tx(), we need call free_old_xmit_skbs()
> unconditionally. This can also help to reduce the possible of napi
> rescheduling in virtnet_poll_tx().

Because of the use of trylock there. Absolutely, thanks! Perhaps I should
only use trylock in the opportunistic clean path from the rx softirq and
full locking in the tx softirq.

I previously observed that cleaning here would, counterintuitively,
reduce efficiency. It reverted the improvements of cleaning transmit
completions from the receive softirq. Going through my data, I did
not observe this regression anymore on the latest patchset.

Let me test again, with and without the new
virtqueue_enable_cb_delayed patch. Perhaps that made a
difference.
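
A rough sketch of the full-locking variant of the tx poll handler discussed
here, reusing the helpers from the posted patch (a sketch of the idea only,
not the code that was eventually applied):

static int virtnet_poll_tx(struct napi_struct *napi, int budget)
{
	struct send_queue *sq = container_of(napi, struct send_queue, napi);
	struct virtnet_info *vi = sq->vq->vdev->priv;
	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));

	/* Take the lock unconditionally so the handler always cleans,
	 * instead of possibly skipping the cleanup when trylock fails.
	 */
	__netif_tx_lock(txq, raw_smp_processor_id());
	free_old_xmit_skbs(sq);
	__netif_tx_unlock(txq);

	virtqueue_napi_complete(napi, sq->vq, 0);

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);

	return 0;
}
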
Jason Wang April 21, 2017, 3:53 a.m. UTC | #5
On 2017年04月20日 21:58, Willem de Bruijn wrote:
> On Thu, Apr 20, 2017 at 2:27 AM, Jason Wang <jasowang@redhat.com> wrote:
>>
>> On 2017年04月19日 04:21, Willem de Bruijn wrote:
>>> +static void virtnet_napi_tx_enable(struct virtnet_info *vi,
>>> +                                  struct virtqueue *vq,
>>> +                                  struct napi_struct *napi)
>>> +{
>>> +       if (!napi->weight)
>>> +               return;
>>> +
>>> +       if (!vi->affinity_hint_set) {
>>> +               napi->weight = 0;
>>> +               return;
>>> +       }
>>> +
>>> +       return virtnet_napi_enable(vq, napi);
>>> +}
>>> +
>>>    static void refill_work(struct work_struct *work)
>>
>> Maybe I was wrong, but according to Michael's comment it looks like he want
>> check affinity_hint_set just for speculative tx polling on rx napi instead
>> of disabling it at all.
>>
>> And I'm not convinced this is really needed, driver only provide affinity
>> hint instead of affinity, so it's not guaranteed that tx and rx interrupt
>> are in the same vcpus.
> You're right. I made the restriction broader than the request, to really err
> on the side of caution for the initial merge of napi tx. And enabling
> the optimization is always a win over keeping it off, even without irq
> affinity.
>
> The cycle cost is significant without affinity regardless of whether the
> optimization is used.

Yes, I noticed this in the past too.

> Though this is not limited to napi-tx, it is more
> pronounced in that mode than without napi.
>
> 1x TCP_RR for affinity configuration {process, rx_irq, tx_irq}:
>
> upstream:
>
> 1,1,1: 28985 Mbps, 278 Gcyc
> 1,0,2: 30067 Mbps, 402 Gcyc
>
> napi tx:
>
> 1,1,1: 34492 Mbps, 269 Gcyc
> 1,0,2: 36527 Mbps, 537 Gcyc (!)
> 1,0,1: 36269 Mbps, 394 Gcyc
> 1,0,0: 34674 Mbps, 402 Gcyc
>
> This is a particularly strong example. It is also representative
> of most RR tests. It is less pronounced in other streaming tests.
> 10x TCP_RR, for instance:
>
> upstream:
>
> 1,1,1: 42267 Mbps, 301 Gcyc
> 1,0,2: 40663 Mbps, 445 Gcyc
>
> napi tx:
>
> 1,1,1: 42420 Mbps, 303 Gcyc
> 1,0,2:  42267 Mbps, 431 Gcyc
>
> These numbers were obtained with the virtqueue_enable_cb_delayed
> optimization after xmit_skb, btw. It turns out that moving that before
> increases 1x TCP_RR further to ~39 Gbps, at the cost of reducing
> 100x TCP_RR a bit.

I see, so I think we can leave the affinity hint optimization/check for 
future investigation:

- to avoid endless optimization in this series (e.g. we may want to share
a single vector/napi per tx/rx queue pair in the future).
- tx napi is disabled by default, which means we can do further
optimization on top.

Thanks
Willem de Bruijn April 21, 2017, 2:50 p.m. UTC | #6
>>> Maybe I was wrong, but according to Michael's comment it looks like he
>>> want
>>> check affinity_hint_set just for speculative tx polling on rx napi
>>> instead
>>> of disabling it at all.
>>>
>>> And I'm not convinced this is really needed, driver only provide affinity
>>> hint instead of affinity, so it's not guaranteed that tx and rx interrupt
>>> are in the same vcpus.
>>
>> You're right. I made the restriction broader than the request, to really
>> err
>> on the side of caution for the initial merge of napi tx. And enabling
>> the optimization is always a win over keeping it off, even without irq
>> affinity.
>>
>> The cycle cost is significant without affinity regardless of whether the
>> optimization is used.
>
>
> Yes, I noticed this in the past too.
>
>> Though this is not limited to napi-tx, it is more
>> pronounced in that mode than without napi.
>>
>> 1x TCP_RR for affinity configuration {process, rx_irq, tx_irq}:
>>
>> upstream:
>>
>> 1,1,1: 28985 Mbps, 278 Gcyc
>> 1,0,2: 30067 Mbps, 402 Gcyc
>>
>> napi tx:
>>
>> 1,1,1: 34492 Mbps, 269 Gcyc
>> 1,0,2: 36527 Mbps, 537 Gcyc (!)
>> 1,0,1: 36269 Mbps, 394 Gcyc
>> 1,0,0: 34674 Mbps, 402 Gcyc
>>
>> This is a particularly strong example. It is also representative
>> of most RR tests. It is less pronounced in other streaming tests.
>> 10x TCP_RR, for instance:
>>
>> upstream:
>>
>> 1,1,1: 42267 Mbps, 301 Gcyc
>> 1,0,2: 40663 Mbps, 445 Gcyc
>>
>> napi tx:
>>
>> 1,1,1: 42420 Mbps, 303 Gcyc
>> 1,0,2:  42267 Mbps, 431 Gcyc
>>
>> These numbers were obtained with the virtqueue_enable_cb_delayed
>> optimization after xmit_skb, btw. It turns out that moving that before
>> increases 1x TCP_RR further to ~39 Gbps, at the cost of reducing
>> 100x TCP_RR a bit.
>
>
> I see, so I think we can leave the affinity hint optimization/check for
> future investigation:
>
> - to avoid endless optimization (e.g we may want to share a single
> vector/napi for tx/rx queue pairs in the future) for this series.
> - tx napi is disabled by default which means we can do optimization on top.

Okay. I'll drop the vi->affinity_hint_set from the patch set for now.
Willem de Bruijn April 21, 2017, 6:10 p.m. UTC | #7
On Thu, Apr 20, 2017 at 12:02 PM, Willem de Bruijn
<willemdebruijn.kernel@gmail.com> wrote:
>>>   static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
>>>   {
>>>         struct virtio_net_hdr_mrg_rxbuf *hdr;
>>> @@ -1130,9 +1172,11 @@ static netdev_tx_t start_xmit(struct sk_buff *skb,
>>> struct net_device *dev)
>>>         int err;
>>>         struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
>>>         bool kick = !skb->xmit_more;
>>> +       bool use_napi = sq->napi.weight;
>>>         /* Free up any pending old buffers before queueing new ones. */
>>> -       free_old_xmit_skbs(sq);
>>> +       if (!use_napi)
>>> +               free_old_xmit_skbs(sq);
>>
>>
>> I'm not sure this is best or even correct. Consider we clean xmit packets
>> speculatively in virtnet_poll_tx(), we need call free_old_xmit_skbs()
>> unconditionally. This can also help to reduce the possible of napi
>> rescheduling in virtnet_poll_tx().
>
> Because of the use of trylock there. Absolutely, thanks! Perhaps I should
> only use trylock in the opportunistic clean path from the rx softirq and
> full locking in the tx softirq.
>
> I previously observed that cleaning here would, counterintuitively,
> reduce efficiency. It reverted the improvements of cleaning transmit
> completions from the receive softirq. Going through my data, I did
> not observe this regression anymore on the latest patchset.
>
> Let me test again, with and without the new
> virtqueue_enable_cb_delayed patch. Perhaps that made a
> difference.

Neither cleaning in start_xmit nor converting the napi tx trylock to lock
shows a significant impact on loadtests, whether cpu affine or not.

I'll make both changes, as the first reduces patch size and code complexity
and the second is a more obviously correct codepath than with trylock.

To be clear, the variant called from the rx napi handler will still
opportunistically use trylock.
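
For reference, a sketch of what that opportunistic rx-side cleaner could look
like: trylock is acceptable there precisely because the dedicated tx napi
handler takes the lock unconditionally. The function name and placement are
assumptions; the actual code belongs to a later patch in the series:

static void virtnet_poll_cleantx(struct receive_queue *rq)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	unsigned int index = vq2rxq(rq->vq);
	struct send_queue *sq = &vi->sq[index];
	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);

	if (!sq->napi.weight)
		return;

	/* Opportunistic: skip cleaning if the tx lock is contended. */
	if (__netif_tx_trylock(txq)) {
		free_old_xmit_skbs(sq);
		__netif_tx_unlock(txq);
	}

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);
}
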
Michael S. Tsirkin April 24, 2017, 4:40 p.m. UTC | #8
On Fri, Apr 21, 2017 at 10:50:12AM -0400, Willem de Bruijn wrote:
> >>> Maybe I was wrong, but according to Michael's comment it looks like he
> >>> want
> >>> check affinity_hint_set just for speculative tx polling on rx napi
> >>> instead
> >>> of disabling it at all.
> >>>
> >>> And I'm not convinced this is really needed, driver only provide affinity
> >>> hint instead of affinity, so it's not guaranteed that tx and rx interrupt
> >>> are in the same vcpus.
> >>
> >> You're right. I made the restriction broader than the request, to really
> >> err
> >> on the side of caution for the initial merge of napi tx. And enabling
> >> the optimization is always a win over keeping it off, even without irq
> >> affinity.
> >>
> >> The cycle cost is significant without affinity regardless of whether the
> >> optimization is used.
> >
> >
> > Yes, I noticed this in the past too.
> >
> >> Though this is not limited to napi-tx, it is more
> >> pronounced in that mode than without napi.
> >>
> >> 1x TCP_RR for affinity configuration {process, rx_irq, tx_irq}:
> >>
> >> upstream:
> >>
> >> 1,1,1: 28985 Mbps, 278 Gcyc
> >> 1,0,2: 30067 Mbps, 402 Gcyc
> >>
> >> napi tx:
> >>
> >> 1,1,1: 34492 Mbps, 269 Gcyc
> >> 1,0,2: 36527 Mbps, 537 Gcyc (!)
> >> 1,0,1: 36269 Mbps, 394 Gcyc
> >> 1,0,0: 34674 Mbps, 402 Gcyc
> >>
> >> This is a particularly strong example. It is also representative
> >> of most RR tests. It is less pronounced in other streaming tests.
> >> 10x TCP_RR, for instance:
> >>
> >> upstream:
> >>
> >> 1,1,1: 42267 Mbps, 301 Gcyc
> >> 1,0,2: 40663 Mbps, 445 Gcyc
> >>
> >> napi tx:
> >>
> >> 1,1,1: 42420 Mbps, 303 Gcyc
> >> 1,0,2:  42267 Mbps, 431 Gcyc
> >>
> >> These numbers were obtained with the virtqueue_enable_cb_delayed
> >> optimization after xmit_skb, btw. It turns out that moving that before
> >> increases 1x TCP_RR further to ~39 Gbps, at the cost of reducing
> >> 100x TCP_RR a bit.
> >
> >
> > I see, so I think we can leave the affinity hint optimization/check for
> > future investigation:
> >
> > - to avoid endless optimization (e.g we may want to share a single
> > vector/napi for tx/rx queue pairs in the future) for this series.
> > - tx napi is disabled by default which means we can do optimization on top.
> 
> Okay. I'll drop the vi->affinity_hint_set from the patch set for now.

I kind of like it, let's be conservative. But I'd prefer a comment
near it explaining why it's there.
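
One possible shape for the requested comment, on top of the
virtnet_napi_tx_enable() from this patch (the comment wording is a guess,
not the final code):

static void virtnet_napi_tx_enable(struct virtnet_info *vi,
				   struct virtqueue *vq,
				   struct napi_struct *napi)
{
	if (!napi->weight)
		return;

	/* Tx napi adds interrupt and cacheline pressure on the tx path.
	 * Be conservative for the initial merge: only enable it when the
	 * irq affinity hint is set, so tx interrupts likely run on the
	 * same cpu as the transmitting process.
	 */
	if (!vi->affinity_hint_set) {
		napi->weight = 0;
		return;
	}

	virtnet_napi_enable(vq, napi);
}
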
Willem de Bruijn April 24, 2017, 5:05 p.m. UTC | #9
On Mon, Apr 24, 2017 at 12:40 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> On Fri, Apr 21, 2017 at 10:50:12AM -0400, Willem de Bruijn wrote:
>> >>> Maybe I was wrong, but according to Michael's comment it looks like he
>> >>> want
>> >>> check affinity_hint_set just for speculative tx polling on rx napi
>> >>> instead
>> >>> of disabling it at all.
>> >>>
>> >>> And I'm not convinced this is really needed, driver only provide affinity
>> >>> hint instead of affinity, so it's not guaranteed that tx and rx interrupt
>> >>> are in the same vcpus.
>> >>
>> >> You're right. I made the restriction broader than the request, to really
>> >> err
>> >> on the side of caution for the initial merge of napi tx. And enabling
>> >> the optimization is always a win over keeping it off, even without irq
>> >> affinity.
>> >>
>> >> The cycle cost is significant without affinity regardless of whether the
>> >> optimization is used.
>> >
>> >
>> > Yes, I noticed this in the past too.
>> >
>> >> Though this is not limited to napi-tx, it is more
>> >> pronounced in that mode than without napi.
>> >>
>> >> 1x TCP_RR for affinity configuration {process, rx_irq, tx_irq}:
>> >>
>> >> upstream:
>> >>
>> >> 1,1,1: 28985 Mbps, 278 Gcyc
>> >> 1,0,2: 30067 Mbps, 402 Gcyc
>> >>
>> >> napi tx:
>> >>
>> >> 1,1,1: 34492 Mbps, 269 Gcyc
>> >> 1,0,2: 36527 Mbps, 537 Gcyc (!)
>> >> 1,0,1: 36269 Mbps, 394 Gcyc
>> >> 1,0,0: 34674 Mbps, 402 Gcyc
>> >>
>> >> This is a particularly strong example. It is also representative
>> >> of most RR tests. It is less pronounced in other streaming tests.
>> >> 10x TCP_RR, for instance:
>> >>
>> >> upstream:
>> >>
>> >> 1,1,1: 42267 Mbps, 301 Gcyc
>> >> 1,0,2: 40663 Mbps, 445 Gcyc
>> >>
>> >> napi tx:
>> >>
>> >> 1,1,1: 42420 Mbps, 303 Gcyc
>> >> 1,0,2:  42267 Mbps, 431 Gcyc
>> >>
>> >> These numbers were obtained with the virtqueue_enable_cb_delayed
>> >> optimization after xmit_skb, btw. It turns out that moving that before
>> >> increases 1x TCP_RR further to ~39 Gbps, at the cost of reducing
>> >> 100x TCP_RR a bit.
>> >
>> >
>> > I see, so I think we can leave the affinity hint optimization/check for
>> > future investigation:
>> >
>> > - to avoid endless optimization (e.g we may want to share a single
>> > vector/napi for tx/rx queue pairs in the future) for this series.
>> > - tx napi is disabled by default which means we can do optimization on top.
>>
>> Okay. I'll drop the vi->affinity_hint_set from the patch set for now.
>
> I kind of like it, let's be conservative. But I'd prefer a comment
> near it explaining why it's there.

I don't feel strongly. Was minutes away from sending a v3 with this
code reverted, but I'll reinstate it and add a comment. Other planned
changes based on Jason's feedback to v2:

  v2 -> v3:
    - convert __netif_tx_trylock to __netif_tx_lock on tx napi poll
          ensure that the handler always cleans, to avoid deadlock
    - unconditionally clean in start_xmit
          avoid adding an unnecessary "if (use_napi)" branch
    - remove virtqueue_disable_cb in patch 5/5
          a noop in the common event_idx based loop
Michael S. Tsirkin April 24, 2017, 5:14 p.m. UTC | #10
On Mon, Apr 24, 2017 at 01:05:45PM -0400, Willem de Bruijn wrote:
> On Mon, Apr 24, 2017 at 12:40 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> > On Fri, Apr 21, 2017 at 10:50:12AM -0400, Willem de Bruijn wrote:
> >> >>> Maybe I was wrong, but according to Michael's comment it looks like he
> >> >>> want
> >> >>> check affinity_hint_set just for speculative tx polling on rx napi
> >> >>> instead
> >> >>> of disabling it at all.
> >> >>>
> >> >>> And I'm not convinced this is really needed, driver only provide affinity
> >> >>> hint instead of affinity, so it's not guaranteed that tx and rx interrupt
> >> >>> are in the same vcpus.
> >> >>
> >> >> You're right. I made the restriction broader than the request, to really
> >> >> err
> >> >> on the side of caution for the initial merge of napi tx. And enabling
> >> >> the optimization is always a win over keeping it off, even without irq
> >> >> affinity.
> >> >>
> >> >> The cycle cost is significant without affinity regardless of whether the
> >> >> optimization is used.
> >> >
> >> >
> >> > Yes, I noticed this in the past too.
> >> >
> >> >> Though this is not limited to napi-tx, it is more
> >> >> pronounced in that mode than without napi.
> >> >>
> >> >> 1x TCP_RR for affinity configuration {process, rx_irq, tx_irq}:
> >> >>
> >> >> upstream:
> >> >>
> >> >> 1,1,1: 28985 Mbps, 278 Gcyc
> >> >> 1,0,2: 30067 Mbps, 402 Gcyc
> >> >>
> >> >> napi tx:
> >> >>
> >> >> 1,1,1: 34492 Mbps, 269 Gcyc
> >> >> 1,0,2: 36527 Mbps, 537 Gcyc (!)
> >> >> 1,0,1: 36269 Mbps, 394 Gcyc
> >> >> 1,0,0: 34674 Mbps, 402 Gcyc
> >> >>
> >> >> This is a particularly strong example. It is also representative
> >> >> of most RR tests. It is less pronounced in other streaming tests.
> >> >> 10x TCP_RR, for instance:
> >> >>
> >> >> upstream:
> >> >>
> >> >> 1,1,1: 42267 Mbps, 301 Gcyc
> >> >> 1,0,2: 40663 Mbps, 445 Gcyc
> >> >>
> >> >> napi tx:
> >> >>
> >> >> 1,1,1: 42420 Mbps, 303 Gcyc
> >> >> 1,0,2:  42267 Mbps, 431 Gcyc
> >> >>
> >> >> These numbers were obtained with the virtqueue_enable_cb_delayed
> >> >> optimization after xmit_skb, btw. It turns out that moving that before
> >> >> increases 1x TCP_RR further to ~39 Gbps, at the cost of reducing
> >> >> 100x TCP_RR a bit.
> >> >
> >> >
> >> > I see, so I think we can leave the affinity hint optimization/check for
> >> > future investigation:
> >> >
> >> > - to avoid endless optimization (e.g we may want to share a single
> >> > vector/napi for tx/rx queue pairs in the future) for this series.
> >> > - tx napi is disabled by default which means we can do optimization on top.
> >>
> >> Okay. I'll drop the vi->affinity_hint_set from the patch set for now.
> >
> > I kind of like it, let's be conservative. But I'd prefer a comment
> > near it explaining why it's there.
> 
> I don't feel strongly. Was minutes away from sending a v3 with this
> code reverted, but I'll reinstate it and add a comment. Other planned
> changes based on Jason's feedback to v2:
> 
>   v2 -> v3:
>     - convert __netif_tx_trylock to __netif_tx_lock on tx napi poll
>           ensure that the handler always cleans, to avoid deadlock
>     - unconditionally clean in start_xmit
>           avoid adding an unnecessary "if (use_napi)" branch
>     - remove virtqueue_disable_cb in patch 5/5
>           a noop in the common event_idx based loop

Makes sense, thanks!
Jason Wang April 25, 2017, 8:39 a.m. UTC | #11
On 2017年04月25日 00:40, Michael S. Tsirkin wrote:
> On Fri, Apr 21, 2017 at 10:50:12AM -0400, Willem de Bruijn wrote:
>>>>> Maybe I was wrong, but according to Michael's comment it looks like he
>>>>> want
>>>>> check affinity_hint_set just for speculative tx polling on rx napi
>>>>> instead
>>>>> of disabling it at all.
>>>>>
>>>>> And I'm not convinced this is really needed, driver only provide affinity
>>>>> hint instead of affinity, so it's not guaranteed that tx and rx interrupt
>>>>> are in the same vcpus.
>>>> You're right. I made the restriction broader than the request, to really
>>>> err
>>>> on the side of caution for the initial merge of napi tx. And enabling
>>>> the optimization is always a win over keeping it off, even without irq
>>>> affinity.
>>>>
>>>> The cycle cost is significant without affinity regardless of whether the
>>>> optimization is used.
>>>
>>> Yes, I noticed this in the past too.
>>>
>>>> Though this is not limited to napi-tx, it is more
>>>> pronounced in that mode than without napi.
>>>>
>>>> 1x TCP_RR for affinity configuration {process, rx_irq, tx_irq}:
>>>>
>>>> upstream:
>>>>
>>>> 1,1,1: 28985 Mbps, 278 Gcyc
>>>> 1,0,2: 30067 Mbps, 402 Gcyc
>>>>
>>>> napi tx:
>>>>
>>>> 1,1,1: 34492 Mbps, 269 Gcyc
>>>> 1,0,2: 36527 Mbps, 537 Gcyc (!)
>>>> 1,0,1: 36269 Mbps, 394 Gcyc
>>>> 1,0,0: 34674 Mbps, 402 Gcyc
>>>>
>>>> This is a particularly strong example. It is also representative
>>>> of most RR tests. It is less pronounced in other streaming tests.
>>>> 10x TCP_RR, for instance:
>>>>
>>>> upstream:
>>>>
>>>> 1,1,1: 42267 Mbps, 301 Gcyc
>>>> 1,0,2: 40663 Mbps, 445 Gcyc
>>>>
>>>> napi tx:
>>>>
>>>> 1,1,1: 42420 Mbps, 303 Gcyc
>>>> 1,0,2:  42267 Mbps, 431 Gcyc
>>>>
>>>> These numbers were obtained with the virtqueue_enable_cb_delayed
>>>> optimization after xmit_skb, btw. It turns out that moving that before
>>>> increases 1x TCP_RR further to ~39 Gbps, at the cost of reducing
>>>> 100x TCP_RR a bit.
>>>
>>> I see, so I think we can leave the affinity hint optimization/check for
>>> future investigation:
>>>
>>> - to avoid endless optimization (e.g we may want to share a single
>>> vector/napi for tx/rx queue pairs in the future) for this series.
>>> - tx napi is disabled by default which means we can do optimization on top.
>> Okay. I'll drop the vi->affinity_hint_set from the patch set for now.
> I kind of like it, let's be conservative. But I'd prefer a comment
> near it explaining why it's there.
>

Another issue for affinity_hint_set is that it could be changed when
setting channels. I think we're already conservative enough (e.g. it is
disabled by default).

Thanks

Patch

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b9c1df29892c..c173e85dc7b8 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -33,9 +33,10 @@ 
 static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
 
-static bool csum = true, gso = true;
+static bool csum = true, gso = true, napi_tx;
 module_param(csum, bool, 0444);
 module_param(gso, bool, 0444);
+module_param(napi_tx, bool, 0644);
 
 /* FIXME: MTU in config. */
 #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
@@ -86,6 +87,8 @@  struct send_queue {
 
 	/* Name of the send queue: output.$index */
 	char name[40];
+
+	struct napi_struct napi;
 };
 
 /* Internal representation of a receive virtqueue */
@@ -262,12 +265,16 @@  static void virtqueue_napi_complete(struct napi_struct *napi,
 static void skb_xmit_done(struct virtqueue *vq)
 {
 	struct virtnet_info *vi = vq->vdev->priv;
+	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
 
 	/* Suppress further interrupts. */
 	virtqueue_disable_cb(vq);
 
-	/* We were probably waiting for more output buffers. */
-	netif_wake_subqueue(vi->dev, vq2txq(vq));
+	if (napi->weight)
+		virtqueue_napi_schedule(napi, vq);
+	else
+		/* We were probably waiting for more output buffers. */
+		netif_wake_subqueue(vi->dev, vq2txq(vq));
 }
 
 static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx)
@@ -972,6 +979,21 @@  static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
 	local_bh_enable();
 }
 
+static void virtnet_napi_tx_enable(struct virtnet_info *vi,
+				   struct virtqueue *vq,
+				   struct napi_struct *napi)
+{
+	if (!napi->weight)
+		return;
+
+	if (!vi->affinity_hint_set) {
+		napi->weight = 0;
+		return;
+	}
+
+	return virtnet_napi_enable(vq, napi);
+}
+
 static void refill_work(struct work_struct *work)
 {
 	struct virtnet_info *vi =
@@ -1046,6 +1068,7 @@  static int virtnet_open(struct net_device *dev)
 			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
 				schedule_delayed_work(&vi->refill, 0);
 		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+		virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
 	}
 
 	return 0;
@@ -1081,6 +1104,25 @@  static void free_old_xmit_skbs(struct send_queue *sq)
 	u64_stats_update_end(&stats->tx_syncp);
 }
 
+static int virtnet_poll_tx(struct napi_struct *napi, int budget)
+{
+	struct send_queue *sq = container_of(napi, struct send_queue, napi);
+	struct virtnet_info *vi = sq->vq->vdev->priv;
+	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
+
+	if (__netif_tx_trylock(txq)) {
+		free_old_xmit_skbs(sq);
+		__netif_tx_unlock(txq);
+	}
+
+	virtqueue_napi_complete(napi, sq->vq, 0);
+
+	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+		netif_tx_wake_queue(txq);
+
+	return 0;
+}
+
 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
 {
 	struct virtio_net_hdr_mrg_rxbuf *hdr;
@@ -1130,9 +1172,11 @@  static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int err;
 	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
 	bool kick = !skb->xmit_more;
+	bool use_napi = sq->napi.weight;
 
 	/* Free up any pending old buffers before queueing new ones. */
-	free_old_xmit_skbs(sq);
+	if (!use_napi)
+		free_old_xmit_skbs(sq);
 
 	/* timestamp packet in software */
 	skb_tx_timestamp(skb);
@@ -1152,8 +1196,10 @@  static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	/* Don't wait up for transmitted skbs to be freed. */
-	skb_orphan(skb);
-	nf_reset(skb);
+	if (!use_napi) {
+		skb_orphan(skb);
+		nf_reset(skb);
+	}
 
 	/* If running out of space, stop queue to avoid getting packets that we
 	 * are then unable to transmit.
@@ -1167,7 +1213,8 @@  static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 */
 	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
 		netif_stop_subqueue(dev, qnum);
-		if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
+		if (!use_napi &&
+		    unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
 			/* More just got used, free them then recheck. */
 			free_old_xmit_skbs(sq);
 			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
@@ -1371,8 +1418,10 @@  static int virtnet_close(struct net_device *dev)
 	/* Make sure refill_work doesn't re-enable napi! */
 	cancel_delayed_work_sync(&vi->refill);
 
-	for (i = 0; i < vi->max_queue_pairs; i++)
+	for (i = 0; i < vi->max_queue_pairs; i++) {
 		napi_disable(&vi->rq[i].napi);
+		napi_disable(&vi->sq[i].napi);
+	}
 
 	return 0;
 }
@@ -1727,8 +1776,10 @@  static void virtnet_freeze_down(struct virtio_device *vdev)
 	cancel_delayed_work_sync(&vi->refill);
 
 	if (netif_running(vi->dev)) {
-		for (i = 0; i < vi->max_queue_pairs; i++)
+		for (i = 0; i < vi->max_queue_pairs; i++) {
 			napi_disable(&vi->rq[i].napi);
+			napi_disable(&vi->sq[i].napi);
+		}
 	}
 }
 
@@ -1751,8 +1802,11 @@  static int virtnet_restore_up(struct virtio_device *vdev)
 			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
 				schedule_delayed_work(&vi->refill, 0);
 
-		for (i = 0; i < vi->max_queue_pairs; i++)
+		for (i = 0; i < vi->max_queue_pairs; i++) {
 			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
+					       &vi->sq[i].napi);
+		}
 	}
 
 	netif_device_attach(vi->dev);
@@ -1957,6 +2011,7 @@  static void virtnet_free_queues(struct virtnet_info *vi)
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		napi_hash_del(&vi->rq[i].napi);
 		netif_napi_del(&vi->rq[i].napi);
+		netif_napi_del(&vi->sq[i].napi);
 	}
 
 	/* We called napi_hash_del() before netif_napi_del(),
@@ -2142,6 +2197,8 @@  static int virtnet_alloc_queues(struct virtnet_info *vi)
 		vi->rq[i].pages = NULL;
 		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
 			       napi_weight);
+		netif_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
+			       napi_tx ? napi_weight : 0);
 
 		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
 		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);