
[net-next,v8,7/7] net: vhost: make busyloop_intr more accurate

Message ID 1534680686-3108-8-git-send-email-xiangxia.m.yue@gmail.com
State Deferred, archived
Delegated to: David Miller
Series: net: vhost: improve performance when enable busyloop

Commit Message

Tonghao Zhang Aug. 19, 2018, 12:11 p.m. UTC
From: Tonghao Zhang <xiangxia.m.yue@gmail.com>

The patch uses vhost_has_work_pending() to check whether
the specified handler is scheduled, because in most cases
vhost_has_work() returns true when the other side's handler
has been added to the worker list. Use vhost_has_work_pending()
instead of vhost_has_work().

Topology:
[Host] ->linux bridge -> tap vhost-net ->[Guest]

TCP_STREAM (netperf):
* Without the patch:  38035.39 Mbps, 3.37 us mean latency
* With the patch:     38409.44 Mbps, 3.34 us mean latency

Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
---
 drivers/vhost/net.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
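
For context, vhost_has_work_pending() is introduced earlier in this
series and is not part of this patch. A minimal sketch of the intended
semantics, assuming each virtqueue handler is queued through its
vhost_poll work item and that VHOST_WORK_QUEUED marks a queued work
item (the series' actual definition may differ):

	/* Hypothetical sketch: reports whether the work item of one
	 * specific virtqueue handler is queued, rather than whether
	 * any work at all is queued on the device.
	 */
	static inline bool vhost_has_work_pending(struct vhost_dev *dev,
						  int vq_idx)
	{
		struct vhost_poll *poll = &dev->vqs[vq_idx]->poll;

		return test_bit(VHOST_WORK_QUEUED, &poll->work.flags);
	}

	/* For comparison, vhost_has_work() in drivers/vhost/vhost.c only
	 * says that some work, possibly the other direction's handler,
	 * is on the device's work list:
	 */
	bool vhost_has_work(struct vhost_dev *dev)
	{
		return !llist_empty(&dev->work_list);
	}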

Comments

Jason Wang Aug. 21, 2018, 12:33 a.m. UTC | #1
On 2018-08-19 20:11, xiangxia.m.yue@gmail.com wrote:
> From: Tonghao Zhang <xiangxia.m.yue@gmail.com>
>
> The patch uses vhost_has_work_pending() to check whether
> the specified handler is scheduled, because in most cases
> vhost_has_work() returns true when the other side's handler
> has been added to the worker list. Use vhost_has_work_pending()
> instead of vhost_has_work().
>
> Topology:
> [Host] ->linux bridge -> tap vhost-net ->[Guest]
>
> TCP_STREAM (netperf):
> * Without the patch:  38035.39 Mbps, 3.37 us mean latency
> * With the patch:     38409.44 Mbps, 3.34 us mean latency

The improvement is not as obvious as in the last version. Do you mean
there have been some recent changes to vhost that made it faster?

Thanks

>
> Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
> ---
>   drivers/vhost/net.c | 9 ++++++---
>   1 file changed, 6 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
> index db63ae2..b6939ef 100644
> --- a/drivers/vhost/net.c
> +++ b/drivers/vhost/net.c
> @@ -487,10 +487,8 @@ static void vhost_net_busy_poll(struct vhost_net *net,
>   	endtime = busy_clock() + busyloop_timeout;
>   
>   	while (vhost_can_busy_poll(endtime)) {
> -		if (vhost_has_work(&net->dev)) {
> -			*busyloop_intr = true;
> +		if (vhost_has_work(&net->dev))
>   			break;
> -		}
>   
>   		if ((sock_has_rx_data(sock) &&
>   		     !vhost_vq_avail_empty(&net->dev, rvq)) ||
> @@ -513,6 +511,11 @@ static void vhost_net_busy_poll(struct vhost_net *net,
>   	    !vhost_has_work_pending(&net->dev, VHOST_NET_VQ_RX))
>   		vhost_net_enable_vq(net, rvq);
>   
> +	if (vhost_has_work_pending(&net->dev,
> +				   poll_rx ?
> +				   VHOST_NET_VQ_RX: VHOST_NET_VQ_TX))
> +		*busyloop_intr = true;
> +
>   	mutex_unlock(&vq->mutex);
>   }
>
Jason Wang Aug. 21, 2018, 12:47 a.m. UTC | #2
On 2018-08-21 08:33, Jason Wang wrote:
>
>
> On 2018-08-19 20:11, xiangxia.m.yue@gmail.com wrote:
>> From: Tonghao Zhang <xiangxia.m.yue@gmail.com>
>>
>> The patch uses vhost_has_work_pending() to check whether
>> the specified handler is scheduled, because in most cases
>> vhost_has_work() returns true when the other side's handler
>> has been added to the worker list. Use vhost_has_work_pending()
>> instead of vhost_has_work().
>>
>> Topology:
>> [Host] ->linux bridge -> tap vhost-net ->[Guest]
>>
>> TCP_STREAM (netperf):
>> * Without the patch:  38035.39 Mbps, 3.37 us mean latency
>> * With the patch:     38409.44 Mbps, 3.34 us mean latency
>
> The improvement is not as obvious as in the last version. Do you mean
> there have been some recent changes to vhost that made it faster?
>

I misunderstood the numbers, please ignore this.

It shows less than a 1% improvement. I'm not sure it's worth doing. Can
you try bi-directional pktgen to see if it has a more obvious effect?

Thanks

> Thanks
>
>>
>> Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
>> ---
>>   drivers/vhost/net.c | 9 ++++++---
>>   1 file changed, 6 insertions(+), 3 deletions(-)
>>
>> diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
>> index db63ae2..b6939ef 100644
>> --- a/drivers/vhost/net.c
>> +++ b/drivers/vhost/net.c
>> @@ -487,10 +487,8 @@ static void vhost_net_busy_poll(struct vhost_net *net,
>>  	endtime = busy_clock() + busyloop_timeout;
>>  
>>  	while (vhost_can_busy_poll(endtime)) {
>> -		if (vhost_has_work(&net->dev)) {
>> -			*busyloop_intr = true;
>> +		if (vhost_has_work(&net->dev))
>>  			break;
>> -		}
>>  
>>  		if ((sock_has_rx_data(sock) &&
>>  		     !vhost_vq_avail_empty(&net->dev, rvq)) ||
>> @@ -513,6 +511,11 @@ static void vhost_net_busy_poll(struct vhost_net *net,
>>  	    !vhost_has_work_pending(&net->dev, VHOST_NET_VQ_RX))
>>  		vhost_net_enable_vq(net, rvq);
>>  
>> +	if (vhost_has_work_pending(&net->dev,
>> +				   poll_rx ?
>> +				   VHOST_NET_VQ_RX: VHOST_NET_VQ_TX))
>> +		*busyloop_intr = true;
>> +
>>  	mutex_unlock(&vq->mutex);
>>  }
>
Michael S. Tsirkin June 1, 2020, 1:26 p.m. UTC | #3
On Tue, Aug 21, 2018 at 08:47:35AM +0800, Jason Wang wrote:
> 
> 
> On 2018-08-21 08:33, Jason Wang wrote:
> > 
> > 
> > On 2018-08-19 20:11, xiangxia.m.yue@gmail.com wrote:
> > > From: Tonghao Zhang <xiangxia.m.yue@gmail.com>
> > > 
> > > The patch uses vhost_has_work_pending() to check whether
> > > the specified handler is scheduled, because in most cases
> > > vhost_has_work() returns true when the other side's handler
> > > has been added to the worker list. Use vhost_has_work_pending()
> > > instead of vhost_has_work().
> > > 
> > > Topology:
> > > [Host] ->linux bridge -> tap vhost-net ->[Guest]
> > > 
> > > TCP_STREAM (netperf):
> > > * Without the patch:  38035.39 Mbps, 3.37 us mean latency
> > > * With the patch:     38409.44 Mbps, 3.34 us mean latency
> > 
> > The improvement is not as obvious as in the last version. Do you mean
> > there have been some recent changes to vhost that made it faster?
> > 
> 
> I misunderstood the numbers, please ignore this.
> 
> It shows less than a 1% improvement. I'm not sure it's worth doing. Can you
> try bi-directional pktgen to see if it has a more obvious effect?
> 
> Thanks


Right, this kind of gain is in the noise. Try measuring CPU utilization?

> > Thanks
> > 
> > > 
> > > Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
> > > ---
> > >   drivers/vhost/net.c | 9 ++++++---
> > >   1 file changed, 6 insertions(+), 3 deletions(-)
> > > 
> > > diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
> > > index db63ae2..b6939ef 100644
> > > --- a/drivers/vhost/net.c
> > > +++ b/drivers/vhost/net.c
> > > @@ -487,10 +487,8 @@ static void vhost_net_busy_poll(struct vhost_net *net,
> > >  	endtime = busy_clock() + busyloop_timeout;
> > >  
> > >  	while (vhost_can_busy_poll(endtime)) {
> > > -		if (vhost_has_work(&net->dev)) {
> > > -			*busyloop_intr = true;
> > > +		if (vhost_has_work(&net->dev))
> > >  			break;
> > > -		}
> > >  
> > >  		if ((sock_has_rx_data(sock) &&
> > >  		     !vhost_vq_avail_empty(&net->dev, rvq)) ||
> > > @@ -513,6 +511,11 @@ static void vhost_net_busy_poll(struct vhost_net *net,
> > >  	    !vhost_has_work_pending(&net->dev, VHOST_NET_VQ_RX))
> > >  		vhost_net_enable_vq(net, rvq);
> > >  
> > > +	if (vhost_has_work_pending(&net->dev,
> > > +				   poll_rx ?
> > > +				   VHOST_NET_VQ_RX: VHOST_NET_VQ_TX))
> > > +		*busyloop_intr = true;
> > > +
> > >  	mutex_unlock(&vq->mutex);
> > >  }
> > 

Patch

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index db63ae2..b6939ef 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -487,10 +487,8 @@ static void vhost_net_busy_poll(struct vhost_net *net,
 	endtime = busy_clock() + busyloop_timeout;
 
 	while (vhost_can_busy_poll(endtime)) {
-		if (vhost_has_work(&net->dev)) {
-			*busyloop_intr = true;
+		if (vhost_has_work(&net->dev))
 			break;
-		}
 
 		if ((sock_has_rx_data(sock) &&
 		     !vhost_vq_avail_empty(&net->dev, rvq)) ||
@@ -513,6 +511,11 @@ static void vhost_net_busy_poll(struct vhost_net *net,
 	    !vhost_has_work_pending(&net->dev, VHOST_NET_VQ_RX))
 		vhost_net_enable_vq(net, rvq);
 
+	if (vhost_has_work_pending(&net->dev,
+				   poll_rx ?
+				   VHOST_NET_VQ_RX: VHOST_NET_VQ_TX))
+		*busyloop_intr = true;
+
 	mutex_unlock(&vq->mutex);
 }
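
The practical effect of the tighter check: *busyloop_intr is now set
only when the pending work belongs to the handler of the direction
actually being polled, not whenever any work is queued. The callers
use the flag to choose between requeueing the handler and re-enabling
guest notification, so a false positive costs an extra wakeup. A
simplified sketch of the consumer-side pattern, loosely based on
handle_tx() in drivers/vhost/net.c at the time (flow condensed):

	/* Simplified consumer of busyloop_intr: on an empty ring, a
	 * spurious 'true' requeues the handler instead of re-enabling
	 * guest notification, costing an extra scheduling round trip.
	 */
	if (head == vq->num) {
		if (unlikely(busyloop_intr))
			/* Work really is pending: poll again soon. */
			vhost_poll_queue(&vq->poll);
		else if (unlikely(vhost_enable_notify(&net->dev, vq)))
			/* New buffers appeared meanwhile: recheck. */
			continue;
		break;
	}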