Message ID | 1530678698-33427-4-git-send-email-xiangxia.m.yue@gmail.com |
---|---|
State | Changes Requested, archived |
Delegated to: | David Miller |
Headers | show |
Series | net: vhost: improve performance when enable busyloop | expand |
On 2018/07/04 13:31, xiangxia.m.yue@gmail.com wrote: ... > +static void vhost_net_busy_poll(struct vhost_net *net, > + struct vhost_virtqueue *rvq, > + struct vhost_virtqueue *tvq, > + bool rx) > +{ > + unsigned long uninitialized_var(endtime); > + unsigned long busyloop_timeout; > + struct socket *sock; > + struct vhost_virtqueue *vq = rx ? tvq : rvq; > + > + mutex_lock_nested(&vq->mutex, rx ? VHOST_NET_VQ_TX: VHOST_NET_VQ_RX); > + > + vhost_disable_notify(&net->dev, vq); > + sock = rvq->private_data; > + busyloop_timeout = rx ? rvq->busyloop_timeout : tvq->busyloop_timeout; > + > + preempt_disable(); > + endtime = busy_clock() + busyloop_timeout; > + while (vhost_can_busy_poll(tvq->dev, endtime) && > + !(sock && sk_has_rx_data(sock->sk)) && > + vhost_vq_avail_empty(tvq->dev, tvq)) > + cpu_relax(); > + preempt_enable(); > + > + if ((rx && !vhost_vq_avail_empty(&net->dev, vq)) || > + (!rx && (sock && sk_has_rx_data(sock->sk)))) { > + vhost_poll_queue(&vq->poll); > + } else if (vhost_enable_notify(&net->dev, vq) && rx) { Hmm... on tx here sock has no rx data, so you are waiting for sock wakeup for rx and vhost_enable_notify() seems not needed. Do you want this actually? } else if (rx && vhost_enable_notify(&net->dev, vq)) { > + vhost_disable_notify(&net->dev, vq); > + vhost_poll_queue(&vq->poll); > + }
On 2018年07月04日 15:59, Toshiaki Makita wrote: > On 2018/07/04 13:31, xiangxia.m.yue@gmail.com wrote: > ... >> +static void vhost_net_busy_poll(struct vhost_net *net, >> + struct vhost_virtqueue *rvq, >> + struct vhost_virtqueue *tvq, >> + bool rx) >> +{ >> + unsigned long uninitialized_var(endtime); >> + unsigned long busyloop_timeout; >> + struct socket *sock; >> + struct vhost_virtqueue *vq = rx ? tvq : rvq; >> + >> + mutex_lock_nested(&vq->mutex, rx ? VHOST_NET_VQ_TX: VHOST_NET_VQ_RX); >> + >> + vhost_disable_notify(&net->dev, vq); >> + sock = rvq->private_data; >> + busyloop_timeout = rx ? rvq->busyloop_timeout : tvq->busyloop_timeout; >> + >> + preempt_disable(); >> + endtime = busy_clock() + busyloop_timeout; >> + while (vhost_can_busy_poll(tvq->dev, endtime) && >> + !(sock && sk_has_rx_data(sock->sk)) && >> + vhost_vq_avail_empty(tvq->dev, tvq)) >> + cpu_relax(); >> + preempt_enable(); >> + >> + if ((rx && !vhost_vq_avail_empty(&net->dev, vq)) || >> + (!rx && (sock && sk_has_rx_data(sock->sk)))) { >> + vhost_poll_queue(&vq->poll); >> + } else if (vhost_enable_notify(&net->dev, vq) && rx) { > Hmm... on tx here sock has no rx data, so you are waiting for sock > wakeup for rx and vhost_enable_notify() seems not needed. Do you want > this actually? > > } else if (rx && vhost_enable_notify(&net->dev, vq)) { Right, rx need to be checked first here. Thanks >> + vhost_disable_notify(&net->dev, vq); >> + vhost_poll_queue(&vq->poll); >> + }
On Wed, Jul 4, 2018 at 5:18 PM Jason Wang <jasowang@redhat.com> wrote: > > > > On 2018年07月04日 15:59, Toshiaki Makita wrote: > > On 2018/07/04 13:31, xiangxia.m.yue@gmail.com wrote: > > ... > >> +static void vhost_net_busy_poll(struct vhost_net *net, > >> + struct vhost_virtqueue *rvq, > >> + struct vhost_virtqueue *tvq, > >> + bool rx) > >> +{ > >> + unsigned long uninitialized_var(endtime); > >> + unsigned long busyloop_timeout; > >> + struct socket *sock; > >> + struct vhost_virtqueue *vq = rx ? tvq : rvq; > >> + > >> + mutex_lock_nested(&vq->mutex, rx ? VHOST_NET_VQ_TX: VHOST_NET_VQ_RX); > >> + > >> + vhost_disable_notify(&net->dev, vq); > >> + sock = rvq->private_data; > >> + busyloop_timeout = rx ? rvq->busyloop_timeout : tvq->busyloop_timeout; > >> + > >> + preempt_disable(); > >> + endtime = busy_clock() + busyloop_timeout; > >> + while (vhost_can_busy_poll(tvq->dev, endtime) && > >> + !(sock && sk_has_rx_data(sock->sk)) && > >> + vhost_vq_avail_empty(tvq->dev, tvq)) > >> + cpu_relax(); > >> + preempt_enable(); > >> + > >> + if ((rx && !vhost_vq_avail_empty(&net->dev, vq)) || > >> + (!rx && (sock && sk_has_rx_data(sock->sk)))) { > >> + vhost_poll_queue(&vq->poll); > >> + } else if (vhost_enable_notify(&net->dev, vq) && rx) { > > Hmm... on tx here sock has no rx data, so you are waiting for sock > > wakeup for rx and vhost_enable_notify() seems not needed. Do you want > > this actually? > > > > } else if (rx && vhost_enable_notify(&net->dev, vq)) { > > Right, rx need to be checked first here. thanks, if we dont call the vhost_enable_notify for tx. so we dont need to call vhost_disable_notify for tx? @@ -451,7 +451,9 @@ static void vhost_net_busy_poll(struct vhost_net *net, tvq->busyloop_timeout; mutex_lock_nested(&vq->mutex, rx ? 
VHOST_NET_VQ_TX: VHOST_NET_VQ_RX); - vhost_disable_notify(&net->dev, vq); + + if (rx) + vhost_disable_notify(&net->dev, vq); preempt_disable(); endtime = busy_clock() + busyloop_timeout; > Thanks > > >> + vhost_disable_notify(&net->dev, vq); > >> + vhost_poll_queue(&vq->poll); > >> + } >
On 2018年07月04日 17:46, Tonghao Zhang wrote: > On Wed, Jul 4, 2018 at 5:18 PM Jason Wang <jasowang@redhat.com> wrote: >> >> >> On 2018年07月04日 15:59, Toshiaki Makita wrote: >>> On 2018/07/04 13:31, xiangxia.m.yue@gmail.com wrote: >>> ... >>>> +static void vhost_net_busy_poll(struct vhost_net *net, >>>> + struct vhost_virtqueue *rvq, >>>> + struct vhost_virtqueue *tvq, >>>> + bool rx) >>>> +{ >>>> + unsigned long uninitialized_var(endtime); >>>> + unsigned long busyloop_timeout; >>>> + struct socket *sock; >>>> + struct vhost_virtqueue *vq = rx ? tvq : rvq; >>>> + >>>> + mutex_lock_nested(&vq->mutex, rx ? VHOST_NET_VQ_TX: VHOST_NET_VQ_RX); >>>> + >>>> + vhost_disable_notify(&net->dev, vq); >>>> + sock = rvq->private_data; >>>> + busyloop_timeout = rx ? rvq->busyloop_timeout : tvq->busyloop_timeout; >>>> + >>>> + preempt_disable(); >>>> + endtime = busy_clock() + busyloop_timeout; >>>> + while (vhost_can_busy_poll(tvq->dev, endtime) && >>>> + !(sock && sk_has_rx_data(sock->sk)) && >>>> + vhost_vq_avail_empty(tvq->dev, tvq)) >>>> + cpu_relax(); >>>> + preempt_enable(); >>>> + >>>> + if ((rx && !vhost_vq_avail_empty(&net->dev, vq)) || >>>> + (!rx && (sock && sk_has_rx_data(sock->sk)))) { >>>> + vhost_poll_queue(&vq->poll); >>>> + } else if (vhost_enable_notify(&net->dev, vq) && rx) { >>> Hmm... on tx here sock has no rx data, so you are waiting for sock >>> wakeup for rx and vhost_enable_notify() seems not needed. Do you want >>> this actually? >>> >>> } else if (rx && vhost_enable_notify(&net->dev, vq)) { >> Right, rx need to be checked first here. > thanks, if we dont call the vhost_enable_notify for tx. so we dont > need to call vhost_disable_notify for tx? > > @@ -451,7 +451,9 @@ static void vhost_net_busy_poll(struct vhost_net *net, > tvq->busyloop_timeout; > > mutex_lock_nested(&vq->mutex, rx ? 
VHOST_NET_VQ_TX: VHOST_NET_VQ_RX); > - vhost_disable_notify(&net->dev, vq); > + > + if (rx) > + vhost_disable_notify(&net->dev, vq); > > preempt_disable(); > endtime = busy_clock() + busyloop_timeout; Sorry for being unclear. We need to enable tx notification at the end of the loop. I meant we need to replace + } else if (vhost_enable_notify(&net->dev, vq) && rx) { with + } else if (rx && vhost_enable_notify(&net->dev, vq)) { We only need rx notification when there's no avail buffers. This means we need only enable notification for tx. And maybe we can simplify the logic as if (rx) { ...... } else { ...... } here. (not a must). Thanks > >> Thanks >> >>>> + vhost_disable_notify(&net->dev, vq); >>>> + vhost_poll_queue(&vq->poll); >>>> + }
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 62bb8e8..2790959 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -429,6 +429,52 @@ static int vhost_net_enable_vq(struct vhost_net *n, return vhost_poll_start(poll, sock->file); } +static int sk_has_rx_data(struct sock *sk) +{ + struct socket *sock = sk->sk_socket; + + if (sock->ops->peek_len) + return sock->ops->peek_len(sock); + + return skb_queue_empty(&sk->sk_receive_queue); +} + +static void vhost_net_busy_poll(struct vhost_net *net, + struct vhost_virtqueue *rvq, + struct vhost_virtqueue *tvq, + bool rx) +{ + unsigned long uninitialized_var(endtime); + unsigned long busyloop_timeout; + struct socket *sock; + struct vhost_virtqueue *vq = rx ? tvq : rvq; + + mutex_lock_nested(&vq->mutex, rx ? VHOST_NET_VQ_TX: VHOST_NET_VQ_RX); + + vhost_disable_notify(&net->dev, vq); + sock = rvq->private_data; + busyloop_timeout = rx ? rvq->busyloop_timeout : tvq->busyloop_timeout; + + preempt_disable(); + endtime = busy_clock() + busyloop_timeout; + while (vhost_can_busy_poll(tvq->dev, endtime) && + !(sock && sk_has_rx_data(sock->sk)) && + vhost_vq_avail_empty(tvq->dev, tvq)) + cpu_relax(); + preempt_enable(); + + if ((rx && !vhost_vq_avail_empty(&net->dev, vq)) || + (!rx && (sock && sk_has_rx_data(sock->sk)))) { + vhost_poll_queue(&vq->poll); + } else if (vhost_enable_notify(&net->dev, vq) && rx) { + vhost_disable_notify(&net->dev, vq); + vhost_poll_queue(&vq->poll); + } + + mutex_unlock(&vq->mutex); +} + + static int vhost_net_tx_get_vq_desc(struct vhost_net *net, struct vhost_virtqueue *vq, struct iovec iov[], unsigned int iov_size, @@ -621,16 +667,6 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk) return len; } -static int sk_has_rx_data(struct sock *sk) -{ - struct socket *sock = sk->sk_socket; - - if (sock->ops->peek_len) - return sock->ops->peek_len(sock); - - return skb_queue_empty(&sk->sk_receive_queue); -} - static void vhost_rx_signal_used(struct 
vhost_net_virtqueue *nvq) { struct vhost_virtqueue *vq = &nvq->vq; @@ -645,39 +681,19 @@ static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq) static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk) { - struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX]; - struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; - struct vhost_virtqueue *vq = &nvq->vq; - unsigned long uninitialized_var(endtime); - int len = peek_head_len(rvq, sk); + struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX]; + struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX]; - if (!len && vq->busyloop_timeout) { - /* Flush batched heads first */ - vhost_rx_signal_used(rvq); - /* Both tx vq and rx socket were polled here */ - mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX); - vhost_disable_notify(&net->dev, vq); + int len = peek_head_len(rnvq, sk); - preempt_disable(); - endtime = busy_clock() + vq->busyloop_timeout; - - while (vhost_can_busy_poll(&net->dev, endtime) && - !sk_has_rx_data(sk) && - vhost_vq_avail_empty(&net->dev, vq)) - cpu_relax(); - - preempt_enable(); - - if (!vhost_vq_avail_empty(&net->dev, vq)) - vhost_poll_queue(&vq->poll); - else if (unlikely(vhost_enable_notify(&net->dev, vq))) { - vhost_disable_notify(&net->dev, vq); - vhost_poll_queue(&vq->poll); - } + if (!len && rnvq->vq.busyloop_timeout) { + /* Flush batched heads first */ + vhost_rx_signal_used(rnvq); - mutex_unlock(&vq->mutex); + /* Both tx vq and rx socket were polled here */ + vhost_net_busy_poll(net, &rnvq->vq, &tnvq->vq, true); - len = peek_head_len(rvq, sk); + len = peek_head_len(rnvq, sk); } return len;