diff mbox

[1/2] net: adding memory barrier to the poll and receive callbacks

Message ID 20090629141445.GF3845@jolsa.lab.eng.brq.redhat.com
State Not Applicable, archived
Delegated to: David Miller
Headers show

Commit Message

Jiri Olsa June 29, 2009, 2:14 p.m. UTC
Adding a memory barrier after the poll_wait function, paired with
receive callbacks. Adding functions sock_poll_wait and sk_has_sleeper
to wrap the memory barrier.

Without the memory barrier, the following race can happen.
The race fires when the following code paths meet and the tp->rcv_nxt
and __add_wait_queue updates stay in CPU caches.


CPU1                         CPU2

sys_select                   receive packet
  ...                        ...
  __add_wait_queue           update tp->rcv_nxt
  ...                        ...
  tp->rcv_nxt check          sock_def_readable
  ...                        {
  schedule                      ...
                                if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                                        wake_up_interruptible(sk->sk_sleep)
                                ...
                             }

If there was no caching the code would work ok, since the wait_queue and
rcv_nxt updates are opposite to each other.

Meaning that once tp->rcv_nxt is updated by CPU2, the CPU1 either already
passed the tp->rcv_nxt check and sleeps, or will get the new value for
tp->rcv_nxt and will return with new data mask.
In both cases the process (CPU1) is being added to the wait queue, so the
waitqueue_active (CPU2) call cannot miss and will wake up CPU1.

The bad case is when the __add_wait_queue changes done by CPU1 stay in its
cache, and so does the tp->rcv_nxt update on CPU2 side.  The CPU1 will then
end up calling schedule and sleep forever if there is no more data on the
socket.


Calls to poll_wait in the following modules were omitted:
	net/bluetooth/af_bluetooth.c
	net/irda/af_irda.c
	net/irda/irnet/irnet_ppp.c
	net/mac80211/rc80211_pid_debugfs.c
	net/phonet/socket.c
	net/rds/af_rds.c
	net/rfkill/core.c
	net/sunrpc/cache.c
	net/sunrpc/rpc_pipe.c
	net/tipc/socket.c

wbr,
jirka


Signed-off-by: Jiri Olsa <jolsa@redhat.com>

---
 include/linux/poll.h |   11 +++++++++--
 include/net/sock.h   |   40 ++++++++++++++++++++++++++++++++++++++++
 net/atm/common.c     |    6 +++---
 net/core/datagram.c  |    2 +-
 net/core/sock.c      |    8 ++++----
 net/dccp/output.c    |    2 +-
 net/dccp/proto.c     |    2 +-
 net/ipv4/tcp.c       |    2 +-
 net/iucv/af_iucv.c   |    4 ++--
 net/rxrpc/af_rxrpc.c |    4 ++--
 net/unix/af_unix.c   |    8 ++++----
 11 files changed, 68 insertions(+), 21 deletions(-)

--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Comments

Davide Libenzi June 29, 2009, 3:34 p.m. UTC | #1
On Mon, 29 Jun 2009, Jiri Olsa wrote:

> -static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
> +static inline void __poll_wait(struct file *filp,
> +				wait_queue_head_t *wait_address, poll_table *p)
> +{
> +	p->qproc(filp, wait_address, p);
> +}
> +
> +static inline void poll_wait(struct file *filp,
> +				wait_queue_head_t *wait_address, poll_table *p)
>  {
>  	if (p && wait_address)
> -		p->qproc(filp, wait_address, p);
> +		__poll_wait(filp, wait_address, p);
>  }

> +static inline void sock_poll_wait(struct file *filp, struct sock *sk,
> +		poll_table *p)
> +{
> +	if (p && sk->sk_sleep) {
> +		__poll_wait(filp, sk->sk_sleep, p);
> +		/*
> +		 * We need to be sure we are in sync with the
> +		 * socket flags modification.
> +		 *
> +		 * This memory barrier is paired in the sk_has_sleeper.
> +		*/
> +		smp_mb();
> +	}
> +}

I think Oleg already said this, but you can use directly poll_wait() 
without adding another abstraction, and the compiler will drop the double 
check for you:

extern void foo(int, int, int);
extern void mb(void);

static inline void cfoo(int a, int b, int c) {
	if (b && c)
		foo(a, b, c);
}

void xxx(int a, int b, int c) {
	if (b && c) {
		cfoo(a, b, c);
		mb();
	}
}
-----
xxx:
	subq	$8, %rsp
	testl	%esi, %esi
	je	.L3
	testl	%edx, %edx
	je	.L3
	call	foo
	addq	$8, %rsp
	jmp	mb
.L3:
	addq	$8, %rsp
	ret



- Davide


--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Jarek Poplawski June 29, 2009, 5:19 p.m. UTC | #2
On Mon, Jun 29, 2009 at 04:14:45PM +0200, Jiri Olsa wrote:
...
> +static inline void sock_poll_wait(struct file *filp, struct sock *sk,
...
> +	sock_poll_wait(file, sk->sk_sleep, wait);

-----------------------------^^^^^^^^^^^^
Is it something with my eyes or it wasn't compiled? ;-)

Jarek P.
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Jarek Poplawski June 29, 2009, 5:32 p.m. UTC | #3
On Mon, Jun 29, 2009 at 08:34:55AM -0700, Davide Libenzi wrote:
> On Mon, 29 Jun 2009, Jiri Olsa wrote:
> 
> > -static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
> > +static inline void __poll_wait(struct file *filp,
> > +				wait_queue_head_t *wait_address, poll_table *p)
> > +{
> > +	p->qproc(filp, wait_address, p);
> > +}
> > +
> > +static inline void poll_wait(struct file *filp,
> > +				wait_queue_head_t *wait_address, poll_table *p)
> >  {
> >  	if (p && wait_address)
> > -		p->qproc(filp, wait_address, p);
> > +		__poll_wait(filp, wait_address, p);
> >  }
> 
> > +static inline void sock_poll_wait(struct file *filp, struct sock *sk,
> > +		poll_table *p)
> > +{
> > +	if (p && sk->sk_sleep) {
> > +		__poll_wait(filp, sk->sk_sleep, p);
> > +		/*
> > +		 * We need to be sure we are in sync with the
> > +		 * socket flags modification.
> > +		 *
> > +		 * This memory barrier is paired in the sk_has_sleeper.
> > +		*/
> > +		smp_mb();
> > +	}
> > +}
> 
> I think Oleg already said this, but you can use directly poll_wait() 
> without adding another abstraction, and the compiler will drop the double 
> check for you:

I think Oleg told about cosmetics and let Jiri to choose. I'd only
add it's not mainly about optimization, but easy showing the main
difference, of course depending on taste.

Jarek P. 
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Davide Libenzi June 29, 2009, 5:36 p.m. UTC | #4
On Mon, 29 Jun 2009, Jarek Poplawski wrote:

> > I think Oleg already said this, but you can use directly poll_wait() 
> > without adding another abstraction, and the compiler will drop the double 
> > check for you:
> 
> I think Oleg told about cosmetics and let Jiri to choose. I'd only
> add it's not mainly about optimization, but easy showing the main
> difference, of course depending on taste.

We already have a universally used function to do that, and that's 
poll_wait().
That code (adding an extra __poll_wait()) was entirely about 
optimizations (otherwise why not use the existing poll_wait()?), so if 
the optimization does not actually take place, IMO it's better to not add 
an extra API.



- Davide


--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Jarek Poplawski June 29, 2009, 5:47 p.m. UTC | #5
On Mon, Jun 29, 2009 at 04:14:45PM +0200, Jiri Olsa wrote:
...
> +/**
> + * sk_has_sleeper - check if there are any waiting processes
> + * @sk: socket
> + *
> + * Returns true if socket has waiting processes
> + */
> +static inline int sk_has_sleeper(struct sock *sk)
> +{
> +	/*
> +	 * We need to be sure we are in sync with the
> +	 * add_wait_queue modifications to the wait queue.
> +	 *
> +	 * This memory barrier is paired in the sock_poll_wait.
> +	 */
> +	smp_mb();
> +	return sk->sk_sleep && waitqueue_active(sk->sk_sleep);
> +}
> +

Btw. I hope Jiri won't "listen" to me, but I can't stop to mention
sock_waitqueue_active() looks to me quite naturally better paired
with sock_poll_wait() than sk_has_sleeper(which otherwise is more
conspicuous, sorry Eric.)

Jarek P.

> +/**
> + * sock_poll_wait - place memory barrier behind the __poll_wait call.
> + * @filp: file
> + * @sk:   socket
> + * @p:    poll_table
> + */
> +static inline void sock_poll_wait(struct file *filp, struct sock *sk,
> +		poll_table *p)
> +{
> +	if (p && sk->sk_sleep) {
> +		__poll_wait(filp, sk->sk_sleep, p);
> +		/*
> +		 * We need to be sure we are in sync with the
> +		 * socket flags modification.
> +		 *
> +		 * This memory barrier is paired in the sk_has_sleeper.
> +		*/
> +		smp_mb();
> +	}
> +}
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Jarek Poplawski June 29, 2009, 6:04 p.m. UTC | #6
On Mon, Jun 29, 2009 at 10:36:30AM -0700, Davide Libenzi wrote:
> On Mon, 29 Jun 2009, Jarek Poplawski wrote:
> 
> > > I think Oleg already said this, but you can use directly poll_wait() 
> > > without adding another abstraction, and the compiler will drop the double 
> > > check for you:
> > 
> > I think Oleg told about cosmetics and let Jiri to choose. I'd only
> > add it's not mainly about optimization, but easy showing the main
> > difference, of course depending on taste.
> 
> We already have a universally used function to do that, and that's 
> poll_wait().
> That code (adding an extra __poll_wait()) was entirely about 
> optimizations (otherwise why not use the existing poll_wait()?), so if 
> the optimization does not actually take place, IMO it's better to not add 
> an extra API.

OK, you're right, it is about optimization! But IMHO mainly about
reading optimization... I simply guess me and probably Jiri too,
after reading Oleg's variant thought about compiler, instead of the
real difference.

Btw., maybe I miss something but I guess Oleg proposed something in
between: inlining __poll_wait(), which would save us 'extra API' and
compiler doubts. (But I still prefer Jiri's choice. ;-)

Jarek P.
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Jarek Poplawski June 29, 2009, 6:14 p.m. UTC | #7
On Mon, Jun 29, 2009 at 08:04:31PM +0200, Jarek Poplawski wrote:
...
> Btw., maybe I miss something but I guess Oleg proposed something in
> between: inlining __poll_wait(), which would save us 'extra API' and
> compiler doubts. (But I still prefer Jiri's choice. ;-)

After re-reading I guess Oleg didn't proposed anything in between yet.

Sorry,
Jarek P.
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Jarek Poplawski June 29, 2009, 7:47 p.m. UTC | #8
On Mon, Jun 29, 2009 at 08:14:42PM +0200, Jarek Poplawski wrote:
...
> > (But I still prefer Jiri's choice. ;-)

...Even if Jiri decides to change his mind, because it really wasn't
intended for any persuasions.

Jarek P.
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Jiri Olsa June 29, 2009, 8:05 p.m. UTC | #9
On Mon, Jun 29, 2009 at 07:19:26PM +0200, Jarek Poplawski wrote:
> On Mon, Jun 29, 2009 at 04:14:45PM +0200, Jiri Olsa wrote:
> ...
> > +static inline void sock_poll_wait(struct file *filp, struct sock *sk,
> ...
> > +	sock_poll_wait(file, sk->sk_sleep, wait);
> 
> -----------------------------^^^^^^^^^^^^
> Is it something with my eyes or it wasn't compiled? ;-)
> 
> Jarek P.

your eyes are great, my brain is screwed... sry about that, I'll resend

it was compiled actually.. must have ended up with warning I did not
noticed... thanks!

jirka
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Jiri Olsa June 29, 2009, 8:17 p.m. UTC | #10
On Mon, Jun 29, 2009 at 10:36:30AM -0700, Davide Libenzi wrote:
> On Mon, 29 Jun 2009, Jarek Poplawski wrote:
> 
> > > I think Oleg already said this, but you can use directly poll_wait() 
> > > without adding another abstraction, and the compiler will drop the double 
> > > check for you:
> > 
> > I think Oleg told about cosmetics and let Jiri to choose. I'd only
> > add it's not mainly about optimization, but easy showing the main
> > difference, of course depending on taste.
> 
> We already have a universally used function to do that, and that's 
> poll_wait().
> That code (adding an extra __poll_wait()) was entirely about 
> optimizations (otherwise why not use the existing poll_wait()?), so if 
> the optimization does not actually take place, IMO it's better to not add 
> an extra API.
> 
> 
> 
> - Davide
> 
>

my thinking was that both variants will endup in the same code anyway,
so it'd be probably better if the more readable (subjective) got in..

however I dont have any strong preffering feelings about either of those choices,
so I can convert easilly :)

jirka
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Davide Libenzi June 29, 2009, 8:20 p.m. UTC | #11
On Mon, 29 Jun 2009, Jiri Olsa wrote:

> my thinking was that both variants will endup in the same code anyway,
> so it'd be probably better if the more readable (subjective) got in..
> 
> however I dont have any strong preffering feelings about either of those choices,
> so I can convert easilly :)

Please use the existing poll_wait() then, as there's no reason to add 
another API.


- Davide


--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/include/linux/poll.h b/include/linux/poll.h
index fa287f2..b2ea8ef 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -35,10 +35,17 @@  typedef struct poll_table_struct {
 	unsigned long key;
 } poll_table;
 
-static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
+static inline void __poll_wait(struct file *filp,
+				wait_queue_head_t *wait_address, poll_table *p)
+{
+	p->qproc(filp, wait_address, p);
+}
+
+static inline void poll_wait(struct file *filp,
+				wait_queue_head_t *wait_address, poll_table *p)
 {
 	if (p && wait_address)
-		p->qproc(filp, wait_address, p);
+		__poll_wait(filp, wait_address, p);
 }
 
 static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
diff --git a/include/net/sock.h b/include/net/sock.h
index 352f06b..e9137ed 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -54,6 +54,7 @@ 
 
 #include <linux/filter.h>
 #include <linux/rculist_nulls.h>
+#include <linux/poll.h>
 
 #include <asm/atomic.h>
 #include <net/dst.h>
@@ -1241,6 +1242,45 @@  static inline int sk_has_allocations(const struct sock *sk)
 	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
 }
 
+/**
+ * sk_has_sleeper - check if there are any waiting processes
+ * @sk: socket
+ *
+ * Returns true if socket has waiting processes
+ */
+static inline int sk_has_sleeper(struct sock *sk)
+{
+	/*
+	 * We need to be sure we are in sync with the
+	 * add_wait_queue modifications to the wait queue.
+	 *
+	 * This memory barrier is paired in the sock_poll_wait.
+	 */
+	smp_mb();
+	return sk->sk_sleep && waitqueue_active(sk->sk_sleep);
+}
+
+/**
+ * sock_poll_wait - place memory barrier behind the __poll_wait call.
+ * @filp: file
+ * @sk:   socket
+ * @p:    poll_table
+ */
+static inline void sock_poll_wait(struct file *filp, struct sock *sk,
+		poll_table *p)
+{
+	if (p && sk->sk_sleep) {
+		__poll_wait(filp, sk->sk_sleep, p);
+		/*
+		 * We need to be sure we are in sync with the
+		 * socket flags modification.
+		 *
+		 * This memory barrier is paired in the sk_has_sleeper.
+		*/
+		smp_mb();
+	}
+}
+
 /*
  * 	Queue a received datagram if it will fit. Stream and sequenced
  *	protocols can't normally use this as they need to fit buffers in
diff --git a/net/atm/common.c b/net/atm/common.c
index c1c9793..8c4d843 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -92,7 +92,7 @@  static void vcc_sock_destruct(struct sock *sk)
 static void vcc_def_wakeup(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+	if (sk_has_sleeper(sk))
 		wake_up(sk->sk_sleep);
 	read_unlock(&sk->sk_callback_lock);
 }
@@ -110,7 +110,7 @@  static void vcc_write_space(struct sock *sk)
 	read_lock(&sk->sk_callback_lock);
 
 	if (vcc_writable(sk)) {
-		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+		if (sk_has_sleeper(sk))
 			wake_up_interruptible(sk->sk_sleep);
 
 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
@@ -594,7 +594,7 @@  unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
 	struct atm_vcc *vcc;
 	unsigned int mask;
 
-	poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk->sk_sleep, wait);
 	mask = 0;
 
 	vcc = ATM_SD(sock);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 58abee1..b0fe692 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -712,7 +712,7 @@  unsigned int datagram_poll(struct file *file, struct socket *sock,
 	struct sock *sk = sock->sk;
 	unsigned int mask;
 
-	poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk->sk_sleep, wait);
 	mask = 0;
 
 	/* exceptional events? */
diff --git a/net/core/sock.c b/net/core/sock.c
index b0ba569..6354863 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1715,7 +1715,7 @@  EXPORT_SYMBOL(sock_no_sendpage);
 static void sock_def_wakeup(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+	if (sk_has_sleeper(sk))
 		wake_up_interruptible_all(sk->sk_sleep);
 	read_unlock(&sk->sk_callback_lock);
 }
@@ -1723,7 +1723,7 @@  static void sock_def_wakeup(struct sock *sk)
 static void sock_def_error_report(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+	if (sk_has_sleeper(sk))
 		wake_up_interruptible_poll(sk->sk_sleep, POLLERR);
 	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
 	read_unlock(&sk->sk_callback_lock);
@@ -1732,7 +1732,7 @@  static void sock_def_error_report(struct sock *sk)
 static void sock_def_readable(struct sock *sk, int len)
 {
 	read_lock(&sk->sk_callback_lock);
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+	if (sk_has_sleeper(sk))
 		wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
 						POLLRDNORM | POLLRDBAND);
 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
@@ -1747,7 +1747,7 @@  static void sock_def_write_space(struct sock *sk)
 	 * progress.  --DaveM
 	 */
 	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
-		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+		if (sk_has_sleeper(sk))
 			wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
 						POLLWRNORM | POLLWRBAND);
 
diff --git a/net/dccp/output.c b/net/dccp/output.c
index c0e88c1..c96119f 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -196,7 +196,7 @@  void dccp_write_space(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
 
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+	if (sk_has_sleeper(sk))
 		wake_up_interruptible(sk->sk_sleep);
 	/* Should agree with poll, otherwise some programs break */
 	if (sock_writeable(sk))
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 314a1b5..94ca8ea 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -311,7 +311,7 @@  unsigned int dccp_poll(struct file *file, struct socket *sock,
 	unsigned int mask;
 	struct sock *sk = sock->sk;
 
-	poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk->sk_sleep, wait);
 	if (sk->sk_state == DCCP_LISTEN)
 		return inet_csk_listen_poll(sk);
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 17b89c5..aa0ac36 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -339,7 +339,7 @@  unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	struct sock *sk = sock->sk;
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk->sk_sleep, wait);
 	if (sk->sk_state == TCP_LISTEN)
 		return inet_csk_listen_poll(sk);
 
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 6be5f92..49c15b4 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -306,7 +306,7 @@  static inline int iucv_below_msglim(struct sock *sk)
 static void iucv_sock_wake_msglim(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+	if (sk_has_sleeper(sk))
 		wake_up_interruptible_all(sk->sk_sleep);
 	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 	read_unlock(&sk->sk_callback_lock);
@@ -1256,7 +1256,7 @@  unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
 	struct sock *sk = sock->sk;
 	unsigned int mask = 0;
 
-	poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk->sk_sleep, wait);
 
 	if (sk->sk_state == IUCV_LISTEN)
 		return iucv_accept_poll(sk);
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index eac5e7b..bfe493e 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -63,7 +63,7 @@  static void rxrpc_write_space(struct sock *sk)
 	_enter("%p", sk);
 	read_lock(&sk->sk_callback_lock);
 	if (rxrpc_writable(sk)) {
-		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+		if (sk_has_sleeper(sk))
 			wake_up_interruptible(sk->sk_sleep);
 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 	}
@@ -588,7 +588,7 @@  static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
 	unsigned int mask;
 	struct sock *sk = sock->sk;
 
-	poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk->sk_sleep, wait);
 	mask = 0;
 
 	/* the socket is readable if there are any messages waiting on the Rx
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 36d4e44..fc3ebb9 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -315,7 +315,7 @@  static void unix_write_space(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
 	if (unix_writable(sk)) {
-		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+		if (sk_has_sleeper(sk))
 			wake_up_interruptible_sync(sk->sk_sleep);
 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 	}
@@ -1985,7 +1985,7 @@  static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table
 	struct sock *sk = sock->sk;
 	unsigned int mask;
 
-	poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk->sk_sleep, wait);
 	mask = 0;
 
 	/* exceptional events? */
@@ -2022,7 +2022,7 @@  static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
 	struct sock *sk = sock->sk, *other;
 	unsigned int mask, writable;
 
-	poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk->sk_sleep, wait);
 	mask = 0;
 
 	/* exceptional events? */
@@ -2053,7 +2053,7 @@  static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
 		other = unix_peer_get(sk);
 		if (other) {
 			if (unix_peer(other) != sk) {
-				poll_wait(file, &unix_sk(other)->peer_wait,
+				sock_poll_wait(file, &unix_sk(other)->peer_wait,
 					  wait);
 				if (unix_recvq_full(other))
 					writable = 0;