
[v4,2/3] Reduce CAS in __pthread_mutex_lock_full [BZ #28537]

Message ID 20211110001614.2087610-3-hjl.tools@gmail.com
Series: Optimize CAS [BZ #28537]

Commit Message

H.J. Lu Nov. 10, 2021, 12:16 a.m. UTC
Change __pthread_mutex_lock_full to do an atomic load first and skip
the CAS when the compare would fail, to reduce cache-line bouncing on
contended locks.
---
 nptl/pthread_mutex_lock.c | 38 +++++++++++++++++++++++++++++++-------
 1 file changed, 31 insertions(+), 7 deletions(-)
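
The pattern in these hunks is the classic "test" before "test-and-set":
issue a relaxed load and attempt the acquire CAS only when the observed
value is the one the CAS expects, so threads spinning on a contended
lock read the cache line in shared state instead of repeatedly
requesting it exclusive. A minimal sketch of the idea in portable C11
atomics (lock_word and try_acquire are made-up names for illustration,
not glibc's internal macros):

#include <stdatomic.h>

/* Hypothetical lock word; zero means unlocked.  */
static _Atomic int lock_word;

/* Try to acquire the lock by storing TID into a zero lock word.
   Returns 0 on success, the observed lock value on failure.  */
static int
try_acquire (int tid)
{
  /* Test first: if the word is non-zero the CAS below would fail
     anyway, so skip it and avoid bouncing the cache line.  */
  int oldval = atomic_load_explicit (&lock_word, memory_order_relaxed);
  if (oldval != 0)
    return oldval;

  /* Test-and-set: the word looked free, so try the acquire CAS.
     On failure, oldval is updated to the value actually observed.  */
  atomic_compare_exchange_strong_explicit (&lock_word, &oldval, tid,
                                           memory_order_acquire,
                                           memory_order_relaxed);
  return oldval;
}

On the failure path the caller already holds the freshly loaded lock
value in oldval, which is what the tid_failed and locked_mutex labels
in the patch below exploit to jump straight into the slow path.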

Patch

diff --git a/nptl/pthread_mutex_lock.c b/nptl/pthread_mutex_lock.c
index 2bd41767e0..1126ecba95 100644
--- a/nptl/pthread_mutex_lock.c
+++ b/nptl/pthread_mutex_lock.c
@@ -204,6 +204,10 @@  __pthread_mutex_lock_full (pthread_mutex_t *mutex)
 	     our TID | assume_other_futex_waiters.  */
 	  if (__glibc_likely (oldval == 0))
 	    {
+	      oldval = atomic_load_relaxed (&mutex->__data.__lock);
+	      if (oldval != 0)
+		goto tid_failed;
+
 	      oldval
 	        = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
 	            id | assume_other_futex_waiters, 0);
@@ -213,6 +217,13 @@  __pthread_mutex_lock_full (pthread_mutex_t *mutex)
 
 	  if ((oldval & FUTEX_OWNER_DIED) != 0)
 	    {
+	      int currval = atomic_load_relaxed (&mutex->__data.__lock);
+	      if (currval != oldval)
+		{
+		  oldval = currval;
+		  continue;
+		}
+
 	      /* The previous owner died.  Try locking the mutex.  */
 	      int newval = id;
 #ifdef NO_INCR
@@ -259,6 +270,7 @@  __pthread_mutex_lock_full (pthread_mutex_t *mutex)
 	      return EOWNERDEAD;
 	    }
 
+ tid_failed:
 	  /* Check whether we already hold the mutex.  */
 	  if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
 	    {
@@ -411,11 +423,15 @@  __pthread_mutex_lock_full (pthread_mutex_t *mutex)
 # ifdef NO_INCR
 	newval |= FUTEX_WAITERS;
 # endif
+	oldval = atomic_load_relaxed (&mutex->__data.__lock);
+	if (oldval != 0)
+	  goto locked_mutex;
 	oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
 						      newval, 0);
 
 	if (oldval != 0)
 	  {
+ locked_mutex:
 	    /* The mutex is locked.  The kernel will now take care of
 	       everything.  */
 	    int private = (robust
@@ -554,6 +570,10 @@  __pthread_mutex_lock_full (pthread_mutex_t *mutex)
 	    ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
 	    oldprio = ceiling;
 
+	    oldval = atomic_load_relaxed (&mutex->__data.__lock);
+	    if (oldval != ceilval)
+	      goto ceilval_failed;
+
 	    oldval
 	      = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
 #ifdef NO_INCR
@@ -568,10 +588,13 @@  __pthread_mutex_lock_full (pthread_mutex_t *mutex)
 
 	    do
 	      {
-		oldval
-		  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
-							 ceilval | 2,
-							 ceilval | 1);
+	        oldval = atomic_load_relaxed (&mutex->__data.__lock);
+ ceilval_failed:
+		if (oldval == (ceilval | 1))
+		  oldval
+		    = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+							   ceilval | 2,
+							   ceilval | 1);
 
 		if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
 		  break;
@@ -581,9 +604,10 @@  __pthread_mutex_lock_full (pthread_mutex_t *mutex)
 			      ceilval | 2,
 			      PTHREAD_MUTEX_PSHARED (mutex));
 	      }
-	    while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
-							ceilval | 2, ceilval)
-		   != ceilval);
+	    while (atomic_load_relaxed (&mutex->__data.__lock) != ceilval
+		   || (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+							    ceilval | 2, ceilval)
+		       != ceilval));
 	  }
 	while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);
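
In the priority-protection path the same guard is folded into the
retry loop: each iteration re-reads the lock word and performs the CAS
only when the word holds the value the CAS expects. A rough sketch of
that shape, again in C11 atomics with made-up names (lockword,
lock_at_ceiling; the futex wait is elided):

#include <stdatomic.h>

/* Sketch of the reworked retry loop: re-read the lock word each
   iteration and attempt the CAS only when the word is in the state
   the CAS expects, i.e. unlocked at the current ceiling.  */
static _Atomic int lockword;

static void
lock_at_ceiling (int ceilval)
{
  for (;;)
    {
      int oldval = atomic_load_explicit (&lockword,
                                         memory_order_relaxed);
      if (oldval == ceilval)
        {
          int expected = ceilval;
          if (atomic_compare_exchange_strong_explicit
              (&lockword, &expected, ceilval | 2,
               memory_order_acquire, memory_order_relaxed))
            return;  /* Acquired; word is now ceilval | 2.  */
        }
      /* Otherwise the lock is held: a real implementation would set
         the waiters bit and futex-wait here before retrying.  */
    }
}

The final while condition in the hunk expresses the same guard
inline: it keeps looping while the relaxed load shows the word is not
ceilval, short-circuiting past the CAS, and only issues the CAS once
the load suggests it can succeed.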