@@ -44,6 +44,24 @@ struct elision_config __elision_aconf =
.skip_trylock_internal_abort = 3,
};
+struct elision_config __elision_rwconf =
+ {
+ /* How often to not attempt to use elision if a transaction aborted
+ because the lock is already acquired. Expressed in number of lock
+ acquisition attempts. */
+ .skip_lock_busy = 3,
+ /* How often to not attempt to use elision if a transaction aborted due
+ to reasons other than other threads' memory accesses. Expressed in
+ number of lock acquisition attempts. */
+ .skip_lock_internal_abort = 3,
+ /* How often we retry using elision if there is chance for the transaction
+ to finish execution (e.g., it wasn't aborted due to the lock being
+ already acquired). */
+ .retry_try_xbegin = 3,
+ /* Same as SKIP_LOCK_INTERNAL_ABORT but for trylock. */
+ .skip_trylock_internal_abort = 3,
+ };
+
struct tune
{
const char *name;
@@ -167,7 +185,7 @@ elision_init (int argc __attribute__ ((unused)),
__pthread_force_elision = __libc_enable_secure ? 0 : __elision_available;
#endif
if (!HAS_RTM)
- __elision_aconf.retry_try_xbegin = 0; /* Disable elision on rwlocks */
+ __elision_rwconf.retry_try_xbegin = 0; /* Disable elision on rwlocks */
/* For static builds need to call this explicitely. Noop for dynamic. */
@@ -33,6 +33,7 @@ struct elision_config
};
extern struct elision_config __elision_aconf attribute_hidden;
+extern struct elision_config __elision_rwconf attribute_hidden;
extern int __elision_available attribute_hidden;
extern int __pthread_force_elision attribute_hidden;
@@ -36,14 +36,14 @@ elision_adapt(signed char *adapt_count, unsigned int status)
/* Right now we skip here. Better would be to wait a bit
and retry. This likely needs some spinning. Be careful
to avoid writing the lock. */
- if (*adapt_count != __elision_aconf.skip_lock_busy)
- ACCESS_ONCE (*adapt_count) = __elision_aconf.skip_lock_busy;
+ if (*adapt_count != __elision_rwconf.skip_lock_busy)
+ ACCESS_ONCE (*adapt_count) = __elision_rwconf.skip_lock_busy;
}
/* Internal abort. There is no chance for retry.
Use the normal locking and next time use lock.
Be careful to avoid writing to the lock. */
- else if (*adapt_count != __elision_aconf.skip_lock_internal_abort)
- ACCESS_ONCE (*adapt_count) = __elision_aconf.skip_lock_internal_abort;
+ else if (*adapt_count != __elision_rwconf.skip_lock_internal_abort)
+ ACCESS_ONCE (*adapt_count) = __elision_rwconf.skip_lock_internal_abort;
return true;
}
@@ -58,7 +58,7 @@ elision_adapt(signed char *adapt_count, unsigned int status)
\
if ((adapt_count) <= 0) \
{ \
- for (int i = __elision_aconf.retry_try_xbegin; i > 0; i--) \
+ for (int i = __elision_rwconf.retry_try_xbegin; i > 0; i--) \
{ \
unsigned int status; \
if ((status = _xbegin ()) == _XBEGIN_STARTED) \
@@ -84,7 +84,7 @@ elision_adapt(signed char *adapt_count, unsigned int status)
#define ELIDE_TRYLOCK(adapt_count, is_lock_free, write) ({ \
int ret = 0; \
- if (__elision_aconf.retry_try_xbegin > 0) \
+ if (__elision_rwconf.retry_try_xbegin > 0) \
{ \
if (write) \
_xabort (_ABORT_NESTED_TRYLOCK); \
From: Andi Kleen <ak@linux.intel.com> Use a separate configuration structure for rwlock elision. This allows a follow-up patch to change the rwlock elision parameters separately for mutexes and rwlocks. 2014-12-17 Andi Kleen <ak@linux.intel.com> * sysdeps/unix/sysv/linux/x86/elision-conf.c (__elision_rwconf): Add new tuning struct for rwlocks. (elision_init): Use __elision_rwconf to disable rwlock elision. * sysdeps/unix/sysv/linux/x86/elision-conf.h (__elision_rwconf): Declare new symbol. * sysdeps/x86/elide.h (elision_adapt): Use __elision_rwconf for retry counts. (ELIDE_TRYLOCK): Ditto. --- sysdeps/unix/sysv/linux/x86/elision-conf.c | 20 +++++++++++++++++++- sysdeps/unix/sysv/linux/x86/elision-conf.h | 1 + sysdeps/x86/elide.h | 12 ++++++------ 3 files changed, 26 insertions(+), 7 deletions(-)