get:
Show a patch.

patch:
Partially update a patch (only the fields supplied).

put:
Update a patch.
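
As an illustration, a client could read this patch anonymously and, with a maintainer API token, change writable fields such as its state through the PATCH method. The sketch below is a minimal example using Python's requests library; the token value and the "accepted" state name are placeholder assumptions, not values taken from this response.

import requests

BASE = "http://patchwork.ozlabs.org/api"
PATCH_ID = 811249

# Read-only access needs no authentication.
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# Updating a patch is assumed to require a maintainer API token
# (DRF-style "Token <key>" header); the key below is a placeholder.
headers = {"Authorization": "Token 0123456789abcdef"}
update = {"state": "accepted"}  # hypothetical new state
resp = requests.patch(f"{BASE}/patches/{PATCH_ID}/", json=update, headers=headers)
resp.raise_for_status()
print(resp.json()["state"])

A PUT request would work the same way but is expected to supply all writable fields rather than a partial set.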

GET /api/patches/811249/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 811249,
    "url": "http://patchwork.ozlabs.org/api/patches/811249/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/glibc/patch/20170907224219.12483-44-albert.aribaud@3adev.fr/",
    "project": {
        "id": 41,
        "url": "http://patchwork.ozlabs.org/api/projects/41/?format=api",
        "name": "GNU C Library",
        "link_name": "glibc",
        "list_id": "libc-alpha.sourceware.org",
        "list_email": "libc-alpha@sourceware.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20170907224219.12483-44-albert.aribaud@3adev.fr>",
    "list_archive_url": null,
    "date": "2017-09-07T22:42:10",
    "name": "[RFC,43/52] Y2038: add functions using futexes",
    "commit_ref": null,
    "pull_url": null,
    "state": "new",
    "archived": false,
    "hash": "9e36098baf08623227ae6e6e189079595794feed",
    "submitter": {
        "id": 65557,
        "url": "http://patchwork.ozlabs.org/api/people/65557/?format=api",
        "name": "Albert ARIBAUD (3ADEV)",
        "email": "albert.aribaud@3adev.fr"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/glibc/patch/20170907224219.12483-44-albert.aribaud@3adev.fr/mbox/",
    "series": [
        {
            "id": 2074,
            "url": "http://patchwork.ozlabs.org/api/series/2074/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/glibc/list/?series=2074",
            "date": "2017-09-07T22:41:27",
            "name": "Make GLIBC Y2038-proof",
            "version": 1,
            "mbox": "http://patchwork.ozlabs.org/series/2074/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/811249/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/811249/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<libc-alpha-return-84363-incoming=patchwork.ozlabs.org@sourceware.org>",
        "X-Original-To": "incoming@patchwork.ozlabs.org",
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "mailing list libc-alpha@sourceware.org"
        ],
        "Authentication-Results": [
            "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=sourceware.org\n\t(client-ip=209.132.180.131; helo=sourceware.org;\n\tenvelope-from=libc-alpha-return-84363-incoming=patchwork.ozlabs.org@sourceware.org;\n\treceiver=<UNKNOWN>)",
            "ozlabs.org; dkim=pass (1024-bit key;\n\tsecure) header.d=sourceware.org header.i=@sourceware.org\n\theader.b=\"x6CicD7g\"; dkim-atps=neutral",
            "sourceware.org; auth=none"
        ],
        "Received": [
            "from sourceware.org (server1.sourceware.org [209.132.180.131])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 3xpG013JMNz9sDB\n\tfor <incoming@patchwork.ozlabs.org>;\n\tFri,  8 Sep 2017 08:51:13 +1000 (AEST)",
            "(qmail 124411 invoked by alias); 7 Sep 2017 22:45:43 -0000",
            "(qmail 123850 invoked by uid 89); 7 Sep 2017 22:45:41 -0000"
        ],
        "DomainKey-Signature": "a=rsa-sha1; c=nofws; d=sourceware.org; h=list-id\n\t:list-unsubscribe:list-subscribe:list-archive:list-post\n\t:list-help:sender:from:to:cc:subject:date:message-id:in-reply-to\n\t:references; q=dns; s=default; b=EeNUMkTtuQ6x+LtPdmK6OrL+GwRLtT5\n\tnNo7ooDz/ZrT4SEU2G694XDeVEqpV7ZFgYjiaPDkLGJi9BKfp9fV2w1SQ/bEx5CD\n\t8NCQsJSiYdLEkI+/6Z7XD9bF2rQZielmAi1CcMDS9zFfd19C4lfGLvdmR0+ZZf0Q\n\ttLmYJoA+UW0s=",
        "DKIM-Signature": "v=1; a=rsa-sha1; c=relaxed; d=sourceware.org; h=list-id\n\t:list-unsubscribe:list-subscribe:list-archive:list-post\n\t:list-help:sender:from:to:cc:subject:date:message-id:in-reply-to\n\t:references; s=default; bh=ZtRLf9eYWbJQGnV4rSdCT/DW2+A=; b=x6Cic\n\tD7g8A6NFblHXCT8WLhen+BtWgaLTEjPFZh0Xv2N+eRxOIX6s5a8TCGLhAp81R6n8\n\tnNN3IM7u0zyOKdS6xysdpXkGU5Dp9RlR01Ss8J3T95zV/D18j7K++hF9PCORSOBp\n\tlNvBb1Ey5AaRmxr3gDaP60O+UHwrzgX62lKhe4=",
        "Mailing-List": "contact libc-alpha-help@sourceware.org; run by ezmlm",
        "Precedence": "bulk",
        "List-Id": "<libc-alpha.sourceware.org>",
        "List-Unsubscribe": "<mailto:libc-alpha-unsubscribe-incoming=patchwork.ozlabs.org@sourceware.org>",
        "List-Subscribe": "<mailto:libc-alpha-subscribe@sourceware.org>",
        "List-Archive": "<http://sourceware.org/ml/libc-alpha/>",
        "List-Post": "<mailto:libc-alpha@sourceware.org>",
        "List-Help": "<mailto:libc-alpha-help@sourceware.org>,\n\t<http://sourceware.org/ml/#faqs>",
        "Sender": "libc-alpha-owner@sourceware.org",
        "X-Virus-Found": "No",
        "X-Spam-SWARE-Status": "No, score=-26.6 required=5.0 tests=BAYES_00, GIT_PATCH_0,\n\tGIT_PATCH_1, GIT_PATCH_2, GIT_PATCH_3,\n\tKAM_LAZY_DOMAIN_SECURITY,\n\tRCVD_IN_DNSWL_LOW autolearn=ham version=3.3.2 spammy=discounted,\n\tWake, Changing",
        "X-HELO": "smtp6-g21.free.fr",
        "From": "\"Albert ARIBAUD (3ADEV)\" <albert.aribaud@3adev.fr>",
        "To": "libc-alpha@sourceware.org",
        "Cc": "\"Albert ARIBAUD (3ADEV)\" <albert.aribaud@3adev.fr>",
        "Subject": "[RFC PATCH 43/52] Y2038: add functions using futexes",
        "Date": "Fri,  8 Sep 2017 00:42:10 +0200",
        "Message-Id": "<20170907224219.12483-44-albert.aribaud@3adev.fr>",
        "In-Reply-To": "<20170907224219.12483-43-albert.aribaud@3adev.fr>",
        "References": "<20170907224219.12483-1-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-2-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-3-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-4-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-5-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-6-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-7-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-8-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-9-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-10-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-11-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-12-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-13-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-14-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-15-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-16-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-17-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-18-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-19-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-20-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-21-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-22-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-23-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-24-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-25-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-26-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-27-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-28-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-29-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-30-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-31-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-32-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-33-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-34-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-35-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-36-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-37-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-38-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-39-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-40-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-41-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-42-albert.aribaud@3adev.fr>\n\t<20170907224219.12483-43-albert.aribaud@3adev.fr>"
    },
    "content": "This creates 64-bit time versions of the following APIs:\n- pthread_rwlock_timedrdlock\n- pthread_rwlock_timedwrlock\n- pthread_mutex_timedlock\n- pthread_cond_timedwait\n- sem_timedwait\n- aio_suspend\n\nIt also creates 64-bit time versions of the following\nfunctions or macros:\n- lll_timedlock_elision\n- lll_timedlock\n- __lll_timedlock_wait\n- futex_reltimed_wait_cancelable\n- lll_futex_timed_wait\n- __pthread_cond_wait_common\n- futex_abstimed_wait_cancelable\n- lll_futex_timed_wait_bitset\n- do_aio_misc_wait\n- AIO_MISC_WAIT\n- __new_sem_wait_slow\n- do_futex_wait\n- __pthread_rwlock_wrlock_full\n- __pthread_rwlock_rdlock_full\n- futex_abstimed_wait\n\nSigned-off-by: Albert ARIBAUD (3ADEV) <albert.aribaud@3adev.fr>\n---\n nptl/Versions                                |  11 +\n nptl/lll_timedlock_wait.c                    |  37 ++\n nptl/pthread_cond_wait.c                     | 285 +++++++++++++\n nptl/pthread_mutex_timedlock.c               | 616 +++++++++++++++++++++++++++\n nptl/pthread_rwlock_common.c                 | 591 +++++++++++++++++++++++++\n nptl/pthread_rwlock_timedrdlock.c            |  19 +\n nptl/pthread_rwlock_timedwrlock.c            |  19 +\n nptl/sem_timedwait.c                         |  18 +\n nptl/sem_wait.c                              |  24 ++\n nptl/sem_waitcommon.c                        | 172 ++++++++\n rt/Versions                                  |   1 +\n sysdeps/nptl/aio_misc.h                      |  39 ++\n sysdeps/nptl/lowlevellock.h                  |  17 +\n sysdeps/pthread/aio_suspend.c                | 164 +++++++\n sysdeps/unix/sysv/linux/futex-internal.h     | 123 ++++++\n sysdeps/unix/sysv/linux/lowlevellock-futex.h |  22 +\n 16 files changed, 2158 insertions(+)",
    "diff": "diff --git a/nptl/Versions b/nptl/Versions\nindex 0ae5def464..9ed0872eab 100644\n--- a/nptl/Versions\n+++ b/nptl/Versions\n@@ -272,4 +272,15 @@ libpthread {\n     __pthread_barrier_init; __pthread_barrier_wait;\n     __shm_directory;\n   }\n+\n+  # Y2038 symbols are given their own version until they can be put in\n+  # the right place\n+\n+  GLIBC_Y2038 {\n+    __pthread_rwlock_rdlock_t64;\n+    __pthread_rwlock_wrlock_t64;\n+    __pthread_mutex_timedlock_t64;\n+    __sem_timedwait_t64;\n+    __pthread_cond_timedwait_t64;\n+  }\n }\ndiff --git a/nptl/lll_timedlock_wait.c b/nptl/lll_timedlock_wait.c\nindex 604953c04c..d757d9d92e 100644\n--- a/nptl/lll_timedlock_wait.c\n+++ b/nptl/lll_timedlock_wait.c\n@@ -57,3 +57,40 @@ __lll_timedlock_wait (int *futex, const struct timespec *abstime, int private)\n \n   return 0;\n }\n+\n+/* 64-bit time version */\n+\n+int\n+__lll_timedlock_wait_t64 (int *futex, const struct __timespec64 *abstime, int private)\n+{\n+  /* Reject invalid timeouts.  */\n+  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)\n+    return EINVAL;\n+\n+  /* Try locking.  */\n+  while (atomic_exchange_acq (futex, 2) != 0)\n+    {\n+      struct timeval tv;\n+\n+      /* Get the current time.  */\n+      (void) __gettimeofday (&tv, NULL);\n+\n+      /* Compute relative timeout.  */\n+      struct timespec rt;\n+      rt.tv_sec = abstime->tv_sec - tv.tv_sec;\n+      rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;\n+      if (rt.tv_nsec < 0)\n+        {\n+          rt.tv_nsec += 1000000000;\n+          --rt.tv_sec;\n+        }\n+\n+      if (rt.tv_sec < 0)\n+        return ETIMEDOUT;\n+\n+      /* If *futex == 2, wait until woken or timeout.  */\n+      lll_futex_timed_wait (futex, 2, &rt, private);\n+    }\n+\n+  return 0;\n+}\ndiff --git a/nptl/pthread_cond_wait.c b/nptl/pthread_cond_wait.c\nindex 7812b94a3a..4246e25aeb 100644\n--- a/nptl/pthread_cond_wait.c\n+++ b/nptl/pthread_cond_wait.c\n@@ -647,6 +647,280 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,\n   return (err != 0) ? err : result;\n }\n \n+/* 64-bit time variant */\n+\n+static __always_inline int\n+__pthread_cond_wait_common_t64 (pthread_cond_t *cond, pthread_mutex_t *mutex,\n+    const struct __timespec64 *abstime)\n+{\n+  const int maxspin = 0;\n+  int err;\n+  int result = 0;\n+\n+  LIBC_PROBE (cond_wait, 2, cond, mutex);\n+\n+  /* Acquire a position (SEQ) in the waiter sequence (WSEQ).  We use an\n+     atomic operation because signals and broadcasts may update the group\n+     switch without acquiring the mutex.  We do not need release MO here\n+     because we do not need to establish any happens-before relation with\n+     signalers (see __pthread_cond_signal); modification order alone\n+     establishes a total order of waiters/signals.  We do need acquire MO\n+     to synchronize with group reinitialization in\n+     __condvar_quiesce_and_switch_g1.  */\n+  uint64_t wseq = __condvar_fetch_add_wseq_acquire (cond, 2);\n+  /* Find our group's index.  We always go into what was G2 when we acquired\n+     our position.  */\n+  unsigned int g = wseq & 1;\n+  uint64_t seq = wseq >> 1;\n+\n+  /* Increase the waiter reference count.  Relaxed MO is sufficient because\n+     we only need to synchronize when decrementing the reference count.  
*/\n+  unsigned int flags = atomic_fetch_add_relaxed (&cond->__data.__wrefs, 8);\n+  int private = __condvar_get_private (flags);\n+\n+  /* Now that we are registered as a waiter, we can release the mutex.\n+     Waiting on the condvar must be atomic with releasing the mutex, so if\n+     the mutex is used to establish a happens-before relation with any\n+     signaler, the waiter must be visible to the latter; thus, we release the\n+     mutex after registering as waiter.\n+     If releasing the mutex fails, we just cancel our registration as a\n+     waiter and confirm that we have woken up.  */\n+  err = __pthread_mutex_unlock_usercnt (mutex, 0);\n+  if (__glibc_unlikely (err != 0))\n+    {\n+      __condvar_cancel_waiting (cond, seq, g, private);\n+      __condvar_confirm_wakeup (cond, private);\n+      return err;\n+    }\n+\n+  /* Now wait until a signal is available in our group or it is closed.\n+     Acquire MO so that if we observe a value of zero written after group\n+     switching in __condvar_quiesce_and_switch_g1, we synchronize with that\n+     store and will see the prior update of __g1_start done while switching\n+     groups too.  */\n+  unsigned int signals = atomic_load_acquire (cond->__data.__g_signals + g);\n+\n+  do\n+    {\n+      while (1)\n+\t{\n+\t  /* Spin-wait first.\n+\t     Note that spinning first without checking whether a timeout\n+\t     passed might lead to what looks like a spurious wake-up even\n+\t     though we should return ETIMEDOUT (e.g., if the caller provides\n+\t     an absolute timeout that is clearly in the past).  However,\n+\t     (1) spurious wake-ups are allowed, (2) it seems unlikely that a\n+\t     user will (ab)use pthread_cond_wait as a check for whether a\n+\t     point in time is in the past, and (3) spinning first without\n+\t     having to compare against the current time seems to be the right\n+\t     choice from a performance perspective for most use cases.  */\n+\t  unsigned int spin = maxspin;\n+\t  while (signals == 0 && spin > 0)\n+\t    {\n+\t      /* Check that we are not spinning on a group that's already\n+\t\t closed.  */\n+\t      if (seq < (__condvar_load_g1_start_relaxed (cond) >> 1))\n+\t\tgoto done;\n+\n+\t      /* TODO Back off.  */\n+\n+\t      /* Reload signals.  See above for MO.  */\n+\t      signals = atomic_load_acquire (cond->__data.__g_signals + g);\n+\t      spin--;\n+\t    }\n+\n+\t  /* If our group will be closed as indicated by the flag on signals,\n+\t     don't bother grabbing a signal.  */\n+\t  if (signals & 1)\n+\t    goto done;\n+\n+\t  /* If there is an available signal, don't block.  */\n+\t  if (signals != 0)\n+\t    break;\n+\n+\t  /* No signals available after spinning, so prepare to block.\n+\t     We first acquire a group reference and use acquire MO for that so\n+\t     that we synchronize with the dummy read-modify-write in\n+\t     __condvar_quiesce_and_switch_g1 if we read from that.  In turn,\n+\t     in this case this will make us see the closed flag on __g_signals\n+\t     that designates a concurrent attempt to reuse the group's slot.\n+\t     We use acquire MO for the __g_signals check to make the\n+\t     __g1_start check work (see spinning above).\n+\t     Note that the group reference acquisition will not mask the\n+\t     release MO when decrementing the reference count because we use\n+\t     an atomic read-modify-write operation and thus extend the release\n+\t     sequence.  
*/\n+\t  atomic_fetch_add_acquire (cond->__data.__g_refs + g, 2);\n+\t  if (((atomic_load_acquire (cond->__data.__g_signals + g) & 1) != 0)\n+\t      || (seq < (__condvar_load_g1_start_relaxed (cond) >> 1)))\n+\t    {\n+\t      /* Our group is closed.  Wake up any signalers that might be\n+\t\t waiting.  */\n+\t      __condvar_dec_grefs (cond, g, private);\n+\t      goto done;\n+\t    }\n+\n+\t  // Now block.\n+\t  struct _pthread_cleanup_buffer buffer;\n+\t  struct _condvar_cleanup_buffer cbuffer;\n+\t  cbuffer.wseq = wseq;\n+\t  cbuffer.cond = cond;\n+\t  cbuffer.mutex = mutex;\n+\t  cbuffer.private = private;\n+\t  __pthread_cleanup_push (&buffer, __condvar_cleanup_waiting, &cbuffer);\n+\n+\t  if (abstime == NULL)\n+\t    {\n+\t      /* Block without a timeout.  */\n+\t      err = futex_wait_cancelable (\n+\t\t  cond->__data.__g_signals + g, 0, private);\n+\t    }\n+\t  else\n+\t    {\n+\t      /* Block, but with a timeout.\n+\t\t Work around the fact that the kernel rejects negative timeout\n+\t\t values despite them being valid.  */\n+\t      if (__glibc_unlikely (abstime->tv_sec < 0))\n+\t        err = ETIMEDOUT;\n+\n+\t      else if ((flags & __PTHREAD_COND_CLOCK_MONOTONIC_MASK) != 0)\n+\t\t{\n+\t\t  /* CLOCK_MONOTONIC is requested.  */\n+\t\t  struct timespec rt;\n+\t\t  struct __timespec64 rt64;\n+\t\t  if (__clock_gettime (CLOCK_MONOTONIC, &rt) != 0)\n+\t\t    __libc_fatal (\"clock_gettime does not support \"\n+\t\t\t\t  \"CLOCK_MONOTONIC\");\n+\t\t  /* Convert the absolute timeout value to a relative\n+\t\t     timeout.  */\n+\t\t  rt64.tv_sec = abstime->tv_sec - rt.tv_sec;\n+\t\t  rt64.tv_nsec = abstime->tv_nsec - rt.tv_nsec;\n+\t\t  if (rt64.tv_nsec < 0)\n+\t\t    {\n+\t\t      rt64.tv_nsec += 1000000000;\n+\t\t      --rt64.tv_sec;\n+\t\t    }\n+\t\t  /* Did we already time out?  */\n+\t\t  if (__glibc_unlikely (rt64.tv_sec < 0))\n+\t\t    err = ETIMEDOUT;\n+\t\t  else\n+\t\t    err = futex_reltimed_wait_cancelable_t64\n+\t\t\t(cond->__data.__g_signals + g, 0, &rt64, private);\n+\t\t}\n+\t      else\n+\t\t{\n+\t\t  /* Use CLOCK_REALTIME.  */\n+\t\t  err = futex_abstimed_wait_cancelable_t64\n+\t\t      (cond->__data.__g_signals + g, 0, abstime, private);\n+\t\t}\n+\t    }\n+\n+\t  __pthread_cleanup_pop (&buffer, 0);\n+\n+\t  if (__glibc_unlikely (err == ETIMEDOUT))\n+\t    {\n+\t      __condvar_dec_grefs (cond, g, private);\n+\t      /* If we timed out, we effectively cancel waiting.  Note that\n+\t\t we have decremented __g_refs before cancellation, so that a\n+\t\t deadlock between waiting for quiescence of our group in\n+\t\t __condvar_quiesce_and_switch_g1 and us trying to acquire\n+\t\t the lock during cancellation is not possible.  */\n+\t      __condvar_cancel_waiting (cond, seq, g, private);\n+\t      result = ETIMEDOUT;\n+\t      goto done;\n+\t    }\n+\t  else\n+\t    __condvar_dec_grefs (cond, g, private);\n+\n+\t  /* Reload signals.  See above for MO.  */\n+\t  signals = atomic_load_acquire (cond->__data.__g_signals + g);\n+\t}\n+\n+    }\n+  /* Try to grab a signal.  Use acquire MO so that we see an up-to-date value\n+     of __g1_start below (see spinning above for a similar case).  In\n+     particular, if we steal from a more recent group, we will also see a\n+     more recent __g1_start below.  */\n+  while (!atomic_compare_exchange_weak_acquire (cond->__data.__g_signals + g,\n+\t\t\t\t\t\t&signals, signals - 2));\n+\n+  /* We consumed a signal but we could have consumed from a more recent group\n+     that aliased with ours due to being in the same group slot. 
 If this\n+     might be the case our group must be closed as visible through\n+     __g1_start.  */\n+  uint64_t g1_start = __condvar_load_g1_start_relaxed (cond);\n+  if (seq < (g1_start >> 1))\n+    {\n+      /* We potentially stole a signal from a more recent group but we do not\n+\t know which group we really consumed from.\n+\t We do not care about groups older than current G1 because they are\n+\t closed; we could have stolen from these, but then we just add a\n+\t spurious wake-up for the current groups.\n+\t We will never steal a signal from current G2 that was really intended\n+\t for G2 because G2 never receives signals (until it becomes G1).  We\n+\t could have stolen a signal from G2 that was conservatively added by a\n+\t previous waiter that also thought it stole a signal -- but given that\n+\t that signal was added unnecessarily, it's not a problem if we steal\n+\t it.\n+\t Thus, the remaining case is that we could have stolen from the current\n+\t G1, where \"current\" means the __g1_start value we observed.  However,\n+\t if the current G1 does not have the same slot index as we do, we did\n+\t not steal from it and do not need to undo that.  This is the reason\n+\t for putting a bit with G2's index into__g1_start as well.  */\n+      if (((g1_start & 1) ^ 1) == g)\n+\t{\n+\t  /* We have to conservatively undo our potential mistake of stealing\n+\t     a signal.  We can stop trying to do that when the current G1\n+\t     changes because other spinning waiters will notice this too and\n+\t     __condvar_quiesce_and_switch_g1 has checked that there are no\n+\t     futex waiters anymore before switching G1.\n+\t     Relaxed MO is fine for the __g1_start load because we need to\n+\t     merely be able to observe this fact and not have to observe\n+\t     something else as well.\n+\t     ??? Would it help to spin for a little while to see whether the\n+\t     current G1 gets closed?  This might be worthwhile if the group is\n+\t     small or close to being closed.  */\n+\t  unsigned int s = atomic_load_relaxed (cond->__data.__g_signals + g);\n+\t  while (__condvar_load_g1_start_relaxed (cond) == g1_start)\n+\t    {\n+\t      /* Try to add a signal.  We don't need to acquire the lock\n+\t\t because at worst we can cause a spurious wake-up.  If the\n+\t\t group is in the process of being closed (LSB is true), this\n+\t\t has an effect similar to us adding a signal.  */\n+\t      if (((s & 1) != 0)\n+\t\t  || atomic_compare_exchange_weak_relaxed\n+\t\t       (cond->__data.__g_signals + g, &s, s + 2))\n+\t\t{\n+\t\t  /* If we added a signal, we also need to add a wake-up on\n+\t\t     the futex.  We also need to do that if we skipped adding\n+\t\t     a signal because the group is being closed because\n+\t\t     while __condvar_quiesce_and_switch_g1 could have closed\n+\t\t     the group, it might stil be waiting for futex waiters to\n+\t\t     leave (and one of those waiters might be the one we stole\n+\t\t     the signal from, which cause it to block using the\n+\t\t     futex).  */\n+\t\t  futex_wake (cond->__data.__g_signals + g, 1, private);\n+\t\t  break;\n+\t\t}\n+\t      /* TODO Back off.  */\n+\t    }\n+\t}\n+    }\n+\n+ done:\n+\n+  /* Confirm that we have been woken.  We do that before acquiring the mutex\n+     to allow for execution of pthread_cond_destroy while having acquired the\n+     mutex.  */\n+  __condvar_confirm_wakeup (cond, private);\n+\n+  /* Woken up; now re-acquire the mutex.  
If this doesn't fail, return RESULT,\n+     which is set to ETIMEDOUT if a timeout occured, or zero otherwise.  */\n+  err = __pthread_mutex_cond_lock (mutex);\n+  /* XXX Abort on errors that are disallowed by POSIX?  */\n+  return (err != 0) ? err : result;\n+}\n+\n \n /* See __pthread_cond_wait_common.  */\n int\n@@ -667,6 +941,17 @@ __pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,\n   return __pthread_cond_wait_common (cond, mutex, abstime);\n }\n \n+int\n+__pthread_cond_timedwait_t64 (pthread_cond_t *cond, pthread_mutex_t *mutex,\n+    const struct __timespec64 *abstime)\n+{\n+  /* Check parameter validity.  This should also tell the compiler that\n+     it can assume that abstime is not NULL.  */\n+  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)\n+    return EINVAL;\n+  return __pthread_cond_wait_common_t64 (cond, mutex, abstime);\n+}\n+\n versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait,\n \t\t  GLIBC_2_3_2);\n versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait,\ndiff --git a/nptl/pthread_mutex_timedlock.c b/nptl/pthread_mutex_timedlock.c\nindex d5ec3141f3..6cad951129 100644\n--- a/nptl/pthread_mutex_timedlock.c\n+++ b/nptl/pthread_mutex_timedlock.c\n@@ -32,6 +32,10 @@\n #define lll_timedlock_elision(a,dummy,b,c) lll_timedlock(a, b, c)\n #endif\n \n+#ifndef lll_timedlock_elision_t64\n+#define lll_timedlock_elision_t64(a,dummy,b,c) lll_timedlock_t64(a, b, c)\n+#endif\n+\n #ifndef lll_trylock_elision\n #define lll_trylock_elision(a,t) lll_trylock(a)\n #endif\n@@ -638,3 +642,615 @@ __pthread_mutex_timedlock (pthread_mutex_t *mutex,\n   return result;\n }\n weak_alias (__pthread_mutex_timedlock, pthread_mutex_timedlock)\n+\n+/* 64-bit time version */\n+\n+int\n+pthread_mutex_timedlock_t64 (pthread_mutex_t *mutex,\n+\t\t\t const struct __timespec64 *abstime)\n+{\n+  int oldval;\n+  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);\n+  int result = 0;\n+\n+  LIBC_PROBE (mutex_timedlock_entry, 2, mutex, abstime);\n+\n+  /* We must not check ABSTIME here.  If the thread does not block\n+     abstime must not be checked for a valid value.  */\n+\n+  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),\n+\t\t\t    PTHREAD_MUTEX_TIMED_NP))\n+    {\n+      /* Recursive mutex.  */\n+    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:\n+    case PTHREAD_MUTEX_RECURSIVE_NP:\n+      /* Check whether we already hold the mutex.  */\n+      if (mutex->__data.__owner == id)\n+\t{\n+\t  /* Just bump the counter.  */\n+\t  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))\n+\t    /* Overflow of the counter.  */\n+\t    return EAGAIN;\n+\n+\t  ++mutex->__data.__count;\n+\n+\t  goto out;\n+\t}\n+\n+      /* We have to get the mutex.  */\n+      result = lll_timedlock_t64 (mutex->__data.__lock, abstime,\n+\t\t\t          PTHREAD_MUTEX_PSHARED (mutex));\n+\n+      if (result != 0)\n+\tgoto out;\n+\n+      /* Only locked once so far.  */\n+      mutex->__data.__count = 1;\n+      break;\n+\n+      /* Error checking mutex.  */\n+    case PTHREAD_MUTEX_ERRORCHECK_NP:\n+      /* Check whether we already hold the mutex.  */\n+      if (__glibc_unlikely (mutex->__data.__owner == id))\n+\treturn EDEADLK;\n+\n+      /* Don't do lock elision on an error checking mutex.  */\n+      goto simple;\n+\n+    case PTHREAD_MUTEX_TIMED_NP:\n+      FORCE_ELISION (mutex, goto elision);\n+    simple:\n+      /* Normal mutex.  
*/\n+      result = lll_timedlock_t64 (mutex->__data.__lock, abstime,\n+\t\t\t          PTHREAD_MUTEX_PSHARED (mutex));\n+      break;\n+\n+    case PTHREAD_MUTEX_TIMED_ELISION_NP:\n+    elision: __attribute__((unused))\n+      /* Don't record ownership */\n+      return lll_timedlock_elision_t64 (mutex->__data.__lock,\n+\t\t\t\t    mutex->__data.__spins,\n+\t\t\t\t    abstime,\n+\t\t\t\t    PTHREAD_MUTEX_PSHARED (mutex));\n+\n+\n+    case PTHREAD_MUTEX_ADAPTIVE_NP:\n+      if (! __is_smp)\n+\tgoto simple;\n+\n+      if (lll_trylock (mutex->__data.__lock) != 0)\n+\t{\n+\t  int cnt = 0;\n+\t  int max_cnt = MIN (MAX_ADAPTIVE_COUNT,\n+\t\t\t     mutex->__data.__spins * 2 + 10);\n+\t  do\n+\t    {\n+\t      if (cnt++ >= max_cnt)\n+\t\t{\n+\t\t  result = lll_timedlock_t64 (mutex->__data.__lock, abstime,\n+\t\t\t\t\t      PTHREAD_MUTEX_PSHARED (mutex));\n+\t\t  break;\n+\t\t}\n+\t      atomic_spin_nop ();\n+\t    }\n+\t  while (lll_trylock (mutex->__data.__lock) != 0);\n+\n+\t  mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;\n+\t}\n+      break;\n+\n+    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:\n+    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:\n+    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:\n+    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:\n+      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,\n+\t\t     &mutex->__data.__list.__next);\n+      /* We need to set op_pending before starting the operation.  Also\n+\t see comments at ENQUEUE_MUTEX.  */\n+      __asm (\"\" ::: \"memory\");\n+\n+      oldval = mutex->__data.__lock;\n+      /* This is set to FUTEX_WAITERS iff we might have shared the\n+\t FUTEX_WAITERS flag with other threads, and therefore need to keep it\n+\t set to avoid lost wake-ups.  We have the same requirement in the\n+\t simple mutex algorithm.  */\n+      unsigned int assume_other_futex_waiters = 0;\n+      while (1)\n+\t{\n+\t  /* Try to acquire the lock through a CAS from 0 (not acquired) to\n+\t     our TID | assume_other_futex_waiters.  */\n+\t  if (__glibc_likely (oldval == 0))\n+\t    {\n+\t      oldval\n+\t        = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,\n+\t            id | assume_other_futex_waiters, 0);\n+\t      if (__glibc_likely (oldval == 0))\n+\t\tbreak;\n+\t    }\n+\n+\t  if ((oldval & FUTEX_OWNER_DIED) != 0)\n+\t    {\n+\t      /* The previous owner died.  Try locking the mutex.  */\n+\t      int newval = id | (oldval & FUTEX_WAITERS)\n+\t\t  | assume_other_futex_waiters;\n+\n+\t      newval\n+\t\t= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,\n+\t\t\t\t\t\t       newval, oldval);\n+\t      if (newval != oldval)\n+\t\t{\n+\t\t  oldval = newval;\n+\t\t  continue;\n+\t\t}\n+\n+\t      /* We got the mutex.  */\n+\t      mutex->__data.__count = 1;\n+\t      /* But it is inconsistent unless marked otherwise.  */\n+\t      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;\n+\n+\t      /* We must not enqueue the mutex before we have acquired it.\n+\t\t Also see comments at ENQUEUE_MUTEX.  */\n+\t      __asm (\"\" ::: \"memory\");\n+\t      ENQUEUE_MUTEX (mutex);\n+\t      /* We need to clear op_pending after we enqueue the mutex.  */\n+\t      __asm (\"\" ::: \"memory\");\n+\t      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);\n+\n+\t      /* Note that we deliberately exit here.  If we fall\n+\t\t through to the end of the function __nusers would be\n+\t\t incremented which is not correct because the old\n+\t\t owner has to be discounted.  
*/\n+\t      return EOWNERDEAD;\n+\t    }\n+\n+\t  /* Check whether we already hold the mutex.  */\n+\t  if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))\n+\t    {\n+\t      int kind = PTHREAD_MUTEX_TYPE (mutex);\n+\t      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)\n+\t\t{\n+\t\t  /* We do not need to ensure ordering wrt another memory\n+\t\t     access.  Also see comments at ENQUEUE_MUTEX. */\n+\t\t  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,\n+\t\t\t\t NULL);\n+\t\t  return EDEADLK;\n+\t\t}\n+\n+\t      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)\n+\t\t{\n+\t\t  /* We do not need to ensure ordering wrt another memory\n+\t\t     access.  */\n+\t\t  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,\n+\t\t\t\t NULL);\n+\n+\t\t  /* Just bump the counter.  */\n+\t\t  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))\n+\t\t    /* Overflow of the counter.  */\n+\t\t    return EAGAIN;\n+\n+\t\t  ++mutex->__data.__count;\n+\n+\t\t  LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);\n+\n+\t\t  return 0;\n+\t\t}\n+\t    }\n+\n+\t  /* We are about to block; check whether the timeout is invalid.  */\n+\t  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)\n+\t    return EINVAL;\n+\t  /* Work around the fact that the kernel rejects negative timeout\n+\t     values despite them being valid.  */\n+\t  if (__glibc_unlikely (abstime->tv_sec < 0))\n+\t    return ETIMEDOUT;\n+#if (!defined __ASSUME_FUTEX_CLOCK_REALTIME \\\n+     || !defined lll_futex_timed_wait_bitset)\n+\t  struct timeval tv;\n+\t  struct timespec rt;\n+\n+\t  /* Get the current time.  */\n+\t  (void) __gettimeofday (&tv, NULL);\n+\n+\t  /* Compute relative timeout.  */\n+\t  rt.tv_sec = abstime->tv_sec - tv.tv_sec;\n+\t  rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;\n+\t  if (rt.tv_nsec < 0)\n+\t    {\n+\t      rt.tv_nsec += 1000000000;\n+\t      --rt.tv_sec;\n+\t    }\n+\n+\t  /* Already timed out?  */\n+\t  if (rt.tv_sec < 0)\n+\t    return ETIMEDOUT;\n+#endif\n+\n+\t  /* We cannot acquire the mutex nor has its owner died.  Thus, try\n+\t     to block using futexes.  Set FUTEX_WAITERS if necessary so that\n+\t     other threads are aware that there are potentially threads\n+\t     blocked on the futex.  Restart if oldval changed in the\n+\t     meantime.  */\n+\t  if ((oldval & FUTEX_WAITERS) == 0)\n+\t    {\n+\t      if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,\n+\t\t\t\t\t\t\toldval | FUTEX_WAITERS,\n+\t\t\t\t\t\t\toldval)\n+\t\t  != 0)\n+\t\t{\n+\t\t  oldval = mutex->__data.__lock;\n+\t\t  continue;\n+\t\t}\n+\t      oldval |= FUTEX_WAITERS;\n+\t    }\n+\n+\t  /* It is now possible that we share the FUTEX_WAITERS flag with\n+\t     another thread; therefore, update assume_other_futex_waiters so\n+\t     that we do not forget about this when handling other cases\n+\t     above and thus do not cause lost wake-ups.  */\n+\t  assume_other_futex_waiters |= FUTEX_WAITERS;\n+\n+\t  /* Block using the futex.  */\n+#if (!defined __ASSUME_FUTEX_CLOCK_REALTIME \\\n+     || !defined lll_futex_timed_wait_bitset)\n+\t  lll_futex_timed wait_64 (&mutex->__data.__lock, oldval,\n+\t\t\t\t&rt, PTHREAD_ROBUST_MUTEX_PSHARED (mutex));\n+#else\n+\t  int err = lll_futex_timed_wait_bitset_t64 (&mutex->__data.__lock,\n+\t      oldval, abstime, FUTEX_CLOCK_REALTIME,\n+\t      PTHREAD_ROBUST_MUTEX_PSHARED (mutex));\n+\t  /* The futex call timed out.  */\n+\t  if (err == -ETIMEDOUT)\n+\t    return -err;\n+#endif\n+\t  /* Reload current lock value.  
*/\n+\t  oldval = mutex->__data.__lock;\n+\t}\n+\n+      /* We have acquired the mutex; check if it is still consistent.  */\n+      if (__builtin_expect (mutex->__data.__owner\n+\t\t\t    == PTHREAD_MUTEX_NOTRECOVERABLE, 0))\n+\t{\n+\t  /* This mutex is now not recoverable.  */\n+\t  mutex->__data.__count = 0;\n+\t  int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);\n+\t  lll_unlock (mutex->__data.__lock, private);\n+\t  /* FIXME This violates the mutex destruction requirements.  See\n+\t     __pthread_mutex_unlock_full.  */\n+\t  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);\n+\t  return ENOTRECOVERABLE;\n+\t}\n+\n+      mutex->__data.__count = 1;\n+      /* We must not enqueue the mutex before we have acquired it.\n+\t Also see comments at ENQUEUE_MUTEX.  */\n+      __asm (\"\" ::: \"memory\");\n+      ENQUEUE_MUTEX (mutex);\n+      /* We need to clear op_pending after we enqueue the mutex.  */\n+      __asm (\"\" ::: \"memory\");\n+      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);\n+      break;\n+\n+    /* The PI support requires the Linux futex system call.  If that's not\n+       available, pthread_mutex_init should never have allowed the type to\n+       be set.  So it will get the default case for an invalid type.  */\n+#ifdef __NR_futex\n+    case PTHREAD_MUTEX_PI_RECURSIVE_NP:\n+    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:\n+    case PTHREAD_MUTEX_PI_NORMAL_NP:\n+    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:\n+    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:\n+    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:\n+    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:\n+    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:\n+      {\n+\tint kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;\n+\tint robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;\n+\n+\tif (robust)\n+\t  {\n+\t    /* Note: robust PI futexes are signaled by setting bit 0.  */\n+\t    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,\n+\t\t\t   (void *) (((uintptr_t) &mutex->__data.__list.__next)\n+\t\t\t\t     | 1));\n+\t    /* We need to set op_pending before starting the operation.  Also\n+\t       see comments at ENQUEUE_MUTEX.  */\n+\t    __asm (\"\" ::: \"memory\");\n+\t  }\n+\n+\toldval = mutex->__data.__lock;\n+\n+\t/* Check whether we already hold the mutex.  */\n+\tif (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))\n+\t  {\n+\t    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)\n+\t      {\n+\t\t/* We do not need to ensure ordering wrt another memory\n+\t\t   access.  */\n+\t\tTHREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);\n+\t\treturn EDEADLK;\n+\t      }\n+\n+\t    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)\n+\t      {\n+\t\t/* We do not need to ensure ordering wrt another memory\n+\t\t   access.  */\n+\t\tTHREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);\n+\n+\t\t/* Just bump the counter.  */\n+\t\tif (__glibc_unlikely (mutex->__data.__count + 1 == 0))\n+\t\t  /* Overflow of the counter.  */\n+\t\t  return EAGAIN;\n+\n+\t\t++mutex->__data.__count;\n+\n+\t\tLIBC_PROBE (mutex_timedlock_acquired, 1, mutex);\n+\n+\t\treturn 0;\n+\t      }\n+\t  }\n+\n+\toldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,\n+\t\t\t\t\t\t      id, 0);\n+\n+\tif (oldval != 0)\n+\t  {\n+\t    /* The mutex is locked.  The kernel will now take care of\n+\t       everything.  The timeout value must be a relative value.\n+\t       Convert it.  */\n+\t    int private = (robust\n+\t\t\t   ? 
PTHREAD_ROBUST_MUTEX_PSHARED (mutex)\n+\t\t\t   : PTHREAD_MUTEX_PSHARED (mutex));\n+\t    INTERNAL_SYSCALL_DECL (__err);\n+\n+\t    int e;\n+\t    \n+\t    if (abstime->tv_sec > INT_MAX)\n+\t    {\n+\t      e = EOVERFLOW;\n+\t    }\n+\t    else\n+\t    {\n+\t      struct timespec ts;\n+\t      ts.tv_sec = abstime->tv_sec;\n+\t      ts.tv_nsec = abstime->tv_nsec;\n+\t      e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,\n+\t\t\t\t      __lll_private_flag (FUTEX_LOCK_PI,\n+\t\t\t\t\t\t\t  private), 1,\n+\t\t\t\t      &ts);\n+\t    }\n+\t    if (INTERNAL_SYSCALL_ERROR_P (e, __err))\n+\t      {\n+\t\tif (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)\n+\t\t  return ETIMEDOUT;\n+\n+\t\tif (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH\n+\t\t    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)\n+\t\t  {\n+\t\t    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK\n+\t\t\t    || (kind != PTHREAD_MUTEX_ERRORCHECK_NP\n+\t\t\t\t&& kind != PTHREAD_MUTEX_RECURSIVE_NP));\n+\t\t    /* ESRCH can happen only for non-robust PI mutexes where\n+\t\t       the owner of the lock died.  */\n+\t\t    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH\n+\t\t\t    || !robust);\n+\n+\t\t    /* Delay the thread until the timeout is reached.\n+\t\t       Then return ETIMEDOUT.  */\n+\t\t    struct timespec reltime;\n+\t\t    struct __timespec64 now;\n+\n+\t\t    INTERNAL_SYSCALL (clock_gettime64, __err, 2, CLOCK_REALTIME,\n+\t\t\t\t      &now);\n+\t\t    reltime.tv_sec = abstime->tv_sec - now.tv_sec;\n+\t\t    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;\n+\t\t    if (reltime.tv_nsec < 0)\n+\t\t      {\n+\t\t\treltime.tv_nsec += 1000000000;\n+\t\t\t--reltime.tv_sec;\n+\t\t      }\n+\t\t    if (reltime.tv_sec >= 0)\n+\t\t      while (nanosleep_not_cancel (&reltime, &reltime) != 0)\n+\t\t\tcontinue;\n+\n+\t\t    return ETIMEDOUT;\n+\t\t  }\n+\n+\t\treturn INTERNAL_SYSCALL_ERRNO (e, __err);\n+\t      }\n+\n+\t    oldval = mutex->__data.__lock;\n+\n+\t    assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);\n+\t  }\n+\n+\tif (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))\n+\t  {\n+\t    atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);\n+\n+\t    /* We got the mutex.  */\n+\t    mutex->__data.__count = 1;\n+\t    /* But it is inconsistent unless marked otherwise.  */\n+\t    mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;\n+\n+\t    /* We must not enqueue the mutex before we have acquired it.\n+\t       Also see comments at ENQUEUE_MUTEX.  */\n+\t    __asm (\"\" ::: \"memory\");\n+\t    ENQUEUE_MUTEX_PI (mutex);\n+\t    /* We need to clear op_pending after we enqueue the mutex.  */\n+\t    __asm (\"\" ::: \"memory\");\n+\t    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);\n+\n+\t    /* Note that we deliberately exit here.  If we fall\n+\t       through to the end of the function __nusers would be\n+\t       incremented which is not correct because the old owner\n+\t       has to be discounted.  */\n+\t    return EOWNERDEAD;\n+\t  }\n+\n+\tif (robust\n+\t    && __builtin_expect (mutex->__data.__owner\n+\t\t\t\t == PTHREAD_MUTEX_NOTRECOVERABLE, 0))\n+\t  {\n+\t    /* This mutex is now not recoverable.  
*/\n+\t    mutex->__data.__count = 0;\n+\n+\t    INTERNAL_SYSCALL_DECL (__err);\n+\t    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,\n+\t\t\t      __lll_private_flag (FUTEX_UNLOCK_PI,\n+\t\t\t\t\t\t  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),\n+\t\t\t      0, 0);\n+\n+\t    /* To the kernel, this will be visible after the kernel has\n+\t       acquired the mutex in the syscall.  */\n+\t    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);\n+\t    return ENOTRECOVERABLE;\n+\t  }\n+\n+\tmutex->__data.__count = 1;\n+\tif (robust)\n+\t  {\n+\t    /* We must not enqueue the mutex before we have acquired it.\n+\t       Also see comments at ENQUEUE_MUTEX.  */\n+\t    __asm (\"\" ::: \"memory\");\n+\t    ENQUEUE_MUTEX_PI (mutex);\n+\t    /* We need to clear op_pending after we enqueue the mutex.  */\n+\t    __asm (\"\" ::: \"memory\");\n+\t    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);\n+\t  }\n+\t}\n+      break;\n+#endif  /* __NR_futex.  */\n+\n+    case PTHREAD_MUTEX_PP_RECURSIVE_NP:\n+    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:\n+    case PTHREAD_MUTEX_PP_NORMAL_NP:\n+    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:\n+      {\n+\tint kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;\n+\n+\toldval = mutex->__data.__lock;\n+\n+\t/* Check whether we already hold the mutex.  */\n+\tif (mutex->__data.__owner == id)\n+\t  {\n+\t    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)\n+\t      return EDEADLK;\n+\n+\t    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)\n+\t      {\n+\t\t/* Just bump the counter.  */\n+\t\tif (__glibc_unlikely (mutex->__data.__count + 1 == 0))\n+\t\t  /* Overflow of the counter.  */\n+\t\t  return EAGAIN;\n+\n+\t\t++mutex->__data.__count;\n+\n+\t\tLIBC_PROBE (mutex_timedlock_acquired, 1, mutex);\n+\n+\t\treturn 0;\n+\t      }\n+\t  }\n+\n+\tint oldprio = -1, ceilval;\n+\tdo\n+\t  {\n+\t    int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)\n+\t\t\t  >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;\n+\n+\t    if (__pthread_current_priority () > ceiling)\n+\t      {\n+\t\tresult = EINVAL;\n+\t      failpp:\n+\t\tif (oldprio != -1)\n+\t\t  __pthread_tpp_change_priority (oldprio, -1);\n+\t\treturn result;\n+\t      }\n+\n+\t    result = __pthread_tpp_change_priority (oldprio, ceiling);\n+\t    if (result)\n+\t      return result;\n+\n+\t    ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;\n+\t    oldprio = ceiling;\n+\n+\t    oldval\n+\t      = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,\n+\t\t\t\t\t\t     ceilval | 1, ceilval);\n+\n+\t    if (oldval == ceilval)\n+\t      break;\n+\n+\t    do\n+\t      {\n+\t\toldval\n+\t\t  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,\n+\t\t\t\t\t\t\t ceilval | 2,\n+\t\t\t\t\t\t\t ceilval | 1);\n+\n+\t\tif ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)\n+\t\t  break;\n+\n+\t\tif (oldval != ceilval)\n+\t\t  {\n+\t\t    /* Reject invalid timeouts.  */\n+\t\t    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)\n+\t\t      {\n+\t\t\tresult = EINVAL;\n+\t\t\tgoto failpp;\n+\t\t      }\n+\n+\t\t    struct timeval tv;\n+\t\t    struct timespec rt;\n+\n+\t\t    /* Get the current time.  */\n+\t\t    (void) __gettimeofday (&tv, NULL);\n+\n+\t\t    /* Compute relative timeout.  */\n+\t\t    rt.tv_sec = abstime->tv_sec - tv.tv_sec;\n+\t\t    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;\n+\t\t    if (rt.tv_nsec < 0)\n+\t\t      {\n+\t\t\trt.tv_nsec += 1000000000;\n+\t\t\t--rt.tv_sec;\n+\t\t      }\n+\n+\t\t    /* Already timed out?  
*/\n+\t\t    if (rt.tv_sec < 0)\n+\t\t      {\n+\t\t\tresult = ETIMEDOUT;\n+\t\t\tgoto failpp;\n+\t\t      }\n+\n+\t\t    lll_futex_timed_wait (&mutex->__data.__lock,\n+\t\t\t\t\t  ceilval | 2, &rt,\n+\t\t\t\t\t  PTHREAD_MUTEX_PSHARED (mutex));\n+\t\t  }\n+\t      }\n+\t    while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,\n+\t\t\t\t\t\t\tceilval | 2, ceilval)\n+\t\t   != ceilval);\n+\t  }\n+\twhile ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);\n+\n+\tassert (mutex->__data.__owner == 0);\n+\tmutex->__data.__count = 1;\n+      }\n+      break;\n+\n+    default:\n+      /* Correct code cannot set any other type.  */\n+      return EINVAL;\n+    }\n+\n+  if (result == 0)\n+    {\n+      /* Record the ownership.  */\n+      mutex->__data.__owner = id;\n+      ++mutex->__data.__nusers;\n+\n+      LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);\n+    }\n+\n+ out:\n+  return result;\n+}\ndiff --git a/nptl/pthread_rwlock_common.c b/nptl/pthread_rwlock_common.c\nindex 846687e1cf..b07f86342d 100644\n--- a/nptl/pthread_rwlock_common.c\n+++ b/nptl/pthread_rwlock_common.c\n@@ -507,6 +507,240 @@ __pthread_rwlock_rdlock_full (pthread_rwlock_t *rwlock,\n   return 0;\n }\n \n+/* 64-bit time version */\n+\n+static __always_inline int\n+__pthread_rwlock_rdlock_full_t64 (pthread_rwlock_t *rwlock,\n+    const struct __timespec64 *abstime)\n+{\n+  unsigned int r;\n+\n+  /* Make sure we are not holding the rwlock as a writer.  This is a deadlock\n+     situation we recognize and report.  */\n+  if (__glibc_unlikely (atomic_load_relaxed (&rwlock->__data.__cur_writer)\n+      == THREAD_GETMEM (THREAD_SELF, tid)))\n+    return EDEADLK;\n+\n+  /* If we prefer writers, recursive rdlock is disallowed, we are in a read\n+     phase, and there are other readers present, we try to wait without\n+     extending the read phase.  We will be unblocked by either one of the\n+     other active readers, or if the writer gives up WRLOCKED (e.g., on\n+     timeout).\n+     If there are no other readers, we simply race with any existing primary\n+     writer; it would have been a race anyway, and changing the odds slightly\n+     will likely not make a big difference.  */\n+  if (rwlock->__data.__flags == PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP)\n+    {\n+      r = atomic_load_relaxed (&rwlock->__data.__readers);\n+      while (((r & PTHREAD_RWLOCK_WRPHASE) == 0)\n+\t      && ((r & PTHREAD_RWLOCK_WRLOCKED) != 0)\n+\t      && ((r >> PTHREAD_RWLOCK_READER_SHIFT) > 0))\n+\t{\n+\t  /* TODO Spin first.  */\n+\t  /* Try setting the flag signaling that we are waiting without having\n+\t     incremented the number of readers.  Relaxed MO is fine because\n+\t     this is just about waiting for a state change in __readers.  */\n+\t  if (atomic_compare_exchange_weak_relaxed\n+\t      (&rwlock->__data.__readers, &r, r | PTHREAD_RWLOCK_RWAITING))\n+\t    {\n+\t      /* Wait for as long as the flag is set.  An ABA situation is\n+\t\t harmless because the flag is just about the state of\n+\t\t __readers, and all threads set the flag under the same\n+\t\t conditions.  */\n+\t      while ((atomic_load_relaxed (&rwlock->__data.__readers)\n+\t\t  & PTHREAD_RWLOCK_RWAITING) != 0)\n+\t\t{\n+\t\t  int private = __pthread_rwlock_get_private (rwlock);\n+\t\t  int err = futex_abstimed_wait_t64 (&rwlock->__data.__readers,\n+\t\t      r, abstime, private);\n+\t\t  /* We ignore EAGAIN and EINTR.  On time-outs, we can just\n+\t\t     return because we don't need to clean up anything.  
*/\n+\t\t  if (err == ETIMEDOUT)\n+\t\t    return err;\n+\t\t}\n+\t      /* It makes sense to not break out of the outer loop here\n+\t\t because we might be in the same situation again.  */\n+\t    }\n+\t  else\n+\t    {\n+\t      /* TODO Back-off.  */\n+\t    }\n+\t}\n+    }\n+  /* Register as a reader, using an add-and-fetch so that R can be used as\n+     expected value for future operations.  Acquire MO so we synchronize with\n+     prior writers as well as the last reader of the previous read phase (see\n+     below).  */\n+  r = atomic_fetch_add_acquire (&rwlock->__data.__readers,\n+      (1 << PTHREAD_RWLOCK_READER_SHIFT)) + (1 << PTHREAD_RWLOCK_READER_SHIFT);\n+\n+  /* Check whether there is an overflow in the number of readers.  We assume\n+     that the total number of threads is less than half the maximum number\n+     of readers that we have bits for in __readers (i.e., with 32-bit int and\n+     PTHREAD_RWLOCK_READER_SHIFT of 3, we assume there are less than\n+     1 << (32-3-1) concurrent threads).\n+     If there is an overflow, we use a CAS to try to decrement the number of\n+     readers if there still is an overflow situation.  If so, we return\n+     EAGAIN; if not, we are not a thread causing an overflow situation, and so\n+     we just continue.  Using a fetch-add instead of the CAS isn't possible\n+     because other readers might release the lock concurrently, which could\n+     make us the last reader and thus responsible for handing ownership over\n+     to writers (which requires a CAS too to make the decrement and ownership\n+     transfer indivisible).  */\n+  while (__glibc_unlikely (r >= PTHREAD_RWLOCK_READER_OVERFLOW))\n+    {\n+      /* Relaxed MO is okay because we just want to undo our registration and\n+\t cannot have changed the rwlock state substantially if the CAS\n+\t succeeds.  */\n+      if (atomic_compare_exchange_weak_relaxed (&rwlock->__data.__readers, &r,\n+\t  r - (1 << PTHREAD_RWLOCK_READER_SHIFT)))\n+\treturn EAGAIN;\n+    }\n+\n+  /* We have registered as a reader, so if we are in a read phase, we have\n+     acquired a read lock.  This is also the reader--reader fast-path.\n+     Even if there is a primary writer, we just return.  If writers are to\n+     be preferred and we are the only active reader, we could try to enter a\n+     write phase to let the writer proceed.  This would be okay because we\n+     cannot have acquired the lock previously as a reader (which could result\n+     in deadlock if we would wait for the primary writer to run).  However,\n+     this seems to be a corner case and handling it specially not be worth the\n+     complexity.  */\n+  if (__glibc_likely ((r & PTHREAD_RWLOCK_WRPHASE) == 0))\n+    return 0;\n+\n+  /* If there is no primary writer but we are in a write phase, we can try\n+     to install a read phase ourself.  */\n+  while (((r & PTHREAD_RWLOCK_WRPHASE) != 0)\n+      && ((r & PTHREAD_RWLOCK_WRLOCKED) == 0))\n+    {\n+       /* Try to enter a read phase: If the CAS below succeeds, we have\n+\t ownership; if it fails, we will simply retry and reassess the\n+\t situation.\n+\t Acquire MO so we synchronize with prior writers.  */\n+      if (atomic_compare_exchange_weak_acquire (&rwlock->__data.__readers, &r,\n+\t  r ^ PTHREAD_RWLOCK_WRPHASE))\n+\t{\n+\t  /* We started the read phase, so we are also responsible for\n+\t     updating the write-phase futex.  
Relaxed MO is sufficient.\n+\t     Note that there can be no other reader that we have to wake\n+\t     because all other readers will see the read phase started by us\n+\t     (or they will try to start it themselves); if a writer started\n+\t     the read phase, we cannot have started it.  Furthermore, we\n+\t     cannot discard a PTHREAD_RWLOCK_FUTEX_USED flag because we will\n+\t     overwrite the value set by the most recent writer (or the readers\n+\t     before it in case of explicit hand-over) and we know that there\n+\t     are no waiting readers.  */\n+\t  atomic_store_relaxed (&rwlock->__data.__wrphase_futex, 0);\n+\t  return 0;\n+\t}\n+      else\n+\t{\n+\t  /* TODO Back off before retrying.  Also see above.  */\n+\t}\n+    }\n+\n+  if ((r & PTHREAD_RWLOCK_WRPHASE) != 0)\n+    {\n+      /* We are in a write phase, and there must be a primary writer because\n+\t of the previous loop.  Block until the primary writer gives up the\n+\t write phase.  This case requires explicit hand-over using\n+\t __wrphase_futex.\n+\t However, __wrphase_futex might not have been set to 1 yet (either\n+\t because explicit hand-over to the writer is still ongoing, or because\n+\t the writer has started the write phase but does not yet have updated\n+\t __wrphase_futex).  The least recent value of __wrphase_futex we can\n+\t read from here is the modification of the last read phase (because\n+\t we synchronize with the last reader in this read phase through\n+\t __readers; see the use of acquire MO on the fetch_add above).\n+\t Therefore, if we observe a value of 0 for __wrphase_futex, we need\n+\t to subsequently check that __readers now indicates a read phase; we\n+\t need to use acquire MO for this so that if we observe a read phase,\n+\t we will also see the modification of __wrphase_futex by the previous\n+\t writer.  We then need to load __wrphase_futex again and continue to\n+\t wait if it is not 0, so that we do not skip explicit hand-over.\n+\t Relaxed MO is sufficient for the load from __wrphase_futex because\n+\t we just use it as an indicator for when we can proceed; we use\n+\t __readers and the acquire MO accesses to it to eventually read from\n+\t the proper stores to __wrphase_futex.  */\n+      unsigned int wpf;\n+      bool ready = false;\n+      for (;;)\n+\t{\n+\t  while (((wpf = atomic_load_relaxed (&rwlock->__data.__wrphase_futex))\n+\t      | PTHREAD_RWLOCK_FUTEX_USED) == (1 | PTHREAD_RWLOCK_FUTEX_USED))\n+\t    {\n+\t      int private = __pthread_rwlock_get_private (rwlock);\n+\t      if (((wpf & PTHREAD_RWLOCK_FUTEX_USED) == 0)\n+\t\t  && !atomic_compare_exchange_weak_relaxed\n+\t\t      (&rwlock->__data.__wrphase_futex,\n+\t\t       &wpf, wpf | PTHREAD_RWLOCK_FUTEX_USED))\n+\t\tcontinue;\n+\t      int err = futex_abstimed_wait_t64 (&rwlock->__data.__wrphase_futex,\n+\t\t  1 | PTHREAD_RWLOCK_FUTEX_USED, abstime, private);\n+\t      if (err == ETIMEDOUT)\n+\t\t{\n+\t\t  /* If we timed out, we need to unregister.  If no read phase\n+\t\t     has been installed while we waited, we can just decrement\n+\t\t     the number of readers.  Otherwise, we just acquire the\n+\t\t     lock, which is allowed because we give no precise timing\n+\t\t     guarantees, and because the timeout is only required to\n+\t\t     be in effect if we would have had to wait for other\n+\t\t     threads (e.g., if futex_wait would time-out immediately\n+\t\t     because the given absolute time is in the past).  
*/\n+\t\t  r = atomic_load_relaxed (&rwlock->__data.__readers);\n+\t\t  while ((r & PTHREAD_RWLOCK_WRPHASE) != 0)\n+\t\t    {\n+\t\t      /* We don't need to make anything else visible to\n+\t\t\t others besides unregistering, so relaxed MO is\n+\t\t\t sufficient.  */\n+\t\t      if (atomic_compare_exchange_weak_relaxed\n+\t\t\t  (&rwlock->__data.__readers, &r,\n+\t\t\t   r - (1 << PTHREAD_RWLOCK_READER_SHIFT)))\n+\t\t\treturn ETIMEDOUT;\n+\t\t      /* TODO Back-off.  */\n+\t\t    }\n+\t\t  /* Use the acquire MO fence to mirror the steps taken in the\n+\t\t     non-timeout case.  Note that the read can happen both\n+\t\t     in the atomic_load above as well as in the failure case\n+\t\t     of the CAS operation.  */\n+\t\t  atomic_thread_fence_acquire ();\n+\t\t  /* We still need to wait for explicit hand-over, but we must\n+\t\t     not use futex_wait anymore because we would just time out\n+\t\t     in this case and thus make the spin-waiting we need\n+\t\t     unnecessarily expensive.  */\n+\t\t  while ((atomic_load_relaxed (&rwlock->__data.__wrphase_futex)\n+\t\t      | PTHREAD_RWLOCK_FUTEX_USED)\n+\t\t      == (1 | PTHREAD_RWLOCK_FUTEX_USED))\n+\t\t    {\n+\t\t      /* TODO Back-off?  */\n+\t\t    }\n+\t\t  ready = true;\n+\t\t  break;\n+\t\t}\n+\t      /* If we got interrupted (EINTR) or the futex word does not have the\n+\t\t expected value (EAGAIN), retry.  */\n+\t    }\n+\t  if (ready)\n+\t    /* See below.  */\n+\t    break;\n+\t  /* We need acquire MO here so that we synchronize with the lock\n+\t     release of the writer, and so that we observe a recent value of\n+\t     __wrphase_futex (see below).  */\n+\t  if ((atomic_load_acquire (&rwlock->__data.__readers)\n+\t      & PTHREAD_RWLOCK_WRPHASE) == 0)\n+\t    /* We are in a read phase now, so the least recent modification of\n+\t       __wrphase_futex we can read from is the store by the writer\n+\t       with value 1.  Thus, only now we can assume that if we observe\n+\t       a value of 0, explicit hand-over is finished. Retry the loop\n+\t       above one more time.  */\n+\t    ready = true;\n+\t}\n+    }\n+\n+  return 0;\n+}\n+\n \n static __always_inline void\n __pthread_rwlock_wrunlock (pthread_rwlock_t *rwlock)\n@@ -924,3 +1158,360 @@ __pthread_rwlock_wrlock_full (pthread_rwlock_t *rwlock,\n       THREAD_GETMEM (THREAD_SELF, tid));\n   return 0;\n }\n+\n+/* 64-bit time version */\n+\n+static __always_inline int\n+__pthread_rwlock_wrlock_full_t64 (pthread_rwlock_t *rwlock,\n+    const struct __timespec64 *abstime)\n+{\n+  /* Make sure we are not holding the rwlock as a writer.  This is a deadlock\n+     situation we recognize and report.  */\n+  if (__glibc_unlikely (atomic_load_relaxed (&rwlock->__data.__cur_writer)\n+      == THREAD_GETMEM (THREAD_SELF, tid)))\n+    return EDEADLK;\n+\n+  /* First we try to acquire the role of primary writer by setting WRLOCKED;\n+     if it was set before, there already is a primary writer.  Acquire MO so\n+     that we synchronize with previous primary writers.\n+\n+     We do not try to change to a write phase right away using a fetch_or\n+     because we would have to reset it again and wake readers if there are\n+     readers present (some readers could try to acquire the lock more than\n+     once, so setting a write phase in the middle of this could cause\n+     deadlock).  
Changing to a write phase eagerly would only speed up the\n+     transition from a read phase to a write phase in the uncontended case,\n+     but it would slow down the contended case if readers are preferred (which\n+     is the default).\n+     We could try to CAS from a state with no readers to a write phase, but\n+     this could be less scalable if readers arrive and leave frequently.  */\n+  bool may_share_futex_used_flag = false;\n+  unsigned int r = atomic_fetch_or_acquire (&rwlock->__data.__readers,\n+      PTHREAD_RWLOCK_WRLOCKED);\n+  if (__glibc_unlikely ((r & PTHREAD_RWLOCK_WRLOCKED) != 0))\n+    {\n+      /* There is another primary writer.  */\n+      bool prefer_writer =\n+\t  (rwlock->__data.__flags != PTHREAD_RWLOCK_PREFER_READER_NP);\n+      if (prefer_writer)\n+\t{\n+\t  /* We register as a waiting writer, so that we can make use of\n+\t     writer--writer hand-over.  Relaxed MO is fine because we just\n+\t     want to register.  We assume that the maximum number of threads\n+\t     is less than the capacity in __writers.  */\n+\t  atomic_fetch_add_relaxed (&rwlock->__data.__writers, 1);\n+\t}\n+      for (;;)\n+\t{\n+\t  /* TODO Spin until WRLOCKED is 0 before trying the CAS below.\n+\t     But pay attention to not delay trying writer--writer hand-over\n+\t     for too long (which we must try eventually anyway).  */\n+\t  if ((r & PTHREAD_RWLOCK_WRLOCKED) == 0)\n+\t    {\n+\t      /* Try to become the primary writer or retry.  Acquire MO as in\n+\t\t the fetch_or above.  */\n+\t      if (atomic_compare_exchange_weak_acquire\n+\t\t  (&rwlock->__data.__readers, &r,\n+\t\t      r | PTHREAD_RWLOCK_WRLOCKED))\n+\t\t{\n+\t\t  if (prefer_writer)\n+\t\t    {\n+\t\t      /* Unregister as a waiting writer.  Note that because we\n+\t\t\t acquired WRLOCKED, WRHANDOVER will not be set.\n+\t\t\t Acquire MO on the CAS above ensures that\n+\t\t\t unregistering happens after the previous writer;\n+\t\t\t this sorts the accesses to __writers by all\n+\t\t\t primary writers in a useful way (e.g., any other\n+\t\t\t primary writer acquiring after us or getting it from\n+\t\t\t us through WRHANDOVER will see both our changes to\n+\t\t\t __writers).\n+\t\t\t ??? Perhaps this is not strictly necessary for\n+\t\t\t reasons we do not yet know of.  */\n+\t\t      atomic_fetch_add_relaxed (&rwlock->__data.__writers,\n+\t\t\t  -1);\n+\t\t    }\n+\t\t  break;\n+\t\t}\n+\t      /* Retry if the CAS fails (r will have been updated).  */\n+\t      continue;\n+\t    }\n+\t  /* If writer--writer hand-over is available, try to become the\n+\t     primary writer this way by grabbing the WRHANDOVER token.  If we\n+\t     succeed, we own WRLOCKED.  */\n+\t  if (prefer_writer)\n+\t    {\n+\t      unsigned int w = atomic_load_relaxed\n+\t\t  (&rwlock->__data.__writers);\n+\t      if ((w & PTHREAD_RWLOCK_WRHANDOVER) != 0)\n+\t\t{\n+\t\t  /* Acquire MO is required here so that we synchronize with\n+\t\t     the writer that handed over WRLOCKED.  We also need this\n+\t\t     for the reload of __readers below because our view of\n+\t\t     __readers must be at least as recent as the view of the\n+\t\t     writer that handed over WRLOCKED; we must avoid an ABA\n+\t\t     through WRHANDOVER, which could, for example, lead to us\n+\t\t     assuming we are still in a write phase when in fact we\n+\t\t     are not.  
*/\n+\t\t  if (atomic_compare_exchange_weak_acquire\n+\t\t      (&rwlock->__data.__writers,\n+\t\t       &w, (w - PTHREAD_RWLOCK_WRHANDOVER - 1)))\n+\t\t    {\n+\t\t      /* Reload so our view is consistent with the view of\n+\t\t\t the previous owner of WRLOCKED.  See above.  */\n+\t\t      r = atomic_load_relaxed (&rwlock->__data.__readers);\n+\t\t      break;\n+\t\t    }\n+\t\t  /* We do not need to reload __readers here.  We should try\n+\t\t     to perform writer--writer hand-over if possible; if it\n+\t\t     is not possible anymore, we will reload __readers\n+\t\t     elsewhere in this loop.  */\n+\t\t  continue;\n+\t\t}\n+\t    }\n+\t  /* We did not acquire WRLOCKED nor were able to use writer--writer\n+\t     hand-over, so we block on __writers_futex.  */\n+\t  int private = __pthread_rwlock_get_private (rwlock);\n+\t  unsigned int wf = atomic_load_relaxed\n+\t      (&rwlock->__data.__writers_futex);\n+\t  if (((wf & ~(unsigned int) PTHREAD_RWLOCK_FUTEX_USED) != 1)\n+\t      || ((wf != (1 | PTHREAD_RWLOCK_FUTEX_USED))\n+\t\t  && !atomic_compare_exchange_weak_relaxed\n+\t\t      (&rwlock->__data.__writers_futex, &wf,\n+\t\t       1 | PTHREAD_RWLOCK_FUTEX_USED)))\n+\t    {\n+\t      /* If we cannot block on __writers_futex because there is no\n+\t\t primary writer, or we cannot set PTHREAD_RWLOCK_FUTEX_USED,\n+\t\t we retry.  We must reload __readers here in case we cannot\n+\t\t block on __writers_futex so that we can become the primary\n+\t\t writer and are not stuck in a loop that just continuously\n+\t\t fails to block on __writers_futex.  */\n+\t      r = atomic_load_relaxed (&rwlock->__data.__readers);\n+\t      continue;\n+\t    }\n+\t  /* We set the flag that signals that the futex is used, or we could\n+\t     have set it if we had been faster than other waiters.  As a\n+\t     result, we may share the flag with an unknown number of other\n+\t     writers.  Therefore, we must keep this flag set when we acquire\n+\t     the lock.  We do not need to do this when we do not reach this\n+\t     point here because then we are not part of the group that may\n+\t     share the flag, and another writer will wake one of the writers\n+\t     in this group.  */\n+\t  may_share_futex_used_flag = true;\n+\t  int err = futex_abstimed_wait_t64 (&rwlock->__data.__writers_futex,\n+\t      1 | PTHREAD_RWLOCK_FUTEX_USED, abstime, private);\n+\t  if (err == ETIMEDOUT)\n+\t    {\n+\t      if (prefer_writer)\n+\t\t{\n+\t\t  /* We need to unregister as a waiting writer.  If we are the\n+\t\t     last writer and writer--writer hand-over is available,\n+\t\t     we must make use of it because nobody else will reset\n+\t\t     WRLOCKED otherwise.  (If we use it, we simply pretend\n+\t\t     that this happened before the timeout; see\n+\t\t     pthread_rwlock_rdlock_full for the full reasoning.)\n+\t\t     Also see the similar code above.  */\n+\t\t  unsigned int w = atomic_load_relaxed\n+\t\t      (&rwlock->__data.__writers);\n+\t\t  while (!atomic_compare_exchange_weak_acquire\n+\t\t      (&rwlock->__data.__writers, &w,\n+\t\t\t  (w == PTHREAD_RWLOCK_WRHANDOVER + 1 ? 0 : w - 1)))\n+\t\t    {\n+\t\t      /* TODO Back-off.  */\n+\t\t    }\n+\t\t  if (w == PTHREAD_RWLOCK_WRHANDOVER + 1)\n+\t\t    {\n+\t\t      /* We must continue as primary writer.  See above.  */\n+\t\t      r = atomic_load_relaxed (&rwlock->__data.__readers);\n+\t\t      break;\n+\t\t    }\n+\t\t}\n+\t      /* We cleaned up and cannot have stolen another waiting writer's\n+\t\t futex wake-up, so just return.  
*/\n+\t      return ETIMEDOUT;\n+\t    }\n+\t  /* If we got interrupted (EINTR) or the futex word does not have the\n+\t     expected value (EAGAIN), retry after reloading __readers.  */\n+\t  r = atomic_load_relaxed (&rwlock->__data.__readers);\n+\t}\n+      /* Our snapshot of __readers is up-to-date at this point because we\n+\t either set WRLOCKED using a CAS or were handed over WRLOCKED from\n+\t another writer whose snapshot of __readers we inherit.  */\n+    }\n+\n+  /* If we are in a read phase and there are no readers, try to start a write\n+     phase.  */\n+  while (((r & PTHREAD_RWLOCK_WRPHASE) == 0)\n+      && ((r >> PTHREAD_RWLOCK_READER_SHIFT) == 0))\n+    {\n+      /* Acquire MO so that we synchronize with prior writers and do\n+\t not interfere with their updates to __writers_futex, as well\n+\t as regarding prior readers and their updates to __wrphase_futex,\n+\t respectively.  */\n+      if (atomic_compare_exchange_weak_acquire (&rwlock->__data.__readers,\n+\t  &r, r | PTHREAD_RWLOCK_WRPHASE))\n+\t{\n+\t  /* We have started a write phase, so we need to enable readers to wait.\n+\t     See the similar case in __pthread_rwlock_rdlock_full.  */\n+\t  atomic_store_relaxed (&rwlock->__data.__wrphase_futex, 1);\n+\t  /* Make sure we fall through to the end of the function.  */\n+\t  r |= PTHREAD_RWLOCK_WRPHASE;\n+\t  break;\n+\t}\n+      /* TODO Back-off.  */\n+    }\n+\n+  /* We are the primary writer; enable blocking on __writers_futex.  Relaxed\n+     MO is sufficient for futex words; acquire MO on the previous\n+     modifications of __readers ensures that this store happens after the\n+     store of value 0 by the previous primary writer.  */\n+  atomic_store_relaxed (&rwlock->__data.__writers_futex,\n+      1 | (may_share_futex_used_flag ? PTHREAD_RWLOCK_FUTEX_USED : 0));\n+\n+  if (__glibc_unlikely ((r & PTHREAD_RWLOCK_WRPHASE) == 0))\n+    {\n+      /* We are not in a write phase, so there must be readers (because of the\n+\t previous loop).  Thus, we have to wait for explicit hand-over from\n+\t one of these readers.\n+\t We basically do the same steps as for the similar case in\n+\t __pthread_rwlock_rdlock_full, except that we additionally might try\n+\t to directly hand over to another writer and need to wake up\n+\t other writers or waiting readers (i.e., PTHREAD_RWLOCK_RWAITING).  */\n+      unsigned int wpf;\n+      bool ready = false;\n+      for (;;)\n+\t{\n+\t  while (((wpf = atomic_load_relaxed (&rwlock->__data.__wrphase_futex))\n+\t      | PTHREAD_RWLOCK_FUTEX_USED) == PTHREAD_RWLOCK_FUTEX_USED)\n+\t    {\n+\t      int private = __pthread_rwlock_get_private (rwlock);\n+\t      if (((wpf & PTHREAD_RWLOCK_FUTEX_USED) == 0)\n+\t\t  && !atomic_compare_exchange_weak_relaxed\n+\t\t      (&rwlock->__data.__wrphase_futex, &wpf,\n+\t\t       PTHREAD_RWLOCK_FUTEX_USED))\n+\t\tcontinue;\n+\t      int err = futex_abstimed_wait_t64 (&rwlock->__data.__wrphase_futex,\n+\t\t  PTHREAD_RWLOCK_FUTEX_USED, abstime, private);\n+\t      if (err == ETIMEDOUT)\n+\t\t{\n+\t\t  if (rwlock->__data.__flags\n+\t\t      != PTHREAD_RWLOCK_PREFER_READER_NP)\n+\t\t    {\n+\t\t      /* We try writer--writer hand-over.  
*/\n+\t\t      unsigned int w = atomic_load_relaxed\n+\t\t\t  (&rwlock->__data.__writers);\n+\t\t      if (w != 0)\n+\t\t\t{\n+\t\t\t  /* We are about to hand over WRLOCKED, so we must\n+\t\t\t     release __writers_futex too; otherwise, we'd have\n+\t\t\t     a pending store, which could at least prevent\n+\t\t\t     other threads from waiting using the futex\n+\t\t\t     because it could interleave with the stores\n+\t\t\t     by subsequent writers.  In turn, this means that\n+\t\t\t     we have to clean up when we do not hand over\n+\t\t\t     WRLOCKED.\n+\t\t\t     Release MO so that another writer that gets\n+\t\t\t     WRLOCKED from us can take over our view of\n+\t\t\t     __readers.  */\n+\t\t\t  unsigned int wf = atomic_exchange_relaxed\n+\t\t\t      (&rwlock->__data.__writers_futex, 0);\n+\t\t\t  while (w != 0)\n+\t\t\t    {\n+\t\t\t      if (atomic_compare_exchange_weak_release\n+\t\t\t\t  (&rwlock->__data.__writers, &w,\n+\t\t\t\t      w | PTHREAD_RWLOCK_WRHANDOVER))\n+\t\t\t\t{\n+\t\t\t\t  /* Wake other writers.  */\n+\t\t\t\t  if ((wf & PTHREAD_RWLOCK_FUTEX_USED) != 0)\n+\t\t\t\t    futex_wake\n+\t\t\t\t\t(&rwlock->__data.__writers_futex, 1,\n+\t\t\t\t\t private);\n+\t\t\t\t  return ETIMEDOUT;\n+\t\t\t\t}\n+\t\t\t      /* TODO Back-off.  */\n+\t\t\t    }\n+\t\t\t  /* We still own WRLOCKED and someone else might set\n+\t\t\t     a write phase concurrently, so enable waiting\n+\t\t\t     again.  Make sure we don't lose the flag that\n+\t\t\t     signals whether there are threads waiting on\n+\t\t\t     this futex.  */\n+\t\t\t  atomic_store_relaxed\n+\t\t\t      (&rwlock->__data.__writers_futex, wf);\n+\t\t\t}\n+\t\t    }\n+\t\t  /* If we timed out and we are not in a write phase, we can\n+\t\t     just stop being a primary writer.  Otherwise, we just\n+\t\t     acquire the lock.  */\n+\t\t  r = atomic_load_relaxed (&rwlock->__data.__readers);\n+\t\t  if ((r & PTHREAD_RWLOCK_WRPHASE) == 0)\n+\t\t    {\n+\t\t      /* We are about to release WRLOCKED, so we must release\n+\t\t\t __writers_futex too; see the handling of\n+\t\t\t writer--writer hand-over above.  */\n+\t\t      unsigned int wf = atomic_exchange_relaxed\n+\t\t\t  (&rwlock->__data.__writers_futex, 0);\n+\t\t      while ((r & PTHREAD_RWLOCK_WRPHASE) == 0)\n+\t\t\t{\n+\t\t\t  /* While we don't need to make anything from a\n+\t\t\t     caller's critical section visible to other\n+\t\t\t     threads, we need to ensure that our changes to\n+\t\t\t     __writers_futex are properly ordered.\n+\t\t\t     Therefore, use release MO to synchronize with\n+\t\t\t     subsequent primary writers.  Also wake up any\n+\t\t\t     waiting readers as they are waiting because of\n+\t\t\t     us.  */\n+\t\t\t  if (atomic_compare_exchange_weak_release\n+\t\t\t      (&rwlock->__data.__readers, &r,\n+\t\t\t       (r ^ PTHREAD_RWLOCK_WRLOCKED)\n+\t\t\t       & ~(unsigned int) PTHREAD_RWLOCK_RWAITING))\n+\t\t\t    {\n+\t\t\t      /* Wake other writers.  */\n+\t\t\t      if ((wf & PTHREAD_RWLOCK_FUTEX_USED) != 0)\n+\t\t\t\tfutex_wake (&rwlock->__data.__writers_futex,\n+\t\t\t\t    1, private);\n+\t\t\t      /* Wake waiting readers.  
*/\n+\t\t\t      if ((r & PTHREAD_RWLOCK_RWAITING) != 0)\n+\t\t\t\tfutex_wake (&rwlock->__data.__readers,\n+\t\t\t\t    INT_MAX, private);\n+\t\t\t      return ETIMEDOUT;\n+\t\t\t    }\n+\t\t\t}\n+\t\t      /* We still own WRLOCKED and someone else might set a\n+\t\t\t write phase concurrently, so enable waiting again.\n+\t\t\t Make sure we don't lose the flag that signals\n+\t\t\t whether there are threads waiting on this futex.  */\n+\t\t      atomic_store_relaxed (&rwlock->__data.__writers_futex,\n+\t\t\t  wf);\n+\t\t    }\n+\t\t  /* Use the acquire MO fence to mirror the steps taken in the\n+\t\t     non-timeout case.  Note that the read can happen both\n+\t\t     in the atomic_load above as well as in the failure case\n+\t\t     of the CAS operation.  */\n+\t\t  atomic_thread_fence_acquire ();\n+\t\t  /* We still need to wait for explicit hand-over, but we must\n+\t\t     not use futex_wait anymore.  */\n+\t\t  while ((atomic_load_relaxed\n+\t\t      (&rwlock->__data.__wrphase_futex)\n+\t\t       | PTHREAD_RWLOCK_FUTEX_USED)\n+\t\t      == PTHREAD_RWLOCK_FUTEX_USED)\n+\t\t    {\n+\t\t      /* TODO Back-off.  */\n+\t\t    }\n+\t\t  ready = true;\n+\t\t  break;\n+\t\t}\n+\t      /* If we got interrupted (EINTR) or the futex word does not have\n+\t\t the expected value (EAGAIN), retry.  */\n+\t    }\n+\t  /* See pthread_rwlock_rdlock_full.  */\n+\t  if (ready)\n+\t    break;\n+\t  if ((atomic_load_acquire (&rwlock->__data.__readers)\n+\t      & PTHREAD_RWLOCK_WRPHASE) != 0)\n+\t    ready = true;\n+\t}\n+    }\n+\n+  atomic_store_relaxed (&rwlock->__data.__cur_writer,\n+      THREAD_GETMEM (THREAD_SELF, tid));\n+  return 0;\n+}\ndiff --git a/nptl/pthread_rwlock_timedrdlock.c b/nptl/pthread_rwlock_timedrdlock.c\nindex 9f084f8c34..174ddf11cd 100644\n--- a/nptl/pthread_rwlock_timedrdlock.c\n+++ b/nptl/pthread_rwlock_timedrdlock.c\n@@ -35,3 +35,22 @@ pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,\n \n   return __pthread_rwlock_rdlock_full (rwlock, abstime);\n }\n+\n+/* 64-bit time version */\n+\n+int\n+pthread_rwlock_timedrdlock_t64 (pthread_rwlock_t *rwlock,\n+    const struct __timespec64 *abstime)\n+{\n+  /* Make sure the passed in timeout value is valid.  Note that the previous\n+     implementation assumed that this check *must* not be performed if there\n+     would in fact be no blocking; however, POSIX only requires that \"the\n+     validity of the abstime parameter need not be checked if the lock can be\n+     immediately acquired\" (i.e., we need not but may check it).  */\n+  /* ??? Just move this to __pthread_rwlock_rdlock_full?  */\n+  if (__glibc_unlikely (abstime->tv_nsec >= 1000000000\n+      || abstime->tv_nsec < 0))\n+    return EINVAL;\n+\n+  return __pthread_rwlock_rdlock_full_t64 (rwlock, abstime);\n+}\ndiff --git a/nptl/pthread_rwlock_timedwrlock.c b/nptl/pthread_rwlock_timedwrlock.c\nindex 5626505d2c..7b954d4a12 100644\n--- a/nptl/pthread_rwlock_timedwrlock.c\n+++ b/nptl/pthread_rwlock_timedwrlock.c\n@@ -35,3 +35,22 @@ pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,\n \n   return __pthread_rwlock_wrlock_full (rwlock, abstime);\n }\n+\n+/* 64-bit time version */\n+\n+int\n+pthread_rwlock_timedwrlock_t64 (pthread_rwlock_t *rwlock,\n+    const struct __timespec64 *abstime)\n+{\n+  /* Make sure the passed in timeout value is valid.  
Note that the previous\n+     implementation assumed that this check *must* not be performed if there\n+     would in fact be no blocking; however, POSIX only requires that \"the\n+     validity of the abstime parameter need not be checked if the lock can be\n+     immediately acquired\" (i.e., we need not but may check it).  */\n+  /* ??? Just move this to __pthread_rwlock_wrlock_full?  */\n+  if (__glibc_unlikely (abstime->tv_nsec >= 1000000000\n+      || abstime->tv_nsec < 0))\n+    return EINVAL;\n+\n+  return __pthread_rwlock_wrlock_full_t64 (rwlock, abstime);\n+}\ndiff --git a/nptl/sem_timedwait.c b/nptl/sem_timedwait.c\nindex 22b0778cc2..893954a969 100644\n--- a/nptl/sem_timedwait.c\n+++ b/nptl/sem_timedwait.c\n@@ -38,3 +38,21 @@ sem_timedwait (sem_t *sem, const struct timespec *abstime)\n   else\n     return __new_sem_wait_slow((struct new_sem *) sem, abstime);\n }\n+\n+int\n+sem_timedwait_t64 (sem_t *sem, const struct __timespec64 *abstime)\n+{\n+  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)\n+    {\n+      __set_errno (EINVAL);\n+      return -1;\n+    }\n+\n+  /* Check sem_wait.c for a more detailed explanation why it is required.  */\n+  __pthread_testcancel ();\n+\n+  if (__new_sem_wait_fast ((struct new_sem *) sem, 0) == 0)\n+    return 0;\n+  else\n+    return __new_sem_wait_slow_t64 ((struct new_sem *) sem, abstime);\n+}\ndiff --git a/nptl/sem_wait.c b/nptl/sem_wait.c\nindex 625bf08c83..b9f37f49bf 100644\n--- a/nptl/sem_wait.c\n+++ b/nptl/sem_wait.c\n@@ -43,6 +43,30 @@ __new_sem_wait (sem_t *sem)\n }\n versioned_symbol (libpthread, __new_sem_wait, sem_wait, GLIBC_2_1);\n \n+/* 64-bit time version */\n+\n+int\n+__new_sem_wait_t64 (sem_t *sem)\n+{\n+  /* We need to check whether we need to act upon a cancellation request here\n+     because POSIX specifies that cancellation points \"shall occur\" in\n+     sem_wait and sem_timedwait, which also means that they need to check\n+     this regardless whether they block or not (unlike \"may occur\"\n+     functions).  See the POSIX Rationale for this requirement: Section\n+     \"Thread Cancellation Overview\" [1] and austin group issue #1076 [2]\n+     for thoughts on why this may be a suboptimal design.\n+\n+     [1] http://pubs.opengroup.org/onlinepubs/9699919799/xrat/V4_xsh_chap02.html\n+     [2] http://austingroupbugs.net/view.php?id=1076\n+   */\n+  __pthread_testcancel ();\n+\n+  if (__new_sem_wait_fast ((struct new_sem *) sem, 0) == 0)\n+    return 0;\n+  else\n+    return __new_sem_wait_slow_t64 ((struct new_sem *) sem, NULL);\n+}\n+\n #if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)\n int\n attribute_compat_text_section\ndiff --git a/nptl/sem_waitcommon.c b/nptl/sem_waitcommon.c\nindex a3412a0d35..d5bfe04aeb 100644\n--- a/nptl/sem_waitcommon.c\n+++ b/nptl/sem_waitcommon.c\n@@ -119,6 +119,24 @@ do_futex_wait (struct new_sem *sem, const struct timespec *abstime)\n   return err;\n }\n \n+static int\n+__attribute__ ((noinline))\n+do_futex_wait_t64 (struct new_sem *sem, const struct __timespec64 *abstime)\n+{\n+  int err;\n+\n+#if __HAVE_64B_ATOMICS\n+  err = futex_abstimed_wait_cancelable_t64 (\n+      (unsigned int *) &sem->data + SEM_VALUE_OFFSET, 0, abstime,\n+      sem->private);\n+#else\n+  err = futex_abstimed_wait_cancelable_t64 (&sem->value, SEM_NWAITERS_MASK,\n+\t\t\t\t\tabstime, sem->private);\n+#endif\n+\n+  return err;\n+}\n+\n /* Fast path: Try to grab a token without blocking.  
*/\n static int\n __new_sem_wait_fast (struct new_sem *sem, int definitive_result)\n@@ -310,6 +328,160 @@ error:\n   return err;\n }\n \n+/* 64-bit time version */\n+\n+static int\n+__attribute__ ((noinline))\n+__new_sem_wait_slow_t64 (struct new_sem *sem, const struct __timespec64 *abstime)\n+{\n+  int err = 0;\n+\n+#if __HAVE_64B_ATOMICS\n+  /* Add a waiter.  Relaxed MO is sufficient because we can rely on the\n+     ordering provided by the RMW operations we use.  */\n+  uint64_t d = atomic_fetch_add_relaxed (&sem->data,\n+      (uint64_t) 1 << SEM_NWAITERS_SHIFT);\n+\n+  pthread_cleanup_push (__sem_wait_cleanup, sem);\n+\n+  /* Wait for a token to be available.  Retry until we can grab one.  */\n+  for (;;)\n+    {\n+      /* If there is no token available, sleep until there is.  */\n+      if ((d & SEM_VALUE_MASK) == 0)\n+\t{\n+\t  err = do_futex_wait_t64 (sem, abstime);\n+\t  /* A futex return value of 0 or EAGAIN is due to a real or spurious\n+\t     wake-up, or due to a change in the number of tokens.  We retry in\n+\t     these cases.\n+\t     If we timed out, forward this to the caller.\n+\t     EINTR is returned if we are interrupted by a signal; we\n+\t     forward this to the caller.  (See futex_wait and related\n+\t     documentation.  Before Linux 2.6.22, EINTR was also returned on\n+\t     spurious wake-ups; we only support more recent Linux versions,\n+\t     so do not need to consider this here.)  */\n+\t  if (err == ETIMEDOUT || err == EINTR)\n+\t    {\n+\t      __set_errno (err);\n+\t      err = -1;\n+\t      /* Stop being registered as a waiter.  */\n+\t      atomic_fetch_add_relaxed (&sem->data,\n+\t\t  -((uint64_t) 1 << SEM_NWAITERS_SHIFT));\n+\t      break;\n+\t    }\n+\t  /* Relaxed MO is sufficient; see below.  */\n+\t  d = atomic_load_relaxed (&sem->data);\n+\t}\n+      else\n+\t{\n+\t  /* Try to grab both a token and stop being a waiter.  We need\n+\t     acquire MO so this synchronizes with all token providers (i.e.,\n+\t     the RMW operation we read from or all those before it in\n+\t     modification order; also see sem_post).  On the failure path,\n+\t     relaxed MO is sufficient because we only eventually need the\n+\t     up-to-date value; the futex_wait or the CAS perform the real\n+\t     work.  */\n+\t  if (atomic_compare_exchange_weak_acquire (&sem->data,\n+\t      &d, d - 1 - ((uint64_t) 1 << SEM_NWAITERS_SHIFT)))\n+\t    {\n+\t      err = 0;\n+\t      break;\n+\t    }\n+\t}\n+    }\n+\n+  pthread_cleanup_pop (0);\n+#else\n+  /* The main difference to the 64b-atomics implementation is that we need to\n+     access value and nwaiters in separate steps, and that the nwaiters bit\n+     in the value can temporarily not be set even if nwaiters is nonzero.\n+     We work around incorrectly unsetting the nwaiters bit by letting sem_wait\n+     set the bit again and waking the number of waiters that could grab a\n+     token.  There are two additional properties we need to ensure:\n+     (1) We make sure that whenever unsetting the bit, we see the increment of\n+     nwaiters by the other thread that set the bit.  IOW, we will notice if\n+     we make a mistake.\n+     (2) When setting the nwaiters bit, we make sure that we see the unsetting\n+     of the bit by another waiter that happened before us.  This avoids having\n+     to blindly set the bit whenever we need to block on it.  
We set/unset\n+     the bit while having incremented nwaiters (i.e., are a registered\n+     waiter), and the problematic case only happens when one waiter indeed\n+     followed another (i.e., nwaiters was never larger than 1); thus, this\n+     works similarly as with a critical section using nwaiters (see the MOs\n+     and related comments below).\n+\n+     An alternative approach would be to unset the bit after decrementing\n+     nwaiters; however, that would result in needing Dekker-like\n+     synchronization and thus full memory barriers.  We also would not be able\n+     to prevent misspeculation, so this alternative scheme does not seem\n+     beneficial.  */\n+  unsigned int v;\n+\n+  /* Add a waiter.  We need acquire MO so this synchronizes with the release\n+     MO we use when decrementing nwaiters below; it ensures that if another\n+     waiter unset the bit before us, we see that and set it again.  Also see\n+     property (2) above.  */\n+  atomic_fetch_add_acquire (&sem->nwaiters, 1);\n+\n+  pthread_cleanup_push (__sem_wait_cleanup, sem);\n+\n+  /* Wait for a token to be available.  Retry until we can grab one.  */\n+  /* We do not need any ordering wrt. to this load's reads-from, so relaxed\n+     MO is sufficient.  The acquire MO above ensures that in the problematic\n+     case, we do see the unsetting of the bit by another waiter.  */\n+  v = atomic_load_relaxed (&sem->value);\n+  do\n+    {\n+      do\n+\t{\n+\t  /* We are about to block, so make sure that the nwaiters bit is\n+\t     set.  We need release MO on the CAS to ensure that when another\n+\t     waiter unsets the nwaiters bit, it will also observe that we\n+\t     incremented nwaiters in the meantime (also see the unsetting of\n+\t     the bit below).  Relaxed MO on CAS failure is sufficient (see\n+\t     above).  */\n+\t  do\n+\t    {\n+\t      if ((v & SEM_NWAITERS_MASK) != 0)\n+\t\tbreak;\n+\t    }\n+\t  while (!atomic_compare_exchange_weak_release (&sem->value,\n+\t      &v, v | SEM_NWAITERS_MASK));\n+\t  /* If there is no token, wait.  */\n+\t  if ((v >> SEM_VALUE_SHIFT) == 0)\n+\t    {\n+\t      /* See __HAVE_64B_ATOMICS variant.  */\n+\t      err = do_futex_wait_t64 (sem, abstime);\n+\t      if (err == ETIMEDOUT || err == EINTR)\n+\t\t{\n+\t\t  __set_errno (err);\n+\t\t  err = -1;\n+\t\t  goto error;\n+\t\t}\n+\t      err = 0;\n+\t      /* We blocked, so there might be a token now.  Relaxed MO is\n+\t\t sufficient (see above).  */\n+\t      v = atomic_load_relaxed (&sem->value);\n+\t    }\n+\t}\n+      /* If there is no token, we must not try to grab one.  */\n+      while ((v >> SEM_VALUE_SHIFT) == 0);\n+    }\n+  /* Try to grab a token.  We need acquire MO so this synchronizes with\n+     all token providers (i.e., the RMW operation we read from or all those\n+     before it in modification order; also see sem_post).  */\n+  while (!atomic_compare_exchange_weak_acquire (&sem->value,\n+      &v, v - (1 << SEM_VALUE_SHIFT)));\n+\n+error:\n+  pthread_cleanup_pop (0);\n+\n+  __sem_wait_32_finish (sem);\n+#endif\n+\n+  return err;\n+}\n+\n /* Stop being a registered waiter (non-64b-atomics code only).  
*/\n #if !__HAVE_64B_ATOMICS\n static void\ndiff --git a/rt/Versions b/rt/Versions\nindex 1eef2e604f..a1c98a8576 100644\n--- a/rt/Versions\n+++ b/rt/Versions\n@@ -48,5 +48,6 @@ librt {\n     __timerfd_settime64;\n     __mq_timedreceive_t64;\n     __mq_timedsend_t64;\n+    __aio_suspend_t64;\n   }\n }\ndiff --git a/sysdeps/nptl/aio_misc.h b/sysdeps/nptl/aio_misc.h\nindex 47b1a36479..3c74f8484c 100644\n--- a/sysdeps/nptl/aio_misc.h\n+++ b/sysdeps/nptl/aio_misc.h\n@@ -71,4 +71,43 @@\n       }\t\t\t\t\t\t\t\t\t      \\\n   } while (0)\n \n+#define AIO_MISC_WAIT_T64(result, futex, timeout, cancel)\t\t      \\\n+  do {\t\t\t\t\t\t\t\t\t      \\\n+    volatile unsigned int *futexaddr = &futex;\t\t\t\t      \\\n+    unsigned int oldval = futex;\t\t\t\t\t      \\\n+\t\t\t\t\t\t\t\t\t      \\\n+    if (oldval != 0)\t\t\t\t\t\t\t      \\\n+      {\t\t\t\t\t\t\t\t\t      \\\n+\tpthread_mutex_unlock (&__aio_requests_mutex);\t\t\t      \\\n+\t\t\t\t\t\t\t\t\t      \\\n+\tint oldtype;\t\t\t\t\t\t\t      \\\n+\tif (cancel)\t\t\t\t\t\t\t      \\\n+\t  oldtype = LIBC_CANCEL_ASYNC ();\t\t\t\t      \\\n+\t\t\t\t\t\t\t\t\t      \\\n+\tint status;\t\t\t\t\t\t\t      \\\n+\tdo\t\t\t\t\t\t\t\t      \\\n+\t  {\t\t\t\t\t\t\t\t      \\\n+\t    status = futex_reltimed_wait_t64 ((unsigned int *) futexaddr,     \\\n+\t\t\t\t\t      oldval, timeout, FUTEX_PRIVATE);\\\n+\t    if (status != EAGAIN)\t\t\t\t\t      \\\n+\t      break;\t\t\t\t\t\t\t      \\\n+\t\t\t\t\t\t\t\t\t      \\\n+\t    oldval = *futexaddr;\t\t\t\t\t      \\\n+\t  }\t\t\t\t\t\t\t\t      \\\n+\twhile (oldval != 0);\t\t\t\t\t\t      \\\n+\t\t\t\t\t\t\t\t\t      \\\n+\tif (cancel)\t\t\t\t\t\t\t      \\\n+\t  LIBC_CANCEL_RESET (oldtype);\t\t\t\t\t      \\\n+\t\t\t\t\t\t\t\t\t      \\\n+\tif (status == EINTR)\t\t\t\t\t\t      \\\n+\t  result = EINTR;\t\t\t\t\t\t      \\\n+\telse if (status == ETIMEDOUT)\t\t\t\t\t      \\\n+\t  result = EAGAIN;\t\t\t\t\t\t      \\\n+\telse\t\t\t\t\t\t\t\t      \\\n+\t  assert (status == 0 || status == EAGAIN);\t\t\t      \\\n+\t\t\t\t\t\t\t\t\t      \\\n+\tpthread_mutex_lock (&__aio_requests_mutex);\t\t\t      \\\n+      }\t\t\t\t\t\t\t\t\t      \\\n+  } while (0)\n+\n #include_next <aio_misc.h>\ndiff --git a/sysdeps/nptl/lowlevellock.h b/sysdeps/nptl/lowlevellock.h\nindex 54e3c28b0b..cedc9daa05 100644\n--- a/sysdeps/nptl/lowlevellock.h\n+++ b/sysdeps/nptl/lowlevellock.h\n@@ -122,6 +122,10 @@ extern void __lll_lock_wait (int *futex, int private) attribute_hidden;\n extern int __lll_timedlock_wait (int *futex, const struct timespec *,\n \t\t\t\t int private) attribute_hidden;\n \n+extern int __lll_timedlock_wait_t64 (int *futex,\n+                                     const struct __timespec64 *,\n+\t\t\t\t     int private) attribute_hidden;\n+\n \n /* As __lll_lock, but with a timeout.  If the timeout occurs then return\n    ETIMEDOUT.  If ABSTIME is invalid, return EINVAL.  
*/\n@@ -138,6 +142,19 @@ extern int __lll_timedlock_wait (int *futex, const struct timespec *,\n #define lll_timedlock(futex, abstime, private)  \\\n   __lll_timedlock (&(futex), abstime, private)\n \n+#define __lll_timedlock_t64(futex, abstime, private)                \\\n+  ({                                                                \\\n+    int *__futex = (futex);                                         \\\n+    int __val = 0;                                                  \\\n+                                                                    \\\n+    if (__glibc_unlikely                                            \\\n+        (atomic_compare_and_exchange_bool_acq (__futex, 1, 0)))     \\\n+      __val = __lll_timedlock_wait_t64 (__futex, abstime, private); \\\n+    __val;                                                          \\\n+  })\n+#define lll_timedlock_t64(futex, abstime, private)  \\\n+  __lll_timedlock_t64 (&(futex), abstime, private)\n+\n \n /* This is an expression rather than a statement even though its value is\n    void, so that it can be used in a comma expression or as an expression\ndiff --git a/sysdeps/pthread/aio_suspend.c b/sysdeps/pthread/aio_suspend.c\nindex c739285c6a..8a38754a12 100644\n--- a/sysdeps/pthread/aio_suspend.c\n+++ b/sysdeps/pthread/aio_suspend.c\n@@ -254,3 +254,167 @@ aio_suspend (const struct aiocb *const list[], int nent,\n }\n \n weak_alias (aio_suspend, aio_suspend64)\n+\n+#ifdef DONT_NEED_AIO_MISC_COND\n+static int\n+__attribute__ ((noinline))\n+do_aio_misc_wait_t64 (unsigned int *cntr, \n+\t\t      const struct __timespec64 *timeout)\n+{\n+  int result = 0;\n+\n+  AIO_MISC_WAIT_T64 (result, *cntr, timeout, 1);\n+\n+  return result;\n+}\n+#endif\n+\n+int\n+aio_suspend_t64 (const struct aiocb *const list[], int nent,\n+\t         const struct __timespec64 *timeout)\n+{\n+  if (__glibc_unlikely (nent < 0))\n+    {\n+      __set_errno (EINVAL);\n+      return -1;\n+    }\n+\n+  struct waitlist waitlist[nent];\n+  struct requestlist *requestlist[nent];\n+#ifndef DONT_NEED_AIO_MISC_COND\n+  pthread_cond_t cond = PTHREAD_COND_INITIALIZER;\n+#endif\n+  int cnt;\n+  bool any = false;\n+  int result = 0;\n+  unsigned int cntr = 1;\n+\n+  /* Request the mutex.  */\n+  pthread_mutex_lock (&__aio_requests_mutex);\n+\n+  /* There is not yet a finished request.  Signal the request that\n+     we are working for it.  */\n+  for (cnt = 0; cnt < nent; ++cnt)\n+    if (list[cnt] != NULL)\n+      {\n+\tif (list[cnt]->__error_code == EINPROGRESS)\n+\t  {\n+\t    requestlist[cnt] = __aio_find_req ((aiocb_union *) list[cnt]);\n+\n+\t    if (requestlist[cnt] != NULL)\n+\t      {\n+#ifndef DONT_NEED_AIO_MISC_COND\n+\t\twaitlist[cnt].cond = &cond;\n+#endif\n+\t\twaitlist[cnt].result = NULL;\n+\t\twaitlist[cnt].next = requestlist[cnt]->waiting;\n+\t\twaitlist[cnt].counterp = &cntr;\n+\t\twaitlist[cnt].sigevp = NULL;\n+#ifdef BROKEN_THREAD_SIGNALS\n+\t\twaitlist[cnt].caller_pid = 0;\t/* Not needed.  */\n+#endif\n+\t\trequestlist[cnt]->waiting = &waitlist[cnt];\n+\t\tany = true;\n+\t      }\n+\t    else\n+\t      /* We will never suspend.  */\n+\t      break;\n+\t  }\n+\telse\n+\t  /* We will never suspend.  */\n+\t  break;\n+      }\n+\n+\n+  /* Only if none of the entries is NULL or finished to be wait.  
*/\n+  if (cnt == nent && any)\n+    {\n+      struct clparam clparam =\n+\t{\n+\t  .list = list,\n+\t  .waitlist = waitlist,\n+\t  .requestlist = requestlist,\n+#ifndef DONT_NEED_AIO_MISC_COND\n+\t  .cond = &cond,\n+#endif\n+\t  .nent = nent\n+\t};\n+\n+      pthread_cleanup_push (cleanup, &clparam);\n+\n+#ifdef DONT_NEED_AIO_MISC_COND\n+      result = do_aio_misc_wait_t64 (&cntr, timeout);\n+#else\n+      if (timeout == NULL)\n+\tresult = pthread_cond_wait (&cond, &__aio_requests_mutex);\n+      else\n+\t{\n+\t  /* We have to convert the relative timeout value into an\n+\t     absolute time value with pthread_cond_timedwait expects.  */\n+\t  struct timeval now;\n+\t  struct timespec abstime;\n+\n+\t  __gettimeofday (&now, NULL);\n+\t  abstime.tv_nsec = timeout->tv_nsec + now.tv_usec * 1000;\n+\t  abstime.tv_sec = timeout->tv_sec + now.tv_sec;\n+\t  if (abstime.tv_nsec >= 1000000000)\n+\t    {\n+\t      abstime.tv_nsec -= 1000000000;\n+\t      abstime.tv_sec += 1;\n+\t    }\n+\n+\t  result = __pthread_cond_timedwait_t64 (&cond,\n+\t\t\t\t\t       &__aio_requests_mutex,\n+\t\t\t\t\t       &abstime);\n+\t}\n+#endif\n+\n+      pthread_cleanup_pop (0);\n+    }\n+\n+  /* Now remove the entry in the waiting list for all requests\n+     which didn't terminate.  */\n+  while (cnt-- > 0)\n+    if (list[cnt] != NULL && list[cnt]->__error_code == EINPROGRESS)\n+      {\n+\tstruct waitlist **listp;\n+\n+\tassert (requestlist[cnt] != NULL);\n+\n+\t/* There is the chance that we cannot find our entry anymore. This\n+\t   could happen if the request terminated and restarted again.  */\n+\tlistp = &requestlist[cnt]->waiting;\n+\twhile (*listp != NULL && *listp != &waitlist[cnt])\n+\t  listp = &(*listp)->next;\n+\n+\tif (*listp != NULL)\n+\t  *listp = (*listp)->next;\n+      }\n+\n+#ifndef DONT_NEED_AIO_MISC_COND\n+  /* Release the conditional variable.  */\n+  if (__glibc_unlikely (pthread_cond_destroy (&cond) != 0))\n+    /* This must never happen.  */\n+    abort ();\n+#endif\n+\n+  if (result != 0)\n+    {\n+#ifndef DONT_NEED_AIO_MISC_COND\n+      /* An error occurred.  Possibly it's ETIMEDOUT.  We have to translate\n+\t the timeout error report of `pthread_cond_timedwait' to the\n+\t form expected from `aio_suspend'.  */\n+      if (result == ETIMEDOUT)\n+\t__set_errno (EAGAIN);\n+      else\n+#endif\n+\t__set_errno (result);\n+\n+      result = -1;\n+    }\n+\n+  /* Release the mutex.  */\n+  pthread_mutex_unlock (&__aio_requests_mutex);\n+\n+  return result;\n+}\ndiff --git a/sysdeps/unix/sysv/linux/futex-internal.h b/sysdeps/unix/sysv/linux/futex-internal.h\nindex 1386807f5b..ceffa906c1 100644\n--- a/sysdeps/unix/sysv/linux/futex-internal.h\n+++ b/sysdeps/unix/sysv/linux/futex-internal.h\n@@ -131,6 +131,32 @@ futex_reltimed_wait (unsigned int *futex_word, unsigned int expected,\n     }\n }\n \n+/* 64-bit time version */\n+static __always_inline int\n+futex_reltimed_wait_t64 (unsigned int *futex_word, unsigned int expected,\n+\t\t         const struct __timespec64 *reltime, int private)\n+{\n+  int err = lll_futex_timed_wait_t64 (futex_word, expected, reltime,\n+                                      private);\n+  switch (err)\n+    {\n+    case 0:\n+    case -EAGAIN:\n+    case -EINTR:\n+    case -ETIMEDOUT:\n+      return -err;\n+\n+    case -EFAULT: /* Must have been caused by a glibc or application bug.  */\n+    case -EINVAL: /* Either due to wrong alignment or due to the timeout not\n+\t\t     being normalized.  Must have been caused by a glibc or\n+\t\t     application bug.  
*/\n+    case -ENOSYS: /* Must have been caused by a glibc bug.  */\n+    /* No other errors are documented at this time.  */\n+    default:\n+      futex_fatal_error ();\n+    }\n+}\n+\n /* See sysdeps/nptl/futex-internal.h for details.  */\n static __always_inline int\n futex_reltimed_wait_cancelable (unsigned int *futex_word,\n@@ -160,6 +186,37 @@ futex_reltimed_wait_cancelable (unsigned int *futex_word,\n     }\n }\n \n+/* 64-bit time version */\n+\n+static __always_inline int\n+futex_reltimed_wait_cancelable_t64 (unsigned int *futex_word,\n+\t\t\t\t    unsigned int expected,\n+\t\t\t            const struct __timespec64 *reltime,\n+                                    int private)\n+{\n+  int oldtype;\n+  oldtype = __pthread_enable_asynccancel ();\n+  int err = lll_futex_timed_wait_t64 (futex_word, expected, reltime, private);\n+  __pthread_disable_asynccancel (oldtype);\n+  switch (err)\n+    {\n+    case 0:\n+    case -EAGAIN:\n+    case -EINTR:\n+    case -ETIMEDOUT:\n+      return -err;\n+\n+    case -EFAULT: /* Must have been caused by a glibc or application bug.  */\n+    case -EINVAL: /* Either due to wrong alignment or due to the timeout not\n+\t\t     being normalized.  Must have been caused by a glibc or\n+\t\t     application bug.  */\n+    case -ENOSYS: /* Must have been caused by a glibc bug.  */\n+    /* No other errors are documented at this time.  */\n+    default:\n+      futex_fatal_error ();\n+    }\n+}\n+\n /* See sysdeps/nptl/futex-internal.h for details.  */\n static __always_inline int\n futex_abstimed_wait (unsigned int *futex_word, unsigned int expected,\n@@ -190,6 +247,36 @@ futex_abstimed_wait (unsigned int *futex_word, unsigned int expected,\n     }\n }\n \n+/* 64-bit time version */\n+static __always_inline int\n+futex_abstimed_wait_t64 (unsigned int *futex_word, unsigned int expected,\n+\t\t         const struct __timespec64 *abstime, int private)\n+{\n+  /* Work around the fact that the kernel rejects negative timeout values\n+     despite them being valid.  */\n+  if (__glibc_unlikely ((abstime != NULL) && (abstime->tv_sec < 0)))\n+    return ETIMEDOUT;\n+  int err = lll_futex_timed_wait_bitset_t64 (futex_word, expected, abstime,\n+\t\t\t\t\t     FUTEX_CLOCK_REALTIME, private);\n+  switch (err)\n+    {\n+    case 0:\n+    case -EAGAIN:\n+    case -EINTR:\n+    case -ETIMEDOUT:\n+      return -err;\n+\n+    case -EFAULT: /* Must have been caused by a glibc or application bug.  */\n+    case -EINVAL: /* Either due to wrong alignment or due to the timeout not\n+\t\t     being normalized.  Must have been caused by a glibc or\n+\t\t     application bug.  */\n+    case -ENOSYS: /* Must have been caused by a glibc bug.  */\n+    /* No other errors are documented at this time.  */\n+    default:\n+      futex_fatal_error ();\n+    }\n+}\n+\n /* See sysdeps/nptl/futex-internal.h for details.  */\n static __always_inline int\n futex_abstimed_wait_cancelable (unsigned int *futex_word,\n@@ -224,6 +311,42 @@ futex_abstimed_wait_cancelable (unsigned int *futex_word,\n     }\n }\n \n+/* 64-bit time version */\n+\n+static __always_inline int\n+futex_abstimed_wait_cancelable_t64 (unsigned int *futex_word,\n+\t\t\t\t    unsigned int expected,\n+\t\t\t            const struct __timespec64 *abstime,\n+                                    int private)\n+{\n+  /* Work around the fact that the kernel rejects negative timeout values\n+     despite them being valid.  
*/\n+  if (__glibc_unlikely ((abstime != NULL) && (abstime->tv_sec < 0)))\n+    return ETIMEDOUT;\n+  int oldtype;\n+  oldtype = __pthread_enable_asynccancel ();\n+  int err = lll_futex_timed_wait_bitset_t64 (futex_word, expected, abstime,\n+\t\t\t\t\t     FUTEX_CLOCK_REALTIME, private);\n+  __pthread_disable_asynccancel (oldtype);\n+  switch (err)\n+    {\n+    case 0:\n+    case -EAGAIN:\n+    case -EINTR:\n+    case -ETIMEDOUT:\n+      return -err;\n+\n+    case -EFAULT: /* Must have been caused by a glibc or application bug.  */\n+    case -EINVAL: /* Either due to wrong alignment or due to the timeout not\n+\t\t     being normalized.  Must have been caused by a glibc or\n+\t\t     application bug.  */\n+    case -ENOSYS: /* Must have been caused by a glibc bug.  */\n+    /* No other errors are documented at this time.  */\n+    default:\n+      futex_fatal_error ();\n+    }\n+}\n+\n /* See sysdeps/nptl/futex-internal.h for details.  */\n static __always_inline void\n futex_wake (unsigned int *futex_word, int processes_to_wake, int private)\ndiff --git a/sysdeps/unix/sysv/linux/lowlevellock-futex.h b/sysdeps/unix/sysv/linux/lowlevellock-futex.h\nindex bb4fbae13b..e1cad10544 100644\n--- a/sysdeps/unix/sysv/linux/lowlevellock-futex.h\n+++ b/sysdeps/unix/sysv/linux/lowlevellock-futex.h\n@@ -97,6 +97,16 @@\n \t\t     __lll_private_flag (FUTEX_WAIT, private),  \\\n \t\t     val, timeout)\n \n+#define lll_futex_timed_wait_t64(futexp, val, timeout, private)     \\\n+  ({                                                                       \\\n+    struct timespec ts;                                                    \\\n+    ts.tv_sec = timeout->tv_sec;                                           \\\n+    ts.tv_nsec = timeout->tv_nsec;                                         \\\n+    lll_futex_syscall (4, futexp,                                 \t   \\\n+\t\t       __lll_private_flag (FUTEX_WAIT, private),  \t   \\\n+\t\t       val, &ts);\t\t\t\t\t   \\\n+  })\n+\n #define lll_futex_timed_wait_bitset(futexp, val, timeout, clockbit, private) \\\n   lll_futex_syscall (6, futexp,                                         \\\n \t\t     __lll_private_flag (FUTEX_WAIT_BITSET | (clockbit), \\\n@@ -104,6 +114,18 @@\n \t\t     val, timeout, NULL /* Unused.  */,                 \\\n \t\t     FUTEX_BITSET_MATCH_ANY)\n \n+#define lll_futex_timed_wait_bitset_t64(futexp, val, timeout, clockbit, private) \\\n+  ({                                                                       \\\n+    struct timespec ts;                                                    \\\n+    ts.tv_sec = timeout->tv_sec;                                           \\\n+    ts.tv_nsec = timeout->tv_nsec;                                         \\\n+    lll_futex_syscall (6, futexp,                                          \\\n+\t\t       __lll_private_flag (FUTEX_WAIT_BITSET | (clockbit), \\\n+\t\t                           private),                       \\\n+\t\t       val, &ts, NULL /* Unused.  */,                      \\\n+\t\t       FUTEX_BITSET_MATCH_ANY);                            \\\n+  })\n+\n #define lll_futex_wake(futexp, nr, private)                             \\\n   lll_futex_syscall (4, futexp,                                         \\\n \t\t     __lll_private_flag (FUTEX_WAKE, private), nr, 0)\n",
    "prefixes": [
        "RFC",
        "43/52"
    ]
}
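
The lll_futex_timed_wait_t64 and lll_futex_timed_wait_bitset_t64 macros in the patch pass a 64-bit timeout to the kernel by copying its fields into the kernel's native struct timespec, and the *_t64 lock functions reject an abstime whose tv_nsec lies outside [0, 1000000000).  A minimal standalone sketch of that validate-and-narrow step follows; struct timespec64_sketch and narrow_abstime are illustrative stand-ins for the internal struct __timespec64 and the macro bodies, not names from the patch.

/* Sketch only: validate an absolute timeout the way the *_t64 lock entry
   points do, then narrow it to the kernel's struct timespec the way the
   lll_futex_timed_wait_t64 macro does before issuing the futex syscall.  */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Illustrative stand-in for the internal struct __timespec64.  */
struct timespec64_sketch
{
  int64_t tv_sec;
  long tv_nsec;
};

static int
narrow_abstime (const struct timespec64_sketch *abstime, struct timespec *out)
{
  /* Same range check as pthread_rwlock_timedrdlock_t64.  */
  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
    return EINVAL;
  /* Field-by-field copy as in the lll_futex_timed_wait_t64 macro; on a
     32-bit time_t this assignment is where post-2038 values lose range.  */
  out->tv_sec = (time_t) abstime->tv_sec;
  out->tv_nsec = abstime->tv_nsec;
  return 0;
}

int
main (void)
{
  struct timespec64_sketch deadline = { 4102444800, 500000000 };
  struct timespec ts;
  if (narrow_abstime (&deadline, &ts) == 0)
    printf ("narrowed: %lld.%09ld\n", (long long) ts.tv_sec, (long) ts.tv_nsec);
  return 0;
}

The sketch only makes explicit the copying that the macros above perform; it does not claim anything about how the kernel interprets the narrowed value.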
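
aio_suspend_t64 converts the caller's relative timeout into the absolute deadline that pthread_cond_timedwait expects by adding the current time and carrying any nanosecond overflow into the seconds field.  A self-contained sketch of that conversion, assuming clock_gettime (CLOCK_REALTIME) and plain struct timespec as stand-ins for __gettimeofday and the internal 64-bit types:

/* Sketch only: relative-to-absolute timeout conversion in the style of
   aio_suspend_t64, using public APIs instead of glibc internals.  */
#include <stdio.h>
#include <time.h>

static struct timespec
reltime_to_abstime (const struct timespec *rel)
{
  struct timespec now, abstime;
  clock_gettime (CLOCK_REALTIME, &now);
  abstime.tv_sec = now.tv_sec + rel->tv_sec;
  abstime.tv_nsec = now.tv_nsec + rel->tv_nsec;
  /* Carry nanosecond overflow into the seconds field, as aio_suspend_t64
     does before calling __pthread_cond_timedwait_t64.  */
  if (abstime.tv_nsec >= 1000000000)
    {
      abstime.tv_nsec -= 1000000000;
      abstime.tv_sec += 1;
    }
  return abstime;
}

int
main (void)
{
  struct timespec rel = { 2, 750000000 };
  struct timespec deadline = reltime_to_abstime (&rel);
  printf ("deadline: %lld.%09ld\n",
          (long long) deadline.tv_sec, (long) deadline.tv_nsec);
  return 0;
}

Without the carry branch, tv_nsec could end up at or above 1000000000 and pthread_cond_timedwait would reject the deadline as invalid.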