From: Iain Buclaw
Date: Sat, 20 Apr 2019 19:21:24 +0200
Subject: [PATCH, PR d/89293] Committed: core.atomic should have fallback when there's no libatomic
To: gcc-patches

Hi,

This patch adds an implementation of core.atomic for when there is no libatomic support linked in, nor provided by the compiler. The main part of it fakes the purity of the system mutex lock/unlock functions so that they can be used from the pure/nothrow/@safe atomic functions.

Regression tested on x86_64-linux-gnu, with all libatomic configurables in libphobos set to false in order to force the new code paths to be exercised.

Committed to trunk as r270470.
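For readers unfamiliar with the trick, here is an illustrative sketch of how the purity faking works. This is explanatory only and not part of the patch; the names fakePureLock, getGuard and guardedIncrement are invented for the example, and it assumes a POSIX system where a zero-initialised pthread_mutex_t is a valid default mutex (as on glibc). A pure prototype is declared and pragma(mangle) redirects its symbol to the impure C function, so the compiler sees a pure call while the linker binds the real one:

    import core.sys.posix.pthread;

    // Impure C functions re-declared as pure prototypes; pragma(mangle) makes
    // the linker bind them to the real (impure) pthread symbols.
    extern (C) pure @trusted @nogc nothrow
    {
        pragma(mangle, "pthread_mutex_lock") int fakePureLock(pthread_mutex_t*);
        pragma(mangle, "pthread_mutex_unlock") int fakePureUnlock(pthread_mutex_t*);
    }

    // The real accessor is impure because it hands out __gshared state...
    private pthread_mutex_t* _getGuard() @trusted @nogc nothrow
    {
        __gshared pthread_mutex_t guard;  // zero-init is a valid default mutex on glibc
        return &guard;
    }

    // ...so a pure prototype is mangled onto it, the same trick again.
    pragma(mangle, _getGuard.mangleof)
    private pthread_mutex_t* getGuard() pure @trusted @nogc nothrow @property;

    // With both fakes in place, a lock-protected operation can itself be
    // pure/nothrow/@nogc, which is what the fallback atomic functions rely on.
    int guardedIncrement(ref int counter) pure @trusted @nogc nothrow
    {
        fakePureLock(getGuard);
        scope(exit) fakePureUnlock(getGuard);
        return ++counter;
    }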
diff --git a/libphobos/libdruntime/core/atomic.d b/libphobos/libdruntime/core/atomic.d
index 0b39cddb6c9..1d0a2ea8b48 100644
--- a/libphobos/libdruntime/core/atomic.d
+++ b/libphobos/libdruntime/core/atomic.d
@@ -1353,36 +1353,62 @@ else version (GNU)
 
     private bool casImpl(T,V1,V2)( shared(T)* here, V1 ifThis, V2 writeThis ) pure nothrow @nogc @trusted
     {
-        static assert(GNU_Have_Atomics, "cas() not supported on this architecture");
         bool res = void;
 
-        static if (T.sizeof == byte.sizeof)
+        static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
         {
-            res = __atomic_compare_exchange_1(here, cast(void*) &ifThis, *cast(ubyte*) &writeThis,
-                                              false, MemoryOrder.seq, MemoryOrder.seq);
-        }
-        else static if (T.sizeof == short.sizeof)
-        {
-            res = __atomic_compare_exchange_2(here, cast(void*) &ifThis, *cast(ushort*) &writeThis,
-                                              false, MemoryOrder.seq, MemoryOrder.seq);
-        }
-        else static if (T.sizeof == int.sizeof)
-        {
-            res = __atomic_compare_exchange_4(here, cast(void*) &ifThis, *cast(uint*) &writeThis,
-                                              false, MemoryOrder.seq, MemoryOrder.seq);
-        }
-        else static if (T.sizeof == long.sizeof && GNU_Have_64Bit_Atomics)
-        {
-            res = __atomic_compare_exchange_8(here, cast(void*) &ifThis, *cast(ulong*) &writeThis,
-                                              false, MemoryOrder.seq, MemoryOrder.seq);
+            static if (T.sizeof == byte.sizeof)
+            {
+                res = __atomic_compare_exchange_1(here, cast(void*) &ifThis, *cast(ubyte*) &writeThis,
+                                                  false, MemoryOrder.seq, MemoryOrder.seq);
+            }
+            else static if (T.sizeof == short.sizeof)
+            {
+                res = __atomic_compare_exchange_2(here, cast(void*) &ifThis, *cast(ushort*) &writeThis,
+                                                  false, MemoryOrder.seq, MemoryOrder.seq);
+            }
+            else static if (T.sizeof == int.sizeof)
+            {
+                res = __atomic_compare_exchange_4(here, cast(void*) &ifThis, *cast(uint*) &writeThis,
+                                                  false, MemoryOrder.seq, MemoryOrder.seq);
+            }
+            else static if (T.sizeof == long.sizeof && GNU_Have_64Bit_Atomics)
+            {
+                res = __atomic_compare_exchange_8(here, cast(void*) &ifThis, *cast(ulong*) &writeThis,
+                                                  false, MemoryOrder.seq, MemoryOrder.seq);
+            }
+            else static if (GNU_Have_LibAtomic)
+            {
+                res = __atomic_compare_exchange(T.sizeof, here, cast(void*) &ifThis, cast(void*) &writeThis,
+                                                MemoryOrder.seq, MemoryOrder.seq);
+            }
+            else
+                static assert(0, "Invalid template type specified.");
         }
-        else static if (GNU_Have_LibAtomic)
+        else
         {
-            res = __atomic_compare_exchange(T.sizeof, here, cast(void*) &ifThis, cast(void*) &writeThis,
-                                            MemoryOrder.seq, MemoryOrder.seq);
+            static if (T.sizeof == byte.sizeof)
+                alias U = byte;
+            else static if (T.sizeof == short.sizeof)
+                alias U = short;
+            else static if (T.sizeof == int.sizeof)
+                alias U = int;
+            else static if (T.sizeof == long.sizeof)
+                alias U = long;
+            else
+                static assert(0, "Invalid template type specified.");
+
+            getAtomicMutex.lock();
+            scope(exit) getAtomicMutex.unlock();
+
+            if (*cast(U*)here == *cast(U*)&ifThis)
+            {
+                *here = writeThis;
+                res = true;
+            }
+            else
+                res = false;
         }
-        else
-            static assert(0, "Invalid template type specified.");
 
         return res;
     }
@@ -1406,36 +1432,44 @@ else version (GNU)
     {
         static assert(ms != MemoryOrder.rel, "Invalid MemoryOrder for atomicLoad");
         static assert(__traits(isPOD, T), "argument to atomicLoad() must be POD");
-        static assert(GNU_Have_Atomics, "atomicLoad() not supported on this architecture");
 
-        static if (T.sizeof == ubyte.sizeof)
+        static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
         {
-            ubyte value = __atomic_load_1(&val, ms);
-            return *cast(HeadUnshared!T*) &value;
-        }
-        else static if (T.sizeof == ushort.sizeof)
-        {
-            ushort value = __atomic_load_2(&val, ms);
-            return *cast(HeadUnshared!T*) &value;
-        }
-        else static if (T.sizeof == uint.sizeof)
-        {
-            uint value = __atomic_load_4(&val, ms);
-            return *cast(HeadUnshared!T*) &value;
-        }
-        else static if (T.sizeof == ulong.sizeof && GNU_Have_64Bit_Atomics)
-        {
-            ulong value = __atomic_load_8(&val, ms);
-            return *cast(HeadUnshared!T*) &value;
+            static if (T.sizeof == ubyte.sizeof)
+            {
+                ubyte value = __atomic_load_1(&val, ms);
+                return *cast(HeadUnshared!T*) &value;
+            }
+            else static if (T.sizeof == ushort.sizeof)
+            {
+                ushort value = __atomic_load_2(&val, ms);
+                return *cast(HeadUnshared!T*) &value;
+            }
+            else static if (T.sizeof == uint.sizeof)
+            {
+                uint value = __atomic_load_4(&val, ms);
+                return *cast(HeadUnshared!T*) &value;
+            }
+            else static if (T.sizeof == ulong.sizeof && GNU_Have_64Bit_Atomics)
+            {
+                ulong value = __atomic_load_8(&val, ms);
+                return *cast(HeadUnshared!T*) &value;
+            }
+            else static if (GNU_Have_LibAtomic)
+            {
+                T value;
+                __atomic_load(T.sizeof, &val, cast(void*)&value, ms);
+                return *cast(HeadUnshared!T*) &value;
+            }
+            else
+                static assert(0, "Invalid template type specified.");
         }
-        else static if (GNU_Have_LibAtomic)
+        else
        {
-            T value;
-            __atomic_load(T.sizeof, &val, cast(void*)&value, ms);
-            return *cast(HeadUnshared!T*) &value;
+            getAtomicMutex.lock();
+            scope(exit) getAtomicMutex.unlock();
+            return *cast(HeadUnshared!T*)&val;
         }
-        else
-            static assert(0, "Invalid template type specified.");
     }
@@ -1444,36 +1478,138 @@ else version (GNU)
     {
         static assert(ms != MemoryOrder.acq, "Invalid MemoryOrder for atomicStore");
         static assert(__traits(isPOD, T), "argument to atomicLoad() must be POD");
-        static assert(GNU_Have_Atomics, "atomicStore() not supported on this architecture");
 
-        static if (T.sizeof == ubyte.sizeof)
+        static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
         {
-            __atomic_store_1(&val, *cast(ubyte*) &newval, ms);
+            static if (T.sizeof == ubyte.sizeof)
+            {
+                __atomic_store_1(&val, *cast(ubyte*) &newval, ms);
+            }
+            else static if (T.sizeof == ushort.sizeof)
+            {
+                __atomic_store_2(&val, *cast(ushort*) &newval, ms);
+            }
+            else static if (T.sizeof == uint.sizeof)
+            {
+                __atomic_store_4(&val, *cast(uint*) &newval, ms);
+            }
+            else static if (T.sizeof == ulong.sizeof && GNU_Have_64Bit_Atomics)
+            {
+                __atomic_store_8(&val, *cast(ulong*) &newval, ms);
+            }
+            else static if (GNU_Have_LibAtomic)
+            {
+                __atomic_store(T.sizeof, &val, cast(void*)&newval, ms);
+            }
+            else
+                static assert(0, "Invalid template type specified.");
         }
-        else static if (T.sizeof == ushort.sizeof)
+        else
         {
-            __atomic_store_2(&val, *cast(ushort*) &newval, ms);
+            getAtomicMutex.lock();
+            val = newval;
+            getAtomicMutex.unlock();
         }
-        else static if (T.sizeof == uint.sizeof)
+    }
+
+
+    void atomicFence() nothrow @nogc
+    {
+        static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
+            __atomic_thread_fence(MemoryOrder.seq);
+        else
         {
-            __atomic_store_4(&val, *cast(uint*) &newval, ms);
+            getAtomicMutex.lock();
+            getAtomicMutex.unlock();
         }
-        else static if (T.sizeof == ulong.sizeof && GNU_Have_64Bit_Atomics)
+    }
+
+    static if (!GNU_Have_Atomics && !GNU_Have_LibAtomic)
+    {
+        // Use system mutex for atomics, faking the purity of the functions so
+        // that they can be used in pure/nothrow/@safe code.
+        extern (C) private pure @trusted @nogc nothrow
         {
-            __atomic_store_8(&val, *cast(ulong*) &newval, ms);
+            static if (GNU_Thread_Model == ThreadModel.Posix)
+            {
+                import core.sys.posix.pthread;
+                alias atomicMutexHandle = pthread_mutex_t;
+
+                pragma(mangle, "pthread_mutex_init") int fakePureMutexInit(pthread_mutex_t*, pthread_mutexattr_t*);
+                pragma(mangle, "pthread_mutex_lock") int fakePureMutexLock(pthread_mutex_t*);
+                pragma(mangle, "pthread_mutex_unlock") int fakePureMutexUnlock(pthread_mutex_t*);
+            }
+            else static if (GNU_Thread_Model == ThreadModel.Win32)
+            {
+                import core.sys.windows.winbase;
+                alias atomicMutexHandle = CRITICAL_SECTION;
+
+                pragma(mangle, "InitializeCriticalSection") int fakePureMutexInit(CRITICAL_SECTION*);
+                pragma(mangle, "EnterCriticalSection") void fakePureMutexLock(CRITICAL_SECTION*);
+                pragma(mangle, "LeaveCriticalSection") int fakePureMutexUnlock(CRITICAL_SECTION*);
+            }
+            else
+            {
+                alias atomicMutexHandle = int;
+            }
         }
-        else static if (GNU_Have_LibAtomic)
+
+        // Implements lock/unlock operations.
+        private struct AtomicMutex
         {
-            __atomic_store(T.sizeof, &val, cast(void*)&newval, ms);
+            int lock() pure @trusted @nogc nothrow
+            {
+                static if (GNU_Thread_Model == ThreadModel.Posix)
+                {
+                    if (!_inited)
+                    {
+                        fakePureMutexInit(&_handle, null);
+                        _inited = true;
+                    }
+                    return fakePureMutexLock(&_handle);
+                }
+                else
+                {
+                    static if (GNU_Thread_Model == ThreadModel.Win32)
+                    {
+                        if (!_inited)
+                        {
+                            fakePureMutexInit(&_handle);
+                            _inited = true;
+                        }
+                        fakePureMutexLock(&_handle);
+                    }
+                    return 0;
+                }
+            }
+
+            int unlock() pure @trusted @nogc nothrow
+            {
+                static if (GNU_Thread_Model == ThreadModel.Posix)
+                    return fakePureMutexUnlock(&_handle);
+                else
+                {
+                    static if (GNU_Thread_Model == ThreadModel.Win32)
+                        fakePureMutexUnlock(&_handle);
+                    return 0;
+                }
+            }
+
+        private:
+            atomicMutexHandle _handle;
+            bool _inited;
        }
-        else
-            static assert(0, "Invalid template type specified.");
-    }
 
+        // Internal static mutex reference.
+        private AtomicMutex* _getAtomicMutex() @trusted @nogc nothrow
+        {
+            __gshared static AtomicMutex mutex;
+            return &mutex;
+        }
 
-    void atomicFence() nothrow @nogc
-    {
-        __atomic_thread_fence(MemoryOrder.seq);
+        // Pure alias for _getAtomicMutex.
+        pragma(mangle, _getAtomicMutex.mangleof)
+        private AtomicMutex* getAtomicMutex() pure @trusted @nogc nothrow @property;
     }
 }
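From the user's point of view nothing changes: the same core.atomic entry points are used whether they are backed by compiler intrinsics, libatomic, or the new mutex fallback. A minimal usage sketch (hypothetical example code, not taken from the patch or its testsuite):

    import core.atomic;

    shared int counter;

    void main()
    {
        atomicStore(counter, 1);            // store via whichever backend is configured
        cas(&counter, 1, 2);                // compare-and-swap; uses the mutex path as a last resort
        assert(atomicLoad(counter) == 2);
        atomicOp!"+="(counter, 3);          // read-modify-write built on top of cas
        assert(atomicLoad(counter) == 5);
        atomicFence();                      // full barrier; a lock/unlock pair in the fallback
    }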