From patchwork Tue Aug 6 08:40:34 2013
X-Patchwork-Submitter: François Dumont
X-Patchwork-Id: 264918
Message-ID: <5200B682.7010706@gmail.com>
Date: Tue, 06 Aug 2013 10:40:34 +0200
From: François Dumont
To: Jonathan Wakely
CC: "libstdc++@gcc.gnu.org", gcc-patches
Subject: Re: Limit template parameters in hashtable

Ok then, attached patch applied.

2013-08-06  François Dumont

	* include/bits/hashtable_policy.h (_Hashtable_alloc): New.
	(_ReuseOrAllocNode, _AllocNode): Adapt to use latter rather than
	_Hashtable.
	(_Before_begin<>): Remove.
	* include/bits/hashtable.h (_Hashtable): Inherit from
	_Hashtable_alloc and adapt.  Restore _M_before_begin field.
	* python/libstdcxx/v6/printers.py (StdHashtableIterator): Adapt
	access to hashtable before begin.
	* testsuite/23_containers/unordered_set/
	not_default_constructible_hash_neg.cc: Adapt dg-error line number.
	* testsuite/23_containers/unordered_set/instantiation_neg.cc:
	Likewise.

François

On 08/05/2013 11:39 AM, Jonathan Wakely wrote:
> On 28 July 2013 17:44, François Dumont wrote:
>> Is it ok for this one?
> Yes, sorry for the delay, this is OK to commit. I think the code is
> cleaner now, and I agree with your EBO improvement that puts
> _M_before_begin back in the main hashtable.
>
> Thanks very much for doing this.

Index: include/bits/hashtable_policy.h
===================================================================
--- include/bits/hashtable_policy.h	(revision 201521)
+++ include/bits/hashtable_policy.h	(working copy)
@@ -102,25 +102,26 @@
       { return std::get<0>(std::forward<_Tp>(__x)); }
   };
 
+  template<typename _NodeAlloc>
+    struct _Hashtable_alloc;
+
   // Functor recycling a pool of nodes and using allocation once the pool is
   // empty.
-  template<typename _Key, typename _Value, typename _Alloc,
-	   typename _ExtractKey, typename _Equal,
-	   typename _H1, typename _H2, typename _Hash,
-	   typename _RehashPolicy, typename _Traits>
+  template<typename _NodeAlloc>
    struct _ReuseOrAllocNode
    {
    private:
-      using __hashtable = _Hashtable<_Key, _Value, _Alloc, _ExtractKey,
-				     _Equal, _H1, _H2, _Hash,
-				     _RehashPolicy, _Traits>;
-      using __val_alloc_type = typename __hashtable::_Value_alloc_type;
-      using __val_alloc_traits = typename __hashtable::_Value_alloc_traits;
-      using __node_alloc_traits = typename __hashtable::_Node_alloc_traits;
-      using __node_type = typename __hashtable::__node_type;
+      using __node_alloc_type = _NodeAlloc;
+      using __hashtable_alloc = _Hashtable_alloc<__node_alloc_type>;
+      using __value_alloc_type = typename __hashtable_alloc::__value_alloc_type;
+      using __value_alloc_traits =
+	typename __hashtable_alloc::__value_alloc_traits;
+      using __node_alloc_traits =
+	typename __hashtable_alloc::__node_alloc_traits;
+      using __node_type = typename __hashtable_alloc::__node_type;
 
    public:
-      _ReuseOrAllocNode(__node_type* __nodes, __hashtable& __h)
+      _ReuseOrAllocNode(__node_type* __nodes, __hashtable_alloc& __h)
      : _M_nodes(__nodes), _M_h(__h) { }
      _ReuseOrAllocNode(const _ReuseOrAllocNode&) = delete;
 
@@ -136,12 +137,12 @@
 	  __node_type* __node = _M_nodes;
 	  _M_nodes = _M_nodes->_M_next();
 	  __node->_M_nxt = nullptr;
-	  __val_alloc_type __a(_M_h._M_node_allocator());
-	  __val_alloc_traits::destroy(__a, __node->_M_valptr());
+	  __value_alloc_type __a(_M_h._M_node_allocator());
+	  __value_alloc_traits::destroy(__a, __node->_M_valptr());
 	  __try
 	    {
-	      __val_alloc_traits::construct(__a, __node->_M_valptr(),
-					    std::forward<_Arg>(__arg));
+	      __value_alloc_traits::construct(__a, __node->_M_valptr(),
+					      std::forward<_Arg>(__arg));
 	    }
 	  __catch(...)
 	    {
@@ -157,24 +158,19 @@
 
    private:
      mutable __node_type* _M_nodes;
-      __hashtable& _M_h;
+      __hashtable_alloc& _M_h;
    };
 
  // Functor similar to the previous one but without any pool of node to recycle.
- template + template struct _AllocNode { private: - using __hashtable = _Hashtable<_Key, _Value, _Alloc, _ExtractKey, - _Equal, _H1, _H2, _Hash, - _RehashPolicy, _Traits>; - using __node_type = typename __hashtable::__node_type; + using __hashtable_alloc = _Hashtable_alloc<_NodeAlloc>; + using __node_type = typename __hashtable_alloc::__node_type; public: - _AllocNode(__hashtable& __h) + _AllocNode(__hashtable_alloc& __h) : _M_h(__h) { } template @@ -183,7 +179,7 @@ { return _M_h._M_allocate_node(std::forward<_Arg>(__arg)); } private: - __hashtable& _M_h; + __hashtable_alloc& _M_h; }; // Auxiliary types used for all instantiations of _Hashtable nodes @@ -247,6 +243,8 @@ template struct _Hash_node_value_base : _Hash_node_base { + typedef _Value value_type; + __gnu_cxx::__aligned_buffer<_Value> _M_storage; _Value* @@ -336,9 +334,9 @@ using __node_type = typename __base_type::__node_type; public: - typedef _Value value_type; - typedef std::ptrdiff_t difference_type; - typedef std::forward_iterator_tag iterator_category; + typedef _Value value_type; + typedef std::ptrdiff_t difference_type; + typedef std::forward_iterator_tag iterator_category; using pointer = typename std::conditional<__constant_iterators, const _Value*, _Value*>::type; @@ -387,12 +385,12 @@ using __node_type = typename __base_type::__node_type; public: - typedef _Value value_type; - typedef std::ptrdiff_t difference_type; - typedef std::forward_iterator_tag iterator_category; + typedef _Value value_type; + typedef std::ptrdiff_t difference_type; + typedef std::forward_iterator_tag iterator_category; - typedef const _Value* pointer; - typedef const _Value& reference; + typedef const _Value* pointer; + typedef const _Value& reference; _Node_const_iterator() : __base_type(0) { } @@ -499,8 +497,8 @@ static const std::size_t _S_growth_factor = 2; - float _M_max_load_factor; - mutable std::size_t _M_next_resize; + float _M_max_load_factor; + mutable std::size_t _M_next_resize; }; // Base classes for std::_Hashtable. 
We define these base classes @@ -697,9 +695,10 @@ using __unique_keys = typename __hashtable_base::__unique_keys; using __ireturn_type = typename __hashtable_base::__ireturn_type; - using __node_gen_type = _AllocNode<_Key, _Value, _Alloc, _ExtractKey, - _Equal, _H1, _H2, _Hash, - _RehashPolicy, _Traits>; + using __node_type = _Hash_node<_Value, _Traits::__hash_cached::value>; + using __node_alloc_type = + typename __alloctr_rebind<_Alloc, __node_type>::__type; + using __node_gen_type = _AllocNode<__node_alloc_type>; __hashtable& _M_conjure_hashtable() @@ -979,8 +978,10 @@ { _Hashtable_ebo_helper() = default; - _Hashtable_ebo_helper(const _Tp& __tp) : _Tp(__tp) - { } + template + _Hashtable_ebo_helper(_OtherTp&& __tp) + : _Tp(std::forward<_OtherTp>(__tp)) + { } static const _Tp& _S_cget(const _Hashtable_ebo_helper& __eboh) @@ -997,8 +998,10 @@ { _Hashtable_ebo_helper() = default; - _Hashtable_ebo_helper(const _Tp& __tp) : _M_tp(__tp) - { } + template + _Hashtable_ebo_helper(_OtherTp&& __tp) + : _M_tp(std::forward<_OtherTp>(__tp)) + { } static const _Tp& _S_cget(const _Hashtable_ebo_helper& __eboh) @@ -1431,15 +1434,15 @@ _H1, _H2, _Hash, __cache>; using __hash_code_base = typename __base_type::__hash_code_base; public: - typedef _Value value_type; + typedef _Value value_type; typedef typename std::conditional<__constant_iterators, const _Value*, _Value*>::type pointer; typedef typename std::conditional<__constant_iterators, const _Value&, _Value&>::type reference; - typedef std::ptrdiff_t difference_type; - typedef std::forward_iterator_tag iterator_category; + typedef std::ptrdiff_t difference_type; + typedef std::forward_iterator_tag iterator_category; _Local_iterator() = default; @@ -1487,11 +1490,11 @@ using __hash_code_base = typename __base_type::__hash_code_base; public: - typedef _Value value_type; - typedef const _Value* pointer; - typedef const _Value& reference; - typedef std::ptrdiff_t difference_type; - typedef std::forward_iterator_tag iterator_category; + typedef _Value value_type; + typedef const _Value* pointer; + typedef const _Value& reference; + typedef std::ptrdiff_t difference_type; + typedef std::forward_iterator_tag iterator_category; _Local_const_iterator() = default; @@ -1551,11 +1554,11 @@ private _Hashtable_ebo_helper<0, _Equal> { public: - typedef _Key key_type; - typedef _Value value_type; - typedef _Equal key_equal; - typedef std::size_t size_type; - typedef std::ptrdiff_t difference_type; + typedef _Key key_type; + typedef _Value value_type; + typedef _Equal key_equal; + typedef std::size_t size_type; + typedef std::ptrdiff_t difference_type; using __traits_type = _Traits; using __hash_cached = typename __traits_type::__hash_cached; @@ -1597,9 +1600,6 @@ __hash_code, __hash_cached::value>; protected: - using __node_base = __detail::_Hash_node_base; - using __bucket_type = __node_base*; - _Hashtable_base(const _ExtractKey& __ex, const _H1& __h1, const _H2& __h2, const _Hash& __hash, const _Equal& __eq) : __hash_code_base(__ex, __h1, __h2, __hash), _EqualEBO(__eq) @@ -1787,23 +1787,136 @@ } /** - * This type is to combine a _Hash_node_base instance with an allocator - * instance through inheritance to benefit from EBO when possible. + * This type deals with all allocation and keeps an allocator instance through + * inheritance to benefit from EBO when possible. 
*/ template - struct _Before_begin : public _NodeAlloc + struct _Hashtable_alloc : private _Hashtable_ebo_helper<0, _NodeAlloc> { - _Hash_node_base _M_node; + private: + using __ebo_node_alloc = _Hashtable_ebo_helper<0, _NodeAlloc>; + public: + using __node_type = typename _NodeAlloc::value_type; + using __node_alloc_type = _NodeAlloc; + // Use __gnu_cxx to benefit from _S_always_equal and al. + using __node_alloc_traits = __gnu_cxx::__alloc_traits<__node_alloc_type>; - _Before_begin(const _Before_begin&) = default; - _Before_begin(_Before_begin&&) = default; + using __value_type = typename __node_type::value_type; + using __value_alloc_type = + typename __alloctr_rebind<__node_alloc_type, __value_type>::__type; + using __value_alloc_traits = std::allocator_traits<__value_alloc_type>; + using __node_base = __detail::_Hash_node_base; + using __bucket_type = __node_base*; + using __bucket_alloc_type = + typename __alloctr_rebind<__node_alloc_type, __bucket_type>::__type; + using __bucket_alloc_traits = std::allocator_traits<__bucket_alloc_type>; + + _Hashtable_alloc(const _Hashtable_alloc&) = default; + _Hashtable_alloc(_Hashtable_alloc&&) = default; + template - _Before_begin(_Alloc&& __a) - : _NodeAlloc(std::forward<_Alloc>(__a)) + _Hashtable_alloc(_Alloc&& __a) + : __ebo_node_alloc(std::forward<_Alloc>(__a)) { } + + __node_alloc_type& + _M_node_allocator() + { return __ebo_node_alloc::_S_get(*this); } + + const __node_alloc_type& + _M_node_allocator() const + { return __ebo_node_alloc::_S_cget(*this); } + + template + __node_type* + _M_allocate_node(_Args&&... __args); + + void + _M_deallocate_node(__node_type* __n); + + // Deallocate the linked list of nodes pointed to by __n + void + _M_deallocate_nodes(__node_type* __n); + + __bucket_type* + _M_allocate_buckets(std::size_t __n); + + void + _M_deallocate_buckets(__bucket_type*, std::size_t __n); }; + // Definitions of class template _Hashtable_alloc's out-of-line member + // functions. + template + template + typename _Hashtable_alloc<_NodeAlloc>::__node_type* + _Hashtable_alloc<_NodeAlloc>::_M_allocate_node(_Args&&... __args) + { + auto __nptr = __node_alloc_traits::allocate(_M_node_allocator(), 1); + __node_type* __n = std::__addressof(*__nptr); + __try + { + __value_alloc_type __a(_M_node_allocator()); + ::new ((void*)__n) __node_type(); + __value_alloc_traits::construct(__a, __n->_M_valptr(), + std::forward<_Args>(__args)...); + return __n; + } + __catch(...) 
+ { + __node_alloc_traits::deallocate(_M_node_allocator(), __nptr, 1); + __throw_exception_again; + } + } + + template + void + _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node(__node_type* __n) + { + typedef typename __node_alloc_traits::pointer _Ptr; + auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__n); + __value_alloc_type __a(_M_node_allocator()); + __value_alloc_traits::destroy(__a, __n->_M_valptr()); + __n->~__node_type(); + __node_alloc_traits::deallocate(_M_node_allocator(), __ptr, 1); + } + + template + void + _Hashtable_alloc<_NodeAlloc>::_M_deallocate_nodes(__node_type* __n) + { + while (__n) + { + __node_type* __tmp = __n; + __n = __n->_M_next(); + _M_deallocate_node(__tmp); + } + } + + template + typename _Hashtable_alloc<_NodeAlloc>::__bucket_type* + _Hashtable_alloc<_NodeAlloc>::_M_allocate_buckets(std::size_t __n) + { + __bucket_alloc_type __alloc(_M_node_allocator()); + + auto __ptr = __bucket_alloc_traits::allocate(__alloc, __n); + __bucket_type* __p = std::__addressof(*__ptr); + __builtin_memset(__p, 0, __n * sizeof(__bucket_type)); + return __p; + } + + template + void + _Hashtable_alloc<_NodeAlloc>::_M_deallocate_buckets(__bucket_type* __bkts, + std::size_t __n) + { + typedef typename __bucket_alloc_traits::pointer _Ptr; + auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__bkts); + __bucket_alloc_type __alloc(_M_node_allocator()); + __bucket_alloc_traits::deallocate(__alloc, __ptr, __n); + } + //@} hashtable-detail _GLIBCXX_END_NAMESPACE_VERSION } // namespace __detail Index: include/bits/hashtable.h =================================================================== --- include/bits/hashtable.h (revision 201521) +++ include/bits/hashtable.h (working copy) @@ -103,7 +103,7 @@ * Each _Hashtable data structure has: * * - _Bucket[] _M_buckets - * - _Hash_node_base _M_bbegin + * - _Hash_node_base _M_before_begin * - size_type _M_bucket_count * - size_type _M_element_count * @@ -181,13 +181,27 @@ public __detail::_Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, _Traits>, public __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey, _Equal, - _H1, _H2, _Hash, _RehashPolicy, _Traits> + _H1, _H2, _Hash, _RehashPolicy, _Traits>, + private __detail::_Hashtable_alloc< + typename __alloctr_rebind<_Alloc, + __detail::_Hash_node<_Value, + _Traits::__hash_cached::value> >::__type> { - typedef std::allocator_traits<_Alloc> _Alloc_traits; - typedef typename _Alloc_traits::template rebind_alloc<_Value> - _Value_alloc_type; - typedef __gnu_cxx::__alloc_traits<_Value_alloc_type> _Value_alloc_traits; + using __traits_type = _Traits; + using __hash_cached = typename __traits_type::__hash_cached; + using __node_type = __detail::_Hash_node<_Value, __hash_cached::value>; + using __node_alloc_type = + typename __alloctr_rebind<_Alloc, __node_type>::__type; + using __hashtable_alloc = __detail::_Hashtable_alloc<__node_alloc_type>; + + using __value_alloc_traits = + typename __hashtable_alloc::__value_alloc_traits; + using __node_alloc_traits = + typename __hashtable_alloc::__node_alloc_traits; + using __node_base = typename __hashtable_alloc::__node_base; + using __bucket_type = typename __hashtable_alloc::__bucket_type; + public: typedef _Key key_type; typedef _Value value_type; @@ -196,8 +210,8 @@ // mapped_type, if present, comes from _Map_base. // hasher, if present, comes from _Hash_code_base/_Hashtable_base. 
- typedef typename _Value_alloc_traits::pointer pointer; - typedef typename _Value_alloc_traits::const_pointer const_pointer; + typedef typename __value_alloc_traits::pointer pointer; + typedef typename __value_alloc_traits::const_pointer const_pointer; typedef value_type& reference; typedef const value_type& const_reference; @@ -205,8 +219,6 @@ using __rehash_type = _RehashPolicy; using __rehash_state = typename __rehash_type::_State; - using __traits_type = _Traits; - using __hash_cached = typename __traits_type::__hash_cached; using __constant_iterators = typename __traits_type::__constant_iterators; using __unique_keys = typename __traits_type::__unique_keys; @@ -221,9 +233,6 @@ using __hash_code_base = typename __hashtable_base::__hash_code_base; using __hash_code = typename __hashtable_base::__hash_code; - using __node_type = typename __hashtable_base::__node_type; - using __node_base = typename __hashtable_base::__node_base; - using __bucket_type = typename __hashtable_base::__bucket_type; using __ireturn_type = typename __hashtable_base::__ireturn_type; using __map_base = __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey, @@ -240,9 +249,7 @@ _RehashPolicy, _Traits>; using __reuse_or_alloc_node_type = - __detail::_ReuseOrAllocNode<_Key, _Value, _Alloc, - _ExtractKey, _Equal, _H1, _H2, _Hash, - _RehashPolicy, _Traits>; + __detail::_ReuseOrAllocNode<__node_alloc_type>; // Metaprogramming for picking apart hash caching. template @@ -309,18 +316,6 @@ bool _Constant_iteratorsa, bool _Unique_keysa> friend struct __detail::_Insert; - template - friend struct __detail::_ReuseOrAllocNode; - - template - friend struct __detail::_AllocNode; - public: using size_type = typename __hashtable_base::size_type; using difference_type = typename __hashtable_base::difference_type; @@ -333,60 +328,20 @@ const_local_iterator; private: - typedef typename _Alloc_traits::template rebind_alloc<__node_type> - _Node_alloc_type; - // Use __gnu_cxx to benefit from _S_always_equal and al. - typedef __gnu_cxx::__alloc_traits<_Node_alloc_type> _Node_alloc_traits; - - typedef - typename _Alloc_traits::template rebind_alloc<__bucket_type> - _Bucket_alloc_type; - typedef std::allocator_traits<_Bucket_alloc_type> _Bucket_alloc_traits; - - using __before_begin = __detail::_Before_begin<_Node_alloc_type>; - __bucket_type* _M_buckets; size_type _M_bucket_count; - __before_begin _M_bbegin; + __node_base _M_before_begin; size_type _M_element_count; _RehashPolicy _M_rehash_policy; - _Node_alloc_type& - _M_node_allocator() - { return _M_bbegin; } + __hashtable_alloc& + _M_base_alloc() { return *this; } - const _Node_alloc_type& - _M_node_allocator() const - { return _M_bbegin; } + using __hashtable_alloc::_M_deallocate_buckets; - __node_base& - _M_before_begin() - { return _M_bbegin._M_node; } - - const __node_base& - _M_before_begin() const - { return _M_bbegin._M_node; } - - template - __node_type* - _M_allocate_node(_Args&&... __args); - void - _M_deallocate_node(__node_type* __n); - - // Deallocate the linked list of nodes pointed to by __n - void - _M_deallocate_nodes(__node_type* __n); - - __bucket_type* - _M_allocate_buckets(size_type __n); - - void - _M_deallocate_buckets(__bucket_type*, size_type __n); - - void _M_deallocate_buckets() - { _M_deallocate_buckets(_M_buckets, _M_bucket_count); } + { this->_M_deallocate_buckets(_M_buckets, _M_bucket_count); } // Gets bucket begin, deals with the fact that non-empty buckets contain // their before begin node. 
@@ -395,7 +350,7 @@ __node_type* _M_begin() const - { return static_cast<__node_type*>(_M_before_begin()._M_nxt); } + { return static_cast<__node_type*>(_M_before_begin._M_nxt); } template void @@ -477,11 +432,11 @@ _Hashtable& operator=(_Hashtable&& __ht) - noexcept(_Node_alloc_traits::_S_nothrow_move()) + noexcept(__node_alloc_traits::_S_nothrow_move()) { constexpr bool __move_storage = - _Node_alloc_traits::_S_propagate_on_move_assign() - || _Node_alloc_traits::_S_always_equal(); + __node_alloc_traits::_S_propagate_on_move_assign() + || __node_alloc_traits::_S_always_equal(); _M_move_assign(std::move(__ht), integral_constant()); return *this; @@ -491,7 +446,7 @@ operator=(initializer_list __l) { __reuse_or_alloc_node_type __roan(_M_begin(), *this); - _M_before_begin()._M_nxt = nullptr; + _M_before_begin._M_nxt = nullptr; clear(); this->_M_insert_range(__l.begin(), __l.end(), __roan); return *this; @@ -501,7 +456,7 @@ void swap(_Hashtable&) - noexcept(_Node_alloc_traits::_S_nothrow_swap()); + noexcept(__node_alloc_traits::_S_nothrow_swap()); // Basic container operations iterator @@ -538,11 +493,11 @@ allocator_type get_allocator() const noexcept - { return allocator_type(_M_node_allocator()); } + { return allocator_type(this->_M_node_allocator()); } size_type max_size() const noexcept - { return _Node_alloc_traits::max_size(_M_node_allocator()); } + { return __node_alloc_traits::max_size(this->_M_node_allocator()); } // Observers key_equal @@ -807,101 +762,6 @@ typename _Alloc, typename _ExtractKey, typename _Equal, typename _H1, typename _H2, typename _Hash, typename _RehashPolicy, typename _Traits> - template - typename _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal, - _H1, _H2, _Hash, _RehashPolicy, _Traits>::__node_type* - _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal, - _H1, _H2, _Hash, _RehashPolicy, _Traits>:: - _M_allocate_node(_Args&&... __args) - { - auto __nptr = _Node_alloc_traits::allocate(_M_node_allocator(), 1); - __node_type* __n = std::__addressof(*__nptr); - __try - { - _Value_alloc_type __a(_M_node_allocator()); - ::new ((void*)__n) __node_type(); - _Value_alloc_traits::construct(__a, __n->_M_valptr(), - std::forward<_Args>(__args)...); - return __n; - } - __catch(...) 
- { - _Node_alloc_traits::deallocate(_M_node_allocator(), __nptr, 1); - __throw_exception_again; - } - } - - template - void - _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal, - _H1, _H2, _Hash, _RehashPolicy, _Traits>:: - _M_deallocate_node(__node_type* __n) - { - typedef typename _Node_alloc_traits::pointer _Ptr; - auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__n); - _Value_alloc_type __a(_M_node_allocator()); - _Value_alloc_traits::destroy(__a, __n->_M_valptr()); - __n->~__node_type(); - _Node_alloc_traits::deallocate(_M_node_allocator(), __ptr, 1); - } - - template - void - _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal, - _H1, _H2, _Hash, _RehashPolicy, _Traits>:: - _M_deallocate_nodes(__node_type* __n) - { - while (__n) - { - __node_type* __tmp = __n; - __n = __n->_M_next(); - _M_deallocate_node(__tmp); - } - } - - template - typename _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal, - _H1, _H2, _Hash, _RehashPolicy, _Traits>::__bucket_type* - _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal, - _H1, _H2, _Hash, _RehashPolicy, _Traits>:: - _M_allocate_buckets(size_type __n) - { - _Bucket_alloc_type __alloc(_M_node_allocator()); - - auto __ptr = _Bucket_alloc_traits::allocate(__alloc, __n); - __bucket_type* __p = std::__addressof(*__ptr); - __builtin_memset(__p, 0, __n * sizeof(__bucket_type)); - return __p; - } - - template - void - _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal, - _H1, _H2, _Hash, _RehashPolicy, _Traits>:: - _M_deallocate_buckets(__bucket_type* __bkts, size_type __n) - { - typedef typename _Bucket_alloc_traits::pointer _Ptr; - auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__bkts); - _Bucket_alloc_type __alloc(_M_node_allocator()); - _Bucket_alloc_traits::deallocate(__alloc, __ptr, __n); - } - - template typename _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, _Traits>::__node_type* @@ -926,12 +786,12 @@ : __hashtable_base(__exk, __h1, __h2, __h, __eq), __map_base(), __rehash_base(), - _M_bbegin(__a), + __hashtable_alloc(__node_alloc_type(__a)), _M_element_count(0), _M_rehash_policy() { _M_bucket_count = _M_rehash_policy._M_next_bkt(__bucket_hint); - _M_buckets = _M_allocate_buckets(_M_bucket_count); + _M_buckets = this->_M_allocate_buckets(_M_bucket_count); } template_M_allocate_buckets(_M_bucket_count); __try { for (; __f != __l; ++__f) @@ -987,15 +847,15 @@ if (&__ht == this) return *this; - if (_Node_alloc_traits::_S_propagate_on_copy_assign()) + if (__node_alloc_traits::_S_propagate_on_copy_assign()) { auto& __this_alloc = this->_M_node_allocator(); auto& __that_alloc = __ht._M_node_allocator(); - if (!_Node_alloc_traits::_S_always_equal() + if (!__node_alloc_traits::_S_always_equal() && __this_alloc != __that_alloc) { // Replacement allocator cannot free existing storage. - _M_deallocate_nodes(_M_begin()); + this->_M_deallocate_nodes(_M_begin()); if (__builtin_expect(_M_bucket_count != 0, true)) _M_deallocate_buckets(); _M_reset(); @@ -1008,7 +868,7 @@ { _M_assign(__ht, [this](const __node_type* __n) - { return _M_allocate_node(__n->_M_v()); }); + { return this->_M_allocate_node(__n->_M_v()); }); } __catch(...) 
{ @@ -1030,7 +890,7 @@ if (_M_bucket_count != __ht._M_bucket_count) { __former_buckets = _M_buckets; - _M_buckets = _M_allocate_buckets(__ht._M_bucket_count); + _M_buckets = this->_M_allocate_buckets(__ht._M_bucket_count); _M_bucket_count = __ht._M_bucket_count; } else @@ -1043,12 +903,13 @@ _M_element_count = __ht._M_element_count; _M_rehash_policy = __ht._M_rehash_policy; __reuse_or_alloc_node_type __roan(_M_begin(), *this); - _M_before_begin()._M_nxt = nullptr; + _M_before_begin._M_nxt = nullptr; _M_assign(__ht, [&__roan](const __node_type* __n) { return __roan(__n->_M_v()); }); if (__former_buckets) - _M_deallocate_buckets(__former_buckets, __former_bucket_count); + this->_M_deallocate_buckets(__former_buckets, + __former_bucket_count); } __catch(...) { @@ -1079,11 +940,11 @@ { __bucket_type* __buckets = nullptr; if (!_M_buckets) - _M_buckets = __buckets = _M_allocate_buckets(_M_bucket_count); + _M_buckets = __buckets = this->_M_allocate_buckets(_M_bucket_count); __try { - if (!__ht._M_before_begin()._M_nxt) + if (!__ht._M_before_begin._M_nxt) return; // First deal with the special first node pointed to by @@ -1091,8 +952,8 @@ __node_type* __ht_n = __ht._M_begin(); __node_type* __this_n = __node_gen(__ht_n); this->_M_copy_code(__this_n, __ht_n); - _M_before_begin()._M_nxt = __this_n; - _M_buckets[_M_bucket_index(__this_n)] = &_M_before_begin(); + _M_before_begin._M_nxt = __this_n; + _M_buckets[_M_bucket_index(__this_n)] = &_M_before_begin; // Then deal with other nodes. __node_base* __prev_n = __this_n; @@ -1128,7 +989,7 @@ _M_rehash_policy._M_reset(); _M_bucket_count = 0; _M_buckets = nullptr; - _M_before_begin()._M_nxt = nullptr; + _M_before_begin._M_nxt = nullptr; _M_element_count = 0; } @@ -1141,7 +1002,7 @@ _H1, _H2, _Hash, _RehashPolicy, _Traits>:: _M_move_assign(_Hashtable&& __ht, std::true_type) { - _M_deallocate_nodes(_M_begin()); + this->_M_deallocate_nodes(_M_begin()); if (__builtin_expect(_M_bucket_count != 0, true)) _M_deallocate_buckets(); @@ -1149,14 +1010,14 @@ _M_rehash_policy = __ht._M_rehash_policy; _M_buckets = __ht._M_buckets; _M_bucket_count = __ht._M_bucket_count; - _M_before_begin()._M_nxt = __ht._M_before_begin()._M_nxt; + _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt; _M_element_count = __ht._M_element_count; - std::__alloc_on_move(_M_node_allocator(), __ht._M_node_allocator()); + std::__alloc_on_move(this->_M_node_allocator(), __ht._M_node_allocator()); // Fix buckets containing the _M_before_begin pointers that can't be // moved. 
if (_M_begin()) - _M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin(); + _M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin; __ht._M_reset(); } @@ -1169,7 +1030,7 @@ _H1, _H2, _Hash, _RehashPolicy, _Traits>:: _M_move_assign(_Hashtable&& __ht, std::false_type) { - if (__ht._M_node_allocator() == _M_node_allocator()) + if (__ht._M_node_allocator() == this->_M_node_allocator()) _M_move_assign(std::move(__ht), std::true_type()); else { @@ -1181,7 +1042,7 @@ if (_M_bucket_count != __ht._M_bucket_count) { __former_buckets = _M_buckets; - _M_buckets = _M_allocate_buckets(__ht._M_bucket_count); + _M_buckets = this->_M_allocate_buckets(__ht._M_bucket_count); _M_bucket_count = __ht._M_bucket_count; } else @@ -1194,7 +1055,7 @@ _M_element_count = __ht._M_element_count; _M_rehash_policy = __ht._M_rehash_policy; __reuse_or_alloc_node_type __roan(_M_begin(), *this); - _M_before_begin()._M_nxt = nullptr; + _M_before_begin._M_nxt = nullptr; _M_assign(__ht, [&__roan](__node_type* __n) { return __roan(std::move_if_noexcept(__n->_M_v())); }); @@ -1226,16 +1087,16 @@ : __hashtable_base(__ht), __map_base(__ht), __rehash_base(__ht), + __hashtable_alloc( + __node_alloc_traits::_S_select_on_copy(__ht._M_node_allocator())), _M_buckets(), _M_bucket_count(__ht._M_bucket_count), - _M_bbegin(_Node_alloc_traits::_S_select_on_copy( - __ht._M_node_allocator())), _M_element_count(__ht._M_element_count), _M_rehash_policy(__ht._M_rehash_policy) { _M_assign(__ht, [this](const __node_type* __n) - { return _M_allocate_node(__n->_M_v()); }); + { return this->_M_allocate_node(__n->_M_v()); }); } template_M_v()); }); + { return this->_M_allocate_node(__n->_M_v()); }); } template_M_node_allocator()) { _M_buckets = __ht._M_buckets; - _M_before_begin()._M_nxt = __ht._M_before_begin()._M_nxt; + _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt; // Update, if necessary, bucket pointing to before begin that hasn't // moved. if (_M_begin()) - _M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin(); + _M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin; __ht._M_reset(); } else @@ -1313,7 +1175,7 @@ _M_assign(__ht, [this](__node_type* __n) { - return _M_allocate_node( + return this->_M_allocate_node( std::move_if_noexcept(__n->_M_v())); }); __ht.clear(); @@ -1341,27 +1203,27 @@ _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, _Traits>:: swap(_Hashtable& __x) - noexcept(_Node_alloc_traits::_S_nothrow_swap()) + noexcept(__node_alloc_traits::_S_nothrow_swap()) { // The only base class with member variables is hash_code_base. // We define _Hash_code_base::_M_swap because different // specializations have different members. this->_M_swap(__x); - std::__alloc_on_swap(_M_node_allocator(), __x._M_node_allocator()); + std::__alloc_on_swap(this->_M_node_allocator(), __x._M_node_allocator()); std::swap(_M_rehash_policy, __x._M_rehash_policy); std::swap(_M_buckets, __x._M_buckets); std::swap(_M_bucket_count, __x._M_bucket_count); - std::swap(_M_before_begin()._M_nxt, __x._M_before_begin()._M_nxt); + std::swap(_M_before_begin._M_nxt, __x._M_before_begin._M_nxt); std::swap(_M_element_count, __x._M_element_count); // Fix buckets containing the _M_before_begin pointers that can't be // swapped. 
if (_M_begin()) - _M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin(); + _M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin; if (__x._M_begin()) __x._M_buckets[__x._M_bucket_index(__x._M_begin())] - = &(__x._M_before_begin()); + = &__x._M_before_begin; } template_M_nxt = _M_before_begin()._M_nxt; - _M_before_begin()._M_nxt = __node; + __node->_M_nxt = _M_before_begin._M_nxt; + _M_before_begin._M_nxt = __node; if (__node->_M_nxt) // We must update former begin bucket that is pointing to // _M_before_begin. _M_buckets[_M_bucket_index(__node->_M_next())] = __node; - _M_buckets[__bkt] = &_M_before_begin(); + _M_buckets[__bkt] = &_M_before_begin; } } @@ -1608,8 +1470,8 @@ _M_buckets[__next_bkt] = _M_buckets[__bkt]; // Second update before begin node if necessary - if (&_M_before_begin() == _M_buckets[__bkt]) - _M_before_begin()._M_nxt = __next; + if (&_M_before_begin == _M_buckets[__bkt]) + _M_before_begin._M_nxt = __next; _M_buckets[__bkt] = nullptr; } } @@ -1645,7 +1507,7 @@ _M_emplace(std::true_type, _Args&&... __args) { // First build the node to get access to the hash code - __node_type* __node = _M_allocate_node(std::forward<_Args>(__args)...); + __node_type* __node = this->_M_allocate_node(std::forward<_Args>(__args)...); const key_type& __k = this->_M_extract()(__node->_M_v()); __hash_code __code; __try @@ -1654,7 +1516,7 @@ } __catch(...) { - _M_deallocate_node(__node); + this->_M_deallocate_node(__node); __throw_exception_again; } @@ -1662,7 +1524,7 @@ if (__node_type* __p = _M_find_node(__bkt, __k, __code)) { // There is already an equivalent node, no insertion - _M_deallocate_node(__node); + this->_M_deallocate_node(__node); return std::make_pair(iterator(__p), false); } @@ -1684,7 +1546,8 @@ _M_emplace(const_iterator __hint, std::false_type, _Args&&... __args) { // First build the node to get its hash code. - __node_type* __node = _M_allocate_node(std::forward<_Args>(__args)...); + __node_type* __node = + this->_M_allocate_node(std::forward<_Args>(__args)...); __hash_code __code; __try @@ -1693,7 +1556,7 @@ } __catch(...) { - _M_deallocate_node(__node); + this->_M_deallocate_node(__node); __throw_exception_again; } @@ -1733,7 +1596,7 @@ } __catch(...) { - _M_deallocate_node(__node); + this->_M_deallocate_node(__node); __throw_exception_again; } } @@ -1799,7 +1662,7 @@ } __catch(...) 
{ - _M_deallocate_node(__node); + this->_M_deallocate_node(__node); __throw_exception_again; } } @@ -1899,7 +1762,7 @@ __prev_n->_M_nxt = __n->_M_nxt; iterator __result(__n->_M_next()); - _M_deallocate_node(__n); + this->_M_deallocate_node(__n); --_M_element_count; return __result; @@ -1972,7 +1835,7 @@ do { __node_type* __p = __n->_M_next(); - _M_deallocate_node(__n); + this->_M_deallocate_node(__n); __n = __p; ++__result; --_M_element_count; @@ -2014,7 +1877,7 @@ { __node_type* __tmp = __n; __n = __n->_M_next(); - _M_deallocate_node(__tmp); + this->_M_deallocate_node(__tmp); --_M_element_count; if (!__n) break; @@ -2044,10 +1907,10 @@ _H1, _H2, _Hash, _RehashPolicy, _Traits>:: clear() noexcept { - _M_deallocate_nodes(_M_begin()); + this->_M_deallocate_nodes(_M_begin()); __builtin_memset(_M_buckets, 0, _M_bucket_count * sizeof(__bucket_type)); _M_element_count = 0; - _M_before_begin()._M_nxt = nullptr; + _M_before_begin._M_nxt = nullptr; } template:: _M_rehash_aux(size_type __n, std::true_type) { - __bucket_type* __new_buckets = _M_allocate_buckets(__n); + __bucket_type* __new_buckets = this->_M_allocate_buckets(__n); __node_type* __p = _M_begin(); - _M_before_begin()._M_nxt = nullptr; + _M_before_begin._M_nxt = nullptr; std::size_t __bbegin_bkt = 0; while (__p) { @@ -2114,9 +1977,9 @@ std::size_t __bkt = __hash_code_base::_M_bucket_index(__p, __n); if (!__new_buckets[__bkt]) { - __p->_M_nxt = _M_before_begin()._M_nxt; - _M_before_begin()._M_nxt = __p; - __new_buckets[__bkt] = &_M_before_begin(); + __p->_M_nxt = _M_before_begin._M_nxt; + _M_before_begin._M_nxt = __p; + __new_buckets[__bkt] = &_M_before_begin; if (__p->_M_nxt) __new_buckets[__bbegin_bkt] = __p; __bbegin_bkt = __bkt; @@ -2146,10 +2009,10 @@ _H1, _H2, _Hash, _RehashPolicy, _Traits>:: _M_rehash_aux(size_type __n, std::false_type) { - __bucket_type* __new_buckets = _M_allocate_buckets(__n); + __bucket_type* __new_buckets = this->_M_allocate_buckets(__n); __node_type* __p = _M_begin(); - _M_before_begin()._M_nxt = nullptr; + _M_before_begin._M_nxt = nullptr; std::size_t __bbegin_bkt = 0; std::size_t __prev_bkt = 0; __node_type* __prev_p = nullptr; @@ -2194,9 +2057,9 @@ if (!__new_buckets[__bkt]) { - __p->_M_nxt = _M_before_begin()._M_nxt; - _M_before_begin()._M_nxt = __p; - __new_buckets[__bkt] = &_M_before_begin(); + __p->_M_nxt = _M_before_begin._M_nxt; + _M_before_begin._M_nxt = __p; + __new_buckets[__bkt] = &_M_before_begin; if (__p->_M_nxt) __new_buckets[__bbegin_bkt] = __p; __bbegin_bkt = __bkt; Index: testsuite/23_containers/unordered_set/not_default_constructible_hash_neg.cc =================================================================== --- testsuite/23_containers/unordered_set/not_default_constructible_hash_neg.cc (revision 201521) +++ testsuite/23_containers/unordered_set/not_default_constructible_hash_neg.cc (working copy) @@ -19,7 +19,7 @@ // with this library; see the file COPYING3. If not see // . -// { dg-error "default constructible" "" { target *-*-* } 276 } +// { dg-error "default constructible" "" { target *-*-* } 283 } #include Index: testsuite/23_containers/unordered_set/instantiation_neg.cc =================================================================== --- testsuite/23_containers/unordered_set/instantiation_neg.cc (revision 201521) +++ testsuite/23_containers/unordered_set/instantiation_neg.cc (working copy) @@ -19,7 +19,7 @@ // with this library; see the file COPYING3. If not see // . 
-// { dg-error "with noexcept" "" { target *-*-* } 258 }
+// { dg-error "with noexcept" "" { target *-*-* } 265 }
 
 #include <unordered_set>
Index: python/libstdcxx/v6/printers.py
===================================================================
--- python/libstdcxx/v6/printers.py	(revision 201521)
+++ python/libstdcxx/v6/printers.py	(working copy)
@@ -652,7 +652,7 @@
 class StdHashtableIterator:
     def __init__(self, hash):
-        self.node = hash['_M_bbegin']['_M_node']['_M_nxt']
+        self.node = hash['_M_before_begin']['_M_nxt']
         self.node_type = find_type(hash.type, '__node_type').pointer()
 
     def __iter__(self):
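
For readers less familiar with the hashtable internals, here is a minimal
sketch of the EBO idea the new _Hashtable_alloc base class is built on.
TableAllocBase, EmptyNodeAlloc and Table are hypothetical names, not the real
code; only the idea comes from the patch: keep the node allocator in a private
base class so that an empty allocator adds no storage to the container object.

// Sketch only: not the libstdc++ sources.  Shows why keeping the node
// allocator in a dedicated (private) base class lets an empty allocator add
// no storage to the container, i.e. the Empty Base Optimization that
// _Hashtable_alloc / _Hashtable_ebo_helper rely on.
#include <cstddef>
#include <cstdio>
#include <new>

struct Node { Node* next; int value; };

// Stateless node allocator, standing in for std::allocator<Node>.
struct EmptyNodeAlloc
{
  Node* allocate(std::size_t n)
  { return static_cast<Node*>(::operator new(n * sizeof(Node))); }

  void deallocate(Node* p, std::size_t)
  { ::operator delete(p); }
};

// Analogue of _Hashtable_alloc: owns the allocator through inheritance.
template<typename NodeAlloc>
struct TableAllocBase : private NodeAlloc
{
  NodeAlloc& node_allocator() { return *this; }             // _M_node_allocator()
  const NodeAlloc& node_allocator() const { return *this; }
};

struct Table : TableAllocBase<EmptyNodeAlloc>
{
  Node* before_begin_next = nullptr;  // models _M_before_begin._M_nxt
  std::size_t bucket_count = 0;
};

int main()
{
  // With EBO the empty base typically contributes zero bytes, so the table
  // is no bigger than its own data members.
  std::printf("sizeof(Table) = %zu, members alone = %zu\n",
              sizeof(Table), sizeof(Node*) + sizeof(std::size_t));
}

This is the same reason _Hashtable_ebo_helper exists; _Hashtable_alloc simply
centralizes the allocator plus all node and bucket allocation behind it.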
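
In the same spirit, a rough sketch of the allocator rebinding the patch
performs; the names are invented, and std::allocator_traits::rebind_alloc
stands in for the internal __alloctr_rebind and __gnu_cxx::__alloc_traits
helpers.

// Sketch only: RebindDemo and HashNode are invented stand-ins.  The patch
// rebinds the user's _Alloc to the node type in _Hashtable and then, inside
// _Hashtable_alloc, to the value and bucket-pointer types; a portable
// std::allocator_traits spelling of that rebinding looks like this.
#include <memory>

template<typename Value>
struct HashNode { HashNode* next; Value value; };   // like _Hash_node

template<typename Value, typename Alloc = std::allocator<Value>>
struct RebindDemo
{
  using node_type = HashNode<Value>;
  using node_alloc_type =                            // __node_alloc_type
    typename std::allocator_traits<Alloc>::template rebind_alloc<node_type>;
  using value_alloc_type =                           // __value_alloc_type
    typename std::allocator_traits<node_alloc_type>::template rebind_alloc<Value>;
  using bucket_type = HashNode<Value>*;              // really _Hash_node_base*
  using bucket_alloc_type =                          // __bucket_alloc_type
    typename std::allocator_traits<node_alloc_type>::template rebind_alloc<bucket_type>;
};

int main()
{
  RebindDemo<int>::node_alloc_type na;    // e.g. std::allocator<HashNode<int>>
  RebindDemo<int>::bucket_alloc_type ba;  // e.g. std::allocator<HashNode<int>*>

  auto* n = std::allocator_traits<decltype(na)>::allocate(na, 1);
  auto* b = std::allocator_traits<decltype(ba)>::allocate(ba, 8);
  std::allocator_traits<decltype(na)>::deallocate(na, n, 1);
  std::allocator_traits<decltype(ba)>::deallocate(ba, b, 8);
}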
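
And finally a toy model, with invented names, of the restored _M_before_begin
layout; it only illustrates the bucket-points-to-previous-node convention that
the hashtable.h hunks and the printers.py change both depend on.

// Sketch only: MiniTable is invented for illustration, it is not _Hashtable.
// Each bucket stores the node that PRECEDES its first element; the single
// before_begin sentinel (the restored _M_before_begin member) fronts the
// whole list, so full-container iteration is one linked-list walk.
#include <cstddef>
#include <cstdio>
#include <vector>

struct NodeBase { NodeBase* nxt = nullptr; };            // like _Hash_node_base
struct Node : NodeBase { int value; std::size_t bkt; };  // like _Hash_node

struct MiniTable
{
  NodeBase before_begin;                  // like _M_before_begin
  std::vector<NodeBase*> buckets;         // entry = node before the bucket's first node

  explicit MiniTable(std::size_t n) : buckets(n, nullptr) { }

  ~MiniTable()
  {
    for (NodeBase* p = before_begin.nxt; p;)
      { NodeBase* t = p; p = p->nxt; delete static_cast<Node*>(t); }
  }

  void insert(int v)
  {
    Node* n = new Node;
    n->value = v;
    n->bkt = static_cast<std::size_t>(v) % buckets.size();

    if (NodeBase* prev = buckets[n->bkt])
      {
        // Bucket already populated: splice right after its "before" node.
        n->nxt = prev->nxt;
        prev->nxt = n;
      }
    else
      {
        // Bucket becomes non-empty: new node goes to the global front and the
        // bucket records the sentinel as its "before" node.
        n->nxt = before_begin.nxt;
        before_begin.nxt = n;
        buckets[n->bkt] = &before_begin;
        // The bucket that owned the former front node is now preceded by n.
        if (n->nxt)
          buckets[static_cast<Node*>(n->nxt)->bkt] = n;
      }
  }
};

int main()
{
  MiniTable t(4);
  t.insert(1); t.insert(5); t.insert(2);

  // One walk starting at before_begin.nxt visits every element, which mirrors
  // what the updated StdHashtableIterator reads via _M_before_begin._M_nxt.
  for (NodeBase* p = t.before_begin.nxt; p; p = p->nxt)
    std::printf("%d ", static_cast<Node*>(p)->value);
  std::printf("\n");
}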