
[v3] Fix management of non-empty hash functor

Message ID 50CA498A.2070902@gmail.com
State New

Commit Message

François Dumont Dec. 13, 2012, 9:32 p.m. UTC
Hi

     A performance patch proposed in another mailing thread included a
patch to improve the management of hash functors with state. This part
is, I think, less sensitive than the performance patch itself, so I am
proposing it independently. If you don't mind, the only other change I
would like to commit here is the modification to the performance tests.

     Thanks to this patch, whether the hash code is cached no longer
depends on the hash functor being empty or final. I only keep the
default-constructible condition so that local_iterator can itself be
default constructible, which is a Standard requirement.
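
     To illustrate, here is a minimal sketch of what the new
buckets/swap.cc test below checks (modulo_hash is just an illustrative
name mirroring the hash functor used in that test): a stateful but
default-constructible, noexcept hash functor keeps the default
no-cache behavior for integral keys, and local iterators carry a copy
of the functors used for bucket mapping, so they stay usable after the
containers swap their functors.

#include <unordered_set>
#include <iterator>
#include <cassert>

// Stateful, default-constructible, noexcept hash functor.
struct modulo_hash
{
  modulo_hash() = default;
  modulo_hash(int modulo) : _M_modulo(modulo) { }

  std::size_t operator()(int val) const noexcept
  { return val % _M_modulo; }

  int _M_modulo = 10;
};

int main()
{
  // int is integral and modulo_hash is noexcept and default
  // constructible, so __cache_default is false: the hash code is not
  // cached even though the functor is not empty.
  std::unordered_set<int, modulo_hash> us1(10, modulo_hash(13));
  std::unordered_set<int, modulo_hash> us2(10, modulo_hash(7));

  for (int i = 0; i != 5 * 13; ++i)
    us1.insert(i);

  // The local iterators embed a copy of the functors mapping a node
  // to its bucket; bucket 12 holds the 5 values with val % 13 == 12.
  auto lit = us1.begin(12);
  auto litend = us1.end(12);
  assert(std::distance(lit, litend) == 5);

  // ...so they remain usable even after the containers have swapped
  // their hash functors.
  us1.swap(us2);
  assert(std::distance(lit, litend) == 5);
}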

2012-12-14  François Dumont  <fdumont@gcc.gnu.org>

     * include/bits/hashtable_policy.h (_Local_iterator_base): Use
     _Hashtable_ebo_helper to embed necessary functors into the
     local_iterator. Pass the functors involved in the hash code
     computation by copy.
     * include/bits/hashtable.h (__cache_default): Cache only if the
     hash functor is not noexcept qualified, if it is slow, or if it
     does not expose a default constructor.
     * include/debug/unordered_set
     (std::__debug::unordered_set<>::erase): Detect local iterators to
     invalidate using contained node rather than generating a dummy
     local_iterator instance.
     (std::__debug::unordered_multiset<>::erase): Likewise.
     * include/debug/unordered_map
     (std::__debug::unordered_map<>::erase): Likewise.
     (std::__debug::unordered_multimap<>::erase): Likewise.
     * testsuite/performance/23_containers/insert_erase/41975.cc: Test
     std::tr1 and std versions of unordered_set regardless of any
     macro. Add a test of the default cache behavior.
     * testsuite/performance/23_containers/insert/54075.cc: Likewise.
     * testsuite/23_containers/unordered_set/instantiation_neg.cc:
     Adapt line number.
     * testsuite/23_containers/unordered_set/
     not_default_constructible_hash_neg.cc: New.
     * testsuite/23_containers/unordered_set/buckets/swap.cc: New.

Tested under Linux x86_64, normal and debug modes.

OK to commit?

François

Patch

Index: include/bits/hashtable_policy.h
===================================================================
--- include/bits/hashtable_policy.h	(revision 194488)
+++ include/bits/hashtable_policy.h	(working copy)
@@ -981,9 +981,17 @@ 
       typedef void* 					__hash_code;
       typedef _Hash_node<_Value, false>			__node_type;
 
-      // We need the default constructor for the local iterators.
+    public:
+      // We need the following public for local iterators.
+
       _Hash_code_base() = default;
+      _Hash_code_base(const _Hash_code_base&) = default;
 
+      std::size_t
+      _M_bucket_index(const __node_type* __p, std::size_t __n) const
+      { return _M_ranged_hash()(_M_extract()(__p->_M_v), __n); }
+
+    protected:
       _Hash_code_base(const _ExtractKey& __ex, const _H1&, const _H2&,
 		      const _Hash& __h)
       : _EboExtractKey(__ex), _EboHash(__h) { }
@@ -996,10 +1004,6 @@ 
       _M_bucket_index(const _Key& __k, __hash_code, std::size_t __n) const
       { return _M_ranged_hash()(__k, __n); }
 
-      std::size_t
-      _M_bucket_index(const __node_type* __p, std::size_t __n) const
-      { return _M_ranged_hash()(_M_extract()(__p->_M_v), __n); }
-
       void
       _M_store_code(__node_type*, __hash_code) const
       { }
@@ -1062,13 +1066,19 @@ 
       hash_function() const
       { return _M_h1(); }
 
+      // We need the following public for the local iterators.
       typedef std::size_t 				__hash_code;
       typedef _Hash_node<_Value, false>			__node_type;
 
-    protected:
-      // We need the default constructor for the local iterators.
       _Hash_code_base() = default;
+      _Hash_code_base(const _Hash_code_base&) = default;
 
+      std::size_t
+      _M_bucket_index(const __node_type* __p,
+		      std::size_t __n) const
+      { return _M_h2()(_M_h1()(_M_extract()(__p->_M_v)), __n); }
+
+    protected:
       _Hash_code_base(const _ExtractKey& __ex,
 		      const _H1& __h1, const _H2& __h2,
 		      const _Default_ranged_hash&)
@@ -1082,11 +1092,6 @@ 
       _M_bucket_index(const _Key&, __hash_code __c, std::size_t __n) const
       { return _M_h2()(__c, __n); }
 
-      std::size_t
-      _M_bucket_index(const __node_type* __p,
-		      std::size_t __n) const
-      { return _M_h2()(_M_h1()(_M_extract()(__p->_M_v)), __n); }
-
       void
       _M_store_code(__node_type*, __hash_code) const
       { }
@@ -1148,6 +1153,10 @@ 
       typedef std::size_t 				__hash_code;
       typedef _Hash_node<_Value, true>			__node_type;
 
+      // Need the following public for local iterators.
+      const _H2&
+      _M_h2() const { return _EboH2::_S_cget(*this); }
+
     protected:
       _Hash_code_base(const _ExtractKey& __ex,
 		      const _H1& __h1, const _H2& __h2,
@@ -1195,9 +1204,6 @@ 
       _H1&
       _M_h1() { return _EboH1::_S_get(*this); }
 
-      const _H2&
-      _M_h2() const { return _EboH2::_S_cget(*this); }
-
       _H2&
       _M_h2() { return _EboH2::_S_get(*this); }
     };
@@ -1250,12 +1256,20 @@ 
 	   typename _H1, typename _H2, typename _Hash>
     struct _Local_iterator_base<_Key, _Value, _ExtractKey,
 				_H1, _H2, _Hash, true>
-    : private _H2
+    : private _Hashtable_ebo_helper<0, _H2>
     {
+    protected:
+      typedef _Hashtable_ebo_helper<0, _H2> _EboH2;
+      typedef _Hash_code_base<_Key, _Value, _ExtractKey,
+			      _H1, _H2, _Hash, true> _HashCodeBase;
+
+    public:
       _Local_iterator_base() = default;
-      _Local_iterator_base(_Hash_node<_Value, true>* __p,
+      _Local_iterator_base(const _HashCodeBase& __base,
+			   _Hash_node<_Value, true>* __p,
 			   std::size_t __bkt, std::size_t __bkt_count)
-      : _M_cur(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count) { }
+	: _EboH2(__base._M_h2()),
+	  _M_cur(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count) { }
 
       void
       _M_incr()
@@ -1263,15 +1277,13 @@ 
 	_M_cur = _M_cur->_M_next();
 	if (_M_cur)
 	  {
-	    std::size_t __bkt = _M_h2()(_M_cur->_M_hash_code, _M_bucket_count);
+	    std::size_t __bkt
+	      = _EboH2::_S_get(*this)(_M_cur->_M_hash_code, _M_bucket_count);
 	    if (__bkt != _M_bucket)
 	      _M_cur = nullptr;
 	  }
       }
 
-      const _H2& _M_h2() const
-      { return *this; }
-
       _Hash_node<_Value, true>*  _M_cur;
       std::size_t _M_bucket;
       std::size_t _M_bucket_count;
@@ -1282,13 +1294,29 @@ 
 	   typename _H1, typename _H2, typename _Hash>
     struct _Local_iterator_base<_Key, _Value, _ExtractKey,
 				_H1, _H2, _Hash, false>
-    : private _Hash_code_base<_Key, _Value, _ExtractKey,
-			      _H1, _H2, _Hash, false>
+    : private _Hashtable_ebo_helper<0,
+				    _Hash_code_base<_Key, _Value, _ExtractKey,
+						    _H1, _H2, _Hash, false>>
     {
+    protected:
+      typedef _Hash_code_base<_Key, _Value, _ExtractKey,
+			      _H1, _H2, _Hash, false> _HashCodeBase;
+      typedef _Hashtable_ebo_helper<0, _HashCodeBase> _EboHashCodeBase;
+
+      std::size_t
+      _M_bucket_index(_Hash_node<_Value, false>* __cur, std::size_t __bck_count)
+      {
+	return _EboHashCodeBase::_S_get(*this)._M_bucket_index(__cur,
+							       __bck_count);
+      }
+
+    public:
       _Local_iterator_base() = default;
-      _Local_iterator_base(_Hash_node<_Value, false>* __p,
+      _Local_iterator_base(const _HashCodeBase& __base,
+			   _Hash_node<_Value, false>* __p,
 			   std::size_t __bkt, std::size_t __bkt_count)
-      : _M_cur(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count) { }
+	: _EboHashCodeBase(__base),
+	  _M_cur(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count) { }
 
       void
       _M_incr()
@@ -1296,7 +1324,7 @@ 
 	_M_cur = _M_cur->_M_next();
 	if (_M_cur)
 	  {
-	    std::size_t __bkt = this->_M_bucket_index(_M_cur, _M_bucket_count);
+	    std::size_t __bkt = _M_bucket_index(_M_cur, _M_bucket_count);
 	    if (__bkt != _M_bucket)
 	      _M_cur = nullptr;
 	  }
@@ -1333,6 +1361,11 @@ 
     : public _Local_iterator_base<_Key, _Value, _ExtractKey,
 				  _H1, _H2, _Hash, __cache>
     {
+    private:
+      typedef _Local_iterator_base<_Key, _Value, _ExtractKey,
+				   _H1, _H2, _Hash, __cache> _Base;
+      typedef typename _Base::_HashCodeBase _HashCodeBase;
+    public:
       typedef _Value                                   value_type;
       typedef typename std::conditional<__constant_iterators,
 					const _Value*, _Value*>::type
@@ -1346,10 +1379,10 @@ 
       _Local_iterator() = default;
 
       explicit
-      _Local_iterator(_Hash_node<_Value, __cache>* __p,
+      _Local_iterator(const _HashCodeBase& __hash_code_base,
+		      _Hash_node<_Value, __cache>* __p,
 		      std::size_t __bkt, std::size_t __bkt_count)
-      : _Local_iterator_base<_Key, _Value, _ExtractKey, _H1, _H2, _Hash,
-			     __cache>(__p, __bkt, __bkt_count)
+	: _Base(__hash_code_base, __p, __bkt, __bkt_count)
       { }
 
       reference
@@ -1384,6 +1417,12 @@ 
     : public _Local_iterator_base<_Key, _Value, _ExtractKey,
 				  _H1, _H2, _Hash, __cache>
     {
+    private:
+      typedef _Local_iterator_base<_Key, _Value, _ExtractKey,
+				   _H1, _H2, _Hash, __cache> _Base;
+      typedef typename _Base::_HashCodeBase _HashCodeBase;
+
+    public:
       typedef _Value                                   value_type;
       typedef const _Value*                            pointer;
       typedef const _Value&                            reference;
@@ -1393,19 +1432,17 @@ 
       _Local_const_iterator() = default;
 
       explicit
-      _Local_const_iterator(_Hash_node<_Value, __cache>* __p,
+      _Local_const_iterator(const _HashCodeBase& __hash_code_base,
+			    _Hash_node<_Value, __cache>* __p,
 			    std::size_t __bkt, std::size_t __bkt_count)
-      : _Local_iterator_base<_Key, _Value, _ExtractKey, _H1, _H2, _Hash,
-			     __cache>(__p, __bkt, __bkt_count)
+	: _Base(__hash_code_base, __p, __bkt, __bkt_count)
       { }
 
       _Local_const_iterator(const _Local_iterator<_Key, _Value, _ExtractKey,
 						  _H1, _H2, _Hash,
 						  __constant_iterators,
 						  __cache>& __x)
-      : _Local_iterator_base<_Key, _Value, _ExtractKey, _H1, _H2, _Hash,
-			     __cache>(__x._M_cur, __x._M_bucket,
-				      __x._M_bucket_count)
+	: _Base(__x)
       { }
 
       reference
Index: include/bits/hashtable.h
===================================================================
--- include/bits/hashtable.h	(revision 194488)
+++ include/bits/hashtable.h	(working copy)
@@ -39,10 +39,14 @@ 
 _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
   template<typename _Tp, typename _Hash>
-    using __cache_default =  __not_<__and_<is_integral<_Tp>,
-					   is_empty<_Hash>,
-				  integral_constant<bool, !__is_final(_Hash)>,
-				 __detail::__is_noexcept_hash<_Tp, _Hash> >>;
+    using __cache_default
+      =  __not_<__and_<// Do not cache for builtin types.
+		       is_integral<_Tp>,
+		       // Mandatory to make local_iterator default
+		       // constructible.
+		       is_default_constructible<_Hash>,
+		       // Mandatory to have erase not throwing.
+		       __detail::__is_noexcept_hash<_Tp, _Hash>>>;
 
   /**
    *  Primary class template _Hashtable.
@@ -249,21 +253,21 @@ 
 		    " or qualify your hash functor with noexcept");
 
       // Following two static assertions are necessary to guarantee
-      // that swapping two hashtable instances won't invalidate
-      // associated local iterators.
+      // that local_iterator will be default constructible.
 
-      // When hash codes are cached local iterator only uses H2 which
-      // must then be empty.
-      static_assert(__if_hash_cached<is_empty<_H2>>::value,
+      // When hash codes are cached local iterator uses H2 functor which must
+      // then be default constructible.
+      static_assert(__if_hash_cached<is_default_constructible<_H2>>::value,
 		    "Functor used to map hash code to bucket index"
-		    " must be empty");
+		    " must be default constructible");
 
       // When hash codes are not cached local iterator is going to use
       // __hash_code_base above to compute node bucket index so it has
-      // to be empty.
-      static_assert(__if_hash_not_cached<is_empty<__hash_code_base>>::value,
-		   "Cache the hash code or make functors involved in hash code"
-		   " and bucket index computation empty");
+      // to be default constructible.
+      static_assert(__if_hash_not_cached<
+		      is_default_constructible<__hash_code_base>>::value,
+		    "Cache the hash code or make functors involved in hash code"
+		    " and bucket index computation default constructibles");
 
     public:
       template<typename _Keya, typename _Valuea, typename _Alloca,
@@ -500,30 +504,37 @@ 
 
       local_iterator
       begin(size_type __n)
-      { return local_iterator(_M_bucket_begin(__n), __n, _M_bucket_count); }
+      {
+	return local_iterator(*this, _M_bucket_begin(__n),
+			      __n, _M_bucket_count);
+      }
 
       local_iterator
       end(size_type __n)
-      { return local_iterator(nullptr, __n, _M_bucket_count); }
+      { return local_iterator(*this, nullptr, __n, _M_bucket_count); }
 
       const_local_iterator
       begin(size_type __n) const
-      { return const_local_iterator(_M_bucket_begin(__n), __n,
-				    _M_bucket_count); }
+      {
+	return const_local_iterator(*this, _M_bucket_begin(__n),
+				    __n, _M_bucket_count);
+      }
 
       const_local_iterator
       end(size_type __n) const
-      { return const_local_iterator(nullptr, __n, _M_bucket_count); }
+      { return const_local_iterator(*this, nullptr, __n, _M_bucket_count); }
 
       // DR 691.
       const_local_iterator
       cbegin(size_type __n) const
-      { return const_local_iterator(_M_bucket_begin(__n), __n,
-				    _M_bucket_count); }
+      {
+	return const_local_iterator(*this, _M_bucket_begin(__n),
+				    __n, _M_bucket_count);
+      }
 
       const_local_iterator
       cend(size_type __n) const
-      { return const_local_iterator(nullptr, __n, _M_bucket_count); }
+      { return const_local_iterator(*this, nullptr, __n, _M_bucket_count); }
 
       float
       load_factor() const noexcept
@@ -1141,7 +1152,7 @@ 
 	{
 	  if (this->_M_equals(__k, __code, __p))
 	    return __prev_p;
-	  if (!(__p->_M_nxt) || _M_bucket_index(__p->_M_next()) != __n)
+	  if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __n)
 	    break;
 	  __prev_p = __p;
 	}
Index: include/debug/unordered_map
===================================================================
--- include/debug/unordered_map	(revision 194488)
+++ include/debug/unordered_map	(working copy)
@@ -364,10 +364,9 @@ 
 	  {
 	    this->_M_invalidate_if([__victim](_Base_const_iterator __it)
 			    { return __it == __victim; });
-	    _Base_local_iterator __local_victim = _S_to_local(__victim);
 	    this->_M_invalidate_local_if(
-			    [__local_victim](_Base_const_local_iterator __it)
-			    { return __it == __local_victim; });
+			    [__victim](_Base_const_local_iterator __it)
+			    { return __it._M_cur == __victim._M_cur; });
 	    size_type __bucket_count = this->bucket_count();
 	    _Base::erase(__victim);
 	    _M_check_rehashed(__bucket_count);
@@ -383,10 +382,9 @@ 
 	_Base_const_iterator __victim = __it.base();
 	this->_M_invalidate_if([__victim](_Base_const_iterator __it)
 			{ return __it == __victim; });
-	_Base_const_local_iterator __local_victim = _S_to_local(__victim);
 	this->_M_invalidate_local_if(
-			[__local_victim](_Base_const_local_iterator __it)
-			{ return __it == __local_victim; });
+			[__victim](_Base_const_local_iterator __it)
+			{ return __it._M_cur == __victim._M_cur; });
 	size_type __bucket_count = this->bucket_count();
 	_Base_iterator __next = _Base::erase(__it.base()); 
 	_M_check_rehashed(__bucket_count);
@@ -410,10 +408,9 @@ 
 				  ._M_iterator(__last, "last"));
 	    this->_M_invalidate_if([__tmp](_Base_const_iterator __it)
 			    { return __it == __tmp; });
-	    _Base_const_local_iterator __local_tmp = _S_to_local(__tmp);
 	    this->_M_invalidate_local_if(
-			    [__local_tmp](_Base_const_local_iterator __it)
-			    { return __it == __local_tmp; });
+			    [__tmp](_Base_const_local_iterator __it)
+			    { return __it._M_cur == __tmp._M_cur; });
 	  }
 	size_type __bucket_count = this->bucket_count();
 	_Base_iterator __next = _Base::erase(__first.base(), __last.base());
@@ -452,22 +449,6 @@ 
 	if (__prev_count != this->bucket_count())
 	  _M_invalidate_locals();
       }
-
-      static _Base_local_iterator
-      _S_to_local(_Base_iterator __it)
-      {
-        // The returned local iterator will not be incremented so we don't
-	// need to compute __it's node bucket
-	return _Base_local_iterator(__it._M_cur, 0, 0);
-      }
-
-      static _Base_const_local_iterator
-      _S_to_local(_Base_const_iterator __it)
-      {
-        // The returned local iterator will not be incremented so we don't
-	// need to compute __it's node bucket
-	return _Base_const_local_iterator(__it._M_cur, 0, 0);
-      }
     };
 
   template<typename _Key, typename _Tp, typename _Hash,
@@ -812,10 +793,9 @@ 
 	  {
 	    this->_M_invalidate_if([__victim](_Base_const_iterator __it)
 			    { return __it == __victim; });
-	    _Base_local_iterator __local_victim = _S_to_local(__victim);
 	    this->_M_invalidate_local_if(
-			    [__local_victim](_Base_const_local_iterator __it)
-			    { return __it == __local_victim; });
+			    [__victim](_Base_const_local_iterator __it)
+			    { return __it._M_cur == __victim._M_cur; });
 	    _Base::erase(__victim++);
 	    ++__ret;
 	  }
@@ -830,10 +810,9 @@ 
 	_Base_const_iterator __victim = __it.base();
 	this->_M_invalidate_if([__victim](_Base_const_iterator __it)
 			{ return __it == __victim; });
-	_Base_const_local_iterator __local_victim = _S_to_local(__victim);
 	this->_M_invalidate_local_if(
-			[__local_victim](_Base_const_local_iterator __it)
-			{ return __it == __local_victim; });
+			[__victim](_Base_const_local_iterator __it)
+			{ return __it._M_cur == __victim._M_cur; });
 	size_type __bucket_count = this->bucket_count();
 	_Base_iterator __next = _Base::erase(__it.base());
 	_M_check_rehashed(__bucket_count);
@@ -857,10 +836,9 @@ 
 				  ._M_iterator(__last, "last"));
 	    this->_M_invalidate_if([__tmp](_Base_const_iterator __it)
 			    { return __it == __tmp; });
-	    _Base_const_local_iterator __local_tmp = _S_to_local(__tmp);
 	    this->_M_invalidate_local_if(
-			    [__local_tmp](_Base_const_local_iterator __it)
-			    { return __it == __local_tmp; });
+			    [__tmp](_Base_const_local_iterator __it)
+			    { return __it._M_cur == __tmp._M_cur; });
 	  }
 	size_type __bucket_count = this->bucket_count();
 	_Base_iterator __next = _Base::erase(__first.base(), __last.base());
@@ -899,22 +877,6 @@ 
 	if (__prev_count != this->bucket_count())
 	  _M_invalidate_locals();
       }
-
-      static _Base_local_iterator
-      _S_to_local(_Base_iterator __it)
-      {
-        // The returned local iterator will not be incremented so we don't
-	// need to compute __it's node bucket
-	return _Base_local_iterator(__it._M_cur, 0, 0);
-      }
-
-      static _Base_const_local_iterator
-      _S_to_local(_Base_const_iterator __it)
-      {
-        // The returned local iterator will not be incremented so we don't
-	// need to compute __it's node bucket
-	return _Base_const_local_iterator(__it._M_cur, 0, 0);
-      }
     };
 
   template<typename _Key, typename _Tp, typename _Hash,
Index: include/debug/unordered_set
===================================================================
--- include/debug/unordered_set	(revision 194488)
+++ include/debug/unordered_set	(working copy)
@@ -359,10 +359,9 @@ 
 	    this->_M_invalidate_if(
 			    [__victim](_Base_const_iterator __it)
 			    { return __it == __victim; });
-	    _Base_local_iterator __local_victim = _S_to_local(__victim);
 	    this->_M_invalidate_local_if(
-			    [__local_victim](_Base_const_local_iterator __it)
-			    { return __it == __local_victim; });
+			    [__victim](_Base_const_local_iterator __it)
+			    { return __it._M_cur == __victim._M_cur; });
 	    size_type __bucket_count = this->bucket_count();
 	    _Base::erase(__victim);
 	    _M_check_rehashed(__bucket_count);
@@ -379,10 +378,9 @@ 
 	this->_M_invalidate_if(
 			[__victim](_Base_const_iterator __it)
 			{ return __it == __victim; });
-	_Base_const_local_iterator __local_victim = _S_to_local(__victim);
 	this->_M_invalidate_local_if(
-			[__local_victim](_Base_const_local_iterator __it)
-			{ return __it == __local_victim; });
+			[__victim](_Base_const_local_iterator __it)
+			{ return __it._M_cur == __victim._M_cur; });
 	size_type __bucket_count = this->bucket_count();
 	_Base_iterator __next = _Base::erase(__it.base());
 	_M_check_rehashed(__bucket_count);
@@ -407,10 +405,9 @@ 
 	    this->_M_invalidate_if(
 			    [__tmp](_Base_const_iterator __it)
 			    { return __it == __tmp; });
-	    _Base_const_local_iterator __local_tmp = _S_to_local(__tmp);
 	    this->_M_invalidate_local_if(
-			    [__local_tmp](_Base_const_local_iterator __it)
-			    { return __it == __local_tmp; });
+			    [__tmp](_Base_const_local_iterator __it)
+			    { return __it._M_cur == __tmp._M_cur; });
 	  }
 	size_type __bucket_count = this->bucket_count();
 	_Base_iterator __next = _Base::erase(__first.base(),
@@ -451,22 +448,6 @@ 
 	if (__prev_count != this->bucket_count())
 	  _M_invalidate_locals();
       }
-
-      static _Base_local_iterator
-      _S_to_local(_Base_iterator __it)
-      {
-        // The returned local iterator will not be incremented so we don't
-	// need to compute __it's node bucket
-	return _Base_local_iterator(__it._M_cur, 0, 0);
-      }
-
-      static _Base_const_local_iterator
-      _S_to_local(_Base_const_iterator __it)
-      {
-        // The returned local iterator will not be incremented so we don't
-	// need to compute __it's node bucket
-	return _Base_const_local_iterator(__it._M_cur, 0, 0);
-      }
     };
 
   template<typename _Value, typename _Hash, typename _Pred, typename _Alloc>
@@ -803,10 +784,9 @@ 
 	  {
 	    this->_M_invalidate_if([__victim](_Base_const_iterator __it)
 			    { return __it == __victim; });
-	    _Base_local_iterator __local_victim = _S_to_local(__victim);
 	    this->_M_invalidate_local_if(
-			    [__local_victim](_Base_const_local_iterator __it)
-			    { return __it == __local_victim; });
+			    [__victim](_Base_const_local_iterator __it)
+			    { return __it._M_cur == __victim._M_cur; });
 	    _Base::erase(__victim++);
 	    ++__ret;
 	  }
@@ -820,10 +800,9 @@ 
 	_Base_const_iterator __victim = __it.base();
 	this->_M_invalidate_if([__victim](_Base_const_iterator __it)
 			{ return __it == __victim; });
-	_Base_const_local_iterator __local_victim = _S_to_local(__victim);
 	this->_M_invalidate_local_if(
-			[__local_victim](_Base_const_local_iterator __it)
-			{ return __it == __local_victim; });
+			[__victim](_Base_const_local_iterator __it)
+			{ return __it._M_cur == __victim._M_cur; });
 	return iterator(_Base::erase(__it.base()), this);
       }
 
@@ -844,10 +823,9 @@ 
 				  ._M_iterator(__last, "last"));
 	    this->_M_invalidate_if([__tmp](_Base_const_iterator __it)
 			    { return __it == __tmp; });
-	    _Base_const_local_iterator __local_tmp = _S_to_local(__tmp);
 	    this->_M_invalidate_local_if(
-			    [__local_tmp](_Base_const_local_iterator __it)
-			    { return __it == __local_tmp; });
+			    [__tmp](_Base_const_local_iterator __it)
+			    { return __it._M_cur == __tmp._M_cur; });
 	  }
 	return iterator(_Base::erase(__first.base(),
 				     __last.base()), this);
@@ -884,22 +862,6 @@ 
 	if (__prev_count != this->bucket_count())
 	  _M_invalidate_locals();
       }
-
-      static _Base_local_iterator
-      _S_to_local(_Base_iterator __it)
-      {
-        // The returned local iterator will not be incremented so we don't
-	// need to compute __it's node bucket
-	return _Base_local_iterator(__it._M_cur, 0, 0);
-      }
-
-      static _Base_const_local_iterator
-      _S_to_local(_Base_const_iterator __it)
-      {
-        // The returned local iterator will not be incremented so we don't
-	// need to compute __it's node bucket
-	return _Base_const_local_iterator(__it._M_cur, 0, 0);
-      }
     };
 
   template<typename _Value, typename _Hash, typename _Pred, typename _Alloc>
Index: testsuite/23_containers/unordered_set/not_default_constructible_hash_neg.cc
===================================================================
--- testsuite/23_containers/unordered_set/not_default_constructible_hash_neg.cc	(revision 0)
+++ testsuite/23_containers/unordered_set/not_default_constructible_hash_neg.cc	(revision 0)
@@ -0,0 +1,51 @@ 
+// { dg-do compile }
+// { dg-options "-std=c++11" }
+// { dg-require-normal-mode "" }
+
+// Copyright (C) 2012 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-error "default constructibles" "" { target *-*-* } 268 }
+
+#include <unordered_set>
+
+namespace
+{
+  struct hash
+  {
+    hash(std::size_t seed)
+      : _M_seed(seed)
+    { }
+
+    std::size_t operator() (int val) const noexcept
+    { return val ^ _M_seed; }
+
+  private:
+    std::size_t _M_seed;
+  };
+}
+
+void
+test01()
+{
+  using traits = std::__detail::_Hashtable_traits<false, true, true>;
+  using hashtable = std::__uset_hashtable<int, hash,
+					  std::equal_to<int>,
+					  std::allocator<int>, traits>;
+
+  hashtable ht(10, hash(1));
+}
Index: testsuite/23_containers/unordered_set/buckets/swap.cc
===================================================================
--- testsuite/23_containers/unordered_set/buckets/swap.cc	(revision 0)
+++ testsuite/23_containers/unordered_set/buckets/swap.cc	(revision 0)
@@ -0,0 +1,72 @@ 
+// { dg-options "-std=c++11" }
+
+// Copyright (C) 2012 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+#include <testsuite_hooks.h>
+#include <unordered_set>
+
+namespace
+{
+  struct hash
+  {
+    hash() = default;
+    hash(int modulo)
+      : _M_modulo(modulo)
+    { }
+
+    std::size_t operator() (int val) const noexcept
+    { return val % _M_modulo; }
+
+    int _M_modulo;
+  };
+}
+
+void
+test01()
+{
+  bool test __attribute__((unused)) = true;
+
+  // static_assert(std::__cache_default<int, hash>::value,
+  // 		"Unexpected default cache value");
+  typedef std::unordered_set<int, hash> us_t;
+  us_t us1(10, hash(13));
+  us_t us2(10, hash(7));
+
+  VERIFY( us1.hash_function()._M_modulo == 13 );
+  VERIFY( us2.hash_function()._M_modulo == 7 );
+
+  const int nb = 5;
+  for (int i = 0; i != nb * us1.hash_function()._M_modulo; ++i)
+    us1.insert(i);
+
+  us_t::local_iterator lit = us1.begin(12);
+  us_t::local_iterator litend = us1.end(12);
+  VERIFY( std::distance(lit, litend) == nb );
+
+  us1.swap(us2);
+
+  VERIFY( us1.hash_function()._M_modulo == 7 );
+  VERIFY( us2.hash_function()._M_modulo == 13 );
+
+  VERIFY( std::distance(lit, litend) == nb );
+}
+
+int main()
+{
+  test01();
+}
Index: testsuite/23_containers/unordered_set/instantiation_neg.cc
===================================================================
--- testsuite/23_containers/unordered_set/instantiation_neg.cc	(revision 194488)
+++ testsuite/23_containers/unordered_set/instantiation_neg.cc	(working copy)
@@ -19,7 +19,7 @@ 
 // with this library; see the file COPYING3.  If not see
 // <http://www.gnu.org/licenses/>.
 
-// { dg-error "with noexcept" "" { target *-*-* } 247 }
+// { dg-error "with noexcept" "" { target *-*-* } 251 }
 
 #include <unordered_set>
 
Index: testsuite/performance/23_containers/insert_erase/41975.cc
===================================================================
--- testsuite/performance/23_containers/insert_erase/41975.cc	(revision 194488)
+++ testsuite/performance/23_containers/insert_erase/41975.cc	(working copy)
@@ -18,25 +18,18 @@ 
 // <http://www.gnu.org/licenses/>.
 
 #include <sstream>
-#ifdef _USE_TR1
-#  include <tr1/unordered_set>
-#else
-#  include <unordered_set>
-#endif
+#include <tr1/unordered_set>
+#include <unordered_set>
 #include <testsuite_performance.h>
 
 namespace
 {
   // Bench using an unordered_set<int>. Hash functor for int is quite
   // predictable so it helps bench very specific use cases.
-  template<bool use_cache>
-    void bench()
+  template<typename _ContType>
+    void bench(const char* desc)
     {
       using namespace __gnu_test;
-      std::ostringstream ostr;
-      ostr << "unordered_set<int> " << (use_cache ? "with" : "without")
-	   << " cache";
-      const std::string desc = ostr.str();
 
       time_counter time;
       resource_counter resource;
@@ -44,20 +37,12 @@ 
       const int nb = 200000;
       start_counters(time, resource);
 
-#ifdef _USE_TR1
-      std::tr1::__unordered_set<int, std::hash<int>, std::equal_to<int>,
-				std::allocator<int>,
-			       	use_cache> us;
-#else
-      std::__uset_hashtable<int, std::hash<int>, std::equal_to<int>,
-			    std::allocator<int>,
-			    std::__uset_traits<use_cache>> us;
-#endif
+      _ContType us;
       for (int i = 0; i != nb; ++i)
 	  us.insert(i);
 
       stop_counters(time, resource);
-      ostr.str("");
+      std::ostringstream ostr;
       ostr << desc << ": first insert";
       report_performance(__FILE__, ostr.str().c_str(), time, resource);
 
@@ -110,14 +95,10 @@ 
   // Bench using unordered_set<string> that show how important it is to cache
   // hash code as computing string hash code is quite expensive compared to
   // computing it for int.
-  template<bool use_cache>
-    void bench_str()
+  template<typename _ContType>
+    void bench_str(const char* desc)
     {
       using namespace __gnu_test;
-      std::ostringstream ostr;
-      ostr << "unordered_set<string> " << (use_cache ? "with" : "without")
-	   << " cache";
-      const std::string desc = ostr.str();
 
       time_counter time;
       resource_counter resource;
@@ -125,6 +106,7 @@ 
       const int nb = 200000;
       // First generate once strings that are going to be used throughout the
       // bench:
+      std::ostringstream ostr;
       std::vector<std::string> strs;
       strs.reserve(nb);
       for (int i = 0; i != nb; ++i)
@@ -136,17 +118,7 @@ 
 
       start_counters(time, resource);
 
-#ifdef _USE_TR1
-      std::tr1::__unordered_set<std::string, std::hash<std::string>,
-				std::equal_to<std::string>,
-				std::allocator<std::string>,
-				use_cache> us;
-#else
-      std::__uset_hashtable<std::string, std::hash<std::string>,
-			    std::equal_to<std::string>,
-			    std::allocator<std::string>,
-			    std::__uset_traits<use_cache>> us;
-#endif
+      _ContType us;
       for (int i = 0; i != nb; ++i)
 	us.insert(strs[i]);
 
@@ -192,11 +164,53 @@ 
     }
 }
 
+template<bool cache>
+  using __uset =
+	      std::__uset_hashtable<int, std::hash<int>, std::equal_to<int>,
+				    std::allocator<int>,
+				    std::__uset_traits<cache>>;
+
+template<bool cache>
+  using __tr1_uset =
+	      std::tr1::__unordered_set<int, std::hash<int>, std::equal_to<int>,
+					std::allocator<int>,
+					cache>;
+
+template<bool cache>
+  using __str_uset = 
+	      std::__uset_hashtable<std::string, std::hash<std::string>,
+				    std::equal_to<std::string>,
+				    std::allocator<std::string>,
+				    std::__uset_traits<cache>>;
+
+template<bool cache>
+  using __tr1_str_uset = 
+	      std::tr1::__unordered_set<std::string, std::hash<std::string>,
+					std::equal_to<std::string>,
+					std::allocator<std::string>,
+					cache>;
+
 int main()
 {
-  bench<false>();
-  bench<true>();
-  bench_str<false>();
-  bench_str<true>();
+  bench<__tr1_uset<false>>(
+	"std::tr1::unordered_set<int> without hash code cached");
+  bench<__tr1_uset<true>>(
+	"std::tr1::unordered_set<int> with hash code cached");
+  bench<__uset<false>>(
+	"std::unordered_set<int> without hash code cached");
+  bench<__uset<true>>(
+	"std::unordered_set<int> with hash code cached");
+  bench<std::unordered_set<int>>(
+	"std::unordered_set<int> default cache");
+  bench_str<__tr1_str_uset<false>>(
+	"std::tr1::unordered_set<string> without hash code cached");
+  bench_str<__tr1_str_uset<true>>(
+	"std::tr1::unordered_set<string> with hash code cached");
+  bench_str<__str_uset<false>>(
+	"std::unordered_set<string> without hash code cached");
+  bench_str<__str_uset<true>>(
+	"std::unordered_set<string> with hash code cached");
+    bench_str<std::unordered_set<std::string>>(
+	"std::unordered_set<string> default cache");
   return 0;
 }
Index: testsuite/performance/23_containers/insert/54075.cc
===================================================================
--- testsuite/performance/23_containers/insert/54075.cc	(revision 194488)
+++ testsuite/performance/23_containers/insert/54075.cc	(working copy)
@@ -15,14 +15,18 @@ 
 // with this library; see the file COPYING3.  If not see
 // <http://www.gnu.org/licenses/>.
 
+// { dg-add-options no_pch }
 // { dg-options "-std=c++11" }
 
 #include <testsuite_performance.h>
 #include <random>
 #include <sstream>
 #include <tr1/unordered_set>
-#include<unordered_set>
+#include <unordered_set>
 
+#define USE_MY_FOO 1
+
+#if USE_MY_FOO
 struct Foo
 {
   typedef std::random_device::result_type _Type;
@@ -54,36 +58,57 @@ 
     { return t.hash(); }
 };
 
+#else
+
+struct Foo
+{
+  int bar;
+  int baz;
+  int meh;
+  Foo() 
+  {
+    bar = random(); baz = random(); meh = random();
+  }
+  Foo(const Foo&) = default;
+  size_t hash() const noexcept { return bar^baz^meh; }
+  inline bool operator==(const Foo& other) const {
+    return other.bar == bar && other.baz == baz && other.meh == meh;
+  }
+};
+
+struct HashFunction
+{
+  template<typename T>
+  size_t operator()(const T& t) const noexcept {
+    return t.hash();
+  }
+};
+
+#endif
+
+const int sz = 300000;
+
 template<typename _ContType>
-  void bench(const char* container_desc)
+void bench(const char* container_desc, const Foo* foos)
   {
     using namespace __gnu_test;
 
+    _ContType s;
+
     time_counter time;
     resource_counter resource;
-
-    const int sz = 300000;
-
-    Foo foos[sz];
-    {
-      std::random_device randev;
-      for (int i = 0; i != sz; ++i)
-	foos[i].init(randev);
-    }
-
-    _ContType s;
     start_counters(time, resource);
 
     for (int i = 0; i != sz ; ++i)
-      s.insert(foos[i]);
+	s.insert(foos[i]);
 
     stop_counters(time, resource);
     std::ostringstream ostr;
-    ostr << container_desc << sz << " Foo insertions";
+    ostr << container_desc << sz << " Foo insertions, " 
+	 << s.size() << " inserted";
     report_performance(__FILE__, ostr.str().c_str(), time, resource);
 
     // Try to insert again to check performance of collision detection
-    
     const int nb_loop = 10;
     start_counters(time, resource);
 
@@ -121,12 +146,48 @@ 
 
 int main()
 {
-  bench<__tr1_uset<false>>("std::tr1::unordered_set without hash code cached ");
-  bench<__tr1_uset<true>>("std::tr1::unordered_set with hash code cached ");
-  bench<__tr1_umset<false>>("std::tr1::unordered_multiset without hash code cached ");
-  bench<__tr1_umset<true>>("std::tr1::unordered_multiset with hash code cached ");
-  bench<__uset<false>>("std::unordered_set without hash code cached ");
-  bench<__uset<true>>("std::unordered_set with hash code cached ");
-  bench<__umset<false>>("std::unordered_multiset without hash code cached ");
-  bench<__umset<true>>("std::unordered_multiset with hash code cached ");
+  using namespace __gnu_test;
+
+  Foo foos[sz];
+#if USE_MY_FOO
+  {
+    std::random_device randev;
+    for (int i = 0; i != sz; ++i)
+      foos[i].init(randev);
+  }
+#endif
+
+  time_counter time;
+  resource_counter resource;
+  start_counters(time, resource);
+
+  bench<__tr1_uset<false>>(
+	"std::tr1::unordered_set without hash code cached ", foos);
+  bench<__tr1_uset<true>>(
+	"std::tr1::unordered_set with hash code cached ", foos);
+  bench<__tr1_umset<false>>(
+	"std::tr1::unordered_multiset without hash code cached ", foos);
+  bench<__tr1_umset<true>>(
+	"std::tr1::unordered_multiset with hash code cached ", foos);
+
+  stop_counters(time, resource);
+  report_performance(__FILE__, "tr1 benches", time, resource);
+
+  start_counters(time, resource);
+  bench<__uset<false>>(
+	"std::unordered_set without hash code cached ", foos);
+  bench<__uset<true>>(
+	"std::unordered_set with hash code cached ", foos);
+  bench<__umset<false>>(
+	"std::unordered_multiset without hash code cached ", foos);
+  bench<__umset<true>>(
+	"std::unordered_multiset with hash code cached ", foos);
+
+  stop_counters(time, resource);
+  report_performance(__FILE__, "std benches", time, resource);
+
+  bench<std::unordered_set<Foo, HashFunction>>(
+	"std::unordered_set default cache ", foos);
+  bench<std::unordered_multiset<Foo, HashFunction>>(
+	"std::unordered_multiset default cache ", foos);
 }