libstdc++
bits/hashtable.h
1 // hashtable.h header -*- C++ -*-
2 
3 // Copyright (C) 2007-2025 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10 
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15 
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19 
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24 
25 /** @file bits/hashtable.h
26  * This is an internal header file, included by other library headers.
27  * Do not attempt to use it directly. @headername{unordered_map, unordered_set}
28  */
29 
30 #ifndef _HASHTABLE_H
31 #define _HASHTABLE_H 1
32 
33 #ifdef _GLIBCXX_SYSHDR
34 #pragma GCC system_header
35 #endif
36 
37 #include <bits/hashtable_policy.h>
38 #include <bits/enable_special_members.h>
39 #include <bits/stl_algobase.h> // fill_n, is_permutation
40 #include <bits/stl_function.h> // __has_is_transparent_t
41 #if __cplusplus > 201402L
42 # include <bits/node_handle.h>
43 #endif
44 
45 #pragma GCC diagnostic push
46 #pragma GCC diagnostic ignored "-Wc++11-extensions"
47 
48 namespace std _GLIBCXX_VISIBILITY(default)
49 {
50 _GLIBCXX_BEGIN_NAMESPACE_VERSION
51 /// @cond undocumented
52 
53  template<typename _Tp, typename _Hash>
54  using __cache_default
55  = __not_<__and_<// Do not cache for fast hasher.
56  __is_fast_hash<_Hash>,
57  // Mandatory for the rehash process.
58  __is_nothrow_invocable<const _Hash&, const _Tp&>>>;
59 
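As an aside, the effect of this default can be checked at user level: hash codes are cached unless the hasher is both tagged fast (__is_fast_hash) and nothrow invocable. A minimal sketch, assuming these internal names keep their current meaning (they are implementation details and may change between releases):

    #include <string>
    #include <unordered_set>   // brings in this header

    // std::hash<int> is fast and noexcept, so hash codes are not cached.
    static_assert(!std::__cache_default<int, std::hash<int>>::value,
                  "no caching for fast, nothrow hashers");

    // std::hash<std::string> is not __is_fast_hash, so hash codes are
    // cached and reused when rehashing.
    static_assert(std::__cache_default<std::string,
                                       std::hash<std::string>>::value,
                  "caching for slow hashers");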
60  // Helper to conditionally delete the default constructor.
61  // The _Hash_node_base type is used to distinguish this specialization
62  // from any other potentially-overlapping subobjects of the hashtable.
63  template<typename _Equal, typename _Hash, typename _Allocator>
64  using _Hashtable_enable_default_ctor
65  = _Enable_default_constructor<__and_<is_default_constructible<_Equal>,
66  is_default_constructible<_Hash>,
67  is_default_constructible<_Allocator>>{},
68  __detail::_Hash_node_base>;
69 
70  /**
71  * Primary class template _Hashtable.
72  *
73  * @ingroup hashtable-detail
74  *
75  * @tparam _Value CopyConstructible type.
76  *
77  * @tparam _Key CopyConstructible type.
78  *
79  * @tparam _Alloc An allocator type
80  * ([lib.allocator.requirements]) whose _Alloc::value_type is
81  * _Value. As a conforming extension, we allow for
82  * _Alloc::value_type != _Value.
83  *
84  * @tparam _ExtractKey Function object that takes an object of type
85  * _Value and returns a value of type _Key.
86  *
87  * @tparam _Equal Function object that takes two objects of type _Key
88  * and returns a bool-like value that is true if the two objects
89  * are considered equal.
90  *
91  * @tparam _Hash The hash function. A unary function object with
92  * argument type _Key and result type size_t. Return values should
93  * be distributed over the entire range [0, numeric_limits<size_t>::max()].
94  *
95  * @tparam _RangeHash The range-hashing function (in the terminology of
96  * Tavori and Dreizin). A binary function object whose argument
97  * types and result type are all size_t. Given arguments r and N,
98  * the return value is in the range [0, N).
99  *
100  * @tparam _Unused Not used.
101  *
102  * @tparam _RehashPolicy Policy class with three members, all of
103  * which govern the bucket count. _M_next_bkt(n) returns a bucket
104  * count no smaller than n. _M_bkt_for_elements(n) returns a
105  * bucket count appropriate for an element count of n.
106  * _M_need_rehash(n_bkt, n_elt, n_ins) determines whether, if the
107  * current bucket count is n_bkt and the current element count is
108  * n_elt, we need to increase the bucket count for n_ins insertions.
109  * If so, returns make_pair(true, n), where n is the new bucket count. If
110  * not, returns make_pair(false, <anything>)
111  *
112  * @tparam _Traits Compile-time class with three boolean
113  * std::integral_constant members: __cache_hash_code, __constant_iterators,
114  * __unique_keys.
115  *
116  * Each _Hashtable data structure has:
117  *
118  * - _Bucket[] _M_buckets
119  * - _Hash_node_base _M_before_begin
120  * - size_type _M_bucket_count
121  * - size_type _M_element_count
122  *
123  * with _Bucket being _Hash_node_base* and _Hash_node containing:
124  *
125  * - _Hash_node* _M_next
126  * - Tp _M_value
127  * - size_t _M_hash_code if cache_hash_code is true
128  *
129  * In terms of Standard containers the hashtable is like the aggregation of:
130  *
131  * - std::forward_list<_Node> containing the elements
132  * - std::vector<std::forward_list<_Node>::iterator> representing the buckets
133  *
134  * The non-empty buckets contain the node before the first node in the
135  * bucket. This design makes it possible to implement something like a
136  * std::forward_list::insert_after on container insertion and
137  * std::forward_list::erase_after on container erase
138  * calls. _M_before_begin is equivalent to
139  * std::forward_list::before_begin. Empty buckets contain
140  * nullptr. Note that one of the non-empty buckets contains
141  * &_M_before_begin, which is not a dereferenceable node, so the
142  * node pointer stored in a bucket must never be dereferenced; only its
143  * next node can be.
144  *
145  * Walking through a bucket's nodes requires a check on the hash code to
146  * see if each node is still in the bucket. Such a design assumes a
147  * quite efficient hash functor and is one of the reasons it is
148  * highly advisable to set __cache_hash_code to true.
149  *
150  * The container iterators are simply built from nodes. This way
151  * incrementing the iterator is perfectly efficient independent of
152  * how many empty buckets there are in the container.
153  *
154  * On insert we compute the element's hash code and use it to find the
155  * bucket index. If the element must be inserted in an empty bucket
156  * we add it at the beginning of the singly linked list and make the
157  * bucket point to _M_before_begin. The bucket that used to point to
158  * _M_before_begin, if any, is updated to point to its new before
159  * begin node.
160  *
161  * Note that all equivalent values, if any, are next to each other: once
162  * we find a non-equivalent value after an equivalent one, no further
163  * equivalent values can follow.
164  *
165  * On erase, the simple iterator design requires using the hash
166  * functor to get the index of the bucket to update. For this
167  * reason, when __cache_hash_code is set to false the hash functor must
168  * not throw and this is enforced by a static assertion.
169  *
170  * Functionality is implemented by decomposition into base classes,
171  * where the derived _Hashtable class is used in _Map_base and
172  * _Rehash_base base classes to access the
173  * "this" pointer. _Hashtable_base is used in the base classes as a
174  * non-recursive, fully-completed-type so that detailed nested type
175  * information, such as iterator type and node type, can be
176  * used. This is similar to the "Curiously Recurring Template
177  * Pattern" (CRTP) technique, but uses a reconstructed, not
178  * explicitly passed, template pattern.
179  *
180  * Base class templates are:
181  * - __detail::_Hashtable_base
182  * - __detail::_Map_base
183  * - __detail::_Rehash_base
184  */
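As an illustration only (not part of the header), the layout described above can be sketched with ordinary types: one singly linked list of nodes headed by a before-begin sentinel, plus buckets that store a pointer to the node *before* the first node of the bucket, with nullptr for empty buckets. The insert function mirrors what _M_insert_bucket_begin does further down.

    #include <cstddef>
    #include <vector>

    struct Node { Node* next; int value; std::size_t hash; };

    struct TinyTable
    {
      Node before_begin{nullptr, 0, 0};   // plays the role of _M_before_begin
      std::vector<Node*> buckets;         // empty bucket <=> nullptr

      explicit TinyTable(std::size_t n) : buckets(n, nullptr) { }

      std::size_t bucket_of(std::size_t h) const { return h % buckets.size(); }

      void insert(Node* node)             // cf. _M_insert_bucket_begin
      {
        std::size_t bkt = bucket_of(node->hash);
        if (Node* before = buckets[bkt])
          {
            // Non-empty bucket: splice right after its before-node.
            node->next = before->next;
            before->next = node;
          }
        else
          {
            // Empty bucket: push at the front of the global list.
            node->next = before_begin.next;
            before_begin.next = node;
            if (node->next)
              // The bucket that used to begin at the old first node now
              // begins after the new node.
              buckets[bucket_of(node->next->hash)] = node;
            buckets[bkt] = &before_begin;
          }
      }
    };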
185  template<typename _Key, typename _Value, typename _Alloc,
186  typename _ExtractKey, typename _Equal,
187  typename _Hash, typename _RangeHash, typename _Unused,
188  typename _RehashPolicy, typename _Traits>
189  class _Hashtable
190  : public __detail::_Hashtable_base<_Key, _Value, _ExtractKey, _Equal,
191  _Hash, _RangeHash, _Unused, _Traits>,
192  public __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
193  _Hash, _RangeHash, _Unused,
194  _RehashPolicy, _Traits>,
195  public __detail::_Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
196  _Hash, _RangeHash, _Unused,
197  _RehashPolicy, _Traits>,
198  private __detail::_Hashtable_alloc<
199  __alloc_rebind<_Alloc,
200  __detail::_Hash_node<_Value,
201  _Traits::__hash_cached::value>>>,
202  private _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>
203  {
204  static_assert(is_same<typename remove_cv<_Value>::type, _Value>::value,
205  "unordered container must have a non-const, non-volatile value_type");
206 #if __cplusplus > 201703L || defined __STRICT_ANSI__
207  static_assert(is_same<typename _Alloc::value_type, _Value>{},
208  "unordered container must have the same value_type as its allocator");
209 #endif
210  static_assert(is_copy_constructible<_Hash>::value,
211  "hash function must be copy constructible");
212 
213  using __traits_type = _Traits;
214  using __hash_cached = typename __traits_type::__hash_cached;
215  using __constant_iterators = typename __traits_type::__constant_iterators;
216  using __node_type = __detail::_Hash_node<_Value, __hash_cached::value>;
217  using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
218 
219  using __hashtable_alloc = __detail::_Hashtable_alloc<__node_alloc_type>;
220 
221  using __node_value_type =
222  __detail::_Hash_node_value<_Value, __hash_cached::value>;
223  using __node_ptr = typename __hashtable_alloc::__node_ptr;
224  using __value_alloc_traits =
225  typename __hashtable_alloc::__value_alloc_traits;
226  using __node_alloc_traits =
227  typename __hashtable_alloc::__node_alloc_traits;
228  using __node_base = typename __hashtable_alloc::__node_base;
229  using __node_base_ptr = typename __hashtable_alloc::__node_base_ptr;
230  using __buckets_ptr = typename __hashtable_alloc::__buckets_ptr;
231 
232  using __enable_default_ctor
233  = _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>;
234  using __rehash_guard_t
235  = __detail::_RehashStateGuard<_RehashPolicy>;
236 
237  public:
238  typedef _Key key_type;
239  typedef _Value value_type;
240  typedef _Alloc allocator_type;
241  typedef _Equal key_equal;
242 
243  // mapped_type, if present, comes from _Map_base.
244  // hasher, if present, comes from _Hash_code_base/_Hashtable_base.
245  typedef typename __value_alloc_traits::pointer pointer;
246  typedef typename __value_alloc_traits::const_pointer const_pointer;
247  typedef value_type& reference;
248  typedef const value_type& const_reference;
249 
250  using iterator
251  = __detail::_Node_iterator<_Value, __constant_iterators::value,
252  __hash_cached::value>;
253 
254  using const_iterator
255  = __detail::_Node_const_iterator<_Value, __constant_iterators::value,
256  __hash_cached::value>;
257 
258  using local_iterator = __detail::_Local_iterator<key_type, _Value,
259  _ExtractKey, _Hash, _RangeHash, _Unused,
260  __constant_iterators::value,
261  __hash_cached::value>;
262 
263  using const_local_iterator = __detail::_Local_const_iterator<
264  key_type, _Value,
265  _ExtractKey, _Hash, _RangeHash, _Unused,
266  __constant_iterators::value, __hash_cached::value>;
267 
268  private:
269  using __rehash_type = _RehashPolicy;
270 
271  using __unique_keys = typename __traits_type::__unique_keys;
272 
273  using __hashtable_base = __detail::
274  _Hashtable_base<_Key, _Value, _ExtractKey,
275  _Equal, _Hash, _RangeHash, _Unused, _Traits>;
276 
277  using __hash_code_base = typename __hashtable_base::__hash_code_base;
278  using __hash_code = typename __hashtable_base::__hash_code;
279  using __ireturn_type = __conditional_t<__unique_keys::value,
280  pair<iterator, bool>,
281  iterator>;
282 
283  using __map_base = __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey,
284  _Equal, _Hash, _RangeHash, _Unused,
285  _RehashPolicy, _Traits>;
286 
287  using __rehash_base = __detail::_Rehash_base<_Key, _Value, _Alloc,
288  _ExtractKey, _Equal,
289  _Hash, _RangeHash, _Unused,
290  _RehashPolicy, _Traits>;
291 
292  using __node_builder_t = __detail::_NodeBuilder<_ExtractKey>;
293 
294  // Simple RAII type for managing a node containing an element
295  struct _Scoped_node
296  {
297  // Take ownership of a node with a constructed element.
298  _Scoped_node(__node_ptr __n, __hashtable_alloc* __h)
299  : _M_h(__h), _M_node(__n) { }
300 
301  // Allocate a node and construct an element within it.
302  template<typename... _Args>
303  _Scoped_node(__hashtable_alloc* __h, _Args&&... __args)
304  : _M_h(__h),
305  _M_node(__h->_M_allocate_node(std::forward<_Args>(__args)...))
306  { }
307 
308  // Destroy element and deallocate node.
309  ~_Scoped_node() { if (_M_node) _M_h->_M_deallocate_node(_M_node); };
310 
311  _Scoped_node(const _Scoped_node&) = delete;
312  _Scoped_node& operator=(const _Scoped_node&) = delete;
313 
314  __hashtable_alloc* _M_h;
315  __node_ptr _M_node;
316  };
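The idiom this type supports (used by try_emplace and the emplace helpers below) is: build the element inside the guard, hand the node to an insert function that may throw, and only after it succeeds null out _M_node so the destructor no longer deallocates. A generic, self-contained analog of that release-on-success pattern, with a hypothetical adopt() standing in for the insert helpers:

    // Own a heap object until some other owner has taken it, then release.
    struct ScopedIntNode
    {
      int* node;
      explicit ScopedIntNode(int v) : node(new int(v)) { }
      ~ScopedIntNode() { delete node; }               // only if still owned
      ScopedIntNode(const ScopedIntNode&) = delete;
      ScopedIntNode& operator=(const ScopedIntNode&) = delete;
    };

    // Pattern:
    //   ScopedIntNode guard(42);
    //   container.adopt(guard.node);   // may throw; guard still owns the node
    //   guard.node = nullptr;          // success: ownership transferred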
317 
318  // Compile-time diagnostics.
319 
320  // _Hash_code_base has everything protected, so use this derived type to
321  // access it.
322  struct __hash_code_base_access : __hash_code_base
323  { using __hash_code_base::_M_bucket_index; };
324 
325  // To get bucket index we need _RangeHash to be non-throwing.
326  static_assert(is_nothrow_default_constructible<_RangeHash>::value,
327  "Functor used to map hash code to bucket index"
328  " must be nothrow default constructible");
329  static_assert(noexcept(
330  std::declval<const _RangeHash&>()((std::size_t)0, (std::size_t)0)),
331  "Functor used to map hash code to bucket index must be"
332  " noexcept");
333 
334  // To compute bucket index we also need _ExtractKey to be non-throwing.
335  static_assert(is_nothrow_default_constructible<_ExtractKey>::value,
336  "_ExtractKey must be nothrow default constructible");
337  static_assert(noexcept(
338  std::declval<const _ExtractKey&>()(std::declval<_Value>())),
339  "_ExtractKey functor must be noexcept invocable");
340 
341  template<typename _Keya, typename _Valuea, typename _Alloca,
342  typename _ExtractKeya, typename _Equala,
343  typename _Hasha, typename _RangeHasha, typename _Unuseda,
344  typename _RehashPolicya, typename _Traitsa,
345  bool _Unique_keysa>
346  friend struct __detail::_Map_base;
347 
348  public:
349  using size_type = typename __hashtable_base::size_type;
350  using difference_type = typename __hashtable_base::difference_type;
351 
352 #if __cplusplus > 201402L
353  using node_type = _Node_handle<_Key, _Value, __node_alloc_type>;
354  using insert_return_type = _Node_insert_return<iterator, node_type>;
355 #endif
356 
357  private:
358  __buckets_ptr _M_buckets = &_M_single_bucket;
359  size_type _M_bucket_count = 1;
360  __node_base _M_before_begin;
361  size_type _M_element_count = 0;
362  _RehashPolicy _M_rehash_policy;
363 
364  // A single bucket used when only 1 bucket is needed. Especially
365  // useful for move semantics: it lets a moved-from hashtable be left with
366  // a single bucket that is not allocated, so that those operations can be
367  // noexcept qualified.
368  // Note that we can't leave the hashtable with 0 buckets without adding
369  // numerous checks in the code to avoid a modulus by 0.
370  __node_base_ptr _M_single_bucket = nullptr;
371 
372  void
373  _M_update_bbegin()
374  {
375  if (auto __begin = _M_begin())
376  _M_buckets[_M_bucket_index(*__begin)] = &_M_before_begin;
377  }
378 
379  void
380  _M_update_bbegin(__node_ptr __n)
381  {
382  _M_before_begin._M_nxt = __n;
383  _M_update_bbegin();
384  }
385 
386  bool
387  _M_uses_single_bucket(__buckets_ptr __bkts) const
388  { return __builtin_expect(__bkts == &_M_single_bucket, false); }
389 
390  bool
391  _M_uses_single_bucket() const
392  { return _M_uses_single_bucket(_M_buckets); }
393 
394  static constexpr size_t
395  __small_size_threshold() noexcept
396  {
397  return
398  __detail::_Hashtable_hash_traits<_Hash>::__small_size_threshold();
399  }
400 
401  __hashtable_alloc&
402  _M_base_alloc() { return *this; }
403 
404  __buckets_ptr
405  _M_allocate_buckets(size_type __bkt_count)
406  {
407  if (__builtin_expect(__bkt_count == 1, false))
408  {
409  _M_single_bucket = nullptr;
410  return &_M_single_bucket;
411  }
412 
413  return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
414  }
415 
416  void
417  _M_deallocate_buckets(__buckets_ptr __bkts, size_type __bkt_count)
418  {
419  if (_M_uses_single_bucket(__bkts))
420  return;
421 
422  __hashtable_alloc::_M_deallocate_buckets(__bkts, __bkt_count);
423  }
424 
425  void
426  _M_deallocate_buckets()
427  { _M_deallocate_buckets(_M_buckets, _M_bucket_count); }
428 
429  // Gets bucket begin, deals with the fact that non-empty buckets contain
430  // their before begin node.
431  __node_ptr
432  _M_bucket_begin(size_type __bkt) const
433  {
434  __node_base_ptr __n = _M_buckets[__bkt];
435  return __n ? static_cast<__node_ptr>(__n->_M_nxt) : nullptr;
436  }
437 
438  __node_ptr
439  _M_begin() const
440  { return static_cast<__node_ptr>(_M_before_begin._M_nxt); }
441 
442  // Assign *this using another _Hashtable instance. Whether elements
443  // are copied or moved depends on the _Ht reference.
444  template<typename _Ht>
445  void
446  _M_assign_elements(_Ht&&);
447 
448  template<typename _Ht>
449  void
450  _M_assign(_Ht&& __ht)
451  {
452  __detail::_AllocNode<__node_alloc_type> __alloc_node_gen(*this);
453  _M_assign(std::forward<_Ht>(__ht), __alloc_node_gen);
454  }
455 
456  template<typename _Ht, typename _NodeGenerator>
457  void
458  _M_assign(_Ht&&, _NodeGenerator&);
459 
460  void
461  _M_move_assign(_Hashtable&&, true_type);
462 
463  void
464  _M_move_assign(_Hashtable&&, false_type);
465 
466  void
467  _M_reset() noexcept;
468 
469  _Hashtable(const _Hash& __h, const _Equal& __eq,
470  const allocator_type& __a)
471  : __hashtable_base(__h, __eq),
472  __hashtable_alloc(__node_alloc_type(__a)),
473  __enable_default_ctor(_Enable_default_constructor_tag{})
474  { }
475 
476  template<bool _No_realloc = true>
477  static constexpr bool
478  _S_nothrow_move()
479  {
480 #if __cplusplus <= 201402L
481  return __and_<__bool_constant<_No_realloc>,
482  is_nothrow_copy_constructible<_Hash>,
483  is_nothrow_copy_constructible<_Equal>>::value;
484 #else
485  if constexpr (_No_realloc)
486  if constexpr (is_nothrow_copy_constructible<_Hash>())
487  return is_nothrow_copy_constructible<_Equal>();
488  return false;
489 #endif
490  }
491 
492  _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
493  true_type /* alloc always equal */)
494  noexcept(_S_nothrow_move());
495 
496  _Hashtable(_Hashtable&&, __node_alloc_type&&,
497  false_type /* alloc always equal */);
498 
499  template<typename _InputIterator>
500  _Hashtable(_InputIterator __first, _InputIterator __last,
501  size_type __bkt_count_hint,
502  const _Hash&, const _Equal&, const allocator_type&,
503  true_type __uks);
504 
505  template<typename _InputIterator>
506  _Hashtable(_InputIterator __first, _InputIterator __last,
507  size_type __bkt_count_hint,
508  const _Hash&, const _Equal&, const allocator_type&,
509  false_type __uks);
510 
511  public:
512  // Constructor, destructor, assignment, swap
513  _Hashtable() = default;
514 
515  _Hashtable(const _Hashtable&);
516 
517  _Hashtable(const _Hashtable&, const allocator_type&);
518 
519  explicit
520  _Hashtable(size_type __bkt_count_hint,
521  const _Hash& __hf = _Hash(),
522  const key_equal& __eql = key_equal(),
523  const allocator_type& __a = allocator_type());
524 
525  // Use delegating constructors.
526  _Hashtable(_Hashtable&& __ht)
527  noexcept(_S_nothrow_move())
528  : _Hashtable(std::move(__ht), std::move(__ht._M_node_allocator()),
529  true_type{})
530  { }
531 
532  _Hashtable(_Hashtable&& __ht, const allocator_type& __a)
533  noexcept(_S_nothrow_move<__node_alloc_traits::_S_always_equal()>())
534  : _Hashtable(std::move(__ht), __node_alloc_type(__a),
535  typename __node_alloc_traits::is_always_equal{})
536  { }
537 
538  explicit
539  _Hashtable(const allocator_type& __a)
540  : __hashtable_alloc(__node_alloc_type(__a)),
541  __enable_default_ctor(_Enable_default_constructor_tag{})
542  { }
543 
544  template<typename _InputIterator>
545  _Hashtable(_InputIterator __f, _InputIterator __l,
546  size_type __bkt_count_hint = 0,
547  const _Hash& __hf = _Hash(),
548  const key_equal& __eql = key_equal(),
549  const allocator_type& __a = allocator_type())
550  : _Hashtable(__f, __l, __bkt_count_hint, __hf, __eql, __a,
551  __unique_keys{})
552  { }
553 
554  _Hashtable(initializer_list<value_type> __l,
555  size_type __bkt_count_hint = 0,
556  const _Hash& __hf = _Hash(),
557  const key_equal& __eql = key_equal(),
558  const allocator_type& __a = allocator_type())
559  : _Hashtable(__l.begin(), __l.end(), __bkt_count_hint,
560  __hf, __eql, __a, __unique_keys{})
561  { }
562 
563  _Hashtable&
564  operator=(const _Hashtable& __ht);
565 
566  _Hashtable&
567  operator=(_Hashtable&& __ht)
568  noexcept(__node_alloc_traits::_S_nothrow_move()
569  && is_nothrow_move_assignable<_Hash>::value
570  && is_nothrow_move_assignable<_Equal>::value)
571  {
572  constexpr bool __move_storage =
573  __node_alloc_traits::_S_propagate_on_move_assign()
574  || __node_alloc_traits::_S_always_equal();
575  _M_move_assign(std::move(__ht), __bool_constant<__move_storage>());
576  return *this;
577  }
578 
579 #pragma GCC diagnostic push
580 #pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
581  _Hashtable&
582  operator=(initializer_list<value_type> __l)
583  {
584  using __reuse_or_alloc_node_gen_t =
585  __detail::_ReuseOrAllocNode<__node_alloc_type>;
586 
587  __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
588  _M_before_begin._M_nxt = nullptr;
589  clear();
590 
591  // We assume that all elements of __l are likely to be inserted.
592  auto __l_bkt_count = _M_rehash_policy._M_bkt_for_elements(__l.size());
593 
594  // Excess buckets might have been intentionally reserved by the user,
595  // so rehash if we need to grow, but don't shrink.
596  if (_M_bucket_count < __l_bkt_count)
597  rehash(__l_bkt_count);
598 
599  __hash_code __code;
600  size_type __bkt;
601  for (auto& __e : __l)
602  {
603  const key_type& __k = _ExtractKey{}(__e);
604 
605  if constexpr (__unique_keys::value)
606  {
607  if (auto __loc = _M_locate(__k))
608  continue; // Found existing element with equivalent key
609  else
610  {
611  __code = __loc._M_hash_code;
612  __bkt = __loc._M_bucket_index;
613  }
614  }
615  else
616  {
617  __code = this->_M_hash_code(__k);
618  __bkt = _M_bucket_index(__code);
619  }
620 
621  _M_insert_unique_node(__bkt, __code, __roan(__e));
622  }
623 
624  return *this;
625  }
626 #pragma GCC diagnostic pop
627 
628  ~_Hashtable() noexcept;
629 
630  void
631  swap(_Hashtable&)
632  noexcept(__and_<__is_nothrow_swappable<_Hash>,
633  __is_nothrow_swappable<_Equal>>::value);
634 
635  // Basic container operations
636  iterator
637  begin() noexcept
638  { return iterator(_M_begin()); }
639 
640  const_iterator
641  begin() const noexcept
642  { return const_iterator(_M_begin()); }
643 
644  iterator
645  end() noexcept
646  { return iterator(nullptr); }
647 
648  const_iterator
649  end() const noexcept
650  { return const_iterator(nullptr); }
651 
652  const_iterator
653  cbegin() const noexcept
654  { return const_iterator(_M_begin()); }
655 
656  const_iterator
657  cend() const noexcept
658  { return const_iterator(nullptr); }
659 
660  size_type
661  size() const noexcept
662  { return _M_element_count; }
663 
664  _GLIBCXX_NODISCARD bool
665  empty() const noexcept
666  { return size() == 0; }
667 
668  allocator_type
669  get_allocator() const noexcept
670  { return allocator_type(this->_M_node_allocator()); }
671 
672  size_type
673  max_size() const noexcept
674  { return __node_alloc_traits::max_size(this->_M_node_allocator()); }
675 
676  // Observers
677  key_equal
678  key_eq() const
679  { return this->_M_eq(); }
680 
681  // hash_function, if present, comes from _Hash_code_base.
682 
683  // Bucket operations
684  size_type
685  bucket_count() const noexcept
686  { return _M_bucket_count; }
687 
688  size_type
689  max_bucket_count() const noexcept
690  { return max_size(); }
691 
692  size_type
693  bucket_size(size_type __bkt) const
694  { return std::distance(begin(__bkt), end(__bkt)); }
695 
696  size_type
697  bucket(const key_type& __k) const
698  { return _M_bucket_index(this->_M_hash_code(__k)); }
699 
700  local_iterator
701  begin(size_type __bkt)
702  {
703  return local_iterator(*this, _M_bucket_begin(__bkt),
704  __bkt, _M_bucket_count);
705  }
706 
707  local_iterator
708  end(size_type __bkt)
709  { return local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
710 
711  const_local_iterator
712  begin(size_type __bkt) const
713  {
714  return const_local_iterator(*this, _M_bucket_begin(__bkt),
715  __bkt, _M_bucket_count);
716  }
717 
718  const_local_iterator
719  end(size_type __bkt) const
720  { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
721 
722  // DR 691.
723  const_local_iterator
724  cbegin(size_type __bkt) const
725  {
726  return const_local_iterator(*this, _M_bucket_begin(__bkt),
727  __bkt, _M_bucket_count);
728  }
729 
730  const_local_iterator
731  cend(size_type __bkt) const
732  { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
733 
734  float
735  load_factor() const noexcept
736  {
737  return static_cast<float>(size()) / static_cast<float>(bucket_count());
738  }
739 
740  // max_load_factor, if present, comes from _Rehash_base.
741 
742  // Generalization of max_load_factor. Extension, not found in
743  // TR1. Only useful if _RehashPolicy is something other than
744  // the default.
745  const _RehashPolicy&
746  __rehash_policy() const
747  { return _M_rehash_policy; }
748 
749  void
750  __rehash_policy(const _RehashPolicy& __pol)
751  { _M_rehash_policy = __pol; }
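As the comment notes, these accessors only matter when _RehashPolicy is not the default (_Prime_rehash_policy). A hypothetical policy satisfying the three-member interface described in the class comment above might look like the sketch below; note that other parts of this file also rely on policy state (_M_reset, _RehashStateGuard), which a real replacement would have to provide as well.

    #include <cstddef>
    #include <utility>

    struct PowerOfTwoRehashPolicy
    {
      float max_load = 1.0f;

      // Smallest power of two not smaller than __n (and at least 2).
      std::size_t
      _M_next_bkt(std::size_t __n) const
      {
        std::size_t __bkt = 2;
        while (__bkt < __n)
          __bkt <<= 1;
        return __bkt;
      }

      // A bucket count suitable for __n elements at the target load factor.
      std::size_t
      _M_bkt_for_elements(std::size_t __n) const
      { return _M_next_bkt(std::size_t(__n / max_load) + 1); }

      // Do we need to grow before inserting __n_ins more elements?
      std::pair<bool, std::size_t>
      _M_need_rehash(std::size_t __n_bkt, std::size_t __n_elt,
                     std::size_t __n_ins) const
      {
        if (__n_elt + __n_ins > __n_bkt * max_load)
          return { true, _M_bkt_for_elements(__n_elt + __n_ins) };
        return { false, 0 };
      }
    };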
752 
753  // Lookup.
754  iterator
755  find(const key_type& __k);
756 
757  const_iterator
758  find(const key_type& __k) const;
759 
760  size_type
761  count(const key_type& __k) const;
762 
763  std::pair<iterator, iterator>
764  equal_range(const key_type& __k);
765 
766  std::pair<const_iterator, const_iterator>
767  equal_range(const key_type& __k) const;
768 
769 #ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED
770  template<typename _Kt,
771  typename = __has_is_transparent_t<_Hash, _Kt>,
772  typename = __has_is_transparent_t<_Equal, _Kt>>
773  iterator
774  _M_find_tr(const _Kt& __k);
775 
776  template<typename _Kt,
777  typename = __has_is_transparent_t<_Hash, _Kt>,
778  typename = __has_is_transparent_t<_Equal, _Kt>>
779  const_iterator
780  _M_find_tr(const _Kt& __k) const;
781 
782  template<typename _Kt,
783  typename = __has_is_transparent_t<_Hash, _Kt>,
784  typename = __has_is_transparent_t<_Equal, _Kt>>
785  size_type
786  _M_count_tr(const _Kt& __k) const;
787 
788  template<typename _Kt,
789  typename = __has_is_transparent_t<_Hash, _Kt>,
790  typename = __has_is_transparent_t<_Equal, _Kt>>
791  pair<iterator, iterator>
792  _M_equal_range_tr(const _Kt& __k);
793 
794  template<typename _Kt,
795  typename = __has_is_transparent_t<_Hash, _Kt>,
796  typename = __has_is_transparent_t<_Equal, _Kt>>
797  pair<const_iterator, const_iterator>
798  _M_equal_range_tr(const _Kt& __k) const;
799 #endif // __glibcxx_generic_unordered_lookup
800 
801  void _M_rehash_insert(size_type __n);
802 
803  private:
804  // Bucket index computation helpers.
805  size_type
806  _M_bucket_index(const __node_value_type& __n) const noexcept
807  { return __hash_code_base::_M_bucket_index(__n, _M_bucket_count); }
808 
809  size_type
810  _M_bucket_index(__hash_code __c) const
811  { return __hash_code_base::_M_bucket_index(__c, _M_bucket_count); }
812 
813 #pragma GCC diagnostic push
814 #pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
815  // Get hash code for a node that comes from another _Hashtable.
816  // Reuse a cached hash code if the hash function is stateless,
817  // otherwise recalculate it using our own hash function.
818  __hash_code
819  _M_hash_code_ext(const __node_value_type& __from) const
820  {
821  if constexpr (__and_<__hash_cached, is_empty<_Hash>>::value)
822  return __from._M_hash_code;
823  else
824  return this->_M_hash_code(_ExtractKey{}(__from._M_v()));
825  }
826 
827  // Like _M_bucket_index but when the node is coming from another
828  // container instance.
829  size_type
830  _M_bucket_index_ext(const __node_value_type& __from) const
831  { return _RangeHash{}(_M_hash_code_ext(__from), _M_bucket_count); }
832 
833  void
834  _M_copy_code(__node_value_type& __to,
835  const __node_value_type& __from) const
836  {
837  if constexpr (__hash_cached::value)
838  __to._M_hash_code = _M_hash_code_ext(__from);
839  }
840 
841  void
842  _M_store_code(__node_value_type& __to, __hash_code __code) const
843  {
844  if constexpr (__hash_cached::value)
845  __to._M_hash_code = __code;
846  }
847 #pragma GCC diagnostic pop
848 
849  // Find and insert helper functions and types
850 
851  // Find the node before the one matching the criteria.
852  __node_base_ptr
853  _M_find_before_node(size_type, const key_type&, __hash_code) const;
854 
855  template<typename _Kt>
856  __node_base_ptr
857  _M_find_before_node_tr(size_type, const _Kt&, __hash_code) const;
858 
859  // A pointer to a particular node and/or a hash code and bucket index
860  // where such a node would be found in the container.
861  struct __location_type
862  {
863  // True if _M_node() is a valid node pointer.
864  explicit operator bool() const noexcept
865  { return static_cast<bool>(_M_before); }
866 
867  // An iterator that refers to the node, or end().
868  explicit operator iterator() const noexcept
869  { return iterator(_M_node()); }
870 
871  // A const_iterator that refers to the node, or cend().
872  explicit operator const_iterator() const noexcept
873  { return const_iterator(_M_node()); }
874 
875  // A pointer to the node, or null.
876  __node_ptr _M_node() const
877  {
878  if (_M_before)
879  return static_cast<__node_ptr>(_M_before->_M_nxt);
880  return __node_ptr();
881  }
882 
883  __node_base_ptr _M_before{}; // Must only be used to get _M_nxt
884  __hash_code _M_hash_code{}; // Only valid if _M_bucket_index != -1
885  size_type _M_bucket_index = size_type(-1);
886  };
887 
888  // Adaptive lookup to find key, or which bucket it would be in.
889  // For a container smaller than the small size threshold use a linear
890  // search through the whole container, just testing for equality.
891  // Otherwise, calculate the hash code and bucket index for the key,
892  // and search in that bucket.
893  // The return value will have a pointer to the node _before_ the first
894  // node matching the key, if any such node exists. Returning the node
895  // before the desired one allows the result to be used for erasure.
896  // If no matching element is present, the hash code and bucket for the
897  // key will be set, allowing a new node to be inserted at that location.
898  // (The hash code and bucket might also be set when a node is found.)
899  // The _M_before pointer might point to _M_before_begin, so must not be
900  // cast to __node_ptr, and it must not be used to modify *_M_before
901  // except in non-const member functions, such as erase.
902  __location_type
903  _M_locate(const key_type& __k) const;
904 
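A sketch of this adaptive strategy, reusing the TinyTable illustration from earlier on this page (illustration only; the real function additionally returns the node *before* the match and, on a miss, the hash code and bucket where an insertion would go, which this sketch omits):

    #include <cstddef>
    #include <functional>

    Node* locate(const TinyTable& t, std::size_t size,
                 std::size_t small_threshold, int key)
    {
      if (size <= small_threshold)
        {
          // Small container: walk every node, compare keys only, never hash.
          for (Node* n = t.before_begin.next; n; n = n->next)
            if (n->value == key)
              return n;
          return nullptr;
        }

      // Otherwise hash once and walk only the key's bucket, stopping as soon
      // as a node that belongs to another bucket is reached.
      std::size_t h = std::hash<int>{}(key);
      std::size_t bkt = t.bucket_of(h);
      Node* before = t.buckets[bkt];        // may be &before_begin
      while (before && before->next)
        {
          Node* n = before->next;
          if (t.bucket_of(n->hash) != bkt)
            break;                          // end of this bucket's chain
          if (n->value == key)
            return n;
          before = n;
        }
      return nullptr;
    }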
905  __node_ptr
906  _M_find_node(size_type __bkt, const key_type& __key,
907  __hash_code __c) const
908  {
909  if (__node_base_ptr __before_n = _M_find_before_node(__bkt, __key, __c))
910  return static_cast<__node_ptr>(__before_n->_M_nxt);
911  return nullptr;
912  }
913 
914  template<typename _Kt>
915  __node_ptr
916  _M_find_node_tr(size_type __bkt, const _Kt& __key,
917  __hash_code __c) const
918  {
919  if (auto __before_n = _M_find_before_node_tr(__bkt, __key, __c))
920  return static_cast<__node_ptr>(__before_n->_M_nxt);
921  return nullptr;
922  }
923 
924  // Insert a node at the beginning of a bucket.
925  void
926  _M_insert_bucket_begin(size_type __bkt, __node_ptr __node)
927  {
928  if (_M_buckets[__bkt])
929  {
930  // Bucket is not empty, we just need to insert the new node
931  // after the bucket before begin.
932  __node->_M_nxt = _M_buckets[__bkt]->_M_nxt;
933  _M_buckets[__bkt]->_M_nxt = __node;
934  }
935  else
936  {
937  // The bucket is empty, the new node is inserted at the
938  // beginning of the singly-linked list and the bucket will
939  // contain _M_before_begin pointer.
940  __node->_M_nxt = _M_before_begin._M_nxt;
941  _M_before_begin._M_nxt = __node;
942 
943  if (__node->_M_nxt)
944  // We must update former begin bucket that is pointing to
945  // _M_before_begin.
946  _M_buckets[_M_bucket_index(*__node->_M_next())] = __node;
947 
948  _M_buckets[__bkt] = &_M_before_begin;
949  }
950  }
951 
952  // Remove the first node of the bucket
953  void
954  _M_remove_bucket_begin(size_type __bkt, __node_ptr __next_n,
955  size_type __next_bkt)
956  {
957  if (!__next_n)
958  _M_buckets[__bkt] = nullptr;
959  else if (__next_bkt != __bkt)
960  {
961  _M_buckets[__next_bkt] = _M_buckets[__bkt];
962  _M_buckets[__bkt] = nullptr;
963  }
964  }
965 
966  // Get the node before __n in the bucket __bkt
967  __node_base_ptr
968  _M_get_previous_node(size_type __bkt, __node_ptr __n);
969 
970  pair<__node_ptr, __hash_code>
971  _M_compute_hash_code(__node_ptr __hint, const key_type& __k) const;
972 
973  // Insert node __n with hash code __code, in bucket __bkt (or another
974  // bucket if rehashing is needed).
975  // Assumes no element with equivalent key is already present.
976  // Takes ownership of __n if insertion succeeds, throws otherwise.
977  // __n_elt is an estimated number of elements we expect to insert,
978  // used as a hint for rehashing when inserting a range.
979  iterator
980  _M_insert_unique_node(size_type __bkt, __hash_code,
981  __node_ptr __n, size_type __n_elt = 1);
982 
983  // Insert node __n with key __k and hash code __code.
984  // Takes ownership of __n if insertion succeeds, throws otherwise.
985  iterator
986  _M_insert_multi_node(__node_ptr __hint,
987  __hash_code __code, __node_ptr __n);
988 
989  template<typename... _Args>
990  std::pair<iterator, bool>
991  _M_emplace_uniq(_Args&&... __args);
992 
993 #pragma GCC diagnostic push
994 #pragma GCC diagnostic ignored "-Wc++14-extensions" // variable templates
995  template<typename _Arg, typename _DArg = __remove_cvref_t<_Arg>,
996  typename = _ExtractKey>
997  static constexpr bool __is_key_type = false;
998 
999  template<typename _Arg>
1000  static constexpr bool
1001  __is_key_type<_Arg, key_type, __detail::_Identity> = true;
1002 
1003  template<typename _Arg, typename _Arg1, typename _Arg2>
1004  static constexpr bool
1005  __is_key_type<_Arg, pair<_Arg1, _Arg2>, __detail::_Select1st>
1006  = is_same<__remove_cvref_t<_Arg1>, key_type>::value;
1007 #pragma GCC diagnostic pop
1008 
1009  template<typename... _Args>
1010  iterator
1011  _M_emplace_multi(const_iterator, _Args&&... __args);
1012 
1013  iterator
1014  _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n);
1015 
1016  template<typename _InputIterator>
1017  void
1018  _M_insert_range_multi(_InputIterator __first, _InputIterator __last);
1019 
1020  public:
1021 #pragma GCC diagnostic push
1022 #pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
1023  // Emplace
1024  template<typename... _Args>
1025  __ireturn_type
1026  emplace(_Args&&... __args)
1027  {
1028  if constexpr (__unique_keys::value)
1029  return _M_emplace_uniq(std::forward<_Args>(__args)...);
1030  else
1031  return _M_emplace_multi(cend(), std::forward<_Args>(__args)...);
1032  }
1033 
1034  template<typename... _Args>
1035  iterator
1036  emplace_hint(const_iterator __hint, _Args&&... __args)
1037  {
1038  if constexpr (__unique_keys::value)
1039  return _M_emplace_uniq(std::forward<_Args>(__args)...).first;
1040  else
1041  return _M_emplace_multi(__hint, std::forward<_Args>(__args)...);
1042  }
1043 
1044  // Insert
1045  __ireturn_type
1046  insert(const value_type& __v)
1047  {
1048  if constexpr (__unique_keys::value)
1049  return _M_emplace_uniq(__v);
1050  else
1051  return _M_emplace_multi(cend(), __v);
1052  }
1053 
1054  iterator
1055  insert(const_iterator __hint, const value_type& __v)
1056  {
1057  if constexpr (__unique_keys::value)
1058  return _M_emplace_uniq(__v).first;
1059  else
1060  return _M_emplace_multi(__hint, __v);
1061  }
1062 
1063  __ireturn_type
1064  insert(value_type&& __v)
1065  {
1066  if constexpr (__unique_keys::value)
1067  return _M_emplace_uniq(std::move(__v));
1068  else
1069  return _M_emplace_multi(cend(), std::move(__v));
1070  }
1071 
1072  iterator
1073  insert(const_iterator __hint, value_type&& __v)
1074  {
1075  if constexpr (__unique_keys::value)
1076  return _M_emplace_uniq(std::move(__v)).first;
1077  else
1078  return _M_emplace_multi(__hint, std::move(__v));
1079  }
1080 
1081 #ifdef __glibcxx_unordered_map_try_emplace // C++ >= 17 && HOSTED
1082  template<typename _KType, typename... _Args>
1083  std::pair<iterator, bool>
1084  try_emplace(const_iterator, _KType&& __k, _Args&&... __args)
1085  {
1086  __hash_code __code;
1087  size_type __bkt;
1088  if (auto __loc = _M_locate(__k))
1089  return { iterator(__loc), false };
1090  else
1091  {
1092  __code = __loc._M_hash_code;
1093  __bkt = __loc._M_bucket_index;
1094  }
1095 
1096  _Scoped_node __node {
1097  this,
1098  std::piecewise_construct,
1099  std::forward_as_tuple(std::forward<_KType>(__k)),
1100  std::forward_as_tuple(std::forward<_Args>(__args)...)
1101  };
1102  auto __it = _M_insert_unique_node(__bkt, __code, __node._M_node);
1103  __node._M_node = nullptr;
1104  return { __it, true };
1105  }
1106 #endif
1107 
1108  void
1109  insert(initializer_list<value_type> __l)
1110  { this->insert(__l.begin(), __l.end()); }
1111 
1112  template<typename _InputIterator>
1113  void
1114  insert(_InputIterator __first, _InputIterator __last)
1115  {
1116  if constexpr (__unique_keys::value)
1117  for (; __first != __last; ++__first)
1118  _M_emplace_uniq(*__first);
1119  else
1120  return _M_insert_range_multi(__first, __last);
1121  }
1122 
1123  // This overload is only defined for maps, not sets.
1124  template<typename _Pair,
1125  typename = _Require<__not_<is_same<_Key, _Value>>,
1126  is_constructible<value_type, _Pair&&>>>
1127  __ireturn_type
1128  insert(_Pair&& __v)
1129  {
1130  if constexpr (__unique_keys::value)
1131  return _M_emplace_uniq(std::forward<_Pair>(__v));
1132  else
1133  return _M_emplace_multi(cend(), std::forward<_Pair>(__v));
1134  }
1135 
1136  // This overload is only defined for maps, not sets.
1137  template<typename _Pair,
1138  typename = _Require<__not_<is_same<_Key, _Value>>,
1139  is_constructible<value_type, _Pair&&>>>
1140  iterator
1141  insert(const_iterator __hint, _Pair&& __v)
1142  {
1143  if constexpr (__unique_keys::value)
1144  return _M_emplace_uniq(std::forward<_Pair>(__v));
1145  else
1146  return _M_emplace_multi(__hint, std::forward<_Pair>(__v));
1147  }
1148 #pragma GCC diagnostic pop
1149 
1150  // Erase
1151  iterator
1152  erase(const_iterator);
1153 
1154  // _GLIBCXX_RESOLVE_LIB_DEFECTS
1155  // 2059. C++0x ambiguity problem with map::erase
1156  iterator
1157  erase(iterator __it)
1158  { return erase(const_iterator(__it)); }
1159 
1160  size_type
1161  erase(const key_type& __k);
1162 
1163  iterator
1164  erase(const_iterator, const_iterator);
1165 
1166  void
1167  clear() noexcept;
1168 
1169  // Set number of buckets keeping it appropriate for container's number
1170  // of elements.
1171  void rehash(size_type __bkt_count);
1172 
1173  // DR 1189.
1174  // reserve, if present, comes from _Rehash_base.
1175 
1176 #if __glibcxx_node_extract // >= C++17 && HOSTED
1177  /// Re-insert an extracted node into a container with unique keys.
1178  insert_return_type
1179  _M_reinsert_node(node_type&& __nh)
1180  {
1181  insert_return_type __ret;
1182  if (__nh.empty())
1183  __ret.position = end();
1184  else
1185  {
1186  __glibcxx_assert(get_allocator() == __nh.get_allocator());
1187 
1188  if (auto __loc = _M_locate(__nh._M_key()))
1189  {
1190  __ret.node = std::move(__nh);
1191  __ret.position = iterator(__loc);
1192  __ret.inserted = false;
1193  }
1194  else
1195  {
1196  auto __code = __loc._M_hash_code;
1197  auto __bkt = __loc._M_bucket_index;
1198  __ret.position
1199  = _M_insert_unique_node(__bkt, __code, __nh._M_ptr);
1200  __ret.inserted = true;
1201  __nh.release();
1202  }
1203  }
1204  return __ret;
1205  }
1206 
1207  /// Re-insert an extracted node into a container with equivalent keys.
1208  iterator
1209  _M_reinsert_node_multi(const_iterator __hint, node_type&& __nh)
1210  {
1211  if (__nh.empty())
1212  return end();
1213 
1214  __glibcxx_assert(get_allocator() == __nh.get_allocator());
1215 
1216  const key_type& __k = __nh._M_key();
1217  auto __code = this->_M_hash_code(__k);
1218  auto __ret
1219  = _M_insert_multi_node(__hint._M_cur, __code, __nh._M_ptr);
1220  __nh.release();
1221  return __ret;
1222  }
1223 
1224  private:
1225  node_type
1226  _M_extract_node(size_t __bkt, __node_base_ptr __prev_n)
1227  {
1228  __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
1229  if (__prev_n == _M_buckets[__bkt])
1230  _M_remove_bucket_begin(__bkt, __n->_M_next(),
1231  __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
1232  else if (__n->_M_nxt)
1233  {
1234  size_type __next_bkt = _M_bucket_index(*__n->_M_next());
1235  if (__next_bkt != __bkt)
1236  _M_buckets[__next_bkt] = __prev_n;
1237  }
1238 
1239  __prev_n->_M_nxt = __n->_M_nxt;
1240  __n->_M_nxt = nullptr;
1241  --_M_element_count;
1242  return { __n, this->_M_node_allocator() };
1243  }
1244 
1245  // Hash code for node __src_n with key __k, using this->hash_function().
1246  // Will use a hash code cached in the node if safe to do so. This is
1247  // for use in _M_merge_multi where the node comes from another container
1248  // with a hash function that might not match this->hash_function().
1249  template<typename _H2>
1250  __hash_code
1251  _M_src_hash_code(const _H2&, const __node_value_type& __src_n) const
1252  {
1253  if constexpr (__and_<__hash_cached,
1254  is_same<_H2, _Hash>, is_empty<_Hash>>::value)
1255  // If the node has a cached hash code, it's OK to use it.
1256  return __src_n._M_hash_code;
1257  else
1258  return this->_M_hash_code(_ExtractKey{}(__src_n._M_v()));
1259  }
1260 
1261  public:
1262  // Extract a node.
1263  node_type
1264  extract(const_iterator __pos)
1265  {
1266  size_t __bkt = _M_bucket_index(*__pos._M_cur);
1267  return _M_extract_node(__bkt,
1268  _M_get_previous_node(__bkt, __pos._M_cur));
1269  }
1270 
1271  /// Extract a node.
1272  node_type
1273  extract(const _Key& __k)
1274  {
1275  node_type __nh;
1276  __hash_code __code = this->_M_hash_code(__k);
1277  std::size_t __bkt = _M_bucket_index(__code);
1278  if (__node_base_ptr __prev_node = _M_find_before_node(__bkt, __k, __code))
1279  __nh = _M_extract_node(__bkt, __prev_node);
1280  return __nh;
1281  }
1282 
1283  /// Merge from another container of the same type.
1284  void
1285  _M_merge_unique(_Hashtable& __src)
1286  {
1287  __glibcxx_assert(get_allocator() == __src.get_allocator());
1288 
1289  using _PTr = pointer_traits<__node_base_ptr>;
1290 
1291  auto __n_elt = __src.size();
1292  size_type __first = 1;
1293  // For a container of identical type we can use its private members,
1294  // __src._M_before_begin, __src._M_bucket_index etc.
1295  auto __prev = _PTr::pointer_to(__src._M_before_begin);
1296  while (__n_elt--)
1297  {
1298  const auto __next = __prev->_M_nxt;
1299  const auto& __node = static_cast<__node_type&>(*__next);
1300  const key_type& __k = _ExtractKey{}(__node._M_v());
1301  const auto __loc = _M_locate(__k);
1302  if (__loc)
1303  {
1304  __prev = __next;
1305  continue;
1306  }
1307 
1308  auto __src_bkt = __src._M_bucket_index(__node);
1309  auto __nh = __src._M_extract_node(__src_bkt, __prev);
1310  _M_insert_unique_node(__loc._M_bucket_index, __loc._M_hash_code,
1311  __nh._M_ptr, __first * __n_elt + 1);
1312  __nh.release();
1313  __first = 0;
1314  }
1315  }
1316 
1317  /// Merge from a compatible container into one with unique keys.
1318  template<typename _Compatible_Hashtable>
1319  void
1320  _M_merge_unique(_Compatible_Hashtable& __src)
1321  {
1322  static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
1323  node_type>, "Node types are compatible");
1324  __glibcxx_assert(get_allocator() == __src.get_allocator());
1325 
1326  auto __n_elt = __src.size();
1327  size_type __first = 1;
1328  // For a compatible container we can only use the public API,
1329  // so cbegin(), cend(), hash_function(), and extract(iterator).
1330  for (auto __i = __src.cbegin(), __end = __src.cend(); __i != __end;)
1331  {
1332  --__n_elt;
1333  auto __pos = __i++;
1334  const key_type& __k = _ExtractKey{}(*__pos);
1335  const auto __loc = _M_locate(__k);
1336  if (__loc)
1337  continue;
1338 
1339  auto __nh = __src.extract(__pos);
1340  _M_insert_unique_node(__loc._M_bucket_index,
1341  __loc._M_hash_code, __nh._M_ptr,
1342  __first * __n_elt + 1);
1343  __nh.release();
1344  __first = 0;
1345  }
1346  }
1347 
1348  /// Merge from another container of the same type.
1349  void
1350  _M_merge_multi(_Hashtable& __src)
1351  {
1352  __glibcxx_assert(get_allocator() == __src.get_allocator());
1353 
1354  if (__src.size() == 0) [[__unlikely__]]
1355  return;
1356 
1357  using _PTr = pointer_traits<__node_base_ptr>;
1358 
1359  __node_ptr __hint = nullptr;
1360  this->reserve(size() + __src.size());
1361  // For a container of identical type we can use its private members,
1362  // __src._M_before_begin, __src._M_bucket_index etc.
1363  auto __prev = _PTr::pointer_to(__src._M_before_begin);
1364  do
1365  {
1366  const auto& __node = static_cast<__node_type&>(*__prev->_M_nxt);
1367  // Hash code from this:
1368  auto __code = _M_hash_code_ext(__node);
1369  // Bucket index in __src, using code from __src.hash_function():
1370  size_type __src_bkt = __src._M_bucket_index(__node);
1371  auto __nh = __src._M_extract_node(__src_bkt, __prev);
1372  __hint = _M_insert_multi_node(__hint, __code, __nh._M_ptr)._M_cur;
1373  __nh.release();
1374  }
1375  while (__prev->_M_nxt != nullptr);
1376  }
1377 
1378  /// Merge from a compatible container into one with equivalent keys.
1379  template<typename _Compatible_Hashtable>
1380  void
1381  _M_merge_multi(_Compatible_Hashtable& __src)
1382  {
1383  static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
1384  node_type>, "Node types are compatible");
1385  __glibcxx_assert(get_allocator() == __src.get_allocator());
1386 
1387  __node_ptr __hint = nullptr;
1388  this->reserve(size() + __src.size());
1389  // For a compatible container we can only use the public API,
1390  // so cbegin(), cend(), hash_function(), and extract(iterator).
1391  for (auto __i = __src.cbegin(), __end = __src.cend(); __i != __end;)
1392  {
1393  auto __pos = __i++;
1394  __hash_code __code
1395  = _M_src_hash_code(__src.hash_function(), *__pos._M_cur);
1396  auto __nh = __src.extract(__pos);
1397  __hint = _M_insert_multi_node(__hint, __code, __nh._M_ptr)._M_cur;
1398  __nh.release();
1399  }
1400  }
1401 #endif // C++17 __glibcxx_node_extract
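The members in the block above back the C++17 node-handle API of the unordered containers (unordered_map::extract, insert of a node handle, and merge are, roughly speaking, implemented in terms of _M_extract_node, _M_reinsert_node and _M_merge_unique). A small user-level example of what they enable:

    // C++17
    #include <cassert>
    #include <string>
    #include <unordered_map>
    #include <utility>

    int main()
    {
      std::unordered_map<int, std::string> a{{1, "one"}, {2, "two"}};
      std::unordered_map<int, std::string> b{{2, "zwei"}, {3, "drei"}};

      // extract() hands back the node; its key may be changed in place
      // before re-inserting it, with no allocation.
      auto nh = a.extract(1);
      nh.key() = 10;
      auto res = a.insert(std::move(nh));
      assert(res.inserted && res.position->first == 10);

      // merge() moves over the nodes whose keys are not already present;
      // the node with key 2 stays in b.
      a.merge(b);
      assert(a.count(3) == 1 && b.count(2) == 1);
    }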
1402 
1403  bool
1404  _M_equal(const _Hashtable& __other) const;
1405 
1406  private:
1407  // Helper rehash method used when keys are unique.
1408  void _M_rehash(size_type __bkt_count, true_type __uks);
1409 
1410  // Helper rehash method used when keys can be non-unique.
1411  void _M_rehash(size_type __bkt_count, false_type __uks);
1412  };
1413 
1414  // Definitions of class template _Hashtable's out-of-line member functions.
1415  template<typename _Key, typename _Value, typename _Alloc,
1416  typename _ExtractKey, typename _Equal,
1417  typename _Hash, typename _RangeHash, typename _Unused,
1418  typename _RehashPolicy, typename _Traits>
1419  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1420  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1421  _Hashtable(size_type __bkt_count_hint,
1422  const _Hash& __h, const _Equal& __eq, const allocator_type& __a)
1423  : _Hashtable(__h, __eq, __a)
1424  {
1425  auto __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count_hint);
1426  if (__bkt_count > _M_bucket_count)
1427  {
1428  _M_buckets = _M_allocate_buckets(__bkt_count);
1429  _M_bucket_count = __bkt_count;
1430  }
1431  }
1432 
1433  template<typename _Key, typename _Value, typename _Alloc,
1434  typename _ExtractKey, typename _Equal,
1435  typename _Hash, typename _RangeHash, typename _Unused,
1436  typename _RehashPolicy, typename _Traits>
1437  template<typename _InputIterator>
1438  inline
1439  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1440  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1441  _Hashtable(_InputIterator __f, _InputIterator __l,
1442  size_type __bkt_count_hint,
1443  const _Hash& __h, const _Equal& __eq,
1444  const allocator_type& __a, true_type /* __uks */)
1445  : _Hashtable(__bkt_count_hint, __h, __eq, __a)
1446  { this->insert(__f, __l); }
1447 
1448  template<typename _Key, typename _Value, typename _Alloc,
1449  typename _ExtractKey, typename _Equal,
1450  typename _Hash, typename _RangeHash, typename _Unused,
1451  typename _RehashPolicy, typename _Traits>
1452  template<typename _InputIterator>
1453  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1454  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1455  _Hashtable(_InputIterator __f, _InputIterator __l,
1456  size_type __bkt_count_hint,
1457  const _Hash& __h, const _Equal& __eq,
1458  const allocator_type& __a, false_type __uks)
1459  : _Hashtable(__h, __eq, __a)
1460  {
1461  auto __nb_elems = __detail::__distance_fw(__f, __l);
1462  auto __bkt_count =
1463  _M_rehash_policy._M_next_bkt(
1464  std::max(_M_rehash_policy._M_bkt_for_elements(__nb_elems),
1465  __bkt_count_hint));
1466 
1467  if (__bkt_count > _M_bucket_count)
1468  {
1469  _M_buckets = _M_allocate_buckets(__bkt_count);
1470  _M_bucket_count = __bkt_count;
1471  }
1472 
1473  for (; __f != __l; ++__f)
1474  _M_emplace_multi(cend(), *__f);
1475  }
1476 
1477  template<typename _Key, typename _Value, typename _Alloc,
1478  typename _ExtractKey, typename _Equal,
1479  typename _Hash, typename _RangeHash, typename _Unused,
1480  typename _RehashPolicy, typename _Traits>
1481  auto
1482  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1483  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1484  operator=(const _Hashtable& __ht)
1485  -> _Hashtable&
1486  {
1487  if (&__ht == this)
1488  return *this;
1489 
1490  if (__node_alloc_traits::_S_propagate_on_copy_assign())
1491  {
1492  auto& __this_alloc = this->_M_node_allocator();
1493  auto& __that_alloc = __ht._M_node_allocator();
1494  if (!__node_alloc_traits::_S_always_equal()
1495  && __this_alloc != __that_alloc)
1496  {
1497  // Replacement allocator cannot free existing storage.
1498  this->_M_deallocate_nodes(_M_begin());
1499  _M_before_begin._M_nxt = nullptr;
1500  _M_deallocate_buckets();
1501  _M_buckets = nullptr;
1502  std::__alloc_on_copy(__this_alloc, __that_alloc);
1503  __hashtable_base::operator=(__ht);
1504  _M_bucket_count = __ht._M_bucket_count;
1505  _M_element_count = __ht._M_element_count;
1506  _M_rehash_policy = __ht._M_rehash_policy;
1507 
1508  struct _Guard
1509  {
1510  ~_Guard() { if (_M_ht) _M_ht->_M_reset(); }
1511  _Hashtable* _M_ht;
1512  };
1513  // If _M_assign exits via an exception it will have deallocated
1514  // all memory. This guard will ensure *this is in a usable state.
1515  _Guard __guard{this};
1516  _M_assign(__ht);
1517  __guard._M_ht = nullptr;
1518  return *this;
1519  }
1520  std::__alloc_on_copy(__this_alloc, __that_alloc);
1521  }
1522 
1523  // Reuse allocated buckets and nodes.
1524  _M_assign_elements(__ht);
1525  return *this;
1526  }
1527 
1528  template<typename _Key, typename _Value, typename _Alloc,
1529  typename _ExtractKey, typename _Equal,
1530  typename _Hash, typename _RangeHash, typename _Unused,
1531  typename _RehashPolicy, typename _Traits>
1532  template<typename _Ht>
1533  void
1534  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1535  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1536  _M_assign_elements(_Ht&& __ht)
1537  {
1538  using __reuse_or_alloc_node_gen_t =
1539  __detail::_ReuseOrAllocNode<__node_alloc_type>;
1540 
1541  __buckets_ptr __former_buckets = nullptr;
1542  std::size_t __former_bucket_count = _M_bucket_count;
1543  __rehash_guard_t __rehash_guard(_M_rehash_policy);
1544 
1545  if (_M_bucket_count != __ht._M_bucket_count)
1546  {
1547  __former_buckets = _M_buckets;
1548  _M_buckets = _M_allocate_buckets(__ht._M_bucket_count);
1549  _M_bucket_count = __ht._M_bucket_count;
1550  }
1551  else
1552  std::fill_n(_M_buckets, _M_bucket_count, nullptr);
1553 
1554  __try
1555  {
1556  __hashtable_base::operator=(std::forward<_Ht>(__ht));
1557  _M_element_count = __ht._M_element_count;
1558  _M_rehash_policy = __ht._M_rehash_policy;
1559  __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
1560  _M_before_begin._M_nxt = nullptr;
1561  _M_assign(std::forward<_Ht>(__ht), __roan);
1562  if (__former_buckets)
1563  _M_deallocate_buckets(__former_buckets, __former_bucket_count);
1564  __rehash_guard._M_guarded_obj = nullptr;
1565  }
1566  __catch(...)
1567  {
1568  if (__former_buckets)
1569  {
1570  // Restore previous buckets.
1571  _M_deallocate_buckets();
1572  _M_buckets = __former_buckets;
1573  _M_bucket_count = __former_bucket_count;
1574  }
1575  std::fill_n(_M_buckets, _M_bucket_count, nullptr);
1576  __throw_exception_again;
1577  }
1578  }
1579 
1580  template<typename _Key, typename _Value, typename _Alloc,
1581  typename _ExtractKey, typename _Equal,
1582  typename _Hash, typename _RangeHash, typename _Unused,
1583  typename _RehashPolicy, typename _Traits>
1584  template<typename _Ht, typename _NodeGenerator>
1585  void
1586  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1587  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1588  _M_assign(_Ht&& __ht, _NodeGenerator& __node_gen)
1589  {
1590  struct _Guard
1591  {
1592  ~_Guard()
1593  {
1594  if (_M_ht)
1595  {
1596  _M_ht->clear();
1597  if (_M_dealloc_buckets)
1598  _M_ht->_M_deallocate_buckets();
1599  }
1600  }
1601  _Hashtable* _M_ht = nullptr;
1602  bool _M_dealloc_buckets = false;
1603  };
1604  _Guard __guard;
1605 
1606  if (!_M_buckets)
1607  {
1608  _M_buckets = _M_allocate_buckets(_M_bucket_count);
1609  __guard._M_dealloc_buckets = true;
1610  }
1611 
1612  if (!__ht._M_before_begin._M_nxt)
1613  return;
1614 
1615  __guard._M_ht = this;
1616 
1617  using _FromVal = __conditional_t<is_lvalue_reference<_Ht>::value,
1618  const value_type&, value_type&&>;
1619 
1620  // First deal with the special first node pointed to by
1621  // _M_before_begin.
1622  __node_ptr __ht_n = __ht._M_begin();
1623  __node_ptr __this_n
1624  = __node_gen(static_cast<_FromVal>(__ht_n->_M_v()));
1625  _M_copy_code(*__this_n, *__ht_n);
1626  _M_update_bbegin(__this_n);
1627 
1628  // Then deal with other nodes.
1629  __node_ptr __prev_n = __this_n;
1630  for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
1631  {
1632  __this_n = __node_gen(static_cast<_FromVal>(__ht_n->_M_v()));
1633  __prev_n->_M_nxt = __this_n;
1634  _M_copy_code(*__this_n, *__ht_n);
1635  size_type __bkt = _M_bucket_index(*__this_n);
1636  if (!_M_buckets[__bkt])
1637  _M_buckets[__bkt] = __prev_n;
1638  __prev_n = __this_n;
1639  }
1640  __guard._M_ht = nullptr;
1641  }
1642 
1643  template<typename _Key, typename _Value, typename _Alloc,
1644  typename _ExtractKey, typename _Equal,
1645  typename _Hash, typename _RangeHash, typename _Unused,
1646  typename _RehashPolicy, typename _Traits>
1647  void
1648  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1649  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1650  _M_reset() noexcept
1651  {
1652  _M_rehash_policy._M_reset();
1653  _M_bucket_count = 1;
1654  _M_single_bucket = nullptr;
1655  _M_buckets = &_M_single_bucket;
1656  _M_before_begin._M_nxt = nullptr;
1657  _M_element_count = 0;
1658  }
1659 
1660  template<typename _Key, typename _Value, typename _Alloc,
1661  typename _ExtractKey, typename _Equal,
1662  typename _Hash, typename _RangeHash, typename _Unused,
1663  typename _RehashPolicy, typename _Traits>
1664  void
1665  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1666  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1667  _M_move_assign(_Hashtable&& __ht, true_type)
1668  {
1669  if (__builtin_expect(std::__addressof(__ht) == this, false))
1670  return;
1671 
1672  this->_M_deallocate_nodes(_M_begin());
1673  _M_deallocate_buckets();
1674  __hashtable_base::operator=(std::move(__ht));
1675  _M_rehash_policy = __ht._M_rehash_policy;
1676  if (!__ht._M_uses_single_bucket())
1677  _M_buckets = __ht._M_buckets;
1678  else
1679  {
1680  _M_buckets = &_M_single_bucket;
1681  _M_single_bucket = __ht._M_single_bucket;
1682  }
1683 
1684  _M_bucket_count = __ht._M_bucket_count;
1685  _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt;
1686  _M_element_count = __ht._M_element_count;
1687  std::__alloc_on_move(this->_M_node_allocator(), __ht._M_node_allocator());
1688 
1689  // Fix bucket containing the _M_before_begin pointer that can't be moved.
1690  _M_update_bbegin();
1691  __ht._M_reset();
1692  }
1693 
1694  template<typename _Key, typename _Value, typename _Alloc,
1695  typename _ExtractKey, typename _Equal,
1696  typename _Hash, typename _RangeHash, typename _Unused,
1697  typename _RehashPolicy, typename _Traits>
1698  void
1699  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1700  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1701  _M_move_assign(_Hashtable&& __ht, false_type)
1702  {
1703  if (__ht._M_node_allocator() == this->_M_node_allocator())
1704  _M_move_assign(std::move(__ht), true_type{});
1705  else
1706  {
1707  // Can't take over the memory, so move the elements instead.
1708  _M_assign_elements(std::move(__ht));
1709  __ht.clear();
1710  }
1711  }
1712 
1713  template<typename _Key, typename _Value, typename _Alloc,
1714  typename _ExtractKey, typename _Equal,
1715  typename _Hash, typename _RangeHash, typename _Unused,
1716  typename _RehashPolicy, typename _Traits>
1717  inline
1718  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1719  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1720  _Hashtable(const _Hashtable& __ht)
1721  : __hashtable_base(__ht),
1722  __map_base(__ht),
1723  __rehash_base(__ht),
1724  __hashtable_alloc(
1725  __node_alloc_traits::_S_select_on_copy(__ht._M_node_allocator())),
1726  __enable_default_ctor(__ht),
1727  _M_buckets(nullptr),
1728  _M_bucket_count(__ht._M_bucket_count),
1729  _M_element_count(__ht._M_element_count),
1730  _M_rehash_policy(__ht._M_rehash_policy)
1731  {
1732  _M_assign(__ht);
1733  }
1734 
1735  template<typename _Key, typename _Value, typename _Alloc,
1736  typename _ExtractKey, typename _Equal,
1737  typename _Hash, typename _RangeHash, typename _Unused,
1738  typename _RehashPolicy, typename _Traits>
1739  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1740  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1741  _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
1742  true_type /* alloc always equal */)
1743  noexcept(_S_nothrow_move())
1744  : __hashtable_base(__ht),
1745  __map_base(__ht),
1746  __rehash_base(__ht),
1747  __hashtable_alloc(std::move(__a)),
1748  __enable_default_ctor(__ht),
1749  _M_buckets(__ht._M_buckets),
1750  _M_bucket_count(__ht._M_bucket_count),
1751  _M_before_begin(__ht._M_before_begin._M_nxt),
1752  _M_element_count(__ht._M_element_count),
1753  _M_rehash_policy(__ht._M_rehash_policy)
1754  {
1755  // Update buckets if __ht is using its single bucket.
1756  if (__ht._M_uses_single_bucket())
1757  {
1758  _M_buckets = &_M_single_bucket;
1759  _M_single_bucket = __ht._M_single_bucket;
1760  }
1761 
1762  // Fix bucket containing the _M_before_begin pointer that can't be moved.
1763  _M_update_bbegin();
1764 
1765  __ht._M_reset();
1766  }
1767 
1768  template<typename _Key, typename _Value, typename _Alloc,
1769  typename _ExtractKey, typename _Equal,
1770  typename _Hash, typename _RangeHash, typename _Unused,
1771  typename _RehashPolicy, typename _Traits>
1772  inline
1773  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1774  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1775  _Hashtable(const _Hashtable& __ht, const allocator_type& __a)
1776  : __hashtable_base(__ht),
1777  __map_base(__ht),
1778  __rehash_base(__ht),
1779  __hashtable_alloc(__node_alloc_type(__a)),
1780  __enable_default_ctor(__ht),
1781  _M_buckets(),
1782  _M_bucket_count(__ht._M_bucket_count),
1783  _M_element_count(__ht._M_element_count),
1784  _M_rehash_policy(__ht._M_rehash_policy)
1785  {
1786  _M_assign(__ht);
1787  }
1788 
1789  template<typename _Key, typename _Value, typename _Alloc,
1790  typename _ExtractKey, typename _Equal,
1791  typename _Hash, typename _RangeHash, typename _Unused,
1792  typename _RehashPolicy, typename _Traits>
1793  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1794  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1795  _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
1796  false_type /* alloc always equal */)
1797  : __hashtable_base(__ht),
1798  __map_base(__ht),
1799  __rehash_base(__ht),
1800  __hashtable_alloc(std::move(__a)),
1801  __enable_default_ctor(__ht),
1802  _M_buckets(nullptr),
1803  _M_bucket_count(__ht._M_bucket_count),
1804  _M_element_count(__ht._M_element_count),
1805  _M_rehash_policy(__ht._M_rehash_policy)
1806  {
1807  if (__ht._M_node_allocator() == this->_M_node_allocator())
1808  {
1809  if (__ht._M_uses_single_bucket())
1810  {
1811  _M_buckets = &_M_single_bucket;
1812  _M_single_bucket = __ht._M_single_bucket;
1813  }
1814  else
1815  _M_buckets = __ht._M_buckets;
1816 
1817  // Fix bucket containing the _M_before_begin pointer that can't be
1818  // moved.
1819  _M_update_bbegin(__ht._M_begin());
1820 
1821  __ht._M_reset();
1822  }
1823  else
1824  {
1825  using _Fwd_Ht = __conditional_t<
1826  __move_if_noexcept_cond<value_type>::value,
1827  const _Hashtable&, _Hashtable&&>;
1828  _M_assign(std::forward<_Fwd_Ht>(__ht));
1829  __ht.clear();
1830  }
1831  }
1832 
1833  template<typename _Key, typename _Value, typename _Alloc,
1834  typename _ExtractKey, typename _Equal,
1835  typename _Hash, typename _RangeHash, typename _Unused,
1836  typename _RehashPolicy, typename _Traits>
1837  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1838  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1839  ~_Hashtable() noexcept
1840  {
1841  // Getting a bucket index from a node must not throw, because it is done
1842  // during the rehash process. This static_assert only matters when
1843  // _Hashtable is used with _Hashtable_traits requesting a non-cached
1844  // hash code. A complete type is needed to check this, so do it in the
1845  // destructor rather than at class scope.
1846  static_assert(noexcept(declval<const __hash_code_base_access&>()
1847  ._M_bucket_index(declval<const __node_value_type&>(),
1848  (std::size_t)0)),
1849  "Cache the hash code or qualify your functors involved"
1850  " in hash code and bucket index computation with noexcept");
1851 
1852  this->_M_deallocate_nodes(_M_begin());
1853  _M_deallocate_buckets();
1854  }
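
  /* Exposition only: the static_assert above asks that, when hash codes are
     not cached in the nodes, recomputing a bucket index from a stored element
     cannot throw.  A minimal sketch of the kind of noexcept-qualified hasher
     it requests (the StrHash name is hypothetical, not part of the library):
     @code
     #include <cstddef>
     #include <string>
     #include <unordered_set>

     struct StrHash
     {
       // noexcept lets bucket indices be recomputed during rehashing
       // without risking an exception part-way through.
       std::size_t operator()(const std::string& s) const noexcept
       { return std::hash<std::string>{}(s); }
     };

     int main()
     {
       std::unordered_set<std::string, StrHash> words;
       words.insert("rehash");
       return words.count("rehash") == 1 ? 0 : 1;
     }
     @endcode
  */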
1855 
1856  template<typename _Key, typename _Value, typename _Alloc,
1857  typename _ExtractKey, typename _Equal,
1858  typename _Hash, typename _RangeHash, typename _Unused,
1859  typename _RehashPolicy, typename _Traits>
1860  void
1861  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1862  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1863  swap(_Hashtable& __x)
1864  noexcept(__and_<__is_nothrow_swappable<_Hash>,
1865  __is_nothrow_swappable<_Equal>>::value)
1866  {
1867  using std::swap;
1868  swap(__hash_code_base::_M_hash._M_obj,
1869  __x.__hash_code_base::_M_hash._M_obj);
1870  swap(__hashtable_base::_M_equal._M_obj,
1871  __x.__hashtable_base::_M_equal._M_obj);
1872 
1873 #pragma GCC diagnostic push
1874 #pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
1875  if constexpr (__node_alloc_traits::propagate_on_container_swap::value)
1876  swap(this->_M_node_allocator(), __x._M_node_allocator());
1877 #pragma GCC diagnostic pop
1878 
1879  std::swap(_M_rehash_policy, __x._M_rehash_policy);
1880 
1881  // Deal properly with potentially moved instances.
1882  if (this->_M_uses_single_bucket())
1883  {
1884  if (!__x._M_uses_single_bucket())
1885  {
1886  _M_buckets = __x._M_buckets;
1887  __x._M_buckets = &__x._M_single_bucket;
1888  }
1889  }
1890  else if (__x._M_uses_single_bucket())
1891  {
1892  __x._M_buckets = _M_buckets;
1893  _M_buckets = &_M_single_bucket;
1894  }
1895  else
1896  std::swap(_M_buckets, __x._M_buckets);
1897 
1898  std::swap(_M_bucket_count, __x._M_bucket_count);
1899  std::swap(_M_before_begin._M_nxt, __x._M_before_begin._M_nxt);
1900  std::swap(_M_element_count, __x._M_element_count);
1901  std::swap(_M_single_bucket, __x._M_single_bucket);
1902 
1903  // Fix buckets containing the _M_before_begin pointers that can't be
1904  // swapped.
1905  _M_update_bbegin();
1906  __x._M_update_bbegin();
1907  }
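
  /* Exposition only: a minimal sketch of the observable effect of the swap
     above, using only the standard container interface.  Buckets and nodes
     are exchanged wholesale, so no element is copied or moved and pointers
     to elements stay valid, now referring into the other container:
     @code
     #include <cassert>
     #include <unordered_map>

     int main()
     {
       std::unordered_map<int, int> a{{1, 10}};
       std::unordered_map<int, int> b{{2, 20}, {3, 30}};
       const int* p = &a.at(1);
       a.swap(b);                       // exchanges buckets and nodes only
       assert(b.at(1) == 10 && p == &b.at(1));
       assert(a.size() == 2 && b.size() == 1);
       return 0;
     }
     @endcode
  */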
1908 
1909  template<typename _Key, typename _Value, typename _Alloc,
1910  typename _ExtractKey, typename _Equal,
1911  typename _Hash, typename _RangeHash, typename _Unused,
1912  typename _RehashPolicy, typename _Traits>
1913  auto
1914  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1915  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1916  find(const key_type& __k)
1917  -> iterator
1918  { return iterator(_M_locate(__k)); }
1919 
1920  template<typename _Key, typename _Value, typename _Alloc,
1921  typename _ExtractKey, typename _Equal,
1922  typename _Hash, typename _RangeHash, typename _Unused,
1923  typename _RehashPolicy, typename _Traits>
1924  auto
1925  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1926  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1927  find(const key_type& __k) const
1928  -> const_iterator
1929  { return const_iterator(_M_locate(__k)); }
1930 
1931 #if __cplusplus > 201703L
1932  template<typename _Key, typename _Value, typename _Alloc,
1933  typename _ExtractKey, typename _Equal,
1934  typename _Hash, typename _RangeHash, typename _Unused,
1935  typename _RehashPolicy, typename _Traits>
1936  template<typename _Kt, typename, typename>
1937  auto
1938  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1939  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1940  _M_find_tr(const _Kt& __k)
1941  -> iterator
1942  {
1943  if (size() <= __small_size_threshold())
1944  {
1945  for (auto __n = _M_begin(); __n; __n = __n->_M_next())
1946  if (this->_M_key_equals_tr(__k, *__n))
1947  return iterator(__n);
1948  return end();
1949  }
1950 
1951  __hash_code __code = this->_M_hash_code_tr(__k);
1952  std::size_t __bkt = _M_bucket_index(__code);
1953  return iterator(_M_find_node_tr(__bkt, __k, __code));
1954  }
1955 
1956  template<typename _Key, typename _Value, typename _Alloc,
1957  typename _ExtractKey, typename _Equal,
1958  typename _Hash, typename _RangeHash, typename _Unused,
1959  typename _RehashPolicy, typename _Traits>
1960  template<typename _Kt, typename, typename>
1961  auto
1962  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1963  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1964  _M_find_tr(const _Kt& __k) const
1965  -> const_iterator
1966  {
1967  if (size() <= __small_size_threshold())
1968  {
1969  for (auto __n = _M_begin(); __n; __n = __n->_M_next())
1970  if (this->_M_key_equals_tr(__k, *__n))
1971  return const_iterator(__n);
1972  return end();
1973  }
1974 
1975  __hash_code __code = this->_M_hash_code_tr(__k);
1976  std::size_t __bkt = _M_bucket_index(__code);
1977  return const_iterator(_M_find_node_tr(__bkt, __k, __code));
1978  }
1979 #endif
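
  /* Exposition only: the _M_find_tr overloads above serve C++20 heterogeneous
     lookup, which the containers enable when both the hasher and the equality
     predicate are transparent.  A minimal sketch, assuming C++20; the SvHash
     name is hypothetical:
     @code
     #include <cassert>
     #include <cstddef>
     #include <string>
     #include <string_view>
     #include <unordered_set>

     struct SvHash
     {
       using is_transparent = void;       // opt in to heterogeneous lookup
       std::size_t operator()(std::string_view sv) const noexcept
       { return std::hash<std::string_view>{}(sv); }
     };

     int main()
     {
       std::unordered_set<std::string, SvHash, std::equal_to<>> s{"alpha", "beta"};
       // Look up via string_view and char const*: no temporary std::string.
       assert(s.find(std::string_view{"alpha"}) != s.end());
       assert(s.count("beta") == 1);
       return 0;
     }
     @endcode
  */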
1980 
1981  template<typename _Key, typename _Value, typename _Alloc,
1982  typename _ExtractKey, typename _Equal,
1983  typename _Hash, typename _RangeHash, typename _Unused,
1984  typename _RehashPolicy, typename _Traits>
1985  auto
1986  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1987  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1988  count(const key_type& __k) const
1989  -> size_type
1990  {
1991  auto __it = find(__k);
1992  if (!__it._M_cur)
1993  return 0;
1994 
1995  if (__unique_keys::value)
1996  return 1;
1997 
1998  size_type __result = 1;
1999  for (auto __ref = __it++;
2000  __it._M_cur && this->_M_node_equals(*__ref._M_cur, *__it._M_cur);
2001  ++__it)
2002  ++__result;
2003 
2004  return __result;
2005  }
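
  /* Exposition only: for containers with non-unique keys, count() above walks
     the run of equivalent nodes starting at the element it found, relying on
     equivalent elements being stored adjacently.  A minimal sketch:
     @code
     #include <cassert>
     #include <unordered_set>

     int main()
     {
       std::unordered_multiset<int> m{1, 2, 1, 3, 1};
       assert(m.count(1) == 3);   // the three 1s form one adjacent run
       assert(m.count(4) == 0);
       return 0;
     }
     @endcode
  */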
2006 
2007 #if __cplusplus > 201703L
2008  template<typename _Key, typename _Value, typename _Alloc,
2009  typename _ExtractKey, typename _Equal,
2010  typename _Hash, typename _RangeHash, typename _Unused,
2011  typename _RehashPolicy, typename _Traits>
2012  template<typename _Kt, typename, typename>
2013  auto
2014  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2015  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2016  _M_count_tr(const _Kt& __k) const
2017  -> size_type
2018  {
2019  if (size() <= __small_size_threshold())
2020  {
2021  size_type __result = 0;
2022  for (auto __n = _M_begin(); __n; __n = __n->_M_next())
2023  {
2024  if (this->_M_key_equals_tr(__k, *__n))
2025  {
2026  ++__result;
2027  continue;
2028  }
2029 
2030  if (__result)
2031  break;
2032  }
2033 
2034  return __result;
2035  }
2036 
2037  __hash_code __code = this->_M_hash_code_tr(__k);
2038  std::size_t __bkt = _M_bucket_index(__code);
2039  auto __n = _M_find_node_tr(__bkt, __k, __code);
2040  if (!__n)
2041  return 0;
2042 
2043  iterator __it(__n);
2044  size_type __result = 1;
2045  for (++__it;
2046  __it._M_cur && this->_M_equals_tr(__k, __code, *__it._M_cur);
2047  ++__it)
2048  ++__result;
2049 
2050  return __result;
2051  }
2052 #endif
2053 
2054  template<typename _Key, typename _Value, typename _Alloc,
2055  typename _ExtractKey, typename _Equal,
2056  typename _Hash, typename _RangeHash, typename _Unused,
2057  typename _RehashPolicy, typename _Traits>
2058  auto
2059  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2060  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2061  equal_range(const key_type& __k)
2062  -> pair<iterator, iterator>
2063  {
2064  auto __ite = find(__k);
2065  if (!__ite._M_cur)
2066  return { __ite, __ite };
2067 
2068  auto __beg = __ite++;
2069  if (__unique_keys::value)
2070  return { __beg, __ite };
2071 
2072  while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
2073  ++__ite;
2074 
2075  return { __beg, __ite };
2076  }
2077 
2078  template<typename _Key, typename _Value, typename _Alloc,
2079  typename _ExtractKey, typename _Equal,
2080  typename _Hash, typename _RangeHash, typename _Unused,
2081  typename _RehashPolicy, typename _Traits>
2082  auto
2083  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2084  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2085  equal_range(const key_type& __k) const
2086  -> pair<const_iterator, const_iterator>
2087  {
2088  auto __ite = find(__k);
2089  if (!__ite._M_cur)
2090  return { __ite, __ite };
2091 
2092  auto __beg = __ite++;
2093  if (__unique_keys::value)
2094  return { __beg, __ite };
2095 
2096  while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
2097  ++__ite;
2098 
2099  return { __beg, __ite };
2100  }
2101 
2102 #if __cplusplus > 201703L
2103  template<typename _Key, typename _Value, typename _Alloc,
2104  typename _ExtractKey, typename _Equal,
2105  typename _Hash, typename _RangeHash, typename _Unused,
2106  typename _RehashPolicy, typename _Traits>
2107  template<typename _Kt, typename, typename>
2108  auto
2109  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2110  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2111  _M_equal_range_tr(const _Kt& __k)
2112  -> pair<iterator, iterator>
2113  {
2114  if (size() <= __small_size_threshold())
2115  {
2116  __node_ptr __n, __beg = nullptr;
2117  for (__n = _M_begin(); __n; __n = __n->_M_next())
2118  {
2119  if (this->_M_key_equals_tr(__k, *__n))
2120  {
2121  if (!__beg)
2122  __beg = __n;
2123  continue;
2124  }
2125 
2126  if (__beg)
2127  break;
2128  }
2129 
2130  return { iterator(__beg), iterator(__n) };
2131  }
2132 
2133  __hash_code __code = this->_M_hash_code_tr(__k);
2134  std::size_t __bkt = _M_bucket_index(__code);
2135  auto __n = _M_find_node_tr(__bkt, __k, __code);
2136  iterator __ite(__n);
2137  if (!__n)
2138  return { __ite, __ite };
2139 
2140  auto __beg = __ite++;
2141  while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
2142  ++__ite;
2143 
2144  return { __beg, __ite };
2145  }
2146 
2147  template<typename _Key, typename _Value, typename _Alloc,
2148  typename _ExtractKey, typename _Equal,
2149  typename _Hash, typename _RangeHash, typename _Unused,
2150  typename _RehashPolicy, typename _Traits>
2151  template<typename _Kt, typename, typename>
2152  auto
2153  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2154  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2155  _M_equal_range_tr(const _Kt& __k) const
2156  -> pair<const_iterator, const_iterator>
2157  {
2158  if (size() <= __small_size_threshold())
2159  {
2160  __node_ptr __n, __beg = nullptr;
2161  for (__n = _M_begin(); __n; __n = __n->_M_next())
2162  {
2163  if (this->_M_key_equals_tr(__k, *__n))
2164  {
2165  if (!__beg)
2166  __beg = __n;
2167  continue;
2168  }
2169 
2170  if (__beg)
2171  break;
2172  }
2173 
2174  return { const_iterator(__beg), const_iterator(__n) };
2175  }
2176 
2177  __hash_code __code = this->_M_hash_code_tr(__k);
2178  std::size_t __bkt = _M_bucket_index(__code);
2179  auto __n = _M_find_node_tr(__bkt, __k, __code);
2180  const_iterator __ite(__n);
2181  if (!__n)
2182  return { __ite, __ite };
2183 
2184  auto __beg = __ite++;
2185  while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
2186  ++__ite;
2187 
2188  return { __beg, __ite };
2189  }
2190 #endif
2191 
2192  // Find the node before the one whose key compares equal to k in the bucket
2193  // bkt. Return nullptr if no node is found.
2194  template<typename _Key, typename _Value, typename _Alloc,
2195  typename _ExtractKey, typename _Equal,
2196  typename _Hash, typename _RangeHash, typename _Unused,
2197  typename _RehashPolicy, typename _Traits>
2198  auto
2199  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2200  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2201  _M_find_before_node(size_type __bkt, const key_type& __k,
2202  __hash_code __code) const
2203  -> __node_base_ptr
2204  {
2205  __node_base_ptr __prev_p = _M_buckets[__bkt];
2206  if (!__prev_p)
2207  return nullptr;
2208 
2209  for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
2210  __p = __p->_M_next())
2211  {
2212  if (this->_M_equals(__k, __code, *__p))
2213  return __prev_p;
2214 
2215  if (__builtin_expect (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt, 0))
2216  break;
2217  __prev_p = __p;
2218  }
2219 
2220  return nullptr;
2221  }
2222 
2223  template<typename _Key, typename _Value, typename _Alloc,
2224  typename _ExtractKey, typename _Equal,
2225  typename _Hash, typename _RangeHash, typename _Unused,
2226  typename _RehashPolicy, typename _Traits>
2227  template<typename _Kt>
2228  auto
2229  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2230  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2231  _M_find_before_node_tr(size_type __bkt, const _Kt& __k,
2232  __hash_code __code) const
2233  -> __node_base_ptr
2234  {
2235  __node_base_ptr __prev_p = _M_buckets[__bkt];
2236  if (!__prev_p)
2237  return nullptr;
2238 
2239  for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
2240  __p = __p->_M_next())
2241  {
2242  if (this->_M_equals_tr(__k, __code, *__p))
2243  return __prev_p;
2244 
2245  if (__builtin_expect (!__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt, 0))
2246  break;
2247  __prev_p = __p;
2248  }
2249 
2250  return nullptr;
2251  }
2252 
2253  template<typename _Key, typename _Value, typename _Alloc,
2254  typename _ExtractKey, typename _Equal,
2255  typename _Hash, typename _RangeHash, typename _Unused,
2256  typename _RehashPolicy, typename _Traits>
2257  inline auto
2258  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2259  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2260  _M_locate(const key_type& __k) const
2261  -> __location_type
2262  {
2263  __location_type __loc;
2264  const auto __size = size();
2265 
2266  if (__size <= __small_size_threshold())
2267  {
2268  __loc._M_before = pointer_traits<__node_base_ptr>::
2269  pointer_to(const_cast<__node_base&>(_M_before_begin));
2270  while (__loc._M_before->_M_nxt)
2271  {
2272  if (this->_M_key_equals(__k, *__loc._M_node()))
2273  return __loc;
2274  __loc._M_before = __loc._M_before->_M_nxt;
2275  }
2276  __loc._M_before = nullptr; // Didn't find it.
2277  }
2278 
2279  __loc._M_hash_code = this->_M_hash_code(__k);
2280  __loc._M_bucket_index = _M_bucket_index(__loc._M_hash_code);
2281 
2282  if (__size > __small_size_threshold())
2283  __loc._M_before = _M_find_before_node(__loc._M_bucket_index, __k,
2284  __loc._M_hash_code);
2285 
2286  return __loc;
2287  }
2288 
2289  template<typename _Key, typename _Value, typename _Alloc,
2290  typename _ExtractKey, typename _Equal,
2291  typename _Hash, typename _RangeHash, typename _Unused,
2292  typename _RehashPolicy, typename _Traits>
2293  auto
2294  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2295  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2296  _M_get_previous_node(size_type __bkt, __node_ptr __n)
2297  -> __node_base_ptr
2298  {
2299  __node_base_ptr __prev_n = _M_buckets[__bkt];
2300  while (__prev_n->_M_nxt != __n)
2301  __prev_n = __prev_n->_M_nxt;
2302  return __prev_n;
2303  }
2304 
2305 #pragma GCC diagnostic push
2306 #pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
2307  template<typename _Key, typename _Value, typename _Alloc,
2308  typename _ExtractKey, typename _Equal,
2309  typename _Hash, typename _RangeHash, typename _Unused,
2310  typename _RehashPolicy, typename _Traits>
2311  template<typename... _Args>
2312  auto
2313  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2314  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2315  _M_emplace_uniq(_Args&&... __args)
2316  -> pair<iterator, bool>
2317  {
2318  const key_type* __kp = nullptr;
2319 
2320  if constexpr (sizeof...(_Args) == 1)
2321  {
2322  if constexpr (__is_key_type<_Args...>)
2323  {
2324  const auto& __key = _ExtractKey{}(__args...);
2325  __kp = std::__addressof(__key);
2326  }
2327  }
2328  else if constexpr (sizeof...(_Args) == 2)
2329  {
2330  if constexpr (__is_key_type<pair<const _Args&...>>)
2331  {
2332  pair<const _Args&...> __refs(__args...);
2333  const auto& __key = _ExtractKey{}(__refs);
2334  __kp = std::__addressof(__key);
2335  }
2336  }
2337 
2338  _Scoped_node __node { __node_ptr(), this }; // Do not create node yet.
2339  __hash_code __code = 0;
2340  size_type __bkt = 0;
2341 
2342  if (__kp == nullptr)
2343  {
2344  // Didn't extract a key from the args, so build the node.
2345  __node._M_node
2346  = this->_M_allocate_node(std::forward<_Args>(__args)...);
2347  const key_type& __key = _ExtractKey{}(__node._M_node->_M_v());
2348  __kp = std::__addressof(__key);
2349  }
2350 
2351  if (auto __loc = _M_locate(*__kp))
2352  // An equivalent node already exists, so nothing is inserted.
2353  return { iterator(__loc), false };
2354  else
2355  {
2356  __code = __loc._M_hash_code;
2357  __bkt = __loc._M_bucket_index;
2358  }
2359 
2360  if (!__node._M_node)
2361  __node._M_node
2362  = this->_M_allocate_node(std::forward<_Args>(__args)...);
2363 
2364  // Insert the node
2365  auto __pos = _M_insert_unique_node(__bkt, __code, __node._M_node);
2366  __node._M_node = nullptr;
2367  return { __pos, true };
2368  }
2369 #pragma GCC diagnostic pop
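
  /* Exposition only: when the key can be extracted straight from the
     arguments, _M_emplace_uniq above checks for an existing equivalent
     element before allocating a node.  The observable behaviour, sketched
     through std::unordered_map:
     @code
     #include <cassert>
     #include <string>
     #include <unordered_map>

     int main()
     {
       std::unordered_map<std::string, int> m;
       auto r1 = m.emplace("key", 1);
       assert(r1.second && r1.first->second == 1);
       // Duplicate key: nothing is inserted, the mapped value is untouched.
       auto r2 = m.emplace("key", 2);
       assert(!r2.second && r2.first == r1.first && m.at("key") == 1);
       return 0;
     }
     @endcode
  */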
2370 
2371  template<typename _Key, typename _Value, typename _Alloc,
2372  typename _ExtractKey, typename _Equal,
2373  typename _Hash, typename _RangeHash, typename _Unused,
2374  typename _RehashPolicy, typename _Traits>
2375  template<typename... _Args>
2376  auto
2377  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2378  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2379  _M_emplace_multi(const_iterator __hint, _Args&&... __args)
2380  -> iterator
2381  {
2382  // First build the node to get its hash code.
2383  _Scoped_node __node { this, std::forward<_Args>(__args)... };
2384  const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
2385 
2386  auto __res = this->_M_compute_hash_code(__hint._M_cur, __k);
2387  auto __pos
2388  = _M_insert_multi_node(__res.first, __res.second, __node._M_node);
2389  __node._M_node = nullptr;
2390  return __pos;
2391  }
2392 
2393  template<typename _Key, typename _Value, typename _Alloc,
2394  typename _ExtractKey, typename _Equal,
2395  typename _Hash, typename _RangeHash, typename _Unused,
2396  typename _RehashPolicy, typename _Traits>
2397  void
2398  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2399  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2400  _M_rehash_insert(size_type __n)
2401  {
2402  using __pair_type = std::pair<bool, std::size_t>;
2403  if (__n == 0)
2404  return;
2405 
2406  __rehash_guard_t __rehash_guard(_M_rehash_policy);
2407  __pair_type __do_rehash
2408  = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, __n);
2409 
2410  if (__do_rehash.first)
2411  _M_rehash(__do_rehash.second, false_type{});
2412 
2413  __rehash_guard._M_guarded_obj = nullptr;
2414  }
2415 
2416 
2417  template<typename _Key, typename _Value, typename _Alloc,
2418  typename _ExtractKey, typename _Equal,
2419  typename _Hash, typename _RangeHash, typename _Unused,
2420  typename _RehashPolicy, typename _Traits>
2421  template<typename _InputIterator>
2422  void
2423  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2424  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2425  _M_insert_range_multi(_InputIterator __first, _InputIterator __last)
2426  {
2427  _M_rehash_insert(__detail::__distance_fw(__first, __last));
2428  for (; __first != __last; ++__first)
2429  _M_emplace_multi(cend(), *__first);
2430  }
2431 
2432  template<typename _Key, typename _Value, typename _Alloc,
2433  typename _ExtractKey, typename _Equal,
2434  typename _Hash, typename _RangeHash, typename _Unused,
2435  typename _RehashPolicy, typename _Traits>
2436  auto
2437  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2438  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2439  _M_compute_hash_code(__node_ptr __hint, const key_type& __k) const
2440  -> pair<__node_ptr, __hash_code>
2441  {
2442  if (size() <= __small_size_threshold())
2443  {
2444  if (__hint)
2445  {
2446  for (auto __it = __hint; __it; __it = __it->_M_next())
2447  if (this->_M_key_equals(__k, *__it))
2448  return { __it, this->_M_hash_code(*__it) };
2449  }
2450 
2451  for (auto __it = _M_begin(); __it != __hint; __it = __it->_M_next())
2452  if (this->_M_key_equals(__k, *__it))
2453  return { __it, this->_M_hash_code(*__it) };
2454 
2455  __hint = nullptr;
2456  }
2457 
2458  return { __hint, this->_M_hash_code(__k) };
2459  }
2460 
2461  template<typename _Key, typename _Value, typename _Alloc,
2462  typename _ExtractKey, typename _Equal,
2463  typename _Hash, typename _RangeHash, typename _Unused,
2464  typename _RehashPolicy, typename _Traits>
2465  auto
2466  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2467  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2468  _M_insert_unique_node(size_type __bkt, __hash_code __code,
2469  __node_ptr __node, size_type __n_elt)
2470  -> iterator
2471  {
2472  __rehash_guard_t __rehash_guard(_M_rehash_policy);
2473  std::pair<bool, std::size_t> __do_rehash
2474  = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count,
2475  __n_elt);
2476 
2477  if (__do_rehash.first)
2478  {
2479  _M_rehash(__do_rehash.second, true_type{});
2480  __bkt = _M_bucket_index(__code);
2481  }
2482 
2483  __rehash_guard._M_guarded_obj = nullptr;
2484  _M_store_code(*__node, __code);
2485 
2486  // Always insert at the beginning of the bucket.
2487  _M_insert_bucket_begin(__bkt, __node);
2488  ++_M_element_count;
2489  return iterator(__node);
2490  }
2491 
2492  template<typename _Key, typename _Value, typename _Alloc,
2493  typename _ExtractKey, typename _Equal,
2494  typename _Hash, typename _RangeHash, typename _Unused,
2495  typename _RehashPolicy, typename _Traits>
2496  auto
2497  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2498  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2499  _M_insert_multi_node(__node_ptr __hint,
2500  __hash_code __code, __node_ptr __node)
2501  -> iterator
2502  {
2503  __rehash_guard_t __rehash_guard(_M_rehash_policy);
2504  std::pair<bool, std::size_t> __do_rehash
2505  = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, 1);
2506 
2507  if (__do_rehash.first)
2508  _M_rehash(__do_rehash.second, false_type{});
2509 
2510  __rehash_guard._M_guarded_obj = nullptr;
2511  _M_store_code(*__node, __code);
2512  const key_type& __k = _ExtractKey{}(__node->_M_v());
2513  size_type __bkt = _M_bucket_index(__code);
2514 
2515  // Find the node before an equivalent one, or use the hint if it
2516  // exists and is itself equivalent.
2517  __node_base_ptr __prev
2518  = __builtin_expect(__hint != nullptr, false)
2519  && this->_M_equals(__k, __code, *__hint)
2520  ? __hint
2521  : _M_find_before_node(__bkt, __k, __code);
2522 
2523  if (__prev)
2524  {
2525  // Insert after the node before the equivalent one.
2526  __node->_M_nxt = __prev->_M_nxt;
2527  __prev->_M_nxt = __node;
2528  if (__builtin_expect(__prev == __hint, false))
2529  // The hint might have been the last node of its bucket; in that
2530  // case the next bucket's entry needs to be updated.
2531  if (__node->_M_nxt
2532  && !this->_M_equals(__k, __code, *__node->_M_next()))
2533  {
2534  size_type __next_bkt = _M_bucket_index(*__node->_M_next());
2535  if (__next_bkt != __bkt)
2536  _M_buckets[__next_bkt] = __node;
2537  }
2538  }
2539  else
2540  // The inserted node has no equivalent in the hashtable. We must
2541  // insert the new node at the beginning of the bucket to preserve
2542  // equivalent elements' relative positions.
2543  _M_insert_bucket_begin(__bkt, __node);
2544  ++_M_element_count;
2545  return iterator(__node);
2546  }
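
  /* Exposition only: _M_insert_multi_node above places a new node next to any
     equivalent one, so equivalent keys always form an adjacent run that
     equal_range() and count() can return cheaply.  A minimal sketch:
     @code
     #include <cassert>
     #include <iterator>
     #include <unordered_map>

     int main()
     {
       std::unordered_multimap<int, char> m;
       m.insert({1, 'a'});
       m.insert({2, 'x'});
       m.insert({1, 'b'});        // stored adjacent to the other key-1 element
       auto r = m.equal_range(1);
       assert(std::distance(r.first, r.second) == 2);
       assert(m.count(1) == 2 && m.count(2) == 1);
       return 0;
     }
     @endcode
  */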
2547 
2548  template<typename _Key, typename _Value, typename _Alloc,
2549  typename _ExtractKey, typename _Equal,
2550  typename _Hash, typename _RangeHash, typename _Unused,
2551  typename _RehashPolicy, typename _Traits>
2552  auto
2553  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2554  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2555  erase(const_iterator __it)
2556  -> iterator
2557  {
2558  __node_ptr __n = __it._M_cur;
2559  std::size_t __bkt = _M_bucket_index(*__n);
2560 
2561  // Look for the previous node in order to unlink the erased one from
2562  // it. This is why each bucket stores the node before the bucket's
2563  // first node: it makes this search fast.
2564  __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
2565  return _M_erase(__bkt, __prev_n, __n);
2566  }
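
  /* Exposition only: a much-simplified sketch (hypothetical Node/bucket
     names) of the layout the erase path above relies on.  All elements live
     in a single forward list headed by a before-begin sentinel, and each
     non-empty bucket stores the node preceding its first element, so
     unlinking never needs a backwards traversal:
     @code
     #include <cassert>

     struct Node { int value; Node* next; };

     int main()
     {
       Node n2{22, nullptr};
       Node n1{11, &n2};                 // one list: before_begin -> n1 -> n2
       Node before_begin{0, &n1};        // sentinel head node
       // bucket[i] holds the node before the first element of bucket i.
       Node* bucket[2] = { &before_begin, &n1 };

       // Unlink n2 (the only element of bucket 1) without walking backwards.
       Node* prev = bucket[1];
       prev->next = prev->next->next;
       bucket[1] = nullptr;              // bucket 1 is now empty
       assert(before_begin.next == &n1 && n1.next == nullptr);
       return 0;
     }
     @endcode
  */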
2567 
2568  template<typename _Key, typename _Value, typename _Alloc,
2569  typename _ExtractKey, typename _Equal,
2570  typename _Hash, typename _RangeHash, typename _Unused,
2571  typename _RehashPolicy, typename _Traits>
2572  auto
2573  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2574  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2575  _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n)
2576  -> iterator
2577  {
2578  if (__prev_n == _M_buckets[__bkt])
2579  _M_remove_bucket_begin(__bkt, __n->_M_next(),
2580  __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
2581  else if (__n->_M_nxt)
2582  {
2583  size_type __next_bkt = _M_bucket_index(*__n->_M_next());
2584  if (__next_bkt != __bkt)
2585  _M_buckets[__next_bkt] = __prev_n;
2586  }
2587 
2588  __prev_n->_M_nxt = __n->_M_nxt;
2589  iterator __result(__n->_M_next());
2590  this->_M_deallocate_node(__n);
2591  --_M_element_count;
2592 
2593  return __result;
2594  }
2595 
2596 #pragma GCC diagnostic push
2597 #pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
2598  template<typename _Key, typename _Value, typename _Alloc,
2599  typename _ExtractKey, typename _Equal,
2600  typename _Hash, typename _RangeHash, typename _Unused,
2601  typename _RehashPolicy, typename _Traits>
2602  auto
2603  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2604  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2605  erase(const key_type& __k)
2606  -> size_type
2607  {
2608  auto __loc = _M_locate(__k);
2609  if (!__loc)
2610  return 0;
2611 
2612  __node_base_ptr __prev_n = __loc._M_before;
2613  __node_ptr __n = __loc._M_node();
2614  auto __bkt = __loc._M_bucket_index;
2615  if (__bkt == size_type(-1))
2616  __bkt = _M_bucket_index(*__n);
2617 
2618  if constexpr (__unique_keys::value)
2619  {
2620  _M_erase(__bkt, __prev_n, __n);
2621  return 1;
2622  }
2623  else
2624  {
2625  // _GLIBCXX_RESOLVE_LIB_DEFECTS
2626  // 526. Is it undefined if a function in the standard changes
2627  // in parameters?
2628  // We use one loop to find all matching nodes and another to
2629  // deallocate them so that the key stays valid during the first loop.
2630  // It might be invalidated indirectly when destroying nodes.
2631  __node_ptr __n_last = __n->_M_next();
2632  while (__n_last && this->_M_node_equals(*__n, *__n_last))
2633  __n_last = __n_last->_M_next();
2634 
2635  std::size_t __n_last_bkt
2636  = __n_last ? _M_bucket_index(*__n_last) : __bkt;
2637 
2638  // Deallocate nodes.
2639  size_type __result = 0;
2640  do
2641  {
2642  __node_ptr __p = __n->_M_next();
2643  this->_M_deallocate_node(__n);
2644  __n = __p;
2645  ++__result;
2646  }
2647  while (__n != __n_last);
2648 
2649  _M_element_count -= __result;
2650  if (__prev_n == _M_buckets[__bkt])
2651  _M_remove_bucket_begin(__bkt, __n_last, __n_last_bkt);
2652  else if (__n_last_bkt != __bkt)
2653  _M_buckets[__n_last_bkt] = __prev_n;
2654  __prev_n->_M_nxt = __n_last;
2655  return __result;
2656  }
2657  }
2658 #pragma GCC diagnostic pop
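
  /* Exposition only: for non-unique keys, erase(key) above first delimits the
     whole run of equivalent nodes and only then deallocates them, returning
     how many were removed.  A minimal sketch:
     @code
     #include <cassert>
     #include <unordered_set>

     int main()
     {
       std::unordered_multiset<int> m{7, 7, 7, 9};
       assert(m.erase(7) == 3);           // the whole equivalent run goes
       assert(m.size() == 1 && m.count(7) == 0);
       return 0;
     }
     @endcode
  */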
2659 
2660  template<typename _Key, typename _Value, typename _Alloc,
2661  typename _ExtractKey, typename _Equal,
2662  typename _Hash, typename _RangeHash, typename _Unused,
2663  typename _RehashPolicy, typename _Traits>
2664  auto
2665  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2666  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2667  erase(const_iterator __first, const_iterator __last)
2668  -> iterator
2669  {
2670  __node_ptr __n = __first._M_cur;
2671  __node_ptr __last_n = __last._M_cur;
2672  if (__n == __last_n)
2673  return iterator(__n);
2674 
2675  std::size_t __bkt = _M_bucket_index(*__n);
2676 
2677  __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
2678  bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
2679  std::size_t __n_bkt = __bkt;
2680  for (;;)
2681  {
2682  do
2683  {
2684  __node_ptr __tmp = __n;
2685  __n = __n->_M_next();
2686  this->_M_deallocate_node(__tmp);
2687  --_M_element_count;
2688  if (!__n)
2689  break;
2690  __n_bkt = _M_bucket_index(*__n);
2691  }
2692  while (__n != __last_n && __n_bkt == __bkt);
2693  if (__is_bucket_begin)
2694  _M_remove_bucket_begin(__bkt, __n, __n_bkt);
2695  if (__n == __last_n)
2696  break;
2697  __is_bucket_begin = true;
2698  __bkt = __n_bkt;
2699  }
2700 
2701  if (__n && (__n_bkt != __bkt || __is_bucket_begin))
2702  _M_buckets[__n_bkt] = __prev_n;
2703  __prev_n->_M_nxt = __n;
2704  return iterator(__n);
2705  }
2706 
2707  template<typename _Key, typename _Value, typename _Alloc,
2708  typename _ExtractKey, typename _Equal,
2709  typename _Hash, typename _RangeHash, typename _Unused,
2710  typename _RehashPolicy, typename _Traits>
2711  void
2712  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2713  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2714  clear() noexcept
2715  {
2716  this->_M_deallocate_nodes(_M_begin());
2717  std::fill_n(_M_buckets, _M_bucket_count, nullptr);
2718  _M_element_count = 0;
2719  _M_before_begin._M_nxt = nullptr;
2720  }
2721 
2722  template<typename _Key, typename _Value, typename _Alloc,
2723  typename _ExtractKey, typename _Equal,
2724  typename _Hash, typename _RangeHash, typename _Unused,
2725  typename _RehashPolicy, typename _Traits>
2726  void
2727  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2728  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2729  rehash(size_type __bkt_count)
2730  {
2731  __rehash_guard_t __rehash_guard(_M_rehash_policy);
2732  __bkt_count
2733  = std::max(_M_rehash_policy._M_bkt_for_elements(_M_element_count + 1),
2734  __bkt_count);
2735  __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count);
2736 
2737  if (__bkt_count != _M_bucket_count)
2738  {
2739  _M_rehash(__bkt_count, __unique_keys{});
2740  __rehash_guard._M_guarded_obj = nullptr;
2741  }
2742  }
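
  /* Exposition only: rehash() above never shrinks below what the current
     element count and the rehash policy require, and rounds the request up
     to the next bucket count the policy provides.  A minimal sketch of the
     resulting guarantees through the public interface:
     @code
     #include <cassert>
     #include <unordered_map>

     int main()
     {
       std::unordered_map<int, int> m{{1, 1}, {2, 2}, {3, 3}};
       m.rehash(100);
       assert(m.bucket_count() >= 100);                   // request honoured
       assert(m.load_factor() <= m.max_load_factor());    // policy respected
       assert(m.size() == 3);                             // elements kept
       return 0;
     }
     @endcode
  */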
2743 
2744  // Rehash when there are no equivalent elements.
2745  template<typename _Key, typename _Value, typename _Alloc,
2746  typename _ExtractKey, typename _Equal,
2747  typename _Hash, typename _RangeHash, typename _Unused,
2748  typename _RehashPolicy, typename _Traits>
2749  void
2750  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2751  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2752  _M_rehash(size_type __bkt_count, true_type /* __uks */)
2753  {
2754  __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
2755  __node_ptr __p = _M_begin();
2756  _M_before_begin._M_nxt = nullptr;
2757  std::size_t __bbegin_bkt = 0;
2758  while (__p)
2759  {
2760  __node_ptr __next = __p->_M_next();
2761  std::size_t __bkt
2762  = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
2763  if (!__new_buckets[__bkt])
2764  {
2765  __p->_M_nxt = _M_before_begin._M_nxt;
2766  _M_before_begin._M_nxt = __p;
2767  __new_buckets[__bkt] = &_M_before_begin;
2768  if (__p->_M_nxt)
2769  __new_buckets[__bbegin_bkt] = __p;
2770  __bbegin_bkt = __bkt;
2771  }
2772  else
2773  {
2774  __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
2775  __new_buckets[__bkt]->_M_nxt = __p;
2776  }
2777 
2778  __p = __next;
2779  }
2780 
2781  _M_deallocate_buckets();
2782  _M_bucket_count = __bkt_count;
2783  _M_buckets = __new_buckets;
2784  }
2785 
2786  // Rehash when there can be equivalent elements, preserving their
2787  // relative order.
2788  template<typename _Key, typename _Value, typename _Alloc,
2789  typename _ExtractKey, typename _Equal,
2790  typename _Hash, typename _RangeHash, typename _Unused,
2791  typename _RehashPolicy, typename _Traits>
2792  void
2793  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2794  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2795  _M_rehash(size_type __bkt_count, false_type /* __uks */)
2796  {
2797  __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
2798  __node_ptr __p = _M_begin();
2799  _M_before_begin._M_nxt = nullptr;
2800  std::size_t __bbegin_bkt = 0;
2801  std::size_t __prev_bkt = 0;
2802  __node_ptr __prev_p = nullptr;
2803  bool __check_bucket = false;
2804 
2805  while (__p)
2806  {
2807  __node_ptr __next = __p->_M_next();
2808  std::size_t __bkt
2809  = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
2810 
2811  if (__prev_p && __prev_bkt == __bkt)
2812  {
2813  // The previous insert was already in this bucket, so we insert
2814  // after the previously inserted node to preserve equivalent
2815  // elements' relative order.
2816  __p->_M_nxt = __prev_p->_M_nxt;
2817  __prev_p->_M_nxt = __p;
2818 
2819  // Inserting after a node in a bucket requires checking that we
2820  // haven't changed the bucket's last node; if we have, the next
2821  // bucket's entry, which holds that bucket's before-begin node,
2822  // must be updated. We schedule the check for when we move out of
2823  // the sequence of equivalent nodes, to limit the number of checks.
2824  __check_bucket = true;
2825  }
2826  else
2827  {
2828  if (__check_bucket)
2829  {
2830  // Check whether we need to update the next bucket because of
2831  // insertions into the __prev_bkt bucket.
2832  if (__prev_p->_M_nxt)
2833  {
2834  std::size_t __next_bkt
2835  = __hash_code_base::_M_bucket_index(
2836  *__prev_p->_M_next(), __bkt_count);
2837  if (__next_bkt != __prev_bkt)
2838  __new_buckets[__next_bkt] = __prev_p;
2839  }
2840  __check_bucket = false;
2841  }
2842 
2843  if (!__new_buckets[__bkt])
2844  {
2845  __p->_M_nxt = _M_before_begin._M_nxt;
2846  _M_before_begin._M_nxt = __p;
2847  __new_buckets[__bkt] = &_M_before_begin;
2848  if (__p->_M_nxt)
2849  __new_buckets[__bbegin_bkt] = __p;
2850  __bbegin_bkt = __bkt;
2851  }
2852  else
2853  {
2854  __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
2855  __new_buckets[__bkt]->_M_nxt = __p;
2856  }
2857  }
2858  __prev_p = __p;
2859  __prev_bkt = __bkt;
2860  __p = __next;
2861  }
2862 
2863  if (__check_bucket && __prev_p->_M_nxt)
2864  {
2865  std::size_t __next_bkt
2866  = __hash_code_base::_M_bucket_index(*__prev_p->_M_next(),
2867  __bkt_count);
2868  if (__next_bkt != __prev_bkt)
2869  __new_buckets[__next_bkt] = __prev_p;
2870  }
2871 
2872  _M_deallocate_buckets();
2873  _M_bucket_count = __bkt_count;
2874  _M_buckets = __new_buckets;
2875  }
2876 
2877 #pragma GCC diagnostic push
2878 #pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
2879 
2880  // This is for implementing equality comparison for unordered containers,
2881  // per N3068, by John Lakos and Pablo Halpern.
2882  // Algorithmically, we closely follow the reference implementations therein.
2883  template<typename _Key, typename _Value, typename _Alloc,
2884  typename _ExtractKey, typename _Equal,
2885  typename _Hash, typename _RangeHash, typename _Unused,
2886  typename _RehashPolicy, typename _Traits>
2887  bool
2888  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2889  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2890  _M_equal(const _Hashtable& __other) const
2891  {
2892  if (size() != __other.size())
2893  return false;
2894 
2895  if constexpr (__unique_keys::value)
2896  for (auto __x_n = _M_begin(); __x_n; __x_n = __x_n->_M_next())
2897  {
2898  std::size_t __ybkt = __other._M_bucket_index_ext(*__x_n);
2899  auto __prev_n = __other._M_buckets[__ybkt];
2900  if (!__prev_n)
2901  return false;
2902 
2903  for (__node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);;
2904  __n = __n->_M_next())
2905  {
2906  if (__n->_M_v() == __x_n->_M_v())
2907  break;
2908 
2909  if (!__n->_M_nxt
2910  || __other._M_bucket_index(*__n->_M_next()) != __ybkt)
2911  return false;
2912  }
2913  }
2914  else // non-unique keys
2915  for (auto __x_n = _M_begin(); __x_n;)
2916  {
2917  std::size_t __x_count = 1;
2918  auto __x_n_end = __x_n->_M_next();
2919  for (; __x_n_end
2920  && key_eq()(_ExtractKey{}(__x_n->_M_v()),
2921  _ExtractKey{}(__x_n_end->_M_v()));
2922  __x_n_end = __x_n_end->_M_next())
2923  ++__x_count;
2924 
2925  std::size_t __ybkt = __other._M_bucket_index_ext(*__x_n);
2926  auto __y_prev_n = __other._M_buckets[__ybkt];
2927  if (!__y_prev_n)
2928  return false;
2929 
2930  __node_ptr __y_n = static_cast<__node_ptr>(__y_prev_n->_M_nxt);
2931  for (;;)
2932  {
2933  if (key_eq()(_ExtractKey{}(__y_n->_M_v()),
2934  _ExtractKey{}(__x_n->_M_v())))
2935  break;
2936 
2937  auto __y_ref_n = __y_n;
2938  for (__y_n = __y_n->_M_next(); __y_n; __y_n = __y_n->_M_next())
2939  if (!__other._M_node_equals(*__y_ref_n, *__y_n))
2940  break;
2941 
2942  if (!__y_n || __other._M_bucket_index(*__y_n) != __ybkt)
2943  return false;
2944  }
2945 
2946  auto __y_n_end = __y_n;
2947  for (; __y_n_end; __y_n_end = __y_n_end->_M_next())
2948  if (--__x_count == 0)
2949  break;
2950 
2951  if (__x_count != 0)
2952  return false;
2953 
2954  const_iterator __itx(__x_n), __itx_end(__x_n_end);
2955  const_iterator __ity(__y_n);
2956  if (!std::is_permutation(__itx, __itx_end, __ity))
2957  return false;
2958 
2959  __x_n = __x_n_end;
2960  }
2961 
2962  return true;
2963  }
2964 #pragma GCC diagnostic pop
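
  /* Exposition only: per the N3068 algorithm above, two containers compare
     equal when they have the same size and each group of equivalent elements
     in one is a permutation of the corresponding group in the other; bucket
     layout and insertion order do not matter.  A minimal sketch:
     @code
     #include <cassert>
     #include <unordered_set>

     int main()
     {
       std::unordered_multiset<int> a{1, 2, 2, 3};
       std::unordered_multiset<int> b{3, 2, 1, 2};   // same elements, reordered
       std::unordered_multiset<int> c{1, 2, 3};
       assert(a == b);
       assert(!(a == c));                            // sizes differ
       return 0;
     }
     @endcode
  */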
2965 
2966 #if __cplusplus > 201402L
2967  template<typename, typename, typename> class _Hash_merge_helper { };
2968 #endif // C++17
2969 
2970 #if __cpp_deduction_guides >= 201606
2971  // Used to constrain deduction guides
2972  template<typename _Hash>
2973  using _RequireNotAllocatorOrIntegral
2974  = __enable_if_t<!__or_<is_integral<_Hash>, __is_allocator<_Hash>>::value>;
2975 #endif
2976 
2977 /// @endcond
2978 _GLIBCXX_END_NAMESPACE_VERSION
2979 } // namespace std
2980 
2981 #pragma GCC diagnostic pop
2982 
2983 #endif // _HASHTABLE_H