1// Copyright (C) 2022 Joaquin M Lopez Munoz.
2// Copyright (C) 2022 Christian Mazakas
3//
4// Distributed under the Boost Software License, Version 1.0. (See accompanying
5// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
6
7#ifndef BOOST_UNORDERED_DETAIL_FCA_HPP
8#define BOOST_UNORDERED_DETAIL_FCA_HPP
9
10/*
11
12The general structure of the fast closed addressing implementation is that we
13use straight-forward separate chaining (i.e. each bucket contains its own linked
14list) and then improve iteration time by adding an array of "bucket groups".
15
16A bucket group is a constant-width view into a subsection of the buckets array,
17containing a bitmask that indicates which one of the buckets in the subsection
18contains a list of nodes. This allows the code to test N buckets for occupancy
19in a single operation. Additional speed can be found by inter-linking occupied
20bucket groups with one another in a doubly-linked list. To this end, large
21swathes of the bucket groups array no longer need to be iterated and have their
22bitmasks examined for occupancy.
23
24A bucket group iterator contains a pointer to a bucket group along with a
25pointer into the buckets array. The iterator's bucket pointer is guaranteed to
26point to a bucket within the bucket group's view of the array. To advance the
27iterator, we need to determine if we need to skip to the next bucket group or
28simply move to the next occupied bucket as denoted by the bitmask.
29
30To accomplish this, we perform something roughly equivalent to this:
31```
32bucket_iterator itb = ...
33bucket_pointer p = itb.p
34bucket_group_pointer pbg = itb.pbg
35
36offset = p - pbg->buckets
37// because we wish to see if the _next_ bit in the mask is occupied, we'll
38// generate a testing mask from the current offset + 1
39//
40testing_mask = reset_first_bits(offset + 1)
41n = ctz(pbg->bitmask & testing_mask)
42
43if (n < N) {
44 p = pbg->buckets + n
45} else {
46 pbg = pbg->next
47 p = pbg->buckets + ctz(pbg->bitmask)
48}
49```
50
51`reset_first_bits` yields an unsigned integral with the first n bits set to 0
52and then by counting the number of trailing zeroes when AND'd against the bucket
53group's bitmask, we can derive the offset into the buckets array. When the
54calculated offset is equal to N, we know we've reached the end of a bucket group
55and we can advance to the next one.
56
This is a rough explanation of how iterator incrementation works when the
bucket group width N is fixed at 3:
59```
60N = 3
61p = buckets
62pbg->bitmask = 0b101
63pbg->buckets = buckets
64
65offset = p - pbg->buckets // => 0
66testing_mask = reset_first_bits(offset + 1) // reset_first_bits(1) => 0b110
67
68x = bitmask & testing_mask // => 0b101 & 0b110 => 0b100
69ctz(x) // ctz(0b100) => 2
70// 2 < 3
71=> p = pbg->buckets + 2
72
73// increment again...
74offset = p - pbg->buckets // => 2
75testing_mask = reset_first_bits(offset + 1) // reset_first_bits(3) => 0b000
76
77bitmask & testing_mask // 0b101 & 0b000 => 0b000
78ctz(0b000) => 3
79// 3 < 3 is false now
80pbg = pbg->next
81initial_offset = ctz(pbg->bitmask)
82p = pbg->buckets + initial_offset
83```
84
85For `size_` number of buckets, there are `1 + (size_ / N)` bucket groups where
86`N` is the width of a bucket group, determined at compile-time.
87
88We allocate space for `size_ + 1` buckets, using the last one as a dummy bucket
89which is kept permanently empty so it can act as a sentinel value in the
90implementation of `iterator end();`. We set the last bucket group to act as a
91sentinel.
92
93```
94num_groups = size_ / N + 1
95groups = allocate(num_groups)
96pbg = groups + (num_groups - 1)
97
98// not guaranteed to point to exactly N buckets
99pbg->buckets = buckets + N * (size_ / N)
100
// this marks the true end of the bucket array buckets
pbg->bitmask = set_bit(size_ % N)
103
104// links in on itself
105pbg->next = pbg->prev = pbg
106```
107
108To this end, we can devise a safe iteration scheme while also creating a useful
109sentinel to use as the end iterator.
110
111Otherwise, usage of the data structure is relatively straight-forward compared
112to normal separate chaining implementations.
113
114*/
115
116#include <boost/unordered/detail/prime_fmod.hpp>
117
118#include <boost/core/addressof.hpp>
119#include <boost/core/allocator_access.hpp>
120#include <boost/core/bit.hpp>
121#include <boost/core/empty_value.hpp>
122#include <boost/core/no_exceptions_support.hpp>
123#include <boost/cstdint.hpp>
124#include <boost/move/core.hpp>
125#include <boost/move/utility_core.hpp>
126#include <boost/swap.hpp>
127#include <boost/type_traits/aligned_storage.hpp>
128#include <boost/type_traits/alignment_of.hpp>
129
130#include <boost/config.hpp>
131
132#include <iterator>
133
134namespace boost {
135 namespace unordered {
136 namespace detail {
137
      // A singly linked list node holding one element of the container.
      // The element lives in raw aligned storage (`buf`) rather than as a
      // direct member, so its construction/destruction is controlled by the
      // surrounding allocator machinery, independently of the node's own
      // lifetime.
      template <class ValueType, class VoidPtr> struct node
      {
        typedef ValueType value_type;
        typedef typename boost::pointer_traits<VoidPtr>::template rebind_to<
          node>::type node_pointer;

        // next node in this bucket's chain; null-equivalent when last
        node_pointer next;
        // uninitialized, suitably aligned storage for one value_type
        typename boost::aligned_storage<sizeof(value_type),
          boost::alignment_of<value_type>::value>::type buf;

        node() BOOST_NOEXCEPT : next(), buf() {}

        // Pointer to the element in `buf`; valid only after an element has
        // been constructed there.
        value_type* value_ptr() BOOST_NOEXCEPT
        {
          return reinterpret_cast<value_type*>(buf.address());
        }

        // Reference to the element in `buf`; same precondition as
        // value_ptr().
        value_type& value() BOOST_NOEXCEPT
        {
          return *reinterpret_cast<value_type*>(buf.address());
        }
      };
160
      // A bucket is just the head pointer of its chain of nodes; an empty
      // bucket has a null-equivalent `next`.
      template <class Node, class VoidPtr> struct bucket
      {
        typedef typename boost::pointer_traits<VoidPtr>::template rebind_to<
          Node>::type node_pointer;

        typedef typename boost::pointer_traits<VoidPtr>::template rebind_to<
          bucket>::type bucket_pointer;

        // first node of this bucket's chain, or null-equivalent when empty
        node_pointer next;

        bucket() BOOST_NOEXCEPT : next() {}
      };
173
      // Metadata over a fixed-width window of N consecutive buckets (see the
      // file-level comment): `bitmask` has bit i set when bucket
      // `buckets + i` is occupied, and occupied groups are threaded into a
      // doubly linked list via `next`/`prev` so iteration can skip empty
      // regions of the bucket array.
      template <class Bucket> struct bucket_group
      {
        typedef typename Bucket::bucket_pointer bucket_pointer;
        typedef
          typename boost::pointer_traits<bucket_pointer>::template rebind_to<
            bucket_group>::type bucket_group_pointer;

        // group width: one bit of `bitmask` per bucket
        BOOST_STATIC_CONSTANT(std::size_t, N = sizeof(std::size_t) * CHAR_BIT);

        bucket_pointer buckets;         // first bucket covered by this group
        std::size_t bitmask;            // occupancy bits for the N buckets
        bucket_group_pointer next, prev; // links in the occupied-group list

        bucket_group() BOOST_NOEXCEPT : buckets(), bitmask(0), next(), prev() {}
        ~bucket_group() {}
      };
190
191 inline std::size_t set_bit(std::size_t n) { return std::size_t(1) << n; }
192
193 inline std::size_t reset_bit(std::size_t n)
194 {
195 return ~(std::size_t(1) << n);
196 }
197
198 inline std::size_t reset_first_bits(std::size_t n) // n>0
199 {
200 return ~(~(std::size_t(0)) >> (sizeof(std::size_t) * 8 - n));
201 }
202
203 template <class Bucket> struct grouped_bucket_iterator
204 {
205 public:
206 typedef typename Bucket::bucket_pointer bucket_pointer;
207 typedef
208 typename boost::pointer_traits<bucket_pointer>::template rebind_to<
209 bucket_group<Bucket> >::type bucket_group_pointer;
210
211 typedef Bucket value_type;
212 typedef typename boost::pointer_traits<bucket_pointer>::difference_type
213 difference_type;
214 typedef Bucket& reference;
215 typedef Bucket* pointer;
216 typedef std::forward_iterator_tag iterator_category;
217
218 private:
219 bucket_pointer p;
220 bucket_group_pointer pbg;
221
222 public:
223 grouped_bucket_iterator() : p(), pbg() {}
224
225 reference operator*() const BOOST_NOEXCEPT { return dereference(); }
226 pointer operator->() const BOOST_NOEXCEPT
227 {
228 return boost::to_address(p);
229 }
230
231 grouped_bucket_iterator& operator++() BOOST_NOEXCEPT
232 {
233 increment();
234 return *this;
235 }
236
237 grouped_bucket_iterator operator++(int) BOOST_NOEXCEPT
238 {
239 grouped_bucket_iterator old = *this;
240 increment();
241 return old;
242 }
243
244 bool operator==(
245 grouped_bucket_iterator const& other) const BOOST_NOEXCEPT
246 {
247 return equal(x: other);
248 }
249
250 bool operator!=(
251 grouped_bucket_iterator const& other) const BOOST_NOEXCEPT
252 {
253 return !equal(x: other);
254 }
255
256 private:
257 template <typename, typename, typename>
258 friend class grouped_bucket_array;
259
260 BOOST_STATIC_CONSTANT(std::size_t, N = bucket_group<Bucket>::N);
261
262 grouped_bucket_iterator(bucket_pointer p_, bucket_group_pointer pbg_)
263 : p(p_), pbg(pbg_)
264 {
265 }
266
267 Bucket& dereference() const BOOST_NOEXCEPT { return *p; }
268
269 bool equal(const grouped_bucket_iterator& x) const BOOST_NOEXCEPT
270 {
271 return p == x.p;
272 }
273
274 void increment() BOOST_NOEXCEPT
275 {
276 std::size_t const offset = static_cast<std::size_t>(p - pbg->buckets);
277
278 std::size_t n = std::size_t(boost::core::countr_zero(
279 pbg->bitmask & reset_first_bits(n: offset + 1)));
280
281 if (n < N) {
282 p = pbg->buckets + static_cast<difference_type>(n);
283 } else {
284 pbg = pbg->next;
285
286 std::ptrdiff_t x = boost::core::countr_zero(pbg->bitmask);
287 p = pbg->buckets + x;
288 }
289 }
290 };
291
292 template <class Node> struct const_grouped_local_bucket_iterator;
293
294 template <class Node> struct grouped_local_bucket_iterator
295 {
296 typedef typename Node::node_pointer node_pointer;
297
298 public:
299 typedef typename Node::value_type value_type;
300 typedef value_type element_type;
301 typedef value_type* pointer;
302 typedef value_type& reference;
303 typedef std::ptrdiff_t difference_type;
304 typedef std::forward_iterator_tag iterator_category;
305
306 grouped_local_bucket_iterator() : p() {}
307
308 reference operator*() const BOOST_NOEXCEPT { return dereference(); }
309
310 pointer operator->() const BOOST_NOEXCEPT
311 {
312 return boost::to_address(p);
313 }
314
315 grouped_local_bucket_iterator& operator++() BOOST_NOEXCEPT
316 {
317 increment();
318 return *this;
319 }
320
321 grouped_local_bucket_iterator operator++(int) BOOST_NOEXCEPT
322 {
323 grouped_local_bucket_iterator old = *this;
324 increment();
325 return old;
326 }
327
328 bool operator==(
329 grouped_local_bucket_iterator const& other) const BOOST_NOEXCEPT
330 {
331 return equal(x: other);
332 }
333
334 bool operator!=(
335 grouped_local_bucket_iterator const& other) const BOOST_NOEXCEPT
336 {
337 return !equal(x: other);
338 }
339
340 private:
341 template <typename, typename, typename>
342 friend class grouped_bucket_array;
343
344 template <class> friend struct const_grouped_local_bucket_iterator;
345
346 grouped_local_bucket_iterator(node_pointer p_) : p(p_) {}
347
348 value_type& dereference() const BOOST_NOEXCEPT { return p->value(); }
349
350 bool equal(const grouped_local_bucket_iterator& x) const BOOST_NOEXCEPT
351 {
352 return p == x.p;
353 }
354
355 void increment() BOOST_NOEXCEPT { p = p->next; }
356
357 node_pointer p;
358 };
359
360 template <class Node> struct const_grouped_local_bucket_iterator
361 {
362 typedef typename Node::node_pointer node_pointer;
363
364 public:
365 typedef typename Node::value_type const value_type;
366 typedef value_type const element_type;
367 typedef value_type const* pointer;
368 typedef value_type const& reference;
369 typedef std::ptrdiff_t difference_type;
370 typedef std::forward_iterator_tag iterator_category;
371
372 const_grouped_local_bucket_iterator() : p() {}
373 const_grouped_local_bucket_iterator(
374 grouped_local_bucket_iterator<Node> it)
375 : p(it.p)
376 {
377 }
378
379 reference operator*() const BOOST_NOEXCEPT { return dereference(); }
380
381 pointer operator->() const BOOST_NOEXCEPT
382 {
383 return boost::to_address(p);
384 }
385
386 const_grouped_local_bucket_iterator& operator++() BOOST_NOEXCEPT
387 {
388 increment();
389 return *this;
390 }
391
392 const_grouped_local_bucket_iterator operator++(int) BOOST_NOEXCEPT
393 {
394 const_grouped_local_bucket_iterator old = *this;
395 increment();
396 return old;
397 }
398
399 bool operator==(
400 const_grouped_local_bucket_iterator const& other) const BOOST_NOEXCEPT
401 {
402 return equal(x: other);
403 }
404
405 bool operator!=(
406 const_grouped_local_bucket_iterator const& other) const BOOST_NOEXCEPT
407 {
408 return !equal(x: other);
409 }
410
411 private:
412 template <typename, typename, typename>
413 friend class grouped_bucket_array;
414
415 const_grouped_local_bucket_iterator(node_pointer p_) : p(p_) {}
416
417 value_type& dereference() const BOOST_NOEXCEPT { return p->value(); }
418
419 bool equal(
420 const const_grouped_local_bucket_iterator& x) const BOOST_NOEXCEPT
421 {
422 return p == x.p;
423 }
424
425 void increment() BOOST_NOEXCEPT { p = p->next; }
426
427 node_pointer p;
428 };
429
430 template <class T> struct span
431 {
432 T* begin() const BOOST_NOEXCEPT { return data; }
433 T* end() const BOOST_NOEXCEPT { return data + size; }
434
435 T* data;
436 std::size_t size;
437
438 span(T* data_, std::size_t size_) : data(data_), size(size_) {}
439 };
440
441 template <class Bucket, class Allocator, class SizePolicy>
442 class grouped_bucket_array
443 : boost::empty_value<typename boost::allocator_rebind<Allocator,
444 node<typename boost::allocator_value_type<Allocator>::type,
445 typename boost::allocator_void_pointer<Allocator>::type> >::
446 type>
447 {
448 BOOST_MOVABLE_BUT_NOT_COPYABLE(grouped_bucket_array)
449
450 typedef typename boost::allocator_value_type<Allocator>::type
451 allocator_value_type;
452 typedef
453 typename boost::allocator_void_pointer<Allocator>::type void_pointer;
454 typedef typename boost::allocator_difference_type<Allocator>::type
455 difference_type;
456
457 public:
458 typedef typename boost::allocator_rebind<Allocator,
459 node<allocator_value_type, void_pointer> >::type node_allocator_type;
460
461 typedef node<allocator_value_type, void_pointer> node_type;
462 typedef typename boost::allocator_pointer<node_allocator_type>::type
463 node_pointer;
464 typedef SizePolicy size_policy;
465
466 private:
467 typedef typename boost::allocator_rebind<Allocator, Bucket>::type
468 bucket_allocator_type;
469 typedef typename boost::allocator_pointer<bucket_allocator_type>::type
470 bucket_pointer;
471 typedef boost::pointer_traits<bucket_pointer> bucket_pointer_traits;
472
473 typedef bucket_group<Bucket> group;
474 typedef typename boost::allocator_rebind<Allocator, group>::type
475 group_allocator_type;
476 typedef typename boost::allocator_pointer<group_allocator_type>::type
477 group_pointer;
478 typedef typename boost::pointer_traits<group_pointer>
479 group_pointer_traits;
480
481 public:
482 typedef Bucket value_type;
483 typedef Bucket bucket_type;
484 typedef std::size_t size_type;
485 typedef Allocator allocator_type;
486 typedef grouped_bucket_iterator<Bucket> iterator;
487 typedef grouped_local_bucket_iterator<node_type> local_iterator;
488 typedef const_grouped_local_bucket_iterator<node_type>
489 const_local_iterator;
490
491 private:
492 std::size_t size_index_, size_;
493 bucket_pointer buckets;
494 group_pointer groups;
495
496 public:
497 static std::size_t bucket_count_for(std::size_t num_buckets)
498 {
499 if (num_buckets == 0) {
500 return 0;
501 }
502 return size_policy::size(size_policy::size_index(num_buckets));
503 }
504
505 grouped_bucket_array()
506 : empty_value<node_allocator_type>(
507 empty_init_t(), node_allocator_type()),
508 size_index_(0), size_(0), buckets(), groups()
509 {
510 }
511
512 grouped_bucket_array(size_type n, const Allocator& al)
513 : empty_value<node_allocator_type>(empty_init_t(), al),
514 size_index_(0),
515 size_(0), buckets(), groups()
516 {
517 if (n == 0) {
518 return;
519 }
520
521 size_index_ = size_policy::size_index(n);
522 size_ = size_policy::size(size_index_);
523
524 bucket_allocator_type bucket_alloc = this->get_bucket_allocator();
525 group_allocator_type group_alloc = this->get_group_allocator();
526
527 size_type const num_buckets = buckets_len();
528 size_type const num_groups = groups_len();
529
530 buckets = boost::allocator_allocate(bucket_alloc, num_buckets);
531 BOOST_TRY
532 {
533 groups = boost::allocator_allocate(group_alloc, num_groups);
534
535 bucket_type* pb = boost::to_address(buckets);
536 for (size_type i = 0; i < num_buckets; ++i) {
537 new (pb + i) bucket_type();
538 }
539
540 group* pg = boost::to_address(groups);
541 for (size_type i = 0; i < num_groups; ++i) {
542 new (pg + i) group();
543 }
544 }
545 BOOST_CATCH(...)
546 {
547 boost::allocator_deallocate(bucket_alloc, buckets, num_buckets);
548 BOOST_RETHROW
549 }
550 BOOST_CATCH_END
551
552 size_type const N = group::N;
553 group_pointer pbg =
554 groups + static_cast<difference_type>(num_groups - 1);
555
556 pbg->buckets =
557 buckets + static_cast<difference_type>(N * (size_ / N));
558 pbg->bitmask = set_bit(size_ % N);
559 pbg->next = pbg->prev = pbg;
560 }
561
562 ~grouped_bucket_array() { this->deallocate(); }
563
564 grouped_bucket_array(
565 BOOST_RV_REF(grouped_bucket_array) other) BOOST_NOEXCEPT
566 : empty_value<node_allocator_type>(
567 empty_init_t(), other.get_node_allocator()),
568 size_index_(other.size_index_),
569 size_(other.size_),
570 buckets(other.buckets),
571 groups(other.groups)
572 {
573 other.size_ = 0;
574 other.size_index_ = 0;
575 other.buckets = bucket_pointer();
576 other.groups = group_pointer();
577 }
578
579 grouped_bucket_array& operator=(
580 BOOST_RV_REF(grouped_bucket_array) other) BOOST_NOEXCEPT
581 {
582 BOOST_ASSERT(
583 this->get_node_allocator() == other.get_node_allocator());
584
585 if (this == boost::addressof(other)) {
586 return *this;
587 }
588
589 this->deallocate();
590 size_index_ = other.size_index_;
591 size_ = other.size_;
592
593 buckets = other.buckets;
594 groups = other.groups;
595
596 other.size_index_ = 0;
597 other.size_ = 0;
598 other.buckets = bucket_pointer();
599 other.groups = group_pointer();
600
601 return *this;
602 }
603
604 void deallocate() BOOST_NOEXCEPT
605 {
606 if (buckets) {
607 bucket_allocator_type bucket_alloc = this->get_bucket_allocator();
608 boost::allocator_deallocate(
609 bucket_alloc, buckets, this->buckets_len());
610
611 buckets = bucket_pointer();
612 }
613
614 if (groups) {
615 group_allocator_type group_alloc = this->get_group_allocator();
616 boost::allocator_deallocate(
617 group_alloc, groups, this->groups_len());
618
619 groups = group_pointer();
620 }
621 }
622
623 void swap(grouped_bucket_array& other)
624 {
625 std::swap(size_index_, other.size_index_);
626 std::swap(size_, other.size_);
627 std::swap(buckets, other.buckets);
628 std::swap(groups, other.groups);
629
630 bool b = boost::allocator_propagate_on_container_swap<
631 allocator_type>::type::value;
632 if (b) {
633 boost::swap(get_node_allocator(), other.get_node_allocator());
634 }
635 }
636
637 node_allocator_type const& get_node_allocator() const
638 {
639 return empty_value<node_allocator_type>::get();
640 }
641
642 node_allocator_type& get_node_allocator()
643 {
644 return empty_value<node_allocator_type>::get();
645 }
646
647 bucket_allocator_type get_bucket_allocator() const
648 {
649 return this->get_node_allocator();
650 }
651
652 group_allocator_type get_group_allocator() const
653 {
654 return this->get_node_allocator();
655 }
656
657 size_type buckets_len() const BOOST_NOEXCEPT { return size_ + 1; }
658
659 size_type groups_len() const BOOST_NOEXCEPT
660 {
661 return size_ / group::N + 1;
662 }
663
664 void reset_allocator(Allocator const& allocator_)
665 {
666 this->get_node_allocator() = node_allocator_type(allocator_);
667 }
668
669 size_type bucket_count() const { return size_; }
670
671 iterator begin() const { return size_ == 0 ? end() : ++at(n: size_); }
672
673 iterator end() const
674 {
675 // micro optimization: no need to return the bucket group
676 // as end() is not incrementable
677 iterator pbg;
678 pbg.p =
679 buckets + static_cast<difference_type>(this->buckets_len() - 1);
680 return pbg;
681 }
682
683 local_iterator begin(size_type n) const
684 {
685 if (size_ == 0) {
686 return this->end(n);
687 }
688
689 return local_iterator(
690 (buckets + static_cast<difference_type>(n))->next);
691 }
692
693 local_iterator end(size_type) const { return local_iterator(); }
694
695 size_type capacity() const BOOST_NOEXCEPT { return size_; }
696
697 iterator at(size_type n) const
698 {
699 if (size_ > 0) {
700 std::size_t const N = group::N;
701
702 iterator pbg(buckets + static_cast<difference_type>(n),
703 groups + static_cast<difference_type>(n / N));
704
705 return pbg;
706 } else {
707 return this->end();
708 }
709 }
710
711 span<Bucket> raw()
712 {
713 BOOST_ASSERT(size_ == 0 || size_ < this->buckets_len());
714 return span<Bucket>(boost::to_address(buckets), size_);
715 }
716
717 size_type position(std::size_t hash) const
718 {
719 return size_policy::position(hash, size_index_);
720 }
721
722 void clear()
723 {
724 this->deallocate();
725 size_index_ = 0;
726 size_ = 0;
727 }
728
729 void append_bucket_group(iterator itb) BOOST_NOEXCEPT
730 {
731 std::size_t const N = group::N;
732
733 bool const is_empty_bucket = (!itb->next);
734 if (is_empty_bucket) {
735 bucket_pointer pb = itb.p;
736 group_pointer pbg = itb.pbg;
737
738 std::size_t n =
739 static_cast<std::size_t>(boost::to_address(pb) - &buckets[0]);
740
741 bool const is_empty_group = (!pbg->bitmask);
742 if (is_empty_group) {
743 size_type const num_groups = this->groups_len();
744 group_pointer last_group =
745 groups + static_cast<difference_type>(num_groups - 1);
746
747 pbg->buckets =
748 buckets + static_cast<difference_type>(N * (n / N));
749 pbg->next = last_group->next;
750 pbg->next->prev = pbg;
751 pbg->prev = last_group;
752 pbg->prev->next = pbg;
753 }
754
755 pbg->bitmask |= set_bit(n % N);
756 }
757 }
758
759 void insert_node(iterator itb, node_pointer p) BOOST_NOEXCEPT
760 {
761 this->append_bucket_group(itb);
762
763 p->next = itb->next;
764 itb->next = p;
765 }
766
767 void insert_node_hint(
768 iterator itb, node_pointer p, node_pointer hint) BOOST_NOEXCEPT
769 {
770 this->append_bucket_group(itb);
771
772 if (hint) {
773 p->next = hint->next;
774 hint->next = p;
775 } else {
776 p->next = itb->next;
777 itb->next = p;
778 }
779 }
780
781 void extract_node(iterator itb, node_pointer p) BOOST_NOEXCEPT
782 {
783 node_pointer* pp = boost::addressof(itb->next);
784 while ((*pp) != p)
785 pp = boost::addressof((*pp)->next);
786 *pp = p->next;
787 if (!itb->next)
788 unlink_bucket(itb);
789 }
790
791 void extract_node_after(iterator itb, node_pointer* pp) BOOST_NOEXCEPT
792 {
793 *pp = (*pp)->next;
794 if (!itb->next)
795 unlink_bucket(itb);
796 }
797
798 void unlink_empty_buckets() BOOST_NOEXCEPT
799 {
800 std::size_t const N = group::N;
801
802 group_pointer pbg = groups,
803 last = groups + static_cast<difference_type>(
804 this->groups_len() - 1);
805
806 for (; pbg != last; ++pbg) {
807 if (!pbg->buckets) {
808 continue;
809 }
810
811 for (std::size_t n = 0; n < N; ++n) {
812 bucket_pointer bs = pbg->buckets;
813 bucket_type& b = bs[static_cast<std::ptrdiff_t>(n)];
814 if (!b.next)
815 pbg->bitmask &= reset_bit(n);
816 }
817 if (!pbg->bitmask && pbg->next)
818 unlink_group(pbg);
819 }
820
821 // do not check end bucket
822 for (std::size_t n = 0; n < size_ % N; ++n) {
823 if (!pbg->buckets[static_cast<std::ptrdiff_t>(n)].next)
824 pbg->bitmask &= reset_bit(n);
825 }
826 }
827
828 void unlink_bucket(iterator itb)
829 {
830 typename iterator::bucket_pointer p = itb.p;
831 typename iterator::bucket_group_pointer pbg = itb.pbg;
832 if (!(pbg->bitmask &=
833 reset_bit(n: static_cast<std::size_t>(p - pbg->buckets))))
834 unlink_group(pbg);
835 }
836
837 private:
838 void unlink_group(group_pointer pbg)
839 {
840 pbg->next->prev = pbg->prev;
841 pbg->prev->next = pbg->next;
842 pbg->prev = pbg->next = group_pointer();
843 }
844 };
845 } // namespace detail
846 } // namespace unordered
847} // namespace boost
848
849#endif // BOOST_UNORDERED_DETAIL_FCA_HPP
850