PMDK C++ bindings  1.8
This is the C++ bindings documentation for PMDK's libpmemobj.
concurrent_hash_map.hpp
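Before the full listing, a minimal usage sketch may help orient the reader. Everything in it is illustrative: the pool path, the "map_example" layout name and the int-to-int instantiation are assumptions, the pool file is assumed to already exist, and error handling is omitted.

#include <cassert>
#include <libpmemobj++/concurrent_hash_map.hpp>
#include <libpmemobj++/make_persistent.hpp>
#include <libpmemobj++/p.hpp>
#include <libpmemobj++/persistent_ptr.hpp>
#include <libpmemobj++/pool.hpp>
#include <libpmemobj++/transaction.hpp>

using map_type =
    pmem::obj::concurrent_hash_map<pmem::obj::p<int>, pmem::obj::p<int>>;

struct root {
    pmem::obj::persistent_ptr<map_type> map;
};

int
main()
{
    /* "/pmem/poolfile" and "map_example" are placeholders */
    auto pop = pmem::obj::pool<root>::open("/pmem/poolfile", "map_example");
    auto r = pop.root();

    if (r->map == nullptr) {
        /* first run: allocate the map inside a transaction */
        pmem::obj::transaction::run(pop, [&] {
            r->map = pmem::obj::make_persistent<map_type>();
        });
    } else {
        /* after every process restart: rebuild transient state */
        r->map->runtime_initialize();
    }

    /* concurrency-safe operations; must be called outside a transaction */
    r->map->insert(map_type::value_type(42, 100));

    map_type::const_accessor acc;
    if (r->map->find(acc, 42))
        assert(acc->second.get_ro() == 100);
    acc.release();

    pop.close();
    return 0;
}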
1 /*
2  * Copyright 2019, Intel Corporation
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  *
8  * * Redistributions of source code must retain the above copyright
9  * notice, this list of conditions and the following disclaimer.
10  *
11  * * Redistributions in binary form must reproduce the above copyright
12  * notice, this list of conditions and the following disclaimer in
13  * the documentation and/or other materials provided with the
14  * distribution.
15  *
16  * * Neither the name of the copyright holder nor the names of its
17  * contributors may be used to endorse or promote products derived
18  * from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
38 #ifndef PMEMOBJ_CONCURRENT_HASH_MAP_HPP
39 #define PMEMOBJ_CONCURRENT_HASH_MAP_HPP
40 
41 #include <libpmemobj++/detail/atomic_backoff.hpp>
44 
46 #include <libpmemobj++/mutex.hpp>
47 #include <libpmemobj++/p.hpp>
50 
51 #include <libpmemobj++/detail/persistent_pool_ptr.hpp>
53 
54 #include <atomic>
55 #include <cassert>
56 #include <functional>
57 #include <initializer_list>
58 #include <iterator> // for std::distance
59 #include <memory>
60 #include <mutex>
61 #include <thread>
62 #include <type_traits>
63 #include <utility>
64 
65 namespace std
66 {
70 template <typename T>
71 struct hash<pmem::obj::p<T>> {
72  size_t
73  operator()(const pmem::obj::p<T> &x) const
74  {
75  return hash<T>()(x.get_ro());
76  }
77 };
78 } /* namespace std */
79 
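The specialization above lets standard hashing work on p<>-wrapped keys by forwarding to the hash of the stored value. A small sanity sketch (the stack-allocated p<int> is for illustration only; in real code p<> members live on pmem):

#include <cassert>
#include <functional>
#include <libpmemobj++/concurrent_hash_map.hpp>

int
main()
{
    pmem::obj::p<int> key = 42; /* illustrative; normally resides on pmem */

    /* hash<p<int>> forwards to hash<int> applied to the wrapped value */
    assert(std::hash<pmem::obj::p<int>>{}(key) ==
           std::hash<int>{}(key.get_ro()));
    return 0;
}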
80 namespace pmem
81 {
82 namespace obj
83 {
84 
85 namespace concurrent_hash_map_internal
86 {
87 template <typename SharedMutexT>
88 class shared_mutex_scoped_lock {
89  using rw_mutex_type = SharedMutexT;
90 
91 public:
92  shared_mutex_scoped_lock(const shared_mutex_scoped_lock &) = delete;
93  shared_mutex_scoped_lock &
94  operator=(const shared_mutex_scoped_lock &) = delete;
95 
97  shared_mutex_scoped_lock() : mutex(nullptr), is_writer(false)
98  {
99  }
100 
102  shared_mutex_scoped_lock(rw_mutex_type &m, bool write = true)
103  : mutex(nullptr)
104  {
105  acquire(m, write);
106  }
107 
109  ~shared_mutex_scoped_lock()
110  {
111  if (mutex)
112  release();
113  }
114 
116  void
117  acquire(rw_mutex_type &m, bool write = true)
118  {
119  is_writer = write;
120  mutex = &m;
121  if (write)
122  mutex->lock();
123  else
124  mutex->lock_shared();
125  }
126 
130  void
131  release()
132  {
133  assert(mutex);
134  rw_mutex_type *m = mutex;
135  mutex = nullptr;
136  if (is_writer) {
137  m->unlock();
138  } else {
139  m->unlock_shared();
140  }
141  }
142 
147  bool
148  try_acquire(rw_mutex_type &m, bool write = true)
149  {
150  assert(!mutex);
151  bool result;
152  is_writer = write;
153  result = write ? m.try_lock() : m.try_lock_shared();
154  if (result)
155  mutex = &m;
156  return result;
157  }
158 
159 protected:
164  rw_mutex_type *mutex;
169  bool is_writer;
170 }; /* class shared_mutex_scoped_lock */
171 
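shared_mutex_scoped_lock is an internal RAII helper: it takes the wrapped mutex either shared (read) or exclusive (write) and releases it on destruction or on release(). The sketch below exercises it with std::shared_timed_mutex purely for illustration, since any type providing lock()/lock_shared() and the matching unlocks satisfies the template; in the container it wraps pmem::obj::shared_mutex residing in a pool.

#include <shared_mutex>
#include <libpmemobj++/concurrent_hash_map.hpp>

int
main()
{
    std::shared_timed_mutex mtx;

    using lock_type = pmem::obj::concurrent_hash_map_internal::
        shared_mutex_scoped_lock<std::shared_timed_mutex>;

    {
        lock_type reader(mtx, /*write=*/false); /* shared (read) lock */
    } /* released in the destructor */

    lock_type writer(mtx); /* write = true by default: exclusive lock */
    writer.release();      /* explicit release before destruction */

    return 0;
}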
172 template <typename ScopedLockType>
173 using scoped_lock_upgrade_to_writer =
174  decltype(std::declval<ScopedLockType>().upgrade_to_writer());
175 
176 template <typename ScopedLockType>
177 using scoped_lock_has_upgrade_to_writer =
178  detail::supports<ScopedLockType, scoped_lock_upgrade_to_writer>;
179 
180 template <typename ScopedLockType>
181 using scoped_lock_downgrade_to_reader =
182  decltype(std::declval<ScopedLockType>().downgrade_to_reader());
183 
184 template <typename ScopedLockType>
185 using scoped_lock_has_downgrade_to_reader =
186  detail::supports<ScopedLockType, scoped_lock_downgrade_to_reader>;
187 
188 template <typename ScopedLockType,
189  bool = scoped_lock_has_upgrade_to_writer<ScopedLockType>::value
190  &&scoped_lock_has_downgrade_to_reader<ScopedLockType>::value>
191 class scoped_lock_traits {
192 public:
193  using scope_lock_type = ScopedLockType;
194 
195  static bool
196  initial_rw_state(bool write)
197  {
198  /* For upgradeable locks, initial state is always read */
199  return false;
200  }
201 
202  static bool
203  upgrade_to_writer(scope_lock_type &lock)
204  {
205  return lock.upgrade_to_writer();
206  }
207 
208  static bool
209  downgrade_to_reader(scope_lock_type &lock)
210  {
211  return lock.downgrade_to_reader();
212  }
213 };
214 
215 template <typename ScopedLockType>
216 class scoped_lock_traits<ScopedLockType, false> {
217 public:
218  using scope_lock_type = ScopedLockType;
219 
220  static bool
221  initial_rw_state(bool write)
222  {
223  /* For non-upgradeable locks, we take lock in required mode
224  * immediately */
225  return write;
226  }
227 
228  static bool
229  upgrade_to_writer(scope_lock_type &lock)
230  {
231  /* This overload is for locks which do not support upgrade
232  * operation. For those locks, upgrade_to_writer should not be
233  * called when holding a read lock */
234  return true;
235  }
236 
237  static bool
238  downgrade_to_reader(scope_lock_type &lock)
239  {
240  /* This overload is for locks which do not support downgrade
241  * operation. For those locks, downgrade_to_reader should never
242  * be called */
243  assert(false);
244 
245  return false;
246  }
247 };
248 }
249 
250 template <typename Key, typename T, typename Hash = std::hash<Key>,
251  typename KeyEqual = std::equal_to<Key>,
252  typename MutexType = pmem::obj::shared_mutex,
253  typename ScopedLockType = concurrent_hash_map_internal::
254  shared_mutex_scoped_lock<MutexType>>
255 class concurrent_hash_map;
256 
258 namespace concurrent_hash_map_internal
259 {
260 /* Helper method which throws an exception when called in a tx */
261 static inline void
262 check_outside_tx()
263 {
264  if (pmemobj_tx_stage() != TX_STAGE_NONE)
265  throw pmem::transaction_scope_error(
266  "Function called inside transaction scope.");
267 }
268 
269 template <typename Hash>
270 using transparent_key_equal = typename Hash::transparent_key_equal;
271 
272 template <typename Hash>
273 using has_transparent_key_equal = detail::supports<Hash, transparent_key_equal>;
274 
275 template <typename Hash, typename Pred,
276  bool = has_transparent_key_equal<Hash>::value>
277 struct key_equal_type {
278  using type = typename Hash::transparent_key_equal;
279 };
280 
281 template <typename Hash, typename Pred>
282 struct key_equal_type<Hash, Pred, false> {
283  using type = Pred;
284 };
285 
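The three templates above detect Hash::transparent_key_equal: when a user-provided hasher exposes that nested type, it becomes the map's key_equal and the K-templated overloads of count(), find() and erase() further down become available. A hedged sketch of such a hasher (all names below are illustrative):

#include <cstddef>
#include <functional>
#include <string>
#include <libpmemobj++/concurrent_hash_map.hpp>

struct string_hasher {
    /* comparator with a templated operator(), so mixed types compare */
    struct is_equal {
        template <typename A, typename B>
        bool
        operator()(const A &lhs, const B &rhs) const
        {
            return lhs == rhs;
        }
    };
    using transparent_key_equal = is_equal;

    std::size_t
    operator()(const std::string &s) const
    {
        return std::hash<std::string>{}(s);
    }
};

static_assert(pmem::obj::concurrent_hash_map_internal::
                  has_transparent_key_equal<string_hasher>::value,
              "heterogeneous lookup overloads will be enabled");

int
main()
{
    return 0;
}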
286 template <typename Mutex, typename ScopedLockType>
287 void
288 assert_not_locked(Mutex &mtx)
289 {
290 #ifndef NDEBUG
291  ScopedLockType scoped_lock;
292  assert(scoped_lock.try_acquire(mtx));
293  scoped_lock.release();
294 #else
295  (void)mtx;
296 #endif
297 }
298 
299 template <typename Key, typename T, typename MutexType, typename ScopedLockType>
300 struct hash_map_node {
302  using mutex_t = MutexType;
303 
305  using scoped_t = ScopedLockType;
306 
307  using value_type = std::pair<const Key, T>;
308 
310  using node_ptr_t = detail::persistent_pool_ptr<
311  hash_map_node<Key, T, mutex_t, scoped_t>>;
312 
314  node_ptr_t next;
315 
317  mutex_t mutex;
318 
320  value_type item;
321 
322  hash_map_node() : next(OID_NULL)
323  {
324  }
325 
326  hash_map_node(const node_ptr_t &_next) : next(_next)
327  {
328  }
329 
330  hash_map_node(node_ptr_t &&_next) : next(std::move(_next))
331  {
332  }
333 
334  hash_map_node(const node_ptr_t &_next, const Key &key)
335  : next(_next), item(key, T())
336  {
337  }
338 
339  hash_map_node(const node_ptr_t &_next, const Key &key, const T &t)
340  : next(_next), item(key, t)
341  {
342  }
343 
344  hash_map_node(const node_ptr_t &_next, value_type &&i)
345  : next(_next), item(std::move(i))
346  {
347  }
348 
349  template <typename... Args>
350  hash_map_node(node_ptr_t &&_next, Args &&... args)
351  : next(std::forward<node_ptr_t>(_next)),
352  item(std::forward<Args>(args)...)
353  {
354  }
355 
356  hash_map_node(const node_ptr_t &_next, const value_type &i)
357  : next(_next), item(i)
358  {
359  }
360 
362  hash_map_node(const hash_map_node &) = delete;
363 
365  hash_map_node &operator=(const hash_map_node &) = delete;
366 }; /* struct node */
367 
372 template <typename Bucket>
373 class segment_traits {
374 public:
376  using segment_index_t = size_t;
377  using size_type = size_t;
378  using bucket_type = Bucket;
379 
380 protected:
382  constexpr static size_type max_allocation_size = PMEMOBJ_MAX_ALLOC_SIZE;
383 
385  constexpr static segment_index_t first_big_block = 27;
386  /* TODO: avoid hardcoded value; need constexpr similar to:
387  * Log2(max_allocation_size / sizeof(bucket_type)) */
388 
390  constexpr static size_type big_block_size = size_type(1)
391  << first_big_block;
392 
393  /* Block size in bytes cannot exceed max_allocation_size */
394  static_assert((big_block_size * sizeof(bucket_type)) <
395  max_allocation_size,
396  "Block size exceeds max_allocation_size");
397 
399  constexpr static segment_index_t
400  first_block_in_segment(segment_index_t seg)
401  {
402  return seg < first_big_block
403  ? seg
404  : (first_big_block +
405  (segment_index_t(1) << (seg - first_big_block)) - 1);
406  }
407 
409  constexpr static size_type
410  blocks_in_segment(segment_index_t seg)
411  {
412  return seg < first_big_block
413  ? segment_index_t(1)
414  : segment_index_t(1) << (seg - first_big_block);
415  }
416 
418  constexpr static size_type
419  block_size(segment_index_t b)
420  {
421  return b < first_big_block ? segment_size(b ? b : 1)
422  : big_block_size;
423  }
424 
425 public:
427  constexpr static segment_index_t embedded_segments = 1;
428 
430  constexpr static size_type embedded_buckets = 1 << embedded_segments;
431 
433  constexpr static segment_index_t number_of_segments = 32;
434 
436  static const size_type first_block = 8;
437 
439  constexpr static segment_index_t
440  number_of_blocks()
441  {
442  return first_block_in_segment(number_of_segments);
443  }
444 
446  static segment_index_t
447  segment_index_of(size_type index)
448  {
449  return segment_index_t(detail::Log2(index | 1));
450  }
451 
453  constexpr static segment_index_t
454  segment_base(segment_index_t k)
455  {
456  return (segment_index_t(1) << k) & ~segment_index_t(1);
457  }
458 
460  constexpr static size_type
461  segment_size(segment_index_t k)
462  {
463  return size_type(1) << k; // fake value for k == 0
464  }
465  static_assert(
466  embedded_segments < first_big_block,
467  "Number of embedded segments cannot exceed max_allocation_size");
468 }; /* End of class segment_traits */
469 
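segment_traits encodes the geometry of the growing bucket table: the first two buckets are embedded in the map object, segment k (for k >= 1) starts at bucket index 2^k and holds 2^k buckets, and large segments are split into blocks no bigger than max_allocation_size. A small sketch of the arithmetic, instantiated with a dummy bucket type since the class is an implementation detail:

#include <cassert>
#include <libpmemobj++/concurrent_hash_map.hpp>

int
main()
{
    struct dummy_bucket {
        char payload[64]; /* stand-in for the real bucket struct */
    };
    using traits = pmem::obj::concurrent_hash_map_internal::
        segment_traits<dummy_bucket>;

    /* bucket #100 lives in segment Log2(100 | 1) == 6, ... */
    assert(traits::segment_index_of(100) == 6);
    assert(traits::segment_base(6) == 64); /* ...which starts at bucket 64 */
    assert(traits::segment_size(6) == 64); /* ...and holds 64 buckets */

    /* the first two buckets are embedded directly in the map object */
    assert(traits::embedded_buckets == 2);

    return 0;
}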
486 template <typename BlockTable, typename SegmentTraits, bool is_const>
487 class segment_facade_impl : public SegmentTraits {
488 private:
489  using traits_type = SegmentTraits;
490  using traits_type::block_size;
491  using traits_type::blocks_in_segment;
492  using traits_type::embedded_buckets;
493  using traits_type::embedded_segments;
494  using traits_type::first_block;
495  using traits_type::first_block_in_segment;
496  using traits_type::segment_base;
497  using traits_type::segment_size;
498 
499 public:
500  using table_reference =
501  typename std::conditional<is_const, const BlockTable &,
502  BlockTable &>::type;
503 
504  using table_pointer =
505  typename std::conditional<is_const, const BlockTable *,
506  BlockTable *>::type;
507 
508  using bucket_type = typename traits_type::bucket_type;
509  using segment_index_t = typename traits_type::segment_index_t;
510  using size_type = typename traits_type::size_type;
511 
513  segment_facade_impl(table_reference table, segment_index_t s)
514  : my_table(&table), my_seg(s)
515  {
516  assert(my_seg < traits_type::number_of_segments);
517  }
518 
520  segment_facade_impl(const segment_facade_impl &src)
521  : my_table(src.my_table), my_seg(src.my_seg)
522  {
523  }
524 
525  segment_facade_impl(segment_facade_impl &&src) = default;
526 
528  segment_facade_impl &
529  operator=(const segment_facade_impl &src)
530  {
531  my_table = src.my_table;
532  my_seg = src.my_seg;
533  return *this;
534  }
535 
537  segment_facade_impl &
538  operator=(segment_facade_impl &&src)
539  {
540  my_table = src.my_table;
541  my_seg = src.my_seg;
542  return *this;
543  }
544 
551  bucket_type &operator[](size_type i) const
552  {
553  assert(i < size());
554 
555  segment_index_t table_block = first_block_in_segment(my_seg);
556  size_type b_size = block_size(table_block);
557 
558  table_block += i / b_size;
559  i = i % b_size;
560 
561  return (*my_table)[table_block][static_cast<std::ptrdiff_t>(i)];
562  }
563 
567  segment_facade_impl &
568  operator++()
569  {
570  ++my_seg;
571  return *this;
572  }
573 
577  segment_facade_impl
578  operator++(int)
579  {
580  segment_facade_impl tmp = *this;
581  ++(*this);
582  return tmp;
583  }
584 
588  segment_facade_impl &
589  operator--()
590  {
591  --my_seg;
592  return *this;
593  }
594 
598  segment_facade_impl
599  operator--(int)
600  {
601  segment_facade_impl tmp = *this;
602  --(*this);
603  return tmp;
604  }
605 
609  segment_facade_impl &
610  operator+=(segment_index_t off)
611  {
612  my_seg += off;
613  return *this;
614  }
615 
619  segment_facade_impl &
620  operator-=(segment_index_t off)
621  {
622  my_seg -= off;
623  return *this;
624  }
625 
629  segment_facade_impl
630  operator+(segment_index_t off) const
631  {
632  return segment_facade_impl(*(this->my_table),
633  this->my_seg + off);
634  }
635 
639  segment_facade_impl
640  operator-(segment_index_t off) const
641  {
642  return segment_facade_impl(*(this->my_table),
643  this->my_seg - off);
644  }
645 
649  void
650  enable(pool_base &pop)
651  {
652  assert(my_seg >= embedded_segments);
653 
654  if (my_seg < first_block) {
655  enable_first_block(pop);
656  } else {
657  enable_big_segment(pop);
658  }
659  }
660 
664  void
665  disable()
666  {
667  assert(my_seg >= embedded_segments);
668 
669  if (my_seg < first_block) {
670  if (my_seg == embedded_segments) {
671  size_type sz = segment_size(first_block) -
672  embedded_buckets;
673  delete_persistent<bucket_type[]>(
674  (*my_table)[my_seg], sz);
675  }
676  (*my_table)[my_seg] = nullptr;
677  } else {
678  block_range blocks = segment_blocks(my_seg);
679 
680  for (segment_index_t b = blocks.first;
681  b < blocks.second; ++b) {
682  if ((*my_table)[b] != nullptr) {
683  delete_persistent<bucket_type[]>(
684  (*my_table)[b], block_size(b));
685  (*my_table)[b] = nullptr;
686  }
687  }
688  }
689  }
690 
694  constexpr size_type
695  size() const
696  {
697  return segment_size(my_seg ? my_seg : 1);
698  }
699 
705  bool
706  is_valid() const
707  {
708  block_range blocks = segment_blocks(my_seg);
709 
710  for (segment_index_t b = blocks.first; b < blocks.second; ++b) {
711  if ((*my_table)[b] == nullptr)
712  return false;
713  }
714 
715  return true;
716  }
717 
718 private:
719  using block_range = std::pair<segment_index_t, segment_index_t>;
720 
724  static block_range
725  segment_blocks(segment_index_t seg)
726  {
727  segment_index_t begin = first_block_in_segment(seg);
728 
729  return block_range(begin, begin + blocks_in_segment(seg));
730  }
731 
732  void
733  enable_first_block(pool_base &pop)
734  {
735  assert(my_seg == embedded_segments);
736  {
737  transaction::manual tx(pop);
738 
739  size_type sz =
740  segment_size(first_block) - embedded_buckets;
741  (*my_table)[my_seg] =
742  make_persistent<bucket_type[]>(sz);
743 
744  persistent_ptr<bucket_type> base =
745  (*my_table)[embedded_segments].raw();
746 
747  for (segment_index_t s = my_seg + 1; s < first_block;
748  ++s) {
749  std::ptrdiff_t off =
750  static_cast<std::ptrdiff_t>(
751  segment_base(s) -
752  segment_base(my_seg));
753 
754  (*my_table)[s] = (base + off).raw();
755  }
756 
757  tx.commit();
758  }
759  }
760 
761  void
762  enable_big_segment(pool_base &pop)
763  {
764  block_range blocks = segment_blocks(my_seg);
765  {
766  transaction::manual tx(pop);
767 
768  for (segment_index_t b = blocks.first;
769  b < blocks.second; ++b) {
770  assert((*my_table)[b] == nullptr);
771  (*my_table)[b] = make_persistent<bucket_type[]>(
772  block_size(b));
773  }
774 
775  tx.commit();
776  }
777  }
778 
780  table_pointer my_table;
781 
783  segment_index_t my_seg;
784 }; /* End of class segment_facade_impl */
785 
792 template <typename Key, typename T, typename MutexType, typename ScopedLockType>
793 class hash_map_base {
794 public:
795  using mutex_t = MutexType;
796  using scoped_t = ScopedLockType;
797 
799  using size_type = size_t;
800 
802  using hashcode_type = size_t;
803 
805  using node = hash_map_node<Key, T, mutex_t, scoped_t>;
806 
808  using node_ptr_t = detail::persistent_pool_ptr<node>;
809 
811  struct bucket {
812  using mutex_t = MutexType;
813  using scoped_t = ScopedLockType;
814 
816  mutex_t mutex;
817 
819  p<std::atomic<uint64_t>> rehashed;
820 
822  node_ptr_t node_list;
823 
825  bucket() : node_list(nullptr)
826  {
827 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
828  VALGRIND_HG_DISABLE_CHECKING(&rehashed,
829  sizeof(rehashed));
830 #endif
831  rehashed.get_rw() = false;
832  }
833 
839  bool
840  is_rehashed(std::memory_order order)
841  {
842  return rehashed.get_ro().load(order);
843  }
844 
845  void
846  set_rehashed(std::memory_order order)
847  {
848  rehashed.get_rw().store(true, order);
849  }
850 
852  bucket(const bucket &) = delete;
853 
855  bucket &operator=(const bucket &) = delete;
856  }; /* End of struct bucket */
857 
859  using segment_traits_t = segment_traits<bucket>;
860 
862  using segment_index_t = typename segment_traits_t::segment_index_t;
863 
865  static const size_type embedded_buckets =
866  segment_traits_t::embedded_buckets;
867 
869  static const size_type first_block = segment_traits_t::first_block;
870 
872  constexpr static size_type block_table_size =
873  segment_traits_t::number_of_blocks();
874 
876  using segment_ptr_t = persistent_ptr<bucket[]>;
877 
879  using bucket_ptr_t = persistent_ptr<bucket>;
880 
882  using blocks_table_t = segment_ptr_t[block_table_size];
883 
885  using segment_enable_mutex_t = pmem::obj::mutex;
886 
888  struct features {
889  uint32_t compat;
890  uint32_t incompat;
891  };
892 
894  static constexpr features header_features = {0, 0};
895 
896  /* --------------------------------------------------------- */
897 
899  p<uint64_t> my_pool_uuid;
900 
903  features layout_features = (features)header_features;
904 
907  std::aligned_storage<sizeof(size_t), sizeof(size_t)>::type
908  my_mask_reserved;
909 
911  /* my_mask always restored on restart. */
912  p<std::atomic<hashcode_type>> my_mask;
913 
915  std::aligned_storage<32, 8>::type padding1;
916 
921  blocks_table_t my_table;
922 
923  /* It must be in a separate cache line from my_mask for
924  * performance reasons */
926  p<std::atomic<size_type>> my_size;
927 
929  std::aligned_storage<24, 8>::type padding2;
930 
932  std::aligned_storage<64, 8>::type reserved;
933 
935  segment_enable_mutex_t my_segment_enable_mutex;
936 
938  bucket my_embedded_segment[embedded_buckets];
939 
940  /* --------------------------------------------------------- */
941 
942  const std::atomic<hashcode_type> &
943  mask() const noexcept
944  {
945  return my_mask.get_ro();
946  }
947 
948  std::atomic<hashcode_type> &
949  mask() noexcept
950  {
951  return my_mask.get_rw();
952  }
953 
955  using const_segment_facade_t =
956  segment_facade_impl<blocks_table_t, segment_traits_t, true>;
957 
959  using segment_facade_t =
960  segment_facade_impl<blocks_table_t, segment_traits_t, false>;
961 
963  hash_map_base()
964  {
965  static_assert(
966  sizeof(size_type) == sizeof(std::atomic<size_type>),
967  "std::atomic should have the same layout as underlying integral type");
968 
969 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
970  VALGRIND_HG_DISABLE_CHECKING(&my_size, sizeof(my_size));
971  VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
972 #endif
973 
974  my_size.get_rw() = 0;
975  PMEMoid oid = pmemobj_oid(this);
976 
977  assert(!OID_IS_NULL(oid));
978 
979  my_pool_uuid = oid.pool_uuid_lo;
980 
981  pool_base pop = get_pool_base();
982  /* enable embedded segments */
983  for (size_type i = 0; i < segment_traits_t::embedded_segments;
984  ++i) {
985  my_table[i] =
986  pmemobj_oid(my_embedded_segment +
987  segment_traits_t::segment_base(i));
988  segment_facade_t seg(my_table, i);
989  mark_rehashed<false>(pop, seg);
990  }
991  }
992 
996  void
997  calculate_mask()
998  {
999 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
1000  VALGRIND_HG_DISABLE_CHECKING(&my_size, sizeof(my_size));
1001  VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
1002 #endif
1003 #if LIBPMEMOBJ_CPP_VG_PMEMCHECK_ENABLED
1004  VALGRIND_PMC_REMOVE_PMEM_MAPPING(&my_mask, sizeof(my_mask));
1005 #endif
1006 
1007  hashcode_type m = embedded_buckets - 1;
1008 
1009  const_segment_facade_t segment(
1010  my_table, segment_traits_t::embedded_segments);
1011 
1012  while (segment.is_valid()) {
1013  m += segment.size();
1014  ++segment;
1015  }
1016 
1017  mask().store(m, std::memory_order_relaxed);
1018  }
1019 
1020  void
1021  restore_size(size_type actual_size)
1022  {
1023  my_size.get_rw().store(actual_size, std::memory_order_relaxed);
1024  pool_base pop = get_pool_base();
1025  pop.persist(my_size);
1026  }
1027 
1031  template <bool Flush = true>
1032  void
1033  mark_rehashed(pool_base &pop, segment_facade_t &segment)
1034  {
1035  for (size_type i = 0; i < segment.size(); ++i) {
1036  bucket *b = &(segment[i]);
1037 
1038  assert_not_locked<mutex_t, scoped_t>(b->mutex);
1039 
1040  b->set_rehashed(std::memory_order_relaxed);
1041  }
1042 
1043  if (Flush) {
1044  /* Flush in separate loop to avoid read-after-flush */
1045  for (size_type i = 0; i < segment.size(); ++i) {
1046  bucket *b = &(segment[i]);
1047  pop.flush(b->rehashed);
1048  }
1049 
1050  pop.drain();
1051  }
1052  }
1053 
1057  void
1058  enable_segment(segment_index_t k, bool is_initial = false)
1059  {
1060  assert(k);
1061 
1062  pool_base pop = get_pool_base();
1063  size_type sz;
1064 
1065  if (k >= first_block) {
1066  segment_facade_t new_segment(my_table, k);
1067 
1068  sz = new_segment.size();
1069  if (!new_segment.is_valid())
1070  new_segment.enable(pop);
1071 
1072  if (is_initial) {
1073  mark_rehashed(pop, new_segment);
1074  }
1075 
1076  /* double it to get entire capacity of the container */
1077  sz <<= 1;
1078  } else {
1079  /* the first block */
1080  assert(k == segment_traits_t::embedded_segments);
1081 
1082  for (segment_index_t i = k; i < first_block; ++i) {
1083  segment_facade_t new_segment(my_table, i);
1084 
1085  if (!new_segment.is_valid())
1086  new_segment.enable(pop);
1087 
1088  if (is_initial) {
1089  mark_rehashed(pop, new_segment);
1090  }
1091  }
1092 
1093  sz = segment_traits_t::segment_size(first_block);
1094  }
1095 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
1096  ANNOTATE_HAPPENS_BEFORE(&my_mask);
1097 #endif
1098  mask().store(sz - 1, std::memory_order_release);
1099  }
1100 
1105  bucket *
1106  get_bucket(hashcode_type h) const
1107  {
1108  segment_index_t s = segment_traits_t::segment_index_of(h);
1109 
1110  h -= segment_traits_t::segment_base(s);
1111 
1112  const_segment_facade_t segment(my_table, s);
1113 
1114  assert(segment.is_valid());
1115 
1116  return &(segment[h]);
1117  }
1118 
1122  inline bool
1123  check_mask_race(hashcode_type h, hashcode_type &m) const
1124  {
1125  hashcode_type m_now, m_old = m;
1126 
1127  m_now = mask().load(std::memory_order_acquire);
1128 
1129  if (m_old != m_now)
1130  return check_rehashing_collision(h, m_old, m = m_now);
1131 
1132  return false;
1133  }
1134 
1138  bool
1139  check_rehashing_collision(hashcode_type h, hashcode_type m_old,
1140  hashcode_type m) const
1141  {
1142  assert(m_old != m);
1143 
1144  if ((h & m_old) != (h & m)) {
1145  /* mask changed for this hashcode (a rare event); the condition
1146  * above proves that 'h' has some other bits set besides 'm_old';
1147  * find the next applicable mask after m_old */
1148 
1149  for (++m_old; !(h & m_old); m_old <<= 1)
1150  ;
1151 
1152  m_old = (m_old << 1) - 1; /* get full mask from a bit */
1153 
1154  assert((m_old & (m_old + 1)) == 0 && m_old <= m);
1155 
1156  /* check whether it is rehashing/ed */
1157  bucket *b = get_bucket(h & m_old);
1158  return b->is_rehashed(std::memory_order_acquire);
1159  }
1160 
1161  return false;
1162  }
1163 
1169  template <typename Node, typename... Args>
1170  size_type
1171  insert_new_node(bucket *b, detail::persistent_pool_ptr<Node> &new_node,
1172  Args &&... args)
1173  {
1174  pool_base pop = get_pool_base();
1175 
1176  pmem::obj::transaction::run(pop, [&] {
1177  new_node = pmem::obj::make_persistent<Node>(
1178  b->node_list, std::forward<Args>(args)...);
1179  b->node_list = new_node; /* bucket is locked */
1180  });
1181 
1182  /* prefix form is to enforce allocation after the first item
1183  * inserted */
1184  size_t sz = ++(my_size.get_rw());
1185  pop.persist(&my_size, sizeof(my_size));
1186 
1187  return sz;
1188  }
1189 
1194  bool
1195  check_growth(hashcode_type m, size_type sz)
1196  {
1197  if (sz >= m) {
1198  segment_index_t new_seg =
1199  static_cast<segment_index_t>(detail::Log2(
1200  m +
1201  1)); /* optimized segment_index_of */
1202 
1203  assert(segment_facade_t(my_table, new_seg - 1)
1204  .is_valid());
1205 
1206  std::unique_lock<segment_enable_mutex_t> lock(
1207  my_segment_enable_mutex, std::try_to_lock);
1208 
1209  if (lock) {
1210  if (mask().load(std::memory_order_relaxed) ==
1211  m) {
1212  /* Otherwise, another thread has already
1213  * enabled this segment */
1214  enable_segment(new_seg);
1215 
1216  return true;
1217  }
1218  }
1219  }
1220 
1221  return false;
1222  }
1223 
1227  void
1228  reserve(size_type buckets)
1229  {
1230  if (buckets == 0)
1231  return;
1232 
1233  --buckets;
1234 
1235  bool is_initial = (my_size.get_ro() == 0);
1236 
1237  for (size_type m = mask(); buckets > m; m = mask())
1238  enable_segment(
1239  segment_traits_t::segment_index_of(m + 1),
1240  is_initial);
1241  }
1242 
1247  void
1248  internal_swap(hash_map_base<Key, T, mutex_t, scoped_t> &table)
1249  {
1250  pool_base p = get_pool_base();
1251  {
1252  transaction::manual tx(p);
1253 
1254  this->my_pool_uuid.swap(table.my_pool_uuid);
1255  /* Swap my_mask */
1256  this->mask() = table.mask().exchange(
1257  this->mask(), std::memory_order_relaxed);
1258 
1259  /* Swap my_size */
1260  this->my_size.get_rw() =
1261  table.my_size.get_rw().exchange(
1262  this->my_size.get_ro(),
1263  std::memory_order_relaxed);
1264 
1265  for (size_type i = 0; i < embedded_buckets; ++i)
1266  this->my_embedded_segment[i].node_list.swap(
1267  table.my_embedded_segment[i].node_list);
1268 
1269  for (size_type i = segment_traits_t::embedded_segments;
1270  i < block_table_size; ++i)
1271  this->my_table[i].swap(table.my_table[i]);
1272 
1273  tx.commit();
1274  }
1275  }
1276 
1281  pool_base
1282  get_pool_base()
1283  {
1284  PMEMobjpool *pop =
1285  pmemobj_pool_by_oid(PMEMoid{my_pool_uuid, 0});
1286 
1287  return pool_base(pop);
1288  }
1289 }; /* End of class hash_map_base */
1290 
1296 template <typename Container, bool is_const>
1297 class hash_map_iterator {
1298 public:
1299  using iterator_category = std::forward_iterator_tag;
1300  using difference_type = ptrdiff_t;
1301  using map_type = Container;
1302  using value_type = typename map_type::value_type;
1303  using node = typename map_type::node;
1304  using bucket = typename map_type::bucket;
1305  using map_ptr = typename std::conditional<is_const, const map_type *,
1306  map_type *>::type;
1307  using reference =
1308  typename std::conditional<is_const,
1309  typename map_type::const_reference,
1310  typename map_type::reference>::type;
1311  using pointer =
1312  typename std::conditional<is_const,
1313  typename map_type::const_pointer,
1314  typename map_type::pointer>::type;
1315 
1316  template <typename C, bool M, bool U>
1317  friend bool operator==(const hash_map_iterator<C, M> &i,
1318  const hash_map_iterator<C, U> &j);
1319 
1320  template <typename C, bool M, bool U>
1321  friend bool operator!=(const hash_map_iterator<C, M> &i,
1322  const hash_map_iterator<C, U> &j);
1323 
1324  friend class hash_map_iterator<map_type, true>;
1325 
1326 #if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
1327 private:
1328  template <typename Key, typename T, typename Hash, typename KeyEqual,
1329  typename MutexType, typename ScopedLockType>
1330  friend class ::pmem::obj::concurrent_hash_map;
1331 #else
1332 public: /* workaround */
1333 #endif
1334  hash_map_iterator(map_ptr map, size_t index)
1335  : my_map(map), my_index(index), my_bucket(nullptr), my_node(nullptr)
1336  {
1337  if (my_index <= my_map->mask()) {
1338  bucket_accessor acc(my_map, my_index);
1339  my_bucket = acc.get();
1340  my_node = static_cast<node *>(
1341  my_bucket->node_list.get(my_map->my_pool_uuid));
1342 
1343  if (!my_node) {
1344  advance_to_next_bucket();
1345  }
1346  }
1347  }
1348 
1349 public:
1351  hash_map_iterator() = default;
1352 
1354  hash_map_iterator(const hash_map_iterator &other)
1355  : my_map(other.my_map),
1356  my_index(other.my_index),
1357  my_bucket(other.my_bucket),
1358  my_node(other.my_node)
1359  {
1360  }
1361 
1363  template <typename U = void,
1364  typename = typename std::enable_if<is_const, U>::type>
1365  hash_map_iterator(const hash_map_iterator<map_type, false> &other)
1366  : my_map(other.my_map),
1367  my_index(other.my_index),
1368  my_bucket(other.my_bucket),
1369  my_node(other.my_node)
1370  {
1371  }
1372 
1373  hash_map_iterator &operator=(const hash_map_iterator &it) = default;
1374 
1376  reference operator*() const
1377  {
1378  assert(my_node);
1379  return my_node->item;
1380  }
1381 
1383  pointer operator->() const
1384  {
1385  return &operator*();
1386  }
1387 
1389  hash_map_iterator &
1390  operator++()
1391  {
1392  my_node = static_cast<node *>(
1393  my_node->next.get((my_map->my_pool_uuid)));
1394 
1395  if (!my_node)
1396  advance_to_next_bucket();
1397 
1398  return *this;
1399  }
1400 
1402  hash_map_iterator
1403  operator++(int)
1404  {
1405  hash_map_iterator old(*this);
1406  operator++();
1407  return old;
1408  }
1409 
1410 private:
1412  map_ptr my_map = nullptr;
1413 
1415  size_t my_index = 0;
1416 
1418  bucket *my_bucket = nullptr;
1419 
1421  node *my_node = nullptr;
1422 
1423  class bucket_accessor {
1424  public:
1425  bucket_accessor(map_ptr m, size_t index)
1426  {
1427  my_bucket = m->get_bucket(index);
1428  }
1429 
1430  bucket *
1431  get() const
1432  {
1433  return my_bucket;
1434  }
1435 
1436  private:
1437  bucket *my_bucket;
1438  };
1439 
1440  void
1441  advance_to_next_bucket()
1442  {
1443  size_t k = my_index + 1;
1444 
1445  assert(my_bucket);
1446 
1447  while (k <= my_map->mask()) {
1448  bucket_accessor acc(my_map, k);
1449  my_bucket = acc.get();
1450 
1451  if (my_bucket->node_list) {
1452  my_node = static_cast<node *>(
1453  my_bucket->node_list.get(
1454  my_map->my_pool_uuid));
1455 
1456  my_index = k;
1457 
1458  return;
1459  }
1460 
1461  ++k;
1462  }
1463 
1464  my_bucket = 0;
1465  my_node = 0;
1466  my_index = k;
1467  }
1468 };
1469 
1470 template <typename Container, bool M, bool U>
1471 bool
1472 operator==(const hash_map_iterator<Container, M> &i,
1473  const hash_map_iterator<Container, U> &j)
1474 {
1475  return i.my_node == j.my_node && i.my_map == j.my_map;
1476 }
1477 
1478 template <typename Container, bool M, bool U>
1479 bool
1480 operator!=(const hash_map_iterator<Container, M> &i,
1481  const hash_map_iterator<Container, U> &j)
1482 {
1483  return i.my_node != j.my_node || i.my_map != j.my_map;
1484 }
1485 } /* namespace concurrent_hash_map_internal */
1526 template <typename Key, typename T, typename Hash, typename KeyEqual,
1527  typename MutexType, typename ScopedLockType>
1528 class concurrent_hash_map
1529  : protected concurrent_hash_map_internal::hash_map_base<Key, T, MutexType,
1530  ScopedLockType> {
1531  template <typename Container, bool is_const>
1532  friend class concurrent_hash_map_internal::hash_map_iterator;
1533 
1534 public:
1535  using size_type = typename concurrent_hash_map_internal::hash_map_base<
1536  Key, T, MutexType, ScopedLockType>::size_type;
1537  using hashcode_type =
1538  typename concurrent_hash_map_internal::hash_map_base<
1539  Key, T, MutexType, ScopedLockType>::hashcode_type;
1540  using key_type = Key;
1541  using mapped_type = T;
1542  using value_type = typename concurrent_hash_map_internal::hash_map_base<
1543  Key, T, MutexType, ScopedLockType>::node::value_type;
1544  using difference_type = ptrdiff_t;
1545  using pointer = value_type *;
1546  using const_pointer = const value_type *;
1547  using reference = value_type &;
1548  using const_reference = const value_type &;
1549  using iterator = concurrent_hash_map_internal::hash_map_iterator<
1550  concurrent_hash_map, false>;
1551  using const_iterator = concurrent_hash_map_internal::hash_map_iterator<
1552  concurrent_hash_map, true>;
1553  using hasher = Hash;
1554  using key_equal = typename concurrent_hash_map_internal::key_equal_type<
1555  Hash, KeyEqual>::type;
1556 
1557 protected:
1558  using mutex_t = MutexType;
1559  using scoped_t = ScopedLockType;
1560  /*
1561  * Explicitly use methods and types from template base class
1562  */
1563  using hash_map_base =
1564  concurrent_hash_map_internal::hash_map_base<Key, T, mutex_t,
1565  scoped_t>;
1566  using hash_map_base::calculate_mask;
1567  using hash_map_base::check_growth;
1568  using hash_map_base::check_mask_race;
1569  using hash_map_base::embedded_buckets;
1570  using hash_map_base::get_bucket;
1571  using hash_map_base::get_pool_base;
1572  using hash_map_base::header_features;
1573  using hash_map_base::insert_new_node;
1574  using hash_map_base::internal_swap;
1575  using hash_map_base::layout_features;
1576  using hash_map_base::mask;
1577  using hash_map_base::reserve;
1578  using node = typename hash_map_base::node;
1579  using node_mutex_t = typename node::mutex_t;
1580  using node_ptr_t = typename hash_map_base::node_ptr_t;
1581  using bucket = typename hash_map_base::bucket;
1582  using bucket_lock_type = typename bucket::scoped_t;
1583  using segment_index_t = typename hash_map_base::segment_index_t;
1584  using segment_traits_t = typename hash_map_base::segment_traits_t;
1585  using segment_facade_t = typename hash_map_base::segment_facade_t;
1586  using scoped_lock_traits_type =
1587  concurrent_hash_map_internal::scoped_lock_traits<scoped_t>;
1588 
1589  friend class const_accessor;
1590  using persistent_node_ptr_t = detail::persistent_pool_ptr<node>;
1591 
1592  void
1593  delete_node(const node_ptr_t &n)
1594  {
1595  delete_persistent<node>(
1596  detail::static_persistent_pool_pointer_cast<node>(n)
1597  .get_persistent_ptr(this->my_pool_uuid));
1598  }
1599 
1600  template <typename K>
1601  persistent_node_ptr_t
1602  search_bucket(const K &key, bucket *b) const
1603  {
1604  assert(b->is_rehashed(std::memory_order_relaxed));
1605 
1606  persistent_node_ptr_t n =
1607  detail::static_persistent_pool_pointer_cast<node>(
1608  b->node_list);
1609 
1610  while (n &&
1611  !key_equal{}(key,
1612  n.get(this->my_pool_uuid)->item.first)) {
1613  n = detail::static_persistent_pool_pointer_cast<node>(
1614  n.get(this->my_pool_uuid)->next);
1615  }
1616 
1617  return n;
1618  }
1619 
1624  class bucket_accessor : public bucket_lock_type {
1625  bucket *my_b;
1626 
1627  public:
1628  bucket_accessor(concurrent_hash_map *base,
1629  const hashcode_type h, bool writer = false)
1630  {
1631  acquire(base, h, writer);
1632  }
1633 
1638  inline void
1639  acquire(concurrent_hash_map *base, const hashcode_type h,
1640  bool writer = false)
1641  {
1642  my_b = base->get_bucket(h);
1643 
1644  if (my_b->is_rehashed(std::memory_order_acquire) ==
1645  false &&
1646  bucket_lock_type::try_acquire(this->my_b->mutex,
1647  /*write=*/true)) {
1648  if (my_b->is_rehashed(
1649  std::memory_order_relaxed) ==
1650  false) {
1651  /* recursive rehashing */
1652  base->rehash_bucket<false>(my_b, h);
1653  }
1654  } else {
1655  bucket_lock_type::acquire(my_b->mutex, writer);
1656  }
1657 
1658  assert(my_b->is_rehashed(std::memory_order_relaxed));
1659  }
1660 
1664  bool
1665  is_writer() const
1666  {
1667  return bucket_lock_type::is_writer;
1668  }
1669 
1674  bucket *
1675  get() const
1676  {
1677  return my_b;
1678  }
1679 
1684  bucket *operator->() const
1685  {
1686  return this->get();
1687  }
1688  };
1689 
1693  class serial_bucket_accessor {
1694  bucket *my_b;
1695 
1696  public:
1697  serial_bucket_accessor(concurrent_hash_map *base,
1698  const hashcode_type h,
1699  bool writer = false)
1700  {
1701  acquire(base, h, writer);
1702  }
1703 
1704  /*
1705  * Find a bucket by masked hashcode, optionally rehash
1706  */
1707  inline void
1708  acquire(concurrent_hash_map *base, const hashcode_type h,
1709  bool writer = false)
1710  {
1711  my_b = base->get_bucket(h);
1712 
1713  if (my_b->is_rehashed(std::memory_order_relaxed) ==
1714  false) {
1715  /* recursive rehashing */
1716  base->rehash_bucket<true>(my_b, h);
1717  }
1718 
1719  assert(my_b->is_rehashed(std::memory_order_relaxed));
1720  }
1721 
1728  bool
1729  is_writer() const
1730  {
1731  return true;
1732  }
1733 
1738  bucket *
1739  get() const
1740  {
1741  return my_b;
1742  }
1743 
1748  bucket *operator->() const
1749  {
1750  return this->get();
1751  }
1752  };
1753 
1754  hashcode_type
1755  get_hash_code(node_ptr_t &n)
1756  {
1757  return hasher{}(
1758  detail::static_persistent_pool_pointer_cast<node>(n)(
1759  this->my_pool_uuid)
1760  ->item.first);
1761  }
1762 
1763  template <bool serial>
1764  void
1765  rehash_bucket(bucket *b_new, const hashcode_type h)
1766  {
1767  using accessor_type = typename std::conditional<
1768  serial, serial_bucket_accessor, bucket_accessor>::type;
1769 
1770  using scoped_lock_traits_type =
1771  concurrent_hash_map_internal::scoped_lock_traits<
1772  accessor_type>;
1773 
1774  /* The first two buckets should always be rehashed */
1775  assert(h > 1);
1776 
1777  pool_base pop = get_pool_base();
1778  node_ptr_t *p_new = &(b_new->node_list);
1779  bool restore_after_crash = *p_new != nullptr;
1780 
1781  /* get parent mask from the topmost bit */
1782  hashcode_type mask = (1u << detail::Log2(h)) - 1;
1783  assert((h & mask) < h);
1784  accessor_type b_old(
1785  this, h & mask,
1786  scoped_lock_traits_type::initial_rw_state(true));
1787 
1788  /* get full mask for new bucket */
1789  mask = (mask << 1) | 1;
1790  assert((mask & (mask + 1)) == 0 && (h & mask) == h);
1791  restart:
1792  for (node_ptr_t *p_old = &(b_old->node_list), n = *p_old; n;
1793  n = *p_old) {
1794  hashcode_type c = get_hash_code(n);
1795 #ifndef NDEBUG
1796  hashcode_type bmask = h & (mask >> 1);
1797 
1798  bmask = bmask == 0
1799  ? 1 /* minimal mask of parent bucket */
1800  : (1u << (detail::Log2(bmask) + 1)) - 1;
1801 
1802  assert((c & bmask) == (h & bmask));
1803 #endif
1804 
1805  if ((c & mask) == h) {
1806  if (!b_old.is_writer() &&
1807  !scoped_lock_traits_type::upgrade_to_writer(
1808  b_old)) {
1809  goto restart;
1810  /* node ptr can be invalid due to
1811  * concurrent erase */
1812  }
1813 
1814  if (restore_after_crash) {
1815  while (*p_new != nullptr &&
1816  (mask & get_hash_code(*p_new)) ==
1817  h &&
1818  *p_new != n) {
1819  p_new = &(
1820  (*p_new)(
1821  this->my_pool_uuid)
1822  ->next);
1823  }
1824 
1825  restore_after_crash = false;
1826  }
1827 
1828  /* Add to new b_new */
1829  *p_new = n;
1830  pop.persist(p_new, sizeof(*p_new));
1831 
1832  /* exclude from b_old */
1833  *p_old = n(this->my_pool_uuid)->next;
1834  pop.persist(p_old, sizeof(*p_old));
1835 
1836  p_new = &(n(this->my_pool_uuid)->next);
1837  } else {
1838  /* iterate to next item */
1839  p_old = &(n(this->my_pool_uuid)->next);
1840  }
1841  }
1842 
1843  if (restore_after_crash) {
1844  while (*p_new != nullptr &&
1845  (mask & get_hash_code(*p_new)) == h)
1846  p_new = &((*p_new)(this->my_pool_uuid)->next);
1847  }
1848 
1849  *p_new = nullptr;
1850  pop.persist(p_new, sizeof(*p_new));
1851 
1852  /* mark rehashed */
1853  b_new->set_rehashed(std::memory_order_release);
1854  pop.persist(b_new->rehashed);
1855  }
1856 
1857  void
1858  check_features()
1859  {
1860  if (layout_features.incompat != header_features.incompat)
1861  throw pmem::layout_error(
1862  "Incompat flags mismatch, for more details go to: https://pmem.io/pmdk/cpp_obj/ \n");
1863  }
1864 
1865 public:
1866  class accessor;
1870  class const_accessor
1871  : protected node::scoped_t /*which derived from no_copy*/ {
1872  friend class concurrent_hash_map<Key, T, Hash, KeyEqual,
1873  mutex_t, scoped_t>;
1874  friend class accessor;
1876  using node::scoped_t::try_acquire;
1877 
1878  public:
1882  using value_type =
1883  const typename concurrent_hash_map::value_type;
1884 
1889  bool
1890  empty() const
1891  {
1892  return !my_node;
1893  }
1894 
1901  void
1902  release()
1903  {
1904  concurrent_hash_map_internal::check_outside_tx();
1905 
1906  if (my_node) {
1907  node::scoped_t::release();
1908  my_node = 0;
1909  }
1910  }
1911 
1915  const_reference operator*() const
1916  {
1917  assert(my_node);
1918 
1919  return my_node->item;
1920  }
1921 
1925  const_pointer operator->() const
1926  {
1927  return &operator*();
1928  }
1929 
1935  const_accessor() : my_node(OID_NULL)
1936  {
1937  concurrent_hash_map_internal::check_outside_tx();
1938  }
1939 
1943  ~const_accessor()
1944  {
1945  my_node = OID_NULL; // scoped lock's release() is called
1946  // in its destructor
1947  }
1948 
1949  protected:
1950  node_ptr_t my_node;
1951 
1952  hashcode_type my_hash;
1953  };
1954 
1959  class accessor : public const_accessor {
1960  public:
1962  using value_type = typename concurrent_hash_map::value_type;
1963 
1965  reference operator*() const
1966  {
1967  assert(this->my_node);
1968 
1969  return this->my_node->item;
1970  }
1971 
1973  pointer operator->() const
1974  {
1975  return &operator*();
1976  }
1977  };
1978 
1982  concurrent_hash_map() : hash_map_base()
1983  {
1984  runtime_initialize(true);
1985  }
1986 
1991  concurrent_hash_map(size_type n) : hash_map_base()
1992  {
1993  runtime_initialize(true);
1994 
1995  reserve(n);
1996  }
1997 
2001  concurrent_hash_map(const concurrent_hash_map &table) : hash_map_base()
2002  {
2003  runtime_initialize(true);
2004 
2005  reserve(table.size());
2006 
2007  internal_copy(table);
2008  }
2009 
2013  concurrent_hash_map(concurrent_hash_map &&table) : hash_map_base()
2014  {
2015  runtime_initialize(true);
2016 
2017  swap(table);
2018  }
2019 
2023  template <typename I>
2024  concurrent_hash_map(I first, I last)
2025  {
2026  runtime_initialize(true);
2027 
2028  reserve(static_cast<size_type>(std::distance(first, last)));
2029 
2030  internal_copy(first, last);
2031  }
2032 
2036  concurrent_hash_map(std::initializer_list<value_type> il)
2037  {
2038  runtime_initialize(true);
2039 
2040  reserve(il.size());
2041 
2042  internal_copy(il.begin(), il.end());
2043  }
2044 
2053  void
2054  runtime_initialize(bool graceful_shutdown = false)
2055  {
2056  check_features();
2057 
2058  calculate_mask();
2059 
2060  if (!graceful_shutdown) {
2061  auto actual_size =
2062  std::distance(this->begin(), this->end());
2063  assert(actual_size >= 0);
2064  this->restore_size(size_type(actual_size));
2065  } else {
2066  assert(this->size() ==
2067  size_type(std::distance(this->begin(),
2068  this->end())));
2069  }
2070  }
2071 
2083  concurrent_hash_map &
2084  operator=(const concurrent_hash_map &table)
2085  {
2086  if (this != &table) {
2087  clear();
2088  internal_copy(table);
2089  }
2090 
2091  return *this;
2092  }
2093 
2105  concurrent_hash_map &
2106  operator=(std::initializer_list<value_type> il)
2107  {
2108  clear();
2109 
2110  reserve(il.size());
2111 
2112  internal_copy(il.begin(), il.end());
2113 
2114  return *this;
2115  }
2116 
2125  void rehash(size_type n = 0);
2126 
2133  void clear();
2134 
2138  ~concurrent_hash_map()
2139  {
2140  clear();
2141  }
2142 
2143  //------------------------------------------------------------------------
2144  // STL support - not thread-safe methods
2145  //------------------------------------------------------------------------
2146 
2153  iterator
2154  begin()
2155  {
2156  return iterator(this, 0);
2157  }
2158 
2163  iterator
2164  end()
2165  {
2166  return iterator(this, mask() + 1);
2167  }
2168 
2173  const_iterator
2174  begin() const
2175  {
2176  return const_iterator(this, 0);
2177  }
2178 
2183  const_iterator
2184  end() const
2185  {
2186  return const_iterator(this, mask() + 1);
2187  }
2188 
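As the comment above notes, begin()/end() and the iterators are not thread-safe; they are intended for serial phases such as recovery or debugging. A minimal sketch (the p<int>/p<int> instantiation is illustrative):

#include <iostream>
#include <libpmemobj++/concurrent_hash_map.hpp>
#include <libpmemobj++/p.hpp>

using map_type =
    pmem::obj::concurrent_hash_map<pmem::obj::p<int>, pmem::obj::p<int>>;

/* must not run concurrently with inserts/erases on the same map */
void
dump(map_type &map)
{
    for (auto it = map.begin(); it != map.end(); ++it)
        std::cout << it->first.get_ro() << " -> "
                  << it->second.get_ro() << "\n";
}

int
main()
{
    return 0; /* dump() would be called on a map obtained from a pool */
}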
2192  size_type
2193  size() const
2194  {
2195  return this->my_size.get_ro();
2196  }
2197 
2201  bool
2202  empty() const
2203  {
2204  return this->my_size.get_ro() == 0;
2205  }
2206 
2210  size_type
2211  max_size() const
2212  {
2213  return (~size_type(0)) / sizeof(node);
2214  }
2215 
2219  size_type
2220  bucket_count() const
2221  {
2222  return mask() + 1;
2223  }
2224 
2228  void swap(concurrent_hash_map &table);
2229 
2230  //------------------------------------------------------------------------
2231  // concurrent map operations
2232  //------------------------------------------------------------------------
2233 
2239  size_type
2240  count(const Key &key) const
2241  {
2242  concurrent_hash_map_internal::check_outside_tx();
2243 
2244  return const_cast<concurrent_hash_map *>(this)->internal_find(
2245  key, nullptr, false);
2246  }
2247 
2259  template <typename K,
2260  typename = typename std::enable_if<
2261  concurrent_hash_map_internal::
2262  has_transparent_key_equal<hasher>::value,
2263  K>::type>
2264  size_type
2265  count(const K &key) const
2266  {
2267  concurrent_hash_map_internal::check_outside_tx();
2268 
2269  return const_cast<concurrent_hash_map *>(this)->internal_find(
2270  key, nullptr, false);
2271  }
2272 
2279  bool
2280  find(const_accessor &result, const Key &key) const
2281  {
2282  concurrent_hash_map_internal::check_outside_tx();
2283 
2284  result.release();
2285 
2286  return const_cast<concurrent_hash_map *>(this)->internal_find(
2287  key, &result, false);
2288  }
2289 
2303  template <typename K,
2304  typename = typename std::enable_if<
2305  concurrent_hash_map_internal::
2306  has_transparent_key_equal<hasher>::value,
2307  K>::type>
2308  bool
2309  find(const_accessor &result, const K &key) const
2310  {
2311  concurrent_hash_map_internal::check_outside_tx();
2312 
2313  result.release();
2314 
2315  return const_cast<concurrent_hash_map *>(this)->internal_find(
2316  key, &result, false);
2317  }
2318 
2325  bool
2326  find(accessor &result, const Key &key)
2327  {
2328  concurrent_hash_map_internal::check_outside_tx();
2329 
2330  result.release();
2331 
2332  return internal_find(key, &result, true);
2333  }
2334 
2348  template <typename K,
2349  typename = typename std::enable_if<
2350  concurrent_hash_map_internal::
2351  has_transparent_key_equal<hasher>::value,
2352  K>::type>
2353  bool
2354  find(accessor &result, const K &key)
2355  {
2356  concurrent_hash_map_internal::check_outside_tx();
2357 
2358  result.release();
2359 
2360  return internal_find(key, &result, true);
2361  }
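The const_accessor returned by the const find() overloads holds the element under a read lock, while the accessor from the non-const overloads holds it under a write lock; either lock is dropped by release() or by the accessor's destructor. A sketch of the two access modes (making the in-place update durable, e.g. flushing the modified value, is deliberately omitted here):

#include <libpmemobj++/concurrent_hash_map.hpp>
#include <libpmemobj++/p.hpp>

using map_type =
    pmem::obj::concurrent_hash_map<pmem::obj::p<int>, pmem::obj::p<int>>;

void
read_then_bump(map_type &map, int key)
{
    {
        map_type::const_accessor racc; /* shared (read) item lock */
        if (map.find(racc, key))
            (void)racc->second.get_ro();
    } /* read lock released here */

    map_type::accessor wacc; /* exclusive (write) item lock */
    if (map.find(wacc, key))
        wacc->second.get_rw() += 1; /* element stays locked until wacc
                                       is released or destroyed */
}

int
main()
{
    return 0;
}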
2369  bool
2370  insert(const_accessor &result, const Key &key)
2371  {
2372  concurrent_hash_map_internal::check_outside_tx();
2373 
2374  result.release();
2375 
2376  return internal_insert(key, &result, false, key);
2377  }
2378 
2386  bool
2387  insert(accessor &result, const Key &key)
2388  {
2389  concurrent_hash_map_internal::check_outside_tx();
2390 
2391  result.release();
2392 
2393  return internal_insert(key, &result, true, key);
2394  }
2395 
2403  bool
2404  insert(const_accessor &result, const value_type &value)
2405  {
2406  concurrent_hash_map_internal::check_outside_tx();
2407 
2408  result.release();
2409 
2410  return internal_insert(value.first, &result, false, value);
2411  }
2412 
2420  bool
2421  insert(accessor &result, const value_type &value)
2422  {
2423  concurrent_hash_map_internal::check_outside_tx();
2424 
2425  result.release();
2426 
2427  return internal_insert(value.first, &result, true, value);
2428  }
2429 
2436  bool
2437  insert(const value_type &value)
2438  {
2439  concurrent_hash_map_internal::check_outside_tx();
2440 
2441  return internal_insert(value.first, nullptr, false, value);
2442  }
2443 
2451  bool
2452  insert(const_accessor &result, value_type &&value)
2453  {
2454  concurrent_hash_map_internal::check_outside_tx();
2455 
2456  result.release();
2457 
2458  return internal_insert(value.first, &result, false,
2459  std::move(value));
2460  }
2461 
2469  bool
2470  insert(accessor &result, value_type &&value)
2471  {
2472  concurrent_hash_map_internal::check_outside_tx();
2473 
2474  result.release();
2475 
2476  return internal_insert(value.first, &result, true,
2477  std::move(value));
2478  }
2479 
2486  bool
2487  insert(value_type &&value)
2488  {
2489  concurrent_hash_map_internal::check_outside_tx();
2490 
2491  return internal_insert(value.first, nullptr, false,
2492  std::move(value));
2493  }
2494 
2500  template <typename I>
2501  void
2502  insert(I first, I last)
2503  {
2504  concurrent_hash_map_internal::check_outside_tx();
2505 
2506  for (; first != last; ++first)
2507  insert(*first);
2508  }
2509 
2515  void
2516  insert(std::initializer_list<value_type> il)
2517  {
2518  concurrent_hash_map_internal::check_outside_tx();
2519 
2520  insert(il.begin(), il.end());
2521  }
2522 
2531  bool
2532  erase(const Key &key)
2533  {
2534  concurrent_hash_map_internal::check_outside_tx();
2535 
2536  return internal_erase(key);
2537  }
2538 
2553  template <typename K,
2554  typename = typename std::enable_if<
2555  concurrent_hash_map_internal::
2556  has_transparent_key_equal<hasher>::value,
2557  K>::type>
2558  bool
2559  erase(const K &key)
2560  {
2561  concurrent_hash_map_internal::check_outside_tx();
2562 
2563  return internal_erase(key);
2564  }
2565 
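erase() (both the Key overload and, for transparent hashers, the K-templated one) locks the bucket for writing, unlinks the node transactionally, waits for accessors that still hold the element, and returns whether anything was removed. A short sketch combining it with count():

#include <libpmemobj++/concurrent_hash_map.hpp>
#include <libpmemobj++/p.hpp>

using map_type =
    pmem::obj::concurrent_hash_map<pmem::obj::p<int>, pmem::obj::p<int>>;

/* concurrency-safe, but like the other map operations it must be
 * called outside of an active transaction */
bool
remove_if_present(map_type &map, int key)
{
    if (map.count(key) == 0)
        return false;

    /* another thread may still erase the key in the meantime,
     * so rely on erase()'s own return value */
    return map.erase(key);
}

int
main()
{
    return 0;
}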
2566 protected:
2567  /*
2568  * Try to acquire the mutex for read or write.
2569  *
2570  * If acquiring succeeds returns true, otherwise retries for few times.
2571  * If acquiring fails after all attempts returns false.
2572  */
2573  bool try_acquire_item(const_accessor *result, node_mutex_t &mutex,
2574  bool write);
2575 
2576  template <typename K>
2577  bool internal_find(const K &key, const_accessor *result, bool write);
2578 
2579  template <typename K, typename... Args>
2580  bool internal_insert(const K &key, const_accessor *result, bool write,
2581  Args &&... args);
2582 
2583  /* Obtain pointer to node and lock bucket */
2584  template <bool Bucket_rw_lock, typename K>
2585  persistent_node_ptr_t
2586  get_node(const K &key, bucket_accessor &b)
2587  {
2588  /* find a node */
2589  auto n = search_bucket(key, b.get());
2590 
2591  if (!n) {
2592  if (Bucket_rw_lock && !b.is_writer() &&
2593  !scoped_lock_traits_type::upgrade_to_writer(b)) {
2594  /* Rerun search_list, in case another
2595  * thread inserted the item during the
2596  * upgrade. */
2597  n = search_bucket(key, b.get());
2598  if (n) {
2599  /* unfortunately, it did */
2600  scoped_lock_traits_type::
2601  downgrade_to_reader(b);
2602  return n;
2603  }
2604  }
2605  }
2606 
2607  return n;
2608  }
2609 
2610  template <typename K>
2611  bool internal_erase(const K &key);
2612 
2613  void clear_segment(segment_index_t s);
2614 
2618  void internal_copy(const concurrent_hash_map &source);
2619 
2620  template <typename I>
2621  void internal_copy(I first, I last);
2622 
2623 }; // class concurrent_hash_map
2624 
2625 template <typename Key, typename T, typename Hash, typename KeyEqual,
2626  typename MutexType, typename ScopedLockType>
2627 bool
2628 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2629  ScopedLockType>::try_acquire_item(const_accessor *result,
2630  node_mutex_t &mutex,
2631  bool write)
2632 {
2633  /* acquire the item */
2634  if (!result->try_acquire(mutex, write)) {
2635  for (detail::atomic_backoff backoff(true);;) {
2636  if (result->try_acquire(mutex, write))
2637  break;
2638 
2639  if (!backoff.bounded_pause())
2640  return false;
2641  }
2642  }
2643 
2644  return true;
2645 }
2646 
2647 template <typename Key, typename T, typename Hash, typename KeyEqual,
2648  typename MutexType, typename ScopedLockType>
2649 template <typename K>
2650 bool
2651 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2652  ScopedLockType>::internal_find(const K &key,
2653  const_accessor *result,
2654  bool write)
2655 {
2656  assert(!result || !result->my_node);
2657 
2658  hashcode_type m = mask().load(std::memory_order_acquire);
2659 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
2660  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
2661 #endif
2662 
2663  assert((m & (m + 1)) == 0);
2664 
2665  hashcode_type const h = hasher{}(key);
2666 
2667  persistent_node_ptr_t node;
2668 
2669  while (true) {
2670  /* get bucket and acquire the lock */
2671  bucket_accessor b(
2672  this, h & m,
2673  scoped_lock_traits_type::initial_rw_state(false));
2674  node = get_node<false>(key, b);
2675 
2676  if (!node) {
2677  /* Element was possibly relocated, try again */
2678  if (check_mask_race(h, m)) {
2679  b.release();
2680  continue;
2681  } else {
2682  return false;
2683  }
2684  }
2685 
2686  /* No need to acquire the item or item acquired */
2687  if (!result ||
2688  try_acquire_item(
2689  result, node.get(this->my_pool_uuid)->mutex, write))
2690  break;
2691 
2692  /* the wait takes really long, restart the
2693  * operation */
2694  b.release();
2695 
2696  std::this_thread::yield();
2697 
2698  m = mask().load(std::memory_order_acquire);
2699  }
2700 
2701  if (result) {
2702  result->my_node = node.get_persistent_ptr(this->my_pool_uuid);
2703  result->my_hash = h;
2704  }
2705 
2706  return true;
2707 }
2708 
2709 template <typename Key, typename T, typename Hash, typename KeyEqual,
2710  typename MutexType, typename ScopedLockType>
2711 template <typename K, typename... Args>
2712 bool
2713 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2714  ScopedLockType>::internal_insert(const K &key,
2715  const_accessor *result,
2716  bool write,
2717  Args &&... args)
2718 {
2719  assert(!result || !result->my_node);
2720 
2721  hashcode_type m = mask().load(std::memory_order_acquire);
2722 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
2723  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
2724 #endif
2725 
2726  assert((m & (m + 1)) == 0);
2727 
2728  hashcode_type const h = hasher{}(key);
2729 
2730  persistent_node_ptr_t node;
2731  size_t new_size = 0;
2732  bool inserted = false;
2733 
2734  while (true) {
2735  /* get bucket and acquire the lock */
2736  bucket_accessor b(
2737  this, h & m,
2738  scoped_lock_traits_type::initial_rw_state(true));
2739  node = get_node<true>(key, b);
2740 
2741  if (!node) {
2742  /* Element was possibly relocated, try again */
2743  if (check_mask_race(h, m)) {
2744  b.release();
2745  continue;
2746  }
2747 
2748  /* insert and set flag to grow the container */
2749  new_size = insert_new_node(b.get(), node,
2750  std::forward<Args>(args)...);
2751  inserted = true;
2752  }
2753 
2754  /* No need to acquire the item or item acquired */
2755  if (!result ||
2756  try_acquire_item(
2757  result, node.get(this->my_pool_uuid)->mutex, write))
2758  break;
2759 
2760  /* the wait takes really long, restart the
2761  * operation */
2762  b.release();
2763 
2764  std::this_thread::yield();
2765 
2766  m = mask().load(std::memory_order_acquire);
2767  }
2768 
2769  if (result) {
2770  result->my_node = node.get_persistent_ptr(this->my_pool_uuid);
2771  result->my_hash = h;
2772  }
2773 
2774  check_growth(m, new_size);
2775 
2776  return inserted;
2777 }
2778 
2779 template <typename Key, typename T, typename Hash, typename KeyEqual,
2780  typename MutexType, typename ScopedLockType>
2781 template <typename K>
2782 bool
2783 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2784  ScopedLockType>::internal_erase(const K &key)
2785 {
2786  node_ptr_t n;
2787  hashcode_type const h = hasher{}(key);
2788  hashcode_type m = mask().load(std::memory_order_acquire);
2789  pool_base pop = get_pool_base();
2790 
2791 restart : {
2792  /* lock scope */
2793  /* get bucket */
2794  bucket_accessor b(this, h & m,
2795  scoped_lock_traits_type::initial_rw_state(true));
2796 
2797 search:
2798  node_ptr_t *p = &b->node_list;
2799  n = *p;
2800 
2801  while (n &&
2802  !key_equal{}(key,
2803  detail::static_persistent_pool_pointer_cast<node>(
2804  n)(this->my_pool_uuid)
2805  ->item.first)) {
2806  p = &n(this->my_pool_uuid)->next;
2807  n = *p;
2808  }
2809 
2810  if (!n) {
2811  /* not found, but mask could be changed */
2812  if (check_mask_race(h, m))
2813  goto restart;
2814 
2815  return false;
2816  } else if (!b.is_writer() &&
2817  !scoped_lock_traits_type::upgrade_to_writer(b)) {
2818  if (check_mask_race(h, m)) /* contended upgrade, check mask */
2819  goto restart;
2820 
2821  goto search;
2822  }
2823 
2824  {
2825  transaction::manual tx(pop);
2826 
2827  persistent_ptr<node> del = n(this->my_pool_uuid);
2828 
2829  *p = del->next;
2830 
2831  {
2832  /* We cannot remove this element immediately because
2833  * other threads might work with this element via
2834  * accessors. The item_locker is required to wait until
2835  * other threads stop using the node. */
2836  typename node::scoped_t item_locker(del->mutex,
2837  /*write=*/true);
2838  }
2839 
2840  /* Only one thread can delete it due to the write lock on the bucket
2841  */
2842  delete_node(del);
2843 
2844  transaction::commit();
2845  }
2846 
2847  --(this->my_size.get_rw());
2848  pop.persist(this->my_size);
2849 }
2850 
2851  return true;
2852 }
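internal_erase() write-locks the bucket (upgrading from a read lock if necessary), unlinks the matching node, briefly takes the node's own lock to wait until no accessor still references it, frees the node, commits the transaction, and then decrements and persists the element count. For the caller this is a single call; note that erase(), like the other modifying operations, must not be invoked inside an open transaction. A sketch with an illustrative helper name:

#include <libpmemobj++/container/concurrent_hash_map.hpp>
#include <libpmemobj++/p.hpp>

using map_type = pmem::obj::concurrent_hash_map<pmem::obj::p<int>, pmem::obj::p<int>>;

/* Returns true if an element was removed, false if the key was absent. */
bool
remove_key(map_type &map, int key)
{
	return map.erase(key);
}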
2853 
2854 template <typename Key, typename T, typename Hash, typename KeyEqual,
2855  typename MutexType, typename ScopedLockType>
2856 void
2857 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2858  ScopedLockType>::swap(concurrent_hash_map &table)
2859 {
2860  internal_swap(table);
2861 }
2862 
2863 template <typename Key, typename T, typename Hash, typename KeyEqual,
2864  typename MutexType, typename ScopedLockType>
2865 void
2866 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::rehash(
2867  size_type sz)
2868 {
2869  concurrent_hash_map_internal::check_outside_tx();
2870 
2871  reserve(sz);
2872  hashcode_type m = mask();
2873 
2874  /* only the last segment needs to be scanned for rehashing; b is the
2875  * first bucket index of that segment (half of the table size) */
2876  hashcode_type b = (m + 1) >> 1;
2877 
2878  /* zero or power of 2 */
2879  assert((b & (b - 1)) == 0);
2880 
2881  for (; b <= m; ++b) {
2882  bucket *bp = get_bucket(b);
2883 
2884  concurrent_hash_map_internal::assert_not_locked<mutex_t,
2885  scoped_t>(
2886  bp->mutex);
2887  /* XXX Need to investigate if this statement is needed */
2888  if (bp->is_rehashed(std::memory_order_relaxed) == false)
2889  rehash_bucket<true>(bp, b);
2890  }
2891 }
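rehash() first reserves capacity for sz elements and then walks the buckets of the last allocated segment, eagerly splitting any bucket still marked as not rehashed; normally that splitting happens lazily on first access. It assumes no concurrent users. A pre-sizing sketch (helper name illustrative):

#include <libpmemobj++/container/concurrent_hash_map.hpp>
#include <libpmemobj++/p.hpp>
#include <cstddef>

using map_type = pmem::obj::concurrent_hash_map<pmem::obj::p<int>, pmem::obj::p<int>>;

/* Grow the table up front so a bulk load of roughly expected elements
 * causes little or no rehashing on the insert path. Call only while no
 * other thread uses the map. */
void
presize(map_type &map, std::size_t expected)
{
	map.rehash(expected);
}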
2892 
2893 template <typename Key, typename T, typename Hash, typename KeyEqual,
2894  typename MutexType, typename ScopedLockType>
2895 void
2896 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::clear()
2897 {
2898  hashcode_type m = mask();
2899 
2900  assert((m & (m + 1)) == 0);
2901 
2902 #ifndef NDEBUG
2903  /* check consistency */
2904  for (segment_index_t b = 0; b <= m; ++b) {
2905  bucket *bp = get_bucket(b);
2906  concurrent_hash_map_internal::assert_not_locked<mutex_t,
2907  scoped_t>(
2908  bp->mutex);
2909  }
2910 #endif
2911 
2912  pool_base pop = get_pool_base();
2913  { /* transaction scope */
2914 
2915  transaction::manual tx(pop);
2916 
2917  this->my_size.get_rw() = 0;
2918  segment_index_t s = segment_traits_t::segment_index_of(m);
2919 
2920  assert(s + 1 == this->block_table_size ||
2921  !segment_facade_t(this->my_table, s + 1).is_valid());
2922 
2923  do {
2924  clear_segment(s);
2925  } while (s-- > 0);
2926 
2927  mask().store(embedded_buckets - 1, std::memory_order_relaxed);
2928 
2929  transaction::commit();
2930  }
2931 }
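clear() zeroes the element count, frees every node segment by segment (clear_segment(), defined next) and shrinks the mask back to the embedded-bucket count, all inside one transaction. Like rehash() it assumes exclusive access. A single-threaded reset sketch (helper name illustrative):

#include <libpmemobj++/container/concurrent_hash_map.hpp>
#include <libpmemobj++/p.hpp>
#include <cassert>

using map_type = pmem::obj::concurrent_hash_map<pmem::obj::p<int>, pmem::obj::p<int>>;

/* Drop all elements, e.g. during teardown or before repopulating the
 * map from scratch. Must not run concurrently with any other operation
 * on the same map. */
void
reset(map_type &map)
{
	map.clear();
	assert(map.empty());
}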
2932 
2933 template <typename Key, typename T, typename Hash, typename KeyEqual,
2934  typename MutexType, typename ScopedLockType>
2935 void
2936 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2937  ScopedLockType>::clear_segment(segment_index_t s)
2938 {
2939  segment_facade_t segment(this->my_table, s);
2940 
2941  assert(segment.is_valid());
2942 
2943  size_type sz = segment.size();
2944  for (segment_index_t i = 0; i < sz; ++i) {
2945  for (node_ptr_t n = segment[i].node_list; n;
2946  n = segment[i].node_list) {
2947  segment[i].node_list = n(this->my_pool_uuid)->next;
2948  delete_node(n);
2949  }
2950  }
2951 
2952  if (s >= segment_traits_t::embedded_segments)
2953  segment.disable();
2954 }
2955 
2956 template <typename Key, typename T, typename Hash, typename KeyEqual,
2957  typename MutexType, typename ScopedLockType>
2958 void
2959 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2960  ScopedLockType>::internal_copy(const concurrent_hash_map &source)
2961 {
2962  reserve(source.my_size.get_ro());
2963  internal_copy(source.begin(), source.end());
2964 }
2965 
2966 template <typename Key, typename T, typename Hash, typename KeyEqual,
2967  typename MutexType, typename ScopedLockType>
2968 template <typename I>
2969 void
2970 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2971  ScopedLockType>::internal_copy(I first, I last)
2972 {
2973  hashcode_type m = mask();
2974 
2975  for (; first != last; ++first) {
2976  hashcode_type h = hasher{}(first->first);
2977  bucket *b = get_bucket(h & m);
2978 
2979  assert(b->is_rehashed(std::memory_order_relaxed));
2980 
2981  detail::persistent_pool_ptr<node> p;
2982  insert_new_node(b, p, *first);
2983  }
2984 }
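Both internal_copy() overloads are used only during construction: the range version above rebuilds the table bucket by bucket, and the overload taking a whole map (defined just before it) reserves to the source's size and then forwards the source's iterator range here. Because construction allocates persistent memory, a copy has to be made inside a transaction. A sketch with a hypothetical root layout; the struct and helper name are illustrative, not defined by this header:

#include <libpmemobj++/container/concurrent_hash_map.hpp>
#include <libpmemobj++/make_persistent.hpp>
#include <libpmemobj++/p.hpp>
#include <libpmemobj++/persistent_ptr.hpp>
#include <libpmemobj++/pool.hpp>
#include <libpmemobj++/transaction.hpp>

using map_type = pmem::obj::concurrent_hash_map<pmem::obj::p<int>, pmem::obj::p<int>>;

/* Hypothetical pool root holding the original map and a copy of it. */
struct root {
	pmem::obj::persistent_ptr<map_type> map;
	pmem::obj::persistent_ptr<map_type> copy;
};

/* Deep-copy the whole map onto persistent memory. Not safe against
 * concurrent writers of the source map. */
void
snapshot(pmem::obj::pool<root> &pop)
{
	auto r = pop.root();
	pmem::obj::transaction::run(pop, [&] {
		r->copy = pmem::obj::make_persistent<map_type>(*r->map);
	});
}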
2985 
2986 template <typename Key, typename T, typename Hash, typename KeyEqual,
2987  typename MutexType, typename ScopedLockType>
2988 inline bool
2989 operator==(const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2990  ScopedLockType> &a,
2991  const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2992  ScopedLockType> &b)
2993 {
2994  if (a.size() != b.size())
2995  return false;
2996 
2997  typename concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2998  ScopedLockType>::const_iterator
2999  i(a.begin()),
3000  i_end(a.end());
3001 
3002  typename concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3003  ScopedLockType>::const_iterator j,
3004  j_end(b.end());
3005 
3006  for (; i != i_end; ++i) {
3007  j = b.equal_range(i->first).first;
3008 
3009  if (j == j_end || !(i->second == j->second))
3010  return false;
3011  }
3012 
3013  return true;
3014 }
3015 
3016 template <typename Key, typename T, typename Hash, typename KeyEqual,
3017  typename MutexType, typename ScopedLockType>
3018 inline bool
3019 operator!=(const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3020  ScopedLockType> &a,
3021  const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3022  ScopedLockType> &b)
3023 {
3024  return !(a == b);
3025 }
3026 
3027 template <typename Key, typename T, typename Hash, typename KeyEqual,
3028  typename MutexType, typename ScopedLockType>
3029 inline void
3030 swap(concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType> &a,
3031  concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType> &b)
3032 {
3033  a.swap(b);
3034 }
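The free functions above round out the interface: operator== succeeds only when both maps have the same size and every key of a maps to an equal value in b, operator!= negates that, and the non-member swap() forwards to the member swap(). They do not take per-element locks, so use them only on maps that are not being modified concurrently. A small sketch (helper name illustrative):

#include <libpmemobj++/container/concurrent_hash_map.hpp>
#include <libpmemobj++/p.hpp>

using map_type = pmem::obj::concurrent_hash_map<pmem::obj::p<int>, pmem::obj::p<int>>;

/* Compare two maps element-wise and, if they differ, exchange their
 * contents. Call only while no other thread modifies either map. */
bool
compare_and_exchange(map_type &a, map_type &b)
{
	if (a == b)
		return false;   /* contents already equal, nothing to do */

	swap(a, b);             /* finds pmem::obj::swap via ADL */
	return true;
}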
3035 
3036 } /* namespace obj */
3037 } /* namespace pmem */
3038 
3039 #endif /* PMEMOBJ_CONCURRENT_HASH_MAP_HPP */