#ifndef PMEMOBJ_CONCURRENT_HASH_MAP_HPP
#define PMEMOBJ_CONCURRENT_HASH_MAP_HPP

#include <libpmemobj++/detail/atomic_backoff.hpp>
/* ... */
#include <libpmemobj++/detail/persistent_pool_ptr.hpp>
/* ... */
#include <initializer_list>
/* ... */
#include <type_traits>

namespace std
{
/** Specialization of std::hash for pmem::obj::p<T>. */
template <typename T>
struct hash<pmem::obj::p<T>> {
	size_t
	operator()(const pmem::obj::p<T> &x) const
	{
		return hash<T>()(x.get_ro());
	}
};
} /* namespace std */
namespace pmem
{
namespace obj
{

namespace concurrent_hash_map_internal
{
template <typename SharedMutexT>
class shared_mutex_scoped_lock {
	using rw_mutex_type = SharedMutexT;

public:
	shared_mutex_scoped_lock(const shared_mutex_scoped_lock &) = delete;
	shared_mutex_scoped_lock &
	operator=(const shared_mutex_scoped_lock &) = delete;

	/** Construct a lock that has not acquired a mutex. */
	shared_mutex_scoped_lock() : mutex(nullptr), is_writer(false)
	{
	}

	/** Acquire a lock on the given mutex. */
	shared_mutex_scoped_lock(rw_mutex_type &m, bool write = true)
		: mutex(nullptr)
	{
		acquire(m, write);
	}

	/** Release the lock, if one is held. */
	~shared_mutex_scoped_lock()
	{
		if (mutex)
			release();
	}

	/** Acquire a lock on the given mutex. */
	void
	acquire(rw_mutex_type &m, bool write = true)
	{
		is_writer = write;
		mutex = &m;
		if (write)
			mutex->lock();
		else
			mutex->lock_shared();
	}

	/** Release the held lock. */
	void
	release()
	{
		assert(mutex);
		rw_mutex_type *m = mutex;
		mutex = nullptr;
		if (is_writer)
			m->unlock();
		else
			m->unlock_shared();
	}

	/** Try to acquire a lock on the given mutex. */
	bool
	try_acquire(rw_mutex_type &m, bool write = true)
	{
		assert(!mutex);
		bool result;
		is_writer = write;
		result = write ? m.try_lock() : m.try_lock_shared();
		if (result)
			mutex = &m;
		return result;
	}

protected:
	/** Pointer to the currently held mutex, or nullptr. */
	rw_mutex_type *mutex;
	/** True when holding a writer lock, false for a reader lock. */
	bool is_writer;
}; /* class shared_mutex_scoped_lock */
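/*
 * Usage sketch (illustrative, not part of the original header): this is the
 * default ScopedLockType used by concurrent_hash_map together with
 * pmem::obj::shared_mutex. The wrapper is RAII-style, so the destructor
 * releases whatever lock is still held.
 *
 * @code
 * pmem::obj::shared_mutex mtx; // member of some pmem-resident structure
 *
 * void
 * read_side()
 * {
 *	shared_mutex_scoped_lock<pmem::obj::shared_mutex> lock(mtx, false);
 *	// ... read the data protected by mtx ...
 * } // shared lock released by the destructor
 * @endcode
 */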
/* Helper aliases detecting upgrade_to_writer()/downgrade_to_reader()
 * support in a scoped lock type. */
template <typename ScopedLockType>
using scoped_lock_upgrade_to_writer =
	decltype(std::declval<ScopedLockType>().upgrade_to_writer());

template <typename ScopedLockType>
using scoped_lock_has_upgrade_to_writer =
	detail::supports<ScopedLockType, scoped_lock_upgrade_to_writer>;

template <typename ScopedLockType>
using scoped_lock_downgrade_to_reader =
	decltype(std::declval<ScopedLockType>().downgrade_to_reader());

template <typename ScopedLockType>
using scoped_lock_has_downgrade_to_reader =
	detail::supports<ScopedLockType, scoped_lock_downgrade_to_reader>;
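/*
 * Note (added): detail::supports follows the C++ detection idiom. A minimal
 * self-contained sketch of such a trait, assuming nothing about the actual
 * implementation shipped in libpmemobj++/detail:
 *
 * @code
 * #include <type_traits>
 *
 * template <typename... Ts>
 * struct make_void {
 *	using type = void;
 * };
 *
 * template <typename T, template <typename> class Op, typename = void>
 * struct supports : std::false_type {
 * };
 *
 * template <typename T, template <typename> class Op>
 * struct supports<T, Op, typename make_void<Op<T>>::type> : std::true_type {
 * };
 * @endcode
 *
 * With it, scoped_lock_has_upgrade_to_writer<L>::value is true exactly when
 * L declares a callable upgrade_to_writer().
 */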
/* Traits dispatching on whether the scoped lock type supports lock
 * upgrade/downgrade. */
template <typename ScopedLockType,
	  bool = scoped_lock_has_upgrade_to_writer<ScopedLockType>::value &&
		  scoped_lock_has_downgrade_to_reader<ScopedLockType>::value>
class scoped_lock_traits {
public:
	using scope_lock_type = ScopedLockType;

	static bool
	initial_rw_state(bool write)
	{
		/* For upgradeable locks, start with a read lock and upgrade
		 * to writer only when necessary. */
		return false;
	}

	static bool
	upgrade_to_writer(scope_lock_type &lock)
	{
		return lock.upgrade_to_writer();
	}

	static bool
	downgrade_to_reader(scope_lock_type &lock)
	{
		return lock.downgrade_to_reader();
	}
};

template <typename ScopedLockType>
class scoped_lock_traits<ScopedLockType, false> {
public:
	using scope_lock_type = ScopedLockType;

	static bool
	initial_rw_state(bool write)
	{
		/* The lock cannot be upgraded, so acquire it in the final
		 * state from the start. */
		return write;
	}

	static bool
	upgrade_to_writer(scope_lock_type &lock)
	{
		/* The lock was already acquired in the needed state. */
		return true;
	}

	static bool
	downgrade_to_reader(scope_lock_type &lock)
	{
		return true;
	}
};
} /* namespace concurrent_hash_map_internal */
template <typename Key, typename T, typename Hash = std::hash<Key>,
	  typename KeyEqual = std::equal_to<Key>,
	  typename MutexType = pmem::obj::shared_mutex,
	  typename ScopedLockType = concurrent_hash_map_internal::
		  shared_mutex_scoped_lock<MutexType>>
class concurrent_hash_map;

namespace concurrent_hash_map_internal
{
/* Throw if called inside an active transaction. */
static inline void
check_outside_tx()
{
	if (pmemobj_tx_stage() != TX_STAGE_NONE)
		throw pmem::transaction_scope_error(
			"Function called inside transaction scope.");
}
template <typename Hash>
using transparent_key_equal = typename Hash::transparent_key_equal;

template <typename Hash>
using has_transparent_key_equal =
	detail::supports<Hash, transparent_key_equal>;

template <typename Hash, typename Pred,
	  bool = has_transparent_key_equal<Hash>::value>
struct key_equal_type {
	using type = typename Hash::transparent_key_equal;
};

template <typename Hash, typename Pred>
struct key_equal_type<Hash, Pred, false> {
	using type = Pred;
};
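/*
 * Illustration (added): a hasher opting into heterogeneous lookup by
 * providing the nested transparent_key_equal type. The hasher below is
 * hypothetical; with it, key_equal_type resolves to std::equal_to<> and the
 * K-templated find()/count()/erase() overloads become available.
 *
 * @code
 * #include <functional>
 * #include <string>
 *
 * struct string_hasher {
 *	using transparent_key_equal = std::equal_to<>;
 *
 *	size_t
 *	operator()(const std::string &s) const
 *	{
 *		return std::hash<std::string>{}(s);
 *	}
 *
 *	size_t
 *	operator()(const char *s) const
 *	{
 *		return std::hash<std::string>{}(std::string(s));
 *	}
 * };
 * @endcode
 */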
template <typename Mutex, typename ScopedLockType>
void
assert_not_locked(Mutex &mtx)
{
#ifndef NDEBUG
	ScopedLockType scoped_lock;
	assert(scoped_lock.try_acquire(mtx));
	scoped_lock.release();
#else
	(void)mtx;
#endif
}
template <typename Key, typename T, typename MutexType,
	  typename ScopedLockType>
struct hash_map_node {
	/** Mutex type. */
	using mutex_t = MutexType;

	/** Scoped lock type for the mutex. */
	using scoped_t = ScopedLockType;

	using value_type = std::pair<const Key, T>;

	/** Persistent pointer type for next. */
	using node_ptr_t = detail::persistent_pool_ptr<
		hash_map_node<Key, T, mutex_t, scoped_t>>;

	/** Next node in the chain. */
	node_ptr_t next;

	/** Node mutex. */
	mutex_t mutex;

	/** Item stored in the node. */
	value_type item;

	hash_map_node() : next(OID_NULL)
	{
	}

	hash_map_node(const node_ptr_t &_next) : next(_next)
	{
	}

	hash_map_node(node_ptr_t &&_next) : next(std::move(_next))
	{
	}

	hash_map_node(const node_ptr_t &_next, const Key &key)
		: next(_next), item(key, T())
	{
	}

	hash_map_node(const node_ptr_t &_next, const Key &key, const T &t)
		: next(_next), item(key, t)
	{
	}

	hash_map_node(const node_ptr_t &_next, value_type &&i)
		: next(_next), item(std::move(i))
	{
	}

	template <typename... Args>
	hash_map_node(node_ptr_t &&_next, Args &&... args)
		: next(std::forward<node_ptr_t>(_next)),
		  item(std::forward<Args>(args)...)
	{
	}

	hash_map_node(const node_ptr_t &_next, const value_type &i)
		: next(_next), item(i)
	{
	}

	/** Copy constructor is deleted. */
	hash_map_node(const hash_map_node &) = delete;

	/** Assignment operator is deleted. */
	hash_map_node &operator=(const hash_map_node &) = delete;
}; /* struct hash_map_node */
/**
 * Provides access to the properties of the segments used by the hash map.
 */
template <typename Bucket>
class segment_traits {
public:
	/** Segment index type. */
	using segment_index_t = size_t;
	using size_type = size_t;
	using bucket_type = Bucket;

protected:
	/** PMDK has a limitation on the allocation size. */
	constexpr static size_type max_allocation_size =
		PMEMOBJ_MAX_ALLOC_SIZE;

	/** First big block that has fixed size. */
	constexpr static segment_index_t first_big_block = 27;

	/** Max number of buckets per segment. */
	constexpr static size_type big_block_size = size_type(1)
		<< first_big_block;

	/* Block size in bytes cannot exceed max_allocation_size. */
	static_assert((big_block_size * sizeof(bucket_type)) <
			      max_allocation_size,
		      "Block size exceeds max_allocation_size");

	/** @returns index of the first block in the given segment. */
	constexpr static segment_index_t
	first_block_in_segment(segment_index_t seg)
	{
		return seg < first_big_block
			? seg
			: (first_big_block +
			   (segment_index_t(1) << (seg - first_big_block)) -
			   1);
	}

	/** @returns number of blocks in the given segment. */
	constexpr static size_type
	blocks_in_segment(segment_index_t seg)
	{
		return seg < first_big_block
			? segment_index_t(1)
			: segment_index_t(1) << (seg - first_big_block);
	}

	/** @returns number of buckets in the given block. */
	constexpr static size_type
	block_size(segment_index_t b)
	{
		return b < first_big_block ? segment_size(b ? b : 1)
					   : big_block_size;
	}

public:
	/** Number of embedded segments. */
	constexpr static segment_index_t embedded_segments = 1;

	/** Count of buckets in the embedded segments. */
	constexpr static size_type embedded_buckets = 1 << embedded_segments;

	/** Maximum number of segments. */
	constexpr static segment_index_t number_of_segments = 32;

	/** Count of segments in the first block. */
	static const size_type first_block = 8;

	/** @returns maximum number of blocks. */
	constexpr static segment_index_t
	number_of_blocks()
	{
		return first_block_in_segment(number_of_segments);
	}

	/** @returns segment index of the given bucket index. */
	static segment_index_t
	segment_index_of(size_type index)
	{
		return segment_index_t(detail::Log2(index | 1));
	}

	/** @returns the first bucket index of the given segment. */
	constexpr static segment_index_t
	segment_base(segment_index_t k)
	{
		return (segment_index_t(1) << k) & ~segment_index_t(1);
	}

	/** @returns segment size, except for @arg k == 0. */
	constexpr static size_type
	segment_size(segment_index_t k)
	{
		return size_type(1) << k;
	}

	static_assert(
		embedded_segments < first_big_block,
		"Number of embedded segments cannot exceed max_allocation_size");
}; /* class segment_traits */
/**
 * Segment facade: presents the blocks of a block table as a contiguous
 * array of buckets belonging to one segment.
 */
template <typename BlockTable, typename SegmentTraits, bool is_const>
class segment_facade_impl : public SegmentTraits {
private:
	using traits_type = SegmentTraits;
	using traits_type::block_size;
	using traits_type::blocks_in_segment;
	using traits_type::embedded_buckets;
	using traits_type::embedded_segments;
	using traits_type::first_block;
	using traits_type::first_block_in_segment;
	using traits_type::segment_base;
	using traits_type::segment_size;

public:
	using table_reference =
		typename std::conditional<is_const, const BlockTable &,
					  BlockTable &>::type;

	using table_pointer =
		typename std::conditional<is_const, const BlockTable *,
					  BlockTable *>::type;

	using bucket_type = typename traits_type::bucket_type;
	using segment_index_t = typename traits_type::segment_index_t;
	using size_type = typename traits_type::size_type;

	/** Constructor. */
	segment_facade_impl(table_reference table, segment_index_t s)
		: my_table(&table), my_seg(s)
	{
		assert(my_seg < traits_type::number_of_segments);
	}

	/** Copy constructor. */
	segment_facade_impl(const segment_facade_impl &src)
		: my_table(src.my_table), my_seg(src.my_seg)
	{
	}

	segment_facade_impl(segment_facade_impl &&src) = default;

	/** Assignment operator. */
	segment_facade_impl &
	operator=(const segment_facade_impl &src)
	{
		my_table = src.my_table;
		my_seg = src.my_seg;
		return *this;
	}

	/** Move assignment operator. */
	segment_facade_impl &
	operator=(segment_facade_impl &&src)
	{
		my_table = src.my_table;
		my_seg = src.my_seg;
		return *this;
	}

	/** Access the i-th bucket of the segment. */
	bucket_type &operator[](size_type i) const
	{
		assert(i < size());

		segment_index_t table_block = first_block_in_segment(my_seg);
		size_type b_size = block_size(table_block);

		table_block += i / b_size;
		i = i % b_size;

		return (*my_table)[table_block]
				  [static_cast<std::ptrdiff_t>(i)];
	}

	/** Go to the next segment. */
	segment_facade_impl &
	operator++()
	{
		++my_seg;
		return *this;
	}

	/** Go to the next segment. Postfix form. */
	segment_facade_impl
	operator++(int)
	{
		segment_facade_impl tmp = *this;
		++(*this);
		return tmp;
	}

	/** Go to the previous segment. */
	segment_facade_impl &
	operator--()
	{
		--my_seg;
		return *this;
	}

	/** Go to the previous segment. Postfix form. */
	segment_facade_impl
	operator--(int)
	{
		segment_facade_impl tmp = *this;
		--(*this);
		return tmp;
	}

	segment_facade_impl &
	operator+=(segment_index_t off)
	{
		my_seg += off;
		return *this;
	}

	segment_facade_impl &
	operator-=(segment_index_t off)
	{
		my_seg -= off;
		return *this;
	}

	segment_facade_impl
	operator+(segment_index_t off) const
	{
		return segment_facade_impl(*(this->my_table), my_seg + off);
	}

	segment_facade_impl
	operator-(segment_index_t off) const
	{
		return segment_facade_impl(*(this->my_table), my_seg - off);
	}

	/** Allocate and enable the segment. */
	void
	enable(pool_base &pop)
	{
		assert(my_seg >= embedded_segments);

		if (my_seg < first_block) {
			enable_first_block(pop);
		} else {
			enable_big_segment(pop);
		}
	}

	/** Deallocate the segment. */
	void
	disable()
	{
		assert(my_seg >= embedded_segments);

		if (my_seg < first_block) {
			if (my_seg == embedded_segments) {
				size_type sz = segment_size(first_block) -
					embedded_buckets;
				delete_persistent<bucket_type[]>(
					(*my_table)[my_seg], sz);
			}
			(*my_table)[my_seg] = nullptr;
		} else {
			block_range blocks = segment_blocks(my_seg);

			for (segment_index_t b = blocks.first;
			     b < blocks.second; ++b) {
				if ((*my_table)[b] != nullptr) {
					delete_persistent<bucket_type[]>(
						(*my_table)[b],
						block_size(b));
					(*my_table)[b] = nullptr;
				}
			}
		}
	}

	/** @returns size of the segment. */
	constexpr size_type
	size() const
	{
		return segment_size(my_seg ? my_seg : 1);
	}

	/** @returns true if the segment is enabled. */
	bool
	is_valid() const
	{
		block_range blocks = segment_blocks(my_seg);

		for (segment_index_t b = blocks.first; b < blocks.second;
		     ++b) {
			if ((*my_table)[b] == nullptr)
				return false;
		}

		return true;
	}

private:
	using block_range = std::pair<segment_index_t, segment_index_t>;

	/** @returns block indexes [begin, end) for the given segment. */
	static block_range
	segment_blocks(segment_index_t seg)
	{
		segment_index_t begin = first_block_in_segment(seg);

		return block_range(begin, begin + blocks_in_segment(seg));
	}

	void
	enable_first_block(pool_base &pop)
	{
		assert(my_seg == embedded_segments);
		{
			transaction::manual tx(pop);

			size_type sz =
				segment_size(first_block) - embedded_buckets;
			(*my_table)[my_seg] =
				make_persistent<bucket_type[]>(sz);

			persistent_ptr<bucket_type> base =
				(*my_table)[embedded_segments].raw();

			for (segment_index_t s = my_seg + 1; s < first_block;
			     ++s) {
				std::ptrdiff_t off =
					static_cast<std::ptrdiff_t>(
						segment_base(s) -
						segment_base(my_seg));

				(*my_table)[s] = (base + off).raw();
			}

			transaction::commit();
		}
	}

	void
	enable_big_segment(pool_base &pop)
	{
		block_range blocks = segment_blocks(my_seg);
		{
			transaction::manual tx(pop);

			for (segment_index_t b = blocks.first;
			     b < blocks.second; ++b) {
				assert((*my_table)[b] == nullptr);
				(*my_table)[b] =
					make_persistent<bucket_type[]>(
						block_size(b));
			}

			transaction::commit();
		}
	}

	/** Pointer to the table of blocks. */
	table_pointer my_table;

	/** Segment index. */
	segment_index_t my_seg;
}; /* class segment_facade_impl */
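/*
 * Worked example (added): how operator[] above locates bucket i == 5 of
 * segment 3. first_block_in_segment(3) == 3 and block_size(3) ==
 * segment_size(3) == 8, so table_block = 3 + 5 / 8 == 3 and the result is
 * (*my_table)[3][5 % 8], i.e. the sixth bucket of block 3.
 */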
/**
 * Base class of concurrent_hash_map.
 */
template <typename Key, typename T, typename MutexType,
	  typename ScopedLockType>
class hash_map_base {
public:
	using mutex_t = MutexType;
	using scoped_t = ScopedLockType;

	/** Size type. */
	using size_type = size_t;

	/** Type of a hash code. */
	using hashcode_type = size_t;

	/** Node base type. */
	using node = hash_map_node<Key, T, mutex_t, scoped_t>;

	/** Node base pointer. */
	using node_ptr_t = detail::persistent_pool_ptr<node>;

	/** Bucket type. */
	struct bucket {
		using mutex_t = MutexType;
		using scoped_t = ScopedLockType;

		/** Bucket mutex. */
		mutex_t mutex;

		/** Atomic flag indicating whether the bucket was rehashed. */
		p<std::atomic<uint64_t>> rehashed;

		/** List of the nodes stored in the bucket. */
		node_ptr_t node_list;

		/** Default constructor. */
		bucket() : node_list(nullptr)
		{
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
			VALGRIND_HG_DISABLE_CHECKING(&rehashed,
						     sizeof(rehashed));
#endif
			rehashed.get_rw() = false;
		}

		/** @returns true if the bucket is rehashed and ready. */
		bool
		is_rehashed(std::memory_order order)
		{
			return rehashed.get_ro().load(order);
		}

		/** Set the rehashed flag. */
		void
		set_rehashed(std::memory_order order)
		{
			rehashed.get_rw().store(true, order);
		}

		bucket(const bucket &) = delete;
		bucket &operator=(const bucket &) = delete;
	}; /* struct bucket */

	/** Segment traits. */
	using segment_traits_t = segment_traits<bucket>;

	/** Segment index type. */
	using segment_index_t = typename segment_traits_t::segment_index_t;

	/** Count of buckets in the embedded segments. */
	static const size_type embedded_buckets =
		segment_traits_t::embedded_buckets;

	/** Count of segments in the first block. */
	static const size_type first_block = segment_traits_t::first_block;

	/** Size of a block table. */
	constexpr static size_type block_table_size =
		segment_traits_t::number_of_blocks();

	/** Segment pointer. */
	using segment_ptr_t = persistent_ptr<bucket[]>;

	/** Bucket pointer. */
	using bucket_ptr_t = persistent_ptr<bucket>;

	/** Block pointers table type. */
	using blocks_table_t = segment_ptr_t[block_table_size];

	/** Segment mutex type. */
	using segment_enable_mutex_t = pmem::obj::mutex;

	/* ... (definition of the features struct elided) ... */

	/** Compat and incompat features of the layout. */
	static constexpr features header_features = {0, 0};

	/** ID of the persistent memory pool where the hash map resides. */
	p<uint64_t> my_pool_uuid;

	/** Layout feature flags, used for compatibility checks. */
	features layout_features = (features)header_features;

	/** Reserved (my_mask may become a volatile-on-pmem property). */
	std::aligned_storage<sizeof(size_t), sizeof(size_t)>::type
		my_mask_reserved;

	/** Hash mask = sum of allocated segment sizes - 1. */
	p<std::atomic<hashcode_type>> my_mask;

	/** Padding to avoid false sharing. */
	std::aligned_storage<32, 8>::type padding1;

	/** Segment pointers table. */
	blocks_table_t my_table;

	/** Size of the container in elements. */
	p<std::atomic<size_type>> my_size;

	/** Padding. */
	std::aligned_storage<24, 8>::type padding2;

	/** Reserved for future use. */
	std::aligned_storage<64, 8>::type reserved;

	/** Mutex serializing segment enabling. */
	segment_enable_mutex_t my_segment_enable_mutex;

	/** Embedded segments. */
	bucket my_embedded_segment[embedded_buckets];
public:
	const std::atomic<hashcode_type> &
	mask() const noexcept
	{
		return my_mask.get_ro();
	}

	std::atomic<hashcode_type> &
	mask() noexcept
	{
		return my_mask.get_rw();
	}

	/** Const segment facade type. */
	using const_segment_facade_t =
		segment_facade_impl<blocks_table_t, segment_traits_t, true>;

	/** Non-const segment facade type. */
	using segment_facade_t =
		segment_facade_impl<blocks_table_t, segment_traits_t, false>;

	/** Default constructor. */
	hash_map_base()
	{
		static_assert(
			sizeof(size_type) == sizeof(std::atomic<size_type>),
			"std::atomic should have the same layout as underlying integral type");

#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		VALGRIND_HG_DISABLE_CHECKING(&my_size, sizeof(my_size));
		VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
#endif

		my_size.get_rw() = 0;
		PMEMoid oid = pmemobj_oid(this);

		assert(!OID_IS_NULL(oid));

		my_pool_uuid = oid.pool_uuid_lo;

		pool_base pop = get_pool_base();
		/* Enable the embedded segments. */
		for (size_type i = 0;
		     i < segment_traits_t::embedded_segments; ++i) {
			my_table[i] = pmemobj_oid(
				my_embedded_segment +
				segment_traits_t::segment_base(i));
			segment_facade_t seg(my_table, i);
			mark_rehashed<false>(pop, seg);
		}
	}

	/** Re-calculate the mask value on each process start. */
	void
	calculate_mask()
	{
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		VALGRIND_HG_DISABLE_CHECKING(&my_size, sizeof(my_size));
		VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
#endif
#if LIBPMEMOBJ_CPP_VG_PMEMCHECK_ENABLED
		VALGRIND_PMC_REMOVE_PMEM_MAPPING(&my_mask, sizeof(my_mask));
#endif

		hashcode_type m = embedded_buckets - 1;

		const_segment_facade_t segment(
			my_table, segment_traits_t::embedded_segments);

		while (segment.is_valid()) {
			m += segment.size();
			++segment;
		}

		mask().store(m, std::memory_order_relaxed);
	}

	/** Restore the size after process restart. */
	void
	restore_size(size_type actual_size)
	{
		my_size.get_rw().store(actual_size,
				       std::memory_order_relaxed);
		pool_base pop = get_pool_base();
		pop.persist(my_size);
	}
	/** Initialize the buckets in a new segment. */
	template <bool Flush = true>
	void
	mark_rehashed(pool_base &pop, segment_facade_t &segment)
	{
		for (size_type i = 0; i < segment.size(); ++i) {
			bucket *b = &(segment[i]);

			assert_not_locked<mutex_t, scoped_t>(b->mutex);

			b->set_rehashed(std::memory_order_relaxed);
		}

		if (Flush) {
			/* Flush in a separate loop to avoid
			 * read-after-flush. */
			for (size_type i = 0; i < segment.size(); ++i) {
				bucket *b = &(segment[i]);
				pop.flush(b->rehashed);
			}
		}
	}

	/** Enable a new segment in the hash map. */
	void
	enable_segment(segment_index_t k, bool is_initial = false)
	{
		assert(k);

		pool_base pop = get_pool_base();
		size_type sz;

		if (k >= first_block) {
			segment_facade_t new_segment(my_table, k);

			sz = new_segment.size();
			if (!new_segment.is_valid())
				new_segment.enable(pop);

			if (is_initial)
				mark_rehashed(pop, new_segment);

			/* Double it to get the entire capacity of the
			 * container. */
			sz <<= 1;
		} else {
			/* The first block. */
			assert(k == segment_traits_t::embedded_segments);

			for (segment_index_t i = k; i < first_block; ++i) {
				segment_facade_t new_segment(my_table, i);

				if (!new_segment.is_valid())
					new_segment.enable(pop);

				if (is_initial)
					mark_rehashed(pop, new_segment);
			}

			sz = segment_traits_t::segment_size(first_block);
		}
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		ANNOTATE_HAPPENS_BEFORE(&my_mask);
#endif
		mask().store(sz - 1, std::memory_order_release);
	}
	/** Get bucket by (masked) hashcode. */
	bucket *
	get_bucket(hashcode_type h) const
	{
		segment_index_t s = segment_traits_t::segment_index_of(h);

		h -= segment_traits_t::segment_base(s);

		const_segment_facade_t segment(my_table, s);

		assert(segment.is_valid());

		return &(segment[h]);
	}

	/** Check for a mask race. */
	inline bool
	check_mask_race(hashcode_type h, hashcode_type &m) const
	{
		hashcode_type m_now, m_old = m;

		m_now = mask().load(std::memory_order_acquire);

		if (m_old != m_now)
			return check_rehashing_collision(h, m_old, m = m_now);

		return false;
	}

	/** Process a mask race: check for a rehashing collision. */
	bool
	check_rehashing_collision(hashcode_type h, hashcode_type m_old,
				  hashcode_type m) const
	{
		assert(m_old != m);

		/* True if the mask changed for this hashcode, i.e. the
		 * bucket the key belongs to may have moved. */
		if ((h & m_old) != (h & m)) {
			/* Find the next applicable mask after m_old. */
			for (++m_old; !(h & m_old); m_old <<= 1)
				;

			m_old = (m_old << 1) - 1; /* get full mask from a
						     bit */

			assert((m_old & (m_old + 1)) == 0 && m_old <= m);

			/* Check whether the rehashing collision is real. */
			bucket *b = get_bucket(h & m_old);
			return b->is_rehashed(std::memory_order_acquire);
		}

		return false;
	}
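/*
 * Worked example (added): suppose a lookup computed its bucket with
 * m_old == 0x3f (64 buckets) while another thread grew the table to
 * m == 0x7f. For h == 0x51, h & m_old == 0x11 but h & m == 0x51, so the key
 * may have been relocated. The loop above finds the next set bit of h past
 * m_old (0x40), widens it to the full mask 0x7f, and checks whether bucket
 * h & 0x7f == 0x51 has already been rehashed; only then must the caller
 * restart the operation with the new mask.
 */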
	/** Insert a node into the given bucket and update the map size. */
	template <typename Node, typename... Args>
	size_type
	insert_new_node(bucket *b,
			detail::persistent_pool_ptr<Node> &new_node,
			Args &&... args)
	{
		pool_base pop = get_pool_base();

		pmem::obj::transaction::run(pop, [&] {
			new_node = pmem::obj::make_persistent<Node>(
				b->node_list, std::forward<Args>(args)...);
			b->node_list = new_node; /* bucket is locked */
		});

		/* Increment the size and persist it; after a crash the size
		 * is recalculated during runtime initialization. */
		size_t sz = ++(my_size.get_rw());
		pop.persist(&my_size, sizeof(my_size));

		return sz;
	}

	/**
	 * Check the load factor and decide whether a new segment should be
	 * allocated.
	 * @return true if a new segment was allocated.
	 */
	bool
	check_growth(hashcode_type m, size_type sz)
	{
		if (sz >= m) {
			segment_index_t new_seg =
				static_cast<segment_index_t>(detail::Log2(
					m + 1)); /* optimized
						    segment_index_of */

			assert(segment_facade_t(my_table, new_seg - 1)
				       .is_valid());

			std::unique_lock<segment_enable_mutex_t> lock(
				my_segment_enable_mutex, std::try_to_lock);

			if (lock) {
				if (mask().load(
					    std::memory_order_relaxed) ==
				    m) {
					/* Nobody else grew it yet. */
					enable_segment(new_seg);

					return true;
				}
			}
		}

		return false;
	}

	/** Prepare enough segments for the requested number of buckets. */
	void
	reserve(size_type buckets)
	{
		if (buckets == 0)
			return;

		--buckets;

		bool is_initial = (my_size.get_ro() == 0);

		for (size_type m = mask(); buckets > m; m = mask())
			enable_segment(
				segment_traits_t::segment_index_of(m + 1),
				is_initial);
	}
	/** Swap hash_map_base state, transactionally. */
	void
	internal_swap(hash_map_base<Key, T, mutex_t, scoped_t> &table)
	{
		pool_base p = get_pool_base();
		{
			transaction::manual tx(p);

			this->my_pool_uuid.swap(table.my_pool_uuid);

			/* Swap my_mask. */
			this->mask() = table.mask().exchange(
				this->mask(), std::memory_order_relaxed);

			/* Swap my_size. */
			this->my_size.get_rw() =
				table.my_size.get_rw().exchange(
					this->my_size.get_ro(),
					std::memory_order_relaxed);

			for (size_type i = 0; i < embedded_buckets; ++i)
				this->my_embedded_segment[i].node_list.swap(
					table.my_embedded_segment[i]
						.node_list);

			for (size_type i =
				     segment_traits_t::embedded_segments;
			     i < block_table_size; ++i)
				this->my_table[i].swap(table.my_table[i]);

			transaction::commit();
		}
	}

	/** Get the pool base from my_pool_uuid. */
	pool_base
	get_pool_base()
	{
		PMEMobjpool *pop =
			pmemobj_pool_by_oid(PMEMoid{my_pool_uuid, 0});

		return pool_base(pop);
	}
}; /* class hash_map_base */
/**
 * Forward iterator over concurrent_hash_map.
 */
template <typename Container, bool is_const>
class hash_map_iterator {
public:
	using iterator_category = std::forward_iterator_tag;
	using difference_type = ptrdiff_t;
	using map_type = Container;
	using value_type = typename map_type::value_type;
	using node = typename map_type::node;
	using bucket = typename map_type::bucket;
	using map_ptr = typename std::conditional<is_const, const map_type *,
						  map_type *>::type;
	using reference = typename std::conditional<
		is_const, typename map_type::const_reference,
		typename map_type::reference>::type;
	using pointer = typename std::conditional<
		is_const, typename map_type::const_pointer,
		typename map_type::pointer>::type;

private:
	template <typename C, bool M, bool U>
	friend bool operator==(const hash_map_iterator<C, M> &i,
			       const hash_map_iterator<C, U> &j);

	template <typename C, bool M, bool U>
	friend bool operator!=(const hash_map_iterator<C, M> &i,
			       const hash_map_iterator<C, U> &j);

	friend class hash_map_iterator<map_type, true>;

#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
private:
	template <typename Key, typename T, typename Hash, typename KeyEqual,
		  typename MutexType, typename ScopedLockType>
	friend class ::pmem::obj::concurrent_hash_map;
#else
public: /* workaround */
#endif
	hash_map_iterator(map_ptr map, size_t index)
		: my_map(map),
		  my_index(index),
		  my_bucket(nullptr),
		  my_node(nullptr)
	{
		if (my_index <= my_map->mask()) {
			bucket_accessor acc(my_map, my_index);
			my_bucket = acc.get();
			my_node = static_cast<node *>(
				my_bucket->node_list.get(
					my_map->my_pool_uuid));

			if (!my_node)
				advance_to_next_bucket();
		}
	}
public:
	/** Construct an undefined iterator. */
	hash_map_iterator() = default;

	/** Copy constructor. */
	hash_map_iterator(const hash_map_iterator &other)
		: my_map(other.my_map),
		  my_index(other.my_index),
		  my_bucket(other.my_bucket),
		  my_node(other.my_node)
	{
	}

	/** Construct a const iterator from a non-const iterator. */
	template <typename U = void,
		  typename = typename std::enable_if<is_const, U>::type>
	hash_map_iterator(const hash_map_iterator<map_type, false> &other)
		: my_map(other.my_map),
		  my_index(other.my_index),
		  my_bucket(other.my_bucket),
		  my_node(other.my_node)
	{
	}

	hash_map_iterator &operator=(const hash_map_iterator &it) = default;

	/** Indirection (dereference). */
	reference operator*() const
	{
		assert(my_node);
		return my_node->item;
	}

	/** Member access. */
	pointer operator->() const
	{
		return &operator*();
	}

	/** Prefix increment. */
	hash_map_iterator &
	operator++()
	{
		my_node = static_cast<node *>(
			my_node->next.get((my_map->my_pool_uuid)));

		if (!my_node)
			advance_to_next_bucket();

		return *this;
	}

	/** Postfix increment. */
	hash_map_iterator
	operator++(int)
	{
		hash_map_iterator old(*this);
		operator++();
		return old;
	}

private:
	/** Map over which we are iterating. */
	map_ptr my_map = nullptr;

	/** Bucket index of the current item. */
	size_t my_index = 0;

	/** Pointer to the bucket. */
	bucket *my_bucket = nullptr;

	/** Pointer to the node. */
	node *my_node = nullptr;

	class bucket_accessor {
	public:
		bucket_accessor(map_ptr m, size_t index)
		{
			my_bucket = m->get_bucket(index);
		}

		bucket *
		get() const
		{
			return my_bucket;
		}

	private:
		bucket *my_bucket;
	};

	void
	advance_to_next_bucket()
	{
		size_t k = my_index + 1;

		assert(my_bucket);

		while (k <= my_map->mask()) {
			bucket_accessor acc(my_map, k);
			my_bucket = acc.get();

			if (my_bucket->node_list) {
				my_node = static_cast<node *>(
					my_bucket->node_list.get(
						my_map->my_pool_uuid));

				my_index = k;

				return;
			}

			++k;
		}

		my_bucket = nullptr;
		my_node = nullptr;
		my_index = k;
	}
}; /* class hash_map_iterator */
template <typename Container, bool M, bool U>
bool
operator==(const hash_map_iterator<Container, M> &i,
	   const hash_map_iterator<Container, U> &j)
{
	return i.my_node == j.my_node && i.my_map == j.my_map;
}

template <typename Container, bool M, bool U>
bool
operator!=(const hash_map_iterator<Container, M> &i,
	   const hash_map_iterator<Container, U> &j)
{
	return i.my_node != j.my_node || i.my_map != j.my_map;
}
} /* namespace concurrent_hash_map_internal */
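/*
 * Usage sketch (added): traversing a map with the forward iterator.
 * Iteration is not thread-safe and is intended for quiescent phases, e.g.
 * right after runtime_initialize().
 *
 * @code
 * for (auto it = map.begin(); it != map.end(); ++it)
 *	handle(it->first, it->second); // handle() is hypothetical
 * @endcode
 */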
/**
 * Persistent memory aware implementation of Intel TBB concurrent_hash_map.
 */
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
class concurrent_hash_map
    : protected concurrent_hash_map_internal::hash_map_base<Key, T, MutexType,
							     ScopedLockType> {
	template <typename Container, bool is_const>
	friend class concurrent_hash_map_internal::hash_map_iterator;

public:
	using size_type =
		typename concurrent_hash_map_internal::hash_map_base<
			Key, T, MutexType, ScopedLockType>::size_type;
	using hashcode_type =
		typename concurrent_hash_map_internal::hash_map_base<
			Key, T, MutexType, ScopedLockType>::hashcode_type;
	using key_type = Key;
	using mapped_type = T;
	using value_type =
		typename concurrent_hash_map_internal::hash_map_base<
			Key, T, MutexType,
			ScopedLockType>::node::value_type;
	using difference_type = ptrdiff_t;
	using pointer = value_type *;
	using const_pointer = const value_type *;
	using reference = value_type &;
	using const_reference = const value_type &;
	using iterator = concurrent_hash_map_internal::hash_map_iterator<
		concurrent_hash_map, false>;
	using const_iterator =
		concurrent_hash_map_internal::hash_map_iterator<
			concurrent_hash_map, true>;
	using hasher = Hash;
	using key_equal =
		typename concurrent_hash_map_internal::key_equal_type<
			Hash, KeyEqual>::type;
	using mutex_t = MutexType;
	using scoped_t = ScopedLockType;

protected:
	using hash_map_base =
		concurrent_hash_map_internal::hash_map_base<Key, T, mutex_t,
							    scoped_t>;
	using hash_map_base::calculate_mask;
	using hash_map_base::check_growth;
	using hash_map_base::check_mask_race;
	using hash_map_base::embedded_buckets;
	using hash_map_base::get_bucket;
	using hash_map_base::get_pool_base;
	using hash_map_base::header_features;
	using hash_map_base::insert_new_node;
	using hash_map_base::internal_swap;
	using hash_map_base::layout_features;
	using hash_map_base::mask;
	using hash_map_base::reserve;
	using node = typename hash_map_base::node;
	using node_mutex_t = typename node::mutex_t;
	using node_ptr_t = typename hash_map_base::node_ptr_t;
	using bucket = typename hash_map_base::bucket;
	using bucket_lock_type = typename bucket::scoped_t;
	using segment_index_t = typename hash_map_base::segment_index_t;
	using segment_traits_t = typename hash_map_base::segment_traits_t;
	using segment_facade_t = typename hash_map_base::segment_facade_t;
	using scoped_lock_traits_type =
		concurrent_hash_map_internal::scoped_lock_traits<scoped_t>;

	friend class const_accessor;
	using persistent_node_ptr_t = detail::persistent_pool_ptr<node>;
	void
	delete_node(const node_ptr_t &n)
	{
		delete_persistent<node>(
			detail::static_persistent_pool_pointer_cast<node>(n)
				.get_persistent_ptr(this->my_pool_uuid));
	}

	template <typename K>
	persistent_node_ptr_t
	search_bucket(const K &key, bucket *b) const
	{
		assert(b->is_rehashed(std::memory_order_relaxed));

		persistent_node_ptr_t n =
			detail::static_persistent_pool_pointer_cast<node>(
				b->node_list);

		while (n &&
		       !key_equal{}(
			       key,
			       n.get(this->my_pool_uuid)->item.first)) {
			n = detail::static_persistent_pool_pointer_cast<
				node>(n.get(this->my_pool_uuid)->next);
		}

		return n;
	}
	/**
	 * Bucket accessor: finds a bucket by masked hashcode, rehashes it if
	 * needed, acquires the lock, and provides access to the bucket.
	 */
	class bucket_accessor : public bucket_lock_type {
		bucket *my_b;

	public:
		bucket_accessor(concurrent_hash_map *base,
				const hashcode_type h, bool writer = false)
		{
			acquire(base, h, writer);
		}

		/**
		 * Find a bucket by masked hashcode, optionally rehash, and
		 * acquire the lock.
		 */
		inline void
		acquire(concurrent_hash_map *base, const hashcode_type h,
			bool writer = false)
		{
			my_b = base->get_bucket(h);

			if (my_b->is_rehashed(std::memory_order_acquire) ==
				    false &&
			    bucket_lock_type::try_acquire(
				    this->my_b->mutex, /*write=*/true)) {
				if (my_b->is_rehashed(
					    std::memory_order_relaxed) ==
				    false) {
					/* recursive rehashing */
					base->rehash_bucket<false>(my_b, h);
				}
			} else {
				bucket_lock_type::acquire(my_b->mutex,
							  writer);
			}

			assert(my_b->is_rehashed(std::memory_order_relaxed));
		}

		/** @returns true if the bucket is locked for write. */
		bool
		is_writer() const
		{
			return bucket_lock_type::is_writer;
		}

		/** Get bucket pointer. */
		bucket *
		get() const
		{
			return my_b;
		}

		/** Overloaded arrow operator. */
		bucket *operator->() const
		{
			return this->get();
		}
	};

	/**
	 * Serial bucket accessor used to access buckets in serial
	 * operations.
	 */
	class serial_bucket_accessor {
		bucket *my_b;

	public:
		serial_bucket_accessor(concurrent_hash_map *base,
				       const hashcode_type h,
				       bool writer = false)
		{
			acquire(base, h, writer);
		}

		/* Find a bucket by masked hashcode, optionally rehash. */
		inline void
		acquire(concurrent_hash_map *base, const hashcode_type h,
			bool writer = false)
		{
			my_b = base->get_bucket(h);

			if (my_b->is_rehashed(std::memory_order_relaxed) ==
			    false) {
				/* recursive rehashing */
				base->rehash_bucket<true>(my_b, h);
			}

			assert(my_b->is_rehashed(std::memory_order_relaxed));
		}

		/** Added for consistency with the bucket_accessor class. */
		bool
		is_writer() const
		{
			return true;
		}

		/** Get bucket pointer. */
		bucket *
		get() const
		{
			return my_b;
		}

		bucket *operator->() const
		{
			return this->get();
		}
	};

	/** Get the hash code of the key stored in node n. */
	hashcode_type
	get_hash_code(node_ptr_t &n)
	{
		return hasher{}(
			detail::static_persistent_pool_pointer_cast<node>(n)(
				this->my_pool_uuid)
				->item.first);
	}
	template <bool serial>
	void
	rehash_bucket(bucket *b_new, const hashcode_type h)
	{
		using accessor_type = typename std::conditional<
			serial, serial_bucket_accessor,
			bucket_accessor>::type;

		using scoped_lock_traits_type =
			concurrent_hash_map_internal::scoped_lock_traits<
				accessor_type>;

		/* Find the parent bucket and rehash its chain into b_new. */
		pool_base pop = get_pool_base();
		node_ptr_t *p_new = &(b_new->node_list);
		bool restore_after_crash = *p_new != nullptr;

		/* Get the parent bucket of the new bucket. */
		hashcode_type mask = (1u << detail::Log2(h)) - 1;
		assert((h & mask) < h);
		accessor_type b_old(
			this, h & mask,
			scoped_lock_traits_type::initial_rw_state(true));

		/* Get the full mask of the new bucket. */
		mask = (mask << 1) | 1;
		assert((mask & (mask + 1)) == 0 && (h & mask) == h);

	restart:
		for (node_ptr_t *p_old = &(b_old->node_list), n = *p_old; n;
		     n = *p_old) {
			hashcode_type c = get_hash_code(n);
#ifndef NDEBUG
			hashcode_type bmask = h & (mask >> 1);

			bmask = bmask == 0
				? 1 /* minimal mask of the parent bucket */
				: (1u << (detail::Log2(bmask) + 1)) - 1;

			assert((c & bmask) == (h & bmask));
#endif

			if ((c & mask) == h) {
				if (!b_old.is_writer() &&
				    !scoped_lock_traits_type::
					    upgrade_to_writer(b_old)) {
					/* The node pointer may be invalid
					 * due to a concurrent erase. */
					goto restart;
				}

				if (restore_after_crash) {
					while (*p_new != nullptr &&
					       (mask &
						get_hash_code(*p_new)) ==
						       h &&
					       *p_new != n) {
						p_new = &((*p_new)(
								  this->my_pool_uuid)
								  ->next);
					}

					restore_after_crash = false;
				}

				/* Add to the new bucket. */
				*p_new = n;
				pop.persist(p_new, sizeof(*p_new));

				/* Exclude from the old bucket. */
				*p_old = n(this->my_pool_uuid)->next;
				pop.persist(p_old, sizeof(*p_old));

				p_new = &(n(this->my_pool_uuid)->next);
			} else {
				/* Iterate to the next item. */
				p_old = &(n(this->my_pool_uuid)->next);
			}
		}

		if (restore_after_crash) {
			while (*p_new != nullptr &&
			       (mask & get_hash_code(*p_new)) == h)
				p_new = &((*p_new)(this->my_pool_uuid)
						  ->next);
		}

		*p_new = nullptr;
		pop.persist(p_new, sizeof(*p_new));

		/* Mark the new bucket as rehashed. */
		b_new->set_rehashed(std::memory_order_release);
		pop.persist(b_new->rehashed);
	}
	/* Layout compatibility check (enclosing function elided). */
	if (layout_features.incompat != header_features.incompat)
		throw pmem::layout_error(
			"Incompat flags mismatch, for more details go to: https://pmem.io/pmdk/cpp_obj/ \n");
	/* ... */

public:
	/**
	 * Combines data access, locking, and garbage collection.
	 */
	class const_accessor
	    : protected node::scoped_t /* which derives from no_copy */ {
		friend class concurrent_hash_map<Key, T, Hash, KeyEqual,
						 mutex_t, scoped_t>;
		friend class accessor;
		using node::scoped_t::try_acquire;

	public:
		/** Type of value. */
		using value_type =
			const typename concurrent_hash_map::value_type;

		/** @returns true if the accessor holds no element. */
		bool
		empty() const
		{
			return !my_node;
		}

		/**
		 * Release the accessor.
		 * @throw transaction_scope_error if called inside a
		 * transaction.
		 */
		void
		release()
		{
			concurrent_hash_map_internal::check_outside_tx();

			if (my_node) {
				node::scoped_t::release();
				my_node = nullptr;
			}
		}

		/** @returns reference to the associated value. */
		const_reference operator*() const
		{
			assert(my_node);

			return my_node->item;
		}

		/** @returns pointer to the associated value. */
		const_pointer operator->() const
		{
			return &operator*();
		}

		/** Create an empty result. */
		const_accessor() : my_node(OID_NULL)
		{
			concurrent_hash_map_internal::check_outside_tx();
		}

		/** Destroy the result after releasing the reference. */
		~const_accessor()
		{
			my_node = OID_NULL;
			/* ... */
		}

	protected:
		/** Pointer to the currently held node. */
		persistent_ptr<node> my_node;

		hashcode_type my_hash;
	};

	/**
	 * Allows write access to elements; combines data access, locking,
	 * and garbage collection.
	 */
	class accessor : public const_accessor {
	public:
		/** Type of value. */
		using value_type =
			typename concurrent_hash_map::value_type;

		/** Return reference to associated value in hash table. */
		reference operator*() const
		{
			assert(this->my_node);

			return this->my_node->item;
		}

		/** Return pointer to associated value in hash table. */
		pointer operator->() const
		{
			return &operator*();
		}
	};
	/** Construct an empty table. */
	concurrent_hash_map() : hash_map_base()
	{
		concurrent_hash_map_internal::check_outside_tx();
	}

	/* ... (n-bucket and move constructors elided) ... */

	/** Copy constructor. */
	concurrent_hash_map(const concurrent_hash_map &table)
		: hash_map_base()
	{
		concurrent_hash_map_internal::check_outside_tx();

		reserve(table.size());

		internal_copy(table);
	}

	/** Construct a table copying the iteration range [first, last). */
	template <typename I>
	concurrent_hash_map(I first, I last)
	{
		concurrent_hash_map_internal::check_outside_tx();

		reserve(static_cast<size_type>(std::distance(first, last)));

		internal_copy(first, last);
	}

	/* ... (initializer-list constructor elided) ... */

	/**
	 * Initialize the persistent concurrent hash map after process
	 * restart: rebuild the volatile mask and, after a non-graceful
	 * shutdown, recount the stored elements.
	 */
	void
	runtime_initialize(bool graceful_shutdown = false)
	{
		/* ... */
		calculate_mask();

		if (!graceful_shutdown) {
			auto actual_size =
				std::distance(this->begin(), this->end());
			assert(actual_size >= 0);
			this->restore_size(size_type(actual_size));
		} else {
			assert(this->size() ==
			       size_type(std::distance(this->begin(),
						       this->end())));
		}
	}
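/*
 * Usage sketch (added): reopening a pool after a restart. The pool path,
 * layout name and root structure are hypothetical.
 *
 * @code
 * struct root {
 *	pmem::obj::persistent_ptr<map_type> map;
 * };
 *
 * auto pop = pmem::obj::pool<root>::open("/daxfs/poolfile", "layout");
 * auto map = pop.root()->map;
 *
 * // my_mask and my_size are runtime state: rebuild the mask and, since the
 * // previous run may not have shut down gracefully, recount the elements.
 * map->runtime_initialize();
 * @endcode
 */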
	/** Assignment. Not thread safe. */
	concurrent_hash_map &
	operator=(const concurrent_hash_map &table)
	{
		if (this != &table) {
			clear();
			internal_copy(table);
		}

		return *this;
	}

	/* ... (initializer-list assignment and destructor elided) ... */

	/** Rehash and optionally resize the whole table. */
	void rehash(size_type n = 0);

	/** @returns an iterator to the beginning. Not thread safe. */
	iterator
	begin()
	{
		return iterator(this, 0);
	}

	/** @returns an iterator to the end. */
	iterator
	end()
	{
		return iterator(this, mask() + 1);
	}

	/** @returns a const iterator to the beginning. */
	const_iterator
	begin() const
	{
		return const_iterator(this, 0);
	}

	/** @returns a const iterator to the end. */
	const_iterator
	end() const
	{
		return const_iterator(this, mask() + 1);
	}

	/** @returns number of items in the table. */
	size_type
	size() const
	{
		return this->my_size.get_ro();
	}

	/** @returns true if size() == 0. */
	bool
	empty() const
	{
		return this->my_size.get_ro() == 0;
	}

	/** Upper bound on size. */
	size_type
	max_size() const
	{
		return (~size_type(0)) / sizeof(node);
	}

	/* ... (bucket_count() elided) ... */
	/** @return count of items (0 or 1). */
	size_type
	count(const Key &key) const
	{
		concurrent_hash_map_internal::check_outside_tx();

		return const_cast<concurrent_hash_map *>(this)
			->internal_find(key, nullptr, false);
	}

	/**
	 * This overload only participates in overload resolution if the
	 * qualified-id Hash::transparent_key_equal is valid and denotes a
	 * type.
	 */
	template <typename K,
		  typename = typename std::enable_if<
			  concurrent_hash_map_internal::
				  has_transparent_key_equal<hasher>::value,
			  K>::type>
	size_type
	count(const K &key) const
	{
		concurrent_hash_map_internal::check_outside_tx();

		return const_cast<concurrent_hash_map *>(this)
			->internal_find(key, nullptr, false);
	}

	/** Find item and acquire a read lock on the item. */
	bool
	find(const_accessor &result, const Key &key) const
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return const_cast<concurrent_hash_map *>(this)
			->internal_find(key, &result, false);
	}

	/** Heterogeneous-key overload, see count() above. */
	template <typename K,
		  typename = typename std::enable_if<
			  concurrent_hash_map_internal::
				  has_transparent_key_equal<hasher>::value,
			  K>::type>
	bool
	find(const_accessor &result, const K &key) const
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return const_cast<concurrent_hash_map *>(this)
			->internal_find(key, &result, false);
	}

	/** Find item and acquire a write lock on the item. */
	bool
	find(accessor &result, const Key &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_find(key, &result, true);
	}

	/** Heterogeneous-key overload of the above. */
	template <typename K,
		  typename = typename std::enable_if<
			  concurrent_hash_map_internal::
				  has_transparent_key_equal<hasher>::value,
			  K>::type>
	bool
	find(accessor &result, const K &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_find(key, &result, true);
	}

	/** Insert item (if absent) and acquire a read lock on the item. */
	bool
	insert(const_accessor &result, const Key &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(key, &result, false, key);
	}

	/** Insert item (if absent) and acquire a write lock on the item. */
	bool
	insert(accessor &result, const Key &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(key, &result, true, key);
	}

	/** Insert item by copying (if absent); acquire a read lock. */
	bool
	insert(const_accessor &result, const value_type &value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(value.first, &result, false, value);
	}

	/** Insert item by copying (if absent); acquire a write lock. */
	bool
	insert(accessor &result, const value_type &value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(value.first, &result, true, value);
	}

	/** Insert item by copying if there is no such key present. */
	bool
	insert(const value_type &value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		return internal_insert(value.first, nullptr, false, value);
	}

	/** Insert item by moving (if absent); acquire a read lock. */
	bool
	insert(const_accessor &result, value_type &&value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(value.first, &result, false,
				       std::move(value));
	}

	/** Insert item by moving (if absent); acquire a write lock. */
	bool
	insert(accessor &result, value_type &&value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(value.first, &result, true,
				       std::move(value));
	}

	/** Insert item by moving if there is no such key present. */
	bool
	insert(value_type &&value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		return internal_insert(value.first, nullptr, false,
				       std::move(value));
	}

	/** Insert range [first, last). */
	template <typename I>
	void
	insert(I first, I last)
	{
		concurrent_hash_map_internal::check_outside_tx();

		for (; first != last; ++first)
			insert(*first);
	}

	/** Insert initializer list. */
	void
	insert(std::initializer_list<value_type> il)
	{
		concurrent_hash_map_internal::check_outside_tx();

		insert(il.begin(), il.end());
	}

	/**
	 * Remove element with corresponding key.
	 * @returns true if the element was removed by this call.
	 */
	bool
	erase(const Key &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		return internal_erase(key);
	}

	/** Heterogeneous-key overload of the above. */
	template <typename K,
		  typename = typename std::enable_if<
			  concurrent_hash_map_internal::
				  has_transparent_key_equal<hasher>::value,
			  K>::type>
	bool
	erase(const K &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		return internal_erase(key);
	}
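/*
 * Usage sketch (added): typical concurrent access through accessors.
 * map_type is hypothetical; the item lock is held for the accessor's
 * lifetime.
 *
 * @code
 * using map_type =
 *	pmem::obj::concurrent_hash_map<pmem::obj::p<int>,
 *				       pmem::obj::p<int>>;
 *
 * void
 * writer(map_type &map)
 * {
 *	map_type::accessor acc;
 *	if (map.insert(acc, 42)) // write lock on the item while acc lives
 *		acc->second = 1; // update; persist as appropriate
 * }
 *
 * void
 * reader(const map_type &map)
 * {
 *	map_type::const_accessor acc;
 *	if (map.find(acc, 42)) // read lock on the item while acc lives
 *		assert(acc->second.get_ro() == 1);
 * }
 * @endcode
 */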
protected:
	/* Try to acquire the item mutex for read or write, with backoff. */
	bool try_acquire_item(const_accessor *result, node_mutex_t &mutex,
			      bool write);

	template <typename K>
	bool internal_find(const K &key, const_accessor *result, bool write);

	template <typename K, typename... Args>
	bool internal_insert(const K &key, const_accessor *result,
			     bool write, Args &&... args);

	/* Obtain a pointer to the node; may upgrade the bucket lock and
	 * rerun the search. */
	template <bool Bucket_rw_lock, typename K>
	persistent_node_ptr_t
	get_node(const K &key, bucket_accessor &b)
	{
		/* Find a node. */
		auto n = search_bucket(key, b.get());

		if (!n) {
			if (Bucket_rw_lock && !b.is_writer() &&
			    !scoped_lock_traits_type::upgrade_to_writer(b)) {
				/* Rerun search_bucket, in case another
				 * thread inserted the item during the
				 * upgrade. */
				n = search_bucket(key, b.get());
				if (n) {
					/* Unfortunately, it did. */
					scoped_lock_traits_type::
						downgrade_to_reader(b);
					return n;
				}
			}
		}

		return n;
	}

	template <typename K>
	bool internal_erase(const K &key);

	void clear_segment(segment_index_t s);

	/** Copy "source" to *this, where *this must start out empty. */
	void internal_copy(const concurrent_hash_map &source);

	template <typename I>
	void internal_copy(I first, I last);
}; /* class concurrent_hash_map */
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::try_acquire_item(const_accessor *result,
						      node_mutex_t &mutex,
						      bool write)
{
	/* Acquire the item. */
	if (!result->try_acquire(mutex, write)) {
		for (detail::atomic_backoff backoff(true);;) {
			if (result->try_acquire(mutex, write))
				break;

			if (!backoff.bounded_pause())
				return false;
		}
	}

	return true;
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
template <typename K>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_find(const K &key,
						   const_accessor *result,
						   bool write)
{
	assert(!result || !result->my_node);

	hashcode_type m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
	ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif

	assert((m & (m + 1)) == 0);

	hashcode_type const h = hasher{}(key);

	persistent_node_ptr_t node;

	while (true) {
		/* Get the bucket and acquire the lock. */
		bucket_accessor b(
			this, h & m,
			scoped_lock_traits_type::initial_rw_state(false));
		node = get_node<false>(key, b);

		if (!node) {
			/* The element may have been relocated; retry. */
			if (check_mask_race(h, m)) {
				b.release();
				continue;
			}

			return false;
		}

		/* No need to acquire the item, or the item was acquired. */
		if (!result ||
		    try_acquire_item(result,
				     node.get(this->my_pool_uuid)->mutex,
				     write))
			break;

		/* The wait is taking too long; restart the operation. */
		b.release();

		std::this_thread::yield();

		m = mask().load(std::memory_order_acquire);
	}

	if (result) {
		result->my_node =
			node.get_persistent_ptr(this->my_pool_uuid);
		result->my_hash = h;
	}

	return true;
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
template <typename K, typename... Args>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_insert(const K &key,
						     const_accessor *result,
						     bool write,
						     Args &&... args)
{
	assert(!result || !result->my_node);

	hashcode_type m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
	ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif

	assert((m & (m + 1)) == 0);

	hashcode_type const h = hasher{}(key);

	persistent_node_ptr_t node;
	size_t new_size = 0;
	bool inserted = false;

	while (true) {
		/* Get the bucket and acquire the lock. */
		bucket_accessor b(
			this, h & m,
			scoped_lock_traits_type::initial_rw_state(true));
		node = get_node<true>(key, b);

		if (!node) {
			/* The element may have been relocated; retry. */
			if (check_mask_race(h, m)) {
				b.release();
				continue;
			}

			/* Insert and set the flag to grow the container. */
			new_size = insert_new_node(
				b.get(), node, std::forward<Args>(args)...);
			inserted = true;
		}

		/* No need to acquire the item, or the item was acquired. */
		if (!result ||
		    try_acquire_item(result,
				     node.get(this->my_pool_uuid)->mutex,
				     write))
			break;

		/* The wait is taking too long; restart the operation. */
		b.release();

		std::this_thread::yield();

		m = mask().load(std::memory_order_acquire);
	}

	if (result) {
		result->my_node =
			node.get_persistent_ptr(this->my_pool_uuid);
		result->my_hash = h;
	}

	check_growth(m, new_size);

	return inserted;
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
template <typename K>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_erase(const K &key)
{
	node_ptr_t n;
	hashcode_type const h = hasher{}(key);
	hashcode_type m = mask().load(std::memory_order_acquire);
	pool_base pop = get_pool_base();

restart : {
	/* Lock scope: get the bucket. */
	bucket_accessor b(this, h & m,
			  scoped_lock_traits_type::initial_rw_state(true));

search:
	node_ptr_t *p = &b->node_list;
	n = *p;

	while (n &&
	       !key_equal{}(
		       key,
		       detail::static_persistent_pool_pointer_cast<node>(
			       n)(this->my_pool_uuid)
			       ->item.first)) {
		p = &n(this->my_pool_uuid)->next;
		n = *p;
	}

	if (!n) {
		/* Not found, but the mask could have changed. */
		if (check_mask_race(h, m))
			goto restart;

		return false;
	} else if (!b.is_writer() &&
		   !scoped_lock_traits_type::upgrade_to_writer(b)) {
		if (check_mask_race(h, m)) /* contended upgrade, check mask */
			goto restart;

		goto search;
	}

	{
		transaction::manual tx(pop);

		persistent_ptr<node> del = n(this->my_pool_uuid);

		*p = del->next;

		/* Other threads may still use this element through
		 * accessors; the item_locker waits until they are done. */
		{
			typename node::scoped_t item_locker(del->mutex,
							    /*write=*/true);
		}

		/* Only one thread can delete it, due to the write lock on
		 * the bucket. */
		delete_persistent<node>(del);

		transaction::commit();
	}

	--(this->my_size.get_rw());
	pop.persist(this->my_size);
}

	return true;
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::swap(concurrent_hash_map &table)
{
	internal_swap(table);
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::rehash(size_type sz)
{
	concurrent_hash_map_internal::check_outside_tx();

	reserve(sz);
	hashcode_type m = mask();

	/* Only the last segment should be scanned for rehashing; the other
	 * segments are already rehashed. */
	hashcode_type b = (m + 1) >> 1;

	/* Zero or a power of 2. */
	assert((b & (b - 1)) == 0);

	for (; b <= m; ++b) {
		bucket *bp = get_bucket(b);

		concurrent_hash_map_internal::assert_not_locked<mutex_t,
								scoped_t>(
			bp->mutex);

		if (bp->is_rehashed(std::memory_order_relaxed) == false)
			rehash_bucket<true>(bp, b);
	}
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::clear()
{
	hashcode_type m = mask();

	assert((m & (m + 1)) == 0);

#ifndef NDEBUG
	/* Check consistency. */
	for (segment_index_t b = 0; b <= m; ++b) {
		bucket *bp = get_bucket(b);
		concurrent_hash_map_internal::assert_not_locked<mutex_t,
								scoped_t>(
			bp->mutex);
	}
#endif

	pool_base pop = get_pool_base();
	{ /* transaction scope */
		transaction::manual tx(pop);

		this->my_size.get_rw() = 0;
		segment_index_t s = segment_traits_t::segment_index_of(m);

		assert(s + 1 == this->block_table_size ||
		       !segment_facade_t(this->my_table, s + 1).is_valid());

		do {
			clear_segment(s);
		} while (s-- > 0);

		/* clear() is not thread-safe, so a relaxed store of the
		 * mask is sufficient. */
		mask().store(embedded_buckets - 1,
			     std::memory_order_relaxed);

		transaction::commit();
	}
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::clear_segment(segment_index_t s)
{
	segment_facade_t segment(this->my_table, s);

	assert(segment.is_valid());

	size_type sz = segment.size();
	for (segment_index_t i = 0; i < sz; ++i) {
		for (node_ptr_t n = segment[i].node_list; n;
		     n = segment[i].node_list) {
			segment[i].node_list =
				n(this->my_pool_uuid)->next;
			delete_node(n);
		}
	}

	if (s >= segment_traits_t::embedded_segments)
		segment.disable();
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_copy(const concurrent_hash_map
							   &source)
{
	reserve(source.my_size.get_ro());
	internal_copy(source.begin(), source.end());
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
template <typename I>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_copy(I first, I last)
{
	hashcode_type m = mask();

	for (; first != last; ++first) {
		hashcode_type h = hasher{}(first->first);
		bucket *b = get_bucket(h & m);

		assert(b->is_rehashed(std::memory_order_relaxed));

		detail::persistent_pool_ptr<node> p;
		insert_new_node(b, p, *first);
	}
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
inline bool
operator==(const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				      ScopedLockType> &a,
	   const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType> &b)
{
	if (a.size() != b.size())
		return false;

	typename concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType>::const_iterator
		i(a.begin()),
		i_end(a.end());

	typename concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType>::const_iterator j,
		j_end(b.end());

	for (; i != i_end; ++i) {
		j = b.equal_range(i->first).first;

		if (j == j_end || !(i->second == j->second))
			return false;
	}

	return true;
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
inline bool
operator!=(const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				      ScopedLockType> &a,
	   const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType> &b)
{
	return !(a == b);
}
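/*
 * Note (added): operator== above compares element sets via equal_range(),
 * so two maps are equal when their sizes match and every key maps to an
 * equal value. The free swap() below simply forwards to the member swap().
 *
 * @code
 * bool same = (map_a == map_b);
 * swap(map_a, map_b); // transactional state exchange
 * @endcode
 */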
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
inline void
swap(concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>
	     &a,
     concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>
	     &b)
{
	a.swap(b);
}

} /* namespace obj */
} /* namespace pmem */

#endif /* PMEMOBJ_CONCURRENT_HASH_MAP_HPP */