#ifndef KOKKOS_UNORDERED_MAP_HPP
#define KOKKOS_UNORDERED_MAP_HPP
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#define KOKKOS_IMPL_PUBLIC_INCLUDE
#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_UNORDEREDMAP
#endif
#include <Kokkos_Core.hpp>
#include <Kokkos_Functional.hpp>
#include <Kokkos_Bitset.hpp>

#include <impl/Kokkos_Traits.hpp>
#include <impl/Kokkos_UnorderedMap_impl.hpp>
namespace Kokkos {

enum : unsigned { UnorderedMapInvalidIndex = ~0u };
/// \brief First element of the return value of UnorderedMap::insert().
class UnorderedMapInsertResult {
  enum Status : uint32_t {
    SUCCESS          = 1u << 31,
    EXISTING         = 1u << 30,
    FREED_EXISTING   = 1u << 29,
    LIST_LENGTH_MASK = ~(SUCCESS | EXISTING | FREED_EXISTING)
  };

 public:
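  // Note (added commentary): the three high bits of m_status encode the
  // SUCCESS, EXISTING, and FREED_EXISTING flags; the remaining low bits count
  // how far down the collision list the insert walked (list_position()).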
  //! Did the map successfully insert the key/value pair.
  KOKKOS_FORCEINLINE_FUNCTION
  bool success() const { return (m_status & SUCCESS); }
  //! Was the key already present in the map.
  KOKKOS_FORCEINLINE_FUNCTION
  bool existing() const { return (m_status & EXISTING); }
  //! Did the map fail to insert the key due to insufficient capacity.
  KOKKOS_FORCEINLINE_FUNCTION
  bool failed() const { return m_index == UnorderedMapInvalidIndex; }
  KOKKOS_FORCEINLINE_FUNCTION
  bool freed_existing() const { return (m_status & FREED_EXISTING); }
  KOKKOS_FORCEINLINE_FUNCTION
  uint32_t list_position() const { return (m_status & LIST_LENGTH_MASK); }
  //! Index where the key can be found as long as the insert did not fail.
  KOKKOS_FORCEINLINE_FUNCTION
  uint32_t index() const { return m_index; }
  KOKKOS_FORCEINLINE_FUNCTION
  UnorderedMapInsertResult() : m_index(UnorderedMapInvalidIndex), m_status(0) {}
  KOKKOS_FORCEINLINE_FUNCTION
  void increment_list_position() {
    m_status += (list_position() < LIST_LENGTH_MASK) ? 1u : 0u;
  }
  KOKKOS_FORCEINLINE_FUNCTION
  void set_existing(uint32_t i, bool arg_freed_existing) {
    m_index = i;
    m_status =
        EXISTING | (arg_freed_existing ? FREED_EXISTING : 0u) | list_position();
  }
  KOKKOS_FORCEINLINE_FUNCTION
  void set_success(uint32_t i) {
    m_index  = i;
    m_status = SUCCESS | list_position();
  }

 private:
  uint32_t m_index;
  uint32_t m_status;
};
template <typename Key, typename Value,
          typename Device = Kokkos::DefaultExecutionSpace,
          typename Hasher = pod_hash<std::remove_const_t<Key>>,
          typename EqualTo = pod_equal_to<std::remove_const_t<Key>>>
class UnorderedMap {
 private:
  using host_mirror_space =
      typename ViewTraits<Key, Device, void, void>::host_mirror_space;
 public:
  using declared_key_type = Key;
  using key_type          = std::remove_const_t<declared_key_type>;
  using const_key_type    = std::add_const_t<key_type>;

  using declared_value_type = Value;
  using value_type          = std::remove_const_t<declared_value_type>;
  using const_value_type    = std::add_const_t<value_type>;
  using device_type     = Device;
  using execution_space = typename Device::execution_space;
  using hasher_type     = Hasher;
  using equal_to_type   = EqualTo;
  using size_type       = uint32_t;
  using declared_map_type =
      UnorderedMap<declared_key_type, declared_value_type, device_type,
                   hasher_type, equal_to_type>;
  using insertable_map_type = UnorderedMap<key_type, value_type, device_type,
                                           hasher_type, equal_to_type>;
  using modifiable_map_type =
      UnorderedMap<const_key_type, value_type, device_type, hasher_type,
                   equal_to_type>;
  using const_map_type = UnorderedMap<const_key_type, const_value_type,
                                      device_type, hasher_type, equal_to_type>;
  static const bool is_set = std::is_void<value_type>::value;
  static const bool has_const_key =
      std::is_same<const_key_type, declared_key_type>::value;
  static const bool has_const_value =
      is_set || std::is_same<const_value_type, declared_value_type>::value;

  static const bool is_insertable_map =
      !has_const_key && (is_set || !has_const_value);
  static const bool is_modifiable_map = has_const_key && !has_const_value;
  static const bool is_const_map      = has_const_key && has_const_value;
  using insert_result = UnorderedMapInsertResult;

  using histogram_type = Impl::UnorderedMapHistogram<const_map_type>;

 private:
  enum : size_type { invalid_index = ~static_cast<size_type>(0) };
  using impl_value_type = std::conditional_t<is_set, int, declared_value_type>;

  using key_type_view = std::conditional_t<
      is_insertable_map, View<key_type *, device_type>,
      View<const key_type *, device_type, MemoryTraits<RandomAccess>>>;

  using value_type_view = std::conditional_t<
      is_insertable_map || is_modifiable_map,
      View<impl_value_type *, device_type>,
      View<const impl_value_type *, device_type, MemoryTraits<RandomAccess>>>;

  using size_type_view = std::conditional_t<
      is_insertable_map, View<size_type *, device_type>,
      View<const size_type *, device_type, MemoryTraits<RandomAccess>>>;

  using bitset_type = std::conditional_t<is_insertable_map, Bitset<Device>,
                                         ConstBitset<Device>>;

  enum { modified_idx = 0, erasable_idx = 1, failed_insert_idx = 2 };
  enum { num_scalars = 3 };
  using scalars_view = View<int[num_scalars], LayoutLeft, device_type>;
 public:
  //! Constructor.
  UnorderedMap(size_type capacity_hint = 0, hasher_type hasher = hasher_type(),
               equal_to_type equal_to = equal_to_type())
      : m_bounded_insert(true),
        m_hasher(hasher),
        m_equal_to(equal_to),
        m_size(0),
        m_available_indexes(calculate_capacity(capacity_hint)),
        m_hash_lists(view_alloc(WithoutInitializing, "UnorderedMap hash list"),
                     Impl::find_hash_size(capacity())),
        m_next_index(view_alloc(WithoutInitializing, "UnorderedMap next index"),
                     capacity() + 1),
        m_keys("UnorderedMap keys", capacity()),
        m_values("UnorderedMap values", (is_set ? 0 : capacity())),
        m_scalars("UnorderedMap scalars") {
    if (!is_insertable_map) {
      Kokkos::Impl::throw_runtime_exception(
          "Cannot construct a non-insertable (i.e. const key_type) "
          "unordered_map");
    }

    Kokkos::deep_copy(m_hash_lists, invalid_index);
    Kokkos::deep_copy(m_next_index, invalid_index);
  }
  void reset_failed_insert_flag() { reset_flag(failed_insert_idx); }
  histogram_type get_histogram() { return histogram_type(*this); }
  //! Clear all entries in the table.
  void clear() {
    m_bounded_insert = true;

    if (capacity() == 0) return;

    m_available_indexes.clear();

    Kokkos::deep_copy(m_hash_lists, invalid_index);
    Kokkos::deep_copy(m_next_index, invalid_index);
    {
      const key_type tmp = key_type();
      Kokkos::deep_copy(m_keys, tmp);
    }
    Kokkos::deep_copy(m_scalars, 0);
    m_size = 0;
  }
  KOKKOS_INLINE_FUNCTION constexpr bool is_allocated() const {
    return (m_keys.is_allocated() && (is_set || m_values.is_allocated()) &&
            m_scalars.is_allocated());
  }
  //! Change the capacity of the map.
  bool rehash(size_type requested_capacity = 0) {
    const bool bounded_insert = (capacity() == 0) || (size() == 0u);
    return rehash(requested_capacity, bounded_insert);
  }
  bool rehash(size_type requested_capacity, bool bounded_insert) {
    if (!is_insertable_map) return false;

    const size_type curr_size = size();
    requested_capacity =
        (requested_capacity < curr_size) ? curr_size : requested_capacity;

    insertable_map_type tmp(requested_capacity, m_hasher, m_equal_to);

    if (curr_size) {
      tmp.m_bounded_insert = false;
      Impl::UnorderedMapRehash<insertable_map_type> f(tmp, *this);
      f.apply();
    }
    tmp.m_bounded_insert = bounded_insert;

    *this = tmp;

    return true;
  }
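  // Usage sketch (illustrative comment, not part of the original header):
  // a common host-side pattern is to attempt a parallel fill, check
  // failed_insert(), grow the table with rehash(), and retry.  The functor
  // name fill_functor below is hypothetical.
  //
  //   Kokkos::UnorderedMap<uint32_t, double> map(initial_capacity);
  //   bool done = false;
  //   while (!done) {
  //     Kokkos::parallel_for(num_keys, fill_functor(map));
  //     done = !map.failed_insert();
  //     if (!done) map.rehash(2 * map.capacity());
  //   }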
  //! The number of entries in the table.
  size_type size() const {
    if (capacity() == 0u) return 0u;
    if (modified()) {
      m_size = m_available_indexes.count();
      reset_flag(modified_idx);
    }
    return m_size;
  }

  //! The current number of failed insert() calls.
  bool failed_insert() const { return get_flag(failed_insert_idx); }
  bool erasable() const {
    return is_insertable_map ? get_flag(erasable_idx) : false;
  }
  bool begin_erase() {
    bool result = !erasable();
    if (is_insertable_map && result) {
      execution_space().fence(
          "Kokkos::UnorderedMap::begin_erase: fence before setting erasable "
          "flag");
      set_flag(erasable_idx);
    }
    return result;
  }
  bool end_erase() {
    bool result = erasable();
    if (is_insertable_map && result) {
      execution_space().fence(
          "Kokkos::UnorderedMap::end_erase: fence before erasing");
      Impl::UnorderedMapErase<declared_map_type> f(*this);
      f.apply();
      execution_space().fence(
          "Kokkos::UnorderedMap::end_erase: fence after erasing");
      reset_flag(erasable_idx);
    }
    return result;
  }
  //! The maximum number of entries that the table can hold.
  KOKKOS_FORCEINLINE_FUNCTION
  size_type capacity() const { return m_available_indexes.size(); }
  //! The number of hash table "buckets".
  KOKKOS_INLINE_FUNCTION
  size_type hash_capacity() const { return m_hash_lists.extent(0); }
  KOKKOS_INLINE_FUNCTION
  insert_result insert(key_type const &k,
                       impl_value_type const &v = impl_value_type()) const {
    insert_result result;

    if (!is_insertable_map || capacity() == 0u ||
        m_scalars((int)erasable_idx)) {
      return result;
    }

    if (!m_scalars((int)modified_idx)) {
      m_scalars((int)modified_idx) = true;
    }

    int volatile &failed_insert_ref = m_scalars((int)failed_insert_idx);

    const size_type hash_value = m_hasher(k);
    const size_type hash_list  = hash_value % m_hash_lists.extent(0);

    size_type *curr_ptr = &m_hash_lists[hash_list];
    size_type new_index = invalid_index;

    // Force integer multiply to long
    size_type index_hint = static_cast<size_type>(
        (static_cast<double>(hash_list) * capacity()) / m_hash_lists.extent(0));

    size_type find_attempts = 0;

    enum : unsigned { bounded_find_attempts = 32u };
    const size_type max_attempts =
        (m_bounded_insert &&
         (bounded_find_attempts < m_available_indexes.max_hint()))
            ? bounded_find_attempts
            : m_available_indexes.max_hint();

    bool not_done = true;

    while (not_done) {
      // Continue searching the unordered list for this key.
      // The list may be appended by other threads during the insert phase.
#ifdef KOKKOS_ENABLE_SYCL
      size_type curr = Kokkos::atomic_load(curr_ptr);
#else
      size_type curr = volatile_load(curr_ptr);
#endif

      KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
          &m_keys[curr != invalid_index ? curr : 0]);
      while (curr != invalid_index && !m_equal_to(
#ifdef KOKKOS_ENABLE_SYCL
                                          Kokkos::atomic_load(&m_keys[curr])
#else
                                          volatile_load(&m_keys[curr])
#endif
                                              ,
                                          k)) {
        result.increment_list_position();
        index_hint = curr;
        curr_ptr   = &m_next_index[curr];
#ifdef KOKKOS_ENABLE_SYCL
        curr = Kokkos::atomic_load(curr_ptr);
#else
        curr = volatile_load(curr_ptr);
#endif
        KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
            &m_keys[curr != invalid_index ? curr : 0]);
      }

      //------------------------------------------------------------
      // If the key is already present then return that index.
      if (curr != invalid_index) {
        const bool free_existing = new_index != invalid_index;
        if (free_existing) {
          // Previously claimed an unused entry that was not inserted.
          // Release this unused entry immediately.
          if (!m_available_indexes.reset(new_index)) {
            KOKKOS_IMPL_DO_NOT_USE_PRINTF("Unable to free existing\n");
          }
        }

        result.set_existing(curr, free_existing);
        not_done = false;
      }
      //------------------------------------------------------------
      // Key is not currently in the map.
      // If the thread has claimed an entry try to insert now.
      else {
        // If an unused entry has not already been claimed then do so now.
        if (new_index == invalid_index) {
          bool found = false;
          // Use the hash_list as the flag for the search direction.
          Kokkos::tie(found, index_hint) =
              m_available_indexes.find_any_unset_near(index_hint, hash_list);

          // 'found' and 'index_hint' now hold the index to try to claim.
          if (!found && ++find_attempts >= max_attempts) {
            failed_insert_ref = true;
            not_done          = false;
          } else if (m_available_indexes.set(index_hint)) {
            new_index = index_hint;
            // Set key and value.
            KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_keys[new_index]);
#ifdef KOKKOS_ENABLE_SYCL
            Kokkos::atomic_store(&m_keys[new_index], k);
#else
            m_keys[new_index] = k;
#endif

            if (!is_set) {
              KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_values[new_index]);
#ifdef KOKKOS_ENABLE_SYCL
              Kokkos::atomic_store(&m_values[new_index], v);
#else
              m_values[new_index] = v;
#endif
            }

#ifndef KOKKOS_ENABLE_SYCL
            // Do not proceed until key and value are updated in global memory.
            memory_fence();
#endif
          }
        } else if (failed_insert_ref) {
          not_done = false;
        }

        // Attempt to append the claimed entry into the list.
        // Another thread may also be trying to append to the same list,
        // so protect with an atomic compare-exchange.
        if (new_index != invalid_index &&
            curr == atomic_compare_exchange(
                        curr_ptr, static_cast<size_type>(invalid_index),
                        new_index)) {
          // Succeeded in appending.
          result.set_success(new_index);
          not_done = false;
        }
      }
    }  // while ( not_done )

    return result;
  }
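  // Usage sketch (illustrative comment, not part of the original header):
  // insert() is designed to be called from device code; each thread inserts
  // one key/value pair and can inspect the returned insert_result.  The
  // lambda and the keys/values views below are hypothetical.
  //
  //   Kokkos::parallel_for(
  //       "fill", n, KOKKOS_LAMBDA(const uint32_t i) {
  //         auto r = map.insert(keys(i), values(i));
  //         // r.success(): claimed a new entry; r.existing(): key was
  //         // already present; r.failed(): table ran out of capacity.
  //       });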
  KOKKOS_INLINE_FUNCTION
  bool erase(key_type const &k) const {
    bool result = false;

    if (is_insertable_map && 0u < capacity() &&
        m_scalars((int)erasable_idx)) {
      if (!m_scalars((int)modified_idx)) {
        m_scalars((int)modified_idx) = true;
      }

      size_type index = find(k);
      if (valid_at(index)) {
        m_available_indexes.reset(index);
        result = true;
      }
    }

    return result;
  }
  //! Find the given key k, if it exists in the table.
  KOKKOS_INLINE_FUNCTION
  size_type find(const key_type &k) const {
    size_type curr = 0u < capacity()
                         ? m_hash_lists(m_hasher(k) % m_hash_lists.extent(0))
                         : invalid_index;

    KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]);
    while (curr != invalid_index && !m_equal_to(m_keys[curr], k)) {
      KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
          &m_keys[curr != invalid_index ? curr : 0]);
      curr = m_next_index[curr];
    }

    return curr;
  }
  //! Does the key exist in the map.
  KOKKOS_INLINE_FUNCTION
  bool exists(const key_type &k) const { return valid_at(find(k)); }
  //! Get the value with i as its direct index.
  template <typename Dummy = value_type>
  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
      !std::is_void<Dummy>::value,  // !is_set
      std::conditional_t<has_const_value, impl_value_type, impl_value_type &>>
  value_at(size_type i) const {
    return m_values[i];
  }
  //! Get the key with i as its direct index.
  KOKKOS_FORCEINLINE_FUNCTION
  key_type key_at(size_type i) const { return m_keys[i]; }
  KOKKOS_FORCEINLINE_FUNCTION
  bool valid_at(size_type i) const { return m_available_indexes.test(i); }
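  // Usage sketch (illustrative comment, not part of the original header):
  // entries are visited by index rather than by iterator; valid_at() filters
  // unused slots, and key_at()/value_at() read the stored pair.
  //
  //   Kokkos::parallel_for(
  //       "scan_map", map.capacity(), KOKKOS_LAMBDA(const uint32_t i) {
  //         if (map.valid_at(i)) {
  //           auto k = map.key_at(i);
  //           auto v = map.value_at(i);
  //           // ... use (k, v) ...
  //         }
  //       });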
  template <typename SKey, typename SValue>
  UnorderedMap(
      UnorderedMap<SKey, SValue, Device, Hasher, EqualTo> const &src,
      std::enable_if_t<
          Impl::UnorderedMapCanAssign<declared_key_type, declared_value_type,
                                      SKey, SValue>::value,
          int> = 0)
      : m_bounded_insert(src.m_bounded_insert),
        m_hasher(src.m_hasher),
        m_equal_to(src.m_equal_to),
        m_size(src.m_size),
        m_available_indexes(src.m_available_indexes),
        m_hash_lists(src.m_hash_lists),
        m_next_index(src.m_next_index),
        m_keys(src.m_keys),
        m_values(src.m_values),
        m_scalars(src.m_scalars) {}
  template <typename SKey, typename SValue>
  std::enable_if_t<
      Impl::UnorderedMapCanAssign<declared_key_type, declared_value_type, SKey,
                                  SValue>::value,
      declared_map_type &>
  operator=(UnorderedMap<SKey, SValue, Device, Hasher, EqualTo> const &src) {
    m_bounded_insert    = src.m_bounded_insert;
    m_hasher            = src.m_hasher;
    m_equal_to          = src.m_equal_to;
    m_size              = src.m_size;
    m_available_indexes = src.m_available_indexes;
    m_hash_lists        = src.m_hash_lists;
    m_next_index        = src.m_next_index;
    m_keys              = src.m_keys;
    m_values            = src.m_values;
    m_scalars           = src.m_scalars;
    return *this;
  }
  template <typename SKey, typename SValue, typename SDevice>
  std::enable_if_t<std::is_same<std::remove_const_t<SKey>, key_type>::value &&
                   std::is_same<std::remove_const_t<SValue>, value_type>::value>
  create_copy_view(
      UnorderedMap<SKey, SValue, SDevice, Hasher, EqualTo> const &src) {
    if (m_hash_lists.data() != src.m_hash_lists.data()) {
      insertable_map_type tmp;

      tmp.m_bounded_insert    = src.m_bounded_insert;
      tmp.m_hasher            = src.m_hasher;
      tmp.m_equal_to          = src.m_equal_to;
      tmp.m_size              = src.size();
      tmp.m_available_indexes = bitset_type(src.capacity());
      tmp.m_hash_lists        = size_type_view(
          view_alloc(WithoutInitializing, "UnorderedMap hash list"),
          src.m_hash_lists.extent(0));
      tmp.m_next_index = size_type_view(
          view_alloc(WithoutInitializing, "UnorderedMap next index"),
          src.m_next_index.extent(0));
      tmp.m_keys =
          key_type_view(view_alloc(WithoutInitializing, "UnorderedMap keys"),
                        src.m_keys.extent(0));
      tmp.m_values = value_type_view(
          view_alloc(WithoutInitializing, "UnorderedMap values"),
          src.m_values.extent(0));
      tmp.m_scalars = scalars_view("UnorderedMap scalars");

      Kokkos::deep_copy(tmp.m_available_indexes, src.m_available_indexes);

      using raw_deep_copy =
          Kokkos::Impl::DeepCopy<typename device_type::memory_space,
                                 typename SDevice::memory_space>;

      raw_deep_copy(tmp.m_hash_lists.data(), src.m_hash_lists.data(),
                    sizeof(size_type) * src.m_hash_lists.extent(0));
      raw_deep_copy(tmp.m_next_index.data(), src.m_next_index.data(),
                    sizeof(size_type) * src.m_next_index.extent(0));
      raw_deep_copy(tmp.m_keys.data(), src.m_keys.data(),
                    sizeof(key_type) * src.m_keys.extent(0));
      if (!is_set) {
        raw_deep_copy(tmp.m_values.data(), src.m_values.data(),
                      sizeof(impl_value_type) * src.m_values.extent(0));
      }
      raw_deep_copy(tmp.m_scalars.data(), src.m_scalars.data(),
                    sizeof(int) * num_scalars);

      Kokkos::fence(
          "Kokkos::UnorderedMap::create_copy_view: fence after copy to tmp");

      *this = tmp;
    }
  }
 private:  // private member functions
  bool modified() const { return get_flag(modified_idx); }
  void set_flag(int flag) const {
    using raw_deep_copy =
        Kokkos::Impl::DeepCopy<typename device_type::memory_space,
                               Kokkos::HostSpace>;
    const int true_ = true;
    raw_deep_copy(m_scalars.data() + flag, &true_, sizeof(int));
    Kokkos::fence(
        "Kokkos::UnorderedMap::set_flag: fence after copying flag from "
        "HostSpace");
  }
  void reset_flag(int flag) const {
    using raw_deep_copy =
        Kokkos::Impl::DeepCopy<typename device_type::memory_space,
                               Kokkos::HostSpace>;
    const int false_ = false;
    raw_deep_copy(m_scalars.data() + flag, &false_, sizeof(int));
    Kokkos::fence(
        "Kokkos::UnorderedMap::reset_flag: fence after copying flag from "
        "HostSpace");
  }
  bool get_flag(int flag) const {
    using raw_deep_copy =
        Kokkos::Impl::DeepCopy<Kokkos::HostSpace,
                               typename device_type::memory_space>;
    int result = false;
    raw_deep_copy(&result, m_scalars.data() + flag, sizeof(int));
    Kokkos::fence(
        "Kokkos::UnorderedMap::get_flag: fence after copy to return value in "
        "HostSpace");
    return result;
  }
  static uint32_t calculate_capacity(uint32_t capacity_hint) {
    // Increase by 16% and round to the nearest multiple of 128.
    return capacity_hint
               ? ((static_cast<uint32_t>(7ull * capacity_hint / 6u) + 127u) /
                  128u) *
                     128u
               : 128u;
  }
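  // Worked example (added commentary): a capacity_hint of 1000 becomes
  // 7 * 1000 / 6 = 1166, then (1166 + 127) / 128 * 128 = 1280; i.e. the hint
  // is grown by roughly 16% and rounded up to a multiple of 128.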
  bool m_bounded_insert;
  hasher_type m_hasher;
  equal_to_type m_equal_to;
  mutable size_type m_size;
  bitset_type m_available_indexes;
  size_type_view m_hash_lists;
  size_type_view m_next_index;
  key_type_view m_keys;
  value_type_view m_values;
  scalars_view m_scalars;
  template <typename KKey, typename VValue, typename DDevice, typename HHash,
            typename EEqualTo>
  friend class UnorderedMap;

  template <typename UMap>
  friend struct Impl::UnorderedMapErase;

  template <typename UMap>
  friend struct Impl::UnorderedMapHistogram;

  template <typename UMap>
  friend struct Impl::UnorderedMapPrint;
};
// Specialization of deep_copy() for two UnorderedMap objects.
template <typename DKey, typename DT, typename DDevice, typename SKey,
          typename ST, typename SDevice, typename Hasher, typename EqualTo>
inline void deep_copy(
    UnorderedMap<DKey, DT, DDevice, Hasher, EqualTo> &dst,
    const UnorderedMap<SKey, ST, SDevice, Hasher, EqualTo> &src) {
  dst.create_copy_view(src);
}

}  // namespace Kokkos
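// Usage sketch (illustrative comment, not part of the original header):
// deep_copy() forwards to create_copy_view(), which rebuilds the destination
// map's views and copies the raw data across memory spaces.  A typical use is
// mirroring a device-resident map to the host for inspection:
//
//   Kokkos::UnorderedMap<int, double> device_map(capacity);
//   // ... fill device_map from parallel kernels ...
//   Kokkos::UnorderedMap<int, double, Kokkos::DefaultHostExecutionSpace>
//       host_map;
//   Kokkos::deep_copy(host_map, device_map);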
#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_UNORDEREDMAP
#undef KOKKOS_IMPL_PUBLIC_INCLUDE
#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_UNORDEREDMAP
#endif
#endif  // KOKKOS_UNORDERED_MAP_HPP