1//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the DenseMap class.
11///
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ADT_DENSEMAP_H
15#define LLVM_ADT_DENSEMAP_H
16
17#include "llvm/ADT/ADL.h"
18#include "llvm/ADT/DenseMapInfo.h"
19#include "llvm/ADT/EpochTracker.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/Support/AlignOf.h"
22#include "llvm/Support/Compiler.h"
23#include "llvm/Support/MathExtras.h"
24#include "llvm/Support/MemAlloc.h"
25#include "llvm/Support/ReverseIteration.h"
26#include "llvm/Support/type_traits.h"
27#include <algorithm>
28#include <cassert>
29#include <cstddef>
30#include <cstring>
31#include <initializer_list>
32#include <iterator>
33#include <new>
34#include <type_traits>
35#include <utility>
36
37namespace llvm {
38
39namespace detail {
40
41// We extend a pair to allow users to override the bucket type with their own
42// implementation without requiring two members.
template <typename KeyT, typename ValueT>
struct DenseMapPair : public std::pair<KeyT, ValueT> {
  // Inherit the constructors of std::pair.
  using std::pair<KeyT, ValueT>::pair;

  /// Accessors for the key; DenseMap buckets use these rather than
  /// first/second directly so users can substitute their own bucket type.
  KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
  const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
  /// Accessors for the mapped value.
  ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
  const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
};
52
53} // end namespace detail
54
55template <typename KeyT, typename ValueT,
56 typename KeyInfoT = DenseMapInfo<KeyT>,
57 typename Bucket = llvm::detail::DenseMapPair<KeyT, ValueT>,
58 bool IsConst = false>
59class DenseMapIterator;
60
61template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
62 typename BucketT>
63class DenseMapBase : public DebugEpochBase {
64 template <typename T>
65 using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
66
67public:
68 using size_type = unsigned;
69 using key_type = KeyT;
70 using mapped_type = ValueT;
71 using value_type = BucketT;
72
73 using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
74 using const_iterator =
75 DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;
76
77 inline iterator begin() {
78 // When the map is empty, avoid the overhead of advancing/retreating past
79 // empty buckets.
80 if (empty())
81 return end();
82 if (shouldReverseIterate<KeyT>())
83 return makeIterator(P: getBucketsEnd() - 1, E: getBuckets(), Epoch&: *this);
84 return makeIterator(P: getBuckets(), E: getBucketsEnd(), Epoch&: *this);
85 }
86 inline iterator end() {
87 return makeIterator(P: getBucketsEnd(), E: getBucketsEnd(), Epoch&: *this, NoAdvance: true);
88 }
89 inline const_iterator begin() const {
90 if (empty())
91 return end();
92 if (shouldReverseIterate<KeyT>())
93 return makeConstIterator(P: getBucketsEnd() - 1, E: getBuckets(), Epoch: *this);
94 return makeConstIterator(P: getBuckets(), E: getBucketsEnd(), Epoch: *this);
95 }
96 inline const_iterator end() const {
97 return makeConstIterator(P: getBucketsEnd(), E: getBucketsEnd(), Epoch: *this, NoAdvance: true);
98 }
99
100 // Return an iterator to iterate over keys in the map.
101 inline auto keys() {
102 return map_range(*this, [](const BucketT &P) { return P.getFirst(); });
103 }
104
105 // Return an iterator to iterate over values in the map.
106 inline auto values() {
107 return map_range(*this, [](const BucketT &P) { return P.getSecond(); });
108 }
109
110 inline auto keys() const {
111 return map_range(*this, [](const BucketT &P) { return P.getFirst(); });
112 }
113
114 inline auto values() const {
115 return map_range(*this, [](const BucketT &P) { return P.getSecond(); });
116 }
117
118 [[nodiscard]] bool empty() const { return getNumEntries() == 0; }
119 unsigned size() const { return getNumEntries(); }
120
121 /// Grow the densemap so that it can contain at least \p NumEntries items
122 /// before resizing again.
123 void reserve(size_type NumEntries) {
124 auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
125 incrementEpoch();
126 if (NumBuckets > getNumBuckets())
127 grow(AtLeast: NumBuckets);
128 }
129
130 void clear() {
131 incrementEpoch();
132 if (getNumEntries() == 0 && getNumTombstones() == 0)
133 return;
134
135 // If the capacity of the array is huge, and the # elements used is small,
136 // shrink the array.
137 if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
138 shrink_and_clear();
139 return;
140 }
141
142 const KeyT EmptyKey = getEmptyKey();
143 if constexpr (std::is_trivially_destructible_v<ValueT>) {
144 // Use a simpler loop when values don't need destruction.
145 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
146 P->getFirst() = EmptyKey;
147 } else {
148 const KeyT TombstoneKey = getTombstoneKey();
149 unsigned NumEntries = getNumEntries();
150 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
151 if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
152 if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
153 P->getSecond().~ValueT();
154 --NumEntries;
155 }
156 P->getFirst() = EmptyKey;
157 }
158 }
159 assert(NumEntries == 0 && "Node count imbalance!");
160 (void)NumEntries;
161 }
162 setNumEntries(0);
163 setNumTombstones(0);
164 }
165
166 /// Return true if the specified key is in the map, false otherwise.
167 bool contains(const_arg_type_t<KeyT> Val) const {
168 return doFind(Val) != nullptr;
169 }
170
171 /// Return 1 if the specified key is in the map, 0 otherwise.
172 size_type count(const_arg_type_t<KeyT> Val) const {
173 return contains(Val) ? 1 : 0;
174 }
175
176 iterator find(const_arg_type_t<KeyT> Val) {
177 if (BucketT *Bucket = doFind(Val))
178 return makeIterator(
179 P: Bucket, E: shouldReverseIterate<KeyT>() ? getBuckets() : getBucketsEnd(),
180 Epoch&: *this, NoAdvance: true);
181 return end();
182 }
183 const_iterator find(const_arg_type_t<KeyT> Val) const {
184 if (const BucketT *Bucket = doFind(Val))
185 return makeConstIterator(
186 P: Bucket, E: shouldReverseIterate<KeyT>() ? getBuckets() : getBucketsEnd(),
187 Epoch: *this, NoAdvance: true);
188 return end();
189 }
190
191 /// Alternate version of find() which allows a different, and possibly
192 /// less expensive, key type.
193 /// The DenseMapInfo is responsible for supplying methods
194 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
195 /// type used.
196 template <class LookupKeyT> iterator find_as(const LookupKeyT &Val) {
197 if (BucketT *Bucket = doFind(Val))
198 return makeIterator(
199 P: Bucket, E: shouldReverseIterate<KeyT>() ? getBuckets() : getBucketsEnd(),
200 Epoch&: *this, NoAdvance: true);
201 return end();
202 }
203 template <class LookupKeyT>
204 const_iterator find_as(const LookupKeyT &Val) const {
205 if (const BucketT *Bucket = doFind(Val))
206 return makeConstIterator(
207 P: Bucket, E: shouldReverseIterate<KeyT>() ? getBuckets() : getBucketsEnd(),
208 Epoch: *this, NoAdvance: true);
209 return end();
210 }
211
212 /// lookup - Return the entry for the specified key, or a default
213 /// constructed value if no such entry exists.
214 ValueT lookup(const_arg_type_t<KeyT> Val) const {
215 if (const BucketT *Bucket = doFind(Val))
216 return Bucket->getSecond();
217 return ValueT();
218 }
219
220 // Return the entry with the specified key, or \p Default. This variant is
221 // useful, because `lookup` cannot be used with non-default-constructible
222 // values.
223 template <typename U = std::remove_cv_t<ValueT>>
224 ValueT lookup_or(const_arg_type_t<KeyT> Val, U &&Default) const {
225 if (const BucketT *Bucket = doFind(Val))
226 return Bucket->getSecond();
227 return Default;
228 }
229
230 /// at - Return the entry for the specified key, or abort if no such
231 /// entry exists.
232 const ValueT &at(const_arg_type_t<KeyT> Val) const {
233 auto Iter = this->find(std::move(Val));
234 assert(Iter != this->end() && "DenseMap::at failed due to a missing key");
235 return Iter->second;
236 }
237
238 // Inserts key,value pair into the map if the key isn't already in the map.
239 // If the key is already in the map, it returns false and doesn't update the
240 // value.
241 std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
242 return try_emplace(KV.first, KV.second);
243 }
244
245 // Inserts key,value pair into the map if the key isn't already in the map.
246 // If the key is already in the map, it returns false and doesn't update the
247 // value.
248 std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
249 return try_emplace(std::move(KV.first), std::move(KV.second));
250 }
251
252 // Inserts key,value pair into the map if the key isn't already in the map.
253 // The value is constructed in-place if the key is not in the map, otherwise
254 // it is not moved.
255 template <typename... Ts>
256 std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&...Args) {
257 BucketT *TheBucket;
258 if (LookupBucketFor(Key, TheBucket))
259 return std::make_pair(makeIterator(P: TheBucket,
260 E: shouldReverseIterate<KeyT>()
261 ? getBuckets()
262 : getBucketsEnd(),
263 Epoch&: *this, NoAdvance: true),
264 false); // Already in map.
265
266 // Otherwise, insert the new element.
267 TheBucket =
268 InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
269 return std::make_pair(makeIterator(P: TheBucket,
270 E: shouldReverseIterate<KeyT>()
271 ? getBuckets()
272 : getBucketsEnd(),
273 Epoch&: *this, NoAdvance: true),
274 true);
275 }
276
277 // Inserts key,value pair into the map if the key isn't already in the map.
278 // The value is constructed in-place if the key is not in the map, otherwise
279 // it is not moved.
280 template <typename... Ts>
281 std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&...Args) {
282 BucketT *TheBucket;
283 if (LookupBucketFor(Key, TheBucket))
284 return std::make_pair(makeIterator(P: TheBucket,
285 E: shouldReverseIterate<KeyT>()
286 ? getBuckets()
287 : getBucketsEnd(),
288 Epoch&: *this, NoAdvance: true),
289 false); // Already in map.
290
291 // Otherwise, insert the new element.
292 TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
293 return std::make_pair(makeIterator(P: TheBucket,
294 E: shouldReverseIterate<KeyT>()
295 ? getBuckets()
296 : getBucketsEnd(),
297 Epoch&: *this, NoAdvance: true),
298 true);
299 }
300
301 /// Alternate version of insert() which allows a different, and possibly
302 /// less expensive, key type.
303 /// The DenseMapInfo is responsible for supplying methods
304 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
305 /// type used.
306 template <typename LookupKeyT>
307 std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
308 const LookupKeyT &Val) {
309 BucketT *TheBucket;
310 if (LookupBucketFor(Val, TheBucket))
311 return std::make_pair(makeIterator(P: TheBucket,
312 E: shouldReverseIterate<KeyT>()
313 ? getBuckets()
314 : getBucketsEnd(),
315 Epoch&: *this, NoAdvance: true),
316 false); // Already in map.
317
318 // Otherwise, insert the new element.
319 TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
320 std::move(KV.second), Val);
321 return std::make_pair(makeIterator(P: TheBucket,
322 E: shouldReverseIterate<KeyT>()
323 ? getBuckets()
324 : getBucketsEnd(),
325 Epoch&: *this, NoAdvance: true),
326 true);
327 }
328
329 /// insert - Range insertion of pairs.
330 template <typename InputIt> void insert(InputIt I, InputIt E) {
331 for (; I != E; ++I)
332 insert(*I);
333 }
334
335 /// Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
336 template <typename Range> void insert_range(Range &&R) {
337 insert(adl_begin(R), adl_end(R));
338 }
339
340 template <typename V>
341 std::pair<iterator, bool> insert_or_assign(const KeyT &Key, V &&Val) {
342 auto Ret = try_emplace(Key, std::forward<V>(Val));
343 if (!Ret.second)
344 Ret.first->second = std::forward<V>(Val);
345 return Ret;
346 }
347
348 template <typename V>
349 std::pair<iterator, bool> insert_or_assign(KeyT &&Key, V &&Val) {
350 auto Ret = try_emplace(std::move(Key), std::forward<V>(Val));
351 if (!Ret.second)
352 Ret.first->second = std::forward<V>(Val);
353 return Ret;
354 }
355
356 template <typename... Ts>
357 std::pair<iterator, bool> emplace_or_assign(const KeyT &Key, Ts &&...Args) {
358 auto Ret = try_emplace(Key, std::forward<Ts>(Args)...);
359 if (!Ret.second)
360 Ret.first->second = ValueT(std::forward<Ts>(Args)...);
361 return Ret;
362 }
363
364 template <typename... Ts>
365 std::pair<iterator, bool> emplace_or_assign(KeyT &&Key, Ts &&...Args) {
366 auto Ret = try_emplace(std::move(Key), std::forward<Ts>(Args)...);
367 if (!Ret.second)
368 Ret.first->second = ValueT(std::forward<Ts>(Args)...);
369 return Ret;
370 }
371
372 bool erase(const KeyT &Val) {
373 BucketT *TheBucket = doFind(Val);
374 if (!TheBucket)
375 return false; // not in map.
376
377 TheBucket->getSecond().~ValueT();
378 TheBucket->getFirst() = getTombstoneKey();
379 decrementNumEntries();
380 incrementNumTombstones();
381 return true;
382 }
383 void erase(iterator I) {
384 BucketT *TheBucket = &*I;
385 TheBucket->getSecond().~ValueT();
386 TheBucket->getFirst() = getTombstoneKey();
387 decrementNumEntries();
388 incrementNumTombstones();
389 }
390
391 ValueT &operator[](const KeyT &Key) {
392 BucketT *TheBucket;
393 if (LookupBucketFor(Key, TheBucket))
394 return TheBucket->second;
395
396 return InsertIntoBucket(TheBucket, Key)->second;
397 }
398
399 ValueT &operator[](KeyT &&Key) {
400 BucketT *TheBucket;
401 if (LookupBucketFor(Key, TheBucket))
402 return TheBucket->second;
403
404 return InsertIntoBucket(TheBucket, std::move(Key))->second;
405 }
406
407 /// isPointerIntoBucketsArray - Return true if the specified pointer points
408 /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
409 /// value in the DenseMap).
410 bool isPointerIntoBucketsArray(const void *Ptr) const {
411 return Ptr >= getBuckets() && Ptr < getBucketsEnd();
412 }
413
414 /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
415 /// array. In conjunction with the previous method, this can be used to
416 /// determine whether an insertion caused the DenseMap to reallocate.
417 const void *getPointerIntoBucketsArray() const { return getBuckets(); }
418
419protected:
420 DenseMapBase() = default;
421
422 void destroyAll() {
423 if (getNumBuckets() == 0) // Nothing to do.
424 return;
425
426 const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
427 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
428 if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
429 !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
430 P->getSecond().~ValueT();
431 P->getFirst().~KeyT();
432 }
433 }
434
435 void initEmpty() {
436 setNumEntries(0);
437 setNumTombstones(0);
438
439 assert((getNumBuckets() & (getNumBuckets() - 1)) == 0 &&
440 "# initial buckets must be a power of two!");
441 const KeyT EmptyKey = getEmptyKey();
442 for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
443 ::new (&B->getFirst()) KeyT(EmptyKey);
444 }
445
446 /// Returns the number of buckets to allocate to ensure that the DenseMap can
447 /// accommodate \p NumEntries without need to grow().
448 unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
449 // Ensure that "NumEntries * 4 < NumBuckets * 3"
450 if (NumEntries == 0)
451 return 0;
452 // +1 is required because of the strict equality.
453 // For example if NumEntries is 48, we need to return 401.
454 return NextPowerOf2(A: NumEntries * 4 / 3 + 1);
455 }
456
457 void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
458 initEmpty();
459
460 // Insert all the old elements.
461 const KeyT EmptyKey = getEmptyKey();
462 const KeyT TombstoneKey = getTombstoneKey();
463 for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
464 if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
465 !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
466 // Insert the key/value into the new table.
467 BucketT *DestBucket;
468 bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
469 (void)FoundVal; // silence warning.
470 assert(!FoundVal && "Key already in new map?");
471 DestBucket->getFirst() = std::move(B->getFirst());
472 ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
473 incrementNumEntries();
474
475 // Free the value.
476 B->getSecond().~ValueT();
477 }
478 B->getFirst().~KeyT();
479 }
480 }
481
482 template <typename OtherBaseT>
483 void copyFrom(
484 const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
485 assert(&other != this);
486 assert(getNumBuckets() == other.getNumBuckets());
487
488 setNumEntries(other.getNumEntries());
489 setNumTombstones(other.getNumTombstones());
490
491 BucketT *Buckets = getBuckets();
492 const BucketT *OtherBuckets = other.getBuckets();
493 const size_t NumBuckets = getNumBuckets();
494 if constexpr (std::is_trivially_copyable_v<KeyT> &&
495 std::is_trivially_copyable_v<ValueT>) {
496 memcpy(reinterpret_cast<void *>(Buckets), OtherBuckets,
497 NumBuckets * sizeof(BucketT));
498 } else {
499 const KeyT EmptyKey = getEmptyKey();
500 const KeyT TombstoneKey = getTombstoneKey();
501 for (size_t I = 0; I < NumBuckets; ++I) {
502 ::new (&Buckets[I].getFirst()) KeyT(OtherBuckets[I].getFirst());
503 if (!KeyInfoT::isEqual(Buckets[I].getFirst(), EmptyKey) &&
504 !KeyInfoT::isEqual(Buckets[I].getFirst(), TombstoneKey))
505 ::new (&Buckets[I].getSecond()) ValueT(OtherBuckets[I].getSecond());
506 }
507 }
508 }
509
510 static unsigned getHashValue(const KeyT &Val) {
511 return KeyInfoT::getHashValue(Val);
512 }
513
514 template <typename LookupKeyT>
515 static unsigned getHashValue(const LookupKeyT &Val) {
516 return KeyInfoT::getHashValue(Val);
517 }
518
519 static const KeyT getEmptyKey() {
520 static_assert(std::is_base_of_v<DenseMapBase, DerivedT>,
521 "Must pass the derived type to this template!");
522 return KeyInfoT::getEmptyKey();
523 }
524
525 static const KeyT getTombstoneKey() { return KeyInfoT::getTombstoneKey(); }
526
527private:
528 iterator makeIterator(BucketT *P, BucketT *E, DebugEpochBase &Epoch,
529 bool NoAdvance = false) {
530 if (shouldReverseIterate<KeyT>()) {
531 BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
532 return iterator(B, E, Epoch, NoAdvance);
533 }
534 return iterator(P, E, Epoch, NoAdvance);
535 }
536
537 const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
538 const DebugEpochBase &Epoch,
539 const bool NoAdvance = false) const {
540 if (shouldReverseIterate<KeyT>()) {
541 const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
542 return const_iterator(B, E, Epoch, NoAdvance);
543 }
544 return const_iterator(P, E, Epoch, NoAdvance);
545 }
546
547 unsigned getNumEntries() const {
548 return static_cast<const DerivedT *>(this)->getNumEntries();
549 }
550
551 void setNumEntries(unsigned Num) {
552 static_cast<DerivedT *>(this)->setNumEntries(Num);
553 }
554
555 void incrementNumEntries() { setNumEntries(getNumEntries() + 1); }
556
557 void decrementNumEntries() { setNumEntries(getNumEntries() - 1); }
558
559 unsigned getNumTombstones() const {
560 return static_cast<const DerivedT *>(this)->getNumTombstones();
561 }
562
563 void setNumTombstones(unsigned Num) {
564 static_cast<DerivedT *>(this)->setNumTombstones(Num);
565 }
566
567 void incrementNumTombstones() { setNumTombstones(getNumTombstones() + 1); }
568
569 void decrementNumTombstones() { setNumTombstones(getNumTombstones() - 1); }
570
571 const BucketT *getBuckets() const {
572 return static_cast<const DerivedT *>(this)->getBuckets();
573 }
574
575 BucketT *getBuckets() { return static_cast<DerivedT *>(this)->getBuckets(); }
576
577 unsigned getNumBuckets() const {
578 return static_cast<const DerivedT *>(this)->getNumBuckets();
579 }
580
581 BucketT *getBucketsEnd() { return getBuckets() + getNumBuckets(); }
582
583 const BucketT *getBucketsEnd() const {
584 return getBuckets() + getNumBuckets();
585 }
586
587 void grow(unsigned AtLeast) { static_cast<DerivedT *>(this)->grow(AtLeast); }
588
589 void shrink_and_clear() { static_cast<DerivedT *>(this)->shrink_and_clear(); }
590
591 template <typename KeyArg, typename... ValueArgs>
592 BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
593 ValueArgs &&...Values) {
594 TheBucket = InsertIntoBucketImpl(Key, TheBucket);
595
596 TheBucket->getFirst() = std::forward<KeyArg>(Key);
597 ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
598 return TheBucket;
599 }
600
601 template <typename LookupKeyT>
602 BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
603 ValueT &&Value, LookupKeyT &Lookup) {
604 TheBucket = InsertIntoBucketImpl(Lookup, TheBucket);
605
606 TheBucket->getFirst() = std::move(Key);
607 ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
608 return TheBucket;
609 }
610
611 template <typename LookupKeyT>
612 BucketT *InsertIntoBucketImpl(const LookupKeyT &Lookup, BucketT *TheBucket) {
613 incrementEpoch();
614
615 // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
616 // the buckets are empty (meaning that many are filled with tombstones),
617 // grow the table.
618 //
619 // The later case is tricky. For example, if we had one empty bucket with
620 // tons of tombstones, failing lookups (e.g. for insertion) would have to
621 // probe almost the entire table until it found the empty bucket. If the
622 // table completely filled with tombstones, no lookup would ever succeed,
623 // causing infinite loops in lookup.
624 unsigned NewNumEntries = getNumEntries() + 1;
625 unsigned NumBuckets = getNumBuckets();
626 if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
627 this->grow(NumBuckets * 2);
628 LookupBucketFor(Lookup, TheBucket);
629 NumBuckets = getNumBuckets();
630 } else if (LLVM_UNLIKELY(NumBuckets -
631 (NewNumEntries + getNumTombstones()) <=
632 NumBuckets / 8)) {
633 this->grow(NumBuckets);
634 LookupBucketFor(Lookup, TheBucket);
635 }
636 assert(TheBucket);
637
638 // Only update the state after we've grown our bucket space appropriately
639 // so that when growing buckets we have self-consistent entry count.
640 incrementNumEntries();
641
642 // If we are writing over a tombstone, remember this.
643 const KeyT EmptyKey = getEmptyKey();
644 if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
645 decrementNumTombstones();
646
647 return TheBucket;
648 }
649
650 template <typename LookupKeyT> BucketT *doFind(const LookupKeyT &Val) {
651 BucketT *BucketsPtr = getBuckets();
652 const unsigned NumBuckets = getNumBuckets();
653 if (NumBuckets == 0)
654 return nullptr;
655
656 const KeyT EmptyKey = getEmptyKey();
657 unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
658 unsigned ProbeAmt = 1;
659 while (true) {
660 BucketT *Bucket = BucketsPtr + BucketNo;
661 if (LLVM_LIKELY(KeyInfoT::isEqual(Val, Bucket->getFirst())))
662 return Bucket;
663 if (LLVM_LIKELY(KeyInfoT::isEqual(Bucket->getFirst(), EmptyKey)))
664 return nullptr;
665
666 // Otherwise, it's a hash collision or a tombstone, continue quadratic
667 // probing.
668 BucketNo += ProbeAmt++;
669 BucketNo &= NumBuckets - 1;
670 }
671 }
672
673 template <typename LookupKeyT>
674 const BucketT *doFind(const LookupKeyT &Val) const {
675 return const_cast<DenseMapBase *>(this)->doFind(Val); // NOLINT
676 }
677
678 /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
679 /// FoundBucket. If the bucket contains the key and a value, this returns
680 /// true, otherwise it returns a bucket with an empty marker or tombstone and
681 /// returns false.
682 template <typename LookupKeyT>
683 bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
684 BucketT *BucketsPtr = getBuckets();
685 const unsigned NumBuckets = getNumBuckets();
686
687 if (NumBuckets == 0) {
688 FoundBucket = nullptr;
689 return false;
690 }
691
692 // FoundTombstone - Keep track of whether we find a tombstone while probing.
693 BucketT *FoundTombstone = nullptr;
694 const KeyT EmptyKey = getEmptyKey();
695 const KeyT TombstoneKey = getTombstoneKey();
696 assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
697 !KeyInfoT::isEqual(Val, TombstoneKey) &&
698 "Empty/Tombstone value shouldn't be inserted into map!");
699
700 unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
701 unsigned ProbeAmt = 1;
702 while (true) {
703 BucketT *ThisBucket = BucketsPtr + BucketNo;
704 // Found Val's bucket? If so, return it.
705 if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
706 FoundBucket = ThisBucket;
707 return true;
708 }
709
710 // If we found an empty bucket, the key doesn't exist in the set.
711 // Insert it and return the default value.
712 if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
713 // If we've already seen a tombstone while probing, fill it in instead
714 // of the empty bucket we eventually probed to.
715 FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
716 return false;
717 }
718
719 // If this is a tombstone, remember it. If Val ends up not in the map, we
720 // prefer to return it than something that would require more probing.
721 if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
722 !FoundTombstone)
723 FoundTombstone = ThisBucket; // Remember the first tombstone found.
724
725 // Otherwise, it's a hash collision or a tombstone, continue quadratic
726 // probing.
727 BucketNo += ProbeAmt++;
728 BucketNo &= (NumBuckets - 1);
729 }
730 }
731
732public:
733 /// Return the approximate size (in bytes) of the actual map.
734 /// This is just the raw memory used by DenseMap.
735 /// If entries are pointers to objects, the size of the referenced objects
736 /// are not included.
737 size_t getMemorySize() const { return getNumBuckets() * sizeof(BucketT); }
738};
739
740/// Equality comparison for DenseMap.
741///
742/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
743/// is also in RHS, and that no additional pairs are in RHS.
744/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
745/// complexity is linear, worst case is O(N^2) (if every hash collides).
746template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
747 typename BucketT>
748bool operator==(
749 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
750 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
751 if (LHS.size() != RHS.size())
752 return false;
753
754 for (auto &KV : LHS) {
755 auto I = RHS.find(KV.first);
756 if (I == RHS.end() || I->second != KV.second)
757 return false;
758 }
759
760 return true;
761}
762
763/// Inequality comparison for DenseMap.
764///
765/// Equivalent to !(LHS == RHS). See operator== for performance notes.
766template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
767 typename BucketT>
768bool operator!=(
769 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
770 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
771 return !(LHS == RHS);
772}
773
774template <typename KeyT, typename ValueT,
775 typename KeyInfoT = DenseMapInfo<KeyT>,
776 typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
777class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
778 KeyT, ValueT, KeyInfoT, BucketT> {
779 friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
780
781 // Lift some types from the dependent base class into this class for
782 // simplicity of referring to them.
783 using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
784
785 BucketT *Buckets;
786 unsigned NumEntries;
787 unsigned NumTombstones;
788 unsigned NumBuckets;
789
790public:
791 /// Create a DenseMap with an optional \p InitialReserve that guarantee that
792 /// this number of elements can be inserted in the map without grow()
793 explicit DenseMap(unsigned InitialReserve = 0) { init(InitNumEntries: InitialReserve); }
794
795 DenseMap(const DenseMap &other) : BaseT() {
796 init(InitNumEntries: 0);
797 copyFrom(other);
798 }
799
800 DenseMap(DenseMap &&other) : BaseT() {
801 init(InitNumEntries: 0);
802 swap(RHS&: other);
803 }
804
805 template <typename InputIt> DenseMap(const InputIt &I, const InputIt &E) {
806 init(InitNumEntries: std::distance(I, E));
807 this->insert(I, E);
808 }
809
810 DenseMap(std::initializer_list<typename BaseT::value_type> Vals) {
811 init(InitNumEntries: Vals.size());
812 this->insert(Vals.begin(), Vals.end());
813 }
814
815 ~DenseMap() {
816 this->destroyAll();
817 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
818 }
819
820 void swap(DenseMap &RHS) {
821 this->incrementEpoch();
822 RHS.incrementEpoch();
823 std::swap(Buckets, RHS.Buckets);
824 std::swap(NumEntries, RHS.NumEntries);
825 std::swap(NumTombstones, RHS.NumTombstones);
826 std::swap(NumBuckets, RHS.NumBuckets);
827 }
828
829 DenseMap &operator=(const DenseMap &other) {
830 if (&other != this)
831 copyFrom(other);
832 return *this;
833 }
834
835 DenseMap &operator=(DenseMap &&other) {
836 this->destroyAll();
837 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
838 init(InitNumEntries: 0);
839 swap(RHS&: other);
840 return *this;
841 }
842
843 void copyFrom(const DenseMap &other) {
844 this->destroyAll();
845 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
846 if (allocateBuckets(Num: other.NumBuckets)) {
847 this->BaseT::copyFrom(other);
848 } else {
849 NumEntries = 0;
850 NumTombstones = 0;
851 }
852 }
853
854 void grow(unsigned AtLeast) {
855 unsigned OldNumBuckets = NumBuckets;
856 BucketT *OldBuckets = Buckets;
857
858 allocateBuckets(Num: std::max<unsigned>(
859 a: 64, b: static_cast<unsigned>(NextPowerOf2(A: AtLeast - 1))));
860 assert(Buckets);
861 if (!OldBuckets) {
862 this->BaseT::initEmpty();
863 return;
864 }
865
866 this->moveFromOldBuckets(OldBuckets, OldBuckets + OldNumBuckets);
867
868 // Free the old table.
869 deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
870 alignof(BucketT));
871 }
872
873 void shrink_and_clear() {
874 unsigned OldNumBuckets = NumBuckets;
875 unsigned OldNumEntries = NumEntries;
876 this->destroyAll();
877
878 // Reduce the number of buckets.
879 unsigned NewNumBuckets = 0;
880 if (OldNumEntries)
881 NewNumBuckets = std::max(a: 64, b: 1 << (Log2_32_Ceil(Value: OldNumEntries) + 1));
882 if (NewNumBuckets == NumBuckets) {
883 this->BaseT::initEmpty();
884 return;
885 }
886
887 deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
888 alignof(BucketT));
889 init(InitNumEntries: NewNumBuckets);
890 }
891
892private:
893 unsigned getNumEntries() const { return NumEntries; }
894
895 void setNumEntries(unsigned Num) { NumEntries = Num; }
896
897 unsigned getNumTombstones() const { return NumTombstones; }
898
899 void setNumTombstones(unsigned Num) { NumTombstones = Num; }
900
901 BucketT *getBuckets() const { return Buckets; }
902
903 unsigned getNumBuckets() const { return NumBuckets; }
904
905 bool allocateBuckets(unsigned Num) {
906 NumBuckets = Num;
907 if (NumBuckets == 0) {
908 Buckets = nullptr;
909 return false;
910 }
911
912 Buckets = static_cast<BucketT *>(
913 allocate_buffer(Size: sizeof(BucketT) * NumBuckets, Alignment: alignof(BucketT)));
914 return true;
915 }
916
917 void init(unsigned InitNumEntries) {
918 auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
919 if (allocateBuckets(Num: InitBuckets)) {
920 this->BaseT::initEmpty();
921 } else {
922 NumEntries = 0;
923 NumTombstones = 0;
924 }
925 }
926};
927
928template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
929 typename KeyInfoT = DenseMapInfo<KeyT>,
930 typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
931class SmallDenseMap
932 : public DenseMapBase<
933 SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
934 ValueT, KeyInfoT, BucketT> {
935 friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
936
937 // Lift some types from the dependent base class into this class for
938 // simplicity of referring to them.
939 using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
940
941 static_assert(isPowerOf2_64(Value: InlineBuckets),
942 "InlineBuckets must be a power of 2.");
943
944 unsigned Small : 1;
945 unsigned NumEntries : 31;
946 unsigned NumTombstones;
947
948 struct LargeRep {
949 BucketT *Buckets;
950 unsigned NumBuckets;
951 };
952
953 /// A "union" of an inline bucket array and the struct representing
954 /// a large bucket. This union will be discriminated by the 'Small' bit.
955 AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
956
957public:
958 explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
959 if (NumInitBuckets > InlineBuckets)
960 NumInitBuckets = llvm::bit_ceil(Value: NumInitBuckets);
961 init(InitBuckets: NumInitBuckets);
962 }
963
964 SmallDenseMap(const SmallDenseMap &other) : BaseT() {
965 init(InitBuckets: 0);
966 copyFrom(other);
967 }
968
969 SmallDenseMap(SmallDenseMap &&other) : BaseT() {
970 init(InitBuckets: 0);
971 swap(RHS&: other);
972 }
973
974 template <typename InputIt>
975 SmallDenseMap(const InputIt &I, const InputIt &E) {
976 init(InitBuckets: NextPowerOf2(std::distance(I, E)));
977 this->insert(I, E);
978 }
979
980 SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
981 : SmallDenseMap(Vals.begin(), Vals.end()) {}
982
983 ~SmallDenseMap() {
984 this->destroyAll();
985 deallocateBuckets();
986 }
987
988 void swap(SmallDenseMap &RHS) {
989 unsigned TmpNumEntries = RHS.NumEntries;
990 RHS.NumEntries = NumEntries;
991 NumEntries = TmpNumEntries;
992 std::swap(NumTombstones, RHS.NumTombstones);
993
994 const KeyT EmptyKey = this->getEmptyKey();
995 const KeyT TombstoneKey = this->getTombstoneKey();
996 if (Small && RHS.Small) {
997 // If we're swapping inline bucket arrays, we have to cope with some of
998 // the tricky bits of DenseMap's storage system: the buckets are not
999 // fully initialized. Thus we swap every key, but we may have
1000 // a one-directional move of the value.
1001 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
1002 BucketT *LHSB = &getInlineBuckets()[i],
1003 *RHSB = &RHS.getInlineBuckets()[i];
1004 bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
1005 !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
1006 bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
1007 !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
1008 if (hasLHSValue && hasRHSValue) {
1009 // Swap together if we can...
1010 std::swap(*LHSB, *RHSB);
1011 continue;
1012 }
1013 // Swap separately and handle any asymmetry.
1014 std::swap(LHSB->getFirst(), RHSB->getFirst());
1015 if (hasLHSValue) {
1016 ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
1017 LHSB->getSecond().~ValueT();
1018 } else if (hasRHSValue) {
1019 ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
1020 RHSB->getSecond().~ValueT();
1021 }
1022 }
1023 return;
1024 }
1025 if (!Small && !RHS.Small) {
1026 std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
1027 std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
1028 return;
1029 }
1030
1031 SmallDenseMap &SmallSide = Small ? *this : RHS;
1032 SmallDenseMap &LargeSide = Small ? RHS : *this;
1033
1034 // First stash the large side's rep and move the small side across.
1035 LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
1036 LargeSide.getLargeRep()->~LargeRep();
1037 LargeSide.Small = true;
1038 // This is similar to the standard move-from-old-buckets, but the bucket
1039 // count hasn't actually rotated in this case. So we have to carefully
1040 // move construct the keys and values into their new locations, but there
1041 // is no need to re-hash things.
1042 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
1043 BucketT *NewB = &LargeSide.getInlineBuckets()[i],
1044 *OldB = &SmallSide.getInlineBuckets()[i];
1045 ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
1046 OldB->getFirst().~KeyT();
1047 if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
1048 !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
1049 ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
1050 OldB->getSecond().~ValueT();
1051 }
1052 }
1053
1054 // The hard part of moving the small buckets across is done, just move
1055 // the TmpRep into its new home.
1056 SmallSide.Small = false;
1057 new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
1058 }
1059
1060 SmallDenseMap &operator=(const SmallDenseMap &other) {
1061 if (&other != this)
1062 copyFrom(other);
1063 return *this;
1064 }
1065
1066 SmallDenseMap &operator=(SmallDenseMap &&other) {
1067 this->destroyAll();
1068 deallocateBuckets();
1069 init(InitBuckets: 0);
1070 swap(RHS&: other);
1071 return *this;
1072 }
1073
1074 void copyFrom(const SmallDenseMap &other) {
1075 this->destroyAll();
1076 deallocateBuckets();
1077 Small = true;
1078 if (other.getNumBuckets() > InlineBuckets) {
1079 Small = false;
1080 new (getLargeRep()) LargeRep(allocateBuckets(Num: other.getNumBuckets()));
1081 }
1082 this->BaseT::copyFrom(other);
1083 }
1084
1085 void init(unsigned InitBuckets) {
1086 Small = true;
1087 if (InitBuckets > InlineBuckets) {
1088 Small = false;
1089 new (getLargeRep()) LargeRep(allocateBuckets(Num: InitBuckets));
1090 }
1091 this->BaseT::initEmpty();
1092 }
1093
1094 void grow(unsigned AtLeast) {
1095 if (AtLeast > InlineBuckets)
1096 AtLeast = std::max<unsigned>(a: 64, b: NextPowerOf2(A: AtLeast - 1));
1097
1098 if (Small) {
1099 // First move the inline buckets into a temporary storage.
1100 AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
1101 BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
1102 BucketT *TmpEnd = TmpBegin;
1103
1104 // Loop over the buckets, moving non-empty, non-tombstones into the
1105 // temporary storage. Have the loop move the TmpEnd forward as it goes.
1106 const KeyT EmptyKey = this->getEmptyKey();
1107 const KeyT TombstoneKey = this->getTombstoneKey();
1108 for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
1109 if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
1110 !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
1111 assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
1112 "Too many inline buckets!");
1113 ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
1114 ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
1115 ++TmpEnd;
1116 P->getSecond().~ValueT();
1117 }
1118 P->getFirst().~KeyT();
1119 }
1120
1121 // AtLeast == InlineBuckets can happen if there are many tombstones,
1122 // and grow() is used to remove them. Usually we always switch to the
1123 // large rep here.
1124 if (AtLeast > InlineBuckets) {
1125 Small = false;
1126 new (getLargeRep()) LargeRep(allocateBuckets(Num: AtLeast));
1127 }
1128 this->moveFromOldBuckets(TmpBegin, TmpEnd);
1129 return;
1130 }
1131
1132 LargeRep OldRep = std::move(*getLargeRep());
1133 getLargeRep()->~LargeRep();
1134 if (AtLeast <= InlineBuckets) {
1135 Small = true;
1136 } else {
1137 new (getLargeRep()) LargeRep(allocateBuckets(Num: AtLeast));
1138 }
1139
1140 this->moveFromOldBuckets(OldRep.Buckets,
1141 OldRep.Buckets + OldRep.NumBuckets);
1142
1143 // Free the old table.
1144 deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
1145 alignof(BucketT));
1146 }
1147
1148 void shrink_and_clear() {
1149 unsigned OldSize = this->size();
1150 this->destroyAll();
1151
1152 // Reduce the number of buckets.
1153 unsigned NewNumBuckets = 0;
1154 if (OldSize) {
1155 NewNumBuckets = 1 << (Log2_32_Ceil(Value: OldSize) + 1);
1156 if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
1157 NewNumBuckets = 64;
1158 }
1159 if ((Small && NewNumBuckets <= InlineBuckets) ||
1160 (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
1161 this->BaseT::initEmpty();
1162 return;
1163 }
1164
1165 deallocateBuckets();
1166 init(InitBuckets: NewNumBuckets);
1167 }
1168
1169private:
1170 unsigned getNumEntries() const { return NumEntries; }
1171
1172 void setNumEntries(unsigned Num) {
1173 // NumEntries is hardcoded to be 31 bits wide.
1174 assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
1175 NumEntries = Num;
1176 }
1177
1178 unsigned getNumTombstones() const { return NumTombstones; }
1179
1180 void setNumTombstones(unsigned Num) { NumTombstones = Num; }
1181
1182 const BucketT *getInlineBuckets() const {
1183 assert(Small);
1184 // Note that this cast does not violate aliasing rules as we assert that
1185 // the memory's dynamic type is the small, inline bucket buffer, and the
1186 // 'storage' is a POD containing a char buffer.
1187 return reinterpret_cast<const BucketT *>(&storage);
1188 }
1189
1190 BucketT *getInlineBuckets() {
1191 return const_cast<BucketT *>(
1192 const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
1193 }
1194
1195 const LargeRep *getLargeRep() const {
1196 assert(!Small);
1197 // Note, same rule about aliasing as with getInlineBuckets.
1198 return reinterpret_cast<const LargeRep *>(&storage);
1199 }
1200
1201 LargeRep *getLargeRep() {
1202 return const_cast<LargeRep *>(
1203 const_cast<const SmallDenseMap *>(this)->getLargeRep());
1204 }
1205
1206 const BucketT *getBuckets() const {
1207 return Small ? getInlineBuckets() : getLargeRep()->Buckets;
1208 }
1209
1210 BucketT *getBuckets() {
1211 return const_cast<BucketT *>(
1212 const_cast<const SmallDenseMap *>(this)->getBuckets());
1213 }
1214
1215 unsigned getNumBuckets() const {
1216 return Small ? InlineBuckets : getLargeRep()->NumBuckets;
1217 }
1218
1219 void deallocateBuckets() {
1220 if (Small)
1221 return;
1222
1223 deallocate_buffer(getLargeRep()->Buckets,
1224 sizeof(BucketT) * getLargeRep()->NumBuckets,
1225 alignof(BucketT));
1226 getLargeRep()->~LargeRep();
1227 }
1228
1229 LargeRep allocateBuckets(unsigned Num) {
1230 assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
1231 LargeRep Rep = {static_cast<BucketT *>(allocate_buffer(
1232 Size: sizeof(BucketT) * Num, Alignment: alignof(BucketT))),
1233 Num};
1234 return Rep;
1235 }
1236};
1237
/// Forward iterator over the occupied (non-empty, non-tombstone) buckets of a
/// DenseMap-family container. Inherits DebugEpochBase::HandleBase so that, in
/// checked builds, dereference/increment can assert the map has not been
/// mutated since the iterator was obtained (isHandleInSync()).
template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
          bool IsConst>
class DenseMapIterator : DebugEpochBase::HandleBase {
  // Both constnesses are friends so the converting constructor below can read
  // Ptr/End of the other instantiation.
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;

public:
  using difference_type = ptrdiff_t;
  using value_type = std::conditional_t<IsConst, const Bucket, Bucket>;
  using pointer = value_type *;
  using reference = value_type &;
  using iterator_category = std::forward_iterator_tag;

private:
  // Current position and the end of the bucket range. Under reverse
  // iteration (shouldReverseIterate<KeyT>()) the roles flip: Ptr points one
  // PAST the bucket to dereference (see operator*'s Ptr[-1]) and End is the
  // lowest address the iterator may retreat to.
  pointer Ptr = nullptr;
  pointer End = nullptr;

public:
  DenseMapIterator() = default;

  // Construct from a raw bucket position. Unless NoAdvance is set, skip
  // forward (or backward, when reverse iteration is enabled) past empty and
  // tombstone buckets so the iterator lands on a live entry or End.
  DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
                   bool NoAdvance = false)
      : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
    assert(isHandleInSync() && "invalid construction!");

    if (NoAdvance)
      return;
    if (shouldReverseIterate<KeyT>()) {
      RetreatPastEmptyBuckets();
      return;
    }
    AdvancePastEmptyBuckets();
  }

  // Converting ctor from non-const iterators to const iterators. SFINAE'd out
  // for const iterator destinations so it doesn't end up as a user defined copy
  // constructor.
  template <bool IsConstSrc,
            typename = std::enable_if_t<!IsConstSrc && IsConst>>
  DenseMapIterator(
      const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
      : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}

  reference operator*() const {
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "dereferencing end() iterator");
    // Under reverse iteration Ptr is one past the current bucket.
    if (shouldReverseIterate<KeyT>())
      return Ptr[-1];
    return *Ptr;
  }
  pointer operator->() const {
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "dereferencing end() iterator");
    if (shouldReverseIterate<KeyT>())
      return &(Ptr[-1]);
    return Ptr;
  }

  friend bool operator==(const DenseMapIterator &LHS,
                         const DenseMapIterator &RHS) {
    assert((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!");
    assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
    assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
           "comparing incomparable iterators!");
    return LHS.Ptr == RHS.Ptr;
  }

  friend bool operator!=(const DenseMapIterator &LHS,
                         const DenseMapIterator &RHS) {
    return !(LHS == RHS);
  }

  inline DenseMapIterator &operator++() { // Preincrement
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "incrementing end() iterator");
    if (shouldReverseIterate<KeyT>()) {
      --Ptr;
      RetreatPastEmptyBuckets();
      return *this;
    }
    ++Ptr;
    AdvancePastEmptyBuckets();
    return *this;
  }
  DenseMapIterator operator++(int) { // Postincrement
    assert(isHandleInSync() && "invalid iterator access!");
    DenseMapIterator tmp = *this;
    ++*this;
    return tmp;
  }

private:
  // Move Ptr forward until it reaches End or a live (non-empty,
  // non-tombstone) bucket.
  void AdvancePastEmptyBuckets() {
    assert(Ptr <= End);
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
                          KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
      ++Ptr;
  }

  // Reverse-iteration counterpart: move Ptr backward until Ptr[-1] is a live
  // bucket or Ptr reaches End (which is the low end of the range here).
  void RetreatPastEmptyBuckets() {
    assert(Ptr >= End);
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
                          KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
      --Ptr;
  }
};
1350
/// Convenience free function reporting the memory footprint of \p X's bucket
/// storage, as returned by DenseMap::getMemorySize().
template <typename KeyT, typename ValueT, typename KeyInfoT>
inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
  return X.getMemorySize();
}
1355
1356} // end namespace llvm
1357
1358#endif // LLVM_ADT_DENSEMAP_H
1359