WPILibC++ 2025.2.1
DenseMap.h
1//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the DenseMap class.
11///
12//===----------------------------------------------------------------------===//
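// A short usage sketch (illustrative; relies on the DenseMapInfo<int>
// specialization provided by wpi/DenseMapInfo.h):
//
//   wpi::DenseMap<int, std::string> Map;
//   Map[1] = "one";                      // default-constructs the value, then assigns
//   Map.insert({2, std::string("two")}); // no-op if the key is already present
//   if (auto It = Map.find(2); It != Map.end())
//     It->second = "TWO";                // iterators point at key/value buckets
//   Map.erase(1);                        // marks a tombstone; size() drops to 1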
13
14#ifndef WPIUTIL_WPI_DENSEMAP_H
15#define WPIUTIL_WPI_DENSEMAP_H
16
17#include "wpi/DenseMapInfo.h"
18#include "wpi/EpochTracker.h"
19#include "wpi/AlignOf.h"
20#include "wpi/Compiler.h"
21#include "wpi/MathExtras.h"
22#include "wpi/MemAlloc.h"
23#include "wpi/ReverseIteration.h"
24#include "wpi/type_traits.h"
25#include <algorithm>
26#include <bit>
27#include <cassert>
28#include <cstddef>
29#include <cstring>
30#include <initializer_list>
31#include <iterator>
32#include <new>
33#include <type_traits>
34#include <utility>
35
36namespace wpi {
37
38namespace detail {
39
40// We extend a pair to allow users to override the bucket type with their own
41// implementation without requiring two members.
42template <typename KeyT, typename ValueT>
43struct DenseMapPair : public std::pair<KeyT, ValueT> {
44 using std::pair<KeyT, ValueT>::pair;
45
46 KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
47 const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
48 ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
49 const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
50};
51
52} // end namespace detail
53
54template <typename KeyT, typename ValueT,
55 typename KeyInfoT = DenseMapInfo<KeyT>,
56 typename BucketT = wpi::detail::DenseMapPair<KeyT, ValueT>,
57 bool IsConst = false>
58class DenseMapIterator;
59
60template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
61 typename BucketT>
62class DenseMapBase : public DebugEpochBase {
63 template <typename T>
64 using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
65
66public:
67 using size_type = unsigned;
68 using key_type = KeyT;
69 using mapped_type = ValueT;
70 using value_type = BucketT;
71
72 using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
73 using const_iterator =
74 DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;
75
76 inline iterator begin() {
77 // When the map is empty, avoid the overhead of advancing/retreating past
78 // empty buckets.
79 if (empty())
80 return end();
81 if (shouldReverseIterate<KeyT>())
82 return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
83 return makeIterator(getBuckets(), getBucketsEnd(), *this);
84 }
85 inline iterator end() {
86 return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
87 }
88 inline const_iterator begin() const {
89 if (empty())
90 return end();
91 if (shouldReverseIterate<KeyT>())
92 return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
93 return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
94 }
95 inline const_iterator end() const {
96 return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
97 }
98
99 [[nodiscard]] bool empty() const { return getNumEntries() == 0; }
100 unsigned size() const { return getNumEntries(); }
101
102 /// Grow the densemap so that it can contain at least \p NumEntries items
103 /// before resizing again.
104 void reserve(size_type NumEntries) {
105 auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
106 incrementEpoch();
107 if (NumBuckets > getNumBuckets())
108 grow(NumBuckets);
109 }
110
111 void clear() {
112 incrementEpoch();
113 if (getNumEntries() == 0 && getNumTombstones() == 0) return;
114
115 // If the capacity of the array is huge, and the # elements used is small,
116 // shrink the array.
117 if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
118 shrink_and_clear();
119 return;
120 }
121
122 const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
123 if (std::is_trivially_destructible<ValueT>::value) {
124 // Use a simpler loop when values don't need destruction.
125 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
126 P->getFirst() = EmptyKey;
127 } else {
128 [[maybe_unused]] unsigned NumEntries = getNumEntries();
129 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
130 if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
131 if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
132 P->getSecond().~ValueT();
133 --NumEntries;
134 }
135 P->getFirst() = EmptyKey;
136 }
137 }
138 assert(NumEntries == 0 && "Node count imbalance!");
139 (void)NumEntries;
140 }
141 setNumEntries(0);
142 setNumTombstones(0);
143 }
144
145 /// Return true if the specified key is in the map, false otherwise.
146 bool contains(const_arg_type_t<KeyT> Val) const {
147 const BucketT *TheBucket;
148 return LookupBucketFor(Val, TheBucket);
149 }
150
151 /// Return 1 if the specified key is in the map, 0 otherwise.
152 size_type count(const_arg_type_t<KeyT> Val) const {
153 return contains(Val) ? 1 : 0;
154 }
155
156 iterator find(const_arg_type_t<KeyT> Val) {
157 BucketT *TheBucket;
158 if (LookupBucketFor(Val, TheBucket))
159 return makeIterator(TheBucket,
160 shouldReverseIterate<KeyT>() ? getBuckets()
161 : getBucketsEnd(),
162 *this, true);
163 return end();
164 }
165 const_iterator find(const_arg_type_t<KeyT> Val) const {
166 const BucketT *TheBucket;
167 if (LookupBucketFor(Val, TheBucket))
168 return makeConstIterator(TheBucket,
169 shouldReverseIterate<KeyT>() ? getBuckets()
170 : getBucketsEnd(),
171 *this, true);
172 return end();
173 }
174
175 /// Alternate version of find() which allows a different, and possibly
176 /// less expensive, key type.
177 /// The DenseMapInfo is responsible for supplying methods
178 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
179 /// type used.
180 template<class LookupKeyT>
181 iterator find_as(const LookupKeyT &Val) {
182 BucketT *TheBucket;
183 if (LookupBucketFor(Val, TheBucket))
184 return makeIterator(TheBucket,
185 shouldReverseIterate<KeyT>() ? getBuckets()
186 : getBucketsEnd(),
187 *this, true);
188 return end();
189 }
190 template<class LookupKeyT>
191 const_iterator find_as(const LookupKeyT &Val) const {
192 const BucketT *TheBucket;
193 if (LookupBucketFor(Val, TheBucket))
194 return makeConstIterator(TheBucket,
195 shouldReverseIterate<KeyT>() ? getBuckets()
196 : getBucketsEnd(),
197 *this, true);
198 return end();
199 }
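// A sketch of a find_as() client with a cheaper lookup type. ExpensiveKey,
// CheapRef and ExpensiveKeyInfo are hypothetical names used only for
// illustration; the point is the extra getHashValue/isEqual overloads the
// DenseMapInfo-style traits class must supply:
//
//   struct ExpensiveKey { std::string Text; };
//   struct CheapRef { std::string_view Text; };
//
//   struct ExpensiveKeyInfo {
//     static ExpensiveKey getEmptyKey();
//     static ExpensiveKey getTombstoneKey();
//     static unsigned getHashValue(const ExpensiveKey &K);
//     static unsigned getHashValue(const CheapRef &R);            // lookup-only
//     static bool isEqual(const ExpensiveKey &L, const ExpensiveKey &R);
//     static bool isEqual(const CheapRef &L, const ExpensiveKey &R);
//   };
//
//   wpi::DenseMap<ExpensiveKey, int, ExpensiveKeyInfo> M;
//   auto It = M.find_as(CheapRef{"needle"});  // no ExpensiveKey is constructed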
200
201 /// lookup - Return the entry for the specified key, or a default
202 /// constructed value if no such entry exists.
203 ValueT lookup(const_arg_type_t<KeyT> Val) const {
204 const BucketT *TheBucket;
205 if (LookupBucketFor(Val, TheBucket))
206 return TheBucket->getSecond();
207 return ValueT();
208 }
209
210 /// at - Return the entry for the specified key, or abort if no such
211 /// entry exists.
212 const ValueT &at(const_arg_type_t<KeyT> Val) const {
213 auto Iter = this->find(std::move(Val));
214 assert(Iter != this->end() && "DenseMap::at failed due to a missing key");
215 return Iter->second;
216 }
217
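// How lookup(), at() and operator[] (defined further below) differ, sketched
// on an int -> int map:
//
//   wpi::DenseMap<int, int> M;
//   int A = M.lookup(5);   // missing key: returns a value-initialized int (0),
//                          // does not insert
//   int &B = M[5];         // missing key: inserts {5, 0} and returns a reference
//   int C = M.at(5);       // key must already exist (asserts otherwise)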
218 // Inserts key,value pair into the map if the key isn't already in the map.
219 // If the key is already in the map, it returns false and doesn't update the
220 // value.
221 std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
222 return try_emplace(KV.first, KV.second);
223 }
224
225 // Inserts key,value pair into the map if the key isn't already in the map.
226 // If the key is already in the map, it returns false and doesn't update the
227 // value.
228 std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
229 return try_emplace(std::move(KV.first), std::move(KV.second));
230 }
231
232 // Inserts key,value pair into the map if the key isn't already in the map.
233 // The value is constructed in-place if the key is not in the map, otherwise
234 // it is not moved.
235 template <typename... Ts>
236 std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
237 BucketT *TheBucket;
238 if (LookupBucketFor(Key, TheBucket))
239 return std::make_pair(makeIterator(TheBucket,
240 shouldReverseIterate<KeyT>()
241 ? getBuckets()
242 : getBucketsEnd(),
243 *this, true),
244 false); // Already in map.
245
246 // Otherwise, insert the new element.
247 TheBucket =
248 InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
249 return std::make_pair(makeIterator(TheBucket,
250 shouldReverseIterate<KeyT>()
251 ? getBuckets()
252 : getBucketsEnd(),
253 *this, true),
254 true);
255 }
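// try_emplace() constructs the value in place only when the key is new. A
// sketch, where Widget is a hypothetical value type:
//
//   wpi::DenseMap<unsigned, std::unique_ptr<Widget>> M;
//   auto [It, Inserted] = M.try_emplace(7, std::make_unique<Widget>());
//   // Inserted == true: the unique_ptr argument was moved into the bucket.
//   auto [It2, Inserted2] = M.try_emplace(7, std::make_unique<Widget>());
//   // Inserted2 == false: the existing mapping is left untouched.
//   // Use insert_or_assign() (below) when the new value should overwrite it.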
256
257 // Inserts key,value pair into the map if the key isn't already in the map.
258 // The value is constructed in-place if the key is not in the map, otherwise
259 // it is not moved.
260 template <typename... Ts>
261 std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
262 BucketT *TheBucket;
263 if (LookupBucketFor(Key, TheBucket))
264 return std::make_pair(makeIterator(TheBucket,
265 shouldReverseIterate<KeyT>()
266 ? getBuckets()
267 : getBucketsEnd(),
268 *this, true),
269 false); // Already in map.
270
271 // Otherwise, insert the new element.
272 TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
273 return std::make_pair(makeIterator(TheBucket,
274 shouldReverseIterate<KeyT>()
275 ? getBuckets()
276 : getBucketsEnd(),
277 *this, true),
278 true);
279 }
280
281 /// Alternate version of insert() which allows a different, and possibly
282 /// less expensive, key type.
283 /// The DenseMapInfo is responsible for supplying methods
284 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
285 /// type used.
286 template <typename LookupKeyT>
287 std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
288 const LookupKeyT &Val) {
289 BucketT *TheBucket;
290 if (LookupBucketFor(Val, TheBucket))
291 return std::make_pair(makeIterator(TheBucket,
292 shouldReverseIterate<KeyT>()
293 ? getBuckets()
294 : getBucketsEnd(),
295 *this, true),
296 false); // Already in map.
297
298 // Otherwise, insert the new element.
299 TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
300 std::move(KV.second), Val);
301 return std::make_pair(makeIterator(TheBucket,
302 shouldReverseIterate<KeyT>()
303 ? getBuckets()
304 : getBucketsEnd(),
305 *this, true),
306 true);
307 }
308
309 /// insert - Range insertion of pairs.
310 template<typename InputIt>
311 void insert(InputIt I, InputIt E) {
312 for (; I != E; ++I)
313 insert(*I);
314 }
315
316 template <typename V>
317 std::pair<iterator, bool> insert_or_assign(const KeyT &Key, V &&Val) {
318 auto Ret = try_emplace(Key, std::forward<V>(Val));
319 if (!Ret.second)
320 Ret.first->second = std::forward<V>(Val);
321 return Ret;
322 }
323
324 template <typename V>
325 std::pair<iterator, bool> insert_or_assign(KeyT &&Key, V &&Val) {
326 auto Ret = try_emplace(std::move(Key), std::forward<V>(Val));
327 if (!Ret.second)
328 Ret.first->second = std::forward<V>(Val);
329 return Ret;
330 }
331
332 /// Returns the value associated with the key in the map if it exists. If it
333 /// does not exist, emplaces a default value for the key and returns a
334 /// reference to the newly created value.
335 ValueT &getOrInsertDefault(KeyT &&Key) {
336 return try_emplace(Key).first->second;
337 }
338
339 /// Returns the value associated with the key in the map if it exists. If it
340 /// does not exist, emplaces a default value for the key and returns a
341 /// reference to the newly created value.
342 ValueT &getOrInsertDefault(const KeyT &Key) {
343 return try_emplace(Key).first->second;
344 }
345
346 bool erase(const KeyT &Val) {
347 BucketT *TheBucket;
348 if (!LookupBucketFor(Val, TheBucket))
349 return false; // not in map.
350
351 TheBucket->getSecond().~ValueT();
352 TheBucket->getFirst() = getTombstoneKey();
353 decrementNumEntries();
354 incrementNumTombstones();
355 return true;
356 }
357 void erase(iterator I) {
358 BucketT *TheBucket = &*I;
359 TheBucket->getSecond().~ValueT();
360 TheBucket->getFirst() = getTombstoneKey();
361 decrementNumEntries();
362 incrementNumTombstones();
363 }
364
365 value_type& FindAndConstruct(const KeyT &Key) {
366 BucketT *TheBucket;
367 if (LookupBucketFor(Key, TheBucket))
368 return *TheBucket;
369
370 return *InsertIntoBucket(TheBucket, Key);
371 }
372
373 ValueT &operator[](const KeyT &Key) {
374 return FindAndConstruct(Key).second;
375 }
376
377 value_type& FindAndConstruct(KeyT &&Key) {
378 BucketT *TheBucket;
379 if (LookupBucketFor(Key, TheBucket))
380 return *TheBucket;
381
382 return *InsertIntoBucket(TheBucket, std::move(Key));
383 }
384
385 ValueT &operator[](KeyT &&Key) {
386 return FindAndConstruct(std::move(Key)).second;
387 }
388
389 /// isPointerIntoBucketsArray - Return true if the specified pointer points
390 /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
391 /// value in the DenseMap).
392 bool isPointerIntoBucketsArray(const void *Ptr) const {
393 return Ptr >= getBuckets() && Ptr < getBucketsEnd();
394 }
395
396 /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
397 /// array. In conjunction with the previous method, this can be used to
398 /// determine whether an insertion caused the DenseMap to reallocate.
399 const void *getPointerIntoBucketsArray() const { return getBuckets(); }
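// Together these allow a caller to detect whether an insertion reallocated the
// table. A sketch (Map, Key and Value are placeholders):
//
//   const void *Before = Map.getPointerIntoBucketsArray();
//   Map.try_emplace(Key, Value);  // may trigger grow()
//   bool Reallocated = !Map.isPointerIntoBucketsArray(Before);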
400
401protected:
402 DenseMapBase() = default;
403
404 void destroyAll() {
405 if (getNumBuckets() == 0) // Nothing to do.
406 return;
407
408 const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
409 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
410 if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
411 !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
412 P->getSecond().~ValueT();
413 P->getFirst().~KeyT();
414 }
415 }
416
417 void initEmpty() {
418 setNumEntries(0);
419 setNumTombstones(0);
420
421 assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
422 "# initial buckets must be a power of two!");
423 const KeyT EmptyKey = getEmptyKey();
424 for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
425 ::new (&B->getFirst()) KeyT(EmptyKey);
426 }
427
428 /// Returns the number of buckets to allocate to ensure that the DenseMap can
429 /// accommodate \p NumEntries without needing to grow().
430 unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
431 // Ensure that "NumEntries * 4 < NumBuckets * 3"
432 if (NumEntries == 0)
433 return 0;
434 // +1 is required because of the strict inequality: for example, if NumEntries
435 // is 48, then 48 * 4 / 3 == 64 buckets would violate it, so we round up from 65.
436 return static_cast<unsigned>(NextPowerOf2(NumEntries * 4 / 3 + 1));
437 }
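// Worked example of the formula above (illustrative): reserving 48 entries
// computes 48 * 4 / 3 + 1 == 65, and NextPowerOf2(65) == 128, so the map ends
// up with 128 buckets and stays below the 3/4 load factor:
//
//   wpi::DenseMap<int, int> M;
//   M.reserve(48);   // allocates 128 buckets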
438
439 void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
440 initEmpty();
441
442 // Insert all the old elements.
443 const KeyT EmptyKey = getEmptyKey();
444 const KeyT TombstoneKey = getTombstoneKey();
445 for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
446 if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
447 !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
448 // Insert the key/value into the new table.
449 BucketT *DestBucket;
450 bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
451 (void)FoundVal; // silence warning.
452 assert(!FoundVal && "Key already in new map?");
453 DestBucket->getFirst() = std::move(B->getFirst());
454 ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
455 incrementNumEntries();
456
457 // Free the value.
458 B->getSecond().~ValueT();
459 }
460 B->getFirst().~KeyT();
461 }
462 }
463
464 template <typename OtherBaseT>
465 void copyFrom(
466 const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
467 assert(&other != this);
468 assert(getNumBuckets() == other.getNumBuckets());
469
470 setNumEntries(other.getNumEntries());
471 setNumTombstones(other.getNumTombstones());
472
473 if (std::is_trivially_copyable<KeyT>::value &&
474 std::is_trivially_copyable<ValueT>::value)
475 memcpy(reinterpret_cast<void *>(getBuckets()), other.getBuckets(),
476 getNumBuckets() * sizeof(BucketT));
477 else
478 for (size_t i = 0; i < getNumBuckets(); ++i) {
479 ::new (&getBuckets()[i].getFirst())
480 KeyT(other.getBuckets()[i].getFirst());
481 if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
482 !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
483 ::new (&getBuckets()[i].getSecond())
484 ValueT(other.getBuckets()[i].getSecond());
485 }
486 }
487
488 static unsigned getHashValue(const KeyT &Val) {
489 return KeyInfoT::getHashValue(Val);
490 }
491
492 template<typename LookupKeyT>
493 static unsigned getHashValue(const LookupKeyT &Val) {
494 return KeyInfoT::getHashValue(Val);
495 }
496
497 static const KeyT getEmptyKey() {
498 static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
499 "Must pass the derived type to this template!");
500 return KeyInfoT::getEmptyKey();
501 }
502
503 static const KeyT getTombstoneKey() {
504 return KeyInfoT::getTombstoneKey();
505 }
506
507private:
508 iterator makeIterator(BucketT *P, BucketT *E,
509 DebugEpochBase &Epoch,
510 bool NoAdvance=false) {
511 if (shouldReverseIterate<KeyT>()) {
512 BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
513 return iterator(B, E, Epoch, NoAdvance);
514 }
515 return iterator(P, E, Epoch, NoAdvance);
516 }
517
518 const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
519 const DebugEpochBase &Epoch,
520 const bool NoAdvance=false) const {
521 if (shouldReverseIterate<KeyT>()) {
522 const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
523 return const_iterator(B, E, Epoch, NoAdvance);
524 }
525 return const_iterator(P, E, Epoch, NoAdvance);
526 }
527
528 unsigned getNumEntries() const {
529 return static_cast<const DerivedT *>(this)->getNumEntries();
530 }
531
532 void setNumEntries(unsigned Num) {
533 static_cast<DerivedT *>(this)->setNumEntries(Num);
534 }
535
536 void incrementNumEntries() {
537 setNumEntries(getNumEntries() + 1);
538 }
539
540 void decrementNumEntries() {
541 setNumEntries(getNumEntries() - 1);
542 }
543
544 unsigned getNumTombstones() const {
545 return static_cast<const DerivedT *>(this)->getNumTombstones();
546 }
547
548 void setNumTombstones(unsigned Num) {
549 static_cast<DerivedT *>(this)->setNumTombstones(Num);
550 }
551
552 void incrementNumTombstones() {
553 setNumTombstones(getNumTombstones() + 1);
554 }
555
556 void decrementNumTombstones() {
557 setNumTombstones(getNumTombstones() - 1);
558 }
559
560 const BucketT *getBuckets() const {
561 return static_cast<const DerivedT *>(this)->getBuckets();
562 }
563
564 BucketT *getBuckets() {
565 return static_cast<DerivedT *>(this)->getBuckets();
566 }
567
568 unsigned getNumBuckets() const {
569 return static_cast<const DerivedT *>(this)->getNumBuckets();
570 }
571
572 BucketT *getBucketsEnd() {
573 return getBuckets() + getNumBuckets();
574 }
575
576 const BucketT *getBucketsEnd() const {
577 return getBuckets() + getNumBuckets();
578 }
579
580 void grow(unsigned AtLeast) {
581 static_cast<DerivedT *>(this)->grow(AtLeast);
582 }
583
584 void shrink_and_clear() {
585 static_cast<DerivedT *>(this)->shrink_and_clear();
586 }
587
588 template <typename KeyArg, typename... ValueArgs>
589 BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
590 ValueArgs &&... Values) {
591 TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
592
593 TheBucket->getFirst() = std::forward<KeyArg>(Key);
594 ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
595 return TheBucket;
596 }
597
598 template <typename LookupKeyT>
599 BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
600 ValueT &&Value, LookupKeyT &Lookup) {
601 TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);
602
603 TheBucket->getFirst() = std::move(Key);
604 ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
605 return TheBucket;
606 }
607
608 template <typename LookupKeyT>
609 BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
610 BucketT *TheBucket) {
611 incrementEpoch();
612
613 // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
614 // the buckets are empty (meaning that many are filled with tombstones),
615 // grow the table.
616 //
617 // The latter case is tricky. For example, if we had one empty bucket with
618 // tons of tombstones, failing lookups (e.g. for insertion) would have to
619 // probe almost the entire table until they found the empty bucket. If the
620 // table were completely filled with tombstones, no lookup would ever
621 // succeed, causing infinite loops in lookup.
622 unsigned NewNumEntries = getNumEntries() + 1;
623 unsigned NumBuckets = getNumBuckets();
624 if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
625 this->grow(NumBuckets * 2);
626 LookupBucketFor(Lookup, TheBucket);
627 NumBuckets = getNumBuckets();
628 } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
629 NumBuckets/8)) {
630 this->grow(NumBuckets);
631 LookupBucketFor(Lookup, TheBucket);
632 }
633 assert(TheBucket);
634
635 // Only update the state after we've grown our bucket space appropriately
636 // so that when growing buckets we have a self-consistent entry count.
637 incrementNumEntries();
638
639 // If we are writing over a tombstone, remember this.
640 const KeyT EmptyKey = getEmptyKey();
641 if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
642 decrementNumTombstones();
643
644 return TheBucket;
645 }
646
647 /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
648 /// FoundBucket. If the bucket contains the key and a value, this returns
649 /// true; otherwise it returns false and sets FoundBucket to the bucket with
650 /// an empty marker or tombstone that the key should be inserted into.
651 template<typename LookupKeyT>
652 bool LookupBucketFor(const LookupKeyT &Val,
653 const BucketT *&FoundBucket) const {
654 const BucketT *BucketsPtr = getBuckets();
655 const unsigned NumBuckets = getNumBuckets();
656
657 if (NumBuckets == 0) {
658 FoundBucket = nullptr;
659 return false;
660 }
661
662 // FoundTombstone - Keep track of whether we find a tombstone while probing.
663 const BucketT *FoundTombstone = nullptr;
664 const KeyT EmptyKey = getEmptyKey();
665 const KeyT TombstoneKey = getTombstoneKey();
666 assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
667 !KeyInfoT::isEqual(Val, TombstoneKey) &&
668 "Empty/Tombstone value shouldn't be inserted into map!");
669
670 unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
671 unsigned ProbeAmt = 1;
672 while (true) {
673 const BucketT *ThisBucket = BucketsPtr + BucketNo;
674 // Found Val's bucket? If so, return it.
675 if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
676 FoundBucket = ThisBucket;
677 return true;
678 }
679
680 // If we found an empty bucket, the key doesn't exist in the map. Report
681 // failure, preferring an earlier tombstone (if any) as the insertion slot.
682 if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
683 // If we've already seen a tombstone while probing, fill it in instead
684 // of the empty bucket we eventually probed to.
685 FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
686 return false;
687 }
688
689 // If this is a tombstone, remember it. If Val ends up not in the map, we
690 // prefer to return it over something that would require more probing.
691 if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
692 !FoundTombstone)
693 FoundTombstone = ThisBucket; // Remember the first tombstone found.
694
695 // Otherwise, it's a hash collision or a tombstone; continue quadratic
696 // probing.
697 BucketNo += ProbeAmt++;
698 BucketNo &= (NumBuckets-1);
699 }
700 }
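// The probe walk above uses increasing increments (1, 2, 3, ...), so from a
// home bucket H in a 64-bucket table it visits H, H+1, H+3, H+6, H+10, ...
// (mod 64). With a power-of-two bucket count this sequence reaches every
// bucket, so the loop terminates as long as one empty bucket remains, which
// the grow-on-tombstones policy in InsertIntoBucketImpl() preserves. The same
// walk as a standalone sketch:
//
//   unsigned BucketNo = Hash & (NumBuckets - 1);
//   for (unsigned ProbeAmt = 1; /* until found or empty */; ++ProbeAmt)
//     BucketNo = (BucketNo + ProbeAmt) & (NumBuckets - 1);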
701
702 template <typename LookupKeyT>
703 bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
704 const BucketT *ConstFoundBucket;
705 bool Result = const_cast<const DenseMapBase *>(this)
706 ->LookupBucketFor(Val, ConstFoundBucket);
707 FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
708 return Result;
709 }
710
711public:
712 /// Return the approximate size (in bytes) of the actual map.
713 /// This is just the raw memory used by DenseMap.
714 /// If entries are pointers to objects, the size of the referenced objects
715 /// are not included.
716 size_t getMemorySize() const {
717 return getNumBuckets() * sizeof(BucketT);
718 }
719};
720
721/// Equality comparison for DenseMap.
722///
723/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
724/// is also in RHS, and that no additional pairs are in RHS.
725/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
726/// complexity is linear, worst case is O(N^2) (if every hash collides).
727template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
728 typename BucketT>
729bool operator==(
730 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
731 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
732 if (LHS.size() != RHS.size())
733 return false;
734
735 for (auto &KV : LHS) {
736 auto I = RHS.find(KV.first);
737 if (I == RHS.end() || I->second != KV.second)
738 return false;
739 }
740
741 return true;
742}
743
744/// Inequality comparison for DenseMap.
745///
746/// Equivalent to !(LHS == RHS). See operator== for performance notes.
747template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
748 typename BucketT>
749bool operator!=(
750 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
751 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
752 return !(LHS == RHS);
753}
754
755template <typename KeyT, typename ValueT,
756 typename KeyInfoT = DenseMapInfo<KeyT>,
757 typename BucketT = wpi::detail::DenseMapPair<KeyT, ValueT>>
758class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
759 KeyT, ValueT, KeyInfoT, BucketT> {
760 friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
761
762 // Lift some types from the dependent base class into this class for
763 // simplicity of referring to them.
764 using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
765
766 BucketT *Buckets;
767 unsigned NumEntries;
768 unsigned NumTombstones;
769 unsigned NumBuckets;
770
771public:
772 /// Create a DenseMap with an optional \p InitialReserve that guarantees that
773 /// this number of elements can be inserted into the map without calling grow().
774 explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }
775
776 DenseMap(const DenseMap &other) : BaseT() {
777 init(0);
778 copyFrom(other);
779 }
780
781 DenseMap(DenseMap &&other) : BaseT() {
782 init(0);
783 swap(other);
784 }
785
786 template<typename InputIt>
787 DenseMap(const InputIt &I, const InputIt &E) {
788 init(std::distance(I, E));
789 this->insert(I, E);
790 }
791
792 DenseMap(std::initializer_list<typename BaseT::value_type> Vals) {
793 init(Vals.size());
794 this->insert(Vals.begin(), Vals.end());
795 }
796
797 ~DenseMap() {
798 this->destroyAll();
799 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
800 }
801
802 void swap(DenseMap& RHS) {
803 this->incrementEpoch();
804 RHS.incrementEpoch();
805 std::swap(Buckets, RHS.Buckets);
806 std::swap(NumEntries, RHS.NumEntries);
807 std::swap(NumTombstones, RHS.NumTombstones);
808 std::swap(NumBuckets, RHS.NumBuckets);
809 }
810
811 DenseMap& operator=(const DenseMap& other) {
812 if (&other != this)
813 copyFrom(other);
814 return *this;
815 }
816
817 DenseMap& operator=(DenseMap &&other) {
818 this->destroyAll();
819 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
820 init(0);
821 swap(other);
822 return *this;
823 }
824
825 void copyFrom(const DenseMap& other) {
826 this->destroyAll();
827 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
828 if (allocateBuckets(other.NumBuckets)) {
829 this->BaseT::copyFrom(other);
830 } else {
831 NumEntries = 0;
832 NumTombstones = 0;
833 }
834 }
835
836 void init(unsigned InitNumEntries) {
837 auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
838 if (allocateBuckets(InitBuckets)) {
839 this->BaseT::initEmpty();
840 } else {
841 NumEntries = 0;
842 NumTombstones = 0;
843 }
844 }
845
846 void grow(unsigned AtLeast) {
847 unsigned OldNumBuckets = NumBuckets;
848 BucketT *OldBuckets = Buckets;
849
850 allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
851 assert(Buckets);
852 if (!OldBuckets) {
853 this->BaseT::initEmpty();
854 return;
855 }
856
857 this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
858
859 // Free the old table.
860 deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
861 alignof(BucketT));
862 }
863
864 void shrink_and_clear() {
865 unsigned OldNumBuckets = NumBuckets;
866 unsigned OldNumEntries = NumEntries;
867 this->destroyAll();
868
869 // Reduce the number of buckets.
870 unsigned NewNumBuckets = 0;
871 if (OldNumEntries)
872 NewNumBuckets = (std::max)(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
873 if (NewNumBuckets == NumBuckets) {
874 this->BaseT::initEmpty();
875 return;
876 }
877
878 deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
879 alignof(BucketT));
880 init(NewNumBuckets);
881 }
882
883private:
884 unsigned getNumEntries() const {
885 return NumEntries;
886 }
887
888 void setNumEntries(unsigned Num) {
889 NumEntries = Num;
890 }
891
892 unsigned getNumTombstones() const {
893 return NumTombstones;
894 }
895
896 void setNumTombstones(unsigned Num) {
897 NumTombstones = Num;
898 }
899
900 BucketT *getBuckets() const {
901 return Buckets;
902 }
903
904 unsigned getNumBuckets() const {
905 return NumBuckets;
906 }
907
908 bool allocateBuckets(unsigned Num) {
909 NumBuckets = Num;
910 if (NumBuckets == 0) {
911 Buckets = nullptr;
912 return false;
913 }
914
915 Buckets = static_cast<BucketT *>(
916 allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
917 return true;
918 }
919};
920
921template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
922 typename KeyInfoT = DenseMapInfo<KeyT>,
923 typename BucketT = wpi::detail::DenseMapPair<KeyT, ValueT>>
924class SmallDenseMap
925 : public DenseMapBase<
926 SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
927 ValueT, KeyInfoT, BucketT> {
928 friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
929
930 // Lift some types from the dependent base class into this class for
931 // simplicity of referring to them.
932 using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
933
934 static_assert(isPowerOf2_64(InlineBuckets),
935 "InlineBuckets must be a power of 2.");
936
937 unsigned Small : 1;
938 unsigned NumEntries : 31;
939 unsigned NumTombstones;
940
941 struct LargeRep {
942 BucketT *Buckets;
943 unsigned NumBuckets;
944 };
945
946 /// A "union" of an inline bucket array and the struct representing
947 /// a large bucket. This union will be discriminated by the 'Small' bit.
948 AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
949
950public:
951 explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
952 if (NumInitBuckets > InlineBuckets)
953 NumInitBuckets = std::bit_ceil(NumInitBuckets);
954 init(NumInitBuckets);
955 }
956
957 SmallDenseMap(const SmallDenseMap &other) : BaseT() {
958 init(0);
959 copyFrom(other);
960 }
961
962 SmallDenseMap(SmallDenseMap &&other) : BaseT() {
963 init(0);
964 swap(other);
965 }
966
967 template<typename InputIt>
968 SmallDenseMap(const InputIt &I, const InputIt &E) {
969 init(NextPowerOf2(std::distance(I, E)));
970 this->insert(I, E);
971 }
972
973 SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
974 : SmallDenseMap(Vals.begin(), Vals.end()) {}
975
976 ~SmallDenseMap() {
977 this->destroyAll();
978 deallocateBuckets();
979 }
980
981 void swap(SmallDenseMap& RHS) {
982 unsigned TmpNumEntries = RHS.NumEntries;
983 RHS.NumEntries = NumEntries;
984 NumEntries = TmpNumEntries;
985 std::swap(NumTombstones, RHS.NumTombstones);
986
987 const KeyT EmptyKey = this->getEmptyKey();
988 const KeyT TombstoneKey = this->getTombstoneKey();
989 if (Small && RHS.Small) {
990 // If we're swapping inline bucket arrays, we have to cope with some of
991 // the tricky bits of DenseMap's storage system: the buckets are not
992 // fully initialized. Thus we swap every key, but we may have
993 // a one-directional move of the value.
994 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
995 BucketT *LHSB = &getInlineBuckets()[i],
996 *RHSB = &RHS.getInlineBuckets()[i];
997 bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
998 !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
999 bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
1000 !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
1001 if (hasLHSValue && hasRHSValue) {
1002 // Swap together if we can...
1003 std::swap(*LHSB, *RHSB);
1004 continue;
1005 }
1006 // Swap separately and handle any asymmetry.
1007 std::swap(LHSB->getFirst(), RHSB->getFirst());
1008 if (hasLHSValue) {
1009 ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
1010 LHSB->getSecond().~ValueT();
1011 } else if (hasRHSValue) {
1012 ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
1013 RHSB->getSecond().~ValueT();
1014 }
1015 }
1016 return;
1017 }
1018 if (!Small && !RHS.Small) {
1019 std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
1020 std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
1021 return;
1022 }
1023
1024 SmallDenseMap &SmallSide = Small ? *this : RHS;
1025 SmallDenseMap &LargeSide = Small ? RHS : *this;
1026
1027 // First stash the large side's rep and move the small side across.
1028 LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
1029 LargeSide.getLargeRep()->~LargeRep();
1030 LargeSide.Small = true;
1031 // This is similar to the standard move-from-old-buckets, but the bucket
1032 // count hasn't actually rotated in this case. So we have to carefully
1033 // move construct the keys and values into their new locations, but there
1034 // is no need to re-hash things.
1035 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
1036 BucketT *NewB = &LargeSide.getInlineBuckets()[i],
1037 *OldB = &SmallSide.getInlineBuckets()[i];
1038 ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
1039 OldB->getFirst().~KeyT();
1040 if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
1041 !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
1042 ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
1043 OldB->getSecond().~ValueT();
1044 }
1045 }
1046
1047 // The hard part of moving the small buckets across is done, just move
1048 // the TmpRep into its new home.
1049 SmallSide.Small = false;
1050 new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
1051 }
1052
1053 SmallDenseMap& operator=(const SmallDenseMap& other) {
1054 if (&other != this)
1055 copyFrom(other);
1056 return *this;
1057 }
1058
1059 SmallDenseMap& operator=(SmallDenseMap &&other) {
1060 this->destroyAll();
1061 deallocateBuckets();
1062 init(0);
1063 swap(other);
1064 return *this;
1065 }
1066
1067 void copyFrom(const SmallDenseMap& other) {
1068 this->destroyAll();
1069 deallocateBuckets();
1070 Small = true;
1071 if (other.getNumBuckets() > InlineBuckets) {
1072 Small = false;
1073 new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
1074 }
1075 this->BaseT::copyFrom(other);
1076 }
1077
1078 void init(unsigned InitBuckets) {
1079 Small = true;
1080 if (InitBuckets > InlineBuckets) {
1081 Small = false;
1082 new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
1083 }
1084 this->BaseT::initEmpty();
1085 }
1086
1087 void grow(unsigned AtLeast) {
1088 if (AtLeast > InlineBuckets)
1089 AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));
1090
1091 if (Small) {
1092 // First move the inline buckets into a temporary storage.
1093 AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
1094 BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
1095 BucketT *TmpEnd = TmpBegin;
1096
1097 // Loop over the buckets, moving non-empty, non-tombstones into the
1098 // temporary storage. Have the loop move the TmpEnd forward as it goes.
1099 const KeyT EmptyKey = this->getEmptyKey();
1100 const KeyT TombstoneKey = this->getTombstoneKey();
1101 for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
1102 if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
1103 !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
1104 assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
1105 "Too many inline buckets!");
1106 ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
1107 ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
1108 ++TmpEnd;
1109 P->getSecond().~ValueT();
1110 }
1111 P->getFirst().~KeyT();
1112 }
1113
1114 // AtLeast == InlineBuckets can happen if there are many tombstones,
1115 // and grow() is used to remove them. Otherwise, we always switch to the
1116 // large rep here.
1117 if (AtLeast > InlineBuckets) {
1118 Small = false;
1119 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1120 }
1121 this->moveFromOldBuckets(TmpBegin, TmpEnd);
1122 return;
1123 }
1124
1125 LargeRep OldRep = std::move(*getLargeRep());
1126 getLargeRep()->~LargeRep();
1127 if (AtLeast <= InlineBuckets) {
1128 Small = true;
1129 } else {
1130 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1131 }
1132
1133 this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
1134
1135 // Free the old table.
1136 deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
1137 alignof(BucketT));
1138 }
1139
1140 void shrink_and_clear() {
1141 unsigned OldSize = this->size();
1142 this->destroyAll();
1143
1144 // Reduce the number of buckets.
1145 unsigned NewNumBuckets = 0;
1146 if (OldSize) {
1147 NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
1148 if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
1149 NewNumBuckets = 64;
1150 }
1151 if ((Small && NewNumBuckets <= InlineBuckets) ||
1152 (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
1153 this->BaseT::initEmpty();
1154 return;
1155 }
1156
1157 deallocateBuckets();
1158 init(NewNumBuckets);
1159 }
1160
1161private:
1162 unsigned getNumEntries() const {
1163 return NumEntries;
1164 }
1165
1166 void setNumEntries(unsigned Num) {
1167 // NumEntries is hardcoded to be 31 bits wide.
1168 assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
1169 NumEntries = Num;
1170 }
1171
1172 unsigned getNumTombstones() const {
1173 return NumTombstones;
1174 }
1175
1176 void setNumTombstones(unsigned Num) {
1177 NumTombstones = Num;
1178 }
1179
1180 const BucketT *getInlineBuckets() const {
1181 assert(Small);
1182 // Note that this cast does not violate aliasing rules as we assert that
1183 // the memory's dynamic type is the small, inline bucket buffer, and the
1184 // 'storage' is a POD containing a char buffer.
1185 return reinterpret_cast<const BucketT *>(&storage);
1186 }
1187
1188 BucketT *getInlineBuckets() {
1189 return const_cast<BucketT *>(
1190 const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
1191 }
1192
1193 const LargeRep *getLargeRep() const {
1194 assert(!Small);
1195 // Note, same rule about aliasing as with getInlineBuckets.
1196 return reinterpret_cast<const LargeRep *>(&storage);
1197 }
1198
1199 LargeRep *getLargeRep() {
1200 return const_cast<LargeRep *>(
1201 const_cast<const SmallDenseMap *>(this)->getLargeRep());
1202 }
1203
1204 const BucketT *getBuckets() const {
1205 return Small ? getInlineBuckets() : getLargeRep()->Buckets;
1206 }
1207
1208 BucketT *getBuckets() {
1209 return const_cast<BucketT *>(
1210 const_cast<const SmallDenseMap *>(this)->getBuckets());
1211 }
1212
1213 unsigned getNumBuckets() const {
1214 return Small ? InlineBuckets : getLargeRep()->NumBuckets;
1215 }
1216
1217 void deallocateBuckets() {
1218 if (Small)
1219 return;
1220
1221 deallocate_buffer(getLargeRep()->Buckets,
1222 sizeof(BucketT) * getLargeRep()->NumBuckets,
1223 alignof(BucketT));
1224 getLargeRep()->~LargeRep();
1225 }
1226
1227 LargeRep allocateBuckets(unsigned Num) {
1228 assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
1229 LargeRep Rep = {static_cast<BucketT *>(allocate_buffer(
1230 sizeof(BucketT) * Num, alignof(BucketT))),
1231 Num};
1232 return Rep;
1233 }
1234};
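// SmallDenseMap keeps up to InlineBuckets buckets inside the object and only
// heap-allocates once the table has to grow past them. A usage sketch:
//
//   wpi::SmallDenseMap<int, int, 8> M;  // up to 8 buckets stored inline
//   for (int i = 0; i < 5; ++i)
//     M.try_emplace(i, i * i);          // 5 entries in 8 buckets: still inline
//   for (int i = 5; i < 50; ++i)
//     M.try_emplace(i, i * i);          // exceeds the 3/4 load factor; grow()
//                                       // switches to the heap-allocated LargeRep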
1235
1236template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
1237 bool IsConst>
1238class DenseMapIterator : DebugEpochBase::HandleBase {
1239 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
1240 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
1241
1242public:
1243 using difference_type = ptrdiff_t;
1244 using value_type = std::conditional_t<IsConst, const Bucket, Bucket>;
1245 using pointer = value_type *;
1246 using reference = value_type &;
1247 using iterator_category = std::forward_iterator_tag;
1248
1249private:
1250 pointer Ptr = nullptr;
1251 pointer End = nullptr;
1252
1253public:
1254 DenseMapIterator() = default;
1255
1256 DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
1257 bool NoAdvance = false)
1258 : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
1259 assert(isHandleInSync() && "invalid construction!");
1260
1261 if (NoAdvance) return;
1262 if (shouldReverseIterate<KeyT>()) {
1263 RetreatPastEmptyBuckets();
1264 return;
1265 }
1266 AdvancePastEmptyBuckets();
1267 }
1268
1269 // Converting ctor from non-const iterators to const iterators. SFINAE'd out
1270 // for const iterator destinations so it doesn't end up as a user defined copy
1271 // constructor.
1272 template <bool IsConstSrc,
1273 typename = std::enable_if_t<!IsConstSrc && IsConst>>
1274 DenseMapIterator(
1275 const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
1276 : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
1277
1278 reference operator*() const {
1279 assert(isHandleInSync() && "invalid iterator access!");
1280 assert(Ptr != End && "dereferencing end() iterator");
1281 if (shouldReverseIterate<KeyT>())
1282 return Ptr[-1];
1283 return *Ptr;
1284 }
1285 pointer operator->() const {
1286 assert(isHandleInSync() && "invalid iterator access!");
1287 assert(Ptr != End && "dereferencing end() iterator");
1288 if (shouldReverseIterate<KeyT>())
1289 return &(Ptr[-1]);
1290 return Ptr;
1291 }
1292
1293 friend bool operator==(const DenseMapIterator &LHS,
1294 const DenseMapIterator &RHS) {
1295 assert((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!");
1296 assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
1297 assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
1298 "comparing incomparable iterators!");
1299 return LHS.Ptr == RHS.Ptr;
1300 }
1301
1302 friend bool operator!=(const DenseMapIterator &LHS,
1303 const DenseMapIterator &RHS) {
1304 return !(LHS == RHS);
1305 }
1306
1307 inline DenseMapIterator& operator++() { // Preincrement
1308 assert(isHandleInSync() && "invalid iterator access!");
1309 assert(Ptr != End && "incrementing end() iterator");
1310 if (shouldReverseIterate<KeyT>()) {
1311 --Ptr;
1312 RetreatPastEmptyBuckets();
1313 return *this;
1314 }
1315 ++Ptr;
1316 AdvancePastEmptyBuckets();
1317 return *this;
1318 }
1319 DenseMapIterator operator++(int) { // Postincrement
1320 assert(isHandleInSync() && "invalid iterator access!");
1321 DenseMapIterator tmp = *this; ++*this; return tmp;
1322 }
1323
1324private:
1325 void AdvancePastEmptyBuckets() {
1326 assert(Ptr <= End);
1327 const KeyT Empty = KeyInfoT::getEmptyKey();
1328 const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1329
1330 while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
1331 KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
1332 ++Ptr;
1333 }
1334
1335 void RetreatPastEmptyBuckets() {
1336 assert(Ptr >= End);
1337 const KeyT Empty = KeyInfoT::getEmptyKey();
1338 const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1339
1340 while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
1341 KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
1342 --Ptr;
1343 }
1344};
1345
1346template <typename KeyT, typename ValueT, typename KeyInfoT>
1347inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
1348 return X.getMemorySize();
1349}
1350
1351} // end namespace wpi
1352
1353#endif // WPIUTIL_WPI_DENSEMAP_H