// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc.  All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file defines the map container and its helpers to support protobuf
// maps.
//
// The Map and MapIterator types are provided by this header file.
// Please avoid using other types defined here, unless they are public
// types within Map or MapIterator, such as Map::value_type.

#ifndef GOOGLE_PROTOBUF_MAP_H__
#define GOOGLE_PROTOBUF_MAP_H__

#include <functional>
#include <initializer_list>
#include <iterator>
#include <limits>  // To support Visual Studio 2008
#include <map>
#include <string>
#include <type_traits>
#include <utility>

#if defined(__cpp_lib_string_view)
#include <string_view>
#endif  // defined(__cpp_lib_string_view)

#if !defined(GOOGLE_PROTOBUF_NO_RDTSC) && defined(__APPLE__)
#include <mach/mach_time.h>
#endif

#include <google/protobuf/stubs/common.h>
#include <google/protobuf/arena.h>
#include <google/protobuf/generated_enum_util.h>
#include <google/protobuf/map_type_handler.h>
#include <google/protobuf/port.h>
#include <google/protobuf/stubs/hash.h>

#ifdef SWIG
#error "You cannot SWIG proto headers"
#endif

// Must be included last.
#include <google/protobuf/port_def.inc>

namespace google {
namespace protobuf {

template <typename Key, typename T>
class Map;

class MapIterator;

template <typename Enum>
struct is_proto_enum;

namespace internal {
template <typename Derived, typename Key, typename T,
          WireFormatLite::FieldType key_wire_type,
          WireFormatLite::FieldType value_wire_type>
class MapFieldLite;

template <typename Derived, typename Key, typename T,
          WireFormatLite::FieldType key_wire_type,
          WireFormatLite::FieldType value_wire_type>
class MapField;

template <typename Key, typename T>
class TypeDefinedMapFieldBase;

class DynamicMapField;

class GeneratedMessageReflection;

// Re-implements std::allocator to use the arena allocator for memory
// allocation. Used for the Map implementation. Users should not use this
// class directly.
template <typename U>
class MapAllocator {
 public:
  using value_type = U;
  using pointer = value_type*;
  using const_pointer = const value_type*;
  using reference = value_type&;
  using const_reference = const value_type&;
  using size_type = size_t;
  using difference_type = ptrdiff_t;

  constexpr MapAllocator() : arena_(nullptr) {}
  explicit constexpr MapAllocator(Arena* arena) : arena_(arena) {}
  template <typename X>
  MapAllocator(const MapAllocator<X>& allocator)  // NOLINT(runtime/explicit)
      : arena_(allocator.arena()) {}

  // MapAllocator does not support alignments beyond 8. Technically we should
  // support up to std::max_align_t, but this fails with ubsan and tcmalloc
  // debug allocation logic which assume 8 as default alignment.
  static_assert(alignof(value_type) <= 8, "");

  pointer allocate(size_type n, const void* /* hint */ = nullptr) {
    // If no arena is given, the memory is heap-allocated; note that this
    // does not construct the element objects.
    if (arena_ == nullptr) {
      return static_cast<pointer>(::operator new(n * sizeof(value_type)));
    } else {
      return reinterpret_cast<pointer>(
          Arena::CreateArray<uint8_t>(arena_, n * sizeof(value_type)));
    }
  }

  void deallocate(pointer p, size_type n) {
    if (arena_ == nullptr) {
      internal::SizedDelete(p, n * sizeof(value_type));
    }
  }

#if !defined(GOOGLE_PROTOBUF_OS_APPLE) && !defined(GOOGLE_PROTOBUF_OS_NACL) && \
    !defined(GOOGLE_PROTOBUF_OS_EMSCRIPTEN)
  template <class NodeType, class... Args>
  void construct(NodeType* p, Args&&... args) {
    // Clang 3.6 doesn't compile static casting to void* directly. (Issue
    // #1266) According to C++ standard 5.2.9/1: "The static_cast operator
    // shall not cast away constness". So first the possibly-const pointer is
    // cast to const void*, and then the const is cast away.
    new (const_cast<void*>(static_cast<const void*>(p)))
        NodeType(std::forward<Args>(args)...);
  }

  template <class NodeType>
  void destroy(NodeType* p) {
    p->~NodeType();
  }
#else
  void construct(pointer p, const_reference t) { new (p) value_type(t); }

  void destroy(pointer p) { p->~value_type(); }
#endif

  template <typename X>
  struct rebind {
    using other = MapAllocator<X>;
  };

  template <typename X>
  bool operator==(const MapAllocator<X>& other) const {
    return arena_ == other.arena_;
  }

  template <typename X>
  bool operator!=(const MapAllocator<X>& other) const {
    return arena_ != other.arena_;
  }

  // To support Visual Studio 2008
  size_type max_size() const {
    // parentheses around (std::...:max) prevents macro warning of max()
    return (std::numeric_limits<size_type>::max)();
  }

  // To support gcc-4.4, which does not properly
  // support templated friend classes
  Arena* arena() const { return arena_; }

 private:
  using DestructorSkippable_ = void;
  Arena* arena_;
};
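
// A hedged illustration of the allocator's contract (not an endorsed usage;
// as noted above, this class is internal to Map): with a null arena the
// allocator forwards to the heap, while with an arena the bytes come from
// Arena::CreateArray and deallocate() intentionally does nothing, because
// the arena reclaims all of its memory at once when it is destroyed.
//
//   MapAllocator<int> heap_alloc;           // arena() == nullptr
//   int* p = heap_alloc.allocate(4);        // ::operator new
//   heap_alloc.deallocate(p, 4);            // internal::SizedDelete
//
//   Arena arena;
//   MapAllocator<int> arena_alloc(&arena);  // arena-backed
//   p = arena_alloc.allocate(4);            // Arena::CreateArray<uint8_t>
//   arena_alloc.deallocate(p, 4);           // no-op; the arena owns the bytes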

template <typename T>
using KeyForTree =
    typename std::conditional<std::is_scalar<T>::value, T,
                              std::reference_wrapper<const T>>::type;

// Default case: Not transparent.
// We use std::hash<key_type>/std::less<key_type> and all the lookup functions
// only accept `key_type`.
template <typename key_type>
struct TransparentSupport {
  using hash = std::hash<key_type>;
  using less = std::less<key_type>;

  static bool Equals(const key_type& a, const key_type& b) { return a == b; }

  template <typename K>
  using key_arg = key_type;
};

#if defined(__cpp_lib_string_view)
// If std::string_view is available, we add transparent support for std::string
// keys. We use std::hash<std::string_view> as it supports the input types we
// care about. The lookup functions accept arbitrary `K`. This will include any
// key type that is convertible to std::string_view.
template <>
struct TransparentSupport<std::string> {
  static std::string_view ImplicitConvert(std::string_view str) { return str; }
  // If the element is not convertible to std::string_view, try to convert to
  // std::string first.
  // The template parameter makes this overload lose overload resolution when
  // both overloads would otherwise have the same rank.
  template <typename = void>
  static std::string_view ImplicitConvert(const std::string& str) {
    return str;
  }

  struct hash : private std::hash<std::string_view> {
    using is_transparent = void;

    template <typename T>
    size_t operator()(const T& str) const {
      return base()(ImplicitConvert(str));
    }

   private:
    const std::hash<std::string_view>& base() const { return *this; }
  };
  struct less {
    using is_transparent = void;

    template <typename T, typename U>
    bool operator()(const T& t, const U& u) const {
      return ImplicitConvert(t) < ImplicitConvert(u);
    }
  };

  template <typename T, typename U>
  static bool Equals(const T& t, const U& u) {
    return ImplicitConvert(t) == ImplicitConvert(u);
  }

  template <typename K>
  using key_arg = K;
};
#endif  // defined(__cpp_lib_string_view)
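
// A hedged sketch of what the specialization above enables (assuming
// __cpp_lib_string_view is defined): lookups into a Map with std::string
// keys accept anything convertible to std::string_view, without
// materializing a temporary std::string.
//
//   Map<std::string, int> counts;
//   counts["apple"] = 3;
//   std::string_view needle = "apple";
//   auto it = counts.find(needle);            // heterogeneous lookup
//   bool present = counts.contains("apple");  // const char* also accepted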

template <typename Key>
using TreeForMap =
    std::map<KeyForTree<Key>, void*, typename TransparentSupport<Key>::less,
             MapAllocator<std::pair<const KeyForTree<Key>, void*>>>;

inline bool TableEntryIsEmpty(void* const* table, size_t b) {
  return table[b] == nullptr;
}
inline bool TableEntryIsNonEmptyList(void* const* table, size_t b) {
  return table[b] != nullptr && table[b] != table[b ^ 1];
}
inline bool TableEntryIsTree(void* const* table, size_t b) {
  return !TableEntryIsEmpty(table, b) && !TableEntryIsNonEmptyList(table, b);
}
inline bool TableEntryIsList(void* const* table, size_t b) {
  return !TableEntryIsTree(table, b);
}

// This captures all numeric types.
inline size_t MapValueSpaceUsedExcludingSelfLong(bool) { return 0; }
inline size_t MapValueSpaceUsedExcludingSelfLong(const std::string& str) {
  return StringSpaceUsedExcludingSelfLong(str);
}
template <typename T,
          typename = decltype(std::declval<const T&>().SpaceUsedLong())>
size_t MapValueSpaceUsedExcludingSelfLong(const T& message) {
  return message.SpaceUsedLong() - sizeof(T);
}

constexpr size_t kGlobalEmptyTableSize = 1;
PROTOBUF_EXPORT extern void* const kGlobalEmptyTable[kGlobalEmptyTableSize];

// Space used for the table, trees, and nodes.
// Does not include the indirect space used. Eg the data of a std::string.
template <typename Key>
PROTOBUF_NOINLINE size_t SpaceUsedInTable(void** table, size_t num_buckets,
                                          size_t num_elements,
                                          size_t sizeof_node) {
  size_t size = 0;
  // The size of the table.
  size += sizeof(void*) * num_buckets;
  // All the nodes.
  size += sizeof_node * num_elements;
  // For each tree, count the overhead of those nodes.
  // Two buckets at a time because we only care about trees.
  for (size_t b = 0; b < num_buckets; b += 2) {
    if (internal::TableEntryIsTree(table, b)) {
      using Tree = TreeForMap<Key>;
      Tree* tree = static_cast<Tree*>(table[b]);
      // Estimated cost of the red-black tree nodes, 3 pointers plus a
      // bool (plus alignment, so 4 pointers).
      size += tree->size() *
              (sizeof(typename Tree::value_type) + sizeof(void*) * 4);
    }
  }
  return size;
}

template <typename Map,
          typename = typename std::enable_if<
              !std::is_scalar<typename Map::key_type>::value ||
              !std::is_scalar<typename Map::mapped_type>::value>::type>
size_t SpaceUsedInValues(const Map* map) {
  size_t size = 0;
  for (const auto& v : *map) {
    size += internal::MapValueSpaceUsedExcludingSelfLong(v.first) +
            internal::MapValueSpaceUsedExcludingSelfLong(v.second);
  }
  return size;
}

inline size_t SpaceUsedInValues(const void*) { return 0; }

// Multiply two numbers where overflow is expected.
template <typename N>
N MultiplyWithOverflow(N a, N b) {
#if __has_builtin(__builtin_mul_overflow)
  N res;
  (void)__builtin_mul_overflow(a, b, &res);
  return res;
#else
  return a * b;
#endif
}

}  // namespace internal

// This is the class for Map's internal value_type. Instead of using
// std::pair as value_type, we use this class which provides us more control
// of its process of construction and destruction.
template <typename Key, typename T>
struct PROTOBUF_ATTRIBUTE_STANDALONE_DEBUG MapPair {
  using first_type = const Key;
  using second_type = T;

  MapPair(const Key& other_first, const T& other_second)
      : first(other_first), second(other_second) {}
  explicit MapPair(const Key& other_first) : first(other_first), second() {}
  explicit MapPair(Key&& other_first)
      : first(std::move(other_first)), second() {}
  MapPair(const MapPair& other) : first(other.first), second(other.second) {}

  ~MapPair() {}

  // Implicitly convertible to std::pair of compatible types.
  template <typename T1, typename T2>
  operator std::pair<T1, T2>() const {  // NOLINT(runtime/explicit)
    return std::pair<T1, T2>(first, second);
  }

  const Key first;
  T second;

 private:
  friend class Arena;
  friend class Map<Key, T>;
};

// Map is an associative container type used to store protobuf map
// fields. Each Map instance may or may not use a different hash function, a
// different iteration order, and so on. E.g., please don't examine
// implementation details to decide if the following would work:
//   Map<int, int> m0, m1;
//   m0[0] = m1[0] = m0[1] = m1[1] = 0;
//   assert(m0.begin()->first == m1.begin()->first);  // Bug!
//
// Map's interface is similar to std::unordered_map, except that Map is not
// designed to play well with exceptions.
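//
// A short usage sketch (illustrative only; the caveats about iteration
// order above still apply):
//
//   Map<int32_t, std::string> m;
//   m[1] = "one";              // value-initializes, then assigns
//   m.insert({2, "two"});
//   auto it = m.find(1);       // expected O(1) lookup
//   if (it != m.end()) it->second = "uno";
//   m.erase(1);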
template <typename Key, typename T>
class Map {
 public:
  using key_type = Key;
  using mapped_type = T;
  using value_type = MapPair<Key, T>;

  using pointer = value_type*;
  using const_pointer = const value_type*;
  using reference = value_type&;
  using const_reference = const value_type&;

  using size_type = size_t;
  using hasher = typename internal::TransparentSupport<Key>::hash;

  constexpr Map() : elements_(nullptr) {}
  explicit Map(Arena* arena) : elements_(arena) {}

  Map(const Map& other) : Map() { insert(other.begin(), other.end()); }

  Map(Map&& other) noexcept : Map() {
    if (other.arena() != nullptr) {
      *this = other;
    } else {
      swap(other);
    }
  }

  Map& operator=(Map&& other) noexcept {
    if (this != &other) {
      if (arena() != other.arena()) {
        *this = other;
      } else {
        swap(other);
      }
    }
    return *this;
  }

  template <class InputIt>
  Map(const InputIt& first, const InputIt& last) : Map() {
    insert(first, last);
  }

  ~Map() {}

 private:
  using Allocator = internal::MapAllocator<void*>;

  // InnerMap is a generic hash-based map. It doesn't contain any
  // protocol-buffer-specific logic. It is a chaining hash map with the
  // additional feature that some buckets can be converted to use an ordered
  // container. This ensures O(lg n) bounds on find, insert, and erase, while
  // avoiding the overheads of ordered containers most of the time.
  //
  // The implementation doesn't need the full generality of unordered_map,
  // and it doesn't have it. More bells and whistles can be added as needed.
  // Some implementation details:
  // 1. The hash function has type hasher and the equality function
  //    equal_to<Key>. We inherit from hasher to save space
  //    (empty-base-class optimization).
  // 2. The number of buckets is a power of two.
  // 3. Buckets are converted to trees in pairs: if we convert bucket b then
  //    buckets b and b^1 will share a tree. Invariant: buckets b and b^1
  //    have the same non-null value iff they are sharing a tree. (An
  //    alternative implementation strategy would be to have a tag bit per
  //    bucket.) See the sketch after this list.
  // 4. As is typical for hash_map and such, the Keys and Values are always
  //    stored in linked list nodes. Pointers to elements are never
  //    invalidated until the element is deleted.
  // 5. The trees' payload type is pointer to linked-list node. Tree-converting
  //    a bucket doesn't copy Key-Value pairs.
  // 6. Once we've tree-converted a bucket, it is never converted back.
  //    However, the items a tree contains may wind up assigned to trees or
  //    lists upon a rehash.
  // 7. The code requires no C++ features from C++14 or later.
  // 8. Mutations to a map do not invalidate the map's iterators, pointers to
  //    elements, or references to elements.
  // 9. Except for erase(iterator), any non-const method can reorder
  //    iterators.
  // 10. InnerMap uses KeyForTree<Key> when using the Tree representation,
  //     which is either `Key`, if Key is a scalar, or
  //     `reference_wrapper<const Key>` otherwise. This avoids unnecessary
  //     copies of string keys, for example.
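  //
  // A hedged illustration of the pairing scheme in point 3 (the numbers are
  // only for the example): with num_buckets_ == 8, the pairs are (0,1),
  // (2,3), (4,5) and (6,7). If the list in bucket 5 grows too long, buckets
  // 4 and 5 are converted together, after which table_[4] == table_[5] ==
  // the shared Tree*. That pointer equality is exactly what
  // internal::TableEntryIsTree() tests, and it is why a non-empty list is
  // recognized by table_[b] != table_[b ^ 1].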
  class InnerMap : private hasher {
   public:
    explicit constexpr InnerMap(Arena* arena)
        : hasher(),
          num_elements_(0),
          num_buckets_(internal::kGlobalEmptyTableSize),
          seed_(0),
          index_of_first_non_null_(internal::kGlobalEmptyTableSize),
          table_(const_cast<void**>(internal::kGlobalEmptyTable)),
          alloc_(arena) {}

    ~InnerMap() {
      if (alloc_.arena() == nullptr &&
          num_buckets_ != internal::kGlobalEmptyTableSize) {
        clear();
        Dealloc<void*>(table_, num_buckets_);
      }
    }

   private:
    enum { kMinTableSize = 8 };

    // Linked-list nodes, as one would expect for a chaining hash table.
    struct Node {
      value_type kv;
      Node* next;
    };

    // Trees. The payload type is a copy of Key, so that we can query the
    // tree with Keys that are not in any particular data structure.
    // The value is a void* pointing to Node. We use void* instead of Node*
    // to avoid code bloat. That way there is only one instantiation of the
    // tree class per key type.
    using Tree = internal::TreeForMap<Key>;
    using TreeIterator = typename Tree::iterator;

    static Node* NodeFromTreeIterator(TreeIterator it) {
      return static_cast<Node*>(it->second);
    }

    // iterator and const_iterator are instantiations of iterator_base.
    template <typename KeyValueType>
    class iterator_base {
     public:
      using reference = KeyValueType&;
      using pointer = KeyValueType*;

      // Invariants:
      // node_ is always correct. This is handy because the most common
      // operations are operator* and operator-> and they only use node_.
      // When node_ is set to a non-null value, all the other non-const
      // fields are updated to be correct also, but those fields can become
      // stale if the underlying map is modified. When those fields are
      // needed they are rechecked, and updated if necessary.
      iterator_base() : node_(nullptr), m_(nullptr), bucket_index_(0) {}

      explicit iterator_base(const InnerMap* m) : m_(m) {
        SearchFrom(m->index_of_first_non_null_);
      }

      // Any iterator_base can convert to any other. This is overkill, and we
      // rely on the enclosing class to use it wisely. The standard "iterator
      // can convert to const_iterator" is OK but the reverse direction is
      // not.
      template <typename U>
      explicit iterator_base(const iterator_base<U>& it)
          : node_(it.node_), m_(it.m_), bucket_index_(it.bucket_index_) {}

      iterator_base(Node* n, const InnerMap* m, size_type index)
          : node_(n), m_(m), bucket_index_(index) {}

      iterator_base(TreeIterator tree_it, const InnerMap* m, size_type index)
          : node_(NodeFromTreeIterator(tree_it)), m_(m), bucket_index_(index) {
        // Invariant: iterators that use buckets with trees have an even
        // bucket_index_.
        GOOGLE_DCHECK_EQ(bucket_index_ % 2, 0u);
      }

      // Advance through buckets, looking for the first that isn't empty.
      // If nothing non-empty is found then leave node_ == nullptr.
      void SearchFrom(size_type start_bucket) {
        GOOGLE_DCHECK(m_->index_of_first_non_null_ == m_->num_buckets_ ||
                      m_->table_[m_->index_of_first_non_null_] != nullptr);
        node_ = nullptr;
        for (bucket_index_ = start_bucket; bucket_index_ < m_->num_buckets_;
             bucket_index_++) {
          if (m_->TableEntryIsNonEmptyList(bucket_index_)) {
            node_ = static_cast<Node*>(m_->table_[bucket_index_]);
            break;
          } else if (m_->TableEntryIsTree(bucket_index_)) {
            Tree* tree = static_cast<Tree*>(m_->table_[bucket_index_]);
            GOOGLE_DCHECK(!tree->empty());
            node_ = NodeFromTreeIterator(tree->begin());
            break;
          }
        }
      }

      reference operator*() const { return node_->kv; }
      pointer operator->() const { return &(operator*()); }

      friend bool operator==(const iterator_base& a, const iterator_base& b) {
        return a.node_ == b.node_;
      }
      friend bool operator!=(const iterator_base& a, const iterator_base& b) {
        return a.node_ != b.node_;
      }

      iterator_base& operator++() {
        if (node_->next == nullptr) {
          TreeIterator tree_it;
          const bool is_list = revalidate_if_necessary(&tree_it);
          if (is_list) {
            SearchFrom(bucket_index_ + 1);
          } else {
            GOOGLE_DCHECK_EQ(bucket_index_ & 1, 0u);
            Tree* tree = static_cast<Tree*>(m_->table_[bucket_index_]);
            if (++tree_it == tree->end()) {
              SearchFrom(bucket_index_ + 2);
            } else {
              node_ = NodeFromTreeIterator(tree_it);
            }
          }
        } else {
          node_ = node_->next;
        }
        return *this;
      }

      iterator_base operator++(int /* unused */) {
        iterator_base tmp = *this;
        ++*this;
        return tmp;
      }

      // Assumes node_ and m_ are correct and non-null, but other fields may
      // be stale. Fix them as needed. Then return true iff node_ points to a
      // Node in a list. If false is returned then *it is modified to be
      // a valid iterator for node_.
      bool revalidate_if_necessary(TreeIterator* it) {
        GOOGLE_DCHECK(node_ != nullptr && m_ != nullptr);
        // Force bucket_index_ to be in range.
        bucket_index_ &= (m_->num_buckets_ - 1);
        // Common case: the bucket we think is relevant points to node_.
        if (m_->table_[bucket_index_] == static_cast<void*>(node_))
          return true;
        // Less common: the bucket is a linked list with node_ somewhere in
        // it, but not at the head.
        if (m_->TableEntryIsNonEmptyList(bucket_index_)) {
          Node* l = static_cast<Node*>(m_->table_[bucket_index_]);
          while ((l = l->next) != nullptr) {
            if (l == node_) {
              return true;
            }
          }
        }
        // Well, bucket_index_ still might be correct, but probably
        // not. Revalidate just to be sure. This case is rare enough that we
        // don't worry about potential optimizations, such as having a custom
        // find-like method that compares Node* instead of the key.
        iterator_base i(m_->find(node_->kv.first, it));
        bucket_index_ = i.bucket_index_;
        return m_->TableEntryIsList(bucket_index_);
      }

      Node* node_;
      const InnerMap* m_;
      size_type bucket_index_;
    };

   public:
    using iterator = iterator_base<value_type>;
    using const_iterator = iterator_base<const value_type>;

    Arena* arena() const { return alloc_.arena(); }

    void Swap(InnerMap* other) {
      std::swap(num_elements_, other->num_elements_);
      std::swap(num_buckets_, other->num_buckets_);
      std::swap(seed_, other->seed_);
      std::swap(index_of_first_non_null_, other->index_of_first_non_null_);
      std::swap(table_, other->table_);
      std::swap(alloc_, other->alloc_);
    }

    iterator begin() { return iterator(this); }
    iterator end() { return iterator(); }
    const_iterator begin() const { return const_iterator(this); }
    const_iterator end() const { return const_iterator(); }

    void clear() {
      for (size_type b = 0; b < num_buckets_; b++) {
        if (TableEntryIsNonEmptyList(b)) {
          Node* node = static_cast<Node*>(table_[b]);
          table_[b] = nullptr;
          do {
            Node* next = node->next;
            DestroyNode(node);
            node = next;
          } while (node != nullptr);
        } else if (TableEntryIsTree(b)) {
          Tree* tree = static_cast<Tree*>(table_[b]);
          GOOGLE_DCHECK(table_[b] == table_[b + 1] && (b & 1) == 0);
          table_[b] = table_[b + 1] = nullptr;
          typename Tree::iterator tree_it = tree->begin();
          do {
            Node* node = NodeFromTreeIterator(tree_it);
            typename Tree::iterator next = tree_it;
            ++next;
            tree->erase(tree_it);
            DestroyNode(node);
            tree_it = next;
          } while (tree_it != tree->end());
          DestroyTree(tree);
          b++;
        }
      }
      num_elements_ = 0;
      index_of_first_non_null_ = num_buckets_;
    }

    const hasher& hash_function() const { return *this; }

    static size_type max_size() {
      return static_cast<size_type>(1) << (sizeof(void**) >= 8 ? 60 : 28);
    }
    size_type size() const { return num_elements_; }
    bool empty() const { return size() == 0; }

    template <typename K>
    iterator find(const K& k) {
      return iterator(FindHelper(k).first);
    }

    template <typename K>
    const_iterator find(const K& k) const {
      return FindHelper(k).first;
    }

    // Inserts a new element into the container if there is no element with
    // the key in the container.
    // The new element is:
    //  (1) Constructed in-place with the given args, if mapped_type is not
    //      arena constructible.
    //  (2) Constructed in-place with the arena and then assigned with a
    //      mapped_type temporary constructed with the given args, otherwise.
    template <typename K, typename... Args>
    std::pair<iterator, bool> try_emplace(K&& k, Args&&... args) {
      return ArenaAwareTryEmplace(Arena::is_arena_constructable<mapped_type>(),
                                  std::forward<K>(k),
                                  std::forward<Args>(args)...);
    }
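
    // A hedged illustration of the two cases above (SubMsg stands in for a
    // hypothetical arena-constructible generated message type):
    //  - mapped_type == std::string: try_emplace(k, "x") takes case (1);
    //    the value is constructed in place from "x".
    //  - mapped_type == SubMsg: try_emplace(k, msg) takes case (2); the
    //    value is first created with the arena, then assigned from a
    //    temporary SubMsg constructed from msg.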

    // Inserts the key into the map, if not present. In that case, the value
    // will be value initialized.
    template <typename K>
    std::pair<iterator, bool> insert(K&& k) {
      return try_emplace(std::forward<K>(k));
    }

    template <typename K>
    value_type& operator[](K&& k) {
      return *try_emplace(std::forward<K>(k)).first;
    }

    void erase(iterator it) {
      GOOGLE_DCHECK_EQ(it.m_, this);
      typename Tree::iterator tree_it;
      const bool is_list = it.revalidate_if_necessary(&tree_it);
      size_type b = it.bucket_index_;
      Node* const item = it.node_;
      if (is_list) {
        GOOGLE_DCHECK(TableEntryIsNonEmptyList(b));
        Node* head = static_cast<Node*>(table_[b]);
        head = EraseFromLinkedList(item, head);
        table_[b] = static_cast<void*>(head);
      } else {
        GOOGLE_DCHECK(TableEntryIsTree(b));
        Tree* tree = static_cast<Tree*>(table_[b]);
        tree->erase(tree_it);
        if (tree->empty()) {
          // Force b to be the minimum of b and b ^ 1. This is important
          // only because we want index_of_first_non_null_ to be correct.
          b &= ~static_cast<size_type>(1);
          DestroyTree(tree);
          table_[b] = table_[b + 1] = nullptr;
        }
      }
      DestroyNode(item);
      --num_elements_;
      if (PROTOBUF_PREDICT_FALSE(b == index_of_first_non_null_)) {
        while (index_of_first_non_null_ < num_buckets_ &&
               table_[index_of_first_non_null_] == nullptr) {
          ++index_of_first_non_null_;
        }
      }
    }

    size_t SpaceUsedInternal() const {
      return internal::SpaceUsedInTable<Key>(table_, num_buckets_,
                                             num_elements_, sizeof(Node));
    }

   private:
    template <typename K, typename... Args>
    std::pair<iterator, bool> TryEmplaceInternal(K&& k, Args&&... args) {
      std::pair<const_iterator, size_type> p = FindHelper(k);
      // Case 1: key was already present.
      if (p.first.node_ != nullptr)
        return std::make_pair(iterator(p.first), false);
      // Case 2: insert.
      if (ResizeIfLoadIsOutOfRange(num_elements_ + 1)) {
        p = FindHelper(k);
      }
      const size_type b = p.second;  // bucket number
      // If K is not key_type, make the conversion to key_type explicit.
      using TypeToInit = typename std::conditional<
          std::is_same<typename std::decay<K>::type, key_type>::value, K&&,
          key_type>::type;
      Node* node = Alloc<Node>(1);
      // Even when arena is nullptr, CreateInArenaStorage is still used to
      // ensure that the arena of a submessage is consistent. Otherwise, the
      // submessage may have its own arena when message-owned arena is
      // enabled. Note: This only works if `Key` is not arena constructible.
      Arena::CreateInArenaStorage(const_cast<Key*>(&node->kv.first),
                                  alloc_.arena(),
                                  static_cast<TypeToInit>(std::forward<K>(k)));
      // Note: if `T` is arena constructible, `Args` needs to be empty.
      Arena::CreateInArenaStorage(&node->kv.second, alloc_.arena(),
                                  std::forward<Args>(args)...);

      iterator result = InsertUnique(b, node);
      ++num_elements_;
      return std::make_pair(result, true);
    }

    // A helper function to perform an assignment of `mapped_type`.
    // If the first argument is true, then it is a regular assignment.
    // Otherwise, we first create a temporary and then perform an assignment.
    template <typename V>
    static void AssignMapped(std::true_type, mapped_type& mapped, V&& v) {
      mapped = std::forward<V>(v);
    }
    template <typename... Args>
    static void AssignMapped(std::false_type, mapped_type& mapped,
                             Args&&... args) {
      mapped = mapped_type(std::forward<Args>(args)...);
    }

    // Case 1: `mapped_type` is arena constructible. A temporary object is
    // created and then (if `Args` are not empty) assigned to a mapped value
    // that was created with the arena.
    template <typename K>
    std::pair<iterator, bool> ArenaAwareTryEmplace(std::true_type, K&& k) {
      // case 1.1: "default" constructed (e.g. from arena only).
      return TryEmplaceInternal(std::forward<K>(k));
    }
    template <typename K, typename... Args>
    std::pair<iterator, bool> ArenaAwareTryEmplace(std::true_type, K&& k,
                                                   Args&&... args) {
      // case 1.2: "default" constructed + copy/move assignment
      auto p = TryEmplaceInternal(std::forward<K>(k));
      if (p.second) {
        AssignMapped(std::is_same<void(typename std::decay<Args>::type...),
                                  void(mapped_type)>(),
                     p.first->second, std::forward<Args>(args)...);
      }
      return p;
    }
    // Case 2: `mapped_type` is not arena constructible. Using in-place
    // construction.
    template <typename... Args>
    std::pair<iterator, bool> ArenaAwareTryEmplace(std::false_type,
                                                   Args&&... args) {
      return TryEmplaceInternal(std::forward<Args>(args)...);
    }

    const_iterator find(const Key& k, TreeIterator* it) const {
      return FindHelper(k, it).first;
    }
    template <typename K>
    std::pair<const_iterator, size_type> FindHelper(const K& k) const {
      return FindHelper(k, nullptr);
    }
    template <typename K>
    std::pair<const_iterator, size_type> FindHelper(const K& k,
                                                    TreeIterator* it) const {
      size_type b = BucketNumber(k);
      if (TableEntryIsNonEmptyList(b)) {
        Node* node = static_cast<Node*>(table_[b]);
        do {
          if (internal::TransparentSupport<Key>::Equals(node->kv.first, k)) {
            return std::make_pair(const_iterator(node, this, b), b);
          } else {
            node = node->next;
          }
        } while (node != nullptr);
      } else if (TableEntryIsTree(b)) {
        GOOGLE_DCHECK_EQ(table_[b], table_[b ^ 1]);
        b &= ~static_cast<size_t>(1);
        Tree* tree = static_cast<Tree*>(table_[b]);
        auto tree_it = tree->find(k);
        if (tree_it != tree->end()) {
          if (it != nullptr) *it = tree_it;
          return std::make_pair(const_iterator(tree_it, this, b), b);
        }
      }
      return std::make_pair(end(), b);
    }

    // Insert the given Node in bucket b. If that would make bucket b too
    // big, and bucket b is not a tree, create a tree for buckets b and b^1
    // to share. Requires that node's key is not already in the map (i.e.
    // count(node->kv.first) == 0) and that b is the correct bucket.
    // num_elements_ is not modified.
    iterator InsertUnique(size_type b, Node* node) {
      GOOGLE_DCHECK(index_of_first_non_null_ == num_buckets_ ||
                    table_[index_of_first_non_null_] != nullptr);
      // In practice, the code that led to this point may have already
      // determined whether we are inserting into an empty list, a short
      // list, or whatever. But it's probably cheap enough to recompute that
      // here; it's likely that we're inserting into an empty or short list.
      iterator result;
      GOOGLE_DCHECK(find(node->kv.first) == end());
      if (TableEntryIsEmpty(b)) {
        result = InsertUniqueInList(b, node);
      } else if (TableEntryIsNonEmptyList(b)) {
        if (PROTOBUF_PREDICT_FALSE(TableEntryIsTooLong(b))) {
          TreeConvert(b);
          result = InsertUniqueInTree(b, node);
          GOOGLE_DCHECK_EQ(result.bucket_index_,
                           b & ~static_cast<size_type>(1));
        } else {
          // Insert into a pre-existing list. This case cannot modify
          // index_of_first_non_null_, so we skip the code to update it.
          return InsertUniqueInList(b, node);
        }
      } else {
        // Insert into a pre-existing tree. This case cannot modify
        // index_of_first_non_null_, so we skip the code to update it.
        return InsertUniqueInTree(b, node);
      }
      // parentheses around (std::min) prevents macro expansion of min(...)
      index_of_first_non_null_ =
          (std::min)(index_of_first_non_null_, result.bucket_index_);
      return result;
    }

    // Returns whether we should insert after the head of the list. For
    // non-optimized builds, we randomly decide whether to insert right at
    // the head of the list or just after the head. This helps add a little
    // bit of non-determinism to the map ordering.
    bool ShouldInsertAfterHead(void* node) {
#ifdef NDEBUG
      (void)node;
      return false;
#else
      // Doing modulo with a prime mixes the bits more.
      return (reinterpret_cast<uintptr_t>(node) ^ seed_) % 13 > 6;
#endif
    }

    // Helper for InsertUnique. Handles the case where bucket b is a
    // not-too-long linked list.
    iterator InsertUniqueInList(size_type b, Node* node) {
      if (table_[b] != nullptr && ShouldInsertAfterHead(node)) {
        Node* first = static_cast<Node*>(table_[b]);
        node->next = first->next;
        first->next = node;
        return iterator(node, this, b);
      }

      node->next = static_cast<Node*>(table_[b]);
      table_[b] = static_cast<void*>(node);
      return iterator(node, this, b);
    }

    // Helper for InsertUnique. Handles the case where bucket b points to a
    // Tree.
    iterator InsertUniqueInTree(size_type b, Node* node) {
      GOOGLE_DCHECK_EQ(table_[b], table_[b ^ 1]);
      // Maintain the invariant that node->next is null for all Nodes in
      // Trees.
      node->next = nullptr;
      return iterator(
          static_cast<Tree*>(table_[b])->insert({node->kv.first, node}).first,
          this, b & ~static_cast<size_t>(1));
    }

    // Returns whether it did resize. Currently this is only used when
    // num_elements_ increases, though it could be used in other situations.
    // It checks for load too low as well as load too high: because any
    // number of erases can occur between inserts, the load could be as low
    // as 0 here. Resizing to a lower size is not always helpful, but failing
    // to do so can destroy the expected big-O bounds for some operations. By
    // having the policy that sometimes we resize down as well as up, clients
    // can easily keep O(size()) = O(number of buckets) if they want that.
    bool ResizeIfLoadIsOutOfRange(size_type new_size) {
      const size_type kMaxMapLoadTimes16 = 12;  // controls RAM vs CPU tradeoff
      const size_type hi_cutoff = num_buckets_ * kMaxMapLoadTimes16 / 16;
      const size_type lo_cutoff = hi_cutoff / 4;
      // We don't care how many elements are in trees. If a lot are,
      // we may resize even though there are many empty buckets. In
      // practice, this seems fine.
      if (PROTOBUF_PREDICT_FALSE(new_size >= hi_cutoff)) {
        if (num_buckets_ <= max_size() / 2) {
          Resize(num_buckets_ * 2);
          return true;
        }
      } else if (PROTOBUF_PREDICT_FALSE(new_size <= lo_cutoff &&
                                        num_buckets_ > kMinTableSize)) {
        size_type lg2_of_size_reduction_factor = 1;
        // It's possible we want to shrink a lot here... size() could even
        // be 0. So, estimate how much to shrink by making sure we don't
        // shrink so much that we would need to grow the table after a few
        // inserts.
        const size_type hypothetical_size = new_size * 5 / 4 + 1;
        while ((hypothetical_size << lg2_of_size_reduction_factor) <
               hi_cutoff) {
          ++lg2_of_size_reduction_factor;
        }
        size_type new_num_buckets = std::max<size_type>(
            kMinTableSize, num_buckets_ >> lg2_of_size_reduction_factor);
        if (new_num_buckets != num_buckets_) {
          Resize(new_num_buckets);
          return true;
        }
      }
      return false;
    }
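
    // A worked example of the cutoffs above (hedged; the numbers follow
    // directly from the constants): with num_buckets_ == 64, hi_cutoff is
    // 64 * 12 / 16 == 48 and lo_cutoff is 12. An insert that would reach 48
    // elements doubles the table to 128 buckets. A call with new_size == 12
    // shrinks instead: hypothetical_size == 12 * 5 / 4 + 1 == 16, the
    // smallest shift for which 16 << shift >= 48 is 2, so the table shrinks
    // by a factor of 4, to max(kMinTableSize, 64 >> 2) == 16 buckets.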

    // Resize to the given number of buckets.
    void Resize(size_t new_num_buckets) {
      if (num_buckets_ == internal::kGlobalEmptyTableSize) {
        // This is the global empty array.
        // Just overwrite with a new one. No need to transfer or free
        // anything.
        num_buckets_ = index_of_first_non_null_ = kMinTableSize;
        table_ = CreateEmptyTable(num_buckets_);
        seed_ = Seed();
        return;
      }

      GOOGLE_DCHECK_GE(new_num_buckets, kMinTableSize);
      void** const old_table = table_;
      const size_type old_table_size = num_buckets_;
      num_buckets_ = new_num_buckets;
      table_ = CreateEmptyTable(num_buckets_);
      const size_type start = index_of_first_non_null_;
      index_of_first_non_null_ = num_buckets_;
      for (size_type i = start; i < old_table_size; i++) {
        if (internal::TableEntryIsNonEmptyList(old_table, i)) {
          TransferList(old_table, i);
        } else if (internal::TableEntryIsTree(old_table, i)) {
          TransferTree(old_table, i++);
        }
      }
      Dealloc<void*>(old_table, old_table_size);
    }

    void TransferList(void* const* table, size_type index) {
      Node* node = static_cast<Node*>(table[index]);
      do {
        Node* next = node->next;
        InsertUnique(BucketNumber(node->kv.first), node);
        node = next;
      } while (node != nullptr);
    }

    void TransferTree(void* const* table, size_type index) {
      Tree* tree = static_cast<Tree*>(table[index]);
      typename Tree::iterator tree_it = tree->begin();
      do {
        InsertUnique(BucketNumber(std::cref(tree_it->first).get()),
                     NodeFromTreeIterator(tree_it));
      } while (++tree_it != tree->end());
      DestroyTree(tree);
    }

    Node* EraseFromLinkedList(Node* item, Node* head) {
      if (head == item) {
        return head->next;
      } else {
        head->next = EraseFromLinkedList(item, head->next);
        return head;
      }
    }

    bool TableEntryIsEmpty(size_type b) const {
      return internal::TableEntryIsEmpty(table_, b);
    }
    bool TableEntryIsNonEmptyList(size_type b) const {
      return internal::TableEntryIsNonEmptyList(table_, b);
    }
    bool TableEntryIsTree(size_type b) const {
      return internal::TableEntryIsTree(table_, b);
    }
    bool TableEntryIsList(size_type b) const {
      return internal::TableEntryIsList(table_, b);
    }

    void TreeConvert(size_type b) {
      GOOGLE_DCHECK(!TableEntryIsTree(b) && !TableEntryIsTree(b ^ 1));
      Tree* tree =
          Arena::Create<Tree>(alloc_.arena(), typename Tree::key_compare(),
                              typename Tree::allocator_type(alloc_));
      size_type count = CopyListToTree(b, tree) + CopyListToTree(b ^ 1, tree);
      GOOGLE_DCHECK_EQ(count, tree->size());
      table_[b] = table_[b ^ 1] = static_cast<void*>(tree);
    }

    // Copy a linked list in the given bucket to a tree.
    // Returns the number of things it copied.
    size_type CopyListToTree(size_type b, Tree* tree) {
      size_type count = 0;
      Node* node = static_cast<Node*>(table_[b]);
      while (node != nullptr) {
        tree->insert({node->kv.first, node});
        ++count;
        Node* next = node->next;
        node->next = nullptr;
        node = next;
      }
      return count;
    }

    // Return whether table_[b] is a linked list that seems awfully long.
    // Requires table_[b] to point to a non-empty linked list.
    bool TableEntryIsTooLong(size_type b) {
      const size_type kMaxLength = 8;
      size_type count = 0;
      Node* node = static_cast<Node*>(table_[b]);
      do {
        ++count;
        node = node->next;
      } while (node != nullptr);
      // Invariant: no linked list ever is more than kMaxLength in length.
      GOOGLE_DCHECK_LE(count, kMaxLength);
      return count >= kMaxLength;
    }

    template <typename K>
    size_type BucketNumber(const K& k) const {
      // We xor the hash value against the random seed so that we effectively
      // have a random hash function.
      uint64_t h = hash_function()(k) ^ seed_;

      // We use the multiplication method to determine the bucket number from
      // the hash value. The constant kPhi (suggested by Knuth) is roughly
      // (sqrt(5) - 1) / 2 * 2^64.
      constexpr uint64_t kPhi = uint64_t{0x9e3779b97f4a7c15};
      return (internal::MultiplyWithOverflow(kPhi, h) >> 32) &
             (num_buckets_ - 1);
    }
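
    // A hedged sketch of the multiplication method above: kPhi * h is
    // computed modulo 2^64 (overflow is expected, hence
    // MultiplyWithOverflow), which scrambles the full hash into the high
    // bits; (>> 32) then selects well-mixed middle bits before masking down
    // to the table size. For example, with seed_ == 0, num_buckets_ == 8
    // and a raw hash of h == 1: (kPhi * 1) >> 32 == 0x9e3779b9, and
    // 0x9e3779b9 & 7 == 1, so the element lands in bucket 1.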

    // Return a power of two no less than max(kMinTableSize, n).
    // Assumes either n < kMinTableSize or n is a power of two.
    size_type TableSize(size_type n) {
      return n < static_cast<size_type>(kMinTableSize)
                 ? static_cast<size_type>(kMinTableSize)
                 : n;
    }

    // Use alloc_ to allocate an array of n objects of type U.
    template <typename U>
    U* Alloc(size_type n) {
      using alloc_type = typename Allocator::template rebind<U>::other;
      return alloc_type(alloc_).allocate(n);
    }

    // Use alloc_ to deallocate an array of n objects of type U.
    template <typename U>
    void Dealloc(U* t, size_type n) {
      using alloc_type = typename Allocator::template rebind<U>::other;
      alloc_type(alloc_).deallocate(t, n);
    }

    void DestroyNode(Node* node) {
      if (alloc_.arena() == nullptr) {
        delete node;
      }
    }

    void DestroyTree(Tree* tree) {
      if (alloc_.arena() == nullptr) {
        delete tree;
      }
    }

    void** CreateEmptyTable(size_type n) {
      GOOGLE_DCHECK(n >= kMinTableSize);
      GOOGLE_DCHECK_EQ(n & (n - 1), 0u);
      void** result = Alloc<void*>(n);
      memset(result, 0, n * sizeof(result[0]));
      return result;
    }

    // Return a randomish value.
    size_type Seed() const {
      // We get a little bit of randomness from the address of the map. The
      // lower bits are not very random, due to alignment, so we discard them
      // and shift the higher bits into their place.
      size_type s = reinterpret_cast<uintptr_t>(this) >> 4;
#if !defined(GOOGLE_PROTOBUF_NO_RDTSC)
#if defined(__APPLE__)
      // Use a commpage-based fast time function on Apple environments (macOS,
      // iOS, tvOS, watchOS, etc).
      s += mach_absolute_time();
#elif defined(__x86_64__) && defined(__GNUC__)
      uint32_t hi, lo;
      asm volatile("rdtsc" : "=a"(lo), "=d"(hi));
      s += ((static_cast<uint64_t>(hi) << 32) | lo);
#elif defined(__aarch64__) && defined(__GNUC__)
      // There is no rdtsc on ARMv8. CNTVCT_EL0 is the virtual counter of the
      // system timer. It runs at a different frequency than the CPU's, but
      // is the best source of time-based entropy we get.
      uint64_t virtual_timer_value;
      asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
      s += virtual_timer_value;
#endif
#endif  // !defined(GOOGLE_PROTOBUF_NO_RDTSC)
      return s;
    }

    friend class Arena;
    using InternalArenaConstructable_ = void;
    using DestructorSkippable_ = void;

    size_type num_elements_;
    size_type num_buckets_;
    size_type seed_;
    size_type index_of_first_non_null_;
    void** table_;  // an array with num_buckets_ entries
    Allocator alloc_;
    GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(InnerMap);
  };  // end of class InnerMap

  template <typename LookupKey>
  using key_arg = typename internal::TransparentSupport<
      key_type>::template key_arg<LookupKey>;

 public:
  // Iterators
  class const_iterator {
    using InnerIt = typename InnerMap::const_iterator;

   public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = typename Map::value_type;
    using difference_type = ptrdiff_t;
    using pointer = const value_type*;
    using reference = const value_type&;

    const_iterator() {}
    explicit const_iterator(const InnerIt& it) : it_(it) {}

    const_reference operator*() const { return *it_; }
    const_pointer operator->() const { return &(operator*()); }

    const_iterator& operator++() {
      ++it_;
      return *this;
    }
    const_iterator operator++(int) { return const_iterator(it_++); }

    friend bool operator==(const const_iterator& a, const const_iterator& b) {
      return a.it_ == b.it_;
    }
    friend bool operator!=(const const_iterator& a, const const_iterator& b) {
      return !(a == b);
    }

   private:
    InnerIt it_;
  };

  class iterator {
    using InnerIt = typename InnerMap::iterator;

   public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = typename Map::value_type;
    using difference_type = ptrdiff_t;
    using pointer = value_type*;
    using reference = value_type&;

    iterator() {}
    explicit iterator(const InnerIt& it) : it_(it) {}

    reference operator*() const { return *it_; }
    pointer operator->() const { return &(operator*()); }

    iterator& operator++() {
      ++it_;
      return *this;
    }
    iterator operator++(int) { return iterator(it_++); }

    // Allow implicit conversion to const_iterator.
    operator const_iterator() const {  // NOLINT(runtime/explicit)
      return const_iterator(typename InnerMap::const_iterator(it_));
    }

    friend bool operator==(const iterator& a, const iterator& b) {
      return a.it_ == b.it_;
    }
    friend bool operator!=(const iterator& a, const iterator& b) {
      return !(a == b);
    }

   private:
    friend class Map;

    InnerIt it_;
  };

  iterator begin() { return iterator(elements_.begin()); }
  iterator end() { return iterator(elements_.end()); }
  const_iterator begin() const { return const_iterator(elements_.begin()); }
  const_iterator end() const { return const_iterator(elements_.end()); }
  const_iterator cbegin() const { return begin(); }
  const_iterator cend() const { return end(); }

  // Capacity
  size_type size() const { return elements_.size(); }
  bool empty() const { return size() == 0; }

  // Element access
  template <typename K = key_type>
  T& operator[](const key_arg<K>& key) {
    return elements_[key].second;
  }
  template <
      typename K = key_type,
      // Disable for integral types to reduce code bloat.
      typename = typename std::enable_if<!std::is_integral<K>::value>::type>
  T& operator[](key_arg<K>&& key) {
    return elements_[std::forward<K>(key)].second;
  }

  template <typename K = key_type>
  const T& at(const key_arg<K>& key) const {
    const_iterator it = find(key);
    GOOGLE_CHECK(it != end()) << "key not found: " << static_cast<Key>(key);
    return it->second;
  }

  template <typename K = key_type>
  T& at(const key_arg<K>& key) {
    iterator it = find(key);
    GOOGLE_CHECK(it != end()) << "key not found: " << static_cast<Key>(key);
    return it->second;
  }

  // Lookup
  template <typename K = key_type>
  size_type count(const key_arg<K>& key) const {
    return find(key) == end() ? 0 : 1;
  }

  template <typename K = key_type>
  const_iterator find(const key_arg<K>& key) const {
    return const_iterator(elements_.find(key));
  }
  template <typename K = key_type>
  iterator find(const key_arg<K>& key) {
    return iterator(elements_.find(key));
  }

  template <typename K = key_type>
  bool contains(const key_arg<K>& key) const {
    return find(key) != end();
  }

  template <typename K = key_type>
  std::pair<const_iterator, const_iterator> equal_range(
      const key_arg<K>& key) const {
    const_iterator it = find(key);
    if (it == end()) {
      return std::pair<const_iterator, const_iterator>(it, it);
    } else {
      const_iterator begin = it++;
      return std::pair<const_iterator, const_iterator>(begin, it);
    }
  }

  template <typename K = key_type>
  std::pair<iterator, iterator> equal_range(const key_arg<K>& key) {
    iterator it = find(key);
    if (it == end()) {
      return std::pair<iterator, iterator>(it, it);
    } else {
      iterator begin = it++;
      return std::pair<iterator, iterator>(begin, it);
    }
  }

  // insert
  template <typename K, typename... Args>
  std::pair<iterator, bool> try_emplace(K&& k, Args&&... args) {
    auto p =
        elements_.try_emplace(std::forward<K>(k), std::forward<Args>(args)...);
    return std::pair<iterator, bool>(iterator(p.first), p.second);
  }
  std::pair<iterator, bool> insert(const value_type& value) {
    return try_emplace(value.first, value.second);
  }
  std::pair<iterator, bool> insert(value_type&& value) {
    return try_emplace(value.first, std::move(value.second));
  }
  template <typename... Args>
  std::pair<iterator, bool> emplace(Args&&... args) {
    return insert(value_type(std::forward<Args>(args)...));
  }
  template <class InputIt>
  void insert(InputIt first, InputIt last) {
    for (; first != last; ++first) {
      try_emplace(first->first, first->second);
    }
  }
  void insert(std::initializer_list<value_type> values) {
    insert(values.begin(), values.end());
  }

  // Erase and clear
  template <typename K = key_type>
  size_type erase(const key_arg<K>& key) {
    iterator it = find(key);
    if (it == end()) {
      return 0;
    } else {
      erase(it);
      return 1;
    }
  }
  iterator erase(iterator pos) {
    iterator i = pos++;
    elements_.erase(i.it_);
    return pos;
  }
  void erase(iterator first, iterator last) {
    while (first != last) {
      first = erase(first);
    }
  }
  void clear() { elements_.clear(); }

  // Assign
  Map& operator=(const Map& other) {
    if (this != &other) {
      clear();
      insert(other.begin(), other.end());
    }
    return *this;
  }

  void swap(Map& other) {
    if (arena() == other.arena()) {
      InternalSwap(other);
    } else {
      // TODO(zuguang): optimize this. The temporary copy can be allocated
      // in the same arena as the other message, and the "other = copy" can
      // be replaced with the fast-path swap above.
      Map copy = *this;
      *this = other;
      other = copy;
    }
  }

  void InternalSwap(Map& other) { elements_.Swap(&other.elements_); }

  // Access to hasher. Currently this returns a copy, but it may
  // be modified to return a const reference in the future.
  hasher hash_function() const { return elements_.hash_function(); }

  size_t SpaceUsedExcludingSelfLong() const {
    if (empty()) return 0;
    return elements_.SpaceUsedInternal() + internal::SpaceUsedInValues(this);
  }

 private:
  Arena* arena() const { return elements_.arena(); }
  InnerMap elements_;

  friend class Arena;
  using InternalArenaConstructable_ = void;
  using DestructorSkippable_ = void;
  template <typename Derived, typename K, typename V,
            internal::WireFormatLite::FieldType key_wire_type,
            internal::WireFormatLite::FieldType value_wire_type>
  friend class internal::MapFieldLite;
};

}  // namespace protobuf
}  // namespace google

#include <google/protobuf/port_undef.inc>

#endif  // GOOGLE_PROTOBUF_MAP_H__