//
// Copyright 2017 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// ResourceMap:
//   An optimized resource map which packs the first set of allocated objects into a
//   flat array, and then falls back to an unordered map for the higher handle values.
//

#ifndef LIBANGLE_RESOURCE_MAP_H_
#define LIBANGLE_RESOURCE_MAP_H_

#include <cstring>
#include <mutex>

#include "common/SimpleMutex.h"
#include "common/hash_containers.h"
#include "libANGLE/angletypes.h"

namespace gl
{
// The resource map needs to be internally synchronized for maps that are placed in the share group
// (as opposed to being private to the context) and that are accessed without holding the share
// group lock.
#if defined(ANGLE_ENABLE_SHARE_CONTEXT_LOCK)
using ResourceMapMutex = angle::SimpleMutex;
#else
using ResourceMapMutex = angle::NoOpMutex;
#endif

template <bool NeedsLock>
struct SelectResourceMapMutex
{
    using type = ResourceMapMutex;
};

template <>
struct SelectResourceMapMutex<false>
{
    using type = angle::NoOpMutex;
};

// Analysis of ANGLE's traces as well as Chrome usage reveals the following:
//
// - Buffers: Typical applications use no more than 4000 ids.  Very few use over 6000.
// - Textures: Typical applications use no more than 1200 ids.  Very few use over 2000.
// - Samplers: Typical applications use no more than 50 ids.  Very few use over 100.
// - Shaders and Programs: Typical applications use no more than 500.  Very few use over 700.
// - Sync objects: Typical applications use no more than 500.  Very few use over 1500.
//
// For all the other shared types, the maximum used id is small (under 100).  For
// context-private parts (such as vertex arrays and queries), the id count can be in the
// thousands.
//
// The initial size of the flat resource map is based on the above, rounded up to a multiple of
// 1536.  Resource maps that need a lock (kNeedsLock == true) have the maximum flat size identical
// to the initial flat size to avoid reallocation.  For others, the maps start small and can grow.
template <typename ResourceType>
struct ResourceMapParams
{
    static constexpr size_t kInitialFlatResourcesSize = 192;

    // The following are private to the context and don't need a lock:
    //
    // - Vertex Array Objects
    // - Framebuffer Objects
    // - Transform Feedback Objects
    // - Query Objects
    //
    // The rest of the maps need a lock.  However, only a select few are currently locked, as the
    // API relevant to the rest of the types is protected by the share group lock.  As the share
    // group lock is removed from more types, the resource map lock needs to be enabled for them.
    static constexpr bool kNeedsLock = false;
};

template <>
struct ResourceMapParams<Buffer>
{
    static constexpr size_t kInitialFlatResourcesSize = 6144;
    static constexpr bool kNeedsLock                  = true;
};
template <>
struct ResourceMapParams<Texture>
{
    static constexpr size_t kInitialFlatResourcesSize = 1536;
    static constexpr bool kNeedsLock                  = false;
};
template <>
struct ResourceMapParams<Program>
{
    static constexpr size_t kInitialFlatResourcesSize = 1536;
    static constexpr bool kNeedsLock                  = false;
};
template <>
struct ResourceMapParams<Sync>
{
    static constexpr size_t kInitialFlatResourcesSize = 1536;
    static constexpr bool kNeedsLock                  = false;
};
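// As a worked example of the rounding rule above (illustrative): buffers rarely use more than
// 6000 ids, and the smallest multiple of 1536 covering that is 4 * 1536 = 6144, hence the
// Buffer specialization.  Textures typically stay under 1200 ids, which rounds up to
// 1 * 1536 = 1536.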
// For the purpose of unit testing, |int| is considered private (not needing lock), and
// |unsigned int| is considered shared (needing lock).
template <>
struct ResourceMapParams<unsigned int>
{
    static constexpr size_t kInitialFlatResourcesSize = 192;
    static constexpr bool kNeedsLock                  = true;
};

template <typename ResourceType, typename IDType>
class ResourceMap final : angle::NonCopyable
{
  public:
    ResourceMap();
    ~ResourceMap();

    ANGLE_INLINE ResourceType *query(IDType id) const
    {
        GLuint handle = GetIDValue(id);
        // No need for a lock when accessing the flat map.  Either the flat map is static, or
        // locking is not needed.
        static_assert(!kNeedsLock || kInitialFlatResourcesSize == kFlatResourcesLimit);
        if (handle < mFlatResourcesSize)
        {
            ResourceType *value = mFlatResources[handle];
            return (value == InvalidPointer() ? nullptr : value);
        }
        std::lock_guard<Mutex> lock(mMutex);
        auto it = mHashedResources.find(handle);
        return (it == mHashedResources.end() ? nullptr : it->second);
    }

    // Returns true if the handle was reserved, even if no resource has been created for it yet.
    bool contains(IDType id) const;

    // Returns the element that was at this location.
    bool erase(IDType id, ResourceType **resourceOut);

    void assign(IDType id, ResourceType *resource);

    // Clears the map.
    void clear();

    using IndexAndResource = std::pair<GLuint, ResourceType *>;
    using HashMap          = angle::HashMap<GLuint, ResourceType *>;

    class Iterator final
    {
      public:
        bool operator==(const Iterator &other) const;
        bool operator!=(const Iterator &other) const;
        Iterator &operator++();
        const IndexAndResource *operator->() const;
        const IndexAndResource &operator*() const;

      private:
        friend class ResourceMap;
        Iterator(const ResourceMap &origin,
                 GLuint flatIndex,
                 typename HashMap::const_iterator hashIndex,
                 bool skipNulls);
        void updateValue();

        const ResourceMap &mOrigin;
        GLuint mFlatIndex;
        typename HashMap::const_iterator mHashIndex;
        IndexAndResource mValue;
        bool mSkipNulls;
    };

  private:
    friend class Iterator;
    template <typename OtherResourceType, typename OtherIDType>
    friend class UnsafeResourceMapIter;

    // null values represent reserved handles.
    Iterator begin() const;
    Iterator end() const;

    Iterator beginWithNull() const;
    Iterator endWithNull() const;

    // Used by iterators and related functions only (due to lack of thread safety).
    GLuint nextResource(size_t flatIndex, bool skipNulls) const;

    // constexpr methods cannot contain reinterpret_cast, so we need a static method.
    static ResourceType *InvalidPointer();
    static constexpr intptr_t kInvalidPointer = static_cast<intptr_t>(-1);

    static constexpr bool kNeedsLock = ResourceMapParams<ResourceType>::kNeedsLock;
    using Mutex                      = typename SelectResourceMapMutex<kNeedsLock>::type;

    static constexpr size_t kInitialFlatResourcesSize =
        ResourceMapParams<ResourceType>::kInitialFlatResourcesSize;

    // Experimental testing suggests that ~10k is a reasonable upper limit.
    static constexpr size_t kFlatResourcesLimit = kNeedsLock ? kInitialFlatResourcesSize : 0x3000;
    // Due to the way assign() is implemented, kFlatResourcesLimit / kInitialFlatResourcesSize must
    // be a power of 2.
    static_assert(kFlatResourcesLimit % kInitialFlatResourcesSize == 0);
    static_assert(((kFlatResourcesLimit / kInitialFlatResourcesSize) &
                   (kFlatResourcesLimit / kInitialFlatResourcesSize - 1)) == 0);

    size_t mFlatResourcesSize;
    ResourceType **mFlatResources;

    // A map of GL objects indexed by object ID.
    HashMap mHashedResources;

    // mFlatResources is allocated at object creation time, with a default size of
    // |kInitialFlatResourcesSize|.  This is thread safe, because the allocation is done by the
    // first context in the share group.  The flat map is allowed to grow up to
    // |kFlatResourcesLimit|, but only for maps that don't need a lock (kNeedsLock == false).
    //
    // For maps that don't need a lock, this mutex is a no-op.  For those that do, the mutex is
    // taken when allocating / deleting objects, as well as when accessing |mHashedResources|.
    // Otherwise, access to the flat map (which never gets reallocated due to
    // |kInitialFlatResourcesSize == kFlatResourcesLimit|) is lockless.  The latter is possible
    // because the application is not allowed to gen/delete and bind the same ID in different
    // threads at the same time.
    //
    // Note that because HandleAllocator is not yet thread-safe, glGen* and glDelete* functions
    // cannot be free of the share group mutex yet.  To remove the share group mutex from those
    // functions, the HandleAllocator class should likely be merged with this class, and the
    // necessary insert/erase operations done under this same lock.
    mutable Mutex mMutex;
};
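// A minimal usage sketch (illustrative only, mirroring the unit-test configuration described
// above where |int| is the resource type; the handle value is arbitrary):
//
//   ResourceMap<int, GLuint> map;
//   int resource = 42;
//   map.assign(5, nullptr);    // reserve handle 5 (e.g., on glGen*)
//   map.assign(5, &resource);  // create the object (e.g., on first bind)
//   ASSERT(map.contains(5));
//   ASSERT(map.query(5) == &resource);  // lock-free for handles in the flat range
//   int *erased = nullptr;
//   if (map.erase(5, &erased))
//   {
//       ASSERT(erased == &resource);
//   }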
// A helper to retrieve the resource map iterators while being explicit that this is not thread
// safe.  Usage of the iterators is limited to clean up on destruction and capture/replay, neither
// of which can race with other threads in their access to the resource map.
template <typename ResourceType, typename IDType>
class UnsafeResourceMapIter
{
  public:
    using ResMap = ResourceMap<ResourceType, IDType>;

    UnsafeResourceMapIter(const ResMap &resourceMap) : mResourceMap(resourceMap) {}

    typename ResMap::Iterator begin() const { return mResourceMap.begin(); }
    typename ResMap::Iterator end() const { return mResourceMap.end(); }

    typename ResMap::Iterator beginWithNull() const { return mResourceMap.beginWithNull(); }
    typename ResMap::Iterator endWithNull() const { return mResourceMap.endWithNull(); }

    // Not a constant-time operation, should only be used for verification.
    bool empty() const;

  private:
    const ResMap &mResourceMap;
};
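// For example, single-threaded teardown of a map could look like this (a sketch only, assuming
// |map| is the ResourceMap<int, GLuint> from the earlier example and owns heap-allocated
// resources; begin()/end() skip reserved-but-never-created null entries):
//
//   UnsafeResourceMapIter<int, GLuint> iter(map);
//   for (const auto &indexAndResource : iter)
//   {
//       // indexAndResource.first is the GL handle, .second is the resource pointer.
//       delete indexAndResource.second;
//   }
//   map.clear();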
template <typename ResourceType, typename IDType>
ResourceMap<ResourceType, IDType>::ResourceMap()
    : mFlatResourcesSize(kInitialFlatResourcesSize),
      mFlatResources(new ResourceType *[kInitialFlatResourcesSize])
{
    memset(mFlatResources, kInvalidPointer, mFlatResourcesSize * sizeof(mFlatResources[0]));
}

template <typename ResourceType, typename IDType>
ResourceMap<ResourceType, IDType>::~ResourceMap()
{
    ASSERT(begin() == end());
    delete[] mFlatResources;
}

template <typename ResourceType, typename IDType>
ANGLE_INLINE bool ResourceMap<ResourceType, IDType>::contains(IDType id) const
{
    GLuint handle = GetIDValue(id);
    if (handle < mFlatResourcesSize)
    {
        return mFlatResources[handle] != InvalidPointer();
    }
    std::lock_guard<Mutex> lock(mMutex);
    return mHashedResources.find(handle) != mHashedResources.end();
}

template <typename ResourceType, typename IDType>
bool ResourceMap<ResourceType, IDType>::erase(IDType id, ResourceType **resourceOut)
{
    GLuint handle = GetIDValue(id);
    if (handle < mFlatResourcesSize)
    {
        auto &value = mFlatResources[handle];
        if (value == InvalidPointer())
        {
            return false;
        }
        *resourceOut = value;
        value        = InvalidPointer();
    }
    else
    {
        std::lock_guard<Mutex> lock(mMutex);
        auto it = mHashedResources.find(handle);
        if (it == mHashedResources.end())
        {
            return false;
        }
        *resourceOut = it->second;
        mHashedResources.erase(it);
    }
    return true;
}

template <typename ResourceType, typename IDType>
void ResourceMap<ResourceType, IDType>::assign(IDType id, ResourceType *resource)
{
    GLuint handle = GetIDValue(id);
    if (handle < kFlatResourcesLimit)
    {
        if (handle >= mFlatResourcesSize)
        {
            // No need for a lock as the flat map never grows when locking is needed.
            static_assert(!kNeedsLock || kInitialFlatResourcesSize == kFlatResourcesLimit);

            // Grow the flat map in powers of two, e.g. 192 -> 384 -> 768 -> 1536 when the first
            // assigned handle is 1000 and the map starts at the default size of 192.
            size_t newSize = mFlatResourcesSize;
            while (newSize <= handle)
            {
                newSize *= 2;
            }

            ResourceType **oldResources = mFlatResources;

            mFlatResources = new ResourceType *[newSize];
            memset(&mFlatResources[mFlatResourcesSize], kInvalidPointer,
                   (newSize - mFlatResourcesSize) * sizeof(mFlatResources[0]));
            memcpy(mFlatResources, oldResources, mFlatResourcesSize * sizeof(mFlatResources[0]));

            mFlatResourcesSize = newSize;
            delete[] oldResources;
        }
        ASSERT(mFlatResourcesSize > handle);
        mFlatResources[handle] = resource;
    }
    else
    {
        std::lock_guard<Mutex> lock(mMutex);
        mHashedResources[handle] = resource;
    }
}

template <typename ResourceType, typename IDType>
typename ResourceMap<ResourceType, IDType>::Iterator ResourceMap<ResourceType, IDType>::begin()
    const
{
    return Iterator(*this, nextResource(0, true), mHashedResources.begin(), true);
}

template <typename ResourceType, typename IDType>
typename ResourceMap<ResourceType, IDType>::Iterator ResourceMap<ResourceType, IDType>::end() const
{
    return Iterator(*this, static_cast<GLuint>(mFlatResourcesSize), mHashedResources.end(), true);
}

template <typename ResourceType, typename IDType>
typename ResourceMap<ResourceType, IDType>::Iterator
ResourceMap<ResourceType, IDType>::beginWithNull() const
{
    return Iterator(*this, nextResource(0, false), mHashedResources.begin(), false);
}

template <typename ResourceType, typename IDType>
typename ResourceMap<ResourceType, IDType>::Iterator
ResourceMap<ResourceType, IDType>::endWithNull() const
{
    return Iterator(*this, static_cast<GLuint>(mFlatResourcesSize), mHashedResources.end(), false);
}

template <typename ResourceType, typename IDType>
bool UnsafeResourceMapIter<ResourceType, IDType>::empty() const
{
    return begin() == end();
}

template <typename ResourceType, typename IDType>
void ResourceMap<ResourceType, IDType>::clear()
{
    // No need for a lock as this is only called on destruction.
    memset(mFlatResources, kInvalidPointer, kInitialFlatResourcesSize * sizeof(mFlatResources[0]));
    mFlatResourcesSize = kInitialFlatResourcesSize;
    mHashedResources.clear();
}

template <typename ResourceType, typename IDType>
GLuint ResourceMap<ResourceType, IDType>::nextResource(size_t flatIndex, bool skipNulls) const
{
    // This function is only used by the iterators, access to which is marked by
    // UnsafeResourceMapIter.  Locking is the responsibility of the caller.
    for (size_t index = flatIndex; index < mFlatResourcesSize; index++)
    {
        if ((mFlatResources[index] != nullptr || !skipNulls) &&
            mFlatResources[index] != InvalidPointer())
        {
            return static_cast<GLuint>(index);
        }
    }
    return static_cast<GLuint>(mFlatResourcesSize);
}

template <typename ResourceType, typename IDType>
// static
ResourceType *ResourceMap<ResourceType, IDType>::InvalidPointer()
{
    return reinterpret_cast<ResourceType *>(kInvalidPointer);
}

template <typename ResourceType, typename IDType>
ResourceMap<ResourceType, IDType>::Iterator::Iterator(
    const ResourceMap &origin,
    GLuint flatIndex,
    typename ResourceMap<ResourceType, IDType>::HashMap::const_iterator hashIndex,
    bool skipNulls)
    : mOrigin(origin), mFlatIndex(flatIndex), mHashIndex(hashIndex), mSkipNulls(skipNulls)
{
    updateValue();
}

template <typename ResourceType, typename IDType>
bool ResourceMap<ResourceType, IDType>::Iterator::operator==(const Iterator &other) const
{
    return (mFlatIndex == other.mFlatIndex && mHashIndex == other.mHashIndex);
}

template <typename ResourceType, typename IDType>
bool ResourceMap<ResourceType, IDType>::Iterator::operator!=(const Iterator &other) const
{
    return !(*this == other);
}

template <typename ResourceType, typename IDType>
typename ResourceMap<ResourceType, IDType>::Iterator &
ResourceMap<ResourceType, IDType>::Iterator::operator++()
{
    if (mFlatIndex < static_cast<GLuint>(mOrigin.mFlatResourcesSize))
    {
        mFlatIndex = mOrigin.nextResource(mFlatIndex + 1, mSkipNulls);
    }
    else
    {
        mHashIndex++;
    }
    updateValue();
    return *this;
}

template <typename ResourceType, typename IDType>
const typename ResourceMap<ResourceType, IDType>::IndexAndResource
    *ResourceMap<ResourceType, IDType>::Iterator::operator->() const
{
    return &mValue;
}

template <typename ResourceType, typename IDType>
const typename ResourceMap<ResourceType, IDType>::IndexAndResource
    &ResourceMap<ResourceType, IDType>::Iterator::operator*() const
{
    return mValue;
}

template <typename ResourceType, typename IDType>
void ResourceMap<ResourceType, IDType>::Iterator::updateValue()
{
    if (mFlatIndex < static_cast<GLuint>(mOrigin.mFlatResourcesSize))
    {
        mValue.first  = mFlatIndex;
        mValue.second = mOrigin.mFlatResources[mFlatIndex];
    }
    else if (mHashIndex != mOrigin.mHashedResources.end())
    {
        mValue.first  = mHashIndex->first;
        mValue.second = mHashIndex->second;
    }
}

}  // namespace gl

#endif  // LIBANGLE_RESOURCE_MAP_H_