1 //
2 // Copyright 2018 The ANGLE Project Authors. All rights reserved.
3 // Use of this source code is governed by a BSD-style license that can be
4 // found in the LICENSE file.
5 //
6 // ResourceMap_unittest:
7 // Unit tests for the ResourceMap template class.
8 //
9
#include <gtest/gtest.h>

#include <array>
#include <cstddef>
#include <map>
#include <thread>
#include <type_traits>
#include <vector>

#include "libANGLE/ResourceMap.h"
14
15 using namespace gl;
16
17 namespace gl
18 {
19 template <>
GetIDValue(int id)20 inline GLuint GetIDValue(int id)
21 {
22 return id;
23 }
24 template <>
GetIDValue(unsigned int id)25 inline GLuint GetIDValue(unsigned int id)
26 {
27 return id;
28 }
29 } // namespace gl
30
namespace
{
// ResourceMap is specialized to take a lock for "unsigned int" id types
// (arranged specifically to support this unit test), while "int" ids take the
// lockless path. These aliases name the two configurations under test.
using LocklessType = int;
using LockedType = unsigned int;
36
37 template <typename T>
AssignAndErase()38 void AssignAndErase()
39 {
40 constexpr size_t kSize = 300;
41 ResourceMap<size_t, T> resourceMap;
42 std::vector<size_t> objects(kSize, 1);
43 for (size_t index = 0; index < kSize; ++index)
44 {
45 resourceMap.assign(index + 1, &objects[index]);
46 }
47
48 for (size_t index = 0; index < kSize; ++index)
49 {
50 size_t *found = nullptr;
51 ASSERT_TRUE(resourceMap.erase(index + 1, &found));
52 ASSERT_EQ(&objects[index], found);
53 }
54
55 ASSERT_TRUE(UnsafeResourceMapIter(resourceMap).empty());
56 }
57
// Tests assigning slots in the map and then deleting elements, using the id
// type that takes the lockless path.
TEST(ResourceMapTest, AssignAndEraseLockless)
{
    AssignAndErase<LocklessType>();
}
// Tests assigning slots in the map and then deleting elements, using the id
// type that takes the locking path.
TEST(ResourceMapTest, AssignAndEraseLocked)
{
    AssignAndErase<LockedType>();
}
68
69 template <typename T>
AssignAndClear()70 void AssignAndClear()
71 {
72 constexpr size_t kSize = 280;
73 ResourceMap<size_t, T> resourceMap;
74 std::vector<size_t> objects(kSize, 1);
75 for (size_t index = 0; index < kSize; ++index)
76 {
77 resourceMap.assign(index + 1, &objects[index]);
78 }
79
80 resourceMap.clear();
81 ASSERT_TRUE(UnsafeResourceMapIter(resourceMap).empty());
82 }
83
// Tests assigning slots in the map and then using clear() to free it, using
// the id type that takes the lockless path.
TEST(ResourceMapTest, AssignAndClearLockless)
{
    AssignAndClear<LocklessType>();
}
// Tests assigning slots in the map and then using clear() to free it, using
// the id type that takes the locking path.
TEST(ResourceMapTest, AssignAndClearLocked)
{
    AssignAndClear<LockedType>();
}
94
95 template <typename T>
BigGrowth()96 void BigGrowth()
97 {
98 constexpr size_t kSize = 8;
99
100 ResourceMap<size_t, T> resourceMap;
101 std::vector<size_t> objects;
102
103 for (size_t index = 0; index < kSize; ++index)
104 {
105 objects.push_back(index);
106 }
107
108 // Assign a large value.
109 constexpr size_t kLargeIndex = 128;
110 objects.push_back(kLargeIndex);
111
112 for (size_t &object : objects)
113 {
114 resourceMap.assign(object, &object);
115 }
116
117 for (size_t object : objects)
118 {
119 size_t *found = nullptr;
120 ASSERT_TRUE(resourceMap.erase(object, &found));
121 ASSERT_EQ(object, *found);
122 }
123
124 ASSERT_TRUE(UnsafeResourceMapIter(resourceMap).empty());
125 }
126
// Tests growing a map more than double the size, using the id type that takes
// the lockless path.
TEST(ResourceMapTest, BigGrowthLockless)
{
    BigGrowth<LocklessType>();
}
// Tests growing a map more than double the size, using the id type that takes
// the locking path.
TEST(ResourceMapTest, BigGrowthLocked)
{
    BigGrowth<LockedType>();
}
137
138 template <typename T>
QueryUnassigned()139 void QueryUnassigned()
140 {
141 constexpr size_t kSize = 8;
142 constexpr T kZero = 0;
143 constexpr T kIdInFlatRange = 10;
144 constexpr T kIdOutOfFlatRange = 500;
145
146 ResourceMap<size_t, T> resourceMap;
147 std::vector<size_t> objects;
148
149 for (size_t index = 0; index < kSize; ++index)
150 {
151 objects.push_back(index);
152 }
153
154 ASSERT_FALSE(resourceMap.contains(kZero));
155 ASSERT_EQ(nullptr, resourceMap.query(kZero));
156 ASSERT_FALSE(resourceMap.contains(kIdOutOfFlatRange));
157 ASSERT_EQ(nullptr, resourceMap.query(kIdOutOfFlatRange));
158
159 for (size_t &object : objects)
160 {
161 resourceMap.assign(object, &object);
162 }
163
164 ASSERT_FALSE(UnsafeResourceMapIter(resourceMap).empty());
165
166 for (size_t &object : objects)
167 {
168 ASSERT_TRUE(resourceMap.contains(object));
169 ASSERT_EQ(&object, resourceMap.query(object));
170 }
171
172 ASSERT_FALSE(resourceMap.contains(kIdInFlatRange));
173 ASSERT_EQ(nullptr, resourceMap.query(kIdInFlatRange));
174 ASSERT_FALSE(resourceMap.contains(kIdOutOfFlatRange));
175 ASSERT_EQ(nullptr, resourceMap.query(kIdOutOfFlatRange));
176
177 for (size_t object : objects)
178 {
179 size_t *found = nullptr;
180 ASSERT_TRUE(resourceMap.erase(object, &found));
181 ASSERT_EQ(object, *found);
182 }
183
184 ASSERT_TRUE(UnsafeResourceMapIter(resourceMap).empty());
185
186 ASSERT_FALSE(resourceMap.contains(kZero));
187 ASSERT_EQ(nullptr, resourceMap.query(kZero));
188 ASSERT_FALSE(resourceMap.contains(kIdOutOfFlatRange));
189 ASSERT_EQ(nullptr, resourceMap.query(kIdOutOfFlatRange));
190 }
191
// Tests querying unassigned or erased values, using the id type that takes
// the lockless path.
TEST(ResourceMapTest, QueryUnassignedLockless)
{
    QueryUnassigned<LocklessType>();
}
// Tests querying unassigned or erased values, using the id type that takes
// the locking path.
TEST(ResourceMapTest, QueryUnassignedLocked)
{
    QueryUnassigned<LockedType>();
}
202
ConcurrentAccess(size_t iterations,size_t idCycleSize)203 void ConcurrentAccess(size_t iterations, size_t idCycleSize)
204 {
205 if (std::is_same_v<ResourceMapMutex, angle::NoOpMutex>)
206 {
207 GTEST_SKIP() << "Test skipped: Locking is disabled in build.";
208 }
209
210 constexpr size_t kThreadCount = 13;
211
212 ResourceMap<size_t, LockedType> resourceMap;
213
214 std::array<std::thread, kThreadCount> threads;
215 std::array<std::map<LockedType, size_t>, kThreadCount> insertedIds;
216
217 for (size_t i = 0; i < kThreadCount; ++i)
218 {
219 threads[i] = std::thread([&, i]() {
220 // Each thread manipulates a different set of ids. The resource map guarantees that the
221 // data structure itself is thread-safe, not accesses to the same id.
222 for (size_t j = 0; j < iterations; ++j)
223 {
224 const LockedType id = (j % (idCycleSize / kThreadCount)) * kThreadCount + i;
225
226 ASSERT_LE(id, 0xFFFFu);
227 ASSERT_LE(j, 0xFFFFu);
228 const size_t value = id | j << 16;
229
230 size_t *valuePtr = reinterpret_cast<size_t *>(value);
231
232 const size_t *queryResult = resourceMap.query(id);
233 const bool containsResult = resourceMap.contains(id);
234
235 const bool expectContains = insertedIds[i].count(id) > 0;
236 if (expectContains)
237 {
238 EXPECT_TRUE(containsResult);
239 const LockedType queryResultInt =
240 static_cast<LockedType>(reinterpret_cast<size_t>(queryResult) & 0xFFFF);
241 const size_t queryResultIteration = reinterpret_cast<size_t>(queryResult) >> 16;
242 EXPECT_EQ(queryResultInt, id);
243 EXPECT_LT(queryResultIteration, j);
244
245 size_t *erasedValue = nullptr;
246 const bool erased = resourceMap.erase(id, &erasedValue);
247
248 EXPECT_TRUE(erased);
249 EXPECT_EQ(erasedValue, queryResult);
250
251 insertedIds[i].erase(id);
252 }
253 else
254 {
255 EXPECT_FALSE(containsResult);
256 EXPECT_EQ(queryResult, nullptr);
257
258 resourceMap.assign(id, valuePtr);
259 EXPECT_TRUE(resourceMap.contains(id));
260
261 ASSERT_TRUE(insertedIds[i].count(id) == 0);
262 insertedIds[i][id] = value;
263 }
264 }
265 });
266 }
267
268 for (size_t i = 0; i < kThreadCount; ++i)
269 {
270 threads[i].join();
271 }
272
273 // Verify that every value that is expected to be there is actually there
274 std::map<size_t, size_t> allIds;
275 size_t allIdsPrevSize = 0;
276
277 for (size_t i = 0; i < kThreadCount; ++i)
278 {
279 // Merge all the sets together. The sets are disjoint, which is verified by the ASSERT_EQ.
280 allIds.insert(insertedIds[i].begin(), insertedIds[i].end());
281 ASSERT_EQ(allIds.size(), allIdsPrevSize + insertedIds[i].size());
282 allIdsPrevSize = allIds.size();
283
284 // Make sure every id that is expected to be there is actually there.
285 for (auto &idValue : insertedIds[i])
286 {
287 EXPECT_TRUE(resourceMap.contains(idValue.first));
288 EXPECT_EQ(resourceMap.query(idValue.first), reinterpret_cast<size_t *>(idValue.second));
289 }
290 }
291
292 // Verify that every value that is NOT expected to be there isn't actually there
293 for (auto &idValue : UnsafeResourceMapIter(resourceMap))
294 {
295 EXPECT_TRUE(allIds.count(idValue.first) == 1);
296 EXPECT_EQ(idValue.second, reinterpret_cast<size_t *>(allIds[idValue.first]));
297 }
298
299 resourceMap.clear();
300 }
301
// Tests that concurrent access to thread-safe resource maps works for small ids that are mostly in
// the flat map range.
TEST(ResourceMapTest, ConcurrentAccessSmallIds)
{
    ConcurrentAccess(50'000, 128);
}
// Tests that concurrent access to thread-safe resource maps works for a wider range of ids,
// exercising growth beyond the flat range.
TEST(ResourceMapTest, ConcurrentAccessLargeIds)
{
    ConcurrentAccess(10'000, 20'000);
}
313 } // anonymous namespace
314