// xref: /aosp_15_r20/external/abseil-cpp/absl/base/spinlock_test_common.cc
// (revision 9356374a3709195abf420251b3e825997ff56c0f)
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// A bunch of threads repeatedly hash an array of ints protected by a
// spinlock.  If the spinlock is working properly, all elements of the
// array should be equal at the end of the test.
19 #include <cstdint>
20 #include <limits>
21 #include <random>
22 #include <thread>  // NOLINT(build/c++11)
23 #include <type_traits>
24 #include <vector>
25 
26 #include "gtest/gtest.h"
27 #include "absl/base/attributes.h"
28 #include "absl/base/config.h"
29 #include "absl/base/internal/low_level_scheduling.h"
30 #include "absl/base/internal/scheduling_mode.h"
31 #include "absl/base/internal/spinlock.h"
32 #include "absl/base/internal/sysinfo.h"
33 #include "absl/base/macros.h"
34 #include "absl/synchronization/blocking_counter.h"
35 #include "absl/synchronization/notification.h"
36 
// Number of worker threads spawned by each threaded test below.
constexpr uint32_t kNumThreads = 10;
// Number of lock/hash iterations each worker thread performs.
constexpr int32_t kIters = 1000;
39 
40 namespace absl {
41 ABSL_NAMESPACE_BEGIN
42 namespace base_internal {
43 
44 // This is defined outside of anonymous namespace so that it can be
45 // a friend of SpinLock to access protected methods for testing.
46 struct SpinLockTest {
EncodeWaitCyclesabsl::base_internal::SpinLockTest47   static uint32_t EncodeWaitCycles(int64_t wait_start_time,
48                                    int64_t wait_end_time) {
49     return SpinLock::EncodeWaitCycles(wait_start_time, wait_end_time);
50   }
DecodeWaitCyclesabsl::base_internal::SpinLockTest51   static int64_t DecodeWaitCycles(uint32_t lock_value) {
52     return SpinLock::DecodeWaitCycles(lock_value);
53   }
54 
IsCooperativeabsl::base_internal::SpinLockTest55   static bool IsCooperative(const SpinLock& l) { return l.IsCooperative(); }
56 };
57 
58 namespace {
59 
60 static constexpr size_t kArrayLength = 10;
61 static uint32_t values[kArrayLength];
62 
63 ABSL_CONST_INIT static SpinLock static_cooperative_spinlock(
64     absl::kConstInit, base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
65 ABSL_CONST_INIT static SpinLock static_noncooperative_spinlock(
66     absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
67 
68 // Simple integer hash function based on the public domain lookup2 hash.
69 // http://burtleburtle.net/bob/c/lookup2.c
// Simple integer hash function based on the public domain lookup2 hash.
// http://burtleburtle.net/bob/c/lookup2.c
// Mixes `a` and `c` (all arithmetic is mod 2^32) and returns the mixed
// value of c; used below to evolve the shared array deterministically.
static uint32_t Hash32(uint32_t a, uint32_t c) {
  uint32_t b = 0x9e3779b9UL;  // The golden ratio; an arbitrary value.
  a -= b; a -= c; a ^= (c >> 13);
  b -= c; b -= a; b ^= (a << 8);
  c -= a; c -= b; c ^= (b >> 13);
  a -= b; a -= c; a ^= (c >> 12);
  b -= c; b -= a; b ^= (a << 16);
  c -= a; c -= b; c ^= (b >> 5);
  a -= b; a -= c; a ^= (c >> 3);
  b -= c; b -= a; b ^= (a << 10);
  c -= a; c -= b; c ^= (b >> 15);
  return c;
}
83 
TestFunction(uint32_t thread_salt,SpinLock * spinlock)84 static void TestFunction(uint32_t thread_salt, SpinLock* spinlock) {
85   for (int i = 0; i < kIters; i++) {
86     SpinLockHolder h(spinlock);
87     for (size_t j = 0; j < kArrayLength; j++) {
88       const size_t index = (j + thread_salt) % kArrayLength;
89       values[index] = Hash32(values[index], thread_salt);
90       std::this_thread::yield();
91     }
92   }
93 }
94 
ThreadedTest(SpinLock * spinlock)95 static void ThreadedTest(SpinLock* spinlock) {
96   std::vector<std::thread> threads;
97   threads.reserve(kNumThreads);
98   for (uint32_t i = 0; i < kNumThreads; ++i) {
99     threads.push_back(std::thread(TestFunction, i, spinlock));
100   }
101   for (auto& thread : threads) {
102     thread.join();
103   }
104 
105   SpinLockHolder h(spinlock);
106   for (size_t i = 1; i < kArrayLength; i++) {
107     EXPECT_EQ(values[0], values[i]);
108   }
109 }
110 
111 #ifndef ABSL_HAVE_THREAD_SANITIZER
112 static_assert(std::is_trivially_destructible<SpinLock>(), "");
113 #endif
114 
// While a stack-allocated SCHEDULE_KERNEL_ONLY lock is held, the
// cooperative scheduler must not be allowed to reschedule the holder.
TEST(SpinLock, StackNonCooperativeDisablesScheduling) {
  SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
  spinlock.Lock();
  EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed());
  spinlock.Unlock();
}
121 
// Same as above, but for the statically (kConstInit) constructed
// non-cooperative lock.
TEST(SpinLock, StaticNonCooperativeDisablesScheduling) {
  static_noncooperative_spinlock.Lock();
  EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed());
  static_noncooperative_spinlock.Unlock();
}
127 
TEST(SpinLock,WaitCyclesEncoding)128 TEST(SpinLock, WaitCyclesEncoding) {
129   // These are implementation details not exported by SpinLock.
130   const int kProfileTimestampShift = 7;
131   const int kLockwordReservedShift = 3;
132   const uint32_t kSpinLockSleeper = 8;
133 
134   // We should be able to encode up to (1^kMaxCycleBits - 1) without clamping
135   // but the lower kProfileTimestampShift will be dropped.
136   const int kMaxCyclesShift =
137     32 - kLockwordReservedShift + kProfileTimestampShift;
138   const int64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1;
139 
140   // These bits should be zero after encoding.
141   const uint32_t kLockwordReservedMask = (1 << kLockwordReservedShift) - 1;
142 
143   // These bits are dropped when wait cycles are encoded.
144   const int64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1;
145 
146   // Test a bunch of random values
147   std::default_random_engine generator;
148   // Shift to avoid overflow below.
149   std::uniform_int_distribution<int64_t> time_distribution(
150       0, std::numeric_limits<int64_t>::max() >> 3);
151   std::uniform_int_distribution<int64_t> cycle_distribution(0, kMaxCycles);
152 
153   for (int i = 0; i < 100; i++) {
154     int64_t start_time = time_distribution(generator);
155     int64_t cycles = cycle_distribution(generator);
156     int64_t end_time = start_time + cycles;
157     uint32_t lock_value = SpinLockTest::EncodeWaitCycles(start_time, end_time);
158     EXPECT_EQ(0u, lock_value & kLockwordReservedMask);
159     int64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value);
160     EXPECT_EQ(0, decoded & kProfileTimestampMask);
161     EXPECT_EQ(cycles & ~kProfileTimestampMask, decoded);
162   }
163 
164   // Test corner cases
165   int64_t start_time = time_distribution(generator);
166   EXPECT_EQ(kSpinLockSleeper,
167             SpinLockTest::EncodeWaitCycles(start_time, start_time));
168   EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(0));
169   EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(kLockwordReservedMask));
170   EXPECT_EQ(kMaxCycles & ~kProfileTimestampMask,
171             SpinLockTest::DecodeWaitCycles(~kLockwordReservedMask));
172 
173   // Check that we cannot produce kSpinLockSleeper during encoding.
174   int64_t sleeper_cycles =
175       kSpinLockSleeper << (kProfileTimestampShift - kLockwordReservedShift);
176   uint32_t sleeper_value =
177       SpinLockTest::EncodeWaitCycles(start_time, start_time + sleeper_cycles);
178   EXPECT_NE(sleeper_value, kSpinLockSleeper);
179 
180   // Test clamping
181   uint32_t max_value =
182     SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles);
183   int64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value);
184   int64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask;
185   EXPECT_EQ(expected_max_value_decoded, max_value_decoded);
186 
187   const int64_t step = (1 << kProfileTimestampShift);
188   uint32_t after_max_value =
189     SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles + step);
190   int64_t after_max_value_decoded =
191       SpinLockTest::DecodeWaitCycles(after_max_value);
192   EXPECT_EQ(expected_max_value_decoded, after_max_value_decoded);
193 
194   uint32_t before_max_value = SpinLockTest::EncodeWaitCycles(
195       start_time, start_time + kMaxCycles - step);
196   int64_t before_max_value_decoded =
197       SpinLockTest::DecodeWaitCycles(before_max_value);
198   EXPECT_GT(expected_max_value_decoded, before_max_value_decoded);
199 }
200 
// Threaded hammering of a default-constructed, stack-allocated lock.
TEST(SpinLockWithThreads, StackSpinLock) {
  SpinLock spinlock;
  ThreadedTest(&spinlock);
}
205 
// Threaded hammering of a stack-allocated cooperative lock.
TEST(SpinLockWithThreads, StackCooperativeSpinLock) {
  SpinLock spinlock(base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
  ThreadedTest(&spinlock);
}
210 
// Threaded hammering of a stack-allocated kernel-only (non-cooperative)
// lock.
TEST(SpinLockWithThreads, StackNonCooperativeSpinLock) {
  SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
  ThreadedTest(&spinlock);
}
215 
// Threaded hammering of the kConstInit-constructed cooperative lock.
TEST(SpinLockWithThreads, StaticCooperativeSpinLock) {
  ThreadedTest(&static_cooperative_spinlock);
}
219 
// Threaded hammering of the kConstInit-constructed non-cooperative lock.
TEST(SpinLockWithThreads, StaticNonCooperativeSpinLock) {
  ThreadedTest(&static_noncooperative_spinlock);
}
223 
// Forces many threads to contend for a held lock simultaneously, for each
// lock flavor.  The test passes iff every spinner eventually acquires the
// lock and all threads join (i.e. no deadlock or lost wakeup).
TEST(SpinLockWithThreads, DoesNotDeadlock) {
  struct Helper {
    // Waits until LockThenWait() holds the lock, reports readiness via the
    // counter, then contends for the lock.
    static void NotifyThenLock(Notification* locked, SpinLock* spinlock,
                               BlockingCounter* b) {
      locked->WaitForNotification();  // Wait for LockThenWait() to hold "s".
      b->DecrementCount();
      SpinLockHolder l(spinlock);
    }

    // Acquires the lock, then holds it until every spinner has reported in,
    // guaranteeing all spinners block on a held lock at once.
    static void LockThenWait(Notification* locked, SpinLock* spinlock,
                             BlockingCounter* b) {
      SpinLockHolder l(spinlock);
      locked->Notify();
      b->Wait();
    }

    // Spawns one holder thread plus num_spinners contenders and joins them
    // all; a deadlock would hang the test.
    static void DeadlockTest(SpinLock* spinlock, int num_spinners) {
      Notification locked;
      BlockingCounter counter(num_spinners);
      std::vector<std::thread> threads;
      threads.reserve(1 + num_spinners);

      threads.emplace_back(Helper::LockThenWait, &locked, spinlock, &counter);
      for (int i = 0; i < num_spinners; ++i) {
        threads.emplace_back(Helper::NotifyThenLock, &locked, spinlock,
                             &counter);
      }

      for (auto& thread : threads) {
        thread.join();
      }
    }
  };

  SpinLock stack_cooperative_spinlock(
      base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
  SpinLock stack_noncooperative_spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
  Helper::DeadlockTest(&stack_cooperative_spinlock,
                       base_internal::NumCPUs() * 2);
  Helper::DeadlockTest(&stack_noncooperative_spinlock,
                       base_internal::NumCPUs() * 2);
  Helper::DeadlockTest(&static_cooperative_spinlock,
                       base_internal::NumCPUs() * 2);
  Helper::DeadlockTest(&static_noncooperative_spinlock,
                       base_internal::NumCPUs() * 2);
}
270 
// Verifies which constructors yield a cooperative lock: the default
// constructor and SCHEDULE_COOPERATIVE_AND_KERNEL do; SCHEDULE_KERNEL_ONLY
// does not.
TEST(SpinLockTest, IsCooperative) {
  SpinLock default_constructor;
  EXPECT_TRUE(SpinLockTest::IsCooperative(default_constructor));

  SpinLock cooperative(base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
  EXPECT_TRUE(SpinLockTest::IsCooperative(cooperative));

  SpinLock kernel_only(base_internal::SCHEDULE_KERNEL_ONLY);
  EXPECT_FALSE(SpinLockTest::IsCooperative(kernel_only));
}
281 
282 }  // namespace
283 }  // namespace base_internal
284 ABSL_NAMESPACE_END
285 }  // namespace absl
286