// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// A bunch of threads repeatedly hash an array of ints protected by a
// spinlock. If the spinlock is working properly, all elements of the
// array should be equal at the end of the test.

#include <cstdint>
#include <limits>
#include <random>
#include <thread>  // NOLINT(build/c++11)
#include <vector>

#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/internal/low_level_scheduling.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/sysinfo.h"
#include "absl/base/macros.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/synchronization/notification.h"

constexpr int32_t kNumThreads = 10;
constexpr int32_t kIters = 1000;

namespace absl {
namespace base_internal {

// This is defined outside of anonymous namespace so that it can be
// a friend of SpinLock to access protected methods for testing.
struct SpinLockTest {
  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
                                   int64_t wait_end_time) {
    return SpinLock::EncodeWaitCycles(wait_start_time, wait_end_time);
  }
  static uint64_t DecodeWaitCycles(uint32_t lock_value) {
    return SpinLock::DecodeWaitCycles(lock_value);
  }
};

namespace {

static constexpr int kArrayLength = 10;
static uint32_t values[kArrayLength];

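// These spinlocks are linker-initialized: kLinkerInitialized requests an
// instance living in zero-initialized static storage, so it is usable even
// before dynamic initializers have run.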
static SpinLock static_spinlock(base_internal::kLinkerInitialized);
static SpinLock static_cooperative_spinlock(
    base_internal::kLinkerInitialized,
    base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
static SpinLock static_noncooperative_spinlock(
    base_internal::kLinkerInitialized, base_internal::SCHEDULE_KERNEL_ONLY);

// Simple integer hash function based on the public domain lookup2 hash.
// http://burtleburtle.net/bob/c/lookup2.c
static uint32_t Hash32(uint32_t a, uint32_t c) {
  uint32_t b = 0x9e3779b9UL;  // The golden ratio; an arbitrary value.
  a -= b; a -= c; a ^= (c >> 13);
  b -= c; b -= a; b ^= (a << 8);
  c -= a; c -= b; c ^= (b >> 13);
  a -= b; a -= c; a ^= (c >> 12);
  b -= c; b -= a; b ^= (a << 16);
  c -= a; c -= b; c ^= (b >> 5);
  a -= b; a -= c; a ^= (c >> 3);
  b -= c; b -= a; b ^= (a << 10);
  c -= a; c -= b; c ^= (b >> 15);
  return c;
}

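// Each thread repeatedly hashes every array element while holding the lock,
// starting at an offset derived from thread_salt and yielding between writes
// so that other threads are likely to contend mid-update. If mutual exclusion
// were broken, interleaved read-modify-write cycles would leave the elements
// with diverging values.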
static void TestFunction(int thread_salt, SpinLock* spinlock) {
  for (int i = 0; i < kIters; i++) {
    SpinLockHolder h(spinlock);
    for (int j = 0; j < kArrayLength; j++) {
      const int index = (j + thread_salt) % kArrayLength;
      values[index] = Hash32(values[index], thread_salt);
      std::this_thread::yield();
    }
  }
}

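// Runs TestFunction on kNumThreads threads with distinct salts, joins them,
// and then checks that every element of `values` ended up identical.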
static void ThreadedTest(SpinLock* spinlock) {
  std::vector<std::thread> threads;
  for (int i = 0; i < kNumThreads; ++i) {
    threads.push_back(std::thread(TestFunction, i, spinlock));
  }
  for (auto& thread : threads) {
    thread.join();
  }

  SpinLockHolder h(spinlock);
  for (int i = 1; i < kArrayLength; i++) {
    EXPECT_EQ(values[0], values[i]);
  }
}

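// SCHEDULE_KERNEL_ONLY (non-cooperative) spinlocks should disable cooperative
// rescheduling while held; holding one must make SchedulingGuard report that
// rescheduling is not allowed.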
TEST(SpinLock, StackNonCooperativeDisablesScheduling) {
  SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
  spinlock.Lock();
  EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed());
  spinlock.Unlock();
}

TEST(SpinLock, StaticNonCooperativeDisablesScheduling) {
  static_noncooperative_spinlock.Lock();
  EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed());
  static_noncooperative_spinlock.Unlock();
}

TEST(SpinLock, WaitCyclesEncoding) {
  // These are implementation details not exported by SpinLock.
  const int kProfileTimestampShift = 7;
  const int kLockwordReservedShift = 3;
  const uint32_t kSpinLockSleeper = 8;

  // We should be able to encode up to (1 << kMaxCyclesShift) - 1 without
  // clamping, but the lower kProfileTimestampShift bits will be dropped.
  const int kMaxCyclesShift =
      32 - kLockwordReservedShift + kProfileTimestampShift;
  const uint64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1;

  // These bits should be zero after encoding.
  const uint32_t kLockwordReservedMask = (1 << kLockwordReservedShift) - 1;

  // These bits are dropped when wait cycles are encoded.
  const uint64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1;
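
  // A worked example with these constants (mirroring the arithmetic that the
  // assertions below rely on, not necessarily the exact implementation): a
  // wait of 1000 cycles scales to 1000 >> kProfileTimestampShift = 7 and is
  // stored as 7 << kLockwordReservedShift = 56 in the lock word; decoding
  // restores 7 << kProfileTimestampShift = 896, which is exactly
  // 1000 & ~kProfileTimestampMask.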

  // Test a bunch of random values.
  std::default_random_engine generator;
  // Shift to avoid overflow below.
  std::uniform_int_distribution<uint64_t> time_distribution(
      0, std::numeric_limits<uint64_t>::max() >> 4);
  std::uniform_int_distribution<uint64_t> cycle_distribution(0, kMaxCycles);

  for (int i = 0; i < 100; i++) {
    int64_t start_time = time_distribution(generator);
    int64_t cycles = cycle_distribution(generator);
    int64_t end_time = start_time + cycles;
    uint32_t lock_value = SpinLockTest::EncodeWaitCycles(start_time, end_time);
    EXPECT_EQ(0, lock_value & kLockwordReservedMask);
    uint64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value);
    EXPECT_EQ(0, decoded & kProfileTimestampMask);
    EXPECT_EQ(cycles & ~kProfileTimestampMask, decoded);
  }

  // Test corner cases.
  int64_t start_time = time_distribution(generator);
  EXPECT_EQ(kSpinLockSleeper,
            SpinLockTest::EncodeWaitCycles(start_time, start_time));
  EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(0));
  EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(kLockwordReservedMask));
  EXPECT_EQ(kMaxCycles & ~kProfileTimestampMask,
            SpinLockTest::DecodeWaitCycles(~kLockwordReservedMask));

  // Check that we cannot produce kSpinLockSleeper during encoding: that value
  // is reserved to mark a sleeping waiter (it is what a zero-length wait
  // encodes to above), so a real nonzero wait must never collide with it.
  int64_t sleeper_cycles =
      kSpinLockSleeper << (kProfileTimestampShift - kLockwordReservedShift);
  uint32_t sleeper_value =
      SpinLockTest::EncodeWaitCycles(start_time, start_time + sleeper_cycles);
  EXPECT_NE(sleeper_value, kSpinLockSleeper);

  // Test clamping.
  uint32_t max_value =
      SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles);
  uint64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value);
  uint64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask;
  EXPECT_EQ(expected_max_value_decoded, max_value_decoded);

  const int64_t step = (1 << kProfileTimestampShift);
  uint32_t after_max_value =
      SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles + step);
  uint64_t after_max_value_decoded =
      SpinLockTest::DecodeWaitCycles(after_max_value);
  EXPECT_EQ(expected_max_value_decoded, after_max_value_decoded);

  uint32_t before_max_value = SpinLockTest::EncodeWaitCycles(
      start_time, start_time + kMaxCycles - step);
  uint64_t before_max_value_decoded =
      SpinLockTest::DecodeWaitCycles(before_max_value);
  EXPECT_GT(expected_max_value_decoded, before_max_value_decoded);
}

TEST(SpinLockWithThreads, StaticSpinLock) {
  ThreadedTest(&static_spinlock);
}

TEST(SpinLockWithThreads, StackSpinLock) {
  SpinLock spinlock;
  ThreadedTest(&spinlock);
}

TEST(SpinLockWithThreads, StackCooperativeSpinLock) {
  SpinLock spinlock(base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
  ThreadedTest(&spinlock);
}

TEST(SpinLockWithThreads, StackNonCooperativeSpinLock) {
  SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
  ThreadedTest(&spinlock);
}

TEST(SpinLockWithThreads, StaticCooperativeSpinLock) {
  ThreadedTest(&static_cooperative_spinlock);
}

TEST(SpinLockWithThreads, StaticNonCooperativeSpinLock) {
  ThreadedTest(&static_noncooperative_spinlock);
}

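// Checks for lost wakeups: LockThenWait() holds the lock until every spinner
// is about to contend for it, then releases it while many threads (twice the
// CPU count) race to acquire it. If a sleeping waiter were never woken on
// unlock, the joins in DeadlockTest() would hang and the test would time out.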
TEST(SpinLockWithThreads, DoesNotDeadlock) {
  struct Helper {
    static void NotifyThenLock(Notification* locked, SpinLock* spinlock,
                               BlockingCounter* b) {
      // Wait for LockThenWait() to hold "spinlock".
      locked->WaitForNotification();
      b->DecrementCount();
      SpinLockHolder l(spinlock);
    }

    static void LockThenWait(Notification* locked, SpinLock* spinlock,
                             BlockingCounter* b) {
      SpinLockHolder l(spinlock);
      locked->Notify();
      b->Wait();
    }

    static void DeadlockTest(SpinLock* spinlock, int num_spinners) {
      Notification locked;
      BlockingCounter counter(num_spinners);
      std::vector<std::thread> threads;

      threads.push_back(
          std::thread(Helper::LockThenWait, &locked, spinlock, &counter));
      for (int i = 0; i < num_spinners; ++i) {
        threads.push_back(
            std::thread(Helper::NotifyThenLock, &locked, spinlock, &counter));
      }

      for (auto& thread : threads) {
        thread.join();
      }
    }
  };

  SpinLock stack_cooperative_spinlock(
      base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
  SpinLock stack_noncooperative_spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
  Helper::DeadlockTest(&stack_cooperative_spinlock,
                       base_internal::NumCPUs() * 2);
  Helper::DeadlockTest(&stack_noncooperative_spinlock,
                       base_internal::NumCPUs() * 2);
  Helper::DeadlockTest(&static_cooperative_spinlock,
                       base_internal::NumCPUs() * 2);
  Helper::DeadlockTest(&static_noncooperative_spinlock,
                       base_internal::NumCPUs() * 2);
}

}  // namespace
}  // namespace base_internal
}  // namespace absl