blob: 65b2a158f422f5d7e9d35730c3414e8a97dece04 [file] [log] [blame]
Austin Schuh20b2b082019-09-11 20:42:56 -07001#include "aos/ipc_lib/lockless_queue.h"
2
3#include <inttypes.h>
4#include <signal.h>
5#include <unistd.h>
6#include <wait.h>
7#include <chrono>
8#include <memory>
9#include <random>
10#include <thread>
11
12#include "aos/event.h"
13#include "aos/events/epoll.h"
Austin Schuh20b2b082019-09-11 20:42:56 -070014#include "aos/ipc_lib/aos_sync.h"
15#include "aos/ipc_lib/queue_racer.h"
16#include "aos/ipc_lib/signalfd.h"
Alex Perrycb7da4b2019-08-28 19:35:56 -070017#include "aos/realtime.h"
Austin Schuh20b2b082019-09-11 20:42:56 -070018#include "gflags/gflags.h"
19#include "gtest/gtest.h"
20
// Command-line knobs for the stress tests below.
DEFINE_int32(min_iterations, 100,
             "Minimum number of stress test iterations to run");
DEFINE_int32(duration, 5, "Number of seconds to test for");
DEFINE_int32(print_rate, 60, "Number of seconds between status prints");

// The roboRIO can only handle 10 threads before exploding. Set the default for
// ARM to 10.
DEFINE_int32(thread_count,
#if defined(__ARM_EABI__)
             10,
#else
             100,
#endif
             "Number of threads to race");
35
namespace aos {
namespace ipc_lib {
namespace testing {

// Shorthand used by the timing code in the stress tests.
namespace chrono = ::std::chrono;
// Test fixture which owns a zeroed chunk of memory sized and aligned for a
// LocklessQueue, plus helpers shared by all the tests below.
class LocklessQueueTest : public ::testing::Test {
 public:
  LocklessQueueTest() {
    config_.num_watchers = 10;
    config_.num_senders = 100;
    config_.queue_size = 10000;
    // Exercise the alignment code. This would throw off alignment.
    config_.message_data_size = 101;

    // Since our backing store is an array of uint64_t for alignment purposes,
    // normalize by the size.
    memory_.resize(LocklessQueueMemorySize(config_) / sizeof(uint64_t));

    Reset();
  }

  // Returns the backing store viewed as the queue's shared-memory layout.
  LocklessQueueMemory *get_memory() {
    return reinterpret_cast<LocklessQueueMemory *>(&(memory_[0]));
  }

  // Zeroes the backing store so each test starts with a pristine queue.
  void Reset() { memset(get_memory(), 0, LocklessQueueMemorySize(config_)); }

  // Runs until the signal is received.  Registers this thread as a watcher at
  // the given priority, signals *ready once registration is complete, and
  // returns after the wakeup signal arrives via the signalfd.
  void RunUntilWakeup(Event *ready, int priority) {
    LocklessQueue queue(get_memory(), config_);
    internal::EPoll epoll;
    SignalFd signalfd({kWakeupSignal});

    epoll.OnReadable(signalfd.fd(), [&signalfd, &epoll]() {
      signalfd_siginfo result = signalfd.Read();

      fprintf(stderr, "Got signal: %d\n", result.ssi_signo);
      epoll.Quit();
    });

    // Register to be woken up *after* the signalfd is catching the signals.
    queue.RegisterWakeup(priority);

    // And signal we are now ready.
    ready->Set();

    epoll.Run();

    // Cleanup.
    queue.UnregisterWakeup();
    epoll.DeleteFd(signalfd.fd());
  }

  // Use a type with enough alignment that we are guaranteed that everything
  // will be aligned properly on the target platform.
  ::std::vector<uint64_t> memory_;

  LocklessQueueConfiguration config_;
};
96
97typedef LocklessQueueTest LocklessQueueDeathTest;
98
99// Tests that wakeup doesn't do anything if nothing was registered.
100TEST_F(LocklessQueueTest, NoWatcherWakeup) {
101 LocklessQueue queue(get_memory(), config_);
102
103 EXPECT_EQ(queue.Wakeup(7), 0);
104}
105
106// Tests that wakeup doesn't do anything if a wakeup was registered and then
107// unregistered.
108TEST_F(LocklessQueueTest, UnregisteredWatcherWakeup) {
109 LocklessQueue queue(get_memory(), config_);
110
111 queue.RegisterWakeup(5);
112 queue.UnregisterWakeup();
113
114 EXPECT_EQ(queue.Wakeup(7), 0);
115}
116
// Tests that wakeup doesn't do anything if the thread dies.  The short-lived
// thread registers a wakeup and then exits without unregistering, simulating
// a crashed watcher; Wakeup from the surviving queue should skip it.
TEST_F(LocklessQueueTest, DiedWatcherWakeup) {
  LocklessQueue queue(get_memory(), config_);

  ::std::thread([this]() {
    // Use placement new so the destructor doesn't get run.  This leaves the
    // watcher slot registered when the thread exits, like a crash would.
    ::std::aligned_storage<sizeof(LocklessQueue), alignof(LocklessQueue)>::type
        data;
    LocklessQueue *q = new (&data) LocklessQueue(get_memory(), config_);
    // Register a wakeup.
    q->RegisterWakeup(5);
  }).join();

  // The registered watcher's thread is dead, so nobody gets woken.
  EXPECT_EQ(queue.Wakeup(7), 0);
}
132
// Per-thread state for the TooManyWatchers test below.
struct WatcherState {
  // The watcher thread itself.
  ::std::thread t;
  // Set once the thread has registered its wakeup and is running.
  Event ready;
};
137
// Tests that too many watchers fails like expected.
TEST_F(LocklessQueueTest, TooManyWatchers) {
  // This is going to be a barrel of monkeys.
  // We need to spin up a bunch of watchers. But, they all need to be in
  // different threads so they have different tids.
  ::std::vector<WatcherState> queues;
  // Reserve num_watchers WatcherState objects so the pointer value doesn't
  // change out from under us below.
  queues.reserve(config_.num_watchers);

  // Event used to trigger all the threads to unregister.
  Event cleanup;

  // Start all the threads.
  for (size_t i = 0; i < config_.num_watchers; ++i) {
    queues.emplace_back();

    WatcherState *s = &queues.back();
    queues.back().t = ::std::thread([this, &cleanup, s]() {
      LocklessQueue q(get_memory(), config_);
      EXPECT_TRUE(q.RegisterWakeup(0));

      // Signal that this thread is ready.
      s->ready.Set();

      // And wait until we are asked to shut down.
      cleanup.Wait();

      q.UnregisterWakeup();
    });
  }

  // Wait until all the threads are actually going.
  for (WatcherState &w : queues) {
    w.ready.Wait();
  }

  // Now try to allocate another one. This will fail since every watcher slot
  // is taken.
  {
    LocklessQueue queue(get_memory(), config_);
    EXPECT_FALSE(queue.RegisterWakeup(0));
  }

  // Trigger the threads to cleanup their resources, and wait until they are
  // done.
  cleanup.Set();
  for (WatcherState &w : queues) {
    w.t.join();
  }

  // We should now be able to allocate a wakeup.
  {
    LocklessQueue queue(get_memory(), config_);
    EXPECT_TRUE(queue.RegisterWakeup(0));
    queue.UnregisterWakeup();
  }
}
195
// Tests that creating too many senders fails like expected.
TEST_F(LocklessQueueTest, TooManySenders) {
  ::std::vector<::std::unique_ptr<LocklessQueue>> queues;
  ::std::vector<LocklessQueue::Sender> senders;
  // Fill every sender slot in the shared memory.
  for (size_t i = 0; i < config_.num_senders; ++i) {
    queues.emplace_back(new LocklessQueue(get_memory(), config_));
    senders.emplace_back(queues.back()->MakeSender().value());
  }
  // With no free slots left, the next MakeSender() must fail.
  queues.emplace_back(new LocklessQueue(get_memory(), config_));
  EXPECT_FALSE(queues.back()->MakeSender());
}
207
// Now, start 2 threads and have them receive the signals.
TEST_F(LocklessQueueTest, WakeUpThreads) {
  // Confirm that the wakeup signal is in range.
  EXPECT_LE(kWakeupSignal, SIGRTMAX);
  EXPECT_GE(kWakeupSignal, SIGRTMIN);

  LocklessQueue queue(get_memory(), config_);

  // Event used to make sure the thread is ready before the test starts.
  Event ready1;
  Event ready2;

  // Start the threads; each registers itself as a watcher and blocks until
  // the wakeup signal arrives.
  ::std::thread t1([this, &ready1]() { RunUntilWakeup(&ready1, 5); });
  ::std::thread t2([this, &ready2]() { RunUntilWakeup(&ready2, 4); });

  ready1.Wait();
  ready2.Wait();

  // Both watchers are registered, so exactly 2 threads should be woken.
  EXPECT_EQ(queue.Wakeup(3), 2);

  t1.join();
  t2.join();

  // Clean up afterwards. We are pretending to be RT when we are really not.
  // So we will be PI boosted up.
  UnsetCurrentThreadRealtimePriority();
}
236
// Do a simple send test: send enough messages to wrap the queue, checking the
// latest queue index after each send and reading back a recent message.
TEST_F(LocklessQueueTest, Send) {
  LocklessQueue queue(get_memory(), config_);

  LocklessQueue::Sender sender = queue.MakeSender().value();

  // Send enough messages to wrap.
  for (int i = 0; i < 20000; ++i) {
    // Confirm that the queue index makes sense given the number of sends.
    EXPECT_EQ(queue.LatestQueueIndex().index(),
              i == 0 ? LocklessQueue::empty_queue_index().index() : i - 1);

    // Send a trivial piece of data.
    char data[100];
    size_t s = snprintf(data, sizeof(data), "foobar%d", i);
    sender.Send(data, s);

    // Confirm that the queue index still makes sense. This is easier since the
    // empty case has been handled.
    EXPECT_EQ(queue.LatestQueueIndex().index(), i);

    // Read a result from 5 in the past.
    ::aos::monotonic_clock::time_point monotonic_sent_time;
    ::aos::realtime_clock::time_point realtime_sent_time;
    ::aos::monotonic_clock::time_point monotonic_remote_time;
    ::aos::realtime_clock::time_point realtime_remote_time;
    uint32_t remote_queue_index;
    char read_data[1024];
    size_t length;

    // Build the queue index for i - 5; for the first few iterations that
    // would go negative, so decrement from zero instead.
    QueueIndex index = QueueIndex::Zero(config_.queue_size);
    if (i - 5 < 0) {
      index = index.DecrementBy(5 - i);
    } else {
      index = index.IncrementBy(i - 5);
    }
    LocklessQueue::ReadResult read_result =
        queue.Read(index.index(), &monotonic_sent_time, &realtime_sent_time,
                   &monotonic_remote_time, &realtime_remote_time,
                   &remote_queue_index, &length, &(read_data[0]));

    // This should either return GOOD, or TOO_OLD if it is before the start of
    // the queue.
    if (read_result != LocklessQueue::ReadResult::GOOD) {
      EXPECT_EQ(read_result, LocklessQueue::ReadResult::TOO_OLD);
    }
  }
}
285
// Races a bunch of sending threads to see if it all works.
TEST_F(LocklessQueueTest, SendRace) {
  const size_t kNumMessages = 10000 / FLAGS_thread_count;

  // Fixed seed so any failure reproduces deterministically.
  ::std::mt19937 generator(0);
  ::std::uniform_int_distribution<> write_wrap_count_distribution(0, 10);
  ::std::bernoulli_distribution race_reads_distribution;
  ::std::bernoulli_distribution wrap_writes_distribution;

  const chrono::seconds print_frequency(FLAGS_print_rate);

  QueueRacer racer(get_memory(), FLAGS_thread_count, kNumMessages, config_);
  const monotonic_clock::time_point start_time =
      monotonic_clock::now();
  const monotonic_clock::time_point end_time =
      start_time + chrono::seconds(FLAGS_duration);

  monotonic_clock::time_point monotonic_now = start_time;
  monotonic_clock::time_point next_print_time = start_time + print_frequency;
  uint64_t messages = 0;
  // Run at least min_iterations iterations AND at least duration seconds.
  for (int i = 0; i < FLAGS_min_iterations || monotonic_now < end_time; ++i) {
    bool race_reads = race_reads_distribution(generator);
    int write_wrap_count = write_wrap_count_distribution(generator);
    // Only wrap writes on a random subset of iterations.
    if (!wrap_writes_distribution(generator)) {
      write_wrap_count = 0;
    }
    EXPECT_NO_FATAL_FAILURE(racer.RunIteration(race_reads, write_wrap_count))
        << ": Running with race_reads: " << race_reads
        << ", and write_wrap_count " << write_wrap_count << " and on iteration "
        << i;

    messages += racer.CurrentIndex();

    monotonic_now = monotonic_clock::now();
    // Periodically report throughput so long runs show progress.
    if (monotonic_now > next_print_time) {
      double elapsed_seconds = chrono::duration_cast<chrono::duration<double>>(
                                   monotonic_now - start_time)
                                   .count();
      printf("Finished iteration %d, %f iterations/sec, %f messages/second\n",
             i, i / elapsed_seconds,
             static_cast<double>(messages) / elapsed_seconds);
      next_print_time = monotonic_now + print_frequency;
    }
  }
}
331
// Send enough messages to wrap the 32 bit send counter.
TEST_F(LocklessQueueTest, WrappedSend) {
  // 2^32 + 2^16 messages: enough to wrap the 32-bit counter with some slack.
  uint64_t kNumMessages = 0x100010000ul;
  QueueRacer racer(get_memory(), 1, kNumMessages, config_);

  const monotonic_clock::time_point start_time = monotonic_clock::now();
  EXPECT_NO_FATAL_FAILURE(racer.RunIteration(false, 0));
  const monotonic_clock::time_point monotonic_now = monotonic_clock::now();
  double elapsed_seconds = chrono::duration_cast<chrono::duration<double>>(
                               monotonic_now - start_time)
                               .count();
  printf("Took %f seconds to write %" PRIu64 " messages, %f messages/s\n",
         elapsed_seconds, kNumMessages,
         static_cast<double>(kNumMessages) / elapsed_seconds);
}
347
348} // namespace testing
349} // namespace ipc_lib
350} // namespace aos