blob: 109c2ea7599fe5f72e73d000214e8f6ee0db78c5 [file] [log] [blame]
Austin Schuh20b2b082019-09-11 20:42:56 -07001#include "aos/ipc_lib/lockless_queue.h"
2
3#include <inttypes.h>
4#include <signal.h>
5#include <unistd.h>
6#include <wait.h>
7#include <chrono>
8#include <memory>
9#include <random>
10#include <thread>
11
12#include "aos/event.h"
13#include "aos/events/epoll.h"
Austin Schuh20b2b082019-09-11 20:42:56 -070014#include "aos/ipc_lib/aos_sync.h"
15#include "aos/ipc_lib/queue_racer.h"
16#include "aos/ipc_lib/signalfd.h"
Alex Perrycb7da4b2019-08-28 19:35:56 -070017#include "aos/realtime.h"
Austin Schuh20b2b082019-09-11 20:42:56 -070018#include "gflags/gflags.h"
19#include "gtest/gtest.h"
20
// Stress-test tuning knobs, overridable from the command line.
DEFINE_int32(min_iterations, 100,
             "Minimum number of stress test iterations to run");
DEFINE_int32(duration, 5, "Number of seconds to test for");
DEFINE_int32(print_rate, 60, "Number of seconds between status prints");

// The roboRIO can only handle 10 threads before exploding.  Set the default
// for ARM to 10.
DEFINE_int32(thread_count,
#if defined(__ARM_EABI__)
             10,
#else
             100,
#endif
             "Number of threads to race");
35
36namespace aos {
37namespace ipc_lib {
38namespace testing {
39
// Shorthand for the duration math in the timing loops below.
namespace chrono = ::std::chrono;
41
// Test fixture which owns a zeroed chunk of aligned backing memory and the
// LocklessQueueConfiguration used to construct queues on top of it.
class LocklessQueueTest : public ::testing::Test {
 public:
  LocklessQueueTest() {
    config_.num_watchers = 10;
    config_.num_senders = 100;
    config_.queue_size = 10000;
    // Exercise the alignment code.  This would throw off alignment.
    config_.message_data_size = 101;

    // Since our backing store is an array of uint64_t for alignment purposes,
    // normalize by the size.
    memory_.resize(LocklessQueueMemorySize(config_) / sizeof(uint64_t));

    Reset();
  }

  // Returns the backing store, cast to the type the queue implementation
  // expects.
  LocklessQueueMemory *get_memory() {
    return reinterpret_cast<LocklessQueueMemory *>(&(memory_[0]));
  }

  // Zeroes the backing store so each test starts from freshly-initialized
  // queue memory.
  void Reset() { memset(get_memory(), 0, LocklessQueueMemorySize(config_)); }

  // Runs until the signal is received.
  //
  // Registers a watcher at the given priority, signals *ready once the
  // watcher is registered, then runs an epoll loop on the calling thread
  // until kWakeupSignal arrives via a signalfd.  Unregisters on the way out.
  void RunUntilWakeup(Event *ready, int priority) {
    LocklessQueue queue(get_memory(), config_);
    internal::EPoll epoll;
    SignalFd signalfd({kWakeupSignal});

    epoll.OnReadable(signalfd.fd(), [&signalfd, &epoll]() {
      signalfd_siginfo result = signalfd.Read();

      fprintf(stderr, "Got signal: %d\n", result.ssi_signo);
      epoll.Quit();
    });

    // Register to be woken up *after* the signalfd is catching the signals.
    queue.RegisterWakeup(priority);

    // And signal we are now ready.
    ready->Set();

    epoll.Run();

    // Cleanup.
    queue.UnregisterWakeup();
    epoll.DeleteFd(signalfd.fd());
  }

  // Use a type with enough alignment that we are guaranteed that everything
  // will be aligned properly on the target platform.
  ::std::vector<uint64_t> memory_;

  LocklessQueueConfiguration config_;
};
96
97typedef LocklessQueueTest LocklessQueueDeathTest;
98
99// Tests that wakeup doesn't do anything if nothing was registered.
100TEST_F(LocklessQueueTest, NoWatcherWakeup) {
101 LocklessQueue queue(get_memory(), config_);
102
103 EXPECT_EQ(queue.Wakeup(7), 0);
104}
105
106// Tests that wakeup doesn't do anything if a wakeup was registered and then
107// unregistered.
108TEST_F(LocklessQueueTest, UnregisteredWatcherWakeup) {
109 LocklessQueue queue(get_memory(), config_);
110
111 queue.RegisterWakeup(5);
112 queue.UnregisterWakeup();
113
114 EXPECT_EQ(queue.Wakeup(7), 0);
115}
116
// Tests that wakeup doesn't do anything if the thread dies.
TEST_F(LocklessQueueTest, DiedWatcherWakeup) {
  LocklessQueue queue(get_memory(), config_);

  // Register a wakeup from a thread which then exits without unregistering,
  // simulating a watcher process that died.
  ::std::thread([this]() {
    // Use placement new so the destructor doesn't get run.
    ::std::aligned_storage<sizeof(LocklessQueue), alignof(LocklessQueue)>::type
        data;
    LocklessQueue *q = new (&data) LocklessQueue(get_memory(), config_);
    // Register a wakeup.
    q->RegisterWakeup(5);
  }).join();

  // The wakeup should not count the dead watcher.
  EXPECT_EQ(queue.Wakeup(7), 0);
}
132
// Pairs a watcher thread with the event used to signal that it has finished
// registering its wakeup.
struct WatcherState {
  ::std::thread t;
  Event ready;
};
137
// Tests that too many watchers fails like expected.
TEST_F(LocklessQueueTest, TooManyWatchers) {
  // This is going to be a barrel of monkeys.
  // We need to spin up a bunch of watchers.  But, they all need to be in
  // different threads so they have different tids.
  ::std::vector<WatcherState> queues;
  // Reserve num_watchers WatcherState objects so the pointer value doesn't
  // change out from under us below.
  queues.reserve(config_.num_watchers);

  // Event used to trigger all the threads to unregister.
  Event cleanup;

  // Start all the threads.
  for (size_t i = 0; i < config_.num_watchers; ++i) {
    queues.emplace_back();

    WatcherState *s = &queues.back();
    queues.back().t = ::std::thread([this, &cleanup, s]() {
      LocklessQueue q(get_memory(), config_);
      EXPECT_TRUE(q.RegisterWakeup(0));

      // Signal that this thread is ready.
      s->ready.Set();

      // And wait until we are asked to shut down.
      cleanup.Wait();

      q.UnregisterWakeup();
    });
  }

  // Wait until all the threads are actually going.
  for (WatcherState &w : queues) {
    w.ready.Wait();
  }

  // Now try to allocate another one.  This will fail because every watcher
  // slot is taken.
  {
    LocklessQueue queue(get_memory(), config_);
    EXPECT_FALSE(queue.RegisterWakeup(0));
  }

  // Trigger the threads to cleanup their resources, and wait until they are
  // done.
  cleanup.Set();
  for (WatcherState &w : queues) {
    w.t.join();
  }

  // We should now be able to allocate a wakeup.
  {
    LocklessQueue queue(get_memory(), config_);
    EXPECT_TRUE(queue.RegisterWakeup(0));
    queue.UnregisterWakeup();
  }
}
195
// Tests that creating too many senders dies like expected.
TEST_F(LocklessQueueDeathTest, TooManySenders) {
  EXPECT_DEATH(
      {
        ::std::vector<::std::unique_ptr<LocklessQueue>> queues;
        ::std::vector<LocklessQueue::Sender> senders;
        // Allocate one sender more than the configuration allows; the last
        // MakeSender() is expected to abort with "Too many senders".
        for (size_t i = 0; i < config_.num_senders + 1; ++i) {
          queues.emplace_back(new LocklessQueue(get_memory(), config_));
          senders.emplace_back(queues.back()->MakeSender());
        }
      },
      "Too many senders");
}
209
// Now, start 2 threads and have them receive the signals.
TEST_F(LocklessQueueTest, WakeUpThreads) {
  // Confirm that the wakeup signal is in the real-time signal range.
  EXPECT_LE(kWakeupSignal, SIGRTMAX);
  EXPECT_GE(kWakeupSignal, SIGRTMIN);

  LocklessQueue queue(get_memory(), config_);

  // Events used to make sure both threads are ready before the test starts.
  Event ready1;
  Event ready2;

  // Start the threads at different watcher priorities.
  ::std::thread t1([this, &ready1]() { RunUntilWakeup(&ready1, 5); });
  ::std::thread t2([this, &ready2]() { RunUntilWakeup(&ready2, 4); });

  ready1.Wait();
  ready2.Wait();

  // Both registered watchers should be woken.
  EXPECT_EQ(queue.Wakeup(3), 2);

  t1.join();
  t2.join();

  // Clean up afterwards.  We are pretending to be RT when we are really not.
  // So we will be PI boosted up.
  UnsetCurrentThreadRealtimePriority();
}
238
// Do a simple send test.
TEST_F(LocklessQueueTest, Send) {
  LocklessQueue queue(get_memory(), config_);

  LocklessQueue::Sender sender = queue.MakeSender();

  // Send enough messages to wrap (queue_size is 10000, so 20000 wraps it).
  for (int i = 0; i < 20000; ++i) {
    // Confirm that the queue index makes sense given the number of sends.
    EXPECT_EQ(queue.LatestQueueIndex().index(),
              i == 0 ? LocklessQueue::empty_queue_index().index() : i - 1);

    // Send a trivial piece of data.
    char data[100];
    size_t s = snprintf(data, sizeof(data), "foobar%d", i);
    sender.Send(data, s);

    // Confirm that the queue index still makes sense.  This is easier since
    // the empty case has been handled.
    EXPECT_EQ(queue.LatestQueueIndex().index(), i);

    // Read a result from 5 in the past.
    ::aos::monotonic_clock::time_point monotonic_sent_time;
    ::aos::realtime_clock::time_point realtime_sent_time;
    ::aos::monotonic_clock::time_point monotonic_remote_time;
    ::aos::realtime_clock::time_point realtime_remote_time;
    uint32_t remote_queue_index;
    char read_data[1024];
    size_t length;

    // Build the index of the message sent 5 iterations ago, decrementing
    // (wrapping below zero) for the first few iterations.
    QueueIndex index = QueueIndex::Zero(config_.queue_size);
    if (i - 5 < 0) {
      index = index.DecrementBy(5 - i);
    } else {
      index = index.IncrementBy(i - 5);
    }
    LocklessQueue::ReadResult read_result =
        queue.Read(index.index(), &monotonic_sent_time, &realtime_sent_time,
                   &monotonic_remote_time, &realtime_remote_time,
                   &remote_queue_index, &length, &(read_data[0]));

    // This should either return GOOD, or TOO_OLD if it is before the start of
    // the queue.
    if (read_result != LocklessQueue::ReadResult::GOOD) {
      EXPECT_EQ(read_result, LocklessQueue::ReadResult::TOO_OLD);
    }
  }
}
287
// Races a bunch of sending threads to see if it all works.
TEST_F(LocklessQueueTest, SendRace) {
  const size_t kNumMessages = 10000 / FLAGS_thread_count;

  // Fixed seed so the sequence of iteration parameters is reproducible.
  ::std::mt19937 generator(0);
  ::std::uniform_int_distribution<> write_wrap_count_distribution(0, 10);
  ::std::bernoulli_distribution race_reads_distribution;
  ::std::bernoulli_distribution wrap_writes_distribution;

  const chrono::seconds print_frequency(FLAGS_print_rate);

  QueueRacer racer(get_memory(), FLAGS_thread_count, kNumMessages, config_);
  const monotonic_clock::time_point start_time =
      monotonic_clock::now();
  const monotonic_clock::time_point end_time =
      start_time + chrono::seconds(FLAGS_duration);

  monotonic_clock::time_point monotonic_now = start_time;
  monotonic_clock::time_point next_print_time = start_time + print_frequency;
  uint64_t messages = 0;
  // Run at least FLAGS_min_iterations iterations AND for at least
  // FLAGS_duration seconds, whichever takes longer.
  for (int i = 0; i < FLAGS_min_iterations || monotonic_now < end_time; ++i) {
    bool race_reads = race_reads_distribution(generator);
    int write_wrap_count = write_wrap_count_distribution(generator);
    // Only wrap the writes on some iterations.
    if (!wrap_writes_distribution(generator)) {
      write_wrap_count = 0;
    }
    EXPECT_NO_FATAL_FAILURE(racer.RunIteration(race_reads, write_wrap_count))
        << ": Running with race_reads: " << race_reads
        << ", and write_wrap_count " << write_wrap_count << " and on iteration "
        << i;

    messages += racer.CurrentIndex();

    // Periodically print throughput so long runs show progress.
    monotonic_now = monotonic_clock::now();
    if (monotonic_now > next_print_time) {
      double elapsed_seconds = chrono::duration_cast<chrono::duration<double>>(
                                   monotonic_now - start_time)
                                   .count();
      printf("Finished iteration %d, %f iterations/sec, %f messages/second\n",
             i, i / elapsed_seconds,
             static_cast<double>(messages) / elapsed_seconds);
      next_print_time = monotonic_now + print_frequency;
    }
  }
}
333
334// Send enough messages to wrap the 32 bit send counter.
335TEST_F(LocklessQueueTest, WrappedSend) {
336 uint64_t kNumMessages = 0x100010000ul;
337 QueueRacer racer(get_memory(), 1, kNumMessages, config_);
338
339 const monotonic_clock::time_point start_time = monotonic_clock::now();
340 EXPECT_NO_FATAL_FAILURE(racer.RunIteration(false, 0));
341 const monotonic_clock::time_point monotonic_now = monotonic_clock::now();
342 double elapsed_seconds = chrono::duration_cast<chrono::duration<double>>(
343 monotonic_now - start_time)
344 .count();
345 printf("Took %f seconds to write %" PRIu64 " messages, %f messages/s\n",
346 elapsed_seconds, kNumMessages,
347 static_cast<double>(kNumMessages) / elapsed_seconds);
348}
349
350} // namespace testing
351} // namespace ipc_lib
352} // namespace aos