blob: 5b57aa9f919150bb6abb494b643ca0a03c1a37e6 [file] [log] [blame]
Austin Schuh20b2b082019-09-11 20:42:56 -07001#include "aos/ipc_lib/lockless_queue.h"
2
Austin Schuhfaec51a2023-09-08 17:43:32 -07003#include <sys/mman.h>
Austin Schuh20b2b082019-09-11 20:42:56 -07004#include <unistd.h>
5#include <wait.h>
Brian Silverman7b266d92021-02-17 21:24:02 -08006
Austin Schuh20b2b082019-09-11 20:42:56 -07007#include <chrono>
Tyler Chatowbf0609c2021-07-31 16:13:27 -07008#include <cinttypes>
9#include <csignal>
Austin Schuh20b2b082019-09-11 20:42:56 -070010#include <memory>
11#include <random>
12#include <thread>
13
Philipp Schrader790cb542023-07-05 21:06:52 -070014#include "gflags/gflags.h"
15#include "gtest/gtest.h"
16
Austin Schuh20b2b082019-09-11 20:42:56 -070017#include "aos/events/epoll.h"
Austin Schuh20b2b082019-09-11 20:42:56 -070018#include "aos/ipc_lib/aos_sync.h"
Brian Silverman7b266d92021-02-17 21:24:02 -080019#include "aos/ipc_lib/event.h"
Austin Schuhfaec51a2023-09-08 17:43:32 -070020#include "aos/ipc_lib/lockless_queue_memory.h"
21#include "aos/ipc_lib/lockless_queue_stepping.h"
Austin Schuh20b2b082019-09-11 20:42:56 -070022#include "aos/ipc_lib/queue_racer.h"
23#include "aos/ipc_lib/signalfd.h"
Alex Perrycb7da4b2019-08-28 19:35:56 -070024#include "aos/realtime.h"
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -070025#include "aos/util/phased_loop.h"
Austin Schuh20b2b082019-09-11 20:42:56 -070026
27DEFINE_int32(min_iterations, 100,
28 "Minimum number of stress test iterations to run");
29DEFINE_int32(duration, 5, "Number of seconds to test for");
30DEFINE_int32(print_rate, 60, "Number of seconds between status prints");
31
32// The roboRIO can only handle 10 threads before exploding. Set the default for
33// ARM to 10.
34DEFINE_int32(thread_count,
35#if defined(__ARM_EABI__)
36 10,
37#else
38 100,
39#endif
40 "Number of threads to race");
41
42namespace aos {
43namespace ipc_lib {
44namespace testing {
45
46namespace chrono = ::std::chrono;
47
48class LocklessQueueTest : public ::testing::Test {
49 public:
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -070050 static constexpr monotonic_clock::duration kChannelStorageDuration =
51 std::chrono::milliseconds(500);
52
Austin Schuh20b2b082019-09-11 20:42:56 -070053 LocklessQueueTest() {
Austin Schuh20b2b082019-09-11 20:42:56 -070054 config_.num_watchers = 10;
55 config_.num_senders = 100;
Brian Silverman177567e2020-08-12 19:51:33 -070056 config_.num_pinners = 5;
Austin Schuh20b2b082019-09-11 20:42:56 -070057 config_.queue_size = 10000;
58 // Exercise the alignment code. This would throw off alignment.
59 config_.message_data_size = 101;
60
61 // Since our backing store is an array of uint64_t for alignment purposes,
62 // normalize by the size.
63 memory_.resize(LocklessQueueMemorySize(config_) / sizeof(uint64_t));
64
65 Reset();
66 }
67
Brian Silvermanfc0d2e82020-08-12 19:58:35 -070068 LocklessQueue queue() {
69 return LocklessQueue(reinterpret_cast<LocklessQueueMemory *>(&(memory_[0])),
70 reinterpret_cast<LocklessQueueMemory *>(&(memory_[0])),
71 config_);
Austin Schuh20b2b082019-09-11 20:42:56 -070072 }
73
Brian Silvermanfc0d2e82020-08-12 19:58:35 -070074 void Reset() { memset(&memory_[0], 0, LocklessQueueMemorySize(config_)); }
Austin Schuh20b2b082019-09-11 20:42:56 -070075
76 // Runs until the signal is received.
77 void RunUntilWakeup(Event *ready, int priority) {
Austin Schuh20b2b082019-09-11 20:42:56 -070078 internal::EPoll epoll;
79 SignalFd signalfd({kWakeupSignal});
80
81 epoll.OnReadable(signalfd.fd(), [&signalfd, &epoll]() {
82 signalfd_siginfo result = signalfd.Read();
83
84 fprintf(stderr, "Got signal: %d\n", result.ssi_signo);
85 epoll.Quit();
86 });
87
Brian Silvermanfc0d2e82020-08-12 19:58:35 -070088 {
89 // Register to be woken up *after* the signalfd is catching the signals.
90 LocklessQueueWatcher watcher =
91 LocklessQueueWatcher::Make(queue(), priority).value();
Austin Schuh20b2b082019-09-11 20:42:56 -070092
Brian Silvermanfc0d2e82020-08-12 19:58:35 -070093 // And signal we are now ready.
94 ready->Set();
Austin Schuh20b2b082019-09-11 20:42:56 -070095
Brian Silvermanfc0d2e82020-08-12 19:58:35 -070096 epoll.Run();
Austin Schuh20b2b082019-09-11 20:42:56 -070097
Brian Silvermanfc0d2e82020-08-12 19:58:35 -070098 // Cleanup, ensuring the watcher is destroyed before the signalfd.
99 }
Austin Schuh20b2b082019-09-11 20:42:56 -0700100 epoll.DeleteFd(signalfd.fd());
101 }
102
103 // Use a type with enough alignment that we are guarenteed that everything
104 // will be aligned properly on the target platform.
105 ::std::vector<uint64_t> memory_;
106
107 LocklessQueueConfiguration config_;
108};
109
Austin Schuh20b2b082019-09-11 20:42:56 -0700110// Tests that wakeup doesn't do anything if nothing was registered.
111TEST_F(LocklessQueueTest, NoWatcherWakeup) {
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700112 LocklessQueueWakeUpper wake_upper(queue());
Austin Schuh20b2b082019-09-11 20:42:56 -0700113
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700114 EXPECT_EQ(wake_upper.Wakeup(7), 0);
Austin Schuh20b2b082019-09-11 20:42:56 -0700115}
116
117// Tests that wakeup doesn't do anything if a wakeup was registered and then
118// unregistered.
119TEST_F(LocklessQueueTest, UnregisteredWatcherWakeup) {
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700120 LocklessQueueWakeUpper wake_upper(queue());
Austin Schuh20b2b082019-09-11 20:42:56 -0700121
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700122 { LocklessQueueWatcher::Make(queue(), 5).value(); }
Austin Schuh20b2b082019-09-11 20:42:56 -0700123
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700124 EXPECT_EQ(wake_upper.Wakeup(7), 0);
Austin Schuh20b2b082019-09-11 20:42:56 -0700125}
126
127// Tests that wakeup doesn't do anything if the thread dies.
128TEST_F(LocklessQueueTest, DiedWatcherWakeup) {
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700129 LocklessQueueWakeUpper wake_upper(queue());
Austin Schuh20b2b082019-09-11 20:42:56 -0700130
131 ::std::thread([this]() {
132 // Use placement new so the destructor doesn't get run.
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700133 ::std::aligned_storage<sizeof(LocklessQueueWatcher),
134 alignof(LocklessQueueWatcher)>::type data;
135 new (&data)
136 LocklessQueueWatcher(LocklessQueueWatcher::Make(queue(), 5).value());
Brian Silverman7b266d92021-02-17 21:24:02 -0800137 }).join();
Austin Schuh20b2b082019-09-11 20:42:56 -0700138
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700139 EXPECT_EQ(wake_upper.Wakeup(7), 0);
Austin Schuh20b2b082019-09-11 20:42:56 -0700140}
141
142struct WatcherState {
143 ::std::thread t;
144 Event ready;
145};
146
147// Tests that too many watchers fails like expected.
148TEST_F(LocklessQueueTest, TooManyWatchers) {
149 // This is going to be a barrel of monkeys.
150 // We need to spin up a bunch of watchers. But, they all need to be in
151 // different threads so they have different tids.
152 ::std::vector<WatcherState> queues;
153 // Reserve num_watchers WatcherState objects so the pointer value doesn't
154 // change out from under us below.
155 queues.reserve(config_.num_watchers);
156
157 // Event used to trigger all the threads to unregister.
158 Event cleanup;
159
160 // Start all the threads.
161 for (size_t i = 0; i < config_.num_watchers; ++i) {
162 queues.emplace_back();
163
164 WatcherState *s = &queues.back();
165 queues.back().t = ::std::thread([this, &cleanup, s]() {
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700166 LocklessQueueWatcher q = LocklessQueueWatcher::Make(queue(), 0).value();
Austin Schuh20b2b082019-09-11 20:42:56 -0700167
168 // Signal that this thread is ready.
169 s->ready.Set();
170
171 // And wait until we are asked to shut down.
172 cleanup.Wait();
Austin Schuh20b2b082019-09-11 20:42:56 -0700173 });
174 }
175
176 // Wait until all the threads are actually going.
177 for (WatcherState &w : queues) {
178 w.ready.Wait();
179 }
180
181 // Now try to allocate another one. This will fail.
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700182 EXPECT_FALSE(LocklessQueueWatcher::Make(queue(), 0));
Austin Schuh20b2b082019-09-11 20:42:56 -0700183
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700184 // Trigger the threads to cleanup their resources, and wait until they are
Austin Schuh20b2b082019-09-11 20:42:56 -0700185 // done.
186 cleanup.Set();
187 for (WatcherState &w : queues) {
188 w.t.join();
189 }
190
191 // We should now be able to allocate a wakeup.
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700192 EXPECT_TRUE(LocklessQueueWatcher::Make(queue(), 0));
Austin Schuh20b2b082019-09-11 20:42:56 -0700193}
194
195// Tests that too many watchers dies like expected.
Austin Schuhe516ab02020-05-06 21:37:04 -0700196TEST_F(LocklessQueueTest, TooManySenders) {
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700197 ::std::vector<LocklessQueueSender> senders;
Austin Schuhe516ab02020-05-06 21:37:04 -0700198 for (size_t i = 0; i < config_.num_senders; ++i) {
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -0700199 senders.emplace_back(
200 LocklessQueueSender::Make(queue(), kChannelStorageDuration).value());
Austin Schuhe516ab02020-05-06 21:37:04 -0700201 }
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -0700202 EXPECT_FALSE(LocklessQueueSender::Make(queue(), kChannelStorageDuration));
Austin Schuh20b2b082019-09-11 20:42:56 -0700203}
204
205// Now, start 2 threads and have them receive the signals.
206TEST_F(LocklessQueueTest, WakeUpThreads) {
207 // Confirm that the wakeup signal is in range.
208 EXPECT_LE(kWakeupSignal, SIGRTMAX);
209 EXPECT_GE(kWakeupSignal, SIGRTMIN);
210
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700211 LocklessQueueWakeUpper wake_upper(queue());
Austin Schuh20b2b082019-09-11 20:42:56 -0700212
213 // Event used to make sure the thread is ready before the test starts.
214 Event ready1;
215 Event ready2;
216
217 // Start the thread.
Austin Schuh07290cd2022-08-16 18:01:14 -0700218 ::std::thread t1([this, &ready1]() { RunUntilWakeup(&ready1, 2); });
219 ::std::thread t2([this, &ready2]() { RunUntilWakeup(&ready2, 1); });
Austin Schuh20b2b082019-09-11 20:42:56 -0700220
221 ready1.Wait();
222 ready2.Wait();
223
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700224 EXPECT_EQ(wake_upper.Wakeup(3), 2);
Austin Schuh20b2b082019-09-11 20:42:56 -0700225
226 t1.join();
227 t2.join();
228
229 // Clean up afterwords. We are pretending to be RT when we are really not.
230 // So we will be PI boosted up.
231 UnsetCurrentThreadRealtimePriority();
232}
233
234// Do a simple send test.
235TEST_F(LocklessQueueTest, Send) {
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -0700236 LocklessQueueSender sender =
237 LocklessQueueSender::Make(queue(), kChannelStorageDuration).value();
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700238 LocklessQueueReader reader(queue());
Austin Schuh20b2b082019-09-11 20:42:56 -0700239
Austin Schuh82ea7382023-07-14 15:17:34 -0700240 time::PhasedLoop loop(kChannelStorageDuration / (config_.queue_size - 1),
241 monotonic_clock::now());
242 std::function<bool(const Context &)> should_read = [](const Context &) {
243 return true;
244 };
245
Austin Schuh20b2b082019-09-11 20:42:56 -0700246 // Send enough messages to wrap.
Austin Schuh82ea7382023-07-14 15:17:34 -0700247 for (int i = 0; i < 2 * static_cast<int>(config_.queue_size); ++i) {
Austin Schuh20b2b082019-09-11 20:42:56 -0700248 // Confirm that the queue index makes sense given the number of sends.
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700249 EXPECT_EQ(reader.LatestIndex().index(),
250 i == 0 ? QueueIndex::Invalid().index() : i - 1);
Austin Schuh20b2b082019-09-11 20:42:56 -0700251
252 // Send a trivial piece of data.
253 char data[100];
254 size_t s = snprintf(data, sizeof(data), "foobar%d", i);
Austin Schuh82ea7382023-07-14 15:17:34 -0700255 ASSERT_EQ(sender.Send(data, s, monotonic_clock::min_time,
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -0700256 realtime_clock::min_time, 0xffffffffu, UUID::Zero(),
257 nullptr, nullptr, nullptr),
258 LocklessQueueSender::Result::GOOD);
Austin Schuh20b2b082019-09-11 20:42:56 -0700259
260 // Confirm that the queue index still makes sense. This is easier since the
261 // empty case has been handled.
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700262 EXPECT_EQ(reader.LatestIndex().index(), i);
Austin Schuh20b2b082019-09-11 20:42:56 -0700263
264 // Read a result from 5 in the past.
Austin Schuhb5c6f972021-03-14 21:53:07 -0700265 monotonic_clock::time_point monotonic_sent_time;
266 realtime_clock::time_point realtime_sent_time;
267 monotonic_clock::time_point monotonic_remote_time;
268 realtime_clock::time_point realtime_remote_time;
Austin Schuhad154822019-12-27 15:45:13 -0800269 uint32_t remote_queue_index;
Austin Schuha9012be2021-07-21 15:19:11 -0700270 UUID source_boot_uuid;
Austin Schuh20b2b082019-09-11 20:42:56 -0700271 char read_data[1024];
272 size_t length;
273
274 QueueIndex index = QueueIndex::Zero(config_.queue_size);
275 if (i - 5 < 0) {
276 index = index.DecrementBy(5 - i);
277 } else {
278 index = index.IncrementBy(i - 5);
279 }
Austin Schuh8902fa52021-03-14 22:39:24 -0700280 LocklessQueueReader::Result read_result = reader.Read(
281 index.index(), &monotonic_sent_time, &realtime_sent_time,
282 &monotonic_remote_time, &realtime_remote_time, &remote_queue_index,
Austin Schuh82ea7382023-07-14 15:17:34 -0700283 &source_boot_uuid, &length, &(read_data[0]), std::ref(should_read));
Austin Schuh20b2b082019-09-11 20:42:56 -0700284
285 // This should either return GOOD, or TOO_OLD if it is before the start of
286 // the queue.
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700287 if (read_result != LocklessQueueReader::Result::GOOD) {
Austin Schuh82ea7382023-07-14 15:17:34 -0700288 ASSERT_EQ(read_result, LocklessQueueReader::Result::TOO_OLD);
Austin Schuh20b2b082019-09-11 20:42:56 -0700289 }
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -0700290
291 loop.SleepUntilNext();
Austin Schuh20b2b082019-09-11 20:42:56 -0700292 }
293}
294
295// Races a bunch of sending threads to see if it all works.
296TEST_F(LocklessQueueTest, SendRace) {
297 const size_t kNumMessages = 10000 / FLAGS_thread_count;
298
299 ::std::mt19937 generator(0);
300 ::std::uniform_int_distribution<> write_wrap_count_distribution(0, 10);
301 ::std::bernoulli_distribution race_reads_distribution;
Austin Schuh82ea7382023-07-14 15:17:34 -0700302 ::std::bernoulli_distribution set_should_read_distribution;
303 ::std::bernoulli_distribution should_read_result_distribution;
Austin Schuh20b2b082019-09-11 20:42:56 -0700304 ::std::bernoulli_distribution wrap_writes_distribution;
305
306 const chrono::seconds print_frequency(FLAGS_print_rate);
307
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700308 QueueRacer racer(queue(), FLAGS_thread_count, kNumMessages);
309 const monotonic_clock::time_point start_time = monotonic_clock::now();
Austin Schuh20b2b082019-09-11 20:42:56 -0700310 const monotonic_clock::time_point end_time =
311 start_time + chrono::seconds(FLAGS_duration);
312
313 monotonic_clock::time_point monotonic_now = start_time;
314 monotonic_clock::time_point next_print_time = start_time + print_frequency;
315 uint64_t messages = 0;
316 for (int i = 0; i < FLAGS_min_iterations || monotonic_now < end_time; ++i) {
Austin Schuh82ea7382023-07-14 15:17:34 -0700317 const bool race_reads = race_reads_distribution(generator);
318 const bool set_should_read = set_should_read_distribution(generator);
319 const bool should_read_result = should_read_result_distribution(generator);
Austin Schuh20b2b082019-09-11 20:42:56 -0700320 int write_wrap_count = write_wrap_count_distribution(generator);
321 if (!wrap_writes_distribution(generator)) {
322 write_wrap_count = 0;
323 }
Austin Schuh82ea7382023-07-14 15:17:34 -0700324 EXPECT_NO_FATAL_FAILURE(racer.RunIteration(
325 race_reads, write_wrap_count, set_should_read, should_read_result))
Austin Schuh20b2b082019-09-11 20:42:56 -0700326 << ": Running with race_reads: " << race_reads
327 << ", and write_wrap_count " << write_wrap_count << " and on iteration "
328 << i;
329
330 messages += racer.CurrentIndex();
331
332 monotonic_now = monotonic_clock::now();
333 if (monotonic_now > next_print_time) {
334 double elapsed_seconds = chrono::duration_cast<chrono::duration<double>>(
335 monotonic_now - start_time)
336 .count();
337 printf("Finished iteration %d, %f iterations/sec, %f messages/second\n",
338 i, i / elapsed_seconds,
339 static_cast<double>(messages) / elapsed_seconds);
340 next_print_time = monotonic_now + print_frequency;
341 }
342 }
343}
344
Brian Silverman0eaa1da2020-08-12 20:03:52 -0700345namespace {
346
Austin Schuh07290cd2022-08-16 18:01:14 -0700347// Temporarily pins the current thread to the first 2 available CPUs.
Brian Silverman0eaa1da2020-08-12 20:03:52 -0700348// This speeds up the test on some machines a lot (~4x). It also preserves
349// opportunities for the 2 threads to race each other.
350class PinForTest {
351 public:
352 PinForTest() {
Austin Schuh07290cd2022-08-16 18:01:14 -0700353 cpu_set_t cpus = GetCurrentThreadAffinity();
354 old_ = cpus;
355 int number_found = 0;
356 for (int i = 0; i < CPU_SETSIZE; ++i) {
357 if (CPU_ISSET(i, &cpus)) {
358 if (number_found < 2) {
359 ++number_found;
360 } else {
361 CPU_CLR(i, &cpus);
362 }
363 }
364 }
365 SetCurrentThreadAffinity(cpus);
Brian Silverman0eaa1da2020-08-12 20:03:52 -0700366 }
Austin Schuh07290cd2022-08-16 18:01:14 -0700367 ~PinForTest() { SetCurrentThreadAffinity(old_); }
Brian Silverman0eaa1da2020-08-12 20:03:52 -0700368
369 private:
370 cpu_set_t old_;
371};
372
373} // namespace
374
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -0700375class LocklessQueueTestTooFast : public LocklessQueueTest {
376 public:
377 LocklessQueueTestTooFast() {
378 // Force a scenario where senders get rate limited
379 config_.num_watchers = 1000;
380 config_.num_senders = 100;
381 config_.num_pinners = 5;
382 config_.queue_size = 100;
383 // Exercise the alignment code. This would throw off alignment.
384 config_.message_data_size = 101;
385
386 // Since our backing store is an array of uint64_t for alignment purposes,
387 // normalize by the size.
388 memory_.resize(LocklessQueueMemorySize(config_) / sizeof(uint64_t));
389
390 Reset();
391 }
392};
393
394// Ensure we always return OK or MESSAGES_SENT_TOO_FAST under an extreme load
395// on the Sender Queue.
396TEST_F(LocklessQueueTestTooFast, MessagesSentTooFast) {
397 PinForTest pin_cpu;
398 uint64_t kNumMessages = 1000000;
399 QueueRacer racer(queue(),
400 {FLAGS_thread_count,
401 kNumMessages,
402 {LocklessQueueSender::Result::GOOD,
403 LocklessQueueSender::Result::MESSAGES_SENT_TOO_FAST},
404 std::chrono::milliseconds(500),
405 false});
406
Austin Schuh82ea7382023-07-14 15:17:34 -0700407 EXPECT_NO_FATAL_FAILURE(racer.RunIteration(false, 0, true, true));
Eric Schmiedebergef44b8a2022-02-28 17:30:38 -0700408}
409
410// // Send enough messages to wrap the 32 bit send counter.
Austin Schuh20b2b082019-09-11 20:42:56 -0700411TEST_F(LocklessQueueTest, WrappedSend) {
Brian Silverman0eaa1da2020-08-12 20:03:52 -0700412 PinForTest pin_cpu;
Austin Schuh20b2b082019-09-11 20:42:56 -0700413 uint64_t kNumMessages = 0x100010000ul;
Brian Silvermanfc0d2e82020-08-12 19:58:35 -0700414 QueueRacer racer(queue(), 1, kNumMessages);
Austin Schuh20b2b082019-09-11 20:42:56 -0700415
416 const monotonic_clock::time_point start_time = monotonic_clock::now();
Austin Schuh82ea7382023-07-14 15:17:34 -0700417 EXPECT_NO_FATAL_FAILURE(racer.RunIteration(false, 0, false, true));
Austin Schuh20b2b082019-09-11 20:42:56 -0700418 const monotonic_clock::time_point monotonic_now = monotonic_clock::now();
419 double elapsed_seconds = chrono::duration_cast<chrono::duration<double>>(
420 monotonic_now - start_time)
421 .count();
422 printf("Took %f seconds to write %" PRIu64 " messages, %f messages/s\n",
423 elapsed_seconds, kNumMessages,
424 static_cast<double>(kNumMessages) / elapsed_seconds);
425}
426
Austin Schuhfaec51a2023-09-08 17:43:32 -0700427#if defined(SUPPORTS_SHM_ROBUSTNESS_TEST)
428
429// Verifies that LatestIndex points to the same message as the logic from
430// "FetchNext", which increments the index until it gets "NOTHING_NEW" back.
431// This is so we can confirm fetchers and watchers all see the same message at
432// the same point in time.
433int VerifyMessages(LocklessQueue *queue, LocklessQueueMemory *memory) {
434 LocklessQueueReader reader(*queue);
435
436 const ipc_lib::QueueIndex queue_index = reader.LatestIndex();
437 if (!queue_index.valid()) {
438 return 0;
439 }
440
441 // Now loop through the queue and make sure the number in the snprintf
442 // increments.
443 char last_data = '0';
444 int i = 0;
445
446 // Callback which isn't set so we don't exercise the conditional reading code.
447 std::function<bool(const Context &)> should_read_callback;
448
449 // Now, read as far as we can until we get NOTHING_NEW. This simulates
450 // FetchNext.
451 while (true) {
452 monotonic_clock::time_point monotonic_sent_time;
453 realtime_clock::time_point realtime_sent_time;
454 monotonic_clock::time_point monotonic_remote_time;
455 realtime_clock::time_point realtime_remote_time;
456 uint32_t remote_queue_index;
457 UUID source_boot_uuid;
458 char read_data[1024];
459 size_t length;
460
461 LocklessQueueReader::Result read_result = reader.Read(
462 i, &monotonic_sent_time, &realtime_sent_time, &monotonic_remote_time,
463 &realtime_remote_time, &remote_queue_index, &source_boot_uuid, &length,
Austin Schuh0bd410a2023-11-05 12:38:12 -0800464 &(read_data[0]), should_read_callback);
Austin Schuhfaec51a2023-09-08 17:43:32 -0700465
466 if (read_result != LocklessQueueReader::Result::GOOD) {
467 if (read_result == LocklessQueueReader::Result::TOO_OLD) {
468 ++i;
469 continue;
470 }
471 CHECK(read_result == LocklessQueueReader::Result::NOTHING_NEW)
472 << ": " << static_cast<int>(read_result);
473 break;
474 }
475
476 EXPECT_GT(read_data[LocklessQueueMessageDataSize(memory) - length + 6],
477 last_data)
478 << ": Got " << read_data << " for " << i;
479 last_data = read_data[LocklessQueueMessageDataSize(memory) - length + 6];
480
481 ++i;
482 }
483
484 // The latest queue index should match the fetched queue index.
485 if (i == 0) {
486 EXPECT_FALSE(queue_index.valid());
487 } else {
488 EXPECT_EQ(queue_index.index(), i - 1);
489 }
490 return i;
491}
492
493// Tests that at all points in the publish step, fetch == fetch next. This
494// means that there is an atomic point at which the message is viewed as visible
495// to consumers. Do this by killing the writer after each change to shared
496// memory, and confirming fetch == fetch next each time.
497TEST_F(LocklessQueueTest, FetchEqFetchNext) {
498 SharedTid tid;
499
500 // Make a small queue so it is easier to debug.
501 LocklessQueueConfiguration config;
502 config.num_watchers = 1;
503 config.num_senders = 2;
504 config.num_pinners = 0;
505 config.queue_size = 3;
506 config.message_data_size = 32;
507
508 TestShmRobustness(
509 config,
510 [config, &tid](void *memory) {
511 // Initialize the queue.
512 LocklessQueue(
513 reinterpret_cast<aos::ipc_lib::LocklessQueueMemory *>(memory),
514 reinterpret_cast<aos::ipc_lib::LocklessQueueMemory *>(memory),
515 config)
516 .Initialize();
517 tid.Set();
518 },
519 [config](void *memory) {
520 LocklessQueue queue(
521 reinterpret_cast<aos::ipc_lib::LocklessQueueMemory *>(memory),
522 reinterpret_cast<aos::ipc_lib::LocklessQueueMemory *>(memory),
523 config);
524 // Now try to write some messages. We will get killed a bunch as this
525 // tries to happen.
526 LocklessQueueSender sender =
527 LocklessQueueSender::Make(queue, chrono::nanoseconds(1)).value();
528 for (int i = 0; i < 5; ++i) {
529 char data[100];
530 size_t s = snprintf(data, sizeof(data), "foobar%d", i + 1);
531 ASSERT_EQ(sender.Send(data, s + 1, monotonic_clock::min_time,
532 realtime_clock::min_time, 0xffffffffl,
533 UUID::Zero(), nullptr, nullptr, nullptr),
534 LocklessQueueSender::Result::GOOD);
535 }
536 },
537 [config, &tid](void *raw_memory) {
538 ::aos::ipc_lib::LocklessQueueMemory *const memory =
539 reinterpret_cast<::aos::ipc_lib::LocklessQueueMemory *>(raw_memory);
540 LocklessQueue queue(memory, memory, config);
Philipp Schraderab2f8432023-09-17 18:58:06 -0700541 PretendThatOwnerIsDeadForTesting(&memory->queue_setup_lock, tid.Get());
Austin Schuhfaec51a2023-09-08 17:43:32 -0700542
543 if (VLOG_IS_ON(1)) {
544 PrintLocklessQueueMemory(memory);
545 }
546
547 const int i = VerifyMessages(&queue, memory);
548
549 LocklessQueueSender sender =
550 LocklessQueueSender::Make(queue, chrono::nanoseconds(1)).value();
551 {
552 char data[100];
553 size_t s = snprintf(data, sizeof(data), "foobar%d", i + 1);
554 ASSERT_EQ(sender.Send(data, s + 1, monotonic_clock::min_time,
555 realtime_clock::min_time, 0xffffffffl,
556 UUID::Zero(), nullptr, nullptr, nullptr),
557 LocklessQueueSender::Result::GOOD);
558 }
559
560 // Now, make sure we can send 1 message and receive it to confirm we
561 // haven't corrupted next_queue_index irrevocably.
562 const int newi = VerifyMessages(&queue, memory);
563 EXPECT_EQ(newi, i + 1);
564 });
565}
566
567#endif
568
}  // namespace testing
}  // namespace ipc_lib
}  // namespace aos