#include "aos/common/mutex.h"

#include <sched.h>
#include <math.h>
#include <pthread.h>
#include <stdio.h>
#ifdef __VXWORKS__
#include <taskLib.h>
#endif

#include "gtest/gtest.h"

#include "aos/atom_code/ipc_lib/aos_sync.h"

namespace aos {
namespace testing {

class MutexTest : public ::testing::Test {
 public:
  Mutex test_mutex;
};

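// gtest runs tests in suites whose names end in "DeathTest" before all of the
// other tests, so the death tests below get their own fixture name.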
typedef MutexTest MutexDeathTest;

TEST_F(MutexTest, TryLock) {
  EXPECT_TRUE(test_mutex.TryLock());
  EXPECT_FALSE(test_mutex.TryLock());
}

TEST_F(MutexTest, Lock) {
  test_mutex.Lock();
  EXPECT_FALSE(test_mutex.TryLock());
}

TEST_F(MutexTest, Unlock) {
  test_mutex.Lock();
  EXPECT_FALSE(test_mutex.TryLock());
  test_mutex.Unlock();
  EXPECT_TRUE(test_mutex.TryLock());
}

#ifndef __VXWORKS__
// Sees what happens with multiple unlocks.
TEST_F(MutexDeathTest, RepeatUnlock) {
  test_mutex.Lock();
  test_mutex.Unlock();
  EXPECT_DEATH(test_mutex.Unlock(), ".*multiple unlock.*");
}

// Sees what happens if you unlock without ever locking (or unlocking) it.
TEST_F(MutexDeathTest, NeverLock) {
  EXPECT_DEATH(test_mutex.Unlock(), ".*multiple unlock.*");
}
#endif

TEST_F(MutexTest, MutexLocker) {
  {
    aos::MutexLocker locker(&test_mutex);
    EXPECT_FALSE(test_mutex.TryLock());
  }
  EXPECT_TRUE(test_mutex.TryLock());
}

TEST_F(MutexTest, MutexUnlocker) {
  test_mutex.Lock();
  {
    aos::MutexUnlocker unlocker(&test_mutex);
    // If this fails, then something weird is going on and the next line might
    // hang.
    ASSERT_TRUE(test_mutex.TryLock());
    test_mutex.Unlock();
  }
  EXPECT_FALSE(test_mutex.TryLock());
}

// A worker thread for testing the fairness of the mutex implementation.
class MutexFairnessWorkerThread {
 public:
  MutexFairnessWorkerThread(int *cycles, int index,
                            Mutex *in_mutex, mutex *start)
      : cycles_(cycles), index_(index), mutex_(in_mutex), start_(start) {}

  static void *RunStatic(void *self_in) {
    MutexFairnessWorkerThread *self =
        static_cast<MutexFairnessWorkerThread *>(self_in);
    self->Run();
    delete self;
    return NULL;
  }

  static void Reset(int cycles) {
    cyclesRun = 0;
    totalCycles = cycles;
  }

 private:
  void Run() {
    cycles_[index_] = 0;
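    // Block until the test sets the start futex so that all of the workers
    // begin contending for the mutex at roughly the same time.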
    ASSERT_EQ(futex_wait(start_), 0);
    while (cyclesRun < totalCycles) {
      {
        MutexLocker locker(mutex_);
        ++cyclesRun;
      }
      ++cycles_[index_];
      // Otherwise the fitpc implementation tends to just relock in the same
      // thread.
      sched_yield();
    }

#ifdef __VXWORKS__
    // Without this, all of the "task ... deleted ..." messages come out at
    // once, and it looks weird and triggers a socat bug (at least for
    // Squeeze's version 1.7.1.3-1).
    taskDelay(index_);
#endif
  }

  int *cycles_;
  int index_;
  Mutex *mutex_;
  mutex *start_;
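  // Shared between all of the workers. cyclesRun counts the total number of
  // times any worker has locked the mutex during the current pass, and
  // totalCycles is the target for that pass.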
  static int cyclesRun, totalCycles;
};
int MutexFairnessWorkerThread::cyclesRun;
int MutexFairnessWorkerThread::totalCycles;

// Tests the fairness of the implementation. It does this by repeatedly locking
// and unlocking a mutex in multiple threads and then checking the standard
// deviation of the number of times each one locks.
//
// It is safe to do this with threads because this is the test, so it can
// change if the implementations ever change to not support being used from
// multiple threads. Fitpc logging calls are not thread-safe, but that does
// not really matter because the only logging call that would get made here
// is a LOG(FATAL), which would terminate the process anyway.
TEST_F(MutexTest, Fairness) {
  static const int kThreads = 13;
#ifdef __VXWORKS__
  static const int kWarmupCycles = 1000, kRunCycles = 60000, kMaxDeviation = 20;
#else
  static const int kWarmupCycles = 30000, kRunCycles = 3000000,
                   kMaxDeviation = 10000;
#endif

  int cycles[kThreads];
  pthread_t workers[kThreads];
  mutex start = 0;

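  // Run two passes: the first (kWarmupCycles) just warms everything up and the
  // second (kRunCycles) is the one that actually gets measured. Each worker
  // zeroes its cycles[] slot when it starts, so only the final pass's counts
  // survive into the check below.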
  for (int repeats = 0; repeats < 2; ++repeats) {
    futex_unset(&start);
    MutexFairnessWorkerThread::Reset(repeats ? kRunCycles : kWarmupCycles);
    for (int i = 0; i < kThreads; ++i) {
      MutexFairnessWorkerThread *c =
          new MutexFairnessWorkerThread(cycles, i, &test_mutex, &start);
      ASSERT_EQ(0, pthread_create(&workers[i], NULL,
                                  MutexFairnessWorkerThread::RunStatic, c));
    }
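    // All of the workers are blocked on the start futex; setting it lets them
    // all through so they start contending together.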
    futex_set(&start);
    for (int i = 0; i < kThreads; ++i) {
      ASSERT_EQ(0, pthread_join(workers[i], NULL));
    }
  }

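  // With a perfectly fair mutex, each thread would lock it kRunCycles /
  // kThreads times, so check the standard deviation of the per-thread counts:
  //   deviation = sqrt(sum((cycles[i] - expected)^2) / kThreads)
  // against a (fairly loose) threshold.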
  double variance = 0;
  int expected = kRunCycles / kThreads;
  for (int i = 0; i < kThreads; ++i) {
    variance += (cycles[i] - expected) * (cycles[i] - expected);
  }
  double deviation = sqrt(variance / kThreads);
  printf("deviation=%f\n", deviation);
  ASSERT_GT(deviation, 0);
  EXPECT_LT(deviation, kMaxDeviation);
}

}  // namespace testing
}  // namespace aos