/*----------------------------------------------------------------------------*/
/* Copyright (c) FIRST 2008. All Rights Reserved.                              */
/* Open Source Software - may be modified and shared by FRC teams. The code    */
/* must be accompanied by the FIRST BSD license file in $(WIND_BASE)/WPILib.   */
/*----------------------------------------------------------------------------*/

#include <taskLib.h>
#include <intLib.h>
#include <assert.h>

#include "RWLock.h"

// A wrapper for assert that allows it to be easily turned off just in this
// file. Turning it off is recommended for normal use because it means less
// code gets executed with the scheduler locked.
#if 1
#define rwlock_assert(expression) assert(expression)
// A macro to easily assert that some expression (possibly with side effects)
// is 0.
#define rwlock_assert_success(expression) do { \
  int ret = (expression); \
  assert(ret == 0); \
} while (false)
#else
#define rwlock_assert(expression) ((void)0)
#define rwlock_assert_success(expression) ((void)(expression))
#endif

/**
 * Class that locks the scheduler and then unlocks it in the destructor.
 */
class TaskSchedulerLocker {
 public:
  TaskSchedulerLocker() {
    rwlock_assert_success(taskLock());
  }
  ~TaskSchedulerLocker() {
    rwlock_assert_success(taskUnlock());
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerLocker);
};
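
// Illustrative sketch (not compiled): how a scoped TaskSchedulerLocker might
// be used to make a multi-step update atomic with respect to other tasks. The
// function and variable names here are hypothetical, not part of this file.
#if 0
void UpdateSharedCounters(int *total, int *count, int value) {
  TaskSchedulerLocker scheduler_locker;
  // No other task can be scheduled between these two updates, so readers never
  // see one of them applied without the other.
  *total += value;
  *count += 1;
}  // scheduler_locker's destructor unlocks the scheduler here.
#endif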

RWLock::Locker::Locker(RWLock *lock, bool write)
    : lock_(lock), num_(lock_->Lock(write)) {
}

RWLock::Locker::Locker(const Locker &other)
    : lock_(other.lock_), num_(lock_->AddLock()) {
}

RWLock::Locker::~Locker() {
  lock_->Unlock(num_);
}
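
// Illustrative sketch (not compiled): how callers might use the RAII Locker to
// guard a shared structure. RWLock and Locker come from RWLock.h; the data
// type, variables, and functions here are hypothetical.
#if 0
RWLock data_lock;
SensorValues shared_values;

SensorValues ReadValues() {
  RWLock::Locker locker(&data_lock, false);  // shared (read) lock
  return shared_values;
}  // locker's destructor releases the read lock here.

void WriteValues(const SensorValues &new_values) {
  RWLock::Locker locker(&data_lock, true);  // exclusive (write) lock
  shared_values = new_values;
}  // locker's destructor releases the write lock here.
#endif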

// RWLock is implemented by just locking the scheduler around every operation
// because that is the only way under VxWorks to do much of anything atomically.

RWLock::RWLock()
    : number_of_write_locks_(0),
      number_of_writers_pending_(0),
      number_of_readers_(0),
      reader_tasks_(),
      read_ready_(semBCreate(SEM_Q_PRIORITY, SEM_EMPTY)),
      write_ready_(semBCreate(SEM_Q_PRIORITY, SEM_EMPTY)) {
  rwlock_assert(read_ready_ != NULL);
  rwlock_assert(write_ready_ != NULL);
}

RWLock::~RWLock() {
  // Make sure that nobody else currently has a lock or will ever be able to.
  Lock(true);

  rwlock_assert_success(semDelete(read_ready_));
  rwlock_assert_success(semDelete(write_ready_));
}

int RWLock::Lock(bool write) {
  assert(!intContext());

  int current_task = taskIdSelf();
  // It's safe to do this check up here (outside of locking the scheduler)
  // because we only care whether the current task is in the list or not, and
  // that can't change underneath us because we're the task doing the checking.
  bool current_task_holds_already = TaskOwns(current_task);

  TaskSchedulerLocker scheduler_locker;

  // We can't be reading and writing at the same time.
  rwlock_assert(!((number_of_write_locks_ > 0) && (number_of_readers_ > 0)));

  if (write) {
    assert(!current_task_holds_already);
    // If somebody else already has it locked.
    // Don't have to worry about another task getting scheduled after
    // write_ready_ gets given because nobody else (except another writer, which
    // would just block on it) will do anything while there are pending
    // writer(s).
    if ((number_of_readers_ > 0) || (number_of_write_locks_ > 0)) {
      ++number_of_writers_pending_;
      // Wait for it to be our turn.
      rwlock_assert_success(semTake(write_ready_, WAIT_FOREVER));
      --number_of_writers_pending_;
    } else {
      rwlock_assert(number_of_writers_pending_ == 0);
    }
    rwlock_assert((number_of_write_locks_ == 0) && (number_of_readers_ == 0));
    number_of_write_locks_ = 1;
    return 0;
  } else {  // read
    // While there are one or more writers active or waiting.
    // Has to be a loop in case a writer gets scheduled between the time
    // read_ready_ gets flushed and we run.
    while ((number_of_write_locks_ > 0) || (number_of_writers_pending_ > 0)) {
      // Wait for the writer(s) to finish.
      rwlock_assert_success(semTake(read_ready_, WAIT_FOREVER));
    }

    int num = number_of_readers_;
    number_of_readers_ = num + 1;
    assert(num < kMaxReaders);
    rwlock_assert(reader_tasks_[num] == 0);
    reader_tasks_[num] = current_task;
    rwlock_assert((number_of_write_locks_ == 0) && (number_of_readers_ > 0));
    return num;
  }
}

void RWLock::Unlock(int num) {
  assert(!intContext());
  TaskSchedulerLocker scheduler_locker;

  // We have to be reading or writing right now, but not both.
  rwlock_assert((number_of_write_locks_ > 0) != (number_of_readers_ > 0));

  if (number_of_write_locks_ > 0) {  // we're currently writing
    rwlock_assert(num == 0);
    --number_of_write_locks_;
    rwlock_assert((number_of_write_locks_ >= 0) &&
                  (number_of_writers_pending_ >= 0));
    // If we were the last one.
    if (number_of_write_locks_ == 0) {
      // If there are no other tasks waiting to write (because otherwise they
      // need to get priority over any readers).
      if (number_of_writers_pending_ == 0) {
        // Wake up any waiting readers.
        rwlock_assert_success(semFlush(read_ready_));
      } else {
        // Wake up a waiting writer.
        // Not a problem if somebody else already did this before the waiting
        // writer got a chance to take it because it'll do nothing and return
        // success.
        rwlock_assert_success(semGive(write_ready_));
      }
    }
  } else {  // we're currently reading
    rwlock_assert(reader_tasks_[num] == taskIdSelf());
    reader_tasks_[num] = 0;
    --number_of_readers_;
    rwlock_assert((number_of_readers_ >= 0) &&
                  (number_of_writers_pending_ >= 0));
    // If we were the last one.
    if (number_of_readers_ == 0) {
      // If there are any writers waiting for a chance to go.
      if (number_of_writers_pending_ > 0) {
        // Wake a waiting writer.
        // Not a problem if somebody else already did this before the waiting
        // writer got a chance to take it because it'll still return success.
        rwlock_assert_success(semGive(write_ready_));
      }
    }
  }
}

int RWLock::AddLock() {
  assert(!intContext());
  // TODO: Replace this with just atomically incrementing the right number once
  // we start using a GCC new enough to have the nice atomic builtins.
  // That will be safe because whether we're currently reading or writing can't
  // change in the middle of this.
  TaskSchedulerLocker scheduler_locker;

  // We have to be reading or writing right now, but not both.
  rwlock_assert((number_of_write_locks_ > 0) != (number_of_readers_ > 0));

  if (number_of_write_locks_ > 0) {  // we're currently writing
    ++number_of_write_locks_;
    return 0;
  } else {  // we're currently reading
    // Mirror the bookkeeping in Lock() so that Unlock() and TaskOwns() also
    // work for read locks added through the copy constructor.
    int num = number_of_readers_++;
    assert(num < kMaxReaders);
    rwlock_assert(reader_tasks_[num] == 0);
    reader_tasks_[num] = taskIdSelf();
    return num;
  }
}
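
// Rough sketch (not compiled) of the atomic-builtin replacement the TODO in
// AddLock() talks about, assuming a GCC new enough to provide
// __sync_fetch_and_add. This only covers the counter increments; the
// reader_tasks_ bookkeeping would still need separate handling.
#if 0
int RWLock::AddLock() {
  assert(!intContext());
  // Whether we're currently reading or writing can't change in the middle of
  // this, so atomically bumping the right counter is enough.
  if (number_of_write_locks_ > 0) {  // we're currently writing
    __sync_fetch_and_add(&number_of_write_locks_, 1);
    return 0;
  } else {  // we're currently reading
    // __sync_fetch_and_add returns the old value, matching the previous
    // number_of_readers_++ behavior.
    return __sync_fetch_and_add(&number_of_readers_, 1);
  }
}
#endif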

bool RWLock::TaskOwns(int task_id) {
  for (size_t i = 0;
       i < sizeof(reader_tasks_) / sizeof(reader_tasks_[0]);
       ++i) {
    if (reader_tasks_[i] == task_id) return true;
  }
  return false;
}