#include "aos/events/shm_event_loop.h"

#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

#include <algorithm>
#include <atomic>
#include <chrono>
#include <iterator>
#include <stdexcept>

#include "aos/events/aos_logging.h"
#include "aos/events/epoll.h"
#include "aos/events/event_loop_generated.h"
#include "aos/events/timing_statistics.h"
#include "aos/ipc_lib/lockless_queue.h"
#include "aos/ipc_lib/signalfd.h"
#include "aos/realtime.h"
#include "aos/stl_mutex/stl_mutex.h"
#include "aos/util/file.h"
#include "aos/util/phased_loop.h"
#include "glog/logging.h"

namespace {

// Returns the portion of the path after the last /. This very much assumes
// that the application name is null terminated.
const char *Filename(const char *path) {
  const std::string_view path_string_view = path;
  auto last_slash_pos = path_string_view.find_last_of("/");

  return last_slash_pos == std::string_view::npos ? path
                                                  : path + last_slash_pos + 1;
}

}  // namespace

DEFINE_string(shm_base, "/dev/shm/aos",
              "Directory to place queue backing mmaped files in.");
DEFINE_uint32(permissions, 0770,
              "Permissions to make shared memory files and folders.");
DEFINE_string(application_name, Filename(program_invocation_name),
              "The application name");

namespace aos {

using namespace shm_event_loop_internal;

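// Prefixes the default shared memory directory with |base|. For example,
// SetShmBase("/tmp/test") places the queue backing files under
// "/tmp/test/dev/shm/aos".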
void SetShmBase(const std::string_view base) {
  FLAGS_shm_base = std::string(base) + "/dev/shm/aos";
}

std::string ShmFolder(const Channel *channel) {
  CHECK(channel->has_name());
  CHECK_EQ(channel->name()->string_view()[0], '/');
  return FLAGS_shm_base + channel->name()->str() + "/";
}
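// Returns the path of the file backing |channel|'s queue, e.g.
// "<shm_base>/test/aos.examples.Ping.v3" for a channel named "/test" with
// type "aos.examples.Ping".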
std::string ShmPath(const Channel *channel) {
  CHECK(channel->has_type());
  return ShmFolder(channel) + channel->type()->str() + ".v3";
}

void PageFaultData(char *data, size_t size) {
  // This just has to divide the actual page size. Being smaller will make this
  // a bit slower than necessary, but not much. 1024 is a pretty conservative
  // choice (most pages are probably 4096).
  static constexpr size_t kPageSize = 1024;
  const size_t pages = (size + kPageSize - 1) / kPageSize;
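  // e.g. a 1 MiB region results in 1024 iterations of the loop below.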
  for (size_t i = 0; i < pages; ++i) {
    char zero = 0;
    // We need to ensure there's a writable pagetable entry, but avoid modifying
    // the data.
    //
    // Even if you lock the data into memory, some kernels still seem to lazily
    // create the actual pagetable entries. This means we need to somehow
    // "write" to the page.
    //
    // Also, this takes place while other processes may be concurrently
    // opening/initializing the memory, so we need to avoid corrupting that.
    //
    // This is the simplest operation I could think of which achieves that:
    // "store 0 if it's already 0".
    __atomic_compare_exchange_n(&data[i * kPageSize], &zero, 0, true,
                                __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  }
}

class MMapedQueue {
 public:
  MMapedQueue(const Channel *channel,
              const std::chrono::seconds channel_storage_duration) {
    std::string path = ShmPath(channel);

    config_.num_watchers = channel->num_watchers();
    config_.num_senders = channel->num_senders();
    config_.num_pinners = 0;
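    // Size the queue to hold channel_storage_duration's worth of messages at
    // the configured frequency, e.g. 2 seconds of storage at 500 Hz gives a
    // 1000 entry queue.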
    config_.queue_size =
        channel_storage_duration.count() * channel->frequency();
    config_.message_data_size = channel->max_size();

    size_ = ipc_lib::LocklessQueueMemorySize(config_);

    util::MkdirP(path, FLAGS_permissions);

    // There are 2 cases. Either the file already exists, or it does not
    // already exist and we need to create it. Start by trying to create it. If
    // that fails, the file has already been created and we can open it
    // normally. Once the file has been created it will never be deleted.
    int fd = open(path.c_str(), O_RDWR | O_CREAT | O_EXCL,
                  O_CLOEXEC | FLAGS_permissions);
    if (fd == -1 && errno == EEXIST) {
      VLOG(1) << path << " already created.";
      // File already exists.
      fd = open(path.c_str(), O_RDWR, O_CLOEXEC);
      PCHECK(fd != -1) << ": Failed to open " << path;
      while (true) {
        struct stat st;
        PCHECK(fstat(fd, &st) == 0);
        if (st.st_size != 0) {
          CHECK_EQ(static_cast<size_t>(st.st_size), size_)
              << ": Size of " << path
              << " doesn't match expected size of backing queue file. Did the "
                 "queue definition change?";
          break;
        } else {
          // The creating process didn't get around to it yet. Give it a bit.
          std::this_thread::sleep_for(std::chrono::milliseconds(10));
          VLOG(1) << path << " is zero size, waiting";
        }
      }
    } else {
      VLOG(1) << "Created " << path;
      PCHECK(fd != -1) << ": Failed to open " << path;
      PCHECK(ftruncate(fd, size_) == 0);
    }

    data_ = mmap(NULL, size_, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    PCHECK(data_ != MAP_FAILED);
    PCHECK(close(fd) == 0);
    PageFaultData(static_cast<char *>(data_), size_);

    ipc_lib::InitializeLocklessQueueMemory(memory(), config_);
  }

  ~MMapedQueue() { PCHECK(munmap(data_, size_) == 0); }

  ipc_lib::LocklessQueueMemory *memory() const {
    return reinterpret_cast<ipc_lib::LocklessQueueMemory *>(data_);
  }

  const ipc_lib::LocklessQueueConfiguration &config() const { return config_; }

  absl::Span<char> GetSharedMemory() const {
    return absl::Span<char>(static_cast<char *>(data_), size_);
  }

 private:
  ipc_lib::LocklessQueueConfiguration config_;

  size_t size_;
  void *data_;
};

namespace {

const Node *MaybeMyNode(const Configuration *configuration) {
  if (!configuration->has_nodes()) {
    return nullptr;
  }

  return configuration::GetMyNode(configuration);
}

namespace chrono = ::std::chrono;

}  // namespace

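// A minimal usage sketch (the config path, channel name, and message type here
// are illustrative assumptions, not defined by this file):
//
//   aos::FlatbufferDetachedBuffer<aos::Configuration> config =
//       aos::configuration::ReadConfig("config.json");
//   aos::ShmEventLoop event_loop(&config.message());
//   event_loop.MakeWatcher("/test", [](const examples::Ping &ping) {
//     LOG(INFO) << "Got a ping with value " << ping.value();
//   });
//   event_loop.Run();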
ShmEventLoop::ShmEventLoop(const Configuration *configuration)
    : EventLoop(configuration),
      name_(FLAGS_application_name),
      node_(MaybeMyNode(configuration)) {
  if (configuration->has_nodes()) {
    CHECK(node_ != nullptr) << ": Couldn't find node in config.";
  }
}

namespace shm_event_loop_internal {

class SimpleShmFetcher {
 public:
  explicit SimpleShmFetcher(ShmEventLoop *event_loop, const Channel *channel)
      : event_loop_(event_loop),
        channel_(channel),
        lockless_queue_memory_(
            channel,
            chrono::ceil<chrono::seconds>(chrono::nanoseconds(
                event_loop->configuration()->channel_storage_duration()))),
        lockless_queue_(lockless_queue_memory_.memory(),
                        lockless_queue_memory_.config()) {
    context_.data = nullptr;
    // Point the queue index at the next index to read starting now. This
    // makes it such that FetchNext will read the next message sent after
    // the fetcher is created.
    PointAtNextQueueIndex();
  }

  ~SimpleShmFetcher() {}

  // Sets this object to copy data out of the shared memory into a private
  // buffer when fetching.
  void CopyDataOnFetch() {
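    // Over-allocate so data_storage_start() can round the pointer up to
    // kChannelDataAlignment.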
    data_storage_.reset(static_cast<char *>(
        malloc(channel_->max_size() + kChannelDataAlignment - 1)));
  }

  // Points the next message to fetch at the queue index which will be
  // populated next.
  void PointAtNextQueueIndex() {
    actual_queue_index_ = lockless_queue_.LatestQueueIndex();
    if (!actual_queue_index_.valid()) {
      // Nothing in the queue. The next element will show up at the 0th
      // index in the queue.
      actual_queue_index_ =
          ipc_lib::QueueIndex::Zero(lockless_queue_.queue_size());
    } else {
      actual_queue_index_ = actual_queue_index_.Increment();
    }
  }

  bool FetchNext() {
    const ipc_lib::LocklessQueue::ReadResult read_result =
        DoFetch(actual_queue_index_);

    return read_result == ipc_lib::LocklessQueue::ReadResult::GOOD;
  }

  bool Fetch() {
    const ipc_lib::QueueIndex queue_index = lockless_queue_.LatestQueueIndex();
    // actual_queue_index_ is only meaningful if it was set by Fetch or
    // FetchNext, which is also when context_.data gets set. So, only use it
    // to skip fetching when context_.data is set.
    //
    // Also, if the latest queue index is invalid, we are empty. So there
    // is nothing to fetch.
    if ((context_.data != nullptr &&
         queue_index == actual_queue_index_.DecrementBy(1u)) ||
        !queue_index.valid()) {
      return false;
    }

    const ipc_lib::LocklessQueue::ReadResult read_result = DoFetch(queue_index);

    CHECK(read_result != ipc_lib::LocklessQueue::ReadResult::NOTHING_NEW)
        << ": Queue index went backwards. This should never happen. "
        << configuration::CleanedChannelToString(channel_);

    return read_result == ipc_lib::LocklessQueue::ReadResult::GOOD;
  }

  Context context() const { return context_; }

  bool RegisterWakeup(int priority) {
    return lockless_queue_.RegisterWakeup(priority);
  }

  void UnregisterWakeup() { lockless_queue_.UnregisterWakeup(); }

  absl::Span<char> GetSharedMemory() const {
    return lockless_queue_memory_.GetSharedMemory();
  }

  absl::Span<char> GetPrivateMemory() const {
    // Can't usefully expose this for pinning, because the buffer changes
    // address for each message. Callers who want to work with that should just
    // grab the whole shared memory buffer instead.
    return absl::Span<char>(
        const_cast<SimpleShmFetcher *>(this)->data_storage_start(),
        lockless_queue_.message_data_size());
  }

 private:
  ipc_lib::LocklessQueue::ReadResult DoFetch(ipc_lib::QueueIndex queue_index) {
    // TODO(austin): Get behind and make sure it dies.
    char *copy_buffer = nullptr;
    if (copy_data()) {
      copy_buffer = data_storage_start();
    }
    ipc_lib::LocklessQueue::ReadResult read_result = lockless_queue_.Read(
        queue_index.index(), &context_.monotonic_event_time,
        &context_.realtime_event_time, &context_.monotonic_remote_time,
        &context_.realtime_remote_time, &context_.remote_queue_index,
        &context_.size, copy_buffer);

    if (read_result == ipc_lib::LocklessQueue::ReadResult::GOOD) {
      context_.queue_index = queue_index.index();
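      // Messages which weren't forwarded from another node come back with
      // sentinel remote metadata; fall back to the local queue index and
      // event times for those.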
      if (context_.remote_queue_index == 0xffffffffu) {
        context_.remote_queue_index = context_.queue_index;
      }
      if (context_.monotonic_remote_time == aos::monotonic_clock::min_time) {
        context_.monotonic_remote_time = context_.monotonic_event_time;
      }
      if (context_.realtime_remote_time == aos::realtime_clock::min_time) {
        context_.realtime_remote_time = context_.realtime_event_time;
      }
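      // The payload is packed at the end of the fixed-size message buffer, so
      // offset the data pointer accordingly.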
      const char *const data = DataBuffer();
      if (data) {
        context_.data =
            data + lockless_queue_.message_data_size() - context_.size;
      } else {
        context_.data = nullptr;
      }
      actual_queue_index_ = queue_index.Increment();
    }

    // Make sure the data wasn't modified while we were reading it. This
    // can only happen if you are reading the last message *while* it is
    // being written to, which means you are pretty far behind.
    CHECK(read_result != ipc_lib::LocklessQueue::ReadResult::OVERWROTE)
        << ": Got behind while reading and the last message was modified "
           "out from under us while we were reading it. Don't get so far "
           "behind on: "
        << configuration::CleanedChannelToString(channel_);

    // We fell behind between when we read the index and read the value.
    // This isn't worth recovering from since this means we went to sleep
    // for a long time in the middle of this function.
    if (read_result == ipc_lib::LocklessQueue::ReadResult::TOO_OLD) {
      event_loop_->SendTimingReport();
      LOG(FATAL) << "The next message is no longer available. "
                 << configuration::CleanedChannelToString(channel_);
    }

    return read_result;
  }

  char *data_storage_start() const {
    CHECK(copy_data());
    return RoundChannelData(data_storage_.get(), channel_->max_size());
  }

  // Note that for some modes the return value will change as new messages are
  // read.
  const char *DataBuffer() const {
    if (copy_data()) {
      return data_storage_start();
    }
    return nullptr;
  }

  bool copy_data() const { return static_cast<bool>(data_storage_); }

  aos::ShmEventLoop *event_loop_;
  const Channel *const channel_;
  MMapedQueue lockless_queue_memory_;
  ipc_lib::LocklessQueue lockless_queue_;

  ipc_lib::QueueIndex actual_queue_index_ =
      ipc_lib::LocklessQueue::empty_queue_index();

  // This being empty indicates we're not going to copy data.
  std::unique_ptr<char, decltype(&free)> data_storage_{nullptr, &free};

  Context context_;
};

class ShmFetcher : public RawFetcher {
 public:
  explicit ShmFetcher(ShmEventLoop *event_loop, const Channel *channel)
      : RawFetcher(event_loop, channel),
        simple_shm_fetcher_(event_loop, channel) {
    simple_shm_fetcher_.CopyDataOnFetch();
  }

  ~ShmFetcher() { context_.data = nullptr; }

  std::pair<bool, monotonic_clock::time_point> DoFetchNext() override {
    if (simple_shm_fetcher_.FetchNext()) {
      context_ = simple_shm_fetcher_.context();
      return std::make_pair(true, monotonic_clock::now());
    }
    return std::make_pair(false, monotonic_clock::min_time);
  }

  std::pair<bool, monotonic_clock::time_point> DoFetch() override {
    if (simple_shm_fetcher_.Fetch()) {
      context_ = simple_shm_fetcher_.context();
      return std::make_pair(true, monotonic_clock::now());
    }
    return std::make_pair(false, monotonic_clock::min_time);
  }

  absl::Span<char> GetPrivateMemory() const {
    return simple_shm_fetcher_.GetPrivateMemory();
  }

 private:
  SimpleShmFetcher simple_shm_fetcher_;
};

class ShmSender : public RawSender {
 public:
  explicit ShmSender(EventLoop *event_loop, const Channel *channel)
      : RawSender(event_loop, channel),
        lockless_queue_memory_(
            channel,
            chrono::ceil<chrono::seconds>(chrono::nanoseconds(
                event_loop->configuration()->channel_storage_duration()))),
        lockless_queue_(lockless_queue_memory_.memory(),
                        lockless_queue_memory_.config()),
        lockless_queue_sender_(
            VerifySender(lockless_queue_.MakeSender(), channel)) {}

  ~ShmSender() override {}

  static ipc_lib::LocklessQueue::Sender VerifySender(
      std::optional<ipc_lib::LocklessQueue::Sender> &&sender,
      const Channel *channel) {
    if (sender) {
      return std::move(sender.value());
    }
    LOG(FATAL) << "Failed to create sender on "
               << configuration::CleanedChannelToString(channel)
               << ", too many senders.";
  }

  void *data() override { return lockless_queue_sender_.Data(); }
  size_t size() override { return lockless_queue_sender_.size(); }
  bool DoSend(size_t length,
              aos::monotonic_clock::time_point monotonic_remote_time,
              aos::realtime_clock::time_point realtime_remote_time,
              uint32_t remote_queue_index) override {
    CHECK_LE(length, static_cast<size_t>(channel()->max_size()))
        << ": Sent too big a message on "
        << configuration::CleanedChannelToString(channel());
    lockless_queue_sender_.Send(
        length, monotonic_remote_time, realtime_remote_time, remote_queue_index,
        &monotonic_sent_time_, &realtime_sent_time_, &sent_queue_index_);
    lockless_queue_.Wakeup(event_loop()->priority());
    return true;
  }

  bool DoSend(const void *msg, size_t length,
              aos::monotonic_clock::time_point monotonic_remote_time,
              aos::realtime_clock::time_point realtime_remote_time,
              uint32_t remote_queue_index) override {
    CHECK_LE(length, static_cast<size_t>(channel()->max_size()))
        << ": Sent too big a message on "
        << configuration::CleanedChannelToString(channel());
    lockless_queue_sender_.Send(reinterpret_cast<const char *>(msg), length,
                                monotonic_remote_time, realtime_remote_time,
                                remote_queue_index, &monotonic_sent_time_,
                                &realtime_sent_time_, &sent_queue_index_);
    lockless_queue_.Wakeup(event_loop()->priority());
    // TODO(austin): Return an error if we send too fast.
    return true;
  }

  absl::Span<char> GetSharedMemory() const {
    return lockless_queue_memory_.GetSharedMemory();
  }

 private:
  MMapedQueue lockless_queue_memory_;
  ipc_lib::LocklessQueue lockless_queue_;
  ipc_lib::LocklessQueue::Sender lockless_queue_sender_;
};

// Class to manage the state for a Watcher.
class ShmWatcherState : public WatcherState {
 public:
  ShmWatcherState(
      ShmEventLoop *event_loop, const Channel *channel,
      std::function<void(const Context &context, const void *message)> fn,
      bool copy_data)
      : WatcherState(event_loop, channel, std::move(fn)),
        event_loop_(event_loop),
        event_(this),
        simple_shm_fetcher_(event_loop, channel) {
    if (copy_data) {
      simple_shm_fetcher_.CopyDataOnFetch();
    }
  }

  ~ShmWatcherState() override { event_loop_->RemoveEvent(&event_); }

  void Startup(EventLoop *event_loop) override {
    simple_shm_fetcher_.PointAtNextQueueIndex();
    CHECK(RegisterWakeup(event_loop->priority()));
  }

  // Returns true if there is new data available.
  bool CheckForNewData() {
    if (!has_new_data_) {
      has_new_data_ = simple_shm_fetcher_.FetchNext();

      if (has_new_data_) {
        event_.set_event_time(
            simple_shm_fetcher_.context().monotonic_event_time);
        event_loop_->AddEvent(&event_);
      }
    }

    return has_new_data_;
  }

  // Consumes the data by calling the callback.
  void HandleEvent() {
    CHECK(has_new_data_);
    DoCallCallback(monotonic_clock::now, simple_shm_fetcher_.context());
    has_new_data_ = false;
    CheckForNewData();
  }

  // Registers us to receive a signal on event reception.
  bool RegisterWakeup(int priority) {
    return simple_shm_fetcher_.RegisterWakeup(priority);
  }

  void UnregisterWakeup() { return simple_shm_fetcher_.UnregisterWakeup(); }

  absl::Span<char> GetSharedMemory() const {
    return simple_shm_fetcher_.GetSharedMemory();
  }

 private:
  bool has_new_data_ = false;

  ShmEventLoop *event_loop_;
  EventHandler<ShmWatcherState> event_;
  SimpleShmFetcher simple_shm_fetcher_;
};

// Adapter class to adapt a timerfd to a TimerHandler.
class ShmTimerHandler final : public TimerHandler {
 public:
  ShmTimerHandler(ShmEventLoop *shm_event_loop, ::std::function<void()> fn)
      : TimerHandler(shm_event_loop, std::move(fn)),
        shm_event_loop_(shm_event_loop),
        event_(this) {
    shm_event_loop_->epoll_.OnReadable(timerfd_.fd(), [this]() {
      // The timer may fire spuriously. HandleEvent on the event loop will
      // call the callback if it is needed. It may also have called it when
      // processing some other event, and the kernel decided to deliver this
      // wakeup anyway.
      timerfd_.Read();
      shm_event_loop_->HandleEvent();
    });
  }

  ~ShmTimerHandler() {
    Disable();
    shm_event_loop_->epoll_.DeleteFd(timerfd_.fd());
  }

  void HandleEvent() {
    CHECK(!event_.valid());
    const auto monotonic_now = Call(monotonic_clock::now, base_);
    if (event_.valid()) {
      // If someone called Setup inside Call, rescheduling is already taken care
      // of. Bail.
      return;
    }

    if (repeat_offset_ == chrono::seconds(0)) {
      timerfd_.Disable();
    } else {
      // Compute how many cycles have elapsed and schedule the next wakeup one
      // full cycle into the future.
      const int elapsed_cycles =
          std::max<int>(0, (monotonic_now - base_ + repeat_offset_ -
                            std::chrono::nanoseconds(1)) /
                               repeat_offset_);
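      // e.g. with a 10ms repeat_offset_, waking up 25ms after base_ counts as
      // 3 elapsed cycles, so base_ advances by 30ms below.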
      base_ += repeat_offset_ * elapsed_cycles;

      // Update the heap and schedule the timerfd wakeup.
      event_.set_event_time(base_);
      shm_event_loop_->AddEvent(&event_);
      timerfd_.SetTime(base_, chrono::seconds(0));
    }
  }

  void Setup(monotonic_clock::time_point base,
             monotonic_clock::duration repeat_offset) override {
    if (event_.valid()) {
      shm_event_loop_->RemoveEvent(&event_);
    }

    timerfd_.SetTime(base, repeat_offset);
    base_ = base;
    repeat_offset_ = repeat_offset;
    event_.set_event_time(base_);
    shm_event_loop_->AddEvent(&event_);
  }

  void Disable() override {
    shm_event_loop_->RemoveEvent(&event_);
    timerfd_.Disable();
  }

 private:
  ShmEventLoop *shm_event_loop_;
  EventHandler<ShmTimerHandler> event_;

  internal::TimerFd timerfd_;

  monotonic_clock::time_point base_;
  monotonic_clock::duration repeat_offset_;
};

// Adapter class to the timerfd and PhasedLoop.
class ShmPhasedLoopHandler final : public PhasedLoopHandler {
 public:
  ShmPhasedLoopHandler(ShmEventLoop *shm_event_loop,
                       ::std::function<void(int)> fn,
                       const monotonic_clock::duration interval,
                       const monotonic_clock::duration offset)
      : PhasedLoopHandler(shm_event_loop, std::move(fn), interval, offset),
        shm_event_loop_(shm_event_loop),
        event_(this) {
    shm_event_loop_->epoll_.OnReadable(
        timerfd_.fd(), [this]() { shm_event_loop_->HandleEvent(); });
  }

  void HandleEvent() {
    // The return value of Read() is the number of cycles that have elapsed.
    // Because we check when this event *should* have happened, there are cases
    // where Read() returns 0 even though 1 cycle has actually happened; this
    // occurs when the timer interrupt hasn't triggered yet. Therefore, ignore
    // the return value. Call() handles rescheduling and calculating elapsed
    // cycles without any extra help.
    timerfd_.Read();
    event_.Invalidate();

    Call(monotonic_clock::now, [this](monotonic_clock::time_point sleep_time) {
      Schedule(sleep_time);
    });
  }

  ~ShmPhasedLoopHandler() override {
    shm_event_loop_->epoll_.DeleteFd(timerfd_.fd());
    shm_event_loop_->RemoveEvent(&event_);
  }

 private:
  // Reschedules the timer.
  void Schedule(monotonic_clock::time_point sleep_time) override {
    if (event_.valid()) {
      shm_event_loop_->RemoveEvent(&event_);
    }

    timerfd_.SetTime(sleep_time, ::aos::monotonic_clock::zero());
    event_.set_event_time(sleep_time);
    shm_event_loop_->AddEvent(&event_);
  }

  ShmEventLoop *shm_event_loop_;
  EventHandler<ShmPhasedLoopHandler> event_;

  internal::TimerFd timerfd_;
};

}  // namespace shm_event_loop_internal

::std::unique_ptr<RawFetcher> ShmEventLoop::MakeRawFetcher(
    const Channel *channel) {
  if (!configuration::ChannelIsReadableOnNode(channel, node())) {
    LOG(FATAL) << "Channel { \"name\": \"" << channel->name()->string_view()
               << "\", \"type\": \"" << channel->type()->string_view()
               << "\" } is not able to be fetched on this node. Check your "
                  "configuration.";
  }

  return ::std::unique_ptr<RawFetcher>(new ShmFetcher(this, channel));
}

::std::unique_ptr<RawSender> ShmEventLoop::MakeRawSender(
    const Channel *channel) {
  TakeSender(channel);

  return ::std::unique_ptr<RawSender>(new ShmSender(this, channel));
}

void ShmEventLoop::MakeRawWatcher(
    const Channel *channel,
    std::function<void(const Context &context, const void *message)> watcher) {
  TakeWatcher(channel);

  NewWatcher(::std::unique_ptr<WatcherState>(
      new ShmWatcherState(this, channel, std::move(watcher), true)));
}

void ShmEventLoop::MakeRawNoArgWatcher(
    const Channel *channel,
    std::function<void(const Context &context)> watcher) {
  TakeWatcher(channel);

  NewWatcher(::std::unique_ptr<WatcherState>(new ShmWatcherState(
      this, channel,
      [watcher](const Context &context, const void *) { watcher(context); },
      false)));
}

TimerHandler *ShmEventLoop::AddTimer(::std::function<void()> callback) {
  return NewTimer(::std::unique_ptr<TimerHandler>(
      new ShmTimerHandler(this, ::std::move(callback))));
}

PhasedLoopHandler *ShmEventLoop::AddPhasedLoop(
    ::std::function<void(int)> callback,
    const monotonic_clock::duration interval,
    const monotonic_clock::duration offset) {
  return NewPhasedLoop(::std::unique_ptr<PhasedLoopHandler>(
      new ShmPhasedLoopHandler(this, ::std::move(callback), interval, offset)));
}

void ShmEventLoop::OnRun(::std::function<void()> on_run) {
  on_run_.push_back(::std::move(on_run));
}

void ShmEventLoop::HandleEvent() {
  // Update all the times for handlers.
  for (::std::unique_ptr<WatcherState> &base_watcher : watchers_) {
    ShmWatcherState *watcher =
        reinterpret_cast<ShmWatcherState *>(base_watcher.get());

    watcher->CheckForNewData();
  }

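  // Now run all scheduled events (watcher callbacks, timers, and phased loops)
  // whose event time has arrived, oldest first.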
  while (true) {
    if (EventCount() == 0 ||
        PeekEvent()->event_time() > monotonic_clock::now()) {
      break;
    }

    EventLoopEvent *event = PopEvent();
    event->HandleEvent();
  }
}

// RAII class to mask signals.
class ScopedSignalMask {
 public:
  ScopedSignalMask(std::initializer_list<int> signals) {
    sigset_t sigset;
    PCHECK(sigemptyset(&sigset) == 0);
    for (int signal : signals) {
      PCHECK(sigaddset(&sigset, signal) == 0);
    }

    PCHECK(sigprocmask(SIG_BLOCK, &sigset, &old_) == 0);
  }

  ~ScopedSignalMask() { PCHECK(sigprocmask(SIG_SETMASK, &old_, nullptr) == 0); }

 private:
  sigset_t old_;
};

// Class to manage the static state associated with killing multiple event
// loops.
class SignalHandler {
 public:
  // Gets the singleton.
  static SignalHandler *global() {
    static SignalHandler loop;
    return &loop;
  }

  // Handles the signal with the singleton.
  static void HandleSignal(int) { global()->DoHandleSignal(); }

  // Registers an event loop to receive Exit() calls.
  void Register(ShmEventLoop *event_loop) {
    // Block signals while we have the mutex so we never race with the signal
    // handler.
    ScopedSignalMask mask({SIGINT, SIGHUP, SIGTERM});
    std::unique_lock<stl_mutex> locker(mutex_);
    if (event_loops_.size() == 0) {
      // The first caller registers the signal handler.
      struct sigaction new_action;
      sigemptyset(&new_action.sa_mask);
      // This makes it so that 2 control c's to a stuck process will kill it by
      // restoring the original signal handler.
      new_action.sa_flags = SA_RESETHAND;
      new_action.sa_handler = &HandleSignal;

      PCHECK(sigaction(SIGINT, &new_action, &old_action_int_) == 0);
      PCHECK(sigaction(SIGHUP, &new_action, &old_action_hup_) == 0);
      PCHECK(sigaction(SIGTERM, &new_action, &old_action_term_) == 0);
    }

    event_loops_.push_back(event_loop);
  }

  // Unregisters an event loop to receive Exit() calls.
  void Unregister(ShmEventLoop *event_loop) {
    // Block signals while we have the mutex so we never race with the signal
    // handler.
    ScopedSignalMask mask({SIGINT, SIGHUP, SIGTERM});
    std::unique_lock<stl_mutex> locker(mutex_);

    event_loops_.erase(
        std::find(event_loops_.begin(), event_loops_.end(), event_loop));

    if (event_loops_.size() == 0u) {
      // The last caller restores the original signal handlers.
      PCHECK(sigaction(SIGINT, &old_action_int_, nullptr) == 0);
      PCHECK(sigaction(SIGHUP, &old_action_hup_, nullptr) == 0);
      PCHECK(sigaction(SIGTERM, &old_action_term_, nullptr) == 0);
    }
  }

 private:
  void DoHandleSignal() {
    // We block signals while grabbing the lock, so there should never be a
    // race. Confirm that this is true using trylock.
    CHECK(mutex_.try_lock()) << ": sigprocmask failed to block signals while "
                                "modifying the event loop list.";
    for (ShmEventLoop *event_loop : event_loops_) {
      event_loop->Exit();
    }
    mutex_.unlock();
  }

  // Mutex to protect all state.
  stl_mutex mutex_;
  std::vector<ShmEventLoop *> event_loops_;
  struct sigaction old_action_int_;
  struct sigaction old_action_hup_;
  struct sigaction old_action_term_;
};

void ShmEventLoop::Run() {
  SignalHandler::global()->Register(this);

  std::unique_ptr<ipc_lib::SignalFd> signalfd;

  if (watchers_.size() > 0) {
    signalfd.reset(new ipc_lib::SignalFd({ipc_lib::kWakeupSignal}));

    epoll_.OnReadable(signalfd->fd(), [signalfd_ptr = signalfd.get(), this]() {
      signalfd_siginfo result = signalfd_ptr->Read();
      CHECK_EQ(result.ssi_signo, ipc_lib::kWakeupSignal);

      // TODO(austin): We should really be checking *everything*, not just
      // watchers, and calling the oldest thing first. That will improve
      // determinism a lot.

      HandleEvent();
    });
  }

  MaybeScheduleTimingReports();

  ReserveEvents();

  {
    AosLogToFbs aos_logger;
    if (!skip_logger_) {
      aos_logger.Initialize(MakeSender<logging::LogMessageFbs>("/aos"));
    }

    aos::SetCurrentThreadName(name_.substr(0, 16));
    const cpu_set_t default_affinity = DefaultAffinity();
    if (!CPU_EQUAL(&affinity_, &default_affinity)) {
      ::aos::SetCurrentThreadAffinity(affinity_);
    }
    // Now, all the callbacks are setup. Lock everything into memory and go RT.
    if (priority_ != 0) {
      ::aos::InitRT();

      LOG(INFO) << "Setting priority to " << priority_;
      ::aos::SetCurrentThreadRealtimePriority(priority_);
    }

    set_is_running(true);

    // Now that we are realtime (but before the OnRun handlers run), snap the
    // queue index.
    for (::std::unique_ptr<WatcherState> &watcher : watchers_) {
      watcher->Startup(this);
    }

    // Now that we are RT, run all the OnRun handlers.
    for (const auto &run : on_run_) {
      run();
    }

    // And start our main event loop which runs all the timers and handles Quit.
    epoll_.Run();

    // Once epoll exits, there is no useful nonrt work left to do.
    set_is_running(false);

    // Nothing time or synchronization critical needs to happen after this
    // point. Drop RT priority.
    ::aos::UnsetCurrentThreadRealtimePriority();
  }

  for (::std::unique_ptr<WatcherState> &base_watcher : watchers_) {
    ShmWatcherState *watcher =
        reinterpret_cast<ShmWatcherState *>(base_watcher.get());
    watcher->UnregisterWakeup();
  }

  if (watchers_.size() > 0) {
    epoll_.DeleteFd(signalfd->fd());
    signalfd.reset();
  }

  SignalHandler::global()->Unregister(this);

  // Trigger any remaining senders or fetchers to be cleared before destroying
  // the event loop so the bookkeeping matches. Do this in the thread that
  // created the timing reporter.
  timing_report_sender_.reset();
}

void ShmEventLoop::Exit() { epoll_.Quit(); }

ShmEventLoop::~ShmEventLoop() {
  // Force everything that has an fd registered with epoll to be destroyed now.
  timers_.clear();
  phased_loops_.clear();
  watchers_.clear();

  CHECK(!is_running()) << ": ShmEventLoop destroyed while running";
}

void ShmEventLoop::SetRuntimeRealtimePriority(int priority) {
  if (is_running()) {
    LOG(FATAL) << "Cannot set realtime priority while running.";
  }
  priority_ = priority;
}

void ShmEventLoop::SetRuntimeAffinity(const cpu_set_t &cpuset) {
  if (is_running()) {
    LOG(FATAL) << "Cannot set affinity while running.";
  }
  affinity_ = cpuset;
}

void ShmEventLoop::set_name(const std::string_view name) {
  name_ = std::string(name);
  UpdateTimingReport();
}

absl::Span<char> ShmEventLoop::GetWatcherSharedMemory(const Channel *channel) {
  ShmWatcherState *const watcher_state =
      static_cast<ShmWatcherState *>(GetWatcherState(channel));
  return watcher_state->GetSharedMemory();
}

absl::Span<char> ShmEventLoop::GetShmSenderSharedMemory(
    const aos::RawSender *sender) const {
  return static_cast<const ShmSender *>(sender)->GetSharedMemory();
}

absl::Span<char> ShmEventLoop::GetShmFetcherPrivateMemory(
    const aos::RawFetcher *fetcher) const {
  return static_cast<const ShmFetcher *>(fetcher)->GetPrivateMemory();
}

pid_t ShmEventLoop::GetTid() { return syscall(SYS_gettid); }

}  // namespace aos