#include "aos/events/shm_event_loop.h"

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

#include <algorithm>
#include <atomic>
#include <chrono>
#include <iterator>
#include <stdexcept>
#include <thread>

#include "aos/events/aos_logging.h"
#include "aos/events/epoll.h"
#include "aos/events/event_loop_generated.h"
#include "aos/events/timing_statistics.h"
#include "aos/ipc_lib/lockless_queue.h"
#include "aos/ipc_lib/signalfd.h"
#include "aos/realtime.h"
#include "aos/stl_mutex/stl_mutex.h"
#include "aos/util/file.h"
#include "aos/util/phased_loop.h"
#include "glog/logging.h"

namespace {

// Returns the portion of the path after the last /.  This very much assumes
// that the application name is null terminated.
const char *Filename(const char *path) {
  const std::string_view path_string_view = path;
  auto last_slash_pos = path_string_view.find_last_of("/");

  return last_slash_pos == std::string_view::npos ? path
                                                  : path + last_slash_pos + 1;
}

}  // namespace

DEFINE_string(shm_base, "/dev/shm/aos",
              "Directory to place queue backing mmaped files in.");
DEFINE_uint32(permissions, 0770,
              "Permissions to make shared memory files and folders.");
DEFINE_string(application_name, Filename(program_invocation_name),
              "The application name");

namespace aos {

using namespace shm_event_loop_internal;

void SetShmBase(const std::string_view base) {
  FLAGS_shm_base = std::string(base) + "/dev/shm/aos";
}

std::string ShmFolder(const Channel *channel) {
  CHECK(channel->has_name());
  CHECK_EQ(channel->name()->string_view()[0], '/');
  return FLAGS_shm_base + channel->name()->str() + "/";
}
std::string ShmPath(const Channel *channel) {
  CHECK(channel->has_type());
  return ShmFolder(channel) + channel->type()->str() + ".v2";
}
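
// For example, with the default --shm_base of "/dev/shm/aos", a channel with
// name "/test" and type "aos.examples.Ping" is backed by the file
// "/dev/shm/aos/test/aos.examples.Ping.v2".  (The channel name and type here
// are illustrative only.)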

void PageFaultData(char *data, size_t size) {
  // This just has to divide the actual page size. Being smaller will make this
  // a bit slower than necessary, but not much. 1024 is a pretty conservative
  // choice (most pages are probably 4096).
  static constexpr size_t kPageSize = 1024;
  const size_t pages = (size + kPageSize - 1) / kPageSize;
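  // The "+ kPageSize - 1" rounds up: e.g. size = 4097 gives pages = 5, so the
  // loop below touches offsets 0, 1024, ..., 4096 and covers every page the
  // mapping spans.  (The 4097 is just an illustrative size.)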
  for (size_t i = 0; i < pages; ++i) {
    char zero = 0;
    // We need to ensure there's a writable pagetable entry, but avoid modifying
    // the data.
    //
    // Even if you lock the data into memory, some kernels still seem to lazily
    // create the actual pagetable entries. This means we need to somehow
    // "write" to the page.
    //
    // Also, this takes place while other processes may be concurrently
    // opening/initializing the memory, so we need to avoid corrupting that.
    //
    // This is the simplest operation I could think of which achieves that:
    // "store 0 if it's already 0".
    __atomic_compare_exchange_n(&data[i * kPageSize], &zero, 0, true,
                                __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  }
}

class MMapedQueue {
 public:
  MMapedQueue(const Channel *channel,
              const std::chrono::seconds channel_storage_duration) {
    std::string path = ShmPath(channel);

    config_.num_watchers = channel->num_watchers();
    config_.num_senders = channel->num_senders();
    config_.queue_size =
        channel_storage_duration.count() * channel->frequency();
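    // e.g. a channel configured for a frequency of 500 (Hz) with a 2 second
    // channel_storage_duration gets a 1000-entry queue.  (The numbers are only
    // an illustration of the sizing rule above.)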
    config_.message_data_size = channel->max_size();

    size_ = ipc_lib::LocklessQueueMemorySize(config_);

    util::MkdirP(path, FLAGS_permissions);

    // There are 2 cases.  Either the file already exists, or it does not
    // already exist and we need to create it.  Start by trying to create it.
    // If that fails, the file has already been created and we can open it
    // normally.  Once the file has been created it will never be deleted.
    int fd = open(path.c_str(), O_RDWR | O_CREAT | O_EXCL | O_CLOEXEC,
                  FLAGS_permissions);
    if (fd == -1 && errno == EEXIST) {
      VLOG(1) << path << " already created.";
      // File already exists.
      fd = open(path.c_str(), O_RDWR | O_CLOEXEC);
      PCHECK(fd != -1) << ": Failed to open " << path;
      while (true) {
        struct stat st;
        PCHECK(fstat(fd, &st) == 0);
        if (st.st_size != 0) {
          CHECK_EQ(static_cast<size_t>(st.st_size), size_)
              << ": Size of " << path
              << " doesn't match expected size of backing queue file.  Did the "
                 "queue definition change?";
          break;
        } else {
          // The creating process didn't get around to it yet.  Give it a bit.
          std::this_thread::sleep_for(std::chrono::milliseconds(10));
          VLOG(1) << path << " is zero size, waiting";
        }
      }
    } else {
      VLOG(1) << "Created " << path;
      PCHECK(fd != -1) << ": Failed to open " << path;
      PCHECK(ftruncate(fd, size_) == 0);
    }

    data_ = mmap(NULL, size_, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    PCHECK(data_ != MAP_FAILED);
    PCHECK(close(fd) == 0);
    PageFaultData(static_cast<char *>(data_), size_);

    ipc_lib::InitializeLocklessQueueMemory(memory(), config_);
  }

  ~MMapedQueue() { PCHECK(munmap(data_, size_) == 0); }

  ipc_lib::LocklessQueueMemory *memory() const {
    return reinterpret_cast<ipc_lib::LocklessQueueMemory *>(data_);
  }

  const ipc_lib::LocklessQueueConfiguration &config() const { return config_; }

  absl::Span<char> GetSharedMemory() const {
    return absl::Span<char>(static_cast<char *>(data_), size_);
  }

 private:
  ipc_lib::LocklessQueueConfiguration config_;

  size_t size_;
  void *data_;
};

namespace {

const Node *MaybeMyNode(const Configuration *configuration) {
  if (!configuration->has_nodes()) {
    return nullptr;
  }

  return configuration::GetMyNode(configuration);
}

namespace chrono = ::std::chrono;

}  // namespace

ShmEventLoop::ShmEventLoop(const Configuration *configuration)
    : EventLoop(configuration),
      name_(FLAGS_application_name),
      node_(MaybeMyNode(configuration)) {
  if (configuration->has_nodes()) {
    CHECK(node_ != nullptr) << ": Couldn't find node in config.";
  }
}

namespace shm_event_loop_internal {

class SimpleShmFetcher {
 public:
  explicit SimpleShmFetcher(ShmEventLoop *event_loop, const Channel *channel)
      : event_loop_(event_loop),
        channel_(channel),
        lockless_queue_memory_(
            channel,
            chrono::ceil<chrono::seconds>(chrono::nanoseconds(
                event_loop->configuration()->channel_storage_duration()))),
        lockless_queue_(lockless_queue_memory_.memory(),
                        lockless_queue_memory_.config()) {
    context_.data = nullptr;
    // Point the queue index at the next index to read starting now.  This
    // makes it such that FetchNext will read the next message sent after
    // the fetcher is created.
    PointAtNextQueueIndex();
  }

  ~SimpleShmFetcher() {}

  // Sets this object to copy data out of the shared memory into a private
  // buffer when fetching.
  void CopyDataOnFetch() {
    data_storage_.reset(static_cast<char *>(
        malloc(channel_->max_size() + kChannelDataAlignment - 1)));
  }

  // Points the next message to fetch at the queue index which will be
  // populated next.
  void PointAtNextQueueIndex() {
    actual_queue_index_ = lockless_queue_.LatestQueueIndex();
    if (!actual_queue_index_.valid()) {
      // Nothing in the queue.  The next element will show up at the 0th
      // index in the queue.
      actual_queue_index_ =
          ipc_lib::QueueIndex::Zero(lockless_queue_.queue_size());
    } else {
      actual_queue_index_ = actual_queue_index_.Increment();
    }
  }

  bool FetchNext() {
    const ipc_lib::LocklessQueue::ReadResult read_result =
        DoFetch(actual_queue_index_);

    return read_result == ipc_lib::LocklessQueue::ReadResult::GOOD;
  }

  bool Fetch() {
    const ipc_lib::QueueIndex queue_index = lockless_queue_.LatestQueueIndex();
    // actual_queue_index_ is only meaningful if it was set by Fetch or
    // FetchNext, which is exactly when context_.data has been set.  So only
    // skip the "already have the latest message" check when context_.data is
    // non-null.
    //
    // Also, if the latest queue index is invalid, we are empty.  So there
    // is nothing to fetch.
    if ((context_.data != nullptr &&
         queue_index == actual_queue_index_.DecrementBy(1u)) ||
        !queue_index.valid()) {
      return false;
    }

    const ipc_lib::LocklessQueue::ReadResult read_result = DoFetch(queue_index);

    CHECK(read_result != ipc_lib::LocklessQueue::ReadResult::NOTHING_NEW)
        << ": Queue index went backwards.  This should never happen.  "
        << configuration::CleanedChannelToString(channel_);

    return read_result == ipc_lib::LocklessQueue::ReadResult::GOOD;
  }

  Context context() const { return context_; }

  bool RegisterWakeup(int priority) {
    return lockless_queue_.RegisterWakeup(priority);
  }

  void UnregisterWakeup() { lockless_queue_.UnregisterWakeup(); }

  absl::Span<char> GetSharedMemory() const {
    return lockless_queue_memory_.GetSharedMemory();
  }

  absl::Span<char> GetPrivateMemory() const {
    // Can't usefully expose this for pinning, because the buffer changes
    // address for each message. Callers who want to work with that should just
    // grab the whole shared memory buffer instead.
    return absl::Span<char>(
        const_cast<SimpleShmFetcher *>(this)->data_storage_start(),
        lockless_queue_.message_data_size());
  }

 private:
  ipc_lib::LocklessQueue::ReadResult DoFetch(ipc_lib::QueueIndex queue_index) {
    // TODO(austin): Get behind and make sure it dies.
    char *copy_buffer = nullptr;
    if (copy_data()) {
      copy_buffer = data_storage_start();
    }
    ipc_lib::LocklessQueue::ReadResult read_result = lockless_queue_.Read(
        queue_index.index(), &context_.monotonic_event_time,
        &context_.realtime_event_time, &context_.monotonic_remote_time,
        &context_.realtime_remote_time, &context_.remote_queue_index,
        &context_.size, copy_buffer);

    if (read_result == ipc_lib::LocklessQueue::ReadResult::GOOD) {
      context_.queue_index = queue_index.index();
      if (context_.remote_queue_index == 0xffffffffu) {
        context_.remote_queue_index = context_.queue_index;
      }
      if (context_.monotonic_remote_time == aos::monotonic_clock::min_time) {
        context_.monotonic_remote_time = context_.monotonic_event_time;
      }
      if (context_.realtime_remote_time == aos::realtime_clock::min_time) {
        context_.realtime_remote_time = context_.realtime_event_time;
      }
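      // The message occupies the end of the fixed-size data buffer, so point
      // context_.data at the last context_.size bytes of it.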
      const char *const data = DataBuffer();
      if (data) {
        context_.data =
            data + lockless_queue_.message_data_size() - context_.size;
      } else {
        context_.data = nullptr;
      }
      actual_queue_index_ = queue_index.Increment();
    }

    // Make sure the data wasn't modified while we were reading it.  This
    // can only happen if you are reading the last message *while* it is
    // being written to, which means you are pretty far behind.
    CHECK(read_result != ipc_lib::LocklessQueue::ReadResult::OVERWROTE)
        << ": Got behind while reading and the last message was modified "
           "out from under us while we were reading it.  Don't get so far "
           "behind on: "
        << configuration::CleanedChannelToString(channel_);

    // We fell behind between when we read the index and read the value.
    // This isn't worth recovering from since this means we went to sleep
    // for a long time in the middle of this function.
    if (read_result == ipc_lib::LocklessQueue::ReadResult::TOO_OLD) {
      event_loop_->SendTimingReport();
      LOG(FATAL) << "The next message is no longer available.  "
                 << configuration::CleanedChannelToString(channel_);
    }

    return read_result;
  }

  char *data_storage_start() const {
    CHECK(copy_data());
    return RoundChannelData(data_storage_.get(), channel_->max_size());
  }

  // Note that for some modes the return value will change as new messages are
  // read.
  const char *DataBuffer() const {
    if (copy_data()) {
      return data_storage_start();
    }
    return nullptr;
  }

  bool copy_data() const { return static_cast<bool>(data_storage_); }

  aos::ShmEventLoop *event_loop_;
  const Channel *const channel_;
  MMapedQueue lockless_queue_memory_;
  ipc_lib::LocklessQueue lockless_queue_;

  ipc_lib::QueueIndex actual_queue_index_ =
      ipc_lib::LocklessQueue::empty_queue_index();

  // This being empty indicates we're not going to copy data.
  std::unique_ptr<char, decltype(&free)> data_storage_{nullptr, &free};

  Context context_;
};

class ShmFetcher : public RawFetcher {
 public:
  explicit ShmFetcher(ShmEventLoop *event_loop, const Channel *channel)
      : RawFetcher(event_loop, channel),
        simple_shm_fetcher_(event_loop, channel) {
    simple_shm_fetcher_.CopyDataOnFetch();
  }

  ~ShmFetcher() { context_.data = nullptr; }

  std::pair<bool, monotonic_clock::time_point> DoFetchNext() override {
    if (simple_shm_fetcher_.FetchNext()) {
      context_ = simple_shm_fetcher_.context();
      return std::make_pair(true, monotonic_clock::now());
    }
    return std::make_pair(false, monotonic_clock::min_time);
  }

  std::pair<bool, monotonic_clock::time_point> DoFetch() override {
    if (simple_shm_fetcher_.Fetch()) {
      context_ = simple_shm_fetcher_.context();
      return std::make_pair(true, monotonic_clock::now());
    }
    return std::make_pair(false, monotonic_clock::min_time);
  }

  absl::Span<char> GetPrivateMemory() const {
    return simple_shm_fetcher_.GetPrivateMemory();
  }

 private:
  SimpleShmFetcher simple_shm_fetcher_;
};

class ShmSender : public RawSender {
 public:
  explicit ShmSender(EventLoop *event_loop, const Channel *channel)
      : RawSender(event_loop, channel),
        lockless_queue_memory_(
            channel,
            chrono::ceil<chrono::seconds>(chrono::nanoseconds(
                event_loop->configuration()->channel_storage_duration()))),
        lockless_queue_(lockless_queue_memory_.memory(),
                        lockless_queue_memory_.config()),
        lockless_queue_sender_(
            VerifySender(lockless_queue_.MakeSender(), channel)) {}

  ~ShmSender() override {}

  static ipc_lib::LocklessQueue::Sender VerifySender(
      std::optional<ipc_lib::LocklessQueue::Sender> &&sender,
      const Channel *channel) {
    if (sender) {
      return std::move(sender.value());
    }
    LOG(FATAL) << "Failed to create sender on "
               << configuration::CleanedChannelToString(channel)
               << ", too many senders.";
  }

  void *data() override { return lockless_queue_sender_.Data(); }
  size_t size() override { return lockless_queue_sender_.size(); }
  bool DoSend(size_t length,
              aos::monotonic_clock::time_point monotonic_remote_time,
              aos::realtime_clock::time_point realtime_remote_time,
              uint32_t remote_queue_index) override {
    CHECK_LE(length, static_cast<size_t>(channel()->max_size()))
        << ": Sent too big a message on "
        << configuration::CleanedChannelToString(channel());
    lockless_queue_sender_.Send(
        length, monotonic_remote_time, realtime_remote_time, remote_queue_index,
        &monotonic_sent_time_, &realtime_sent_time_, &sent_queue_index_);
    lockless_queue_.Wakeup(event_loop()->priority());
    return true;
  }

  bool DoSend(const void *msg, size_t length,
              aos::monotonic_clock::time_point monotonic_remote_time,
              aos::realtime_clock::time_point realtime_remote_time,
              uint32_t remote_queue_index) override {
    CHECK_LE(length, static_cast<size_t>(channel()->max_size()))
        << ": Sent too big a message on "
        << configuration::CleanedChannelToString(channel());
    lockless_queue_sender_.Send(reinterpret_cast<const char *>(msg), length,
                                monotonic_remote_time, realtime_remote_time,
                                remote_queue_index, &monotonic_sent_time_,
                                &realtime_sent_time_, &sent_queue_index_);
    lockless_queue_.Wakeup(event_loop()->priority());
    // TODO(austin): Return an error if we send too fast.
    return true;
  }

  absl::Span<char> GetSharedMemory() const {
    return lockless_queue_memory_.GetSharedMemory();
  }

 private:
  MMapedQueue lockless_queue_memory_;
  ipc_lib::LocklessQueue lockless_queue_;
  ipc_lib::LocklessQueue::Sender lockless_queue_sender_;
};
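
// A rough usage sketch of the two send paths above, assuming a RawSender
// obtained from ShmEventLoop::MakeRawSender() and the Send() wrappers that the
// RawSender base class provides around DoSend() (those wrappers live outside
// this file):
//
//   // Zero-copy path: build the message directly into shared memory.
//   memcpy(raw_sender->data(), payload, payload_size);
//   raw_sender->Send(payload_size);
//
//   // Copying path: hand over an externally owned buffer instead.
//   raw_sender->Send(payload, payload_size);
//
// Both paths end with LocklessQueue::Wakeup(), which signals any registered
// watchers on the channel.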

// Class to manage the state for a Watcher.
class ShmWatcherState : public WatcherState {
 public:
  ShmWatcherState(
      ShmEventLoop *event_loop, const Channel *channel,
      std::function<void(const Context &context, const void *message)> fn,
      bool copy_data)
      : WatcherState(event_loop, channel, std::move(fn)),
        event_loop_(event_loop),
        event_(this),
        simple_shm_fetcher_(event_loop, channel) {
    if (copy_data) {
      simple_shm_fetcher_.CopyDataOnFetch();
    }
  }

  ~ShmWatcherState() override { event_loop_->RemoveEvent(&event_); }

  void Startup(EventLoop *event_loop) override {
    simple_shm_fetcher_.PointAtNextQueueIndex();
    CHECK(RegisterWakeup(event_loop->priority()));
  }

  // Returns true if there is new data available.
  bool CheckForNewData() {
    if (!has_new_data_) {
      has_new_data_ = simple_shm_fetcher_.FetchNext();

      if (has_new_data_) {
        event_.set_event_time(
            simple_shm_fetcher_.context().monotonic_event_time);
        event_loop_->AddEvent(&event_);
      }
    }

    return has_new_data_;
  }

  // Consumes the data by calling the callback.
  void HandleEvent() {
    CHECK(has_new_data_);
    DoCallCallback(monotonic_clock::now, simple_shm_fetcher_.context());
    has_new_data_ = false;
    CheckForNewData();
  }

  // Registers us to receive a signal on event reception.
  bool RegisterWakeup(int priority) {
    return simple_shm_fetcher_.RegisterWakeup(priority);
  }

  void UnregisterWakeup() { return simple_shm_fetcher_.UnregisterWakeup(); }

  absl::Span<char> GetSharedMemory() const {
    return simple_shm_fetcher_.GetSharedMemory();
  }

 private:
  bool has_new_data_ = false;

  ShmEventLoop *event_loop_;
  EventHandler<ShmWatcherState> event_;
  SimpleShmFetcher simple_shm_fetcher_;
};

// Adapter class to adapt a timerfd to a TimerHandler.
class ShmTimerHandler final : public TimerHandler {
 public:
  ShmTimerHandler(ShmEventLoop *shm_event_loop, ::std::function<void()> fn)
      : TimerHandler(shm_event_loop, std::move(fn)),
        shm_event_loop_(shm_event_loop),
        event_(this) {
    shm_event_loop_->epoll_.OnReadable(timerfd_.fd(), [this]() {
      // The timer may fire spuriously.  HandleEvent on the event loop will
      // call the callback if it is needed.  It may also have called it when
      // processing some other event, and the kernel decided to deliver this
      // wakeup anyways.
      timerfd_.Read();
      shm_event_loop_->HandleEvent();
    });
  }

  ~ShmTimerHandler() {
    Disable();
    shm_event_loop_->epoll_.DeleteFd(timerfd_.fd());
  }

  void HandleEvent() {
    CHECK(!event_.valid());
    const auto monotonic_now = Call(monotonic_clock::now, base_);
    if (event_.valid()) {
      // If someone called Setup inside Call, rescheduling is already taken
      // care of.  Bail.
      return;
    }

    if (repeat_offset_ == chrono::seconds(0)) {
      timerfd_.Disable();
    } else {
      // Compute how many whole cycles have elapsed and schedule the next
      // wakeup for the next cycle at or after now.
      const int elapsed_cycles =
          std::max<int>(0, (monotonic_now - base_ + repeat_offset_ -
                            std::chrono::nanoseconds(1)) /
                               repeat_offset_);
      base_ += repeat_offset_ * elapsed_cycles;
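      // e.g. with base_ = t0, repeat_offset_ = 10ms, and monotonic_now =
      // t0 + 25ms: elapsed_cycles = (35ms - 1ns) / 10ms = 3, so base_ becomes
      // t0 + 30ms.  (The numbers are illustrative only.)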

      // Update the heap and schedule the timerfd wakeup.
      event_.set_event_time(base_);
      shm_event_loop_->AddEvent(&event_);
      timerfd_.SetTime(base_, chrono::seconds(0));
    }
  }

  void Setup(monotonic_clock::time_point base,
             monotonic_clock::duration repeat_offset) override {
    if (event_.valid()) {
      shm_event_loop_->RemoveEvent(&event_);
    }

    timerfd_.SetTime(base, repeat_offset);
    base_ = base;
    repeat_offset_ = repeat_offset;
    event_.set_event_time(base_);
    shm_event_loop_->AddEvent(&event_);
  }

  void Disable() override {
    shm_event_loop_->RemoveEvent(&event_);
    timerfd_.Disable();
  }

 private:
  ShmEventLoop *shm_event_loop_;
  EventHandler<ShmTimerHandler> event_;

  internal::TimerFd timerfd_;

  monotonic_clock::time_point base_;
  monotonic_clock::duration repeat_offset_;
};

// Adapter class to the timerfd and PhasedLoop.
class ShmPhasedLoopHandler final : public PhasedLoopHandler {
 public:
  ShmPhasedLoopHandler(ShmEventLoop *shm_event_loop,
                       ::std::function<void(int)> fn,
                       const monotonic_clock::duration interval,
                       const monotonic_clock::duration offset)
      : PhasedLoopHandler(shm_event_loop, std::move(fn), interval, offset),
        shm_event_loop_(shm_event_loop),
        event_(this) {
    shm_event_loop_->epoll_.OnReadable(
        timerfd_.fd(), [this]() { shm_event_loop_->HandleEvent(); });
  }

  void HandleEvent() {
    // The return value for read is the number of cycles that have elapsed.
    // Because we check to see when this event *should* have happened, there
    // are cases where Read() will return 0, when 1 cycle has actually
    // happened.  This occurs when the timer interrupt hasn't triggered yet.
    // Therefore, ignore it.  Call handles rescheduling and calculating elapsed
    // cycles without any extra help.
    timerfd_.Read();
    event_.Invalidate();

    Call(monotonic_clock::now, [this](monotonic_clock::time_point sleep_time) {
      Schedule(sleep_time);
    });
  }

  ~ShmPhasedLoopHandler() override {
    shm_event_loop_->epoll_.DeleteFd(timerfd_.fd());
    shm_event_loop_->RemoveEvent(&event_);
  }

 private:
  // Reschedules the timer.
  void Schedule(monotonic_clock::time_point sleep_time) override {
    if (event_.valid()) {
      shm_event_loop_->RemoveEvent(&event_);
    }

    timerfd_.SetTime(sleep_time, ::aos::monotonic_clock::zero());
    event_.set_event_time(sleep_time);
    shm_event_loop_->AddEvent(&event_);
  }

  ShmEventLoop *shm_event_loop_;
  EventHandler<ShmPhasedLoopHandler> event_;

  internal::TimerFd timerfd_;
};

}  // namespace shm_event_loop_internal

::std::unique_ptr<RawFetcher> ShmEventLoop::MakeRawFetcher(
    const Channel *channel) {
  if (!configuration::ChannelIsReadableOnNode(channel, node())) {
    LOG(FATAL) << "Channel { \"name\": \"" << channel->name()->string_view()
               << "\", \"type\": \"" << channel->type()->string_view()
               << "\" } is not able to be fetched on this node.  Check your "
                  "configuration.";
  }

  return ::std::unique_ptr<RawFetcher>(new ShmFetcher(this, channel));
}

::std::unique_ptr<RawSender> ShmEventLoop::MakeRawSender(
    const Channel *channel) {
  TakeSender(channel);

  return ::std::unique_ptr<RawSender>(new ShmSender(this, channel));
}

void ShmEventLoop::MakeRawWatcher(
    const Channel *channel,
    std::function<void(const Context &context, const void *message)> watcher) {
  TakeWatcher(channel);

  NewWatcher(::std::unique_ptr<WatcherState>(
      new ShmWatcherState(this, channel, std::move(watcher), true)));
}

void ShmEventLoop::MakeRawNoArgWatcher(
    const Channel *channel,
    std::function<void(const Context &context)> watcher) {
  TakeWatcher(channel);

  NewWatcher(::std::unique_ptr<WatcherState>(new ShmWatcherState(
      this, channel,
      [watcher](const Context &context, const void *) { watcher(context); },
      false)));
}

TimerHandler *ShmEventLoop::AddTimer(::std::function<void()> callback) {
  return NewTimer(::std::unique_ptr<TimerHandler>(
      new ShmTimerHandler(this, ::std::move(callback))));
}

PhasedLoopHandler *ShmEventLoop::AddPhasedLoop(
    ::std::function<void(int)> callback,
    const monotonic_clock::duration interval,
    const monotonic_clock::duration offset) {
  return NewPhasedLoop(::std::unique_ptr<PhasedLoopHandler>(
      new ShmPhasedLoopHandler(this, ::std::move(callback), interval, offset)));
}

void ShmEventLoop::OnRun(::std::function<void()> on_run) {
  on_run_.push_back(::std::move(on_run));
}

void ShmEventLoop::HandleEvent() {
  // Update all the times for handlers.
  for (::std::unique_ptr<WatcherState> &base_watcher : watchers_) {
    ShmWatcherState *watcher =
        reinterpret_cast<ShmWatcherState *>(base_watcher.get());

    watcher->CheckForNewData();
  }

  while (true) {
    if (EventCount() == 0 ||
        PeekEvent()->event_time() > monotonic_clock::now()) {
      break;
    }

    EventLoopEvent *event = PopEvent();
    event->HandleEvent();
  }
}

// RAII class to mask signals.
class ScopedSignalMask {
 public:
  ScopedSignalMask(std::initializer_list<int> signals) {
    sigset_t sigset;
    PCHECK(sigemptyset(&sigset) == 0);
    for (int signal : signals) {
      PCHECK(sigaddset(&sigset, signal) == 0);
    }

    PCHECK(sigprocmask(SIG_BLOCK, &sigset, &old_) == 0);
  }

  ~ScopedSignalMask() { PCHECK(sigprocmask(SIG_SETMASK, &old_, nullptr) == 0); }

 private:
  sigset_t old_;
};

// Class to manage the static state associated with killing multiple event
// loops.
class SignalHandler {
 public:
  // Gets the singleton.
  static SignalHandler *global() {
    static SignalHandler loop;
    return &loop;
  }

  // Handles the signal with the singleton.
  static void HandleSignal(int) { global()->DoHandleSignal(); }

  // Registers an event loop to receive Exit() calls.
  void Register(ShmEventLoop *event_loop) {
    // Block signals while we have the mutex so we never race with the signal
    // handler.
    ScopedSignalMask mask({SIGINT, SIGHUP, SIGTERM});
    std::unique_lock<stl_mutex> locker(mutex_);
    if (event_loops_.size() == 0) {
      // The first caller registers the signal handler.
      struct sigaction new_action;
      sigemptyset(&new_action.sa_mask);
      // This makes it so that 2 control c's to a stuck process will kill it by
      // restoring the original signal handler.
      new_action.sa_flags = SA_RESETHAND;
      new_action.sa_handler = &HandleSignal;

      PCHECK(sigaction(SIGINT, &new_action, &old_action_int_) == 0);
      PCHECK(sigaction(SIGHUP, &new_action, &old_action_hup_) == 0);
      PCHECK(sigaction(SIGTERM, &new_action, &old_action_term_) == 0);
    }

    event_loops_.push_back(event_loop);
  }

  // Unregisters an event loop to receive Exit() calls.
  void Unregister(ShmEventLoop *event_loop) {
    // Block signals while we have the mutex so we never race with the signal
    // handler.
    ScopedSignalMask mask({SIGINT, SIGHUP, SIGTERM});
    std::unique_lock<stl_mutex> locker(mutex_);

    event_loops_.erase(
        std::find(event_loops_.begin(), event_loops_.end(), event_loop));

    if (event_loops_.size() == 0u) {
      // The last caller restores the original signal handlers.
      PCHECK(sigaction(SIGINT, &old_action_int_, nullptr) == 0);
      PCHECK(sigaction(SIGHUP, &old_action_hup_, nullptr) == 0);
      PCHECK(sigaction(SIGTERM, &old_action_term_, nullptr) == 0);
    }
  }

 private:
  void DoHandleSignal() {
    // We block signals while grabbing the lock, so there should never be a
    // race.  Confirm that this is true using trylock.
    CHECK(mutex_.try_lock()) << ": sigprocmask failed to block signals while "
                                "modifying the event loop list.";
    for (ShmEventLoop *event_loop : event_loops_) {
      event_loop->Exit();
    }
    mutex_.unlock();
  }

  // Mutex to protect all state.
  stl_mutex mutex_;
  std::vector<ShmEventLoop *> event_loops_;
  struct sigaction old_action_int_;
  struct sigaction old_action_hup_;
  struct sigaction old_action_term_;
};

void ShmEventLoop::Run() {
  SignalHandler::global()->Register(this);

  std::unique_ptr<ipc_lib::SignalFd> signalfd;

  if (watchers_.size() > 0) {
    signalfd.reset(new ipc_lib::SignalFd({ipc_lib::kWakeupSignal}));

    epoll_.OnReadable(signalfd->fd(), [signalfd_ptr = signalfd.get(), this]() {
      signalfd_siginfo result = signalfd_ptr->Read();
      CHECK_EQ(result.ssi_signo, ipc_lib::kWakeupSignal);

      // TODO(austin): We should really be checking *everything*, not just
      // watchers, and calling the oldest thing first.  That will improve
      // determinism a lot.

      HandleEvent();
    });
  }

  MaybeScheduleTimingReports();

  ReserveEvents();

  {
    AosLogToFbs aos_logger;
    if (!skip_logger_) {
      aos_logger.Initialize(MakeSender<logging::LogMessageFbs>("/aos"));
    }

    aos::SetCurrentThreadName(name_.substr(0, 16));
    const cpu_set_t default_affinity = DefaultAffinity();
    if (!CPU_EQUAL(&affinity_, &default_affinity)) {
      ::aos::SetCurrentThreadAffinity(affinity_);
    }
    // Now that all the callbacks are set up, lock everything into memory and
    // go RT.
    if (priority_ != 0) {
      ::aos::InitRT();

      LOG(INFO) << "Setting priority to " << priority_;
      ::aos::SetCurrentThreadRealtimePriority(priority_);
    }

    set_is_running(true);

    // Now that we are realtime (but before the OnRun handlers run), snap the
    // queue index.
    for (::std::unique_ptr<WatcherState> &watcher : watchers_) {
      watcher->Startup(this);
    }

    // Now that we are RT, run all the OnRun handlers.
    for (const auto &run : on_run_) {
      run();
    }

    // And start our main event loop which runs all the timers and handles Quit.
    epoll_.Run();

    // Once epoll exits, there is no useful nonrt work left to do.
    set_is_running(false);

    // Nothing time or synchronization critical needs to happen after this
    // point.  Drop RT priority.
    ::aos::UnsetCurrentThreadRealtimePriority();
  }

  for (::std::unique_ptr<WatcherState> &base_watcher : watchers_) {
    ShmWatcherState *watcher =
        reinterpret_cast<ShmWatcherState *>(base_watcher.get());
    watcher->UnregisterWakeup();
  }

  if (watchers_.size() > 0) {
    epoll_.DeleteFd(signalfd->fd());
    signalfd.reset();
  }

  SignalHandler::global()->Unregister(this);

  // Trigger any remaining senders or fetchers to be cleared before destroying
  // the event loop so the bookkeeping matches.  Do this in the thread that
  // created the timing reporter.
  timing_report_sender_.reset();
}
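
// A rough sketch of how a process typically drives this event loop, assuming
// the usual AOS helpers that live outside this file (aos::configuration::
// ReadConfig() and the typed MakeWatcher() wrapper on the EventLoop base
// class).  The channel name and message type below are illustrative only:
//
//   aos::FlatbufferDetachedBuffer<aos::Configuration> config =
//       aos::configuration::ReadConfig("config.json");
//   aos::ShmEventLoop event_loop(&config.message());
//   event_loop.SetRuntimeRealtimePriority(5);
//   event_loop.MakeWatcher("/test", [](const examples::Ping &ping) {
//     LOG(INFO) << "got ping value " << ping.value();
//   });
//   event_loop.Run();  // Blocks until Exit() is called or SIGINT/SIGHUP/
//                      // SIGTERM arrives (see SignalHandler above).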

void ShmEventLoop::Exit() { epoll_.Quit(); }

ShmEventLoop::~ShmEventLoop() {
  // Force everything that has an fd registered with epoll to be destroyed now.
  timers_.clear();
  phased_loops_.clear();
  watchers_.clear();

  CHECK(!is_running()) << ": ShmEventLoop destroyed while running";
}

void ShmEventLoop::SetRuntimeRealtimePriority(int priority) {
  if (is_running()) {
    LOG(FATAL) << "Cannot set realtime priority while running.";
  }
  priority_ = priority;
}

void ShmEventLoop::SetRuntimeAffinity(const cpu_set_t &cpuset) {
  if (is_running()) {
    LOG(FATAL) << "Cannot set affinity while running.";
  }
  affinity_ = cpuset;
}

void ShmEventLoop::set_name(const std::string_view name) {
  name_ = std::string(name);
  UpdateTimingReport();
}

absl::Span<char> ShmEventLoop::GetWatcherSharedMemory(const Channel *channel) {
  ShmWatcherState *const watcher_state =
      static_cast<ShmWatcherState *>(GetWatcherState(channel));
  return watcher_state->GetSharedMemory();
}

absl::Span<char> ShmEventLoop::GetShmSenderSharedMemory(
    const aos::RawSender *sender) const {
  return static_cast<const ShmSender *>(sender)->GetSharedMemory();
}

absl::Span<char> ShmEventLoop::GetShmFetcherPrivateMemory(
    const aos::RawFetcher *fetcher) const {
  return static_cast<const ShmFetcher *>(fetcher)->GetPrivateMemory();
}

pid_t ShmEventLoop::GetTid() { return syscall(SYS_gettid); }

}  // namespace aos