// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc.  All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <google/protobuf/arena.h>

#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif

namespace google {
namespace protobuf {

google::protobuf::internal::SequenceNumber Arena::lifecycle_id_generator_;
#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
Arena::ThreadCache& Arena::thread_cache() {
  static internal::ThreadLocalStorage<ThreadCache>* thread_cache_ =
      new internal::ThreadLocalStorage<ThreadCache>();
  return *thread_cache_->Get();
}
#elif defined(PROTOBUF_USE_DLLS)
Arena::ThreadCache& Arena::thread_cache() {
  static GOOGLE_THREAD_LOCAL ThreadCache thread_cache_ = { -1, NULL };
  return thread_cache_;
}
#else
GOOGLE_THREAD_LOCAL Arena::ThreadCache Arena::thread_cache_ = { -1, NULL };
#endif

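// Initializes a freshly constructed arena: takes a new lifecycle id, clears
// the block list, hint, and cleanup list, and, if the caller supplied an
// initial block through ArenaOptions, threads it onto the block list so the
// constructing thread can allocate from it without locking.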
void Arena::Init() {
  lifecycle_id_ = lifecycle_id_generator_.GetNext();
  blocks_ = 0;
  hint_ = 0;
  owns_first_block_ = true;
  cleanup_list_ = 0;

  if (options_.initial_block != NULL && options_.initial_block_size > 0) {
    GOOGLE_CHECK_GE(options_.initial_block_size, sizeof(Block))
        << ": Initial block size too small for header.";

    // Add first unowned block to list.
    Block* first_block = reinterpret_cast<Block*>(options_.initial_block);
    first_block->size = options_.initial_block_size;
    first_block->pos = kHeaderSize;
    first_block->next = NULL;
    // Thread which calls Init() owns the first block. This allows the
    // single-threaded case to allocate on the first block without taking any
    // locks.
    first_block->owner = &thread_cache();
    SetThreadCacheBlock(first_block);
    AddBlockInternal(first_block);
    owns_first_block_ = false;
  }

  // Call the initialization hook
  if (options_.on_arena_init != NULL) {
    hooks_cookie_ = options_.on_arena_init(this);
  } else {
    hooks_cookie_ = NULL;
  }
}

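// Destruction runs the cleanup list, frees all owned blocks, and reports the
// total bytes allocated to the destruction hook, if one was installed.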
Arena::~Arena() {
  uint64 space_allocated = ResetInternal();

  // Call the destruction hook
  if (options_.on_arena_destruction != NULL) {
    options_.on_arena_destruction(this, hooks_cookie_, space_allocated);
  }
}

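// Returns the arena to its freshly initialized state and reports how many
// bytes had been allocated.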
uint64 Arena::Reset() {
  // Invalidate any ThreadCaches pointing to any blocks we just destroyed.
  lifecycle_id_ = lifecycle_id_generator_.GetNext();
  return ResetInternal();
}

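// Shared implementation of Reset() and the destructor: runs the cleanup
// callbacks, frees blocks, and fires the reset hook.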
uint64 Arena::ResetInternal() {
  CleanupList();
  uint64 space_allocated = FreeBlocks();

  // Call the reset hook
  if (options_.on_arena_reset != NULL) {
    options_.on_arena_reset(this, hooks_cookie_, space_allocated);
  }

  return space_allocated;
}

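// Allocates a new block with room for n bytes past the header. Block sizes
// grow geometrically (doubling the previous block, capped at max_block_size)
// so long-lived arenas amortize allocator calls; a request larger than that
// gets a block sized to fit it exactly.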
Arena::Block* Arena::NewBlock(void* me, Block* my_last_block, size_t n,
                              size_t start_block_size, size_t max_block_size) {
  size_t size;
  if (my_last_block != NULL) {
    // Double the current block size, up to a limit.
    size = 2 * (my_last_block->size);
    if (size > max_block_size) size = max_block_size;
  } else {
    size = start_block_size;
  }
  if (n > size - kHeaderSize) {
    // TODO(sanjay): Check if n + kHeaderSize would overflow
    size = kHeaderSize + n;
  }

  Block* b = reinterpret_cast<Block*>(options_.block_alloc(size));
  b->pos = kHeaderSize + n;
  b->size = size;
  if (b->avail() == 0) {
    // Do not attempt to reuse this block.
    b->owner = NULL;
  } else {
    b->owner = me;
  }
#ifdef ADDRESS_SANITIZER
  // Poison the rest of the block for ASAN. It was unpoisoned by the underlying
  // malloc but it's not yet usable until we return it as part of an allocation.
  ASAN_POISON_MEMORY_REGION(
      reinterpret_cast<char*>(b) + b->pos, b->size - b->pos);
#endif
  return b;
}

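// Adds a newly allocated block to the block list while holding blocks_lock_,
// since other threads may be adding blocks concurrently.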
void Arena::AddBlock(Block* b) {
  MutexLock l(&blocks_lock_);
  AddBlockInternal(b);
}

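// List prepend shared by AddBlock() (which holds blocks_lock_) and by
// Init()/FreeBlocks(), where only one thread can be touching the arena.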
void Arena::AddBlockInternal(Block* b) {
  b->next = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  google::protobuf::internal::Release_Store(&blocks_, reinterpret_cast<google::protobuf::internal::AtomicWord>(b));
  if (b->avail() != 0) {
    // Direct future allocations to this block.
    google::protobuf::internal::Release_Store(&hint_, reinterpret_cast<google::protobuf::internal::AtomicWord>(b));
  }
}

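// Registers a cleanup callback (typically a destructor) for an object
// allocated on this arena. Nodes are pushed onto an atomic singly linked
// list that CleanupList() later walks in LIFO order.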
void Arena::AddListNode(void* elem, void (*cleanup)(void*)) {
  Node* node = reinterpret_cast<Node*>(AllocateAligned(sizeof(Node)));
  node->elem = elem;
  node->cleanup = cleanup;
  node->next = reinterpret_cast<Node*>(
      google::protobuf::internal::NoBarrier_AtomicExchange(&cleanup_list_,
          reinterpret_cast<google::protobuf::internal::AtomicWord>(node)));
}

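// Core allocation path: rounds the request up to an 8-byte boundary, invokes
// the allocation hook if one is installed, and then tries two fast paths (the
// calling thread's cached block, then the arena-wide hint block) before
// falling back to SlowAlloc().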
void* Arena::AllocateAligned(const std::type_info* allocated, size_t n) {
  // Align n to next multiple of 8 (from Hacker's Delight, Chapter 3.)
  n = (n + 7) & -8;

  // Monitor allocation if needed.
  if (GOOGLE_PREDICT_FALSE(hooks_cookie_ != NULL) &&
      options_.on_arena_allocation != NULL) {
    options_.on_arena_allocation(allocated, n, hooks_cookie_);
  }

  // If this thread already owns a block in this arena then try to use that.
  // This fast path optimizes the case where multiple threads allocate from the
  // same arena.
  if (thread_cache().last_lifecycle_id_seen == lifecycle_id_ &&
      thread_cache().last_block_used_ != NULL) {
    if (thread_cache().last_block_used_->avail() < n) {
      return SlowAlloc(n);
    }
    return AllocFromBlock(thread_cache().last_block_used_, n);
  }

  // Check whether we own the last accessed block on this arena.
  // This fast path optimizes the case where a single thread uses multiple
  // arenas.
  void* me = &thread_cache();
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::Acquire_Load(&hint_));
  if (!b || b->owner != me || b->avail() < n) {
    return SlowAlloc(n);
  }
  return AllocFromBlock(b, n);
}

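// Bump-pointer allocation from block b: advances b->pos by n and returns the
// carved-out region. Callers must already have checked that b has at least n
// bytes available.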
void* Arena::AllocFromBlock(Block* b, size_t n) {
  size_t p = b->pos;
  b->pos = p + n;
#ifdef ADDRESS_SANITIZER
  ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<char*>(b) + p, n);
#endif
  return reinterpret_cast<char*>(b) + p;
}

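// Slow path: looks for a block this thread already owns with enough room; if
// there is none, allocates a new block and links it into the arena.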
void* Arena::SlowAlloc(size_t n) {
  void* me = &thread_cache();
  Block* b = FindBlock(me);  // Find block owned by me.
  // See if allocation fits in my latest block.
  if (b != NULL && b->avail() >= n) {
    SetThreadCacheBlock(b);
    google::protobuf::internal::NoBarrier_Store(&hint_, reinterpret_cast<google::protobuf::internal::AtomicWord>(b));
    return AllocFromBlock(b, n);
  }
  b = NewBlock(me, b, n, options_.start_block_size, options_.max_block_size);
  AddBlock(b);
  if (b->owner == me) {  // If this block can be reused (see NewBlock()).
    SetThreadCacheBlock(b);
  }
  return reinterpret_cast<char*>(b) + kHeaderSize;
}

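// Total bytes obtained from the underlying allocator: the sum of all block
// sizes, including headers and any unused tail space.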
uint64 Arena::SpaceAllocated() const {
  uint64 space_allocated = 0;
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  while (b != NULL) {
    space_allocated += (b->size);
    b = b->next;
  }
  return space_allocated;
}

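// Bytes actually handed out to callers: the filled portion of every block,
// excluding block headers.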
uint64 Arena::SpaceUsed() const {
  uint64 space_used = 0;
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  while (b != NULL) {
    space_used += (b->pos - kHeaderSize);
    b = b->next;
  }
  return space_used;
}

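// Frees every block the arena owns and returns the total bytes that had been
// allocated. A first block supplied by the user through ArenaOptions is not
// deallocated; it is reset and re-added so it can be reused.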
uint64 Arena::FreeBlocks() {
  uint64 space_allocated = 0;
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  Block* first_block = NULL;
  while (b != NULL) {
    space_allocated += (b->size);
    Block* next = b->next;
    if (next != NULL) {
      options_.block_dealloc(b, b->size);
    } else {
      if (owns_first_block_) {
        options_.block_dealloc(b, b->size);
      } else {
        // User passed in the first block, so skip freeing the memory.
        first_block = b;
      }
    }
    b = next;
  }
  blocks_ = 0;
  hint_ = 0;
  if (!owns_first_block_) {
    // Make the first block that was passed in through ArenaOptions
    // available for reuse.
    first_block->pos = kHeaderSize;
    // Thread which calls Reset() owns the first block. This allows the
    // single-threaded case to allocate on the first block without taking any
    // locks.
    first_block->owner = &thread_cache();
    SetThreadCacheBlock(first_block);
    AddBlockInternal(first_block);
  }
  return space_allocated;
}

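// Runs every registered cleanup callback in LIFO order (most recently
// registered objects are cleaned up first), then clears the list.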
void Arena::CleanupList() {
  Node* head =
      reinterpret_cast<Node*>(google::protobuf::internal::NoBarrier_Load(&cleanup_list_));
  while (head != NULL) {
    head->cleanup(head->elem);
    head = head->next;
  }
  cleanup_list_ = 0;
}

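// Scans the block list for the most recently added block owned by the calling
// thread, or returns NULL if none of the blocks is owned by this thread.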
Arena::Block* Arena::FindBlock(void* me) {
  // TODO(sanjay): We might want to keep a separate list with one
  // entry per thread.
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::Acquire_Load(&blocks_));
  while (b != NULL && b->owner != me) {
    b = b->next;
  }
  return b;
}

}  // namespace protobuf
}  // namespace google