// Copyright (c) 2000, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
//
// Reorganized by Craig Silverstein
// "Handles" by Ilan Horn
//
// This approach to arenas overcomes many of the limitations described
// in the "Specialized allocators" section of
//     http://www.pdos.lcs.mit.edu/~dm/c++-new.html
//
// A somewhat similar approach to Gladiator, but for heap-detection, was
// suggested by Ron van der Wal and Scott Meyers at
//     http://www.aristeia.com/BookErrata/M27Comments_frames.html

#include <config.h>
#include "base/arena.h"
#include "base/arena-inl.h"
#include <assert.h>
#include <stdlib.h>       // for malloc() and free()
#include <string.h>       // for memcpy()
#include <algorithm>
#include <vector>
#include <sys/types.h>    // one place uintptr_t might be
#ifdef HAVE_INTTYPES_H
# include <inttypes.h>
#endif                    // another place uintptr_t might be
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif                    // last place uintptr_t might be
#include "base/macros.h"  // for uint64
#include "base/mutex.h"
#include "base/util.h"    // for DCHECK_*

using std::min;
using std::vector;

// TODO(csilvers): add in a portable implementation of aligned_malloc
static void* aligned_malloc(size_t size, size_t alignment) {
  LOG(FATAL) << "page_aligned_ not currently supported\n";
}
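
// One way to satisfy the TODO above on POSIX systems would be
// posix_memalign(), whose memory can be released with plain free() (which
// is what FreeBlocks() below uses).  A rough, untested sketch -- left as a
// comment so the current "not supported" behavior is unchanged:
//
//   static void* aligned_malloc(size_t size, size_t alignment) {
//     void* ptr = NULL;
//     if (posix_memalign(&ptr, alignment, size) != 0)   // needs <stdlib.h>
//       return NULL;
//     return ptr;
//   }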

// The value here doesn't matter until page_aligned_ is supported.
static const int kPageSize = 8192;   // should be getpagesize()

namespace ctemplate {

// We used to only keep track of how much space has been allocated in
// debug mode. Now we track this for optimized builds, as well. If you
// want to play with the old scheme to see if this helps performance,
// change this ARENASET() macro to a NOP. However, NOTE: some
// applications of arenas depend on this space information (exported
// via bytes_allocated()).
#define ARENASET(x) (x)
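// (For the experiment described above, a NOP version would simply be
//  "#define ARENASET(x)", i.e. the macro discards its argument.)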

// ----------------------------------------------------------------------
// BaseArena::BaseArena()
// BaseArena::~BaseArena()
//    Destroying the arena automatically calls Reset()
// ----------------------------------------------------------------------


BaseArena::BaseArena(char* first, const size_t block_size, bool align_to_page)
    : remaining_(0),
      first_block_we_own_(first ? 1 : 0),
      block_size_(block_size),
      freestart_(NULL),                   // set for real in Reset()
      last_alloc_(NULL),
      blocks_alloced_(1),
      overflow_blocks_(NULL),
      page_aligned_(align_to_page),
      handle_alignment_(1),
      handle_alignment_bits_(0),
      block_size_bits_(0) {
  assert(block_size > kDefaultAlignment);

  while ((static_cast<size_t>(1) << block_size_bits_) < block_size_) {
    ++block_size_bits_;
  }

  if (page_aligned_) {
    // kPageSize must be a power of 2, so make sure it is.
    CHECK(kPageSize > 0 && 0 == (kPageSize & (kPageSize - 1)))
        << "kPageSize [" << kPageSize << "] is not "
        << "correctly initialized: not a power of 2.";
  }

  if (first) {
    CHECK(!page_aligned_ ||
          (reinterpret_cast<uintptr_t>(first) & (kPageSize - 1)) == 0);
    first_blocks_[0].mem = first;
  } else {
    if (page_aligned_) {
      // Make sure the block size is a multiple of the page size, as we need
      // to end on a page boundary.
      CHECK_EQ(block_size & (kPageSize - 1), 0) << "block_size is not a "
                                                << "multiple of kPageSize";
      first_blocks_[0].mem = reinterpret_cast<char*>(aligned_malloc(block_size_,
                                                                    kPageSize));
      PCHECK(NULL != first_blocks_[0].mem);
    } else {
      first_blocks_[0].mem = reinterpret_cast<char*>(malloc(block_size_));
    }
  }
  first_blocks_[0].size = block_size_;

  Reset();
}

BaseArena::~BaseArena() {
  FreeBlocks();
  assert(overflow_blocks_ == NULL);    // FreeBlocks() should do that
  // FreeBlocks() always keeps the first block allocated; if we malloc'd it
  // ourselves (i.e. the caller didn't hand one in), free it now.
  for ( int i = first_block_we_own_; i < blocks_alloced_; ++i )
    free(first_blocks_[i].mem);
}

// ----------------------------------------------------------------------
// BaseArena::block_count()
//    The only reason this is in the .cc file is that it involves STL.
// ----------------------------------------------------------------------

int BaseArena::block_count() const {
  return (blocks_alloced_ +
          (overflow_blocks_ ? static_cast<int>(overflow_blocks_->size()) : 0));
}

// ----------------------------------------------------------------------
// BaseArena::Reset()
//    Clears all the memory an arena is using.
// ----------------------------------------------------------------------

void BaseArena::Reset() {
  FreeBlocks();
  freestart_ = first_blocks_[0].mem;
  remaining_ = first_blocks_[0].size;
  last_alloc_ = NULL;

  ARENASET(status_.bytes_allocated_ = block_size_);

  // We do not know for sure whether or not the first block is aligned,
  // so we fix that right now.
  const int overage = reinterpret_cast<uintptr_t>(freestart_) &
                      (kDefaultAlignment-1);
  if (overage > 0) {
    const int waste = kDefaultAlignment - overage;
    freestart_ += waste;
    remaining_ -= waste;
  }
  freestart_when_empty_ = freestart_;
  assert(!(reinterpret_cast<uintptr_t>(freestart_) & (kDefaultAlignment-1)));
}

// ----------------------------------------------------------------------
// BaseArena::MakeNewBlock()
//    Our sbrk() equivalent.  We always make blocks of the same size
//    (though GetMemory() can also make a new block for really big
//    data).
// ----------------------------------------------------------------------

void BaseArena::MakeNewBlock() {
  AllocatedBlock *block = AllocNewBlock(block_size_);
  freestart_ = block->mem;
  remaining_ = block->size;
}

// -------------------------------------------------------------
// BaseArena::AllocNewBlock()
//    Adds and returns an AllocatedBlock.
//    The returned AllocatedBlock* is valid until the next call
//    to AllocNewBlock or Reset (i.e. anything that might
//    affect overflow_blocks_).
// -------------------------------------------------------------

BaseArena::AllocatedBlock* BaseArena::AllocNewBlock(const size_t block_size) {
  AllocatedBlock *block;
  // Find the next block.
  if ( blocks_alloced_ < ARRAYSIZE(first_blocks_) ) {
    // Use one of the pre-allocated blocks
    block = &first_blocks_[blocks_alloced_++];
  } else {                   // oops, out of space, move to the vector
    if (overflow_blocks_ == NULL) overflow_blocks_ = new vector<AllocatedBlock>;
    // Adds another block to the vector.
    overflow_blocks_->resize(overflow_blocks_->size()+1);
    // block points to the last block of the vector.
    block = &overflow_blocks_->back();
  }

  if (page_aligned_) {
    // We need the size to be a multiple of kPageSize to mprotect it later.
    size_t num_pages = ((block_size - 1) / kPageSize) + 1;
    size_t new_block_size = num_pages * kPageSize;
    block->mem = reinterpret_cast<char*>(aligned_malloc(new_block_size,
                                                        kPageSize));
    PCHECK(NULL != block->mem);
    block->size = new_block_size;
  } else {
    block->mem = reinterpret_cast<char*>(malloc(block_size));
    block->size = block_size;
  }

  ARENASET(status_.bytes_allocated_ += block_size);

  return block;
}

// ----------------------------------------------------------------------
// BaseArena::IndexToBlock()
//    Index encoding is as follows:
//    For blocks in the first_blocks_ array, we use the index of the block
//    in the array.
//    For blocks in the overflow_blocks_ vector, we use the index of the
//    block in overflow_blocks_, plus the size of the first_blocks_ array.
// ----------------------------------------------------------------------
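// For example, if ARRAYSIZE(first_blocks_) were 16, index 3 would refer to
// first_blocks_[3], while index 18 would refer to (*overflow_blocks_)[2].
// (The "16" is purely illustrative; the real array size is whatever
// arena.h declares.)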

const BaseArena::AllocatedBlock *BaseArena::IndexToBlock(int index) const {
  if (index < ARRAYSIZE(first_blocks_)) {
    return &first_blocks_[index];
  }
  CHECK(overflow_blocks_ != NULL);
  int index_in_overflow_blocks = index - ARRAYSIZE(first_blocks_);
  CHECK_GE(index_in_overflow_blocks, 0);
  CHECK_LT(static_cast<size_t>(index_in_overflow_blocks),
           overflow_blocks_->size());
  return &(*overflow_blocks_)[index_in_overflow_blocks];
}

// ----------------------------------------------------------------------
// BaseArena::GetMemoryFallback()
//    We take memory out of our pool, aligned on the byte boundary
//    requested.  If we don't have space in our current pool, we
//    allocate a new block (wasting the remaining space in the
//    current block) and give you that.  If your memory needs are
//    too big for a single block, we make a special your-memory-only
//    allocation -- this is equivalent to not using the arena at all.
// ----------------------------------------------------------------------
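// For example, if block_size_ is 8192, a request for 3000 bytes (more than
// 8192/4 == 2048) bypasses the current block entirely and gets its own
// freshly allocated block via AllocNewBlock(3000).  (8192 is just an
// illustrative block size.)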

void* BaseArena::GetMemoryFallback(const size_t size, const int align_as_int) {
  if (0 == size) {
    return NULL;             // stl/stl_alloc.h says this is okay
  }
  // This makes the type-checker happy.
  const size_t align = static_cast<size_t>(align_as_int);

  assert(align_as_int > 0 && 0 == (align & (align - 1)));  // must be power of 2

  // If the object is more than a quarter of the block size, allocate
  // it separately to avoid wasting too much space in leftover bytes
  if (block_size_ == 0 || size > block_size_/4) {
    // then it gets its own block in the arena
    assert(align <= kDefaultAlignment);   // because that's what new gives us
    // This block stays separate from the rest of the world; in particular
    // we don't update last_alloc_ so you can't reclaim space on this block.
    return AllocNewBlock(size)->mem;
  }

  const size_t overage =
      (reinterpret_cast<uintptr_t>(freestart_) & (align-1));
  if (overage) {
    const size_t waste = align - overage;
    freestart_ += waste;
    if (waste < remaining_) {
      remaining_ -= waste;
    } else {
      remaining_ = 0;
    }
  }
  if (size > remaining_) {
    MakeNewBlock();
  }
  remaining_ -= size;
  last_alloc_ = freestart_;
  freestart_ += size;
  assert(0 == (reinterpret_cast<uintptr_t>(last_alloc_) & (align-1)));
  return reinterpret_cast<void*>(last_alloc_);
}

// ----------------------------------------------------------------------
// BaseArena::ReturnMemoryFallback()
// BaseArena::FreeBlocks()
//    Unlike GetMemory(), which does actual work, ReturnMemory() is a
//    no-op: we don't "free" memory until Reset() is called.  We do
//    update some stats, though.  Note we do no checking that the
//    pointer you pass in was actually allocated by us, or that it
//    was allocated for the size you say, so be careful here!
//    FreeBlocks() does the work for Reset(), actually freeing all
//    memory allocated in one fell swoop.
// ----------------------------------------------------------------------

void BaseArena::FreeBlocks() {
  for ( int i = 1; i < blocks_alloced_; ++i ) {   // keep first block alloced
    free(first_blocks_[i].mem);
    first_blocks_[i].mem = NULL;
    first_blocks_[i].size = 0;
  }
  blocks_alloced_ = 1;
  if (overflow_blocks_ != NULL) {
    vector<AllocatedBlock>::iterator it;
    for (it = overflow_blocks_->begin(); it != overflow_blocks_->end(); ++it) {
      free(it->mem);
    }
    delete overflow_blocks_;             // These should be used very rarely
    overflow_blocks_ = NULL;
  }
}

// ----------------------------------------------------------------------
// BaseArena::AdjustLastAlloc()
//    If you realize you didn't want your last alloc to be for
//    the size you asked, after all, you can fix it by calling
//    this.  We'll grow or shrink the last-alloc region if we
//    can (we can always shrink, but we might not be able to
//    grow if you want to grow too big).
//    RETURNS true if we successfully modified the last-alloc
//    region, false if the pointer you passed in wasn't actually
//    the last alloc or if you tried to grow bigger than we could
//    accommodate.
// ----------------------------------------------------------------------
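// For example (numbers are illustrative only): if p = GetMemory(100, 4) was
// the most recent allocation, AdjustLastAlloc(p, 40) shrinks that region to
// 40 bytes and hands the other 60 back to the arena, while
// AdjustLastAlloc(p, 200) succeeds only if at least 100 more bytes remain
// in the current block.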

bool BaseArena::AdjustLastAlloc(void *last_alloc, const size_t newsize) {
  // It's only legal to call this on the last thing you alloced.
  if (last_alloc == NULL || last_alloc != last_alloc_)  return false;
  // last_alloc_ should never point into a "big" block, w/ size >= block_size_
  assert(freestart_ >= last_alloc_ && freestart_ <= last_alloc_ + block_size_);
  assert(remaining_ >= 0);   // should be: it's a size_t!
  if (newsize > (freestart_ - last_alloc_) + remaining_)
    return false;  // not enough room, even after we get back last_alloc_ space
  const char* old_freestart = freestart_;   // where last alloc used to end
  freestart_ = last_alloc_ + newsize;       // where last alloc ends now
  remaining_ -= (freestart_ - old_freestart);  // how much new space we've taken
  return true;
}

// ----------------------------------------------------------------------
// BaseArena::GetMemoryWithHandle()
//    First, memory is allocated using GetMemory, using handle_alignment_.
//    Since using different alignments for different handles would make
//    the handles incompatible (e.g., we could end up with the same handle
//    value referencing two different allocations), the alignment is not
//    passed as an argument to GetMemoryWithHandle, and handle_alignment_
//    is used automatically for all GetMemoryWithHandle calls.
//    Then we go about building a handle to reference the allocated memory.
//    The block index used for the allocation, along with the offset inside
//    the block, are encoded into the handle as follows:
//      (block_index << block_size_bits_) + offset
//    (which equals block_index*block_size + offset when block_size is a
//    power of two).  offset is simply the difference between the pointer
//    returned by GetMemory and the starting pointer of the block.
//    The above value is then divided by the alignment.  As we know that
//    both the offset and the block size are divisible by the alignment
//    (this is enforced by set_handle_alignment() for the block size, and
//    by GetMemory() for the offset), this does not lose any information,
//    but lets us cram more into the limited space in the handle.
//    If the result does not fit into an unsigned 32-bit integer, we
//    have run out of space that the handle can represent, and return
//    an invalid handle.  Note that the returned pointer is still usable,
//    but this allocation cannot be referenced by a handle.
// ----------------------------------------------------------------------
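// Illustrative example (the numbers are made up, not defaults): with
// block_size_ = 8192 (so block_size_bits_ = 13) and handle_alignment_ = 8
// (so handle_alignment_bits_ = 3), an allocation at offset 64 of block 2
// gets the handle value ((2 << 13) + 64) >> 3 == 2056.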

void* BaseArena::GetMemoryWithHandle(
    const size_t size, BaseArena::Handle* handle) {
  CHECK(handle != NULL);
  // For efficiency, handles are always allocated aligned to a power of 2.
  void* p = GetMemory(size, (1 << handle_alignment_bits_));
  // Find the index of the block the memory was allocated from.  In most
  // cases, this will be the last block, so the following loop will
  // iterate exactly once.
  int block_index;
  const AllocatedBlock* block = NULL;
  for (block_index = block_count() - 1; block_index >= 0; --block_index) {
    block = IndexToBlock(block_index);
    if ((p >= block->mem) && (p < (block->mem + block->size))) {
      break;
    }
  }
  CHECK_GE(block_index, 0) << "Failed to find the block this was allocated from";
  CHECK(block != NULL) << "Failed to find the block this was allocated from";
  const uint64 offset = reinterpret_cast<char*>(p) - block->mem;
  DCHECK_LT(offset, block_size_);
  DCHECK((offset & ((1 << handle_alignment_bits_) - 1)) == 0);
  DCHECK((block_size_ & ((1 << handle_alignment_bits_) - 1)) == 0);
  uint64 handle_value =
      ((static_cast<uint64>(block_index) << block_size_bits_) + offset) >>
      handle_alignment_bits_;
  if (handle_value >= static_cast<uint64>(0xFFFFFFFF)) {
    // We ran out of space to be able to return a handle, so return an invalid
    // handle.
    handle_value = Handle::kInvalidValue;
  }
  handle->handle_ = static_cast<uint32>(handle_value);
  return p;
}

// ----------------------------------------------------------------------
// BaseArena::set_handle_alignment()
//    Set the alignment to be used when Handles are requested.  This can
//    only be set for an arena that is empty - it cannot be changed on the
//    fly.  The alignment must be a power of 2 that the block size is
//    divisible by.  The default alignment is 1.
//    Trying to set an alignment that does not meet the above constraints
//    will cause a CHECK-failure.
// ----------------------------------------------------------------------
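// For example, with a block_size_ of 8192, set_handle_alignment(8) on an
// empty arena is fine, while set_handle_alignment(3) (not a power of 2) or
// set_handle_alignment(8192) (not strictly less than block_size_) would
// CHECK-fail.  (8192 is just an illustrative block size.)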

void BaseArena::set_handle_alignment(int align) {
  CHECK(align > 0 && 0 == (align & (align - 1)));  // must be power of 2
  CHECK(static_cast<size_t>(align) < block_size_);
  CHECK((block_size_ % align) == 0);
  CHECK(is_empty());
  handle_alignment_ = align;
  handle_alignment_bits_ = 0;
  while ((1 << handle_alignment_bits_) < handle_alignment_) {
    ++handle_alignment_bits_;
  }
}

// ----------------------------------------------------------------------
// BaseArena::HandleToPointer()
//    First, the handle value needs to gain back the alignment factor that
//    was divided out of it by GetMemoryWithHandle.  Once this is done, it
//    becomes trivial to extract the block index and offset in the block
//    out of it, and calculate the pointer.
// ----------------------------------------------------------------------
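// Continuing the illustrative numbers from GetMemoryWithHandle() above:
// handle value 2056, with handle_alignment_bits_ = 3 and
// block_size_bits_ = 13, becomes 2056 << 3 == 16448, so
// block_index = 16448 >> 13 == 2 and block_offset = 16448 & 8191 == 64.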

void* BaseArena::HandleToPointer(const Handle& h) const {
  CHECK(h.valid());
  uint64 handle = static_cast<uint64>(h.handle_) << handle_alignment_bits_;
  int block_index = static_cast<int>(handle >> block_size_bits_);
  size_t block_offset =
      static_cast<size_t>(handle & ((1 << block_size_bits_) - 1));
  const AllocatedBlock* block = IndexToBlock(block_index);
  CHECK(block != NULL);
  return reinterpret_cast<void*>(block->mem + block_offset);
}


// ----------------------------------------------------------------------
// UnsafeArena::Realloc()
// SafeArena::Realloc()
//    If you decide you want to grow -- or shrink -- a memory region,
//    we'll do it for you here.  Typically this will involve copying
//    the existing memory to somewhere else on the arena that has
//    more space reserved.  But if you're reallocing the last-allocated
//    block, we may be able to accommodate you just by updating a
//    pointer.  In any case, we return a pointer to the new memory
//    location, which may be the same as the pointer you passed in.
//       Here's an example of how you might use Realloc():
//
//    compr_buf = arena->Alloc(uncompr_size);  // get too-much space
//    int compr_size;
//    zlib.Compress(uncompr_buf, uncompr_size, compr_buf, &compr_size);
//    compr_buf = arena->Realloc(compr_buf, uncompr_size, compr_size);
// ----------------------------------------------------------------------

char* UnsafeArena::Realloc(char* s, size_t oldsize, size_t newsize) {
  assert(oldsize >= 0 && newsize >= 0);
  if ( AdjustLastAlloc(s, newsize) )             // in case s was last alloc
    return s;
  if ( newsize <= oldsize ) {
    return s;  // no need to do anything; we aren't reclaiming any memory!
  }
  char* newstr = Alloc(newsize);
  memcpy(newstr, s, min(oldsize, newsize));
  return newstr;
}

char* SafeArena::Realloc(char* s, size_t oldsize, size_t newsize) {
  assert(oldsize >= 0 && newsize >= 0);
  { MutexLock lock(&mutex_);
    if ( AdjustLastAlloc(s, newsize) )           // in case s was last alloc
      return s;
  }
  if ( newsize <= oldsize ) {
    return s;  // no need to do anything; we aren't reclaiming any memory!
  }
  char* newstr = Alloc(newsize);
  memcpy(newstr, s, min(oldsize, newsize));
  return newstr;
}

}  // namespace ctemplate