// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

// A low-level allocator that can be used by other low-level
// modules without introducing dependency cycles.
// This allocator is slow and wasteful of memory;
// it should not be used when performance is key.
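//
// Illustrative usage sketch (not part of this file's implementation; the
// exact call sites are up to the client):
//
//   // Simple allocations from the default arena:
//   void *p = LowLevelAlloc::Alloc(128);
//   LowLevelAlloc::Free(p);
//
//   // A private arena whose metadata lives in the default arena:
//   LowLevelAlloc::Arena *a =
//       LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
//   void *q = LowLevelAlloc::AllocWithArena(256, a);
//   LowLevelAlloc::Free(q);
//   LowLevelAlloc::DeleteArena(a);   // succeeds only once the arena is empty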

#include "base/low_level_alloc.h"
#include "base/dynamic_annotations.h"
#include "base/spinlock.h"
#include "base/logging.h"
#include "malloc_hook-inl.h"
#include <gperftools/malloc_hook.h>
#include <errno.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_MMAP
#include <sys/mman.h>
#endif
#include <new>                   // for placement-new

// On systems (like freebsd) that don't define MAP_ANONYMOUS, use the old
// form of the name instead.
#ifndef MAP_ANONYMOUS
# define MAP_ANONYMOUS MAP_ANON
#endif

// A first-fit allocator with amortized logarithmic free() time.

LowLevelAlloc::PagesAllocator::~PagesAllocator() {
}

// ---------------------------------------------------------------------------
static const int kMaxLevel = 30;

// We put this class-only struct in a namespace to avoid polluting the
// global namespace with this struct name (thus risking an ODR violation).
namespace low_level_alloc_internal {
  // This struct describes one allocated block, or one free block.
  struct AllocList {
    struct Header {
      intptr_t size;  // size of entire region, including this field. Must be
                      // first. Valid in both allocated and unallocated blocks
      intptr_t magic; // kMagicAllocated or kMagicUnallocated xor this
      LowLevelAlloc::Arena *arena; // pointer to parent arena
      void *dummy_for_alignment;   // aligns regions to 0 mod 2*sizeof(void*)
    } header;

    // Next two fields: in unallocated blocks: freelist skiplist data
    //                  in allocated blocks: overlaps with client data
    int levels;                 // levels in skiplist used
    AllocList *next[kMaxLevel]; // actually has levels elements.
                                // The AllocList node may not have room for
                                // all kMaxLevel entries. See max_fit in
                                // LLA_SkiplistLevels()
  };
}
using low_level_alloc_internal::AllocList;


// ---------------------------------------------------------------------------
// A trivial skiplist implementation. This is used to keep the freelist
// in address order while taking only logarithmic time per insert and delete.

// An integer approximation of log2(size/base)
// Requires size >= base.
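// For example (illustrative arithmetic only): IntLog2(4096, 16) == 8, which
// matches log2(4096/16) exactly; for arguments that are not powers of two the
// result is only approximate, e.g. IntLog2(48, 16) == 2 while log2(48/16) is
// about 1.58.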
static int IntLog2(size_t size, size_t base) {
  int result = 0;
  for (size_t i = size; i > base; i >>= 1) { // i == floor(size/2**result)
    result++;
  }
  //    floor(size / 2**result) <= base < floor(size / 2**(result-1))
  // => log2(size/(base+1)) <= result < 1+log2(size/base)
  // => result ~= log2(size/base)
  return result;
}

// Return a random integer n: p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
static int Random() {
  static uint32 r = 1;         // no locking---it's not critical
  int result = 1;
  while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) {
    result++;
  }
  return result;
}

// Return a number of skiplist levels for a node of size bytes, where
// base is the minimum node size. Compute level=log2(size / base)+n
// where n is 1 if random is false and otherwise a random number generated with
// the standard distribution for a skiplist: See Random() above.
// Bigger nodes tend to have more skiplist levels due to the log2(size / base)
// term, so first-fit searches touch fewer nodes. "level" is clipped so
// level<kMaxLevel and next[level-1] will fit in the node.
// 0 < LLA_SkiplistLevels(x,y,false) <= LLA_SkiplistLevels(x,y,true) < kMaxLevel
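// As a concrete illustration (assuming a typical LP64 layout, where the
// 32-byte Header plus the int and padding put next[] at offset 40): for a
// 64-byte node with base == 32 and random == false, IntLog2(64, 32) == 1, so
// level == 2, which fits because max_fit == (64 - 40) / 8 == 3.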
static int LLA_SkiplistLevels(size_t size, size_t base, bool random) {
  // max_fit is the maximum number of levels that will fit in a node for the
  // given size. We can't return more than max_fit, no matter what the
  // random number generator says.
  int max_fit = (size-OFFSETOF_MEMBER(AllocList, next)) / sizeof (AllocList *);
  int level = IntLog2(size, base) + (random? Random() : 1);
  if (level > max_fit) level = max_fit;
  if (level > kMaxLevel-1) level = kMaxLevel - 1;
  RAW_CHECK(level >= 1, "block not big enough for even one level");
  return level;
}

// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
// points to the last element at level i in the AllocList less than *e, or is
// head if no such element exists.
static AllocList *LLA_SkiplistSearch(AllocList *head,
                                     AllocList *e, AllocList **prev) {
  AllocList *p = head;
  for (int level = head->levels - 1; level >= 0; level--) {
    for (AllocList *n; (n = p->next[level]) != 0 && n < e; p = n) {
    }
    prev[level] = p;
  }
  return (head->levels == 0) ? 0 : prev[0]->next[0];
}

// Insert element *e into AllocList *head. Set prev[] as LLA_SkiplistSearch.
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
                               AllocList **prev) {
  LLA_SkiplistSearch(head, e, prev);
  for (; head->levels < e->levels; head->levels++) { // extend prev pointers
    prev[head->levels] = head;                       // to all *e's levels
  }
  for (int i = 0; i != e->levels; i++) { // add element to list
    e->next[i] = prev[i]->next[i];
    prev[i]->next[i] = e;
  }
}

// Remove element *e from AllocList *head. Set prev[] as LLA_SkiplistSearch().
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
                               AllocList **prev) {
  AllocList *found = LLA_SkiplistSearch(head, e, prev);
  RAW_CHECK(e == found, "element not in freelist");
  for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
    prev[i]->next[i] = e->next[i];
  }
  while (head->levels > 0 && head->next[head->levels - 1] == 0) {
    head->levels--;   // reduce head->levels if level unused
  }
}

// ---------------------------------------------------------------------------
// Arena implementation

struct LowLevelAlloc::Arena {
  Arena() : mu(SpinLock::LINKER_INITIALIZED) {} // does nothing; for static init
  explicit Arena(int) : pagesize(0) {}  // set pagesize to zero explicitly
                                        // for non-static init

  SpinLock mu;            // protects freelist, allocation_count,
                          // pagesize, roundup, min_size
  AllocList freelist;     // head of free list; sorted by addr (under mu)
  int32 allocation_count; // count of allocated blocks (under mu)
  int32 flags;            // flags passed to NewArena (ro after init)
  size_t pagesize;        // ==getpagesize() (init under mu, then ro)
  size_t roundup;         // lowest power of 2 >= max(16, sizeof (AllocList::Header))
                          // (init under mu, then ro)
  size_t min_size;        // smallest allocation block size
                          // (init under mu, then ro)
  PagesAllocator *allocator;
};

// The default arena, which is used when 0 is passed instead of an Arena
// pointer.
static struct LowLevelAlloc::Arena default_arena;

// Non-malloc-hooked arenas: used only to allocate metadata for arenas that
// do not want malloc hook reporting, so that for them there's no malloc hook
// reporting even during arena creation.
static struct LowLevelAlloc::Arena unhooked_arena;
static struct LowLevelAlloc::Arena unhooked_async_sig_safe_arena;

namespace {

  class DefaultPagesAllocator : public LowLevelAlloc::PagesAllocator {
  public:
    virtual ~DefaultPagesAllocator() {}
    virtual void *MapPages(int32 flags, size_t size);
    virtual void UnMapPages(int32 flags, void *addr, size_t size);
  };

}

// magic numbers to identify allocated and unallocated blocks
static const intptr_t kMagicAllocated = 0x4c833e95;
static const intptr_t kMagicUnallocated = ~kMagicAllocated;

namespace {
  class SCOPED_LOCKABLE ArenaLock {
   public:
    explicit ArenaLock(LowLevelAlloc::Arena *arena)
        EXCLUSIVE_LOCK_FUNCTION(arena->mu)
        : left_(false), mask_valid_(false), arena_(arena) {
      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
        // We've decided not to support async-signal-safe arena use until
        // there is a demonstrated need. Here's how one could do it though
        // (would need to be made more portable).
#if 0
        sigset_t all;
        sigfillset(&all);
        this->mask_valid_ =
            (pthread_sigmask(SIG_BLOCK, &all, &this->mask_) == 0);
#else
        RAW_CHECK(false, "We do not yet support async-signal-safe arena.");
#endif
      }
      this->arena_->mu.Lock();
    }
    ~ArenaLock() { RAW_CHECK(this->left_, "haven't left Arena region"); }
    void Leave() UNLOCK_FUNCTION() {
      this->arena_->mu.Unlock();
#if 0
      if (this->mask_valid_) {
        pthread_sigmask(SIG_SETMASK, &this->mask_, 0);
      }
#endif
      this->left_ = true;
    }
   private:
    bool left_;       // whether left region
    bool mask_valid_;
#if 0
    sigset_t mask_;   // old mask of blocked signals
#endif
    LowLevelAlloc::Arena *arena_;
    DISALLOW_COPY_AND_ASSIGN(ArenaLock);
  };
} // anonymous namespace

// create an appropriate magic number for an object at "ptr"
// "magic" should be kMagicAllocated or kMagicUnallocated
inline static intptr_t Magic(intptr_t magic, AllocList::Header *ptr) {
  return magic ^ reinterpret_cast<intptr_t>(ptr);
}

// Initialize the fields of an Arena
static void ArenaInit(LowLevelAlloc::Arena *arena) {
  if (arena->pagesize == 0) {
    arena->pagesize = getpagesize();
    // Round up block sizes to a power of two close to the header size.
    arena->roundup = 16;
    while (arena->roundup < sizeof (arena->freelist.header)) {
      arena->roundup += arena->roundup;
    }
    // Don't allocate blocks less than twice the roundup size to avoid tiny
    // free blocks.
    arena->min_size = 2 * arena->roundup;
    arena->freelist.header.size = 0;
    arena->freelist.header.magic =
        Magic(kMagicUnallocated, &arena->freelist.header);
    arena->freelist.header.arena = arena;
    arena->freelist.levels = 0;
    memset(arena->freelist.next, 0, sizeof (arena->freelist.next));
    arena->allocation_count = 0;
    if (arena == &default_arena) {
      // Default arena should be hooked, e.g. for heap-checker to trace
      // pointer chains through objects in the default arena.
      arena->flags = LowLevelAlloc::kCallMallocHook;
    } else if (arena == &unhooked_async_sig_safe_arena) {
      arena->flags = LowLevelAlloc::kAsyncSignalSafe;
    } else {
      arena->flags = 0;   // other arenas' flags may be overridden by client,
                          // but unhooked_arena will have 0 in 'flags'.
    }
    arena->allocator = LowLevelAlloc::GetDefaultPagesAllocator();
  }
}

// L < meta_data_arena->mu
LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32 flags,
                                              Arena *meta_data_arena) {
  return NewArenaWithCustomAlloc(flags, meta_data_arena, NULL);
}

// L < meta_data_arena->mu
LowLevelAlloc::Arena *LowLevelAlloc::NewArenaWithCustomAlloc(int32 flags,
                                                             Arena *meta_data_arena,
                                                             PagesAllocator *allocator) {
  RAW_CHECK(meta_data_arena != 0, "must pass a valid arena");
  if (meta_data_arena == &default_arena) {
    if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
      meta_data_arena = &unhooked_async_sig_safe_arena;
    } else if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
      meta_data_arena = &unhooked_arena;
    }
  }
  // Arena(0) uses the constructor for non-static contexts
  Arena *result =
    new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(0);
  ArenaInit(result);
  result->flags = flags;
  if (allocator) {
    result->allocator = allocator;
  }
  return result;
}
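
// Illustrative sketch of a custom page-level backend (the class and variable
// names here are hypothetical; only the PagesAllocator interface itself comes
// from base/low_level_alloc.h):
//
//   class MyPagesAllocator : public LowLevelAlloc::PagesAllocator {
//    public:
//     virtual ~MyPagesAllocator() {}
//     virtual void *MapPages(int32 flags, size_t size) {
//       // map and return "size" bytes of page-aligned memory here
//     }
//     virtual void UnMapPages(int32 flags, void *addr, size_t size) {
//       // release pages previously returned by MapPages()
//     }
//   };
//   static MyPagesAllocator my_allocator;
//   LowLevelAlloc::Arena *a = LowLevelAlloc::NewArenaWithCustomAlloc(
//       0, LowLevelAlloc::DefaultArena(), &my_allocator);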

// L < arena->mu, L < arena->arena->mu
bool LowLevelAlloc::DeleteArena(Arena *arena) {
  RAW_CHECK(arena != 0 && arena != &default_arena && arena != &unhooked_arena,
            "may not delete default arena");
  ArenaLock section(arena);
  bool empty = (arena->allocation_count == 0);
  section.Leave();
  if (empty) {
    while (arena->freelist.next[0] != 0) {
      AllocList *region = arena->freelist.next[0];
      size_t size = region->header.size;
      arena->freelist.next[0] = region->next[0];
      RAW_CHECK(region->header.magic ==
                Magic(kMagicUnallocated, &region->header),
                "bad magic number in DeleteArena()");
      RAW_CHECK(region->header.arena == arena,
                "bad arena pointer in DeleteArena()");
      RAW_CHECK(size % arena->pagesize == 0,
                "empty arena has non-page-aligned block size");
      RAW_CHECK(reinterpret_cast<intptr_t>(region) % arena->pagesize == 0,
                "empty arena has non-page-aligned block");
      int munmap_result;
      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
        munmap_result = munmap(region, size);
      } else {
        munmap_result = MallocHook::UnhookedMUnmap(region, size);
      }
      RAW_CHECK(munmap_result == 0,
                "LowLevelAlloc::DeleteArena: munmap failed address");
    }
    Free(arena);
  }
  return empty;
}

// ---------------------------------------------------------------------------

// Return value rounded up to next multiple of align.
// align must be a power of two.
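// For example, RoundUp(43, 16) == 48, and values that are already multiples
// of align are unchanged: RoundUp(48, 16) == 48.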
static intptr_t RoundUp(intptr_t addr, intptr_t align) {
  return (addr + align - 1) & ~(align - 1);
}

// Equivalent to "return prev->next[i]" but with sanity checking
// that the freelist is in the correct order, that it
// consists of regions marked "unallocated", and that no two regions
// are adjacent in memory (they should have been coalesced).
// L < arena->mu
static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
  RAW_CHECK(i < prev->levels, "too few levels in Next()");
  AllocList *next = prev->next[i];
  if (next != 0) {
    RAW_CHECK(next->header.magic == Magic(kMagicUnallocated, &next->header),
              "bad magic number in Next()");
    RAW_CHECK(next->header.arena == arena,
              "bad arena pointer in Next()");
    if (prev != &arena->freelist) {
      RAW_CHECK(prev < next, "unordered freelist");
      RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
                reinterpret_cast<char *>(next), "malformed freelist");
    }
  }
  return next;
}

// Coalesce list item "a" with its successor if they are adjacent.
static void Coalesce(AllocList *a) {
  AllocList *n = a->next[0];
  if (n != 0 && reinterpret_cast<char *>(a) + a->header.size ==
                    reinterpret_cast<char *>(n)) {
    LowLevelAlloc::Arena *arena = a->header.arena;
    a->header.size += n->header.size;
    n->header.magic = 0;
    n->header.arena = 0;
    AllocList *prev[kMaxLevel];
    LLA_SkiplistDelete(&arena->freelist, n, prev);
    LLA_SkiplistDelete(&arena->freelist, a, prev);
    a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size, true);
    LLA_SkiplistInsert(&arena->freelist, a, prev);
  }
}

// Adds block at location "v" to the free list
// L >= arena->mu
static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
  AllocList *f = reinterpret_cast<AllocList *>(
      reinterpret_cast<char *>(v) - sizeof (f->header));
  RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
            "bad magic number in AddToFreelist()");
  RAW_CHECK(f->header.arena == arena,
            "bad arena pointer in AddToFreelist()");
  f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size, true);
  AllocList *prev[kMaxLevel];
  LLA_SkiplistInsert(&arena->freelist, f, prev);
  f->header.magic = Magic(kMagicUnallocated, &f->header);
  Coalesce(f);        // maybe coalesce with successor
  Coalesce(prev[0]);  // maybe coalesce with predecessor
}

// Frees storage allocated by LowLevelAlloc::Alloc().
// L < arena->mu
void LowLevelAlloc::Free(void *v) {
  if (v != 0) {
    AllocList *f = reinterpret_cast<AllocList *>(
        reinterpret_cast<char *>(v) - sizeof (f->header));
    RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
              "bad magic number in Free()");
    LowLevelAlloc::Arena *arena = f->header.arena;
    if ((arena->flags & kCallMallocHook) != 0) {
      MallocHook::InvokeDeleteHook(v);
    }
    ArenaLock section(arena);
    AddToFreelist(v, arena);
    RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
    arena->allocation_count--;
    section.Leave();
  }
}

// allocates and returns a block of size bytes, to be freed with Free()
// L < arena->mu
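// As a rough worked example (assuming 4 KiB pages and a 32-byte header, so
// roundup == 32 and min_size == 64): a request of 100 bytes becomes
// req_rnd == RoundUp(100 + 32, 32) == 160; if no free region is large enough,
// a fresh mapping of RoundUp(160, 4096 * 16) == 65536 bytes is obtained, a
// 160-byte block (header included) satisfies the request, and the remaining
// 65376 bytes are returned to the freelist.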
static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
  void *result = 0;
  if (request != 0) {
    AllocList *s;       // will point to region that satisfies request
    ArenaLock section(arena);
    ArenaInit(arena);
    // round up with header
    size_t req_rnd = RoundUp(request + sizeof (s->header), arena->roundup);
    for (;;) {      // loop until we find a suitable region
      // find the minimum levels that a block of this size must have
      int i = LLA_SkiplistLevels(req_rnd, arena->min_size, false) - 1;
      if (i < arena->freelist.levels) {   // potential blocks exist
        AllocList *before = &arena->freelist;   // predecessor of s
        while ((s = Next(i, before, arena)) != 0 && s->header.size < req_rnd) {
          before = s;
        }
        if (s != 0) {       // we found a region
          break;
        }
      }
      // we unlock before mmap() both because mmap() may call a callback hook,
      // and because it may be slow.
      arena->mu.Unlock();
      // mmap generous 64K chunks to decrease
      // the chances/impact of fragmentation:
      size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
      void *new_pages = arena->allocator->MapPages(arena->flags, new_pages_size);
      arena->mu.Lock();
      s = reinterpret_cast<AllocList *>(new_pages);
      s->header.size = new_pages_size;
      // Pretend the block is allocated; call AddToFreelist() to free it.
      s->header.magic = Magic(kMagicAllocated, &s->header);
      s->header.arena = arena;
      AddToFreelist(&s->levels, arena);   // insert new region into free list
    }
    AllocList *prev[kMaxLevel];
    LLA_SkiplistDelete(&arena->freelist, s, prev);    // remove from free list
    // s points to the first free region that's big enough
    if (req_rnd + arena->min_size <= s->header.size) {  // big enough to split
      AllocList *n = reinterpret_cast<AllocList *>
                        (req_rnd + reinterpret_cast<char *>(s));
      n->header.size = s->header.size - req_rnd;
      n->header.magic = Magic(kMagicAllocated, &n->header);
      n->header.arena = arena;
      s->header.size = req_rnd;
      AddToFreelist(&n->levels, arena);
    }
    s->header.magic = Magic(kMagicAllocated, &s->header);
    RAW_CHECK(s->header.arena == arena, "");
    arena->allocation_count++;
    section.Leave();
    result = &s->levels;
  }
  return result;
}

void *LowLevelAlloc::Alloc(size_t request) {
  void *result = DoAllocWithArena(request, &default_arena);
  if ((default_arena.flags & kCallMallocHook) != 0) {
    // this call must be directly in the user-called allocator function
    // for MallocHook::GetCallerStackTrace to work properly
    MallocHook::InvokeNewHook(result, request);
  }
  return result;
}

void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
  RAW_CHECK(arena != 0, "must pass a valid arena");
  void *result = DoAllocWithArena(request, arena);
  if ((arena->flags & kCallMallocHook) != 0) {
    // this call must be directly in the user-called allocator function
    // for MallocHook::GetCallerStackTrace to work properly
    MallocHook::InvokeNewHook(result, request);
  }
  return result;
}

LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
  return &default_arena;
}

static DefaultPagesAllocator *default_pages_allocator;
static union {
  char chars[sizeof(DefaultPagesAllocator)];
  void *ptr;
} debug_pages_allocator_space;

LowLevelAlloc::PagesAllocator *LowLevelAlloc::GetDefaultPagesAllocator(void) {
  if (default_pages_allocator) {
    return default_pages_allocator;
  }
  default_pages_allocator =
      new (debug_pages_allocator_space.chars) DefaultPagesAllocator();
  return default_pages_allocator;
}

void *DefaultPagesAllocator::MapPages(int32 flags, size_t size) {
  void *new_pages;
  if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
    new_pages = MallocHook::UnhookedMMap(0, size,
                                         PROT_WRITE|PROT_READ,
                                         MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
  } else {
    new_pages = mmap(0, size,
                     PROT_WRITE|PROT_READ,
                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
  }
  RAW_CHECK(new_pages != MAP_FAILED, "mmap error");

  return new_pages;
}

void DefaultPagesAllocator::UnMapPages(int32 flags, void *region, size_t size) {
  int munmap_result;
  if ((flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
    munmap_result = munmap(region, size);
  } else {
    munmap_result = MallocHook::UnhookedMUnmap(region, size);
  }
  RAW_CHECK(munmap_result == 0,
            "DefaultPagesAllocator::UnMapPages: munmap failed");
}