// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2014, gperftools Contributors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//

#include "config.h"

#include "emergency_malloc.h"

#include <errno.h>   // for ENOMEM, errno
#include <string.h>  // for memset

#include "base/basictypes.h"
#include "base/logging.h"
#include "base/low_level_alloc.h"
#include "base/spinlock.h"
#include "internal_logging.h"


namespace tcmalloc {
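  // Start of the emergency arena, plus the same address pre-shifted by
  // kEmergencyArenaShift.  Both have internal visibility so that the
  // companion header (emergency_malloc.h) can classify a pointer as
  // belonging to the arena with a single shift-and-compare.  A minimal
  // sketch of such a check (illustrative only; the exact form lives in the
  // header and is not quoted here):
  //
  //   inline bool LooksLikeEmergencyPtr(const void *p) {
  //     uintptr_t a = reinterpret_cast<uintptr_t>(p);
  //     return emergency_arena_start_shifted != 0 &&
  //            (a >> kEmergencyArenaShift) == emergency_arena_start_shifted;
  //   }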
  __attribute__ ((visibility("internal"))) char *emergency_arena_start;
  __attribute__ ((visibility("internal"))) uintptr_t emergency_arena_start_shifted;

  static CACHELINE_ALIGNED SpinLock emergency_malloc_lock(base::LINKER_INITIALIZED);
  static char *emergency_arena_end;
  static LowLevelAlloc::Arena *emergency_arena;

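  // A trivial bump-pointer "pages allocator" that hands out space from the
  // pre-reserved emergency region: MapPages just advances
  // emergency_arena_end.  Releasing pages is never expected, so UnMapPages
  // aborts.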
  class EmergencyArenaPagesAllocator : public LowLevelAlloc::PagesAllocator {
    ~EmergencyArenaPagesAllocator() {}
    void *MapPages(int32 flags, size_t size) {
      char *new_end = emergency_arena_end + size;
      if (new_end > emergency_arena_start + kEmergencyArenaSize) {
        RAW_LOG(FATAL, "Unable to allocate %zu bytes in emergency zone.", size);
      }
      char *rv = emergency_arena_end;
      emergency_arena_end = new_end;
      return static_cast<void *>(rv);
    }
    void UnMapPages(int32 flags, void *addr, size_t size) {
      RAW_LOG(FATAL, "UnMapPages is not implemented for emergency arena");
    }
  };

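  // Raw storage for placement-new of the allocator above.  Using a union
  // with a void * member (rather than a bare char array) guarantees pointer
  // alignment while avoiding a static constructor.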
  static union {
    char bytes[sizeof(EmergencyArenaPagesAllocator)];
    void *ptr;
  } pages_allocator_place;

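  // Sets up the emergency arena on first use.  Twice kEmergencyArenaSize is
  // mapped, the start is rounded up to a kEmergencyArenaSize boundary, and
  // the unused head and tail of the mapping are unmapped again, leaving one
  // aligned kEmergencyArenaSize region.  That alignment is what makes the
  // shift-and-compare pointer test described above possible.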
  static void InitEmergencyMalloc(void) {
    const int32 flags = LowLevelAlloc::kAsyncSignalSafe;

    void *arena = LowLevelAlloc::GetDefaultPagesAllocator()->MapPages(flags, kEmergencyArenaSize * 2);

    uintptr_t arena_ptr = reinterpret_cast<uintptr_t>(arena);
    uintptr_t ptr = (arena_ptr + kEmergencyArenaSize - 1) & ~(kEmergencyArenaSize-1);

    emergency_arena_end = emergency_arena_start = reinterpret_cast<char *>(ptr);
    EmergencyArenaPagesAllocator *allocator = new (pages_allocator_place.bytes) EmergencyArenaPagesAllocator();
    emergency_arena = LowLevelAlloc::NewArenaWithCustomAlloc(0, LowLevelAlloc::DefaultArena(), allocator);

    emergency_arena_start_shifted = reinterpret_cast<uintptr_t>(emergency_arena_start) >> kEmergencyArenaShift;

    uintptr_t head_unmap_size = ptr - arena_ptr;
    CHECK_CONDITION(head_unmap_size < kEmergencyArenaSize);
    if (head_unmap_size != 0) {
      LowLevelAlloc::GetDefaultPagesAllocator()->UnMapPages(flags, arena, ptr - arena_ptr);
    }

    uintptr_t tail_unmap_size = kEmergencyArenaSize - head_unmap_size;
    void *tail_start = reinterpret_cast<void *>(arena_ptr + head_unmap_size + kEmergencyArenaSize);
    LowLevelAlloc::GetDefaultPagesAllocator()->UnMapPages(flags, tail_start, tail_unmap_size);
  }

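  // Serves an allocation from the emergency arena, creating the arena on
  // first use.  Mirrors malloc's contract by setting errno to ENOMEM on
  // failure.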
  PERFTOOLS_DLL_DECL void *EmergencyMalloc(size_t size) {
    SpinLockHolder l(&emergency_malloc_lock);

    if (emergency_arena_start == NULL) {
      InitEmergencyMalloc();
      CHECK_CONDITION(emergency_arena_start != NULL);
    }

    void *rv = LowLevelAlloc::AllocWithArena(size, emergency_arena);
    if (rv == NULL) {
      errno = ENOMEM;
    }
    return rv;
  }

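  // Frees a pointer previously handed out by EmergencyMalloc.  If the arena
  // has never been initialized, the pointer cannot have come from it, so
  // after setting the arena up the call is forwarded to the regular free().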
  PERFTOOLS_DLL_DECL void EmergencyFree(void *p) {
    SpinLockHolder l(&emergency_malloc_lock);
    if (emergency_arena_start == NULL) {
      InitEmergencyMalloc();
      CHECK_CONDITION(emergency_arena_start != NULL);
      free(p);
      return;
    }
    CHECK_CONDITION(emergency_arena_start);
    LowLevelAlloc::Free(p);
  }

  PERFTOOLS_DLL_DECL void *EmergencyRealloc(void *_old_ptr, size_t new_size) {
    if (_old_ptr == NULL) {
      return EmergencyMalloc(new_size);
    }
    if (new_size == 0) {
      EmergencyFree(_old_ptr);
      return NULL;
    }
    SpinLockHolder l(&emergency_malloc_lock);
    CHECK_CONDITION(emergency_arena_start);

    char *old_ptr = static_cast<char *>(_old_ptr);
    CHECK_CONDITION(old_ptr <= emergency_arena_end);
    CHECK_CONDITION(emergency_arena_start <= old_ptr);

    // NOTE: we don't know the previous size of the old_ptr chunk, so
    // instead of trying to figure out exactly how much to copy, we simply
    // copy the largest amount that could still be valid. We don't care
    // about being slow here.
    size_t old_ptr_size = emergency_arena_end - old_ptr;
    size_t copy_size = (new_size < old_ptr_size) ? new_size : old_ptr_size;

    void *new_ptr = LowLevelAlloc::AllocWithArena(new_size, emergency_arena);
    if (new_ptr == NULL) {
      errno = ENOMEM;
      return NULL;
    }
    memcpy(new_ptr, old_ptr, copy_size);

    LowLevelAlloc::Free(old_ptr);
    return new_ptr;
  }

  PERFTOOLS_DLL_DECL void *EmergencyCalloc(size_t n, size_t elem_size) {
    // Overflow check
    const size_t size = n * elem_size;
    if (elem_size != 0 && size / elem_size != n) return NULL;
    void *rv = EmergencyMalloc(size);
    if (rv != NULL) {
      memset(rv, 0, size);
    }
    return rv;
  }
};