// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2000, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Urs Holzle <opensource@google.com>
#include "config.h"
#include <errno.h>
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
// We only need malloc.h for struct mallinfo.
#ifdef HAVE_STRUCT_MALLINFO
// Malloc can be in several places on older versions of OS X.
# if defined(HAVE_MALLOC_H)
# include <malloc.h>
# elif defined(HAVE_MALLOC_MALLOC_H)
# include <malloc/malloc.h>
# elif defined(HAVE_SYS_MALLOC_H)
# include <sys/malloc.h>
# endif
#endif
#ifdef HAVE_PTHREAD
#include <pthread.h>
#endif
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#ifdef HAVE_MMAP
#include <sys/mman.h>
#endif
#include <sys/stat.h>
#include <sys/types.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#include <gperftools/malloc_extension.h>
#include <gperftools/malloc_hook.h>
#include <gperftools/stacktrace.h>
#include "addressmap-inl.h"
#include "base/commandlineflags.h"
#include "base/googleinit.h"
#include "base/logging.h"
#include "base/spinlock.h"
#include "malloc_hook-inl.h"
#include "symbolize.h"
// NOTE: due to the #define below, tcmalloc.cc will omit the tc_XXX
// definitions so that the debug implementations can be defined in
// their place. We're going to use do_malloc, do_free and the other
// do_XXX functions that are defined in tcmalloc.cc for the actual
// memory management.
#define TCMALLOC_USING_DEBUGALLOCATION
#include "tcmalloc.cc"

// __THROW is defined in glibc systems.  It means, counter-intuitively,
// "This function will never throw an exception."  It's an optional
// optimization tool, but we may need to use it to match glibc prototypes.
#ifndef __THROW   // I guess we're not on a glibc system
# define __THROW  // __THROW is just an optimization, so ok to make it ""
#endif

// On systems (like freebsd) that don't define MAP_ANONYMOUS, use the old
// form of the name instead.
#ifndef MAP_ANONYMOUS
# define MAP_ANONYMOUS MAP_ANON
#endif

// ========================================================================= //

DEFINE_bool(malloctrace,
            EnvToBool("TCMALLOC_TRACE", false),
            "Enables memory (de)allocation tracing to /tmp/google.alloc.");
#ifdef HAVE_MMAP
DEFINE_bool(malloc_page_fence,
            EnvToBool("TCMALLOC_PAGE_FENCE", false),
            "Enables placing memory allocations at page boundaries "
            "with a guard page following the allocation (to catch buffer "
            "overruns right when they happen).");
DEFINE_bool(malloc_page_fence_never_reclaim,
            EnvToBool("TCMALLOC_PAGE_FENCE_NEVER_RECLAIM", false),
            "Enables making the virtual address space inaccessible "
            "upon a deallocation instead of returning it and reusing later.");
DEFINE_bool(malloc_page_fence_readable,
            EnvToBool("TCMALLOC_PAGE_FENCE_READABLE", false),
            "Permits reads to the page fence.");
#else
DEFINE_bool(malloc_page_fence, false, "Not usable (requires mmap)");
DEFINE_bool(malloc_page_fence_never_reclaim, false, "Not usable (requires mmap)");
#endif
DEFINE_bool(malloc_reclaim_memory,
            EnvToBool("TCMALLOC_RECLAIM_MEMORY", true),
            "If set to false, we never return memory to malloc "
            "when an object is deallocated. This ensures that all "
            "heap object addresses are unique.");
DEFINE_int32(max_free_queue_size,
             EnvToInt("TCMALLOC_MAX_FREE_QUEUE_SIZE", 10*1024*1024),
             "If greater than 0, keep freed blocks in a queue instead of "
             "releasing them to the allocator immediately.  Release them when "
             "the total size of all blocks in the queue would otherwise exceed "
             "this limit.");

DEFINE_bool(symbolize_stacktrace,
            EnvToBool("TCMALLOC_SYMBOLIZE_STACKTRACE", true),
            "Symbolize the stack trace when provided (on some error exits)");

// If we are LD_PRELOAD-ed against a non-pthreads app, then
// pthread_once won't be defined.  We declare it here, for that
// case (with weak linkage) which will cause the non-definition to
// resolve to NULL.  We can then check for NULL or not in Instance.
extern "C" int pthread_once(pthread_once_t *, void (*)(void))
    ATTRIBUTE_WEAK;

// ========================================================================= //

// A safe version of printf() that does not do any allocation and
// uses very little stack space.
static void TracePrintf(int fd, const char *fmt, ...)
    __attribute__ ((__format__ (__printf__, 2, 3)));

// Round "value" up to next "alignment" boundary.
// Requires that "alignment" be a power of two.
static intptr_t RoundUp(intptr_t value, intptr_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}
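
// For instance (hypothetical values, for illustration only):
//   RoundUp(13, 8) == (13 + 7) & ~7 == 16
//   RoundUp(16, 8) == 16   // already-aligned values are unchanged
// Adding alignment-1 carries any unaligned value past the next boundary,
// and masking with ~(alignment-1) clears the low bits; this is why
// "alignment" must be a power of two.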

// ========================================================================= //

class MallocBlock;

// A circular buffer to hold freed blocks of memory.  MallocBlock::Deallocate
// (below) pushes blocks into this queue instead of returning them to the
// underlying allocator immediately.  See MallocBlock::Deallocate for more
// information.
//
// We can't use an STL class for this because we need to be careful not to
// perform any heap de-allocations in any of the code in this class, since the
// code in MallocBlock::Deallocate is not re-entrant.
template <typename QueueEntry>
class FreeQueue {
 public:
  FreeQueue() : q_front_(0), q_back_(0) {}

  bool Full() {
    return (q_front_ + 1) % kFreeQueueSize == q_back_;
  }

  void Push(const QueueEntry& block) {
    q_[q_front_] = block;
    q_front_ = (q_front_ + 1) % kFreeQueueSize;
  }

  QueueEntry Pop() {
    RAW_CHECK(q_back_ != q_front_, "Queue is empty");
    const QueueEntry& ret = q_[q_back_];
    q_back_ = (q_back_ + 1) % kFreeQueueSize;
    return ret;
  }

  size_t size() const {
    return (q_front_ - q_back_ + kFreeQueueSize) % kFreeQueueSize;
  }

 private:
  // Maximum number of blocks kept in the free queue before being freed.
  static const int kFreeQueueSize = 1024;

  QueueEntry q_[kFreeQueueSize];
  int q_front_;
  int q_back_;
};
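
// Usage sketch (hypothetical, with int entries; the real queue holds
// MallocBlockQueueEntry values):
//   FreeQueue<int> q;
//   q.Push(1);
//   q.Push(2);             // q.size() == 2
//   int oldest = q.Pop();  // oldest == 1: FIFO order
// One slot is deliberately left unused so that Full() can distinguish a
// full buffer from an empty one, so the queue holds at most
// kFreeQueueSize - 1 entries at a time.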

struct MallocBlockQueueEntry {
  MallocBlockQueueEntry() : block(NULL), size(0),
                            num_deleter_pcs(0), deleter_threadid(0) {}
  MallocBlockQueueEntry(MallocBlock* b, size_t s) : block(b), size(s) {
    if (FLAGS_max_free_queue_size != 0 && b != NULL) {
      // Adjust the number of frames to skip (4) if you change the
      // location of this call.
      num_deleter_pcs =
          MallocHook::GetCallerStackTrace(
              deleter_pcs,
              sizeof(deleter_pcs) / sizeof(deleter_pcs[0]),
              4);
      deleter_threadid = pthread_self();
    } else {
      num_deleter_pcs = 0;
      // Zero is an illegal pthread id by my reading of the pthread
      // implementation:
      deleter_threadid = 0;
    }
  }

  MallocBlock* block;
  size_t size;

  // When deleted and put in the free queue, we (flag-controlled)
  // record the stack so that if corruption is later found, we can
  // print the deleter's stack.  (These three vars add 144 bytes of
  // overhead under the LP64 data model.)
  void* deleter_pcs[16];
  int num_deleter_pcs;
  pthread_t deleter_threadid;
};

class MallocBlock {
 public:  // allocation type constants

  // Different allocation types we distinguish.
  // Note: The lower 4 bits are not random: we index the kAllocName array
  // by these values masked with kAllocTypeMask;
  // the rest are "random" magic bits to help catch memory corruption.
  static const int kMallocType = 0xEFCDAB90;
  static const int kNewType = 0xFEBADC81;
  static const int kArrayNewType = 0xBCEADF72;

 private:  // constants

  // A mask used on alloc types above to get to 0, 1, 2
  static const int kAllocTypeMask = 0x3;
  // An additional bit to set in AllocType constants
  // to mark now deallocated regions.
  static const int kDeallocatedTypeBit = 0x4;

  // For better memory debugging, we initialize all storage to known
  // values, and overwrite the storage when it's deallocated:
  // Byte that fills uninitialized storage.
  static const int kMagicUninitializedByte = 0xAB;
  // Byte that fills deallocated storage.
  // NOTE: tcmalloc.cc depends on the value of kMagicDeletedByte
  //       to work around a bug in the pthread library.
  static const int kMagicDeletedByte = 0xCD;
  // A size_t (type of alloc_type_ below) in a deallocated storage
  // filled with kMagicDeletedByte.
  static const size_t kMagicDeletedSizeT =
      0xCDCDCDCD | (((size_t)0xCDCDCDCD << 16) << 16);
    // Initializer works for 32 and 64 bit size_ts;
    // "<< 16 << 16" is to keep gcc from issuing a warning
    // when size_ts are 32 bits.
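    // Illustration (hypothetical arithmetic): on a 64-bit build the
    // initializer evaluates to 0xCDCDCDCDCDCDCDCD; on a 32-bit build each
    // 16-bit shift is well defined and the high word simply drops out,
    // leaving 0xCDCDCDCD.  A single "<< 32" would trigger the warning
    // (and undefined behavior) when size_t is 32 bits wide.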

  // NOTE: on Linux, you can enable malloc debugging support in libc by
  // setting the environment variable MALLOC_CHECK_ to 1 before you
  // start the program (see man malloc).

  // We use either do_malloc or mmap to make the actual allocation. In
  // order to remember which one of the two was used for any block, we store an
  // appropriate magic word next to the block.
  static const size_t kMagicMalloc = 0xDEADBEEF;
  static const size_t kMagicMMap = 0xABCDEFAB;

  // This array will be filled with 0xCD, for use with memcmp.
  static unsigned char kMagicDeletedBuffer[1024];
  static pthread_once_t deleted_buffer_initialized_;
  static bool deleted_buffer_initialized_no_pthreads_;

 private:  // data layout

                    // The four fields size1_,offset_,magic1_,alloc_type_
                    // should together occupy a multiple of 16 bytes. (At the
                    // moment, sizeof(size_t) == 4 or 8 depending on piii vs
                    // k8, and 4 of those sum to 16 or 32 bytes).
                    // This, combined with do_malloc's alignment guarantees,
                    // ensures that SSE types can be stored into the returned
                    // block, at &size2_.
  size_t size1_;
  size_t offset_;   // normally 0 unless memaligned memory
                    // see comments in memalign() and FromRawPointer().
  size_t magic1_;
  size_t alloc_type_;
  // here comes the actual data (variable length)
  // ...
  // then come the size2_ and magic2_, or a full page of mprotect-ed memory
  // if the malloc_page_fence feature is enabled.
  size_t size2_;
  size_t magic2_;
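
  // Resulting layout, sketched for illustration (not normative):
  //
  //   [size1_|offset_|magic1_|alloc_type_| user data ... |size2_|magic2_]
  //   ^this (MallocBlock*)               ^data_addr()    ^size2_addr()
  //
  // data_addr() is &size2_ because the trailing size2_/magic2_ copies are
  // actually written size1_ bytes further on, past the user data.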

 private:  // static data and helpers

  // Allocation map: stores the allocation type for each allocated object,
  // or the type or'ed with kDeallocatedTypeBit
  // for each formerly allocated object.
  typedef AddressMap<int> AllocMap;
  static AllocMap* alloc_map_;
  // This protects alloc_map_ and consistent state of metadata
  // for each still-allocated object in it.
  // We use spin locks instead of pthread_mutex_t locks
  // to prevent crashes via calls to pthread_mutex_(un)lock
  // for the (de)allocations coming from pthreads initialization itself.
  static SpinLock alloc_map_lock_;

  // A queue of freed blocks.  Instead of releasing blocks to the allocator
  // immediately, we put them in a queue, freeing them only when necessary
  // to keep the total size of all the freed blocks below the limit set by
  // FLAGS_max_free_queue_size.
  static FreeQueue<MallocBlockQueueEntry>* free_queue_;

  static size_t free_queue_size_;  // total size of blocks in free_queue_
  // protects free_queue_ and free_queue_size_
  static SpinLock free_queue_lock_;

  // Names of allocation types (kMallocType, kNewType, kArrayNewType)
  static const char* const kAllocName[];
  // Names of corresponding deallocation types
  static const char* const kDeallocName[];

  static const char* AllocName(int type) {
    return kAllocName[type & kAllocTypeMask];
  }

  static const char* DeallocName(int type) {
    return kDeallocName[type & kAllocTypeMask];
  }

 private:  // helper accessors

  bool IsMMapped() const { return kMagicMMap == magic1_; }

  bool IsValidMagicValue(size_t value) const {
    return kMagicMMap == value || kMagicMalloc == value;
  }

  static size_t real_malloced_size(size_t size) {
    return size + sizeof(MallocBlock);
  }

  /*
   * Here we assume that the page size is kMinAlign-aligned, so if size
   * is MALLOC_ALIGNMENT-aligned too, then we can guarantee that the
   * returned address is also kMinAlign-aligned, because mmap returns
   * addresses at page boundaries on Linux.
   */
  static size_t real_mmapped_size(size_t size) {
    size_t tmp = size + MallocBlock::data_offset();
    tmp = RoundUp(tmp, kMinAlign);
    return tmp;
  }

  size_t real_size() {
    return IsMMapped() ? real_mmapped_size(size1_) : real_malloced_size(size1_);
  }

  // NOTE: if the block is mmapped (that is, we're using the
  // malloc_page_fence option) then there's no size2 or magic2
  // (instead, the guard page begins where size2 would be).

  size_t* size2_addr() { return (size_t*)((char*)&size2_ + size1_); }
  const size_t* size2_addr() const {
    return (const size_t*)((char*)&size2_ + size1_);
  }

  size_t* magic2_addr() { return (size_t*)(size2_addr() + 1); }
  const size_t* magic2_addr() const { return (const size_t*)(size2_addr() + 1); }
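
  // Address arithmetic example (hypothetical): for a block with
  // size1_ == 24, size2_addr() == (char*)&size2_ + 24, i.e. the first
  // word just past the user data, and magic2_addr() is the word after
  // that.  Both exist only for non-mmapped (non-page-fence) blocks.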

 private:  // other helpers

  void Initialize(size_t size, int type) {
    RAW_CHECK(IsValidMagicValue(magic1_), "");
    // record us as allocated in the map
    alloc_map_lock_.Lock();
    if (!alloc_map_) {
      void* p = do_malloc(sizeof(AllocMap));
      alloc_map_ = new(p) AllocMap(do_malloc, do_free);
    }
    alloc_map_->Insert(data_addr(), type);
    // initialize us
    size1_ = size;
    offset_ = 0;
    alloc_type_ = type;
    if (!IsMMapped()) {
      bit_store(magic2_addr(), &magic1_);
      bit_store(size2_addr(), &size);
    }
    alloc_map_lock_.Unlock();
    memset(data_addr(), kMagicUninitializedByte, size);
    if (!IsMMapped()) {
      RAW_CHECK(memcmp(&size1_, size2_addr(), sizeof(size1_)) == 0, "should hold");
      RAW_CHECK(memcmp(&magic1_, magic2_addr(), sizeof(magic1_)) == 0, "should hold");
    }
  }

  size_t CheckAndClear(int type, size_t given_size) {
    alloc_map_lock_.Lock();
    CheckLocked(type);
    if (!IsMMapped()) {
      RAW_CHECK(memcmp(&size1_, size2_addr(), sizeof(size1_)) == 0, "should hold");
    }
    // record us as deallocated in the map
    alloc_map_->Insert(data_addr(), type | kDeallocatedTypeBit);
    alloc_map_lock_.Unlock();
    // clear us
    const size_t size = real_size();
    RAW_CHECK(!given_size || given_size == size1_,
              "right size must be passed to sized delete");
    memset(this, kMagicDeletedByte, size);
    return size;
  }

  void CheckLocked(int type) const {
    int map_type = 0;
    const int* found_type =
        alloc_map_ != NULL ? alloc_map_->Find(data_addr()) : NULL;
    if (found_type == NULL) {
      RAW_LOG(FATAL, "memory allocation bug: object at %p "
                     "has never been allocated", data_addr());
    } else {
      map_type = *found_type;
    }
    if ((map_type & kDeallocatedTypeBit) != 0) {
      RAW_LOG(FATAL, "memory allocation bug: object at %p "
                     "has already been deallocated (it was allocated with %s)",
              data_addr(), AllocName(map_type & ~kDeallocatedTypeBit));
    }
    if (alloc_type_ == kMagicDeletedSizeT) {
      RAW_LOG(FATAL, "memory stomping bug: a word before object at %p "
                     "has been corrupted; or else the object has already been "
                     "deallocated and our memory map has been corrupted",
              data_addr());
    }
    if (!IsValidMagicValue(magic1_)) {
      RAW_LOG(FATAL, "memory stomping bug: a word before object at %p "
                     "has been corrupted; "
                     "or else our memory map has been corrupted and this is a "
                     "deallocation for a not (currently) heap-allocated object",
              data_addr());
    }
    if (!IsMMapped()) {
      if (memcmp(&size1_, size2_addr(), sizeof(size1_))) {
        RAW_LOG(FATAL, "memory stomping bug: a word after object at %p "
                       "has been corrupted", data_addr());
      }
      size_t addr;
      bit_store(&addr, magic2_addr());
      if (!IsValidMagicValue(addr)) {
        RAW_LOG(FATAL, "memory stomping bug: a word after object at %p "
                       "has been corrupted", data_addr());
      }
    }
    if (alloc_type_ != type) {
      if ((alloc_type_ != MallocBlock::kMallocType) &&
          (alloc_type_ != MallocBlock::kNewType) &&
          (alloc_type_ != MallocBlock::kArrayNewType)) {
        RAW_LOG(FATAL, "memory stomping bug: a word before object at %p "
                       "has been corrupted", data_addr());
      }
      RAW_LOG(FATAL, "memory allocation/deallocation mismatch at %p: "
                     "allocated with %s being deallocated with %s",
              data_addr(), AllocName(alloc_type_), DeallocName(type));
    }
    if (alloc_type_ != map_type) {
      RAW_LOG(FATAL, "memory stomping bug: our memory map has been corrupted: "
                     "allocation at %p made with %s "
                     "is recorded in the map as made with %s",
              data_addr(), AllocName(alloc_type_), AllocName(map_type));
    }
  }

 public:  // public accessors

  void* data_addr() { return (void*)&size2_; }
  const void* data_addr() const { return (const void*)&size2_; }

  static size_t data_offset() { return OFFSETOF_MEMBER(MallocBlock, size2_); }

  size_t data_size() const { return size1_; }

  void set_offset(int offset) { this->offset_ = offset; }

 public:  // our main interface

  static MallocBlock* Allocate(size_t size, int type) {
    // Prevent an integer overflow / crash with large allocation sizes.
    // TODO - Note that for e.g. a 64-bit size_t, max_size_t may not actually
    // be the maximum value, depending on how the compiler treats ~0. The worst
    // practical effect is that allocations are limited to 4Gb or so, even if
    // the address space could take more.
    static size_t max_size_t = ~0;
    if (size > max_size_t - sizeof(MallocBlock)) {
      RAW_LOG(ERROR, "Massive size passed to malloc: %zu", size);
      return NULL;
    }
    MallocBlock* b = NULL;
    const bool use_malloc_page_fence = FLAGS_malloc_page_fence;
    const bool malloc_page_fence_readable = FLAGS_malloc_page_fence_readable;
#ifdef HAVE_MMAP
    if (use_malloc_page_fence) {
      // Put the block towards the end of the page and make the next page
      // inaccessible. This will catch buffer overruns right when they happen.
      size_t sz = real_mmapped_size(size);
      int pagesize = getpagesize();
      int num_pages = (sz + pagesize - 1) / pagesize + 1;
      char* p = (char*) mmap(NULL, num_pages * pagesize, PROT_READ|PROT_WRITE,
                             MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) {
        // If the allocation fails, abort rather than returning NULL to
        // malloc. This is because in most cases, the program will run out
        // of memory in this mode due to the tremendous amount of wastage.
        // There is no point in propagating the error elsewhere.
        RAW_LOG(FATAL, "Out of memory: possibly due to page fence overhead: %s",
                strerror(errno));
      }
      // Mark the page after the block inaccessible
      if (mprotect(p + (num_pages - 1) * pagesize, pagesize,
                   PROT_NONE|(malloc_page_fence_readable ? PROT_READ : 0))) {
        RAW_LOG(FATAL, "Guard page setup failed: %s", strerror(errno));
      }
      b = (MallocBlock*) (p + (num_pages - 1) * pagesize - sz);
    } else {
      b = (MallocBlock*) do_malloc(real_malloced_size(size));
    }
#else
    b = (MallocBlock*) do_malloc(real_malloced_size(size));
#endif

    // It would be nice to output a diagnostic on allocation failure
    // here, but logging (other than FATAL) requires allocating
    // memory, which could trigger a nasty recursion. Instead, preserve
    // malloc semantics and return NULL on failure.
    if (b != NULL) {
      b->magic1_ = use_malloc_page_fence ? kMagicMMap : kMagicMalloc;
      b->Initialize(size, type);
    }
    return b;
  }

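  // Geometry example for the page-fence path (hypothetical numbers,
  // assuming a 4096-byte page): for sz == 100, num_pages ==
  // (100 + 4095) / 4096 + 1 == 2.  We mmap two pages, mprotect the
  // second one, and place the block at p + 1*4096 - 100 so that it ends
  // flush against the guard page and any overrun faults instantly.
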
  void Deallocate(int type, size_t given_size) {
    if (IsMMapped()) {  // have to do this before CheckAndClear
#ifdef HAVE_MMAP
      int size = CheckAndClear(type, given_size);
      int pagesize = getpagesize();
      int num_pages = (size + pagesize - 1) / pagesize + 1;
      char* p = (char*) this;
      if (FLAGS_malloc_page_fence_never_reclaim ||
          !FLAGS_malloc_reclaim_memory) {
        mprotect(p - (num_pages - 1) * pagesize + size,
                 num_pages * pagesize, PROT_NONE);
      } else {
        munmap(p - (num_pages - 1) * pagesize + size, num_pages * pagesize);
      }
#endif
    } else {
      const size_t size = CheckAndClear(type, given_size);
      if (FLAGS_malloc_reclaim_memory) {
        // Instead of freeing the block immediately, push it onto a queue of
        // recently freed blocks. Free only enough blocks to keep the queue
        // from exceeding its capacity and the total amount of un-released
        // memory in the queue from exceeding FLAGS_max_free_queue_size.
        ProcessFreeQueue(this, size, FLAGS_max_free_queue_size);
      }
    }
  }

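  // Reversing the Allocate() geometry (hypothetical numbers, 4096-byte
  // pages): with size == 100 here, "this" is base + 4096 - 100, so
  // p - (num_pages - 1)*pagesize + size recovers the original mmap base,
  // which is then either munmap()ed or left mapped but PROT_NONE so the
  // address range can never be reused (--malloc_page_fence_never_reclaim).
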
  static size_t FreeQueueSize() {
    SpinLockHolder l(&free_queue_lock_);
    return free_queue_size_;
  }

  static void ProcessFreeQueue(MallocBlock* b, size_t size,
                               int max_free_queue_size) {
    // MallocBlockQueueEntry instances are about 144 bytes each, so we can
    // only use a small array of them on the stack.
    MallocBlockQueueEntry entries[4];
    int num_entries = 0;
    MallocBlockQueueEntry new_entry(b, size);
    free_queue_lock_.Lock();
    if (free_queue_ == NULL)
      free_queue_ = new FreeQueue<MallocBlockQueueEntry>;
    RAW_CHECK(!free_queue_->Full(), "Free queue mustn't be full!");

    if (b != NULL) {
      free_queue_size_ += size + sizeof(MallocBlockQueueEntry);
      free_queue_->Push(new_entry);
    }

    // Free blocks until the total size of unfreed blocks no longer exceeds
    // max_free_queue_size, and the free queue has at least one free
    // space in it.
    while (free_queue_size_ > max_free_queue_size || free_queue_->Full()) {
      RAW_CHECK(num_entries < arraysize(entries), "entries array overflow");
      entries[num_entries] = free_queue_->Pop();
      free_queue_size_ -=
          entries[num_entries].size + sizeof(MallocBlockQueueEntry);
      num_entries++;
      if (num_entries == arraysize(entries)) {
        // The queue will not be full at this point, so it is ok to
        // release the lock.  The queue may still contain more than
        // max_free_queue_size, but this is not a strict invariant.
        free_queue_lock_.Unlock();
        for (int i = 0; i < num_entries; i++) {
          CheckForDanglingWrites(entries[i]);
          do_free(entries[i].block);
        }
        num_entries = 0;
        free_queue_lock_.Lock();
      }
    }
    free_queue_lock_.Unlock();
    for (int i = 0; i < num_entries; i++) {
      CheckForDanglingWrites(entries[i]);
      do_free(entries[i].block);
    }
  }

  static void InitDeletedBuffer() {
    memset(kMagicDeletedBuffer, kMagicDeletedByte, sizeof(kMagicDeletedBuffer));
    deleted_buffer_initialized_no_pthreads_ = true;
  }

  static void CheckForDanglingWrites(const MallocBlockQueueEntry& queue_entry) {
    // Initialize the buffer if necessary.
    if (pthread_once)
      pthread_once(&deleted_buffer_initialized_, &InitDeletedBuffer);
    if (!deleted_buffer_initialized_no_pthreads_) {
      // This will be the case on systems that don't link in pthreads,
      // including on FreeBSD where pthread_once has a non-zero address
      // (but doesn't do anything) even when pthreads isn't linked in.
      InitDeletedBuffer();
    }

    const unsigned char* p =
        reinterpret_cast<unsigned char*>(queue_entry.block);

    static const size_t size_of_buffer = sizeof(kMagicDeletedBuffer);
    const size_t size = queue_entry.size;
    const size_t buffers = size / size_of_buffer;
    const size_t remainder = size % size_of_buffer;
    size_t buffer_idx;
    for (buffer_idx = 0; buffer_idx < buffers; ++buffer_idx) {
      CheckForCorruptedBuffer(queue_entry, buffer_idx, p, size_of_buffer);
      p += size_of_buffer;
    }
    CheckForCorruptedBuffer(queue_entry, buffer_idx, p, remainder);
  }

  static void CheckForCorruptedBuffer(const MallocBlockQueueEntry& queue_entry,
                                      size_t buffer_idx,
                                      const unsigned char* buffer,
                                      size_t size_of_buffer) {
    if (memcmp(buffer, kMagicDeletedBuffer, size_of_buffer) == 0) {
      return;
    }

    RAW_LOG(ERROR,
            "Found a corrupted memory buffer in MallocBlock (may be offset "
            "from user ptr): buffer index: %zd, buffer ptr: %p, size of "
            "buffer: %zd", buffer_idx, buffer, size_of_buffer);

    // The magic deleted buffer should only be 1024 bytes, but in case
    // this changes, let's put an upper limit on the number of debug
    // lines we'll output:
    if (size_of_buffer <= 1024) {
      for (int i = 0; i < size_of_buffer; ++i) {
        if (buffer[i] != kMagicDeletedByte) {
          RAW_LOG(ERROR, "Buffer byte %d is 0x%02x (should be 0x%02x).",
                  i, buffer[i], kMagicDeletedByte);
        }
      }
    } else {
      RAW_LOG(ERROR, "Buffer too large to print corruption.");
    }

    const MallocBlock* b = queue_entry.block;
    const size_t size = queue_entry.size;
    if (queue_entry.num_deleter_pcs > 0) {
      TracePrintf(STDERR_FILENO, "Deleted by thread %p\n",
                  reinterpret_cast<void*>(
                      PRINTABLE_PTHREAD(queue_entry.deleter_threadid)));

      // We don't want to allocate or deallocate memory here, so we use
      // placement-new.  It's ok that we don't destroy this, since we're
      // just going to error-exit below anyway.  Union is for alignment.
      union { void* alignment; char buf[sizeof(SymbolTable)]; } tablebuf;
      SymbolTable* symbolization_table = new (tablebuf.buf) SymbolTable;
      for (int i = 0; i < queue_entry.num_deleter_pcs; i++) {
        // Symbolizes the previous address of pc because pc may be in the
        // next function.  This may happen when the function ends with
        // a call to a function annotated noreturn (e.g. CHECK).
        char *pc = reinterpret_cast<char*>(queue_entry.deleter_pcs[i]);
        symbolization_table->Add(pc - 1);
      }
      if (FLAGS_symbolize_stacktrace)
        symbolization_table->Symbolize();
      for (int i = 0; i < queue_entry.num_deleter_pcs; i++) {
        char *pc = reinterpret_cast<char*>(queue_entry.deleter_pcs[i]);
        TracePrintf(STDERR_FILENO, "    @ %p %s\n",
                    pc, symbolization_table->GetSymbol(pc - 1));
      }
    } else {
      RAW_LOG(ERROR,
              "Skipping the printing of the deleter's stack!  Its stack was "
              "not found; either the corruption occurred too early in "
              "execution to obtain a stack trace or --max_free_queue_size was "
              "set to 0.");
    }

    RAW_LOG(FATAL,
            "Memory was written to after being freed.  MallocBlock: %p, user "
            "ptr: %p, size: %zd.  If you can't find the source of the error, "
            "try using ASan (http://code.google.com/p/address-sanitizer/), "
            "Valgrind, or Purify, or study the "
            "output of the deleter's stack printed above.",
            b, b->data_addr(), size);
  }

  static MallocBlock* FromRawPointer(void* p) {
    const size_t data_offset = MallocBlock::data_offset();
    // Find the header just before client's memory.
    MallocBlock *mb = reinterpret_cast<MallocBlock *>(
        reinterpret_cast<char *>(p) - data_offset);
    // If mb->alloc_type_ is kMagicDeletedSizeT, we're not an ok pointer.
    if (mb->alloc_type_ == kMagicDeletedSizeT) {
      RAW_LOG(FATAL, "memory allocation bug: object at %p has already been"
                     " deallocated; or else a word before the object has been"
                     " corrupted (memory stomping bug)", p);
    }
    // If mb->offset_ is zero (common case), mb is the real header.
    // If mb->offset_ is non-zero, this block was allocated by the debug
    // memalign implementation, and mb->offset_ is the distance
    // backwards to the real header from mb, which is a fake header.
    if (mb->offset_ == 0) {
      return mb;
    }

    MallocBlock *main_block = reinterpret_cast<MallocBlock *>(
        reinterpret_cast<char *>(mb) - mb->offset_);

    if (main_block->offset_ != 0) {
      RAW_LOG(FATAL, "memory corruption bug: offset_ field is corrupted."
                     " Need 0 but got %x",
              (unsigned)(main_block->offset_));
    }
    if (main_block >= p) {
      RAW_LOG(FATAL, "memory corruption bug: offset_ field is corrupted."
                     " Detected main_block address overflow: %x",
              (unsigned)(mb->offset_));
    }
    if (main_block->size2_addr() < p) {
      RAW_LOG(FATAL, "memory corruption bug: offset_ field is corrupted."
                     " It points below its own main_block: %x",
              (unsigned)(mb->offset_));
    }

    return main_block;
  }

  static const MallocBlock* FromRawPointer(const void* p) {
    // const-safe version: we just cast about
    return FromRawPointer(const_cast<void*>(p));
  }

  void Check(int type) const {
    alloc_map_lock_.Lock();
    CheckLocked(type);
    alloc_map_lock_.Unlock();
  }

  static bool CheckEverything() {
    alloc_map_lock_.Lock();
    if (alloc_map_ != NULL) alloc_map_->Iterate(CheckCallback, 0);
    alloc_map_lock_.Unlock();
    return true;  // if we get here, we're okay
  }

  static bool MemoryStats(int* blocks, size_t* total,
                          int histogram[kMallocHistogramSize]) {
    memset(histogram, 0, kMallocHistogramSize * sizeof(int));
    alloc_map_lock_.Lock();
    stats_blocks_ = 0;
    stats_total_ = 0;
    stats_histogram_ = histogram;
    if (alloc_map_ != NULL) alloc_map_->Iterate(StatsCallback, 0);
    *blocks = stats_blocks_;
    *total = stats_total_;
    alloc_map_lock_.Unlock();
    return true;
  }

 private:  // helpers for CheckEverything and MemoryStats

  static void CheckCallback(const void* ptr, int* type, int dummy) {
    if ((*type & kDeallocatedTypeBit) == 0) {
      FromRawPointer(ptr)->CheckLocked(*type);
    }
  }

  // Accumulation variables for StatsCallback protected by alloc_map_lock_
  static int stats_blocks_;
  static size_t stats_total_;
  static int* stats_histogram_;

  static void StatsCallback(const void* ptr, int* type, int dummy) {
    if ((*type & kDeallocatedTypeBit) == 0) {
      const MallocBlock* b = FromRawPointer(ptr);
      b->CheckLocked(*type);
      ++stats_blocks_;
      size_t mysize = b->size1_;
      int entry = 0;
      stats_total_ += mysize;
      while (mysize) {
        ++entry;
        mysize >>= 1;
      }
      RAW_CHECK(entry < kMallocHistogramSize,
                "kMallocHistogramSize should be at least as large as log2 "
                "of the maximum process memory size");
      stats_histogram_[entry] += 1;
    }
  }
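
  // Histogram bucketing example (hypothetical sizes): the loop counts
  // right-shifts until the size reaches zero, i.e. floor(log2(size)) + 1,
  // so a 1-byte block lands in entry 1, a 100-byte block in entry 7,
  // and a 4096-byte block in entry 13.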
};

void DanglingWriteChecker() {
  // Clear out the remaining free queue to check for dangling writes.
  MallocBlock::ProcessFreeQueue(NULL, 0, 0);
}

// ========================================================================= //

const size_t MallocBlock::kMagicMalloc;
const size_t MallocBlock::kMagicMMap;

MallocBlock::AllocMap* MallocBlock::alloc_map_ = NULL;
SpinLock MallocBlock::alloc_map_lock_(SpinLock::LINKER_INITIALIZED);

FreeQueue<MallocBlockQueueEntry>* MallocBlock::free_queue_ = NULL;
size_t MallocBlock::free_queue_size_ = 0;
SpinLock MallocBlock::free_queue_lock_(SpinLock::LINKER_INITIALIZED);

unsigned char MallocBlock::kMagicDeletedBuffer[1024];
pthread_once_t MallocBlock::deleted_buffer_initialized_ = PTHREAD_ONCE_INIT;
bool MallocBlock::deleted_buffer_initialized_no_pthreads_ = false;

const char* const MallocBlock::kAllocName[] = {
  "malloc",
  "new",
  "new []",
  NULL,
};

const char* const MallocBlock::kDeallocName[] = {
  "free",
  "delete",
  "delete []",
  NULL,
};

int MallocBlock::stats_blocks_;
size_t MallocBlock::stats_total_;
int* MallocBlock::stats_histogram_;

// ========================================================================= //

// The following cut-down version of printf() avoids
// using stdio or ostreams.
// This is to guarantee no recursive calls into
// the allocator and to bound the stack space consumed.  (The pthread
// manager thread in linuxthreads has a very small stack,
// so fprintf can't be called.)
static void TracePrintf(int fd, const char *fmt, ...) {
  char buf[64];
  int i = 0;
  va_list ap;
  va_start(ap, fmt);
  const char *p = fmt;
  char numbuf[25];
  if (fd < 0) {
    va_end(ap);
    return;
  }
  numbuf[sizeof(numbuf)-1] = 0;
  while (*p != '\0') {               // until end of format string
    char *s = &numbuf[sizeof(numbuf)-1];
    if (p[0] == '%' && p[1] != 0) {  // handle % formats
      int64 l = 0;
      unsigned long base = 0;
      if (*++p == 's') {                            // %s
        s = va_arg(ap, char *);
      } else if (*p == 'l' && p[1] == 'd') {        // %ld
        l = va_arg(ap, long);
        base = 10;
        p++;
      } else if (*p == 'l' && p[1] == 'u') {        // %lu
        l = va_arg(ap, unsigned long);
        base = 10;
        p++;
      } else if (*p == 'z' && p[1] == 'u') {        // %zu
        l = va_arg(ap, size_t);
        base = 10;
        p++;
      } else if (*p == 'u') {                       // %u
        l = va_arg(ap, unsigned int);
        base = 10;
      } else if (*p == 'd') {                       // %d
        l = va_arg(ap, int);
        base = 10;
      } else if (*p == 'p') {                       // %p
        l = va_arg(ap, intptr_t);
        base = 16;
      } else {
        write(STDERR_FILENO, "Unimplemented TracePrintf format\n", 33);
        write(STDERR_FILENO, p, 2);
        write(STDERR_FILENO, "\n", 1);
        abort();
      }
      p++;
      if (base != 0) {
        bool minus = (l < 0 && base == 10);
        uint64 ul = minus ? -l : l;
        do {
          *--s = "0123456789abcdef"[ul % base];
          ul /= base;
        } while (ul != 0);
        if (base == 16) {
          *--s = 'x';
          *--s = '0';
        } else if (minus) {
          *--s = '-';
        }
      }
    } else {          // handle normal characters
      *--s = *p++;
    }
    while (*s != 0) {
      if (i == sizeof(buf)) {
        write(fd, buf, i);
        i = 0;
      }
      buf[i++] = *s++;
    }
  }
  if (i != 0) {
    write(fd, buf, i);
  }
  va_end(ap);
}
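
// For example (illustrative call): TracePrintf(2, "free\t%zu\t%p\n", n, p)
// writes straight to fd 2 with write(); only the %s, %ld, %lu, %zu, %u,
// %d and %p conversions handled above are understood, and anything else
// aborts.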

// Return the file descriptor we're writing a log to
static int TraceFd() {
  static int trace_fd = -1;
  if (trace_fd == -1) {            // Open the trace file on the first call
    const char *val = getenv("TCMALLOC_TRACE_FILE");
    bool fallback_to_stderr = false;
    if (!val) {
      val = "/tmp/google.alloc";
      fallback_to_stderr = true;
    }
    trace_fd = open(val, O_CREAT|O_TRUNC|O_WRONLY, 0666);
    if (trace_fd == -1) {
      if (fallback_to_stderr) {
        trace_fd = 2;
        TracePrintf(trace_fd, "Can't open %s.  Logging to stderr.\n", val);
      } else {
        TracePrintf(2, "Can't open %s.  Logging disabled.\n", val);
      }
    }
    // Add a header to the log.
    TracePrintf(trace_fd, "Trace started: %lu\n",
                static_cast<unsigned long>(time(NULL)));
    TracePrintf(trace_fd,
                "func\tsize\tptr\tthread_id\tstack pcs for tools/symbolize\n");
  }
  return trace_fd;
}

// Print the hex stack dump on a single line.  PCs are separated by tabs.
static void TraceStack(void) {
  void *pcs[16];
  int n = GetStackTrace(pcs, sizeof(pcs)/sizeof(pcs[0]), 0);
  for (int i = 0; i != n; i++) {
    TracePrintf(TraceFd(), "\t%p", pcs[i]);
  }
}

// This protects MALLOC_TRACE, to make sure its info is atomically written.
static SpinLock malloc_trace_lock(SpinLock::LINKER_INITIALIZED);

#define MALLOC_TRACE(name, size, addr)                                  \
  do {                                                                  \
    if (FLAGS_malloctrace) {                                            \
      SpinLockHolder l(&malloc_trace_lock);                             \
      TracePrintf(TraceFd(), "%s\t%zu\t%p\t%" GPRIuPTHREAD,             \
                  name, size, addr, PRINTABLE_PTHREAD(pthread_self())); \
      TraceStack();                                                     \
      TracePrintf(TraceFd(), "\n");                                     \
    }                                                                   \
  } while (0)
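
// A resulting trace line looks something like this (hypothetical values,
// following the header written by TraceFd() above):
//   malloc  32      0x601010        1234    0x40052d        0x400601
// i.e. func, size, ptr and thread id, tab-separated, followed by the raw
// stack PCs from TraceStack() for later symbolization.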
1017
1018// ========================================================================= //
1019
1020// Write the characters buf[0, ..., size-1] to
1021// the malloc trace buffer.
1022// This function is intended for debugging,
1023// and is not declared in any header file.
1024// You must insert a declaration of it by hand when you need
1025// to use it.
1026void __malloctrace_write(const char *buf, size_t size) {
1027 if (FLAGS_malloctrace) {
1028 write(TraceFd(), buf, size);
1029 }
1030}
1031
1032// ========================================================================= //
1033
1034// General debug allocation/deallocation
1035
1036static inline void* DebugAllocate(size_t size, int type) {
1037 MallocBlock* ptr = MallocBlock::Allocate(size, type);
1038 if (ptr == NULL) return NULL;
1039 MALLOC_TRACE("malloc", size, ptr->data_addr());
1040 return ptr->data_addr();
1041}
1042
Brian Silverman20350ac2021-11-17 18:19:55 -08001043static inline void DebugDeallocate(void* ptr, int type, size_t given_size) {
Austin Schuh745610d2015-09-06 18:19:50 -07001044 MALLOC_TRACE("free",
1045 (ptr != 0 ? MallocBlock::FromRawPointer(ptr)->data_size() : 0),
1046 ptr);
Brian Silverman20350ac2021-11-17 18:19:55 -08001047 if (ptr) MallocBlock::FromRawPointer(ptr)->Deallocate(type, given_size);
Austin Schuh745610d2015-09-06 18:19:50 -07001048}
1049
1050// ========================================================================= //
1051
1052// The following functions may be called via MallocExtension::instance()
1053// for memory verification and statistics.
1054class DebugMallocImplementation : public TCMallocImplementation {
1055 public:
1056 virtual bool GetNumericProperty(const char* name, size_t* value) {
1057 bool result = TCMallocImplementation::GetNumericProperty(name, value);
1058 if (result && (strcmp(name, "generic.current_allocated_bytes") == 0)) {
1059 // Subtract bytes kept in the free queue
1060 size_t qsize = MallocBlock::FreeQueueSize();
1061 if (*value >= qsize) {
1062 *value -= qsize;
1063 }
1064 }
1065 return result;
1066 }
1067
1068 virtual bool VerifyNewMemory(const void* p) {
1069 if (p) MallocBlock::FromRawPointer(p)->Check(MallocBlock::kNewType);
1070 return true;
1071 }
1072
1073 virtual bool VerifyArrayNewMemory(const void* p) {
1074 if (p) MallocBlock::FromRawPointer(p)->Check(MallocBlock::kArrayNewType);
1075 return true;
1076 }
1077
1078 virtual bool VerifyMallocMemory(const void* p) {
1079 if (p) MallocBlock::FromRawPointer(p)->Check(MallocBlock::kMallocType);
1080 return true;
1081 }
1082
1083 virtual bool VerifyAllMemory() {
1084 return MallocBlock::CheckEverything();
1085 }
1086
1087 virtual bool MallocMemoryStats(int* blocks, size_t* total,
1088 int histogram[kMallocHistogramSize]) {
1089 return MallocBlock::MemoryStats(blocks, total, histogram);
1090 }
1091
1092 virtual size_t GetEstimatedAllocatedSize(size_t size) {
1093 return size;
1094 }
1095
1096 virtual size_t GetAllocatedSize(const void* p) {
1097 if (p) {
1098 RAW_CHECK(GetOwnership(p) != MallocExtension::kNotOwned,
1099 "ptr not allocated by tcmalloc");
1100 return MallocBlock::FromRawPointer(p)->data_size();
1101 }
1102 return 0;
1103 }
1104
1105 virtual MallocExtension::Ownership GetOwnership(const void* p) {
1106 if (!p) {
1107 // nobody owns NULL
1108 return MallocExtension::kNotOwned;
1109 }
1110
1111 // FIXME: note that correct GetOwnership should not touch memory
1112 // that is not owned by tcmalloc. Main implementation is using
1113 // pagemap to discover if page in question is owned by us or
1114 // not. But pagemap only has marks for first and last page of
1115 // spans. Note that if p was returned out of our memalign with
1116 // big alignment, then it will point outside of marked pages. Also
1117 // note that FromRawPointer call below requires touching memory
1118 // before pointer in order to handle memalign-ed chunks
1119 // (offset_). This leaves us with two options:
1120 //
1121 // * do FromRawPointer first and have possibility of crashing if
1122 // we're given not owned pointer
1123 //
1124 // * return incorrect ownership for those large memalign chunks
1125 //
1126 // I've decided to choose later, which appears to happen rarer and
1127 // therefore is arguably a lesser evil
1128
1129 MallocExtension::Ownership rv = TCMallocImplementation::GetOwnership(p);
1130 if (rv != MallocExtension::kOwned) {
1131 return rv;
1132 }
1133
1134 const MallocBlock* mb = MallocBlock::FromRawPointer(p);
1135 return TCMallocImplementation::GetOwnership(mb);
1136 }
1137
1138 virtual void GetFreeListSizes(vector<MallocExtension::FreeListInfo>* v) {
1139 static const char* kDebugFreeQueue = "debug.free_queue";
1140
1141 TCMallocImplementation::GetFreeListSizes(v);
1142
1143 MallocExtension::FreeListInfo i;
1144 i.type = kDebugFreeQueue;
1145 i.min_object_size = 0;
1146 i.max_object_size = numeric_limits<size_t>::max();
1147 i.total_bytes_free = MallocBlock::FreeQueueSize();
1148 v->push_back(i);
1149 }
1150
1151 };
1152
1153static union {
1154 char chars[sizeof(DebugMallocImplementation)];
1155 void *ptr;
1156} debug_malloc_implementation_space;
1157
1158REGISTER_MODULE_INITIALIZER(debugallocation, {
1159#if (__cplusplus >= 201103L)
Brian Silverman20350ac2021-11-17 18:19:55 -08001160 static_assert(alignof(decltype(debug_malloc_implementation_space)) >= alignof(DebugMallocImplementation),
1161 "DebugMallocImplementation is expected to need just word alignment");
Austin Schuh745610d2015-09-06 18:19:50 -07001162#endif
1163 // Either we or valgrind will control memory management. We
1164 // register our extension if we're the winner. Otherwise let
1165 // Valgrind use its own malloc (so don't register our extension).
1166 if (!RunningOnValgrind()) {
1167 DebugMallocImplementation *impl = new (debug_malloc_implementation_space.chars) DebugMallocImplementation();
1168 MallocExtension::Register(impl);
1169 }
1170});
1171
1172REGISTER_MODULE_DESTRUCTOR(debugallocation, {
1173 if (!RunningOnValgrind()) {
1174 // When the program exits, check all blocks still in the free
1175 // queue for corruption.
1176 DanglingWriteChecker();
1177 }
1178});
1179
1180// ========================================================================= //
1181
1182struct debug_alloc_retry_data {
1183 size_t size;
1184 int new_type;
1185};
1186
1187static void *retry_debug_allocate(void *arg) {
1188 debug_alloc_retry_data *data = static_cast<debug_alloc_retry_data *>(arg);
1189 return DebugAllocate(data->size, data->new_type);
1190}
1191
1192// This is mostly the same a cpp_alloc in tcmalloc.cc.
1193// TODO(csilvers): change Allocate() above to call cpp_alloc, so we
1194// don't have to reproduce the logic here. To make tc_new_mode work
1195// properly, I think we'll need to separate out the logic of throwing
1196// from the logic of calling the new-handler.
1197inline void* debug_cpp_alloc(size_t size, int new_type, bool nothrow) {
1198 void* p = DebugAllocate(size, new_type);
1199 if (p != NULL) {
1200 return p;
1201 }
1202 struct debug_alloc_retry_data data;
1203 data.size = size;
1204 data.new_type = new_type;
1205 return handle_oom(retry_debug_allocate, &data,
1206 true, nothrow);
1207}
1208
1209inline void* do_debug_malloc_or_debug_cpp_alloc(size_t size) {
1210 void* p = DebugAllocate(size, MallocBlock::kMallocType);
1211 if (p != NULL) {
1212 return p;
1213 }
1214 struct debug_alloc_retry_data data;
1215 data.size = size;
1216 data.new_type = MallocBlock::kMallocType;
1217 return handle_oom(retry_debug_allocate, &data,
1218 false, true);
1219}
1220
1221// Exported routines
1222
Brian Silverman20350ac2021-11-17 18:19:55 -08001223// frame forcer and force_frame exist only to prevent tail calls to
1224// DebugDeallocate to be actually implemented as tail calls. This is
1225// important because stack trace capturing in MallocBlockQueueEntry
1226// relies on google_malloc section being on stack and tc_XXX functions
1227// are in that section. So they must not jump to DebugDeallocate but
1228// have to do call. frame_forcer call at the end of such functions
1229// prevents tail calls to DebugDeallocate.
1230static int frame_forcer;
1231static void force_frame() {
1232 int dummy = *(int volatile *)&frame_forcer;
1233 (void)dummy;
1234}
1235
1236extern "C" PERFTOOLS_DLL_DECL void* tc_malloc(size_t size) PERFTOOLS_NOTHROW {
1237 if (ThreadCache::IsUseEmergencyMalloc()) {
1238 return tcmalloc::EmergencyMalloc(size);
1239 }
Austin Schuh745610d2015-09-06 18:19:50 -07001240 void* ptr = do_debug_malloc_or_debug_cpp_alloc(size);
1241 MallocHook::InvokeNewHook(ptr, size);
1242 return ptr;
1243}
1244
Brian Silverman20350ac2021-11-17 18:19:55 -08001245extern "C" PERFTOOLS_DLL_DECL void tc_free(void* ptr) PERFTOOLS_NOTHROW {
1246 if (tcmalloc::IsEmergencyPtr(ptr)) {
1247 return tcmalloc::EmergencyFree(ptr);
1248 }
Austin Schuh745610d2015-09-06 18:19:50 -07001249 MallocHook::InvokeDeleteHook(ptr);
Brian Silverman20350ac2021-11-17 18:19:55 -08001250 DebugDeallocate(ptr, MallocBlock::kMallocType, 0);
1251 force_frame();
Austin Schuh745610d2015-09-06 18:19:50 -07001252}
1253
Brian Silverman20350ac2021-11-17 18:19:55 -08001254extern "C" PERFTOOLS_DLL_DECL void tc_free_sized(void *ptr, size_t size) PERFTOOLS_NOTHROW {
1255 MallocHook::InvokeDeleteHook(ptr);
1256 DebugDeallocate(ptr, MallocBlock::kMallocType, size);
1257 force_frame();
1258}
1259
1260extern "C" PERFTOOLS_DLL_DECL void* tc_calloc(size_t count, size_t size) PERFTOOLS_NOTHROW {
1261 if (ThreadCache::IsUseEmergencyMalloc()) {
1262 return tcmalloc::EmergencyCalloc(count, size);
1263 }
Austin Schuh745610d2015-09-06 18:19:50 -07001264 // Overflow check
1265 const size_t total_size = count * size;
1266 if (size != 0 && total_size / size != count) return NULL;
1267
1268 void* block = do_debug_malloc_or_debug_cpp_alloc(total_size);
1269 MallocHook::InvokeNewHook(block, total_size);
1270 if (block) memset(block, 0, total_size);
1271 return block;
1272}
1273
Brian Silverman20350ac2021-11-17 18:19:55 -08001274extern "C" PERFTOOLS_DLL_DECL void tc_cfree(void* ptr) PERFTOOLS_NOTHROW {
1275 if (tcmalloc::IsEmergencyPtr(ptr)) {
1276 return tcmalloc::EmergencyFree(ptr);
1277 }
Austin Schuh745610d2015-09-06 18:19:50 -07001278 MallocHook::InvokeDeleteHook(ptr);
Brian Silverman20350ac2021-11-17 18:19:55 -08001279 DebugDeallocate(ptr, MallocBlock::kMallocType, 0);
1280 force_frame();
Austin Schuh745610d2015-09-06 18:19:50 -07001281}
1282
Brian Silverman20350ac2021-11-17 18:19:55 -08001283extern "C" PERFTOOLS_DLL_DECL void* tc_realloc(void* ptr, size_t size) PERFTOOLS_NOTHROW {
1284 if (tcmalloc::IsEmergencyPtr(ptr)) {
1285 return tcmalloc::EmergencyRealloc(ptr, size);
1286 }
Austin Schuh745610d2015-09-06 18:19:50 -07001287 if (ptr == NULL) {
1288 ptr = do_debug_malloc_or_debug_cpp_alloc(size);
1289 MallocHook::InvokeNewHook(ptr, size);
1290 return ptr;
1291 }
1292 if (size == 0) {
1293 MallocHook::InvokeDeleteHook(ptr);
Brian Silverman20350ac2021-11-17 18:19:55 -08001294 DebugDeallocate(ptr, MallocBlock::kMallocType, 0);
Austin Schuh745610d2015-09-06 18:19:50 -07001295 return NULL;
1296 }
1297 MallocBlock* old = MallocBlock::FromRawPointer(ptr);
1298 old->Check(MallocBlock::kMallocType);
1299 MallocBlock* p = MallocBlock::Allocate(size, MallocBlock::kMallocType);
1300
1301 // If realloc fails we are to leave the old block untouched and
1302 // return null
1303 if (p == NULL) return NULL;
1304
1305 // if ptr was allocated via memalign, then old->data_size() is not
1306 // start of user data. So we must be careful to copy only user-data
1307 char *old_begin = (char *)old->data_addr();
1308 char *old_end = old_begin + old->data_size();
1309
1310 ssize_t old_ssize = old_end - (char *)ptr;
1311 CHECK_CONDITION(old_ssize >= 0);
1312
1313 size_t old_size = (size_t)old_ssize;
1314 CHECK_CONDITION(old_size <= old->data_size());
1315
1316 memcpy(p->data_addr(), ptr, (old_size < size) ? old_size : size);
1317 MallocHook::InvokeDeleteHook(ptr);
1318 MallocHook::InvokeNewHook(p->data_addr(), size);
Brian Silverman20350ac2021-11-17 18:19:55 -08001319 DebugDeallocate(ptr, MallocBlock::kMallocType, 0);
Austin Schuh745610d2015-09-06 18:19:50 -07001320 MALLOC_TRACE("realloc", p->data_size(), p->data_addr());
1321 return p->data_addr();
1322}
1323
1324extern "C" PERFTOOLS_DLL_DECL void* tc_new(size_t size) {
1325 void* ptr = debug_cpp_alloc(size, MallocBlock::kNewType, false);
1326 MallocHook::InvokeNewHook(ptr, size);
1327 if (ptr == NULL) {
Brian Silverman20350ac2021-11-17 18:19:55 -08001328 RAW_LOG(FATAL, "Unable to allocate %zu bytes: new failed.", size);
Austin Schuh745610d2015-09-06 18:19:50 -07001329 }
1330 return ptr;
1331}
1332
Brian Silverman20350ac2021-11-17 18:19:55 -08001333extern "C" PERFTOOLS_DLL_DECL void* tc_new_nothrow(size_t size, const std::nothrow_t&) PERFTOOLS_NOTHROW {
Austin Schuh745610d2015-09-06 18:19:50 -07001334 void* ptr = debug_cpp_alloc(size, MallocBlock::kNewType, true);
1335 MallocHook::InvokeNewHook(ptr, size);
1336 return ptr;
1337}
1338
Brian Silverman20350ac2021-11-17 18:19:55 -08001339extern "C" PERFTOOLS_DLL_DECL void tc_delete(void* p) PERFTOOLS_NOTHROW {
Austin Schuh745610d2015-09-06 18:19:50 -07001340 MallocHook::InvokeDeleteHook(p);
Brian Silverman20350ac2021-11-17 18:19:55 -08001341 DebugDeallocate(p, MallocBlock::kNewType, 0);
1342 force_frame();
1343}
1344
1345extern "C" PERFTOOLS_DLL_DECL void tc_delete_sized(void* p, size_t size) PERFTOOLS_NOTHROW {
1346 MallocHook::InvokeDeleteHook(p);
1347 DebugDeallocate(p, MallocBlock::kNewType, size);
1348 force_frame();
Austin Schuh745610d2015-09-06 18:19:50 -07001349}
1350
1351// Some STL implementations explicitly invoke this.
1352// It is completely equivalent to a normal delete (delete never throws).
Brian Silverman20350ac2021-11-17 18:19:55 -08001353extern "C" PERFTOOLS_DLL_DECL void tc_delete_nothrow(void* p, const std::nothrow_t&) PERFTOOLS_NOTHROW {
Austin Schuh745610d2015-09-06 18:19:50 -07001354 MallocHook::InvokeDeleteHook(p);
Brian Silverman20350ac2021-11-17 18:19:55 -08001355 DebugDeallocate(p, MallocBlock::kNewType, 0);
1356 force_frame();
Austin Schuh745610d2015-09-06 18:19:50 -07001357}
1358
1359extern "C" PERFTOOLS_DLL_DECL void* tc_newarray(size_t size) {
1360 void* ptr = debug_cpp_alloc(size, MallocBlock::kArrayNewType, false);
1361 MallocHook::InvokeNewHook(ptr, size);
1362 if (ptr == NULL) {
Brian Silverman20350ac2021-11-17 18:19:55 -08001363 RAW_LOG(FATAL, "Unable to allocate %zu bytes: new[] failed.", size);
Austin Schuh745610d2015-09-06 18:19:50 -07001364 }
1365 return ptr;
1366}
1367
1368extern "C" PERFTOOLS_DLL_DECL void* tc_newarray_nothrow(size_t size, const std::nothrow_t&)
Brian Silverman20350ac2021-11-17 18:19:55 -08001369 PERFTOOLS_NOTHROW {
Austin Schuh745610d2015-09-06 18:19:50 -07001370 void* ptr = debug_cpp_alloc(size, MallocBlock::kArrayNewType, true);
1371 MallocHook::InvokeNewHook(ptr, size);
1372 return ptr;
1373}
1374
Brian Silverman20350ac2021-11-17 18:19:55 -08001375extern "C" PERFTOOLS_DLL_DECL void tc_deletearray(void* p) PERFTOOLS_NOTHROW {
Austin Schuh745610d2015-09-06 18:19:50 -07001376 MallocHook::InvokeDeleteHook(p);
Brian Silverman20350ac2021-11-17 18:19:55 -08001377 DebugDeallocate(p, MallocBlock::kArrayNewType, 0);
1378 force_frame();
1379}
1380
1381extern "C" PERFTOOLS_DLL_DECL void tc_deletearray_sized(void* p, size_t size) PERFTOOLS_NOTHROW {
1382 MallocHook::InvokeDeleteHook(p);
1383 DebugDeallocate(p, MallocBlock::kArrayNewType, size);
1384 force_frame();
Austin Schuh745610d2015-09-06 18:19:50 -07001385}
1386
1387// Some STL implementations explicitly invoke this.
1388// It is completely equivalent to a normal delete (delete never throws).
Brian Silverman20350ac2021-11-17 18:19:55 -08001389extern "C" PERFTOOLS_DLL_DECL void tc_deletearray_nothrow(void* p, const std::nothrow_t&) PERFTOOLS_NOTHROW {
Austin Schuh745610d2015-09-06 18:19:50 -07001390 MallocHook::InvokeDeleteHook(p);
Brian Silverman20350ac2021-11-17 18:19:55 -08001391 DebugDeallocate(p, MallocBlock::kArrayNewType, 0);
1392 force_frame();
Austin Schuh745610d2015-09-06 18:19:50 -07001393}
1394
// This is mostly the same as do_memalign in tcmalloc.cc.
static void *do_debug_memalign(size_t alignment, size_t size, int type) {
  // Allocate >= size bytes aligned on "alignment" boundary
  // "alignment" is a power of two.
  void *p = 0;
  RAW_CHECK((alignment & (alignment-1)) == 0, "must be power of two");
  const size_t data_offset = MallocBlock::data_offset();
  // Allocate "alignment-1" extra bytes to ensure alignment is possible, and
  // a further data_offset bytes for an additional fake header.
  size_t extra_bytes = data_offset + alignment - 1;
  if (size + extra_bytes < size) return NULL;  // Overflow
  p = DebugAllocate(size + extra_bytes, type);
  if (p != 0) {
    intptr_t orig_p = reinterpret_cast<intptr_t>(p);
    // Leave data_offset bytes for fake header, and round up to meet
    // alignment.
    p = reinterpret_cast<void *>(RoundUp(orig_p + data_offset, alignment));
    // Create a fake header block with an offset_ that points back to the
    // real header. FromRawPointer uses this value.
    MallocBlock *fake_hdr = reinterpret_cast<MallocBlock *>(
        reinterpret_cast<char *>(p) - data_offset);
    // offset_ is distance between real and fake headers.
    // p is now end of fake header (beginning of client area),
    // and orig_p is the end of the real header, so offset_
    // is their difference.
    //
    // Note that other fields of fake_hdr are initialized with
    // kMagicUninitializedByte
    fake_hdr->set_offset(reinterpret_cast<intptr_t>(p) - orig_p);
  }
  return p;
}
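
// A worked numeric example of the layout above (illustrative values only;
// data_offset is assumed to be 16 here, the real value depends on the
// build): for do_debug_memalign(32, 100, ...), DebugAllocate returns a
// block whose client area starts at, say, orig_p == 0x10008.
// RoundUp(0x10008 + 16, 32) == 0x10020, so the caller gets p == 0x10020,
// the fake header occupies [0x10010, 0x10020), and set_offset records
// 0x10020 - 0x10008 == 0x18, which FromRawPointer later subtracts to
// recover the real header.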

struct memalign_retry_data {
  size_t align;
  size_t size;
  int type;
};

static void *retry_debug_memalign(void *arg) {
  memalign_retry_data *data = static_cast<memalign_retry_data *>(arg);
  return do_debug_memalign(data->align, data->size, data->type);
}

ATTRIBUTE_ALWAYS_INLINE
inline void* do_debug_memalign_or_debug_cpp_memalign(size_t align,
                                                     size_t size,
                                                     int type,
                                                     bool from_operator,
                                                     bool nothrow) {
  void* p = do_debug_memalign(align, size, type);
  if (p != NULL) {
    return p;
  }

  struct memalign_retry_data data;
  data.align = align;
  data.size = size;
  data.type = type;
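  // handle_oom (defined in tcmalloc.cc) gives the C++ new-handler a
  // chance to free memory and retries via retry_debug_memalign;
  // depending on from_operator/nothrow, an unrecoverable failure either
  // returns NULL or throws std::bad_alloc.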
  return handle_oom(retry_debug_memalign, &data,
                    from_operator, nothrow);
}

extern "C" PERFTOOLS_DLL_DECL void* tc_memalign(size_t align, size_t size) PERFTOOLS_NOTHROW {
  void *p = do_debug_memalign_or_debug_cpp_memalign(align, size, MallocBlock::kMallocType, false, true);
  MallocHook::InvokeNewHook(p, size);
  return p;
}

// Implementation taken from tcmalloc/tcmalloc.cc
extern "C" PERFTOOLS_DLL_DECL int tc_posix_memalign(void** result_ptr, size_t align, size_t size)
    PERFTOOLS_NOTHROW {
  if (((align % sizeof(void*)) != 0) ||
      ((align & (align - 1)) != 0) ||
      (align == 0)) {
    return EINVAL;
  }

  void* result = do_debug_memalign_or_debug_cpp_memalign(align, size, MallocBlock::kMallocType, false, true);
  MallocHook::InvokeNewHook(result, size);
  if (result == NULL) {
    return ENOMEM;
  } else {
    *result_ptr = result;
    return 0;
  }
}
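
// Example usage (illustrative sketch only): per POSIX, align must be a
// power of two and a multiple of sizeof(void*):
//   void* buf;
//   if (tc_posix_memalign(&buf, 64, 1024) == 0) { /* use buf */ tc_free(buf); }
//   tc_posix_memalign(&buf, 24, 1024);  // EINVAL: 24 is not a power of two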

extern "C" PERFTOOLS_DLL_DECL void* tc_valloc(size_t size) PERFTOOLS_NOTHROW {
  // Allocate >= size bytes starting on a page boundary
  void *p = do_debug_memalign_or_debug_cpp_memalign(getpagesize(), size, MallocBlock::kMallocType, false, true);
  MallocHook::InvokeNewHook(p, size);
  return p;
}

extern "C" PERFTOOLS_DLL_DECL void* tc_pvalloc(size_t size) PERFTOOLS_NOTHROW {
  // Round size up to a multiple of pages
  // then allocate memory on a page boundary
  int pagesize = getpagesize();
  size = RoundUp(size, pagesize);
  if (size == 0) {     // pvalloc(0) should allocate one page, according to
    size = pagesize;   // http://man.free4web.biz/man3/libmpatrol.3.html
  }
  void *p = do_debug_memalign_or_debug_cpp_memalign(pagesize, size, MallocBlock::kMallocType, false, true);
  MallocHook::InvokeNewHook(p, size);
  return p;
}
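
// For example (assuming 4 KiB pages): tc_pvalloc(1) rounds the request
// up to 4096 bytes, and tc_pvalloc(0) likewise returns one full page.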

#if defined(ENABLE_ALIGNED_NEW_DELETE)

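// The C++17 aligned operator new/delete family. The new variants pass
// from_operator == true, so an allocation failure goes through the
// new-handler/std::bad_alloc path rather than simply returning NULL
// (except for the nothrow variants).
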
extern "C" PERFTOOLS_DLL_DECL void* tc_new_aligned(size_t size, std::align_val_t align) {
  void* result = do_debug_memalign_or_debug_cpp_memalign(static_cast<size_t>(align), size, MallocBlock::kNewType, true, false);
  MallocHook::InvokeNewHook(result, size);
  return result;
}

extern "C" PERFTOOLS_DLL_DECL void* tc_new_aligned_nothrow(size_t size, std::align_val_t align, const std::nothrow_t&) PERFTOOLS_NOTHROW {
  void* result = do_debug_memalign_or_debug_cpp_memalign(static_cast<size_t>(align), size, MallocBlock::kNewType, true, true);
  MallocHook::InvokeNewHook(result, size);
  return result;
}

extern "C" PERFTOOLS_DLL_DECL void tc_delete_aligned(void* p, std::align_val_t) PERFTOOLS_NOTHROW {
  tc_delete(p);
}

extern "C" PERFTOOLS_DLL_DECL void tc_delete_sized_aligned(void* p, size_t size, std::align_val_t align) PERFTOOLS_NOTHROW {
  // Reproduce the actual size calculation done by do_debug_memalign.
  const size_t alignment = static_cast<size_t>(align);
  const size_t data_offset = MallocBlock::data_offset();
  const size_t extra_bytes = data_offset + alignment - 1;

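  // The sized-delete check in DebugDeallocate compares the size passed
  // here against the size recorded when the block was allocated, which
  // for an aligned block included extra_bytes of padding; add it back
  // so the check passes.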
  tc_delete_sized(p, size + extra_bytes);
}

extern "C" PERFTOOLS_DLL_DECL void tc_delete_aligned_nothrow(void* p, std::align_val_t, const std::nothrow_t&) PERFTOOLS_NOTHROW {
  tc_delete(p);
}

extern "C" PERFTOOLS_DLL_DECL void* tc_newarray_aligned(size_t size, std::align_val_t align) {
  void* result = do_debug_memalign_or_debug_cpp_memalign(static_cast<size_t>(align), size, MallocBlock::kArrayNewType, true, false);
  MallocHook::InvokeNewHook(result, size);
  return result;
}

extern "C" PERFTOOLS_DLL_DECL void* tc_newarray_aligned_nothrow(size_t size, std::align_val_t align, const std::nothrow_t&) PERFTOOLS_NOTHROW {
  void* result = do_debug_memalign_or_debug_cpp_memalign(static_cast<size_t>(align), size, MallocBlock::kArrayNewType, true, true);
  MallocHook::InvokeNewHook(result, size);
  return result;
}

extern "C" PERFTOOLS_DLL_DECL void tc_deletearray_aligned(void* p, std::align_val_t) PERFTOOLS_NOTHROW {
  tc_deletearray(p);
}

extern "C" PERFTOOLS_DLL_DECL void tc_deletearray_sized_aligned(void* p, size_t size, std::align_val_t align) PERFTOOLS_NOTHROW {
  // Reproduce the actual size calculation done by do_debug_memalign.
  const size_t alignment = static_cast<size_t>(align);
  const size_t data_offset = MallocBlock::data_offset();
  const size_t extra_bytes = data_offset + alignment - 1;

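  // See the note in tc_delete_sized_aligned above for why extra_bytes
  // is added back.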
  tc_deletearray_sized(p, size + extra_bytes);
}

extern "C" PERFTOOLS_DLL_DECL void tc_deletearray_aligned_nothrow(void* p, std::align_val_t, const std::nothrow_t&) PERFTOOLS_NOTHROW {
  tc_deletearray(p);
}

#endif // defined(ENABLE_ALIGNED_NEW_DELETE)

// malloc_stats just falls through to the base implementation.
extern "C" PERFTOOLS_DLL_DECL void tc_malloc_stats(void) PERFTOOLS_NOTHROW {
  do_malloc_stats();
}

extern "C" PERFTOOLS_DLL_DECL int tc_mallopt(int cmd, int value) PERFTOOLS_NOTHROW {
  return do_mallopt(cmd, value);
}

#ifdef HAVE_STRUCT_MALLINFO
extern "C" PERFTOOLS_DLL_DECL struct mallinfo tc_mallinfo(void) PERFTOOLS_NOTHROW {
  return do_mallinfo();
}
#endif

extern "C" PERFTOOLS_DLL_DECL size_t tc_malloc_size(void* ptr) PERFTOOLS_NOTHROW {
  return MallocExtension::instance()->GetAllocatedSize(ptr);
}

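// Unlike tc_malloc, this entry point deliberately bypasses the
// new-handler/OOM retry path: DebugAllocate is called exactly once and
// NULL is returned on failure.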
extern "C" PERFTOOLS_DLL_DECL void* tc_malloc_skip_new_handler(size_t size) PERFTOOLS_NOTHROW {
  void* result = DebugAllocate(size, MallocBlock::kMallocType);
  MallocHook::InvokeNewHook(result, size);
  return result;
}