blob: 47df7792246c0522a2715cb2b3c57ea0bd1af4f7 [file] [log] [blame]
Austin Schuh745610d2015-09-06 18:19:50 -07001// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
2// Copyright (c) 2005, Google Inc.
3// All rights reserved.
Brian Silverman20350ac2021-11-17 18:19:55 -08004//
Austin Schuh745610d2015-09-06 18:19:50 -07005// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are
7// met:
Brian Silverman20350ac2021-11-17 18:19:55 -08008//
Austin Schuh745610d2015-09-06 18:19:50 -07009// * Redistributions of source code must retain the above copyright
10// notice, this list of conditions and the following disclaimer.
11// * Redistributions in binary form must reproduce the above
12// copyright notice, this list of conditions and the following disclaimer
13// in the documentation and/or other materials provided with the
14// distribution.
15// * Neither the name of Google Inc. nor the names of its
16// contributors may be used to endorse or promote products derived from
17// this software without specific prior written permission.
Brian Silverman20350ac2021-11-17 18:19:55 -080018//
Austin Schuh745610d2015-09-06 18:19:50 -070019// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31// ---
32// Author: Sanjay Ghemawat
33//
34// TODO: Log large allocations
35
36#include <config.h>
37#include <stddef.h>
38#include <stdio.h>
39#include <stdlib.h>
40#ifdef HAVE_UNISTD_H
41#include <unistd.h>
42#endif
43#ifdef HAVE_INTTYPES_H
44#include <inttypes.h>
45#endif
46#ifdef HAVE_FCNTL_H
47#include <fcntl.h> // for open()
48#endif
49#ifdef HAVE_MMAP
50#include <sys/mman.h>
51#endif
52#include <errno.h>
53#include <assert.h>
54#include <sys/types.h>
55#include <signal.h>
56
57#include <algorithm>
58#include <string>
59
60#include <gperftools/heap-profiler.h>
61
62#include "base/logging.h"
63#include "base/basictypes.h" // for PRId64, among other things
64#include "base/googleinit.h"
65#include "base/commandlineflags.h"
66#include "malloc_hook-inl.h"
67#include "tcmalloc_guard.h"
68#include <gperftools/malloc_hook.h>
69#include <gperftools/malloc_extension.h>
70#include "base/spinlock.h"
71#include "base/low_level_alloc.h"
72#include "base/sysinfo.h" // for GetUniquePathFromEnv()
73#include "heap-profile-table.h"
74#include "memory_region_map.h"
75
76
77#ifndef PATH_MAX
78#ifdef MAXPATHLEN
79#define PATH_MAX MAXPATHLEN
80#else
81#define PATH_MAX 4096 // seems conservative for max filename len!
82#endif
83#endif
84
Brian Silverman20350ac2021-11-17 18:19:55 -080085using std::string;
86using std::sort;
Austin Schuh745610d2015-09-06 18:19:50 -070087
//----------------------------------------------------------------------
// Flags that control heap-profiling
//
// The thread-safety of the profiler depends on these being immutable
// after main starts, so don't change them.
//
// Each flag's default can be overridden before startup via the
// environment variable named in its EnvToInt64/EnvToBool call.
//----------------------------------------------------------------------

DEFINE_int64(heap_profile_allocation_interval,
             EnvToInt64("HEAP_PROFILE_ALLOCATION_INTERVAL", 1 << 30 /*1GB*/),
             "If non-zero, dump heap profiling information once every "
             "specified number of bytes allocated by the program since "
             "the last dump.");
DEFINE_int64(heap_profile_deallocation_interval,
             EnvToInt64("HEAP_PROFILE_DEALLOCATION_INTERVAL", 0),
             "If non-zero, dump heap profiling information once every "
             "specified number of bytes deallocated by the program "
             "since the last dump.");
// We could also add flags that report whenever inuse_bytes changes by
// X or -X, but there hasn't been a need for that yet, so we haven't.
DEFINE_int64(heap_profile_inuse_interval,
             EnvToInt64("HEAP_PROFILE_INUSE_INTERVAL", 100 << 20 /*100MB*/),
             "If non-zero, dump heap profiling information whenever "
             "the high-water memory usage mark increases by the specified "
             "number of bytes.");
DEFINE_int64(heap_profile_time_interval,
             EnvToInt64("HEAP_PROFILE_TIME_INTERVAL", 0),
             "If non-zero, dump heap profiling information once every "
             "specified number of seconds since the last dump.");
DEFINE_bool(mmap_log,
            EnvToBool("HEAP_PROFILE_MMAP_LOG", false),
            "Should mmap/munmap calls be logged?");
DEFINE_bool(mmap_profile,
            EnvToBool("HEAP_PROFILE_MMAP", false),
            "If heap-profiling is on, also profile mmap, mremap, and sbrk)");
DEFINE_bool(only_mmap_profile,
            EnvToBool("HEAP_PROFILE_ONLY_MMAP", false),
            "If heap-profiling is on, only profile mmap, mremap, and sbrk; "
            "do not profile malloc/new/etc");
126
127
//----------------------------------------------------------------------
// Locking
//----------------------------------------------------------------------

// A pthread_mutex has way too much lock contention to be used here.
//
// I would like to use Mutex, but it can call malloc(),
// which can cause us to fall into an infinite recursion.
//
// So we use a simple spinlock.  LINKER_INITIALIZED means the lock is
// usable even before any static constructors have run.
static SpinLock heap_lock(SpinLock::LINKER_INITIALIZED);
139
//----------------------------------------------------------------------
// Simple allocator for heap profiler's internal memory
//----------------------------------------------------------------------

// Arena backing all of the profiler's internal allocations; created in
// HeapProfilerStart and deleted in HeapProfilerStop.
static LowLevelAlloc::Arena *heap_profiler_memory;
145
// Allocate profiler-internal memory from our private low-level arena,
// so bookkeeping allocations never pass through the hooked allocator.
static void* ProfilerMalloc(size_t bytes) {
  return LowLevelAlloc::AllocWithArena(bytes, heap_profiler_memory);
}
// Release memory previously obtained via ProfilerMalloc.
static void ProfilerFree(void* p) {
  LowLevelAlloc::Free(p);
}
152
// We use buffers of this size in DoGetHeapProfile.
static const int kProfileBufferSize = 1 << 20;

// This is a last-ditch buffer we use in DumpProfileLocked in case we
// can't allocate more memory from ProfilerMalloc.  We expect this
// will be used by HeapProfileEndWriter when the application has to
// exit due to out-of-memory.  This buffer is allocated in
// HeapProfilerStart.  Access to this must be protected by heap_lock.
static char* global_profiler_buffer = NULL;
162
163
//----------------------------------------------------------------------
// Profiling control/state data
//----------------------------------------------------------------------

// Access to all of these is protected by heap_lock.
static bool is_on = false;            // If we are on as a subsystem.
static bool dumping = false;          // Dumping status to prevent recursion
static char* filename_prefix = NULL;  // Prefix used for profile file names
                                      // (NULL if no need for dumping yet)
static int dump_count = 0;            // How many dumps so far
static int64 last_dump_alloc = 0;     // alloc_size when we last dumped
static int64 last_dump_free = 0;      // free_size when we last dumped
static int64 high_water_mark = 0;     // In-use-bytes at last high-water dump
static int64 last_dump_time = 0;      // The time of the last dump

static HeapProfileTable* heap_profile = NULL;  // the heap profile table
180
181//----------------------------------------------------------------------
182// Profile generation
183//----------------------------------------------------------------------
184
// Write a NUL-terminated text rendering of the current heap profile
// into 'buf' (of capacity 'buflen') and return 'buf'; returns NULL if
// 'buf' is NULL or too small.  Caller must hold heap_lock.
// Input must be a buffer of size at least 1MB.
static char* DoGetHeapProfileLocked(char* buf, int buflen) {
  // We used to be smarter about estimating the required memory and
  // then capping it to 1MB and generating the profile into that.
  if (buf == NULL || buflen < 1)
    return NULL;

  RAW_DCHECK(heap_lock.IsHeld(), "");
  int bytes_written = 0;
  if (is_on) {
    HeapProfileTable::Stats const stats = heap_profile->total();
    (void)stats;   // avoid an unused-variable warning in non-debug mode.
    // Reserve one byte of 'buflen' for the terminating NUL below.
    bytes_written = heap_profile->FillOrderedProfile(buf, buflen - 1);
    // FillOrderedProfile should not reduce the set of active mmap-ed regions,
    // hence MemoryRegionMap will let us remove everything we've added above:
    RAW_DCHECK(stats.Equivalent(heap_profile->total()), "");
    // if this fails, we somehow removed by FillOrderedProfile
    // more than we have added.
  }
  // When profiling is off (is_on == false) this produces an empty string.
  buf[bytes_written] = '\0';
  RAW_DCHECK(bytes_written == strlen(buf), "");

  return buf;
}
209
210extern "C" char* GetHeapProfile() {
211 // Use normal malloc: we return the profile to the user to free it:
212 char* buffer = reinterpret_cast<char*>(malloc(kProfileBufferSize));
213 SpinLockHolder l(&heap_lock);
214 return DoGetHeapProfileLocked(buffer, kProfileBufferSize);
215}
216
217// defined below
218static void NewHook(const void* ptr, size_t size);
219static void DeleteHook(const void* ptr);
220
221// Helper for HeapProfilerDump.
222static void DumpProfileLocked(const char* reason) {
223 RAW_DCHECK(heap_lock.IsHeld(), "");
224 RAW_DCHECK(is_on, "");
225 RAW_DCHECK(!dumping, "");
226
227 if (filename_prefix == NULL) return; // we do not yet need dumping
228
229 dumping = true;
230
231 // Make file name
232 char file_name[1000];
233 dump_count++;
234 snprintf(file_name, sizeof(file_name), "%s.%04d%s",
235 filename_prefix, dump_count, HeapProfileTable::kFileExt);
236
237 // Dump the profile
238 RAW_VLOG(0, "Dumping heap profile to %s (%s)", file_name, reason);
239 // We must use file routines that don't access memory, since we hold
240 // a memory lock now.
241 RawFD fd = RawOpenForWriting(file_name);
242 if (fd == kIllegalRawFD) {
243 RAW_LOG(ERROR, "Failed dumping heap profile to %s", file_name);
244 dumping = false;
245 return;
246 }
247
248 // This case may be impossible, but it's best to be safe.
249 // It's safe to use the global buffer: we're protected by heap_lock.
250 if (global_profiler_buffer == NULL) {
251 global_profiler_buffer =
252 reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize));
253 }
254
255 char* profile = DoGetHeapProfileLocked(global_profiler_buffer,
256 kProfileBufferSize);
257 RawWrite(fd, profile, strlen(profile));
258 RawClose(fd);
259
260 dumping = false;
261}
262
263//----------------------------------------------------------------------
264// Profile collection
265//----------------------------------------------------------------------
266
// Dump a profile after either an allocation or deallocation, if
// the memory use has changed enough since the last dump.
// Caller must hold heap_lock; the 'dumping' flag prevents recursive
// dumps triggered by allocations done while dumping.
static void MaybeDumpProfileLocked() {
  if (!dumping) {
    const HeapProfileTable::Stats& total = heap_profile->total();
    const int64 inuse_bytes = total.alloc_size - total.free_size;
    bool need_to_dump = false;
    char buf[128];

    // Triggers are checked in priority order: cumulative allocation,
    // cumulative deallocation, in-use high-water mark, elapsed time.
    // Only the first trigger that fires supplies the dump reason.
    if (FLAGS_heap_profile_allocation_interval > 0 &&
        total.alloc_size >=
        last_dump_alloc + FLAGS_heap_profile_allocation_interval) {
      snprintf(buf, sizeof(buf), ("%" PRId64 " MB allocated cumulatively, "
                                  "%" PRId64 " MB currently in use"),
               total.alloc_size >> 20, inuse_bytes >> 20);
      need_to_dump = true;
    } else if (FLAGS_heap_profile_deallocation_interval > 0 &&
               total.free_size >=
               last_dump_free + FLAGS_heap_profile_deallocation_interval) {
      snprintf(buf, sizeof(buf), ("%" PRId64 " MB freed cumulatively, "
                                  "%" PRId64 " MB currently in use"),
               total.free_size >> 20, inuse_bytes >> 20);
      need_to_dump = true;
    } else if (FLAGS_heap_profile_inuse_interval > 0 &&
               inuse_bytes >
               high_water_mark + FLAGS_heap_profile_inuse_interval) {
      snprintf(buf, sizeof(buf), "%" PRId64 " MB currently in use",
               inuse_bytes >> 20);
      need_to_dump = true;
    } else if (FLAGS_heap_profile_time_interval > 0 ) {
      int64 current_time = time(NULL);
      if (current_time - last_dump_time >=
          FLAGS_heap_profile_time_interval) {
        snprintf(buf, sizeof(buf), "%" PRId64 " sec since the last dump",
                 current_time - last_dump_time);
        need_to_dump = true;
        // Note: only the time trigger updates its own watermark here;
        // the byte-based watermarks are updated below after the dump.
        last_dump_time = current_time;
      }
    }
    if (need_to_dump) {
      DumpProfileLocked(buf);

      last_dump_alloc = total.alloc_size;
      last_dump_free = total.free_size;
      if (inuse_bytes > high_water_mark)
        high_water_mark = inuse_bytes;
    }
  }
}
316
// Record an allocation in the profile.
// 'skip_count' is how many stack frames the caller wants dropped from
// the recorded trace; +1 additionally skips this function's own frame.
static void RecordAlloc(const void* ptr, size_t bytes, int skip_count) {
  // Take the stack trace outside the critical section.
  void* stack[HeapProfileTable::kMaxStackDepth];
  int depth = HeapProfileTable::GetCallerStackTrace(skip_count + 1, stack);
  SpinLockHolder l(&heap_lock);
  if (is_on) {
    heap_profile->RecordAlloc(ptr, bytes, depth, stack);
    MaybeDumpProfileLocked();
  }
}
328
// Record a deallocation in the profile.  No stack trace is needed:
// frees are matched to their allocation entry by pointer.
static void RecordFree(const void* ptr) {
  SpinLockHolder l(&heap_lock);
  if (is_on) {
    heap_profile->RecordFree(ptr);
    MaybeDumpProfileLocked();
  }
}
337
338//----------------------------------------------------------------------
339// Allocation/deallocation hooks for MallocHook
340//----------------------------------------------------------------------
341
342// static
343void NewHook(const void* ptr, size_t size) {
344 if (ptr != NULL) RecordAlloc(ptr, size, 0);
345}
346
347// static
348void DeleteHook(const void* ptr) {
349 if (ptr != NULL) RecordFree(ptr);
350}
351
// TODO(jandrews): Re-enable stack tracing
#ifdef TODO_REENABLE_STACK_TRACING
// Callback for DumpStackTrace: forwards one pre-formatted line to RAW_LOG.
static void RawInfoStackDumper(const char* message, void*) {
  RAW_LOG(INFO, "%.*s", static_cast<int>(strlen(message) - 1), message);
  // -1 is to chop the \n which will be added by RAW_LOG
}
#endif
359
// Logging hook for mmap calls; only active when --mmap_log is set.
static void MmapHook(const void* result, const void* start, size_t size,
                     int prot, int flags, int fd, off_t offset) {
  if (FLAGS_mmap_log) {  // log it
    // We use PRIxPTR not just '%p' to avoid deadlocks
    // in pretty-printing of NULL as "nil".
    // TODO(maxim): instead should use a safe snprintf reimplementation
    // NOTE(review): 'offset' is truncated to unsigned int for the "%x"
    // conversion, so offsets >= 4GB are logged incorrectly -- confirm
    // this is acceptable for a debug log.
    RAW_LOG(INFO,
            "mmap(start=0x%" PRIxPTR ", len=%zu, prot=0x%x, flags=0x%x, "
            "fd=%d, offset=0x%x) = 0x%" PRIxPTR "",
            (uintptr_t) start, size, prot, flags, fd, (unsigned int) offset,
            (uintptr_t) result);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
}
376
// Logging hook for mremap calls; only active when --mmap_log is set.
static void MremapHook(const void* result, const void* old_addr,
                       size_t old_size, size_t new_size,
                       int flags, const void* new_addr) {
  if (FLAGS_mmap_log) {  // log it
    // We use PRIxPTR not just '%p' to avoid deadlocks
    // in pretty-printing of NULL as "nil".
    // TODO(maxim): instead should use a safe snprintf reimplementation
    RAW_LOG(INFO,
            "mremap(old_addr=0x%" PRIxPTR ", old_size=%zu, "
            "new_size=%zu, flags=0x%x, new_addr=0x%" PRIxPTR ") = "
            "0x%" PRIxPTR "",
            (uintptr_t) old_addr, old_size, new_size, flags,
            (uintptr_t) new_addr, (uintptr_t) result);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
}
395
// Logging hook for munmap calls; only active when --mmap_log is set.
static void MunmapHook(const void* ptr, size_t size) {
  if (FLAGS_mmap_log) {  // log it
    // We use PRIxPTR not just '%p' to avoid deadlocks
    // in pretty-printing of NULL as "nil".
    // TODO(maxim): instead should use a safe snprintf reimplementation
    RAW_LOG(INFO, "munmap(start=0x%" PRIxPTR ", len=%zu)",
            (uintptr_t) ptr, size);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
}
408
// Logging hook for sbrk calls; only active when --mmap_log is set.
static void SbrkHook(const void* result, ptrdiff_t increment) {
  if (FLAGS_mmap_log) {  // log it
    RAW_LOG(INFO, "sbrk(inc=%zd) = 0x%" PRIxPTR "",
            increment, (uintptr_t) result);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
}
418
419//----------------------------------------------------------------------
420// Starting/stopping/dumping
421//----------------------------------------------------------------------
422
// Public C API: turn on heap profiling.  'prefix' names the output
// files ("<prefix>.NNNN.heap").  Safe to call when already started
// (no-op).  The setup order below matters: anything that must not be
// profiled is done before the hooks are installed.
extern "C" void HeapProfilerStart(const char* prefix) {
  SpinLockHolder l(&heap_lock);

  if (is_on) return;

  is_on = true;

  RAW_VLOG(0, "Starting tracking the heap");

  // This should be done before the hooks are set up, since it should
  // call new, and we want that to be accounted for correctly.
  MallocExtension::Initialize();

  // --only_mmap_profile implies --mmap_profile.
  if (FLAGS_only_mmap_profile) {
    FLAGS_mmap_profile = true;
  }

  if (FLAGS_mmap_profile) {
    // Ask MemoryRegionMap to record all mmap, mremap, and sbrk
    // call stack traces of at least size kMaxStackDepth:
    MemoryRegionMap::Init(HeapProfileTable::kMaxStackDepth,
                          /* use_buckets */ true);
  }

  if (FLAGS_mmap_log) {
    // Install our hooks to do the logging:
    RAW_CHECK(MallocHook::AddMmapHook(&MmapHook), "");
    RAW_CHECK(MallocHook::AddMremapHook(&MremapHook), "");
    RAW_CHECK(MallocHook::AddMunmapHook(&MunmapHook), "");
    RAW_CHECK(MallocHook::AddSbrkHook(&SbrkHook), "");
  }

  heap_profiler_memory =
      LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());

  // Reserve space now for the heap profiler, so we can still write a
  // heap profile even if the application runs out of memory.
  global_profiler_buffer =
      reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize));

  // Placement-new the table into arena memory; torn down with an
  // explicit destructor call in HeapProfilerStop.
  heap_profile = new(ProfilerMalloc(sizeof(HeapProfileTable)))
      HeapProfileTable(ProfilerMalloc, ProfilerFree, FLAGS_mmap_profile);

  last_dump_alloc = 0;
  last_dump_free = 0;
  high_water_mark = 0;
  last_dump_time = 0;

  // We do not reset dump_count so if the user does a sequence of
  // HeapProfilerStart/HeapProfileStop, we will get a continuous
  // sequence of profiles.

  if (FLAGS_only_mmap_profile == false) {
    // Now set the hooks that capture new/delete and malloc/free.
    RAW_CHECK(MallocHook::AddNewHook(&NewHook), "");
    RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook), "");
  }

  // Copy filename prefix into profiler-owned (arena) memory.
  RAW_DCHECK(filename_prefix == NULL, "");
  const int prefix_length = strlen(prefix);
  filename_prefix = reinterpret_cast<char*>(ProfilerMalloc(prefix_length + 1));
  memcpy(filename_prefix, prefix, prefix_length);
  filename_prefix[prefix_length] = '\0';
}
488
489extern "C" int IsHeapProfilerRunning() {
490 SpinLockHolder l(&heap_lock);
491 return is_on ? 1 : 0; // return an int, because C code doesn't have bool
492}
493
494extern "C" void HeapProfilerStop() {
495 SpinLockHolder l(&heap_lock);
496
497 if (!is_on) return;
498
499 if (FLAGS_only_mmap_profile == false) {
500 // Unset our new/delete hooks, checking they were set:
501 RAW_CHECK(MallocHook::RemoveNewHook(&NewHook), "");
502 RAW_CHECK(MallocHook::RemoveDeleteHook(&DeleteHook), "");
503 }
504 if (FLAGS_mmap_log) {
505 // Restore mmap/sbrk hooks, checking that our hooks were set:
506 RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), "");
507 RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), "");
508 RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), "");
509 RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), "");
510 }
511
512 // free profile
513 heap_profile->~HeapProfileTable();
514 ProfilerFree(heap_profile);
515 heap_profile = NULL;
516
517 // free output-buffer memory
518 ProfilerFree(global_profiler_buffer);
519
520 // free prefix
521 ProfilerFree(filename_prefix);
522 filename_prefix = NULL;
523
524 if (!LowLevelAlloc::DeleteArena(heap_profiler_memory)) {
525 RAW_LOG(FATAL, "Memory leak in HeapProfiler:");
526 }
527
528 if (FLAGS_mmap_profile) {
529 MemoryRegionMap::Shutdown();
530 }
531
532 is_on = false;
533}
534
535extern "C" void HeapProfilerDump(const char *reason) {
536 SpinLockHolder l(&heap_lock);
537 if (is_on && !dumping) {
538 DumpProfileLocked(reason);
539 }
540}
541
// Signal handler that is registered when a user selectable signal
// number is defined in the environment variable HEAPPROFILESIGNAL.
// Uses TryLock rather than Lock: if the signal interrupted a thread
// that already holds heap_lock, blocking here would deadlock, so we
// simply skip this dump.
static void HeapProfilerDumpSignal(int signal_number) {
  (void)signal_number;
  if (!heap_lock.TryLock()) {
    return;
  }
  if (is_on && !dumping) {
    DumpProfileLocked("signal");
  }
  heap_lock.Unlock();
}
554
555
556//----------------------------------------------------------------------
557// Initialization/finalization code
558//----------------------------------------------------------------------
559
560// Initialization code
561static void HeapProfilerInit() {
562 // Everything after this point is for setting up the profiler based on envvar
563 char fname[PATH_MAX];
564 if (!GetUniquePathFromEnv("HEAPPROFILE", fname)) {
565 return;
566 }
567 // We do a uid check so we don't write out files in a setuid executable.
568#ifdef HAVE_GETEUID
569 if (getuid() != geteuid()) {
570 RAW_LOG(WARNING, ("HeapProfiler: ignoring HEAPPROFILE because "
571 "program seems to be setuid\n"));
572 return;
573 }
574#endif
575
576 char *signal_number_str = getenv("HEAPPROFILESIGNAL");
577 if (signal_number_str != NULL) {
578 long int signal_number = strtol(signal_number_str, NULL, 10);
579 intptr_t old_signal_handler = reinterpret_cast<intptr_t>(signal(signal_number, HeapProfilerDumpSignal));
580 if (old_signal_handler == reinterpret_cast<intptr_t>(SIG_ERR)) {
581 RAW_LOG(FATAL, "Failed to set signal. Perhaps signal number %s is invalid\n", signal_number_str);
582 } else if (old_signal_handler == 0) {
583 RAW_LOG(INFO,"Using signal %d as heap profiling switch", signal_number);
584 } else {
585 RAW_LOG(FATAL, "Signal %d already in use\n", signal_number);
586 }
587 }
588
589 HeapProfileTable::CleanupOldProfiles(fname);
590
591 HeapProfilerStart(fname);
592}
593
// class used for finalization -- dumps the heap-profile at program exit
struct HeapProfileEndWriter {
  ~HeapProfileEndWriter() {
    char buf[128];
    if (heap_profile) {
      const HeapProfileTable::Stats& total = heap_profile->total();
      const int64 inuse_bytes = total.alloc_size - total.free_size;

      // Pick the largest unit (MB, kB, bytes) that yields a non-zero
      // count for the farewell message.
      if ((inuse_bytes >> 20) > 0) {
        snprintf(buf, sizeof(buf), ("Exiting, %" PRId64 " MB in use"),
                 inuse_bytes >> 20);
      } else if ((inuse_bytes >> 10) > 0) {
        snprintf(buf, sizeof(buf), ("Exiting, %" PRId64 " kB in use"),
                 inuse_bytes >> 10);
      } else {
        snprintf(buf, sizeof(buf), ("Exiting, %" PRId64 " bytes in use"),
                 inuse_bytes);
      }
    } else {
      snprintf(buf, sizeof(buf), ("Exiting"));
    }
    // HeapProfilerDump is a no-op when profiling is off, so this is
    // safe to call unconditionally.
    HeapProfilerDump(buf);
  }
};
618
// We want to make sure tcmalloc is up and running before starting the profiler
static const TCMallocGuard tcmalloc_initializer;
// Kick off HeapProfilerInit at static-initialization time.
REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit());
// Static whose destructor dumps a final profile at program exit.
static HeapProfileEndWriter heap_profile_end_writer;