blob: 17d86976bc42cf132e5e8ba5f3e6ebe64fecbf32 [file] [log] [blame]
Austin Schuh745610d2015-09-06 18:19:50 -07001// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
2// Copyright (c) 2005, Google Inc.
3// All rights reserved.
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are
7// met:
8//
9// * Redistributions of source code must retain the above copyright
10// notice, this list of conditions and the following disclaimer.
11// * Redistributions in binary form must reproduce the above
12// copyright notice, this list of conditions and the following disclaimer
13// in the documentation and/or other materials provided with the
14// distribution.
15// * Neither the name of Google Inc. nor the names of its
16// contributors may be used to endorse or promote products derived from
17// this software without specific prior written permission.
18//
19// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31// ---
32// Author: Sanjay Ghemawat
33//
34// TODO: Log large allocations
35
36#include <config.h>
37#include <stddef.h>
38#include <stdio.h>
39#include <stdlib.h>
40#ifdef HAVE_UNISTD_H
41#include <unistd.h>
42#endif
43#ifdef HAVE_INTTYPES_H
44#include <inttypes.h>
45#endif
46#ifdef HAVE_FCNTL_H
47#include <fcntl.h> // for open()
48#endif
49#ifdef HAVE_MMAP
50#include <sys/mman.h>
51#endif
52#include <errno.h>
53#include <assert.h>
54#include <sys/types.h>
55#include <signal.h>
56
57#include <algorithm>
58#include <string>
59
60#include <gperftools/heap-profiler.h>
61
62#include "base/logging.h"
63#include "base/basictypes.h" // for PRId64, among other things
64#include "base/googleinit.h"
65#include "base/commandlineflags.h"
66#include "malloc_hook-inl.h"
67#include "tcmalloc_guard.h"
68#include <gperftools/malloc_hook.h>
69#include <gperftools/malloc_extension.h>
70#include "base/spinlock.h"
71#include "base/low_level_alloc.h"
72#include "base/sysinfo.h" // for GetUniquePathFromEnv()
73#include "heap-profile-table.h"
74#include "memory_region_map.h"
75
76
77#ifndef PATH_MAX
78#ifdef MAXPATHLEN
79#define PATH_MAX MAXPATHLEN
80#else
81#define PATH_MAX 4096 // seems conservative for max filename len!
82#endif
83#endif
84
85using STL_NAMESPACE::string;
86using STL_NAMESPACE::sort;
87
88//----------------------------------------------------------------------
89// Flags that control heap-profiling
90//
91// The thread-safety of the profiler depends on these being immutable
92// after main starts, so don't change them.
93//----------------------------------------------------------------------
94
// Dump when cumulative allocation grows by this many bytes (default 1GB).
DEFINE_int64(heap_profile_allocation_interval,
             EnvToInt64("HEAP_PROFILE_ALLOCATION_INTERVAL", 1 << 30 /*1GB*/),
             "If non-zero, dump heap profiling information once every "
             "specified number of bytes allocated by the program since "
             "the last dump.");
// Dump when cumulative deallocation grows by this many bytes (off by default).
DEFINE_int64(heap_profile_deallocation_interval,
             EnvToInt64("HEAP_PROFILE_DEALLOCATION_INTERVAL", 0),
             "If non-zero, dump heap profiling information once every "
             "specified number of bytes deallocated by the program "
             "since the last dump.");
// We could also add flags that report whenever inuse_bytes changes by
// X or -X, but there hasn't been a need for that yet, so we haven't.
// Dump when the in-use high-water mark rises by this many bytes
// (default 100MB).
DEFINE_int64(heap_profile_inuse_interval,
             EnvToInt64("HEAP_PROFILE_INUSE_INTERVAL", 100 << 20 /*100MB*/),
             "If non-zero, dump heap profiling information whenever "
             "the high-water memory usage mark increases by the specified "
             "number of bytes.");
// Wall-clock-driven dumps (off by default).
DEFINE_int64(heap_profile_time_interval,
             EnvToInt64("HEAP_PROFILE_TIME_INTERVAL", 0),
             "If non-zero, dump heap profiling information once every "
             "specified number of seconds since the last dump.");
DEFINE_bool(mmap_log,
            EnvToBool("HEAP_PROFILE_MMAP_LOG", false),
            "Should mmap/munmap calls be logged?");
DEFINE_bool(mmap_profile,
            EnvToBool("HEAP_PROFILE_MMAP", false),
            "If heap-profiling is on, also profile mmap, mremap, and sbrk)");
DEFINE_bool(only_mmap_profile,
            EnvToBool("HEAP_PROFILE_ONLY_MMAP", false),
            "If heap-profiling is on, only profile mmap, mremap, and sbrk; "
            "do not profile malloc/new/etc");
126
127
128//----------------------------------------------------------------------
129// Locking
130//----------------------------------------------------------------------
131
// A pthread_mutex has way too much lock contention to be used here.
//
// I would like to use Mutex, but it can call malloc(),
// which can cause us to fall into an infinite recursion.
//
// So we use a simple spinlock.  LINKER_INITIALIZED means the lock is
// usable even before static constructors have run.
static SpinLock heap_lock(SpinLock::LINKER_INITIALIZED);
139
140//----------------------------------------------------------------------
141// Simple allocator for heap profiler's internal memory
142//----------------------------------------------------------------------
143
// Arena backing all of the profiler's internal allocations; created in
// HeapProfilerStart and deleted in HeapProfilerStop.  A private
// LowLevelAlloc arena keeps the profiler's own memory from going
// through the hooked malloc (which would recurse into the profiler).
static LowLevelAlloc::Arena *heap_profiler_memory;

// Allocate profiler-internal memory from the private arena.
static void* ProfilerMalloc(size_t bytes) {
  return LowLevelAlloc::AllocWithArena(bytes, heap_profiler_memory);
}
// Release memory obtained via ProfilerMalloc.
static void ProfilerFree(void* p) {
  LowLevelAlloc::Free(p);
}

// We use buffers of this size in DoGetHeapProfile.
static const int kProfileBufferSize = 1 << 20;

// This is a last-ditch buffer we use in DumpProfileLocked in case we
// can't allocate more memory from ProfilerMalloc.  We expect this
// will be used by HeapProfileEndWriter when the application has to
// exit due to out-of-memory.  This buffer is allocated in
// HeapProfilerStart.  Access to this must be protected by heap_lock.
static char* global_profiler_buffer = NULL;
162
163
164//----------------------------------------------------------------------
165// Profiling control/state data
166//----------------------------------------------------------------------
167
// Access to all of these is protected by heap_lock.
static bool is_on = false;            // If we are on as a subsystem.
static bool dumping = false;          // Dumping status to prevent recursion
static char* filename_prefix = NULL;  // Prefix used for profile file names
                                      // (NULL if no need for dumping yet)
static int dump_count = 0;            // How many dumps so far
static int64 last_dump_alloc = 0;     // alloc_size when did we last dump
static int64 last_dump_free = 0;      // free_size when did we last dump
static int64 high_water_mark = 0;     // In-use-bytes at last high-water dump
static int64 last_dump_time = 0;      // The time of the last dump

static HeapProfileTable* heap_profile = NULL;  // the heap profile table
180
181//----------------------------------------------------------------------
182// Profile generation
183//----------------------------------------------------------------------
184
// Writes the current heap profile into buf and returns buf, always
// NUL-terminated.  Returns NULL if buf is NULL or buflen < 1.
// Input must be a buffer of size at least 1MB (kProfileBufferSize).
// Caller must hold heap_lock.
static char* DoGetHeapProfileLocked(char* buf, int buflen) {
  // We used to be smarter about estimating the required memory and
  // then capping it to 1MB and generating the profile into that.
  if (buf == NULL || buflen < 1)
    return NULL;

  RAW_DCHECK(heap_lock.IsHeld(), "");
  int bytes_written = 0;
  if (is_on) {
    HeapProfileTable::Stats const stats = heap_profile->total();
    (void)stats;   // avoid an unused-variable warning in non-debug mode.
    // buflen - 1 leaves room for the terminating '\0' appended below.
    bytes_written = heap_profile->FillOrderedProfile(buf, buflen - 1);
    // FillOrderedProfile should not reduce the set of active mmap-ed regions,
    // hence MemoryRegionMap will let us remove everything we've added above:
    RAW_DCHECK(stats.Equivalent(heap_profile->total()), "");
    // if this fails, we somehow removed by FillOrderedProfile
    // more than we have added.
  }
  // If profiling is off, this yields an empty string.
  buf[bytes_written] = '\0';
  RAW_DCHECK(bytes_written == strlen(buf), "");

  return buf;
}
209
210extern "C" char* GetHeapProfile() {
211 // Use normal malloc: we return the profile to the user to free it:
212 char* buffer = reinterpret_cast<char*>(malloc(kProfileBufferSize));
213 SpinLockHolder l(&heap_lock);
214 return DoGetHeapProfileLocked(buffer, kProfileBufferSize);
215}
216
217// defined below
218static void NewHook(const void* ptr, size_t size);
219static void DeleteHook(const void* ptr);
220
221// Helper for HeapProfilerDump.
222static void DumpProfileLocked(const char* reason) {
223 RAW_DCHECK(heap_lock.IsHeld(), "");
224 RAW_DCHECK(is_on, "");
225 RAW_DCHECK(!dumping, "");
226
227 if (filename_prefix == NULL) return; // we do not yet need dumping
228
229 dumping = true;
230
231 // Make file name
232 char file_name[1000];
233 dump_count++;
234 snprintf(file_name, sizeof(file_name), "%s.%04d%s",
235 filename_prefix, dump_count, HeapProfileTable::kFileExt);
236
237 // Dump the profile
238 RAW_VLOG(0, "Dumping heap profile to %s (%s)", file_name, reason);
239 // We must use file routines that don't access memory, since we hold
240 // a memory lock now.
241 RawFD fd = RawOpenForWriting(file_name);
242 if (fd == kIllegalRawFD) {
243 RAW_LOG(ERROR, "Failed dumping heap profile to %s", file_name);
244 dumping = false;
245 return;
246 }
247
248 // This case may be impossible, but it's best to be safe.
249 // It's safe to use the global buffer: we're protected by heap_lock.
250 if (global_profiler_buffer == NULL) {
251 global_profiler_buffer =
252 reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize));
253 }
254
255 char* profile = DoGetHeapProfileLocked(global_profiler_buffer,
256 kProfileBufferSize);
257 RawWrite(fd, profile, strlen(profile));
258 RawClose(fd);
259
260 dumping = false;
261}
262
263//----------------------------------------------------------------------
264// Profile collection
265//----------------------------------------------------------------------
266
// Dump a profile after either an allocation or deallocation, if
// the memory use has changed enough since the last dump.
// Caller must hold heap_lock.  The four triggers below are checked in
// priority order (alloc growth, free growth, in-use high-water, wall
// clock); only the first one that fires supplies the dump reason.
static void MaybeDumpProfileLocked() {
  if (!dumping) {
    const HeapProfileTable::Stats& total = heap_profile->total();
    const int64 inuse_bytes = total.alloc_size - total.free_size;
    bool need_to_dump = false;
    char buf[128];
    int64 current_time = time(NULL);
    if (FLAGS_heap_profile_allocation_interval > 0 &&
        total.alloc_size >=
        last_dump_alloc + FLAGS_heap_profile_allocation_interval) {
      snprintf(buf, sizeof(buf), ("%" PRId64 " MB allocated cumulatively, "
                                  "%" PRId64 " MB currently in use"),
               total.alloc_size >> 20, inuse_bytes >> 20);
      need_to_dump = true;
    } else if (FLAGS_heap_profile_deallocation_interval > 0 &&
               total.free_size >=
               last_dump_free + FLAGS_heap_profile_deallocation_interval) {
      snprintf(buf, sizeof(buf), ("%" PRId64 " MB freed cumulatively, "
                                  "%" PRId64 " MB currently in use"),
               total.free_size >> 20, inuse_bytes >> 20);
      need_to_dump = true;
    } else if (FLAGS_heap_profile_inuse_interval > 0 &&
               inuse_bytes >
               high_water_mark + FLAGS_heap_profile_inuse_interval) {
      snprintf(buf, sizeof(buf), "%" PRId64 " MB currently in use",
               inuse_bytes >> 20);
      need_to_dump = true;
    } else if (FLAGS_heap_profile_time_interval > 0 &&
               current_time - last_dump_time >=
               FLAGS_heap_profile_time_interval) {
      snprintf(buf, sizeof(buf), "%" PRId64 " sec since the last dump",
               current_time - last_dump_time);
      need_to_dump = true;
      // NOTE: last_dump_time advances only when this time trigger
      // itself fires; dumps caused by the size triggers above do not
      // reset the clock.
      last_dump_time = current_time;
    }
    if (need_to_dump) {
      DumpProfileLocked(buf);

      // Advance the size baselines so the next dump requires a full
      // additional interval of growth.
      last_dump_alloc = total.alloc_size;
      last_dump_free = total.free_size;
      if (inuse_bytes > high_water_mark)
        high_water_mark = inuse_bytes;
    }
  }
}
314
315// Record an allocation in the profile.
316static void RecordAlloc(const void* ptr, size_t bytes, int skip_count) {
317 // Take the stack trace outside the critical section.
318 void* stack[HeapProfileTable::kMaxStackDepth];
319 int depth = HeapProfileTable::GetCallerStackTrace(skip_count + 1, stack);
320 SpinLockHolder l(&heap_lock);
321 if (is_on) {
322 heap_profile->RecordAlloc(ptr, bytes, depth, stack);
323 MaybeDumpProfileLocked();
324 }
325}
326
327// Record a deallocation in the profile.
328static void RecordFree(const void* ptr) {
329 SpinLockHolder l(&heap_lock);
330 if (is_on) {
331 heap_profile->RecordFree(ptr);
332 MaybeDumpProfileLocked();
333 }
334}
335
336//----------------------------------------------------------------------
337// Allocation/deallocation hooks for MallocHook
338//----------------------------------------------------------------------
339
340// static
341void NewHook(const void* ptr, size_t size) {
342 if (ptr != NULL) RecordAlloc(ptr, size, 0);
343}
344
345// static
346void DeleteHook(const void* ptr) {
347 if (ptr != NULL) RecordFree(ptr);
348}
349
// TODO(jandrews): Re-enable stack tracing
#ifdef TODO_REENABLE_STACK_TRACING
// Callback for DumpStackTrace: forwards each message line to RAW_LOG.
static void RawInfoStackDumper(const char* message, void*) {
  RAW_LOG(INFO, "%.*s", static_cast<int>(strlen(message) - 1), message);
  // -1 is to chop the \n which will be added by RAW_LOG
}
#endif
357
358static void MmapHook(const void* result, const void* start, size_t size,
359 int prot, int flags, int fd, off_t offset) {
360 if (FLAGS_mmap_log) { // log it
361 // We use PRIxS not just '%p' to avoid deadlocks
362 // in pretty-printing of NULL as "nil".
363 // TODO(maxim): instead should use a safe snprintf reimplementation
364 RAW_LOG(INFO,
365 "mmap(start=0x%" PRIxPTR ", len=%" PRIuS ", prot=0x%x, flags=0x%x, "
366 "fd=%d, offset=0x%x) = 0x%" PRIxPTR "",
367 (uintptr_t) start, size, prot, flags, fd, (unsigned int) offset,
368 (uintptr_t) result);
369#ifdef TODO_REENABLE_STACK_TRACING
370 DumpStackTrace(1, RawInfoStackDumper, NULL);
371#endif
372 }
373}
374
// Logs mremap calls (args and result) when FLAGS_mmap_log is set.
static void MremapHook(const void* result, const void* old_addr,
                       size_t old_size, size_t new_size,
                       int flags, const void* new_addr) {
  if (FLAGS_mmap_log) {  // log it
    // We use PRIxS not just '%p' to avoid deadlocks
    // in pretty-printing of NULL as "nil".
    // TODO(maxim): instead should use a safe snprintf reimplementation
    RAW_LOG(INFO,
            "mremap(old_addr=0x%" PRIxPTR ", old_size=%" PRIuS ", "
            "new_size=%" PRIuS ", flags=0x%x, new_addr=0x%" PRIxPTR ") = "
            "0x%" PRIxPTR "",
            (uintptr_t) old_addr, old_size, new_size, flags,
            (uintptr_t) new_addr, (uintptr_t) result);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
}
393
394static void MunmapHook(const void* ptr, size_t size) {
395 if (FLAGS_mmap_log) { // log it
396 // We use PRIxS not just '%p' to avoid deadlocks
397 // in pretty-printing of NULL as "nil".
398 // TODO(maxim): instead should use a safe snprintf reimplementation
399 RAW_LOG(INFO, "munmap(start=0x%" PRIxPTR ", len=%" PRIuS ")",
400 (uintptr_t) ptr, size);
401#ifdef TODO_REENABLE_STACK_TRACING
402 DumpStackTrace(1, RawInfoStackDumper, NULL);
403#endif
404 }
405}
406
407static void SbrkHook(const void* result, ptrdiff_t increment) {
408 if (FLAGS_mmap_log) { // log it
409 RAW_LOG(INFO, "sbrk(inc=%" PRIdS ") = 0x%" PRIxPTR "",
410 increment, (uintptr_t) result);
411#ifdef TODO_REENABLE_STACK_TRACING
412 DumpStackTrace(1, RawInfoStackDumper, NULL);
413#endif
414 }
415}
416
417//----------------------------------------------------------------------
418// Starting/stopping/dumping
419//----------------------------------------------------------------------
420
// Turns on heap profiling; profile files will be named
// "<prefix>.NNNN<kFileExt>".  No-op if profiling is already on.
// The setup order below is deliberate: MallocExtension::Initialize can
// allocate and must run before our hooks are installed.
extern "C" void HeapProfilerStart(const char* prefix) {
  SpinLockHolder l(&heap_lock);

  if (is_on) return;

  is_on = true;

  RAW_VLOG(0, "Starting tracking the heap");

  // This should be done before the hooks are set up, since it should
  // call new, and we want that to be accounted for correctly.
  MallocExtension::Initialize();

  // "only mmap" implies "mmap", so the region map below gets set up.
  if (FLAGS_only_mmap_profile) {
    FLAGS_mmap_profile = true;
  }

  if (FLAGS_mmap_profile) {
    // Ask MemoryRegionMap to record all mmap, mremap, and sbrk
    // call stack traces of at least size kMaxStackDepth:
    MemoryRegionMap::Init(HeapProfileTable::kMaxStackDepth,
                          /* use_buckets */ true);
  }

  if (FLAGS_mmap_log) {
    // Install our hooks to do the logging:
    RAW_CHECK(MallocHook::AddMmapHook(&MmapHook), "");
    RAW_CHECK(MallocHook::AddMremapHook(&MremapHook), "");
    RAW_CHECK(MallocHook::AddMunmapHook(&MunmapHook), "");
    RAW_CHECK(MallocHook::AddSbrkHook(&SbrkHook), "");
  }

  // Private arena for all profiler-internal allocations (ProfilerMalloc).
  heap_profiler_memory =
      LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());

  // Reserve space now for the heap profiler, so we can still write a
  // heap profile even if the application runs out of memory.
  global_profiler_buffer =
      reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize));

  // Placement-new the table into arena memory; torn down in
  // HeapProfilerStop via explicit destructor call + ProfilerFree.
  heap_profile = new(ProfilerMalloc(sizeof(HeapProfileTable)))
      HeapProfileTable(ProfilerMalloc, ProfilerFree, FLAGS_mmap_profile);

  last_dump_alloc = 0;
  last_dump_free = 0;
  high_water_mark = 0;
  last_dump_time = 0;

  // We do not reset dump_count so if the user does a sequence of
  // HeapProfilerStart/HeapProfileStop, we will get a continuous
  // sequence of profiles.

  if (FLAGS_only_mmap_profile == false) {
    // Now set the hooks that capture new/delete and malloc/free.
    RAW_CHECK(MallocHook::AddNewHook(&NewHook), "");
    RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook), "");
  }

  // Copy filename prefix (into profiler-owned memory; freed in Stop).
  RAW_DCHECK(filename_prefix == NULL, "");
  const int prefix_length = strlen(prefix);
  filename_prefix = reinterpret_cast<char*>(ProfilerMalloc(prefix_length + 1));
  memcpy(filename_prefix, prefix, prefix_length);
  filename_prefix[prefix_length] = '\0';
}
486
487extern "C" int IsHeapProfilerRunning() {
488 SpinLockHolder l(&heap_lock);
489 return is_on ? 1 : 0; // return an int, because C code doesn't have bool
490}
491
492extern "C" void HeapProfilerStop() {
493 SpinLockHolder l(&heap_lock);
494
495 if (!is_on) return;
496
497 if (FLAGS_only_mmap_profile == false) {
498 // Unset our new/delete hooks, checking they were set:
499 RAW_CHECK(MallocHook::RemoveNewHook(&NewHook), "");
500 RAW_CHECK(MallocHook::RemoveDeleteHook(&DeleteHook), "");
501 }
502 if (FLAGS_mmap_log) {
503 // Restore mmap/sbrk hooks, checking that our hooks were set:
504 RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), "");
505 RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), "");
506 RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), "");
507 RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), "");
508 }
509
510 // free profile
511 heap_profile->~HeapProfileTable();
512 ProfilerFree(heap_profile);
513 heap_profile = NULL;
514
515 // free output-buffer memory
516 ProfilerFree(global_profiler_buffer);
517
518 // free prefix
519 ProfilerFree(filename_prefix);
520 filename_prefix = NULL;
521
522 if (!LowLevelAlloc::DeleteArena(heap_profiler_memory)) {
523 RAW_LOG(FATAL, "Memory leak in HeapProfiler:");
524 }
525
526 if (FLAGS_mmap_profile) {
527 MemoryRegionMap::Shutdown();
528 }
529
530 is_on = false;
531}
532
533extern "C" void HeapProfilerDump(const char *reason) {
534 SpinLockHolder l(&heap_lock);
535 if (is_on && !dumping) {
536 DumpProfileLocked(reason);
537 }
538}
539
// Signal handler that is registered when a user selectable signal
// number is defined in the environment variable HEAPPROFILESIGNAL.
// Uses TryLock rather than Lock: if the signal arrives while the
// profiler already holds heap_lock, the dump request is silently
// dropped instead of deadlocking.
// NOTE(review): DumpProfileLocked uses snprintf and the logging
// machinery, which are not guaranteed async-signal-safe — this appears
// to be a known limitation of the feature; confirm before relying on it
// under heavy signal load.
static void HeapProfilerDumpSignal(int signal_number) {
  (void)signal_number;
  if (!heap_lock.TryLock()) {
    return;
  }
  if (is_on && !dumping) {
    DumpProfileLocked("signal");
  }
  heap_lock.Unlock();
}
552
553
554//----------------------------------------------------------------------
555// Initialization/finalization code
556//----------------------------------------------------------------------
557
558// Initialization code
559static void HeapProfilerInit() {
560 // Everything after this point is for setting up the profiler based on envvar
561 char fname[PATH_MAX];
562 if (!GetUniquePathFromEnv("HEAPPROFILE", fname)) {
563 return;
564 }
565 // We do a uid check so we don't write out files in a setuid executable.
566#ifdef HAVE_GETEUID
567 if (getuid() != geteuid()) {
568 RAW_LOG(WARNING, ("HeapProfiler: ignoring HEAPPROFILE because "
569 "program seems to be setuid\n"));
570 return;
571 }
572#endif
573
574 char *signal_number_str = getenv("HEAPPROFILESIGNAL");
575 if (signal_number_str != NULL) {
576 long int signal_number = strtol(signal_number_str, NULL, 10);
577 intptr_t old_signal_handler = reinterpret_cast<intptr_t>(signal(signal_number, HeapProfilerDumpSignal));
578 if (old_signal_handler == reinterpret_cast<intptr_t>(SIG_ERR)) {
579 RAW_LOG(FATAL, "Failed to set signal. Perhaps signal number %s is invalid\n", signal_number_str);
580 } else if (old_signal_handler == 0) {
581 RAW_LOG(INFO,"Using signal %d as heap profiling switch", signal_number);
582 } else {
583 RAW_LOG(FATAL, "Signal %d already in use\n", signal_number);
584 }
585 }
586
587 HeapProfileTable::CleanupOldProfiles(fname);
588
589 HeapProfilerStart(fname);
590}
591
592// class used for finalization -- dumps the heap-profile at program exit
593struct HeapProfileEndWriter {
594 ~HeapProfileEndWriter() {
595 char buf[128];
596 if (heap_profile) {
597 const HeapProfileTable::Stats& total = heap_profile->total();
598 const int64 inuse_bytes = total.alloc_size - total.free_size;
599
600 if ((inuse_bytes >> 20) > 0) {
601 snprintf(buf, sizeof(buf), ("Exiting, %" PRId64 " MB in use"),
602 inuse_bytes >> 20);
603 } else if ((inuse_bytes >> 10) > 0) {
604 snprintf(buf, sizeof(buf), ("Exiting, %" PRId64 " kB in use"),
605 inuse_bytes >> 10);
606 } else {
607 snprintf(buf, sizeof(buf), ("Exiting, %" PRId64 " bytes in use"),
608 inuse_bytes);
609 }
610 } else {
611 snprintf(buf, sizeof(buf), ("Exiting"));
612 }
613 HeapProfilerDump(buf);
614 }
615};
616
// We want to make sure tcmalloc is up and running before starting the profiler
static const TCMallocGuard tcmalloc_initializer;
// Auto-start profiling at static-initialization time when HEAPPROFILE is set.
REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit());
// Its destructor runs at program exit and emits a final profile dump.
static HeapProfileEndWriter heap_profile_end_writer;