// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat
//         Maxim Lifantsev (refactoring)
//

#include <config.h>

#ifdef HAVE_UNISTD_H
#include <unistd.h>       // for write()
#endif
#include <fcntl.h>        // for open()
#ifdef HAVE_GLOB_H
#include <glob.h>
#ifndef GLOB_NOMATCH      // true on some old cygwins
# define GLOB_NOMATCH 0
#endif
#endif
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>     // for PRIxPTR
#endif
#ifdef HAVE_POLL_H
#include <poll.h>
#endif
#include <errno.h>
#include <stdarg.h>
#include <string>
#include <map>
#include <algorithm>      // for sort(), equal(), and copy()

#include "heap-profile-table.h"

#include "base/logging.h"
#include "raw_printer.h"
#include "symbolize.h"
#include <gperftools/stacktrace.h>
#include <gperftools/malloc_hook.h>
#include "memory_region_map.h"
#include "base/commandlineflags.h"
#include "base/logging.h"    // for the RawFD I/O commands
#include "base/sysinfo.h"

using std::sort;
using std::equal;
using std::copy;
using std::string;
using std::map;

using tcmalloc::FillProcSelfMaps;   // from sysinfo.h
using tcmalloc::DumpProcSelfMaps;   // from sysinfo.h

//----------------------------------------------------------------------

DEFINE_bool(cleanup_old_heap_profiles,
            EnvToBool("HEAP_PROFILE_CLEANUP", true),
            "At initialization time, delete old heap profiles.");

DEFINE_int32(heap_check_max_leaks,
             EnvToInt("HEAP_CHECK_MAX_LEAKS", 20),
             "The maximum number of leak reports to print.");

//----------------------------------------------------------------------

// header of the dumped heap profile
static const char kProfileHeader[] = "heap profile: ";
static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";
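// The text dump assembled by FillOrderedProfile()/WriteProfile() below is
// laid out roughly as follows (the sample values are made up for
// illustration):
//
//   heap profile:    123:  4567890 [   999: 88888888] @ heapprofile
//       10:     4096 [    20:     8192] @ 0x00401a2b 0x00402c3d ...
//   ...one such line per allocation bucket...
//
//   MAPPED_LIBRARIES:
//   ...contents of /proc/self/maps...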

//----------------------------------------------------------------------

const char HeapProfileTable::kFileExt[] = ".heap";

//----------------------------------------------------------------------

static const int kHashTableSize = 179999;   // Size for bucket_table_.
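// Rough cost of the table (assuming 8-byte pointers, an assumption about the
// target rather than something this file guarantees): the bucket hash table
// alone is 179999 * 8 bytes, i.e. about 1.4 MB, allocated once in the
// constructor below.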
/*static*/ const int HeapProfileTable::kMaxStackDepth;

//----------------------------------------------------------------------

// We strip out a different number of stack frames in debug mode
// because less inlining happens in that case.
#ifdef NDEBUG
static const int kStripFrames = 2;
#else
static const int kStripFrames = 3;
#endif

// For sorting Stats or Buckets by in-use space
static bool ByAllocatedSpace(HeapProfileTable::Stats* a,
                             HeapProfileTable::Stats* b) {
  // Return true iff "a" has more allocated space than "b"
  return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size);
}

//----------------------------------------------------------------------

HeapProfileTable::HeapProfileTable(Allocator alloc,
                                   DeAllocator dealloc,
                                   bool profile_mmap)
    : alloc_(alloc),
      dealloc_(dealloc),
      profile_mmap_(profile_mmap),
      bucket_table_(NULL),
      num_buckets_(0),
      address_map_(NULL) {
  // Make a hash table for buckets.
  const int table_bytes = kHashTableSize * sizeof(*bucket_table_);
  bucket_table_ = static_cast<Bucket**>(alloc_(table_bytes));
  memset(bucket_table_, 0, table_bytes);

  // Make an allocation map.
  address_map_ =
      new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);

  // Initialize.
  memset(&total_, 0, sizeof(total_));
  num_buckets_ = 0;
}

HeapProfileTable::~HeapProfileTable() {
  // Free the allocation map.
  address_map_->~AllocationMap();
  dealloc_(address_map_);
  address_map_ = NULL;

  // Free the hash table.
  for (int i = 0; i < kHashTableSize; i++) {
    for (Bucket* curr = bucket_table_[i]; curr != 0; /**/) {
      Bucket* bucket = curr;
      curr = curr->next;
      dealloc_(bucket->stack);
      dealloc_(bucket);
    }
  }
  dealloc_(bucket_table_);
  bucket_table_ = NULL;
}

HeapProfileTable::Bucket* HeapProfileTable::GetBucket(
    int depth, const void* const key[]) {
  // Make hash-value
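  // (Roughly a Jenkins "one-at-a-time" style mix of the frame addresses --
  // a characterization, not a claim made elsewhere in this file; the
  // shift/xor steps after the loop scramble the bits further before the
  // value is reduced modulo kHashTableSize below.)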
  uintptr_t h = 0;
  for (int i = 0; i < depth; i++) {
    h += reinterpret_cast<uintptr_t>(key[i]);
    h += h << 10;
    h ^= h >> 6;
  }
  h += h << 3;
  h ^= h >> 11;

  // Lookup stack trace in table
  unsigned int buck = ((unsigned int) h) % kHashTableSize;
  for (Bucket* b = bucket_table_[buck]; b != 0; b = b->next) {
    if ((b->hash == h) &&
        (b->depth == depth) &&
        equal(key, key + depth, b->stack)) {
      return b;
    }
  }

  // Create new bucket
  const size_t key_size = sizeof(key[0]) * depth;
  const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size));
  copy(key, key + depth, kcopy);
  Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket)));
  memset(b, 0, sizeof(*b));
  b->hash = h;
  b->depth = depth;
  b->stack = kcopy;
  b->next = bucket_table_[buck];
  bucket_table_[buck] = b;
  num_buckets_++;
  return b;
}

int HeapProfileTable::GetCallerStackTrace(
    int skip_count, void* stack[kMaxStackDepth]) {
  return MallocHook::GetCallerStackTrace(
      stack, kMaxStackDepth, kStripFrames + skip_count + 1);
}

void HeapProfileTable::RecordAlloc(
    const void* ptr, size_t bytes, int stack_depth,
    const void* const call_stack[]) {
  Bucket* b = GetBucket(stack_depth, call_stack);
  b->allocs++;
  b->alloc_size += bytes;
  total_.allocs++;
  total_.alloc_size += bytes;

  AllocValue v;
  v.set_bucket(b);  // also did set_live(false); set_ignore(false)
  v.bytes = bytes;
  address_map_->Insert(ptr, v);
}
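
// A typical call sequence from an allocation hook looks roughly like the
// sketch below (illustrative only; the actual hooks live in the heap
// profiler / leak checker, not in this file):
//
//   void* stack[HeapProfileTable::kMaxStackDepth];
//   int depth = table->GetCallerStackTrace(0, stack);
//   table->RecordAlloc(ptr, size, depth, stack);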

void HeapProfileTable::RecordFree(const void* ptr) {
  AllocValue v;
  if (address_map_->FindAndRemove(ptr, &v)) {
    Bucket* b = v.bucket();
    b->frees++;
    b->free_size += v.bytes;
    total_.frees++;
    total_.free_size += v.bytes;
  }
}
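
// Note: frees of pointers that were never recorded (e.g. memory allocated
// before profiling was turned on) fail the FindAndRemove() above and are
// silently ignored.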

bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const {
  const AllocValue* alloc_value = address_map_->Find(ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}

bool HeapProfileTable::FindAllocDetails(const void* ptr,
                                        AllocInfo* info) const {
  const AllocValue* alloc_value = address_map_->Find(ptr);
  if (alloc_value != NULL) {
    info->object_size = alloc_value->bytes;
    info->call_stack = alloc_value->bucket()->stack;
    info->stack_depth = alloc_value->bucket()->depth;
  }
  return alloc_value != NULL;
}

bool HeapProfileTable::FindInsideAlloc(const void* ptr,
                                       size_t max_size,
                                       const void** object_ptr,
                                       size_t* object_size) const {
  const AllocValue* alloc_value =
      address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}

bool HeapProfileTable::MarkAsLive(const void* ptr) {
  AllocValue* alloc = address_map_->FindMutable(ptr);
  if (alloc && !alloc->live()) {
    alloc->set_live(true);
    return true;
  }
  return false;
}

void HeapProfileTable::MarkAsIgnored(const void* ptr) {
  AllocValue* alloc = address_map_->FindMutable(ptr);
  if (alloc) {
    alloc->set_ignore(true);
  }
}

// We'd be happier using a snprintf-style formatting helper here, but we
// avoid one to reduce dependencies.
int HeapProfileTable::UnparseBucket(const Bucket& b,
                                    char* buf, int buflen, int bufsize,
                                    const char* extra,
                                    Stats* profile_stats) {
  if (profile_stats != NULL) {
    profile_stats->allocs += b.allocs;
    profile_stats->alloc_size += b.alloc_size;
    profile_stats->frees += b.frees;
    profile_stats->free_size += b.free_size;
  }
  int printed =
      snprintf(buf + buflen, bufsize - buflen,
               "%6d: %8" PRId64 " [%6d: %8" PRId64 "] @%s",
               b.allocs - b.frees,
               b.alloc_size - b.free_size,
               b.allocs,
               b.alloc_size,
               extra);
  // If it looks like the snprintf failed, ignore the fact that we printed anything
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  for (int d = 0; d < b.depth; d++) {
    printed = snprintf(buf + buflen, bufsize - buflen, " 0x%08" PRIxPTR,
                       reinterpret_cast<uintptr_t>(b.stack[d]));
    if (printed < 0 || printed >= bufsize - buflen) return buflen;
    buflen += printed;
  }
  printed = snprintf(buf + buflen, bufsize - buflen, "\n");
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  return buflen;
}
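
// For reference, a line emitted by UnparseBucket() looks like this (the
// values and addresses are made up):
//       3:     2048 [     7:     8192] @ 0x00401a2b 0x00402c3d
// i.e. live objects: live bytes [total allocs: total allocated bytes],
// followed by the bucket's stack PCs; the grand-total bucket is printed
// with extra == " heapprofile" after the '@' instead.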

HeapProfileTable::Bucket**
HeapProfileTable::MakeSortedBucketList() const {
  // Only num_buckets_ pointers are needed, hence sizeof(Bucket*).
  Bucket** list =
      static_cast<Bucket**>(alloc_(sizeof(Bucket*) * num_buckets_));

  int bucket_count = 0;
  for (int i = 0; i < kHashTableSize; i++) {
    for (Bucket* curr = bucket_table_[i]; curr != 0; curr = curr->next) {
      list[bucket_count++] = curr;
    }
  }
  RAW_DCHECK(bucket_count == num_buckets_, "");

  sort(list, list + num_buckets_, ByAllocatedSpace);

  return list;
}

void HeapProfileTable::IterateOrderedAllocContexts(
    AllocContextIterator callback) const {
  Bucket** list = MakeSortedBucketList();
  AllocContextInfo info;
  for (int i = 0; i < num_buckets_; ++i) {
    *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]);
    info.stack_depth = list[i]->depth;
    info.call_stack = list[i]->stack;
    callback(info);
  }
  dealloc_(list);
}

int HeapProfileTable::FillOrderedProfile(char buf[], int size) const {
  Bucket** list = MakeSortedBucketList();

  // Our file format is "bucket, bucket, ..., bucket, proc_self_maps_info".
  // If buf is too small, we'd rather leave out the last buckets than
  // leave out the /proc/self/maps info.  To ensure that, we actually
  // print the /proc/self/maps info first, then move it to the end of
  // the buffer, then write the bucket info into whatever is remaining,
  // and then move the maps info one last time to close any gaps.  Whew!
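  //
  // Illustration of the shuffle (offsets are invented for the example;
  // suppose size == 100 and the maps section needs 30 bytes):
  //   1. the maps text is written at buf[0..30)
  //   2. it is memmove'd to buf[70..100) and size is reduced to 70
  //   3. bucket lines are written at buf[0..bucket_length)
  //   4. the maps text is memmove'd down to start at buf[bucket_length]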
  int map_length = snprintf(buf, size, "%s", kProcSelfMapsHeader);
  if (map_length < 0 || map_length >= size) {
    dealloc_(list);
    return 0;
  }
  bool dummy;   // "wrote_all" -- did /proc/self/maps fit in its entirety?
  map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy);
  RAW_DCHECK(map_length <= size, "");
  char* const map_start = buf + size - map_length;      // move to end
  memmove(map_start, buf, map_length);
  size -= map_length;

  Stats stats;
  memset(&stats, 0, sizeof(stats));
  int bucket_length = snprintf(buf, size, "%s", kProfileHeader);
  if (bucket_length < 0 || bucket_length >= size) {
    dealloc_(list);
    return 0;
  }
  bucket_length = UnparseBucket(total_, buf, bucket_length, size,
                                " heapprofile", &stats);

  // Dump the mmap list first.
  if (profile_mmap_) {
    BufferArgs buffer(buf, bucket_length, size);
    MemoryRegionMap::IterateBuckets<BufferArgs*>(DumpBucketIterator, &buffer);
    bucket_length = buffer.buflen;
  }

  for (int i = 0; i < num_buckets_; i++) {
    bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "",
                                  &stats);
  }
  RAW_DCHECK(bucket_length < size, "");

  dealloc_(list);

  RAW_DCHECK(buf + bucket_length <= map_start, "");
  memmove(buf + bucket_length, map_start, map_length);  // close the gap

  return bucket_length + map_length;
}
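
// Typical use (a sketch, not code from this file): callers allocate one
// large buffer, fill it, and write out only the used prefix, e.g.
// (kBufferSize and alloc_fn are hypothetical names):
//
//   char* buf = reinterpret_cast<char*>(alloc_fn(kBufferSize));
//   int len = table->FillOrderedProfile(buf, kBufferSize);
//   RawWrite(fd, buf, len);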

// static
void HeapProfileTable::DumpBucketIterator(const Bucket* bucket,
                                          BufferArgs* args) {
  args->buflen = UnparseBucket(*bucket, args->buf, args->buflen, args->bufsize,
                               "", NULL);
}

inline
void HeapProfileTable::DumpNonLiveIterator(const void* ptr, AllocValue* v,
                                           const DumpArgs& args) {
  if (v->live()) {
    v->set_live(false);
    return;
  }
  if (v->ignore()) {
    return;
  }
  Bucket b;
  memset(&b, 0, sizeof(b));
  b.allocs = 1;
  b.alloc_size = v->bytes;
  b.depth = v->bucket()->depth;
  b.stack = v->bucket()->stack;
  char buf[1024];
  int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats);
  RawWrite(args.fd, buf, len);
}

// Callback from NonLiveSnapshot; adds an entry to arg->dest
// if the entry is not live and is not present in arg->base.
void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v,
                                    AddNonLiveArgs* arg) {
  if (v->live()) {
    v->set_live(false);
  } else {
    if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) {
      // Present in arg->base, so do not save
    } else {
      arg->dest->Add(ptr, *v);
    }
  }
}

bool HeapProfileTable::WriteProfile(const char* file_name,
                                    const Bucket& total,
                                    AllocationMap* allocations) {
  RAW_VLOG(1, "Dumping non-live heap profile to %s", file_name);
  RawFD fd = RawOpenForWriting(file_name);
  if (fd == kIllegalRawFD) {
    RAW_LOG(ERROR, "Failed dumping filtered heap profile to %s", file_name);
    return false;
  }
  RawWrite(fd, kProfileHeader, strlen(kProfileHeader));
  char buf[512];
  int len = UnparseBucket(total, buf, 0, sizeof(buf), " heapprofile", NULL);
  RawWrite(fd, buf, len);
  const DumpArgs args(fd, NULL);
  allocations->Iterate<const DumpArgs&>(DumpNonLiveIterator, args);
  RawWrite(fd, kProcSelfMapsHeader, strlen(kProcSelfMapsHeader));
  DumpProcSelfMaps(fd);
  RawClose(fd);
  return true;
}
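
// The file written above uses the same text layout as FillOrderedProfile()
// (header line, one bucket-format line per non-live object, then
// MAPPED_LIBRARIES), so it can be post-processed with the same tooling,
// e.g. pprof -- see the error message in Snapshot::ReportLeaks() below,
// which calls this function.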

void HeapProfileTable::CleanupOldProfiles(const char* prefix) {
  if (!FLAGS_cleanup_old_heap_profiles)
    return;
  string pattern = string(prefix) + ".*" + kFileExt;
#if defined(HAVE_GLOB_H)
  glob_t g;
  const int r = glob(pattern.c_str(), GLOB_ERR, NULL, &g);
  if (r == 0 || r == GLOB_NOMATCH) {
    const int prefix_length = strlen(prefix);
    for (int i = 0; i < g.gl_pathc; i++) {
      const char* fname = g.gl_pathv[i];
      if ((strlen(fname) >= prefix_length) &&
          (memcmp(fname, prefix, prefix_length) == 0)) {
        RAW_VLOG(1, "Removing old heap profile %s", fname);
        unlink(fname);
      }
    }
  }
  globfree(&g);
#else   /* HAVE_GLOB_H */
  RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())");
#endif
}
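
// Example of what gets matched (the path and prefix are hypothetical): with
// prefix "/tmp/myprog.hprof" the glob pattern is "/tmp/myprog.hprof.*.heap",
// so stale dumps such as "/tmp/myprog.hprof.0001.heap" are unlinked at
// initialization time.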

HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() {
  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  address_map_->Iterate(AddToSnapshot, s);
  return s;
}

void HeapProfileTable::ReleaseSnapshot(Snapshot* s) {
  s->~Snapshot();
  dealloc_(s);
}

// Callback from TakeSnapshot; adds a single entry to snapshot
void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v,
                                     Snapshot* snapshot) {
  snapshot->Add(ptr, *v);
}

HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot(
    Snapshot* base) {
  RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n",
           int(total_.allocs - total_.frees),
           int(total_.alloc_size - total_.free_size));

  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  AddNonLiveArgs args;
  args.dest = s;
  args.base = base;
  address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args);
  RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n",
           int(s->total_.allocs - s->total_.frees),
           int(s->total_.alloc_size - s->total_.free_size));
  return s;
}

// Information kept per unique bucket seen
struct HeapProfileTable::Snapshot::Entry {
  int count;
  int bytes;
  Bucket* bucket;
  Entry() : count(0), bytes(0) { }

  // Order by decreasing bytes
  bool operator<(const Entry& x) const {
    return this->bytes > x.bytes;
  }
};

// State used to generate leak report.  We keep a mapping from Bucket pointer
// to the collected stats for that bucket.
struct HeapProfileTable::Snapshot::ReportState {
  map<Bucket*, Entry> buckets_;
};

// Callback from ReportLeaks; updates ReportState.
void HeapProfileTable::Snapshot::ReportCallback(const void* ptr,
                                                AllocValue* v,
                                                ReportState* state) {
  Entry* e = &state->buckets_[v->bucket()];  // Creates empty Entry first time
  e->bucket = v->bucket();
  e->count++;
  e->bytes += v->bytes;
}

void HeapProfileTable::Snapshot::ReportLeaks(const char* checker_name,
                                             const char* filename,
                                             bool should_symbolize) {
  // This is only used by the heap leak checker, but is intimately
  // tied to the allocation map that belongs in this module and is
  // therefore placed here.
  RAW_LOG(ERROR, "Leak check %s detected leaks of %zu bytes "
          "in %zu objects",
          checker_name,
          size_t(total_.alloc_size),
          size_t(total_.allocs));

  // Group objects by Bucket
  ReportState state;
  map_.Iterate(&ReportCallback, &state);

  // Sort buckets by decreasing leaked size
  const int n = state.buckets_.size();
  Entry* entries = new Entry[n];
  int dst = 0;
  for (map<Bucket*,Entry>::const_iterator iter = state.buckets_.begin();
       iter != state.buckets_.end();
       ++iter) {
    entries[dst++] = iter->second;
  }
  sort(entries, entries + n);

  // Report a bounded number of leaks to keep the leak report from
  // growing too long.
  const int to_report =
      (FLAGS_heap_check_max_leaks > 0 &&
       n > FLAGS_heap_check_max_leaks) ? FLAGS_heap_check_max_leaks : n;
  RAW_LOG(ERROR, "The %d largest leaks:", to_report);

  // Print
  SymbolTable symbolization_table;
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    for (int j = 0; j < e.bucket->depth; j++) {
      symbolization_table.Add(e.bucket->stack[j]);
    }
  }
  static const int kBufSize = 2<<10;
  char buffer[kBufSize];
  if (should_symbolize)
    symbolization_table.Symbolize();
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    base::RawPrinter printer(buffer, kBufSize);
    printer.Printf("Leak of %d bytes in %d objects allocated from:\n",
                   e.bytes, e.count);
    for (int j = 0; j < e.bucket->depth; j++) {
      const void* pc = e.bucket->stack[j];
      printer.Printf("\t@ %" PRIxPTR " %s\n",
          reinterpret_cast<uintptr_t>(pc), symbolization_table.GetSymbol(pc));
    }
    RAW_LOG(ERROR, "%s", buffer);
  }

  if (to_report < n) {
    RAW_LOG(ERROR, "Skipping leaks numbered %d..%d",
            to_report, n-1);
  }
  delete[] entries;

  // TODO: Dump the sorted Entry list instead of dumping raw data?
  // (should be much shorter)
  if (!HeapProfileTable::WriteProfile(filename, total_, &map_)) {
    RAW_LOG(ERROR, "Could not write pprof profile to %s", filename);
  }
}

void HeapProfileTable::Snapshot::ReportObject(const void* ptr,
                                              AllocValue* v,
                                              char* unused) {
  // Perhaps also log the allocation stack trace (unsymbolized)
  // on this line in case somebody finds it useful.
  RAW_LOG(ERROR, "leaked %zu byte object %p", v->bytes, ptr);
}

void HeapProfileTable::Snapshot::ReportIndividualObjects() {
  char unused;
  map_.Iterate(ReportObject, &unused);
}