// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat
//         Maxim Lifantsev (refactoring)
//

#include <config.h>

#ifdef HAVE_UNISTD_H
#include <unistd.h>   // for write()
#endif
#include <fcntl.h>    // for open()
#ifdef HAVE_GLOB_H
#include <glob.h>
#ifndef GLOB_NOMATCH  // true on some old cygwins
# define GLOB_NOMATCH 0
#endif
#endif
#ifdef HAVE_INTTYPES_H
#include <inttypes.h> // for PRIxPTR
#endif
#ifdef HAVE_POLL_H
#include <poll.h>
#endif
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>    // for snprintf()
#include <string.h>   // for memset(), memmove(), strlen()
#include <string>
#include <map>
#include <algorithm>  // for sort(), equal(), and copy()

#include "heap-profile-table.h"

#include "base/logging.h"   // also for the RawFD I/O commands
#include "raw_printer.h"
#include "symbolize.h"
#include <gperftools/stacktrace.h>
#include <gperftools/malloc_hook.h>
#include "memory_region_map.h"
#include "base/commandlineflags.h"
#include "base/sysinfo.h"

using std::sort;
using std::equal;
using std::copy;
using std::string;
using std::map;

using tcmalloc::FillProcSelfMaps;   // from sysinfo.h
using tcmalloc::DumpProcSelfMaps;   // from sysinfo.h

//----------------------------------------------------------------------

DEFINE_bool(cleanup_old_heap_profiles,
            EnvToBool("HEAP_PROFILE_CLEANUP", true),
            "At initialization time, delete old heap profiles.");

DEFINE_int32(heap_check_max_leaks,
             EnvToInt("HEAP_CHECK_MAX_LEAKS", 20),
             "The maximum number of leak reports to print.");

//----------------------------------------------------------------------

// header of the dumped heap profile
static const char kProfileHeader[] = "heap profile: ";
static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";
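// A dumped profile therefore starts with a line like the following
// (values are illustrative only):
//   heap profile:    102:  4096000 [  1102:  8192000] @ heapprofile
// followed by one line per allocation bucket and, at the very end, a
// MAPPED_LIBRARIES: section holding a copy of /proc/self/maps.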

//----------------------------------------------------------------------

const char HeapProfileTable::kFileExt[] = ".heap";

//----------------------------------------------------------------------

static const int kHashTableSize = 179999;   // Size for bucket_table_.
/*static*/ const int HeapProfileTable::kMaxStackDepth;

//----------------------------------------------------------------------

// We strip out a different number of stack frames in debug mode
// because less inlining happens in that case
#ifdef NDEBUG
static const int kStripFrames = 2;
#else
static const int kStripFrames = 3;
#endif
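// (The stripped frames are the profiler's own wrapper frames that sit
// between the user's allocation call and the point where the stack is
// captured; see GetCallerStackTrace() below.)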

// For sorting Stats or Buckets by in-use space
static bool ByAllocatedSpace(HeapProfileTable::Stats* a,
                             HeapProfileTable::Stats* b) {
  // Return true iff "a" has more allocated space than "b"
  return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size);
}

//----------------------------------------------------------------------

HeapProfileTable::HeapProfileTable(Allocator alloc,
                                   DeAllocator dealloc,
                                   bool profile_mmap)
    : alloc_(alloc),
      dealloc_(dealloc),
      profile_mmap_(profile_mmap),
      bucket_table_(NULL),
      num_buckets_(0),
      address_map_(NULL) {
  // Make a hash table for buckets.
  const int table_bytes = kHashTableSize * sizeof(*bucket_table_);
  bucket_table_ = static_cast<Bucket**>(alloc_(table_bytes));
  memset(bucket_table_, 0, table_bytes);

  // Make an allocation map.
  address_map_ =
      new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);

  // Initialize.
  memset(&total_, 0, sizeof(total_));
  num_buckets_ = 0;
}

HeapProfileTable::~HeapProfileTable() {
  // Free the allocation map.
  address_map_->~AllocationMap();
  dealloc_(address_map_);
  address_map_ = NULL;

  // Free the hash table.
  for (int i = 0; i < kHashTableSize; i++) {
    for (Bucket* curr = bucket_table_[i]; curr != 0; /**/) {
      Bucket* bucket = curr;
      curr = curr->next;
      dealloc_(bucket->stack);
      dealloc_(bucket);
    }
  }
  dealloc_(bucket_table_);
  bucket_table_ = NULL;
}

HeapProfileTable::Bucket* HeapProfileTable::GetBucket(int depth,
                                                      const void* const key[]) {
  // Make hash-value: a shift-add-xor mix of the stack addresses
  // (in the style of Bob Jenkins' one-at-a-time hash).
  uintptr_t h = 0;
  for (int i = 0; i < depth; i++) {
    h += reinterpret_cast<uintptr_t>(key[i]);
    h += h << 10;
    h ^= h >> 6;
  }
  h += h << 3;
  h ^= h >> 11;

  // Lookup stack trace in table
  unsigned int buck = ((unsigned int) h) % kHashTableSize;
  for (Bucket* b = bucket_table_[buck]; b != 0; b = b->next) {
    if ((b->hash == h) &&
        (b->depth == depth) &&
        equal(key, key + depth, b->stack)) {
      return b;
    }
  }

  // Create new bucket
  const size_t key_size = sizeof(key[0]) * depth;
  const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size));
  copy(key, key + depth, kcopy);
  Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket)));
  memset(b, 0, sizeof(*b));
  b->hash = h;
  b->depth = depth;
  b->stack = kcopy;
  b->next = bucket_table_[buck];
  bucket_table_[buck] = b;
  num_buckets_++;
  return b;
}

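// Skip the profiler's own kStripFrames wrapper frames plus one more for
// this function's own frame, on top of whatever skip_count the caller
// asked for.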
int HeapProfileTable::GetCallerStackTrace(
    int skip_count, void* stack[kMaxStackDepth]) {
  return MallocHook::GetCallerStackTrace(
      stack, kMaxStackDepth, kStripFrames + skip_count + 1);
}

void HeapProfileTable::RecordAlloc(
    const void* ptr, size_t bytes, int stack_depth,
    const void* const call_stack[]) {
  Bucket* b = GetBucket(stack_depth, call_stack);
  b->allocs++;
  b->alloc_size += bytes;
  total_.allocs++;
  total_.alloc_size += bytes;

  AllocValue v;
  v.set_bucket(b);  // also did set_live(false); set_ignore(false)
  v.bytes = bytes;
  address_map_->Insert(ptr, v);
}

void HeapProfileTable::RecordFree(const void* ptr) {
  AllocValue v;
  if (address_map_->FindAndRemove(ptr, &v)) {
    Bucket* b = v.bucket();
    b->frees++;
    b->free_size += v.bytes;
    total_.frees++;
    total_.free_size += v.bytes;
  }
}

bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const {
  const AllocValue* alloc_value = address_map_->Find(ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}

bool HeapProfileTable::FindAllocDetails(const void* ptr,
                                        AllocInfo* info) const {
  const AllocValue* alloc_value = address_map_->Find(ptr);
  if (alloc_value != NULL) {
    info->object_size = alloc_value->bytes;
    info->call_stack = alloc_value->bucket()->stack;
    info->stack_depth = alloc_value->bucket()->depth;
  }
  return alloc_value != NULL;
}

bool HeapProfileTable::FindInsideAlloc(const void* ptr,
                                       size_t max_size,
                                       const void** object_ptr,
                                       size_t* object_size) const {
  const AllocValue* alloc_value =
      address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}

bool HeapProfileTable::MarkAsLive(const void* ptr) {
  AllocValue* alloc = address_map_->FindMutable(ptr);
  if (alloc && !alloc->live()) {
    alloc->set_live(true);
    return true;
  }
  return false;
}

void HeapProfileTable::MarkAsIgnored(const void* ptr) {
  AllocValue* alloc = address_map_->FindMutable(ptr);
  if (alloc) {
    alloc->set_ignore(true);
  }
}

// We'd be happier using a snprintf-style helper here, but we don't,
// to reduce dependencies.
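// Each bucket is emitted as a single line of the form (illustrative values):
//      3:     1024 [     7:     4096] @ 0x004007a1 0x004012f3
// i.e. "<live objects>: <live bytes> [<total allocs>: <total bytes>] @ <stack PCs>".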
int HeapProfileTable::UnparseBucket(const Bucket& b,
                                    char* buf, int buflen, int bufsize,
                                    const char* extra,
                                    Stats* profile_stats) {
  if (profile_stats != NULL) {
    profile_stats->allocs += b.allocs;
    profile_stats->alloc_size += b.alloc_size;
    profile_stats->frees += b.frees;
    profile_stats->free_size += b.free_size;
  }
  int printed =
      snprintf(buf + buflen, bufsize - buflen,
               "%6d: %8" PRId64 " [%6d: %8" PRId64 "] @%s",
               b.allocs - b.frees,
               b.alloc_size - b.free_size,
               b.allocs,
               b.alloc_size,
               extra);
  // If it looks like the snprintf failed, ignore the fact that we printed anything
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  for (int d = 0; d < b.depth; d++) {
    printed = snprintf(buf + buflen, bufsize - buflen, " 0x%08" PRIxPTR,
                       reinterpret_cast<uintptr_t>(b.stack[d]));
    if (printed < 0 || printed >= bufsize - buflen) return buflen;
    buflen += printed;
  }
  printed = snprintf(buf + buflen, bufsize - buflen, "\n");
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  return buflen;
}

HeapProfileTable::Bucket**
HeapProfileTable::MakeSortedBucketList() const {
  // Allocate an array of Bucket pointers (not Buckets), one per bucket.
  Bucket** list = static_cast<Bucket**>(alloc_(sizeof(Bucket*) * num_buckets_));

  int bucket_count = 0;
  for (int i = 0; i < kHashTableSize; i++) {
    for (Bucket* curr = bucket_table_[i]; curr != 0; curr = curr->next) {
      list[bucket_count++] = curr;
    }
  }
  RAW_DCHECK(bucket_count == num_buckets_, "");

  sort(list, list + num_buckets_, ByAllocatedSpace);

  return list;
}

void HeapProfileTable::IterateOrderedAllocContexts(
    AllocContextIterator callback) const {
  Bucket** list = MakeSortedBucketList();
  AllocContextInfo info;
  for (int i = 0; i < num_buckets_; ++i) {
    *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]);
    info.stack_depth = list[i]->depth;
    info.call_stack = list[i]->stack;
    callback(info);
  }
  dealloc_(list);
}

int HeapProfileTable::FillOrderedProfile(char buf[], int size) const {
  Bucket** list = MakeSortedBucketList();

  // Our file format is "bucket, bucket, ..., bucket, proc_self_maps_info".
  // In case buf is too small, we'd rather leave out the last
  // buckets than leave out the /proc/self/maps info.  To ensure that,
  // we actually print the /proc/self/maps info first, then move it to
  // the end of the buffer, then write the bucket info into whatever
  // is remaining, and then move the maps info one last time to close
  // any gaps.  Whew!
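  //
  // Sketch of the buffer during that shuffle (not to scale):
  //   [maps..........................]            after FillProcSelfMaps
  //   [..........................][maps]          maps moved to the end
  //   [buckets...][....gap....][maps]             buckets written at the front
  //   [buckets...][maps]                          gap closed; total length returned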
  int map_length = snprintf(buf, size, "%s", kProcSelfMapsHeader);
  if (map_length < 0 || map_length >= size) {
    dealloc_(list);
    return 0;
  }
  bool dummy;   // "wrote_all" -- did /proc/self/maps fit in its entirety?
  map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy);
  RAW_DCHECK(map_length <= size, "");
  char* const map_start = buf + size - map_length;  // move to end
  memmove(map_start, buf, map_length);
  size -= map_length;

  Stats stats;
  memset(&stats, 0, sizeof(stats));
  int bucket_length = snprintf(buf, size, "%s", kProfileHeader);
  if (bucket_length < 0 || bucket_length >= size) {
    dealloc_(list);
    return 0;
  }
  bucket_length = UnparseBucket(total_, buf, bucket_length, size,
                                " heapprofile", &stats);

  // Dump the mmap list first.
  if (profile_mmap_) {
    BufferArgs buffer(buf, bucket_length, size);
    MemoryRegionMap::IterateBuckets<BufferArgs*>(DumpBucketIterator, &buffer);
    bucket_length = buffer.buflen;
  }

  for (int i = 0; i < num_buckets_; i++) {
    bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "",
                                  &stats);
  }
  RAW_DCHECK(bucket_length < size, "");

  dealloc_(list);

  RAW_DCHECK(buf + bucket_length <= map_start, "");
  memmove(buf + bucket_length, map_start, map_length);  // close the gap

  return bucket_length + map_length;
}

// static
void HeapProfileTable::DumpBucketIterator(const Bucket* bucket,
                                          BufferArgs* args) {
  args->buflen = UnparseBucket(*bucket, args->buf, args->buflen, args->bufsize,
                               "", NULL);
}

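// Callback from WriteProfile's walk over the allocation map: dumps every
// allocation that is not marked live (clearing the live mark as it goes)
// and skips allocations marked as ignored.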
inline
void HeapProfileTable::DumpNonLiveIterator(const void* ptr, AllocValue* v,
                                           const DumpArgs& args) {
  if (v->live()) {
    v->set_live(false);
    return;
  }
  if (v->ignore()) {
    return;
  }
  Bucket b;
  memset(&b, 0, sizeof(b));
  b.allocs = 1;
  b.alloc_size = v->bytes;
  b.depth = v->bucket()->depth;
  b.stack = v->bucket()->stack;
  char buf[1024];
  int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats);
  RawWrite(args.fd, buf, len);
}

// Callback from NonLiveSnapshot; adds an entry to arg->dest
// if the entry is not live and is not present in arg->base.
void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v,
                                    AddNonLiveArgs* arg) {
  if (v->live()) {
    v->set_live(false);
  } else {
    if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) {
      // Present in arg->base, so do not save
    } else {
      arg->dest->Add(ptr, *v);
    }
  }
}

bool HeapProfileTable::WriteProfile(const char* file_name,
                                    const Bucket& total,
                                    AllocationMap* allocations) {
  RAW_VLOG(1, "Dumping non-live heap profile to %s", file_name);
  RawFD fd = RawOpenForWriting(file_name);
  if (fd != kIllegalRawFD) {
    RawWrite(fd, kProfileHeader, strlen(kProfileHeader));
    char buf[512];
    int len = UnparseBucket(total, buf, 0, sizeof(buf), " heapprofile",
                            NULL);
    RawWrite(fd, buf, len);
    const DumpArgs args(fd, NULL);
    allocations->Iterate<const DumpArgs&>(DumpNonLiveIterator, args);
    RawWrite(fd, kProcSelfMapsHeader, strlen(kProcSelfMapsHeader));
    DumpProcSelfMaps(fd);
    RawClose(fd);
    return true;
  } else {
    RAW_LOG(ERROR, "Failed dumping filtered heap profile to %s", file_name);
    return false;
  }
}

void HeapProfileTable::CleanupOldProfiles(const char* prefix) {
  if (!FLAGS_cleanup_old_heap_profiles)
    return;
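  // Heap profile dumps are conventionally named "<prefix>.<seq>.heap"
  // (e.g. "myprog.0001.heap"); the "<prefix>.*.heap" glob below is meant
  // to catch all of them, whatever the exact sequence format is.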
  string pattern = string(prefix) + ".*" + kFileExt;
#if defined(HAVE_GLOB_H)
  glob_t g;
  const int r = glob(pattern.c_str(), GLOB_ERR, NULL, &g);
  if (r == 0 || r == GLOB_NOMATCH) {
    const int prefix_length = strlen(prefix);
    for (int i = 0; i < g.gl_pathc; i++) {
      const char* fname = g.gl_pathv[i];
      if ((strlen(fname) >= prefix_length) &&
          (memcmp(fname, prefix, prefix_length) == 0)) {
        RAW_VLOG(1, "Removing old heap profile %s", fname);
        unlink(fname);
      }
    }
  }
  globfree(&g);
#else /* HAVE_GLOB_H */
  RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())");
#endif
}

HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() {
  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  address_map_->Iterate(AddToSnapshot, s);
  return s;
}

void HeapProfileTable::ReleaseSnapshot(Snapshot* s) {
  s->~Snapshot();
  dealloc_(s);
}

// Callback from TakeSnapshot; adds a single entry to snapshot
void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v,
                                     Snapshot* snapshot) {
  snapshot->Add(ptr, *v);
}

HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot(
    Snapshot* base) {
  RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n",
           int(total_.allocs - total_.frees),
           int(total_.alloc_size - total_.free_size));

  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  AddNonLiveArgs args;
  args.dest = s;
  args.base = base;
  address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args);
  RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n",
           int(s->total_.allocs - s->total_.frees),
           int(s->total_.alloc_size - s->total_.free_size));
  return s;
}

// Information kept per unique bucket seen
struct HeapProfileTable::Snapshot::Entry {
  int count;
  int bytes;
  Bucket* bucket;
  Entry() : count(0), bytes(0) { }

  // Order by decreasing bytes
  bool operator<(const Entry& x) const {
    return this->bytes > x.bytes;
  }
};

// State used to generate a leak report.  We keep a mapping from Bucket pointer
// to the collected stats for that bucket.
struct HeapProfileTable::Snapshot::ReportState {
  map<Bucket*, Entry> buckets_;
};

// Callback from ReportLeaks; updates ReportState.
void HeapProfileTable::Snapshot::ReportCallback(const void* ptr,
                                                AllocValue* v,
                                                ReportState* state) {
  Entry* e = &state->buckets_[v->bucket()];  // Creates empty Entry first time
  e->bucket = v->bucket();
  e->count++;
  e->bytes += v->bytes;
}

void HeapProfileTable::Snapshot::ReportLeaks(const char* checker_name,
                                             const char* filename,
                                             bool should_symbolize) {
  // This is only used by the heap leak checker, but is intimately
  // tied to the allocation map that belongs in this module and is
  // therefore placed here.
  RAW_LOG(ERROR, "Leak check %s detected leaks of %" PRIuS " bytes "
          "in %" PRIuS " objects",
          checker_name,
          size_t(total_.alloc_size),
          size_t(total_.allocs));

  // Group objects by Bucket
  ReportState state;
  map_.Iterate(&ReportCallback, &state);

  // Sort buckets by decreasing leaked size
  const int n = state.buckets_.size();
  Entry* entries = new Entry[n];
  int dst = 0;
  for (map<Bucket*,Entry>::const_iterator iter = state.buckets_.begin();
       iter != state.buckets_.end();
       ++iter) {
    entries[dst++] = iter->second;
  }
  sort(entries, entries + n);

  // Report a bounded number of leaks to keep the leak report from
  // growing too long.
  const int to_report =
      (FLAGS_heap_check_max_leaks > 0 &&
       n > FLAGS_heap_check_max_leaks) ? FLAGS_heap_check_max_leaks : n;
  RAW_LOG(ERROR, "The %d largest leaks:", to_report);

  // Print
  SymbolTable symbolization_table;
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    for (int j = 0; j < e.bucket->depth; j++) {
      symbolization_table.Add(e.bucket->stack[j]);
    }
  }
  static const int kBufSize = 2<<10;
  char buffer[kBufSize];
  if (should_symbolize)
    symbolization_table.Symbolize();
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    base::RawPrinter printer(buffer, kBufSize);
    printer.Printf("Leak of %d bytes in %d objects allocated from:\n",
                   e.bytes, e.count);
    for (int j = 0; j < e.bucket->depth; j++) {
      const void* pc = e.bucket->stack[j];
      printer.Printf("\t@ %" PRIxPTR " %s\n",
                     reinterpret_cast<uintptr_t>(pc),
                     symbolization_table.GetSymbol(pc));
    }
    RAW_LOG(ERROR, "%s", buffer);
  }

  if (to_report < n) {
    RAW_LOG(ERROR, "Skipping leaks numbered %d..%d",
            to_report, n-1);
  }
  delete[] entries;

  // TODO: Dump the sorted Entry list instead of dumping raw data?
  // (should be much shorter)
  if (!HeapProfileTable::WriteProfile(filename, total_, &map_)) {
    RAW_LOG(ERROR, "Could not write pprof profile to %s", filename);
  }
}

void HeapProfileTable::Snapshot::ReportObject(const void* ptr,
                                              AllocValue* v,
                                              char* unused) {
  // Perhaps also log the allocation stack trace (unsymbolized)
  // on this line in case somebody finds it useful.
  RAW_LOG(ERROR, "leaked %" PRIuS " byte object %p", v->bytes, ptr);
}

void HeapProfileTable::Snapshot::ReportIndividualObjects() {
  char unused;
  map_.Iterate(ReportObject, &unused);
}