// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// All Rights Reserved.
//
// Author: Maxim Lifantsev
//

#include "config.h"

#include <fcntl.h>    // for O_RDONLY (we use syscall to do actual reads)
#include <string.h>
#include <errno.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_MMAP
#include <sys/mman.h>
#endif
#ifdef HAVE_PTHREAD
#include <pthread.h>
#endif
#include <sys/stat.h>
#include <sys/types.h>
#include <time.h>
#include <assert.h>

#if defined(HAVE_LINUX_PTRACE_H)
#include <linux/ptrace.h>
#endif
#ifdef HAVE_SYS_SYSCALL_H
#include <sys/syscall.h>
#endif
#if defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__) || defined(__MINGW32__)
#include <wtypes.h>
#include <winbase.h>
#undef ERROR  // windows defines these as macros, which can cause trouble
#undef max
#undef min
#endif

#include <string>
#include <vector>
#include <map>
#include <set>
#include <algorithm>
#include <functional>

#include <gperftools/heap-checker.h>

#include "base/basictypes.h"
#include "base/googleinit.h"
#include "base/logging.h"
#include <gperftools/stacktrace.h>
#include "base/commandlineflags.h"
#include "base/elfcore.h"  // for i386_regs
#include "base/thread_lister.h"
#include "heap-profile-table.h"
#include "base/low_level_alloc.h"
#include "malloc_hook-inl.h"
#include <gperftools/malloc_hook.h>
#include <gperftools/malloc_extension.h>
#include "maybe_threads.h"
#include "memory_region_map.h"
#include "base/spinlock.h"
#include "base/sysinfo.h"
#include "base/stl_allocator.h"

using std::string;
using std::basic_string;
using std::pair;
using std::map;
using std::set;
using std::vector;
using std::swap;
using std::make_pair;
using std::min;
using std::max;
using std::less;
using std::char_traits;

// If the current process is being ptrace()d, 'TracerPid' in /proc/self/status
// will be non-zero.
static bool IsDebuggerAttached(void) {  // only works under linux, probably
  char buf[256];  // TracerPid comes relatively early in the status output
  int fd = open("/proc/self/status", O_RDONLY);
  if (fd == -1) {
    return false;  // Can't tell for sure.
  }
  const int len = read(fd, buf, sizeof(buf));
  bool rc = false;
  if (len > 0) {
    const char *const kTracerPid = "TracerPid:\t";
    buf[len - 1] = '\0';
    const char *p = strstr(buf, kTracerPid);
    if (p != NULL) {
      rc = (strncmp(p + strlen(kTracerPid), "0\n", 2) != 0);
    }
  }
  close(fd);
  return rc;
}
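
// For reference, a sketch of what the beginning of /proc/self/status
// looks like (field values are illustrative):
//
//   Name:       myprogram
//   ...
//   TracerPid:  0     <- 0 when not traced; otherwise the pid of the tracer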

// This is the default if you don't link in -lprofiler
extern "C" {
ATTRIBUTE_WEAK PERFTOOLS_DLL_DECL bool ProfilingIsEnabledForAllThreads();
bool ProfilingIsEnabledForAllThreads() { return false; }
}

//----------------------------------------------------------------------
// Flags that control heap-checking
//----------------------------------------------------------------------

DEFINE_string(heap_check,
              EnvToString("HEAPCHECK", ""),
              "The heap leak checking to be done over the whole executable: "
              "\"minimal\", \"normal\", \"strict\", "
              "\"draconian\", \"as-is\", and \"local\" "
              "or the empty string are the supported choices. "
              "(See HeapLeakChecker_InternalInitStart for details.)");

DEFINE_bool(heap_check_report, true, "Obsolete");

DEFINE_bool(heap_check_before_constructors,
            true,
            "deprecated; pretty much always true now");

DEFINE_bool(heap_check_after_destructors,
            EnvToBool("HEAP_CHECK_AFTER_DESTRUCTORS", false),
            "If overall heap check is to end after global destructors "
            "or right after all REGISTER_HEAPCHECK_CLEANUP's");

DEFINE_bool(heap_check_strict_check, true, "Obsolete");

DEFINE_bool(heap_check_ignore_global_live,
            EnvToBool("HEAP_CHECK_IGNORE_GLOBAL_LIVE", true),
            "If overall heap check is to ignore heap objects reachable "
            "from the global data");

DEFINE_bool(heap_check_identify_leaks,
            EnvToBool("HEAP_CHECK_IDENTIFY_LEAKS", false),
            "If heap check should generate the addresses of the leaked "
            "objects in the memory leak profiles. This may be useful "
            "in tracking down leaks where only a small fraction of "
            "objects allocated at the same stack trace are leaked.");

DEFINE_bool(heap_check_ignore_thread_live,
            EnvToBool("HEAP_CHECK_IGNORE_THREAD_LIVE", true),
            "If set to true, objects reachable from thread stacks "
            "and registers are not reported as leaks");

DEFINE_bool(heap_check_test_pointer_alignment,
            EnvToBool("HEAP_CHECK_TEST_POINTER_ALIGNMENT", false),
            "Set to true to check if the found leak can be due to "
            "use of unaligned pointers");

// Alignment at which all pointers in memory are supposed to be located;
// use 1 if any alignment is ok.
// The heap_check_test_pointer_alignment flag controls whether we also
// try the value of 1. The larger the alignment can be, the lower the
// chance of missing real leaks.
static const size_t kPointerSourceAlignment = sizeof(void*);
DEFINE_int32(heap_check_pointer_source_alignment,
             EnvToInt("HEAP_CHECK_POINTER_SOURCE_ALIGNMENT",
                      kPointerSourceAlignment),
             "Alignment at which all pointers in memory are supposed to be "
             "located. Use 1 if any alignment is ok.");
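
// For example, to scan with no alignment assumption at all (slower, but
// able to follow unaligned pointers), one could run a binary with the
// environment override below (an illustrative invocation, not one taken
// from this codebase):
//
//   HEAPCHECK=normal HEAP_CHECK_POINTER_SOURCE_ALIGNMENT=1 ./a.out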

// A reasonable default to handle pointers inside of typical class objects:
// Too low and we won't be able to traverse pointers to normally-used
// nested objects and base parts of multiple-inherited objects.
// Too high and it will both slow down leak checking (FindInsideAlloc
// in HaveOnHeapLocked will get slower when there are large on-heap objects)
// and make it probabilistically more likely to miss leaks
// of large-sized objects.
static const int64 kHeapCheckMaxPointerOffset = 1024;
DEFINE_int64(heap_check_max_pointer_offset,
             EnvToInt("HEAP_CHECK_MAX_POINTER_OFFSET",
                      kHeapCheckMaxPointerOffset),
             "Largest pointer offset for which we traverse "
             "pointers going inside of heap allocated objects. "
             "Set to -1 to use the actual largest heap object size.");

DEFINE_bool(heap_check_run_under_gdb,
            EnvToBool("HEAP_CHECK_RUN_UNDER_GDB", false),
            "If false, turns off heap-checking library when running under gdb "
            "(normally, set to 'true' only when debugging the heap-checker)");

DEFINE_int32(heap_check_delay_seconds, 0,
             "Number of seconds to delay on-exit heap checking."
             " If you set this flag,"
             " you may also want to set exit_timeout_seconds in order to"
             " avoid exit timeouts.\n"
             "NOTE: This flag is to be used only to help diagnose issues"
             " where it is suspected that the heap checker is reporting"
             " false leaks that will disappear if the heap checker delays"
             " its checks. Report any such issues to the heap-checker"
             " maintainer(s).");

//----------------------------------------------------------------------

DEFINE_string(heap_profile_pprof,
              EnvToString("PPROF_PATH", "pprof"),
              "OBSOLETE; not used");

DEFINE_string(heap_check_dump_directory,
              EnvToString("HEAP_CHECK_DUMP_DIRECTORY", "/tmp"),
              "Directory to put heap-checker leak dump information");


//----------------------------------------------------------------------
// HeapLeakChecker global data
//----------------------------------------------------------------------

// Global lock for all the global data of this module.
static SpinLock heap_checker_lock(SpinLock::LINKER_INITIALIZED);

//----------------------------------------------------------------------

// Heap profile prefix for leak checking profiles.
// Gets assigned once when leak checking is turned on, then never modified.
static const string* profile_name_prefix = NULL;

// Whole-program heap leak checker.
// Gets assigned once when leak checking is turned on,
// then main_heap_checker is never deleted.
static HeapLeakChecker* main_heap_checker = NULL;

// Whether we will use main_heap_checker to do a check at program exit
// automatically. In any case the user can ask for more checks on
// main_heap_checker via GlobalChecker().
static bool do_main_heap_check = false;

// The heap profile we use to collect info about the heap.
// This is created in HeapLeakChecker::BeforeConstructorsLocked
// together with setting heap_checker_on (below) to true
// and registering our new/delete malloc hooks;
// similarly all are unset in HeapLeakChecker::TurnItselfOffLocked.
static HeapProfileTable* heap_profile = NULL;

// If we are doing (or going to do) any kind of heap-checking.
static bool heap_checker_on = false;

// pid of the process that does whole-program heap leak checking
static pid_t heap_checker_pid = 0;

// If we did heap profiling during the execution of global constructors
static bool constructor_heap_profiling = false;

// RAW_VLOG level we dump key INFO messages at. If you want to turn
// off these messages, set the environment variable PERFTOOLS_VERBOSE=-1.
static const int heap_checker_info_level = 0;

//----------------------------------------------------------------------
// HeapLeakChecker's own memory allocator that is
// independent of the normal program allocator.
//----------------------------------------------------------------------

// A wrapper around LowLevelAlloc for STL_Allocator and for direct use.
// We always access this class while holding heap_checker_lock;
// in particular this lets heap_checker_lock protect the period when threads
// are stopped at random spots with TCMalloc_ListAllProcessThreads,
// without worrying about the lock in LowLevelAlloc::Arena.
// We rely on the fact that we use our own arena with its own lock here.
class HeapLeakChecker::Allocator {
 public:
  static void Init() {
    RAW_DCHECK(heap_checker_lock.IsHeld(), "");
    RAW_DCHECK(arena_ == NULL, "");
    arena_ = LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
  }
  static void Shutdown() {
    RAW_DCHECK(heap_checker_lock.IsHeld(), "");
    if (!LowLevelAlloc::DeleteArena(arena_) || alloc_count_ != 0) {
      RAW_LOG(FATAL, "Internal heap checker leak of %d objects", alloc_count_);
    }
  }
  static int alloc_count() {
    RAW_DCHECK(heap_checker_lock.IsHeld(), "");
    return alloc_count_;
  }
  static void* Allocate(size_t n) {
    RAW_DCHECK(arena_ && heap_checker_lock.IsHeld(), "");
    void* p = LowLevelAlloc::AllocWithArena(n, arena_);
    if (p) alloc_count_ += 1;
    return p;
  }
  static void Free(void* p) {
    RAW_DCHECK(heap_checker_lock.IsHeld(), "");
    if (p) alloc_count_ -= 1;
    LowLevelAlloc::Free(p);
  }
  static void Free(void* p, size_t /* n */) {
    Free(p);
  }
  // destruct, free, and set *p to NULL
  template<typename T> static void DeleteAndNull(T** p) {
    (*p)->~T();
    Free(*p);
    *p = NULL;
  }
  template<typename T> static void DeleteAndNullIfNot(T** p) {
    if (*p != NULL) DeleteAndNull(p);
  }
 private:
  static LowLevelAlloc::Arena* arena_;
  static int alloc_count_;
};

LowLevelAlloc::Arena* HeapLeakChecker::Allocator::arena_ = NULL;
int HeapLeakChecker::Allocator::alloc_count_ = 0;
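
// Throughout this file, objects managed by this allocator are created with
// placement new into Allocator memory and destroyed with DeleteAndNull,
// roughly like this (a sketch with a generic type T):
//
//   T* p = new(HeapLeakChecker::Allocator::Allocate(sizeof(T))) T;
//   ...use *p while holding heap_checker_lock...
//   HeapLeakChecker::Allocator::DeleteAndNull(&p);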

//----------------------------------------------------------------------
// HeapLeakChecker live object tracking components
//----------------------------------------------------------------------

// Cases of live object placement we distinguish
enum ObjectPlacement {
  MUST_BE_ON_HEAP,   // Must point to a live object of the matching size in the
                     // heap_profile map of the heap when we get to it
  IGNORED_ON_HEAP,   // Is a live (ignored) object on heap
  MAYBE_LIVE,        // Is a piece of writable memory from /proc/self/maps
  IN_GLOBAL_DATA,    // Is part of global data region of the executable
  THREAD_DATA,       // Part of a thread stack and a thread descriptor with TLS
  THREAD_REGISTERS,  // Values in registers of some thread
};

// Information about an allocated object
struct AllocObject {
  const void* ptr;        // the object
  uintptr_t size;         // its size
  ObjectPlacement place;  // where ptr points to

  AllocObject(const void* p, size_t s, ObjectPlacement l)
    : ptr(p), size(s), place(l) { }
};

// All objects (memory ranges) ignored via HeapLeakChecker::IgnoreObject
// Key is the object's address; value is its size.
typedef map<uintptr_t, size_t, less<uintptr_t>,
            STL_Allocator<pair<const uintptr_t, size_t>,
                          HeapLeakChecker::Allocator>
           > IgnoredObjectsMap;
static IgnoredObjectsMap* ignored_objects = NULL;

// All objects (memory ranges) that we consider to be the sources of pointers
// to live (not leaked) objects.
// At different times this holds (what can be reached from) global data regions
// and the objects we've been told to ignore.
// For any AllocObject::ptr "live_objects" is supposed to contain at most one
// record at any time. We maintain this by checking with the heap_profile map
// of the heap and removing the live heap objects we've handled from it.
// This vector is maintained as a stack and the frontier of reachable
// live heap objects in our flood traversal of them.
typedef vector<AllocObject,
               STL_Allocator<AllocObject, HeapLeakChecker::Allocator>
              > LiveObjectsStack;
static LiveObjectsStack* live_objects = NULL;

// A special string type that uses our own allocator
typedef basic_string<char, char_traits<char>,
                     STL_Allocator<char, HeapLeakChecker::Allocator>
                    > HCL_string;

// A placeholder to fill in the starting values for live_objects
// for each library, so we can keep the library-name association for logging.
typedef map<HCL_string, LiveObjectsStack, less<HCL_string>,
            STL_Allocator<pair<const HCL_string, LiveObjectsStack>,
                          HeapLeakChecker::Allocator>
           > LibraryLiveObjectsStacks;
static LibraryLiveObjectsStacks* library_live_objects = NULL;

// Value stored in the map of disabled address ranges;
// its key is the end of the address range.
// We'll ignore allocations with a return address in a disabled range
// if the address occurs at 'max_depth' or less in the stack trace.
struct HeapLeakChecker::RangeValue {
  uintptr_t start_address;  // the start of the range
  int max_depth;            // the maximal stack depth to disable at
};
typedef map<uintptr_t, HeapLeakChecker::RangeValue, less<uintptr_t>,
            STL_Allocator<pair<const uintptr_t, HeapLeakChecker::RangeValue>,
                          HeapLeakChecker::Allocator>
           > DisabledRangeMap;
// The disabled program counter address ranges for profile dumping
// that are registered with HeapLeakChecker::DisableChecksFromToLocked.
static DisabledRangeMap* disabled_ranges = NULL;

// Set of stack tops.
// These are used to consider live only appropriate chunks of the memory areas
// that are used for stacks (and maybe thread-specific data as well)
// so that we do not treat pointers from outdated stack frames as live.
typedef set<uintptr_t, less<uintptr_t>,
            STL_Allocator<uintptr_t, HeapLeakChecker::Allocator>
           > StackTopSet;
static StackTopSet* stack_tops = NULL;

// A map of ranges of code addresses for the system libraries
// that can mmap/mremap/sbrk-allocate memory regions for stacks
// and thread-local storage that we want to consider as live global data.
// Maps from the end address to the start address.
typedef map<uintptr_t, uintptr_t, less<uintptr_t>,
            STL_Allocator<pair<const uintptr_t, uintptr_t>,
                          HeapLeakChecker::Allocator>
           > GlobalRegionCallerRangeMap;
static GlobalRegionCallerRangeMap* global_region_caller_ranges = NULL;

// TODO(maxim): make our big data structs into own modules

// Disabler is implemented by keeping track of a per-thread count
// of active Disabler objects. Any objects allocated while the
// count > 0 are not reported.
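//
// A hypothetical usage sketch (allocations made in the scope of the
// Disabler are marked as ignored rather than reported as leaks):
//
//   {
//     HeapLeakChecker::Disabler disabler;
//     void* p = malloc(16);  // ignored by leak reports, even if never freed
//   }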

#ifdef HAVE_TLS

static __thread int thread_disable_counter
// The "initial-exec" model is faster than the default TLS model, at
// the cost that you can't dlopen this library. But dlopen on heap-checker
// doesn't work anyway -- it must run before main -- so this is a good
// trade-off.
# ifdef HAVE___ATTRIBUTE__
  __attribute__ ((tls_model ("initial-exec")))
# endif
    ;
inline int get_thread_disable_counter() {
  return thread_disable_counter;
}
inline void set_thread_disable_counter(int value) {
  thread_disable_counter = value;
}

#else  // #ifdef HAVE_TLS

static pthread_key_t thread_disable_counter_key;
static int main_thread_counter;  // storage for use before main()
static bool use_main_thread_counter = true;

// TODO(csilvers): this is called from NewHook, in the middle of malloc().
// If perftools_pthread_getspecific calls malloc, that will lead to an
// infinite loop. I don't know how to fix that, so I hope it never happens!
inline int get_thread_disable_counter() {
  if (use_main_thread_counter)  // means we're running really early
    return main_thread_counter;
  void* p = perftools_pthread_getspecific(thread_disable_counter_key);
  return (intptr_t)p;  // kinda evil: store the counter directly in the void*
}

inline void set_thread_disable_counter(int value) {
  if (use_main_thread_counter) {  // means we're running really early
    main_thread_counter = value;
    return;
  }
  intptr_t pointer_sized_value = value;
  // kinda evil: store the counter directly in the void*
  void* p = (void*)pointer_sized_value;
  // NOTE: this may call malloc, which will call NewHook which will call
  // get_thread_disable_counter() which will call pthread_getspecific(). I
  // don't know if anything bad can happen if we call getspecific() in the
  // middle of a setspecific() call. It seems to work ok in practice...
  perftools_pthread_setspecific(thread_disable_counter_key, p);
}

// The idea here is that this initializer will run pretty late: after
// pthreads have been totally set up. At this point we can call
// pthreads routines, so we set those up.
class InitThreadDisableCounter {
 public:
  InitThreadDisableCounter() {
    perftools_pthread_key_create(&thread_disable_counter_key, NULL);
    // Set up the main thread's value, which we have a special variable for.
    void* p = (void*)main_thread_counter;  // store the counter directly
    perftools_pthread_setspecific(thread_disable_counter_key, p);
    use_main_thread_counter = false;
  }
};
InitThreadDisableCounter init_thread_disable_counter;

#endif  // #ifdef HAVE_TLS

HeapLeakChecker::Disabler::Disabler() {
  // It is faster to unconditionally increment the thread-local
  // counter than to check whether or not heap-checking is on
  // in a thread-safe manner.
  int counter = get_thread_disable_counter();
  set_thread_disable_counter(counter + 1);
  RAW_VLOG(10, "Increasing thread disable counter to %d", counter + 1);
}

HeapLeakChecker::Disabler::~Disabler() {
  int counter = get_thread_disable_counter();
  RAW_DCHECK(counter > 0, "");
  if (counter > 0) {
    set_thread_disable_counter(counter - 1);
    RAW_VLOG(10, "Decreasing thread disable counter to %d", counter);
  } else {
    RAW_VLOG(0, "Thread disable counter underflow: %d", counter);
  }
}

//----------------------------------------------------------------------

// The size of the largest heap object allocated so far.
static size_t max_heap_object_size = 0;
// The possible range of addresses that can point
// into one of the elements of heap_objects.
static uintptr_t min_heap_address = uintptr_t(-1LL);
static uintptr_t max_heap_address = 0;

//----------------------------------------------------------------------

// Simple casting helpers for uintptr_t and void*:
template<typename T>
inline static const void* AsPtr(T addr) {
  return reinterpret_cast<void*>(addr);
}
inline static uintptr_t AsInt(const void* ptr) {
  return reinterpret_cast<uintptr_t>(ptr);
}

//----------------------------------------------------------------------

// We've seen reports that strstr causes heap-checker crashes in some
// libc implementations:
//   http://code.google.com/p/gperftools/issues/detail?id=263
// It's simple enough to use our own. This is not in time-critical code.
static const char* hc_strstr(const char* s1, const char* s2) {
  const size_t len = strlen(s2);
  RAW_CHECK(len > 0, "Unexpected empty string passed to strstr()");
  for (const char* p = strchr(s1, *s2); p != NULL; p = strchr(p+1, *s2)) {
    if (strncmp(p, s2, len) == 0) {
      return p;
    }
  }
  return NULL;
}

//----------------------------------------------------------------------

// Our hooks for MallocHook
static void NewHook(const void* ptr, size_t size) {
  if (ptr != NULL) {
    const int counter = get_thread_disable_counter();
    const bool ignore = (counter > 0);
    RAW_VLOG(16, "Recording Alloc: %p of %" PRIuS "; %d", ptr, size,
             int(counter));

    // Fetch the caller's stack trace before acquiring heap_checker_lock.
    void* stack[HeapProfileTable::kMaxStackDepth];
    int depth = HeapProfileTable::GetCallerStackTrace(0, stack);

    { SpinLockHolder l(&heap_checker_lock);
      if (size > max_heap_object_size) max_heap_object_size = size;
      uintptr_t addr = AsInt(ptr);
      if (addr < min_heap_address) min_heap_address = addr;
      addr += size;
      if (addr > max_heap_address) max_heap_address = addr;
      if (heap_checker_on) {
        heap_profile->RecordAlloc(ptr, size, depth, stack);
        if (ignore) {
          heap_profile->MarkAsIgnored(ptr);
        }
      }
    }
    RAW_VLOG(17, "Alloc Recorded: %p of %" PRIuS "", ptr, size);
  }
}

static void DeleteHook(const void* ptr) {
  if (ptr != NULL) {
    RAW_VLOG(16, "Recording Free %p", ptr);
    { SpinLockHolder l(&heap_checker_lock);
      if (heap_checker_on) heap_profile->RecordFree(ptr);
    }
    RAW_VLOG(17, "Free Recorded: %p", ptr);
  }
}

//----------------------------------------------------------------------

enum StackDirection {
  GROWS_TOWARDS_HIGH_ADDRESSES,
  GROWS_TOWARDS_LOW_ADDRESSES,
  UNKNOWN_DIRECTION
};

// Determine which way the stack grows:

static StackDirection ATTRIBUTE_NOINLINE GetStackDirection(
    const uintptr_t *const ptr) {
  uintptr_t x;
  if (&x < ptr)
    return GROWS_TOWARDS_LOW_ADDRESSES;
  if (ptr < &x)
    return GROWS_TOWARDS_HIGH_ADDRESSES;

  RAW_CHECK(0, "");  // Couldn't determine the stack direction.

  return UNKNOWN_DIRECTION;
}
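
// Note: on the most common platforms (e.g. x86 and x86-64) stacks grow
// towards low addresses, so GetStackDirection() is expected to return
// GROWS_TOWARDS_LOW_ADDRESSES there.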

// Direction of stack growth (will initialize via GetStackDirection())
static StackDirection stack_direction = UNKNOWN_DIRECTION;

// This routine is called for every thread stack we know about to register it.
static void RegisterStackLocked(const void* top_ptr) {
  RAW_DCHECK(heap_checker_lock.IsHeld(), "");
  RAW_DCHECK(MemoryRegionMap::LockIsHeld(), "");
  RAW_VLOG(10, "Thread stack at %p", top_ptr);
  uintptr_t top = AsInt(top_ptr);
  stack_tops->insert(top);  // add for later use

  // make sure stack_direction is initialized
  if (stack_direction == UNKNOWN_DIRECTION) {
    stack_direction = GetStackDirection(&top);
  }

  // Find memory region with this stack
  MemoryRegionMap::Region region;
  if (MemoryRegionMap::FindAndMarkStackRegion(top, &region)) {
    // Make the proper portion of the stack live:
    if (stack_direction == GROWS_TOWARDS_LOW_ADDRESSES) {
      RAW_VLOG(11, "Live stack at %p of %" PRIuPTR " bytes",
               top_ptr, region.end_addr - top);
      live_objects->push_back(AllocObject(top_ptr, region.end_addr - top,
                                          THREAD_DATA));
    } else {  // GROWS_TOWARDS_HIGH_ADDRESSES
      RAW_VLOG(11, "Live stack at %p of %" PRIuPTR " bytes",
               AsPtr(region.start_addr),
               top - region.start_addr);
      live_objects->push_back(AllocObject(AsPtr(region.start_addr),
                                          top - region.start_addr,
                                          THREAD_DATA));
    }
  // not in MemoryRegionMap, look in library_live_objects:
  } else if (FLAGS_heap_check_ignore_global_live) {
    for (LibraryLiveObjectsStacks::iterator lib = library_live_objects->begin();
         lib != library_live_objects->end(); ++lib) {
      for (LiveObjectsStack::iterator span = lib->second.begin();
           span != lib->second.end(); ++span) {
        uintptr_t start = AsInt(span->ptr);
        uintptr_t end = start + span->size;
        if (start <= top && top < end) {
          RAW_VLOG(11, "Stack at %p is inside /proc/self/maps chunk %p..%p",
                   top_ptr, AsPtr(start), AsPtr(end));
          // Shrink start..end region by chopping away the memory regions in
          // MemoryRegionMap that land in it to undo merging of regions
          // in /proc/self/maps, so that we correctly identify what portion
          // of start..end is actually the stack region.
          uintptr_t stack_start = start;
          uintptr_t stack_end = end;
          // this loop could be optimized away, but it does not run often
          RAW_DCHECK(MemoryRegionMap::LockIsHeld(), "");
          for (MemoryRegionMap::RegionIterator r =
                 MemoryRegionMap::BeginRegionLocked();
               r != MemoryRegionMap::EndRegionLocked(); ++r) {
            if (top < r->start_addr && r->start_addr < stack_end) {
              stack_end = r->start_addr;
            }
            if (stack_start < r->end_addr && r->end_addr <= top) {
              stack_start = r->end_addr;
            }
          }
          if (stack_start != start || stack_end != end) {
            RAW_VLOG(11, "Stack at %p is actually inside memory chunk %p..%p",
                     top_ptr, AsPtr(stack_start), AsPtr(stack_end));
          }
          // Make the proper portion of the stack live:
          if (stack_direction == GROWS_TOWARDS_LOW_ADDRESSES) {
            RAW_VLOG(11, "Live stack at %p of %" PRIuPTR " bytes",
                     top_ptr, stack_end - top);
            live_objects->push_back(
              AllocObject(top_ptr, stack_end - top, THREAD_DATA));
          } else {  // GROWS_TOWARDS_HIGH_ADDRESSES
            RAW_VLOG(11, "Live stack at %p of %" PRIuPTR " bytes",
                     AsPtr(stack_start), top - stack_start);
            live_objects->push_back(
              AllocObject(AsPtr(stack_start), top - stack_start, THREAD_DATA));
          }
          lib->second.erase(span);  // kill the rest of the region
          // Put the non-stack part(s) of the region back:
          if (stack_start != start) {
            lib->second.push_back(AllocObject(AsPtr(start), stack_start - start,
                                              MAYBE_LIVE));
          }
          if (stack_end != end) {
            lib->second.push_back(AllocObject(AsPtr(stack_end), end - stack_end,
                                              MAYBE_LIVE));
          }
          return;
        }
      }
    }
    RAW_LOG(ERROR, "Memory region for stack at %p not found. "
                   "Will likely report false leak positives.", top_ptr);
  }
}

// Iterator for heap allocation map data to make ignored objects "live"
// (i.e., treated as roots for the mark-and-sweep phase)
static void MakeIgnoredObjectsLiveCallbackLocked(
    const void* ptr, const HeapProfileTable::AllocInfo& info) {
  RAW_DCHECK(heap_checker_lock.IsHeld(), "");
  if (info.ignored) {
    live_objects->push_back(AllocObject(ptr, info.object_size,
                                        MUST_BE_ON_HEAP));
  }
}

// Iterator for heap allocation map data to make objects allocated from
// disabled regions of code live.
static void MakeDisabledLiveCallbackLocked(
    const void* ptr, const HeapProfileTable::AllocInfo& info) {
  RAW_DCHECK(heap_checker_lock.IsHeld(), "");
  bool stack_disable = false;
  bool range_disable = false;
  for (int depth = 0; depth < info.stack_depth; depth++) {
    uintptr_t addr = AsInt(info.call_stack[depth]);
    if (disabled_ranges) {
      DisabledRangeMap::const_iterator iter
        = disabled_ranges->upper_bound(addr);
      if (iter != disabled_ranges->end()) {
        RAW_DCHECK(iter->first > addr, "");
        if (iter->second.start_address < addr &&
            iter->second.max_depth > depth) {
          range_disable = true;  // in range; dropping
          break;
        }
      }
    }
  }
  if (stack_disable || range_disable) {
    uintptr_t start_address = AsInt(ptr);
    uintptr_t end_address = start_address + info.object_size;
    StackTopSet::const_iterator iter
      = stack_tops->lower_bound(start_address);
    if (iter != stack_tops->end()) {
      RAW_DCHECK(*iter >= start_address, "");
      if (*iter < end_address) {
        // We do not disable (treat as live) whole allocated regions
        // if they are used to hold thread call stacks
        // (i.e. when we find a stack inside).
        // The reason is that we'll treat as live the currently used
        // stack portions anyway (see RegisterStackLocked),
        // and the rest of the region where the stack lives can well
        // contain outdated stack variables which are not live anymore,
        // hence should not be treated as such.
        RAW_VLOG(11, "Not %s-disabling %" PRIuS " bytes at %p"
                     ": have stack inside: %p",
                 (stack_disable ? "stack" : "range"),
                 info.object_size, ptr, AsPtr(*iter));
        return;
      }
    }
    RAW_VLOG(11, "%s-disabling %" PRIuS " bytes at %p",
             (stack_disable ? "Stack" : "Range"), info.object_size, ptr);
    live_objects->push_back(AllocObject(ptr, info.object_size,
                                        MUST_BE_ON_HEAP));
  }
}

static const char kUnnamedProcSelfMapEntry[] = "UNNAMED";

// This function takes some fields from a /proc/self/maps line:
//
//   start_address  start address of a memory region.
//   end_address    end address of a memory region
//   permissions    rwx + private/shared bit
//   filename       filename of the mapped file
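//
// A made-up example of a maps line these fields come from:
//
//   "401dc000-4030f000 rw-p 00132000 03:01 13991972  /lib/libfoo.so"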
//
// If the region is not writeable, then it cannot have any heap
// pointers in it, otherwise we record it as a candidate live region
// to get filtered later.
static void RecordGlobalDataLocked(uintptr_t start_address,
                                   uintptr_t end_address,
                                   const char* permissions,
                                   const char* filename) {
  RAW_DCHECK(heap_checker_lock.IsHeld(), "");
  // Ignore non-writeable regions.
  if (strchr(permissions, 'w') == NULL) return;
  if (filename == NULL || *filename == '\0') {
    filename = kUnnamedProcSelfMapEntry;
  }
  RAW_VLOG(11, "Looking into %s: 0x%" PRIxPTR "..0x%" PRIxPTR,
           filename, start_address, end_address);
  (*library_live_objects)[filename].
    push_back(AllocObject(AsPtr(start_address),
                          end_address - start_address,
                          MAYBE_LIVE));
}

// See if 'library' from /proc/self/maps has base name 'library_base',
// i.e., contains it followed by '.' or '-'.
static bool IsLibraryNamed(const char* library, const char* library_base) {
  const char* p = hc_strstr(library, library_base);
  size_t sz = strlen(library_base);
  return p != NULL && (p[sz] == '.' || p[sz] == '-');
}
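
// For example (hypothetical paths, following the logic above):
//   IsLibraryNamed("/lib/libpthread-2.19.so", "/libpthread")  // -> true
//   IsLibraryNamed("/lib/libpthreadfoo.so", "/libpthread")    // -> false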

// static
void HeapLeakChecker::DisableLibraryAllocsLocked(const char* library,
                                                 uintptr_t start_address,
                                                 uintptr_t end_address) {
  RAW_DCHECK(heap_checker_lock.IsHeld(), "");
  int depth = 0;
  // TODO(maxim): maybe this should be extended to also use objdump
  //              and pick the text portion of the library more precisely.
  if (IsLibraryNamed(library, "/libpthread") ||
        // libpthread has a lot of small "system" leaks we don't care about.
        // In particular it allocates memory to store data supplied via
        // pthread_setspecific (which can be the only pointer to a heap object).
      IsLibraryNamed(library, "/libdl") ||
        // library loaders leak some "system" heap that we don't care about
      IsLibraryNamed(library, "/libcrypto") ||
        // Sometimes libcrypto of OpenSSL is compiled with -fomit-frame-pointer
        // (any library can be, of course, but this one often is because speed
        // is so important for making crypto usable). We ignore all its
        // allocations because we can't see the call stacks. We'd prefer
        // to ignore allocations done in files/symbols that match
        // "default_malloc_ex|default_realloc_ex"
        // but that doesn't work when the end-result binary is stripped.
      IsLibraryNamed(library, "/libjvm") ||
        // JVM has a lot of leaks we don't care about.
      IsLibraryNamed(library, "/libzip")
        // The JVM leaks java.util.zip.Inflater after loading classes.
     ) {
    depth = 1;  // only disable allocation calls directly from the library code
  } else if (IsLibraryNamed(library, "/ld")
               // library loader leaks some "system" heap
               // (e.g. thread-local storage) that we don't care about
            ) {
    depth = 2;  // disable allocation calls directly from the library code
                // and at depth 2 from it.
    // We need depth 2 here solely because of a libc bug that
    // forces us to jump through __memalign_hook and MemalignOverride hoops
    // in tcmalloc.cc.
    // Those buggy __libc_memalign() calls are in ld-linux.so and happen for
    // thread-local storage allocations that we want to ignore here.
    // We go with the depth-2 hack as a workaround for this libc bug:
    // otherwise we'd need to extend MallocHook interface
    // so that correct stack depth adjustment can be propagated from
    // the exceptional case of MemalignOverride.
    // Using depth 2 here should not mask real leaks because ld-linux.so
    // does not call user code.
  }
  if (depth) {
    RAW_VLOG(10, "Disabling allocations from %s at depth %d:", library, depth);
    DisableChecksFromToLocked(AsPtr(start_address), AsPtr(end_address), depth);
    if (IsLibraryNamed(library, "/libpthread") ||
        IsLibraryNamed(library, "/libdl") ||
        IsLibraryNamed(library, "/ld")) {
      RAW_VLOG(10, "Global memory regions made by %s will be live data",
               library);
      if (global_region_caller_ranges == NULL) {
        global_region_caller_ranges =
          new(Allocator::Allocate(sizeof(GlobalRegionCallerRangeMap)))
            GlobalRegionCallerRangeMap;
      }
      global_region_caller_ranges
        ->insert(make_pair(end_address, start_address));
    }
  }
}

// static
HeapLeakChecker::ProcMapsResult HeapLeakChecker::UseProcMapsLocked(
    ProcMapsTask proc_maps_task) {
  RAW_DCHECK(heap_checker_lock.IsHeld(), "");
  // Need to provide own scratch memory to ProcMapsIterator:
  ProcMapsIterator::Buffer buffer;
  ProcMapsIterator it(0, &buffer);
  if (!it.Valid()) {
    int errsv = errno;
    RAW_LOG(ERROR, "Could not open /proc/self/maps: errno=%d. "
                   "Libraries will not be handled correctly.", errsv);
    return CANT_OPEN_PROC_MAPS;
  }
  uint64 start_address, end_address, file_offset;
  int64 inode;
  char *permissions, *filename;
  bool saw_shared_lib = false;
  bool saw_nonzero_inode = false;
  bool saw_shared_lib_with_nonzero_inode = false;
  while (it.Next(&start_address, &end_address, &permissions,
                 &file_offset, &inode, &filename)) {
    if (start_address >= end_address) {
      // Warn if a line we could be interested in is ill-formed:
      if (inode != 0) {
        RAW_LOG(ERROR, "Errors reading /proc/self/maps. "
                       "Some global memory regions will not "
                       "be handled correctly.");
      }
      // Silently skip other ill-formed lines: some are possible, probably
      // due to the interplay of how /proc/self/maps is updated while we
      // read it in chunks in ProcMapsIterator and do things in this loop.
      continue;
    }
    // Determine if any shared libraries are present (this is the same
    // list of extensions as is found in pprof). We want to ignore
    // 'fake' libraries with inode 0 when determining. However, some
    // systems don't share inodes via /proc, so we turn off this check
    // if we don't see any evidence that we're getting inode info.
    if (inode != 0) {
      saw_nonzero_inode = true;
    }
    if ((hc_strstr(filename, "lib") && hc_strstr(filename, ".so")) ||
        hc_strstr(filename, ".dll") ||
        // not all .dylib filenames start with lib. .dylib is big enough
        // that we are unlikely to get false matches just checking that.
        hc_strstr(filename, ".dylib") || hc_strstr(filename, ".bundle")) {
      saw_shared_lib = true;
      if (inode != 0) {
        saw_shared_lib_with_nonzero_inode = true;
      }
    }

    switch (proc_maps_task) {
      case DISABLE_LIBRARY_ALLOCS:
        // All lines starting like
        // "401dc000-4030f000 r??p 00132000 03:01 13991972 lib/bin"
        // identify data and code sections of a shared library or of our binary
        if (inode != 0 && strncmp(permissions, "r-xp", 4) == 0) {
          DisableLibraryAllocsLocked(filename, start_address, end_address);
        }
        break;
      case RECORD_GLOBAL_DATA:
        RecordGlobalDataLocked(start_address, end_address,
                               permissions, filename);
        break;
      default:
        RAW_CHECK(0, "");
    }
  }
  // If /proc/self/maps is reporting inodes properly (we saw a
  // non-zero inode), then we only say we saw a shared lib if we saw a
  // 'real' one, with a non-zero inode.
  if (saw_nonzero_inode) {
    saw_shared_lib = saw_shared_lib_with_nonzero_inode;
  }
  if (!saw_shared_lib) {
    RAW_LOG(ERROR, "No shared libs detected. Will likely report false leak "
                   "positives for statically linked executables.");
    return NO_SHARED_LIBS_IN_PROC_MAPS;
  }
  return PROC_MAPS_USED;
}

// Total number and size of live objects dropped from the profile;
// (re)initialized in IgnoreAllLiveObjectsLocked.
static int64 live_objects_total;
static int64 live_bytes_total;

// pid of the thread that is doing the current leak check
// (protected by our lock; IgnoreAllLiveObjectsLocked sets it)
static pid_t self_thread_pid = 0;

// Status of our thread listing callback execution
// (protected by our lock; used from within IgnoreAllLiveObjectsLocked)
static enum {
  CALLBACK_NOT_STARTED,
  CALLBACK_STARTED,
  CALLBACK_COMPLETED,
} thread_listing_status = CALLBACK_NOT_STARTED;

// Ideally, to avoid deadlocks this function should not result in any libc
// or other function calls that might need to lock a mutex:
// it is called when all threads of a process are stopped
// at arbitrary points, thus potentially holding those locks.
//
// In practice we are calling some simple i/o and sprintf-type library functions
// for logging messages, but use only our own LowLevelAlloc::Arena allocator.
//
// This is known to be buggy: the library i/o function calls are able to cause
// deadlocks when they request a lock that a stopped thread happens to hold.
// As far as we know, this issue has so far not resulted in any deadlocks
// in practice, so for now we are taking our chance that the deadlocks
// have insignificant frequency.
//
// If such deadlocks become a problem we should make the i/o calls
// into appropriately direct system calls (or eliminate them),
// in particular write() is not safe and vsnprintf() is potentially dangerous
// due to reliance on locale functions (these are called through RAW_LOG
// and in other ways).
//

#if defined(HAVE_LINUX_PTRACE_H) && defined(HAVE_SYS_SYSCALL_H) && defined(DUMPER)
# if (defined(__i386__) || defined(__x86_64))
#  define THREAD_REGS i386_regs
# elif defined(__PPC__)
#  define THREAD_REGS ppc_regs
# endif
#endif

/*static*/ int HeapLeakChecker::IgnoreLiveThreadsLocked(void* parameter,
                                                        int num_threads,
                                                        pid_t* thread_pids,
                                                        va_list /*ap*/) {
  RAW_DCHECK(heap_checker_lock.IsHeld(), "");
  thread_listing_status = CALLBACK_STARTED;
  RAW_VLOG(11, "Found %d threads (from pid %d)", num_threads, getpid());

  if (FLAGS_heap_check_ignore_global_live) {
    UseProcMapsLocked(RECORD_GLOBAL_DATA);
  }

  // We put the registers from other threads here
  // to make pointers stored in them live.
  vector<void*, STL_Allocator<void*, Allocator> > thread_registers;

  int failures = 0;
  for (int i = 0; i < num_threads; ++i) {
    // the leak checking thread itself is handled
    // specially via self_thread_stack, not here:
    if (thread_pids[i] == self_thread_pid) continue;
    RAW_VLOG(11, "Handling thread with pid %d", thread_pids[i]);
#ifdef THREAD_REGS
    THREAD_REGS thread_regs;
#define sys_ptrace(r, p, a, d) syscall(SYS_ptrace, (r), (p), (a), (d))
    // We use sys_ptrace to avoid thread locking
    // because this is called from TCMalloc_ListAllProcessThreads
    // when all but this thread are suspended.
    if (sys_ptrace(PTRACE_GETREGS, thread_pids[i], NULL, &thread_regs) == 0) {
      // Need to use SP to get all the data from the very last stack frame:
      COMPILE_ASSERT(sizeof(thread_regs.SP) == sizeof(void*),
                     SP_register_does_not_look_like_a_pointer);
      RegisterStackLocked(reinterpret_cast<void*>(thread_regs.SP));
      // Make registers live (just in case PTRACE_ATTACH resulted in some
      // register pointers still being in the registers and not on the stack):
      for (void** p = reinterpret_cast<void**>(&thread_regs);
           p < reinterpret_cast<void**>(&thread_regs + 1); ++p) {
        RAW_VLOG(12, "Thread register %p", *p);
        thread_registers.push_back(*p);
      }
    } else {
      failures += 1;
    }
#else
    failures += 1;
#endif
  }
  // Use all the collected thread (stack) liveness sources:
  IgnoreLiveObjectsLocked("threads stack data", "");
  if (thread_registers.size()) {
    // Make thread registers be live heap data sources.
    // we rely here on the fact that vector is in one memory chunk:
    RAW_VLOG(11, "Live registers at %p of %" PRIuS " bytes",
             &thread_registers[0], thread_registers.size() * sizeof(void*));
    live_objects->push_back(AllocObject(&thread_registers[0],
                                        thread_registers.size() * sizeof(void*),
                                        THREAD_REGISTERS));
    IgnoreLiveObjectsLocked("threads register data", "");
  }
  // Do all other liveness walking while all threads are stopped:
  IgnoreNonThreadLiveObjectsLocked();
  // Can now resume the threads:
  TCMalloc_ResumeAllProcessThreads(num_threads, thread_pids);
  thread_listing_status = CALLBACK_COMPLETED;
  return failures;
}
| 1087 | |
| 1088 | // Stack top of the thread that is doing the current leak check |
| 1089 | // (protected by our lock; IgnoreAllLiveObjectsLocked sets it) |
| 1090 | static const void* self_thread_stack_top; |
| 1091 | |
| 1092 | // static |
| 1093 | void HeapLeakChecker::IgnoreNonThreadLiveObjectsLocked() { |
| 1094 | RAW_DCHECK(heap_checker_lock.IsHeld(), ""); |
| 1095 | RAW_DCHECK(MemoryRegionMap::LockIsHeld(), ""); |
| 1096 | RAW_VLOG(11, "Handling self thread with pid %d", self_thread_pid); |
| 1097 | // Register our own stack: |
| 1098 | |
| 1099 | // Important that all stack ranges (including the one here) |
| 1100 | // are known before we start looking at them |
| 1101 | // in MakeDisabledLiveCallbackLocked: |
| 1102 | RegisterStackLocked(self_thread_stack_top); |
| 1103 | IgnoreLiveObjectsLocked("stack data", ""); |
| 1104 | |
| 1105 | // Make objects we were told to ignore live: |
| 1106 | if (ignored_objects) { |
| 1107 | for (IgnoredObjectsMap::const_iterator object = ignored_objects->begin(); |
| 1108 | object != ignored_objects->end(); ++object) { |
| 1109 | const void* ptr = AsPtr(object->first); |
| 1110 | RAW_VLOG(11, "Ignored live object at %p of %" PRIuS " bytes", |
| 1111 | ptr, object->second); |
| 1112 | live_objects-> |
| 1113 | push_back(AllocObject(ptr, object->second, MUST_BE_ON_HEAP)); |
| 1114 | // we do this liveness check for ignored_objects before doing any |
| 1115 | // live heap walking to make sure it does not fail needlessly: |
| 1116 | size_t object_size; |
| 1117 | if (!(heap_profile->FindAlloc(ptr, &object_size) && |
| 1118 | object->second == object_size)) { |
| 1119 | RAW_LOG(FATAL, "Object at %p of %" PRIuS " bytes from an" |
| 1120 | " IgnoreObject() has disappeared", ptr, object->second); |
| 1121 | } |
| 1122 | } |
| 1123 | IgnoreLiveObjectsLocked("ignored objects", ""); |
| 1124 | } |
| 1125 | |
| 1126 | // Treat objects that were allocated when a Disabler was live as |
| 1127 | // roots. I.e., if X was allocated while a Disabler was active, |
| 1128 | // and Y is reachable from X, arrange that neither X nor Y are |
| 1129 | // treated as leaks. |
| 1130 | heap_profile->IterateAllocs(MakeIgnoredObjectsLiveCallbackLocked); |
| 1131 | IgnoreLiveObjectsLocked("disabled objects", ""); |
| 1132 | |
| 1133 | // Make code-address-disabled objects live and ignored: |
| 1134 | // This in particular makes all thread-specific data live |
| 1135 | // because the basic data structure to hold pointers to thread-specific data |
| 1136 | // is allocated from libpthreads and we have range-disabled that |
| 1137 | // library code with UseProcMapsLocked(DISABLE_LIBRARY_ALLOCS); |
| 1138 | // so now we declare all thread-specific data reachable from there as live. |
| 1139 | heap_profile->IterateAllocs(MakeDisabledLiveCallbackLocked); |
| 1140 | IgnoreLiveObjectsLocked("disabled code", ""); |
| 1141 | |
| 1142 | // Actually make global data live: |
| 1143 | if (FLAGS_heap_check_ignore_global_live) { |
| 1144 | bool have_null_region_callers = false; |
| 1145 | for (LibraryLiveObjectsStacks::iterator l = library_live_objects->begin(); |
| 1146 | l != library_live_objects->end(); ++l) { |
| 1147 | RAW_CHECK(live_objects->empty(), ""); |
| 1148 | // Process library_live_objects in l->second |
| 1149 | // filtering them by MemoryRegionMap: |
| 1150 | // It's safe to iterate over MemoryRegionMap |
| 1151 | // w/o locks here as we are inside MemoryRegionMap::Lock(): |
| 1152 | RAW_DCHECK(MemoryRegionMap::LockIsHeld(), ""); |
| 1153 | // The only change to MemoryRegionMap possible in this loop |
| 1154 | // is region addition as a result of allocating more memory |
| 1155 | // for live_objects. This won't invalidate the RegionIterator |
| 1156 | // or the intent of the loop. |
| 1157 | // --see the comment by MemoryRegionMap::BeginRegionLocked(). |
| 1158 | for (MemoryRegionMap::RegionIterator region = |
| 1159 | MemoryRegionMap::BeginRegionLocked(); |
| 1160 | region != MemoryRegionMap::EndRegionLocked(); ++region) { |
| 1161 | // "region" from MemoryRegionMap is to be subtracted from |
| 1162 | // (tentatively live) regions in l->second |
| 1163 | // if it has a stack inside or it was allocated by |
| 1164 | // a non-special caller (not one covered by a range |
| 1165 | // in global_region_caller_ranges). |
| 1166 | // This will in particular exclude all memory chunks used |
| 1167 | // by the heap itself as well as what's been allocated with |
| 1168 | // any allocator on top of mmap. |
| 1169 | bool subtract = true; |
| 1170 | if (!region->is_stack && global_region_caller_ranges) { |
| 1171 | if (region->caller() == static_cast<uintptr_t>(NULL)) { |
| 1172 | have_null_region_callers = true; |
| 1173 | } else { |
| 1174 | GlobalRegionCallerRangeMap::const_iterator iter |
| 1175 | = global_region_caller_ranges->upper_bound(region->caller()); |
| 1176 | if (iter != global_region_caller_ranges->end()) { |
| 1177 | RAW_DCHECK(iter->first > region->caller(), ""); |
| 1178 | if (iter->second < region->caller()) { // in special region |
| 1179 | subtract = false; |
| 1180 | } |
| 1181 | } |
| 1182 | } |
| 1183 | } |
| 1184 | if (subtract) { |
| 1185 | // The loop puts the result of filtering l->second into live_objects: |
| 1186 | for (LiveObjectsStack::const_iterator i = l->second.begin(); |
| 1187 | i != l->second.end(); ++i) { |
| 1188 | // subtract *region from *i |
| 1189 | uintptr_t start = AsInt(i->ptr); |
| 1190 | uintptr_t end = start + i->size; |
| 1191 | if (region->start_addr <= start && end <= region->end_addr) { |
| 1192 | // full deletion due to subsumption |
| 1193 | } else if (start < region->start_addr && |
| 1194 | region->end_addr < end) { // cutting-out split |
| 1195 | live_objects->push_back(AllocObject(i->ptr, |
| 1196 | region->start_addr - start, |
| 1197 | IN_GLOBAL_DATA)); |
| 1198 | live_objects->push_back(AllocObject(AsPtr(region->end_addr), |
| 1199 | end - region->end_addr, |
| 1200 | IN_GLOBAL_DATA)); |
| 1201 | } else if (region->end_addr > start && |
| 1202 | region->start_addr <= start) { // cut from start |
| 1203 | live_objects->push_back(AllocObject(AsPtr(region->end_addr), |
| 1204 | end - region->end_addr, |
| 1205 | IN_GLOBAL_DATA)); |
| 1206 | } else if (region->start_addr > start && |
| 1207 | region->start_addr < end) { // cut from end |
| 1208 | live_objects->push_back(AllocObject(i->ptr, |
| 1209 | region->start_addr - start, |
| 1210 | IN_GLOBAL_DATA)); |
| 1211 | } else { // pass: no intersection |
| 1212 | live_objects->push_back(AllocObject(i->ptr, i->size, |
| 1213 | IN_GLOBAL_DATA)); |
| 1214 | } |
| 1215 | } |
| 1216 | // Move live_objects back into l->second |
| 1217 | // for filtering by the next region. |
| 1218 | live_objects->swap(l->second); |
| 1219 | live_objects->clear(); |
| 1220 | } |
| 1221 | } |
| 1222 | // Now get and use live_objects from the final version of l->second: |
| 1223 | if (VLOG_IS_ON(11)) { |
| 1224 | for (LiveObjectsStack::const_iterator i = l->second.begin(); |
| 1225 | i != l->second.end(); ++i) { |
| 1226 | RAW_VLOG(11, "Library live region at %p of %" PRIuPTR " bytes", |
| 1227 | i->ptr, i->size); |
| 1228 | } |
| 1229 | } |
| 1230 | live_objects->swap(l->second); |
| 1231 | IgnoreLiveObjectsLocked("in globals of\n ", l->first.c_str()); |
| 1232 | } |
| 1233 | if (have_null_region_callers) { |
| 1234 | RAW_LOG(ERROR, "Have memory regions w/o callers: " |
| 1235 | "might report false leaks"); |
| 1236 | } |
| 1237 | Allocator::DeleteAndNull(&library_live_objects); |
| 1238 | } |
| 1239 | } |
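// A worked example of the subtraction logic above (made-up addresses):
// for a tentatively-live global chunk i = [0x1000, 0x4000) and a region
// to subtract of [0x2000, 0x3000), the "cutting-out split" case keeps
// [0x1000, 0x2000) and [0x3000, 0x4000); a region [0x0800, 0x2000) hits
// the "cut from start" case and keeps [0x2000, 0x4000); a region covering
// all of [0x1000, 0x4000) deletes the chunk outright ("subsumption");
// and a disjoint region passes the chunk through unchanged.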
| 1240 | |
| 1241 | // Callback for TCMalloc_ListAllProcessThreads in IgnoreAllLiveObjectsLocked below |
| 1242 | // to test/verify that we have just the one main thread, in which case |
| 1243 | // we can do everything in that main thread, |
// so that the CPU profiler can collect all its samples.
| 1245 | // Returns the number of threads in the process. |
| 1246 | static int IsOneThread(void* parameter, int num_threads, |
| 1247 | pid_t* thread_pids, va_list ap) { |
| 1248 | if (num_threads != 1) { |
| 1249 | RAW_LOG(WARNING, "Have threads: Won't CPU-profile the bulk of leak " |
| 1250 | "checking work happening in IgnoreLiveThreadsLocked!"); |
| 1251 | } |
| 1252 | TCMalloc_ResumeAllProcessThreads(num_threads, thread_pids); |
| 1253 | return num_threads; |
| 1254 | } |
| 1255 | |
| 1256 | // Dummy for IgnoreAllLiveObjectsLocked below. |
| 1257 | // Making it global helps with compiler warnings. |
| 1258 | static va_list dummy_ap; |
| 1259 | |
| 1260 | // static |
| 1261 | void HeapLeakChecker::IgnoreAllLiveObjectsLocked(const void* self_stack_top) { |
| 1262 | RAW_DCHECK(heap_checker_lock.IsHeld(), ""); |
| 1263 | RAW_CHECK(live_objects == NULL, ""); |
| 1264 | live_objects = new(Allocator::Allocate(sizeof(LiveObjectsStack))) |
| 1265 | LiveObjectsStack; |
| 1266 | stack_tops = new(Allocator::Allocate(sizeof(StackTopSet))) StackTopSet; |
| 1267 | // reset the counts |
| 1268 | live_objects_total = 0; |
| 1269 | live_bytes_total = 0; |
// Reduce max_heap_object_size to FLAGS_heap_check_max_pointer_offset
// for the duration of the leak check.
// FLAGS_heap_check_max_pointer_offset caps max_heap_object_size
// to keep reasonably low the chances of random bytes
// appearing to point into large, actually-leaked heap objects.
| 1275 | const size_t old_max_heap_object_size = max_heap_object_size; |
| 1276 | max_heap_object_size = ( |
| 1277 | FLAGS_heap_check_max_pointer_offset != -1 |
| 1278 | ? min(size_t(FLAGS_heap_check_max_pointer_offset), max_heap_object_size) |
| 1279 | : max_heap_object_size); |
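// E.g. (illustrative numbers): with HEAP_CHECK_MAX_POINTER_OFFSET=1024 the
// cap becomes min(1024, max_heap_object_size), i.e. a byte sequence counts
// as an inside-pointer only if it points within the first 1024 bytes of a
// heap object; the special value -1 leaves max_heap_object_size unchanged.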
| 1280 | // Record global data as live: |
| 1281 | if (FLAGS_heap_check_ignore_global_live) { |
| 1282 | library_live_objects = |
| 1283 | new(Allocator::Allocate(sizeof(LibraryLiveObjectsStacks))) |
| 1284 | LibraryLiveObjectsStacks; |
| 1285 | } |
| 1286 | // Ignore all thread stacks: |
| 1287 | thread_listing_status = CALLBACK_NOT_STARTED; |
| 1288 | bool need_to_ignore_non_thread_objects = true; |
| 1289 | self_thread_pid = getpid(); |
| 1290 | self_thread_stack_top = self_stack_top; |
| 1291 | if (FLAGS_heap_check_ignore_thread_live) { |
| 1292 | // In case we are doing CPU profiling we'd like to do all the work |
| 1293 | // in the main thread, not in the special thread created by |
// TCMalloc_ListAllProcessThreads, so that the CPU profiler can
| 1295 | // collect all its samples. The machinery of |
| 1296 | // TCMalloc_ListAllProcessThreads conflicts with the CPU profiler |
| 1297 | // by also relying on signals and ::sigaction. We can do this |
| 1298 | // (run everything in the main thread) safely only if there's just |
| 1299 | // the main thread itself in our process. This variable reflects |
| 1300 | // these two conditions: |
| 1301 | bool want_and_can_run_in_main_thread = |
| 1302 | ProfilingIsEnabledForAllThreads() && |
| 1303 | TCMalloc_ListAllProcessThreads(NULL, IsOneThread) == 1; |
| 1304 | // When the normal path of TCMalloc_ListAllProcessThreads below is taken, |
| 1305 | // we fully suspend the threads right here before any liveness checking |
| 1306 | // and keep them suspended for the whole time of liveness checking |
| 1307 | // inside of the IgnoreLiveThreadsLocked callback. |
// (The threads can't (de)allocate due to the lock on the delete hook,
// but if not suspended they could still mess with the pointer
// graph while we walk it.)
| 1311 | int r = want_and_can_run_in_main_thread |
| 1312 | ? IgnoreLiveThreadsLocked(NULL, 1, &self_thread_pid, dummy_ap) |
| 1313 | : TCMalloc_ListAllProcessThreads(NULL, IgnoreLiveThreadsLocked); |
| 1314 | need_to_ignore_non_thread_objects = r < 0; |
| 1315 | if (r < 0) { |
| 1316 | RAW_LOG(WARNING, "Thread finding failed with %d errno=%d", r, errno); |
| 1317 | if (thread_listing_status == CALLBACK_COMPLETED) { |
| 1318 | RAW_LOG(INFO, "Thread finding callback " |
| 1319 | "finished ok; hopefully everything is fine"); |
| 1320 | need_to_ignore_non_thread_objects = false; |
| 1321 | } else if (thread_listing_status == CALLBACK_STARTED) { |
| 1322 | RAW_LOG(FATAL, "Thread finding callback was " |
| 1323 | "interrupted or crashed; can't fix this"); |
| 1324 | } else { // CALLBACK_NOT_STARTED |
| 1325 | RAW_LOG(ERROR, "Could not find thread stacks. " |
| 1326 | "Will likely report false leak positives."); |
| 1327 | } |
| 1328 | } else if (r != 0) { |
| 1329 | RAW_LOG(ERROR, "Thread stacks not found for %d threads. " |
| 1330 | "Will likely report false leak positives.", r); |
| 1331 | } else { |
| 1332 | RAW_VLOG(11, "Thread stacks appear to be found for all threads"); |
| 1333 | } |
| 1334 | } else { |
| 1335 | RAW_LOG(WARNING, "Not looking for thread stacks; " |
| 1336 | "objects reachable only from there " |
| 1337 | "will be reported as leaks"); |
| 1338 | } |
| 1339 | // Do all other live data ignoring here if we did not do it |
| 1340 | // within thread listing callback with all threads stopped. |
| 1341 | if (need_to_ignore_non_thread_objects) { |
| 1342 | if (FLAGS_heap_check_ignore_global_live) { |
| 1343 | UseProcMapsLocked(RECORD_GLOBAL_DATA); |
| 1344 | } |
| 1345 | IgnoreNonThreadLiveObjectsLocked(); |
| 1346 | } |
| 1347 | if (live_objects_total) { |
| 1348 | RAW_VLOG(10, "Ignoring %" PRId64 " reachable objects of %" PRId64 " bytes", |
| 1349 | live_objects_total, live_bytes_total); |
| 1350 | } |
| 1351 | // Free these: we made them here and heap_profile never saw them |
| 1352 | Allocator::DeleteAndNull(&live_objects); |
| 1353 | Allocator::DeleteAndNull(&stack_tops); |
| 1354 | max_heap_object_size = old_max_heap_object_size; // reset this var |
| 1355 | } |
| 1356 | |
| 1357 | // Alignment at which we should consider pointer positions |
| 1358 | // in IgnoreLiveObjectsLocked. Will normally use the value of |
| 1359 | // FLAGS_heap_check_pointer_source_alignment. |
| 1360 | static size_t pointer_source_alignment = kPointerSourceAlignment; |
| 1361 | // Global lock for HeapLeakChecker::DoNoLeaks |
| 1362 | // to protect pointer_source_alignment. |
| 1363 | static SpinLock alignment_checker_lock(SpinLock::LINKER_INITIALIZED); |
| 1364 | |
// This function changes the live bits in the heap_profile table's state:
// we only record the live objects, so that they can be skipped later.
| 1367 | // |
| 1368 | // When checking if a byte sequence points to a heap object we use |
| 1369 | // HeapProfileTable::FindInsideAlloc to handle both pointers to |
| 1370 | // the start and inside of heap-allocated objects. |
| 1371 | // The "inside" case needs to be checked to support |
| 1372 | // at least the following relatively common cases: |
| 1373 | // - C++ arrays allocated with new FooClass[size] for classes |
| 1374 | // with destructors have their size recorded in a sizeof(int) field |
| 1375 | // before the place normal pointers point to. |
| 1376 | // - basic_string<>-s for e.g. the C++ library of gcc 3.4 |
| 1377 | // have the meta-info in basic_string<...>::_Rep recorded |
| 1378 | // before the place normal pointers point to. |
| 1379 | // - Multiple-inherited objects have their pointers when cast to |
| 1380 | // different base classes pointing inside of the actually |
| 1381 | // allocated object. |
| 1382 | // - Sometimes reachability pointers point to member objects of heap objects, |
| 1383 | // and then those member objects point to the full heap object. |
| 1384 | // - Third party UnicodeString: it stores a 32-bit refcount |
| 1385 | // (in both 32-bit and 64-bit binaries) as the first uint32 |
| 1386 | // in the allocated memory and a normal pointer points at |
| 1387 | // the second uint32 behind the refcount. |
// By finding these additional objects here
// we slightly increase the chance of mistaking random memory bytes
// for a pointer and missing a leak in a particular run of a binary.
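// A minimal sketch of such an inside pointer (hypothetical class names):
//   struct Base1 { virtual ~Base1() {} int a; };
//   struct Base2 { virtual ~Base2() {} int b; };
//   struct Both : Base1, Base2 { };
//   Base2* p = static_cast<Base2*>(new Both); // on typical ABIs p points
//                                             // sizeof(Base1) bytes inside
// Only an inside-pointer lookup like FindInsideAlloc can attribute p
// to the live "Both" allocation.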
| 1391 | // |
| 1392 | /*static*/ void HeapLeakChecker::IgnoreLiveObjectsLocked(const char* name, |
| 1393 | const char* name2) { |
| 1394 | RAW_DCHECK(heap_checker_lock.IsHeld(), ""); |
| 1395 | int64 live_object_count = 0; |
| 1396 | int64 live_byte_count = 0; |
| 1397 | while (!live_objects->empty()) { |
| 1398 | const char* object = |
| 1399 | reinterpret_cast<const char*>(live_objects->back().ptr); |
| 1400 | size_t size = live_objects->back().size; |
| 1401 | const ObjectPlacement place = live_objects->back().place; |
| 1402 | live_objects->pop_back(); |
| 1403 | if (place == MUST_BE_ON_HEAP && heap_profile->MarkAsLive(object)) { |
| 1404 | live_object_count += 1; |
| 1405 | live_byte_count += size; |
| 1406 | } |
| 1407 | RAW_VLOG(13, "Looking for heap pointers in %p of %" PRIuS " bytes", |
| 1408 | object, size); |
| 1409 | const char* const whole_object = object; |
| 1410 | size_t const whole_size = size; |
// Try interpreting any byte sequence in object,size as a heap pointer:
| 1412 | const size_t remainder = AsInt(object) % pointer_source_alignment; |
| 1413 | if (remainder) { |
| 1414 | object += pointer_source_alignment - remainder; |
| 1415 | if (size >= pointer_source_alignment - remainder) { |
| 1416 | size -= pointer_source_alignment - remainder; |
| 1417 | } else { |
| 1418 | size = 0; |
| 1419 | } |
| 1420 | } |
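// E.g. with pointer_source_alignment == 8 and an object at address 0x1003
// of 32 bytes: remainder == 3, so scanning starts at 0x1008 and the
// remaining size shrinks to 27 bytes (illustrative numbers).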
| 1421 | if (size < sizeof(void*)) continue; |
| 1422 | |
| 1423 | #ifdef NO_FRAME_POINTER |
| 1424 | // Frame pointer omission requires us to use libunwind, which uses direct |
| 1425 | // mmap and munmap system calls, and that needs special handling. |
| 1426 | if (name2 == kUnnamedProcSelfMapEntry) { |
static const uintptr_t page_mask = ~(getpagesize() - 1);
const uintptr_t addr = reinterpret_cast<uintptr_t>(object);
// Keep only page-aligned, page-sized regions: mappings slurped from
// /proc/self/maps always have these properties. (The previous test of
// "(addr & page_mask) == 0" would almost never match a real mapping
// address.)
if ((addr & page_mask) == addr && (size & page_mask) == size) {
| 1430 | // This is an object we slurped from /proc/self/maps. |
| 1431 | // It may or may not be readable at this point. |
| 1432 | // |
| 1433 | // In case all the above conditions made a mistake, and the object is |
| 1434 | // not related to libunwind, we also verify that it's not readable |
| 1435 | // before ignoring it. |
| 1436 | if (msync(const_cast<char*>(object), size, MS_ASYNC) != 0) { |
| 1437 | // Skip unreadable object, so we don't crash trying to sweep it. |
| 1438 | RAW_VLOG(0, "Ignoring inaccessible object [%p, %p) " |
| 1439 | "(msync error %d (%s))", |
| 1440 | object, object + size, errno, strerror(errno)); |
| 1441 | continue; |
| 1442 | } |
| 1443 | } |
| 1444 | } |
| 1445 | #endif |
| 1446 | |
| 1447 | const char* const max_object = object + size - sizeof(void*); |
| 1448 | while (object <= max_object) { |
| 1449 | // potentially unaligned load: |
| 1450 | const uintptr_t addr = *reinterpret_cast<const uintptr_t*>(object); |
| 1451 | // Do fast check before the more expensive HaveOnHeapLocked lookup: |
| 1452 | // this code runs for all memory words that are potentially pointers: |
| 1453 | const bool can_be_on_heap = |
// Order the tests by the likelihood of a test failing in 64/32 bit modes.
// Yes, this matters: we either lose 5..6% speed in 32 bit mode
// (which is already slower) or slow down by a factor of 1.5..1.91
// in 64 bit mode.
| 1457 | // After the alignment test got dropped the above performance figures |
| 1458 | // must have changed; might need to revisit this. |
| 1459 | #if defined(__x86_64__) |
| 1460 | addr <= max_heap_address && // <= is for 0-sized object with max addr |
| 1461 | min_heap_address <= addr; |
| 1462 | #else |
| 1463 | min_heap_address <= addr && |
| 1464 | addr <= max_heap_address; // <= is for 0-sized object with max addr |
| 1465 | #endif |
| 1466 | if (can_be_on_heap) { |
| 1467 | const void* ptr = reinterpret_cast<const void*>(addr); |
| 1468 | // Too expensive (inner loop): manually uncomment when debugging: |
| 1469 | // RAW_VLOG(17, "Trying pointer to %p at %p", ptr, object); |
| 1470 | size_t object_size; |
| 1471 | if (HaveOnHeapLocked(&ptr, &object_size) && |
| 1472 | heap_profile->MarkAsLive(ptr)) { |
| 1473 | // We take the (hopefully low) risk here of encountering by accident |
| 1474 | // a byte sequence in memory that matches an address of |
| 1475 | // a heap object which is in fact leaked. |
| 1476 | // I.e. in very rare and probably not repeatable/lasting cases |
| 1477 | // we might miss some real heap memory leaks. |
| 1478 | RAW_VLOG(14, "Found pointer to %p of %" PRIuS " bytes at %p " |
| 1479 | "inside %p of size %" PRIuS "", |
| 1480 | ptr, object_size, object, whole_object, whole_size); |
| 1481 | if (VLOG_IS_ON(15)) { |
// log call stacks to help debug why something is not considered a leak
| 1483 | HeapProfileTable::AllocInfo alloc; |
| 1484 | if (!heap_profile->FindAllocDetails(ptr, &alloc)) { |
| 1485 | RAW_LOG(FATAL, "FindAllocDetails failed on ptr %p", ptr); |
| 1486 | } |
| 1487 | RAW_LOG(INFO, "New live %p object's alloc stack:", ptr); |
| 1488 | for (int i = 0; i < alloc.stack_depth; ++i) { |
| 1489 | RAW_LOG(INFO, " @ %p", alloc.call_stack[i]); |
| 1490 | } |
| 1491 | } |
| 1492 | live_object_count += 1; |
| 1493 | live_byte_count += object_size; |
| 1494 | live_objects->push_back(AllocObject(ptr, object_size, |
| 1495 | IGNORED_ON_HEAP)); |
| 1496 | } |
| 1497 | } |
| 1498 | object += pointer_source_alignment; |
| 1499 | } |
| 1500 | } |
| 1501 | live_objects_total += live_object_count; |
| 1502 | live_bytes_total += live_byte_count; |
| 1503 | if (live_object_count) { |
| 1504 | RAW_VLOG(10, "Removed %" PRId64 " live heap objects of %" PRId64 " bytes: %s%s", |
| 1505 | live_object_count, live_byte_count, name, name2); |
| 1506 | } |
| 1507 | } |
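// A sketch of the false-liveness risk noted above (made-up values): if an
// unrelated live buffer happens to contain bytes that, read as one word,
// equal the address of a genuinely leaked heap object, that object gets
// marked live by the scan and its leak goes unreported for this run.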
| 1508 | |
| 1509 | //---------------------------------------------------------------------- |
| 1510 | // HeapLeakChecker leak check disabling components |
| 1511 | //---------------------------------------------------------------------- |
| 1512 | |
| 1513 | // static |
| 1514 | void HeapLeakChecker::DisableChecksIn(const char* pattern) { |
| 1515 | RAW_LOG(WARNING, "DisableChecksIn(%s) is ignored", pattern); |
| 1516 | } |
| 1517 | |
| 1518 | // static |
| 1519 | void HeapLeakChecker::DoIgnoreObject(const void* ptr) { |
| 1520 | SpinLockHolder l(&heap_checker_lock); |
| 1521 | if (!heap_checker_on) return; |
| 1522 | size_t object_size; |
| 1523 | if (!HaveOnHeapLocked(&ptr, &object_size)) { |
| 1524 | RAW_LOG(ERROR, "No live heap object at %p to ignore", ptr); |
| 1525 | } else { |
| 1526 | RAW_VLOG(10, "Going to ignore live object at %p of %" PRIuS " bytes", |
| 1527 | ptr, object_size); |
| 1528 | if (ignored_objects == NULL) { |
| 1529 | ignored_objects = new(Allocator::Allocate(sizeof(IgnoredObjectsMap))) |
| 1530 | IgnoredObjectsMap; |
| 1531 | } |
| 1532 | if (!ignored_objects->insert(make_pair(AsInt(ptr), object_size)).second) { |
| 1533 | RAW_LOG(WARNING, "Object at %p is already being ignored", ptr); |
| 1534 | } |
| 1535 | } |
| 1536 | } |
| 1537 | |
| 1538 | // static |
| 1539 | void HeapLeakChecker::UnIgnoreObject(const void* ptr) { |
| 1540 | SpinLockHolder l(&heap_checker_lock); |
| 1541 | if (!heap_checker_on) return; |
| 1542 | size_t object_size; |
| 1543 | if (!HaveOnHeapLocked(&ptr, &object_size)) { |
| 1544 | RAW_LOG(FATAL, "No live heap object at %p to un-ignore", ptr); |
| 1545 | } else { |
| 1546 | bool found = false; |
| 1547 | if (ignored_objects) { |
| 1548 | IgnoredObjectsMap::iterator object = ignored_objects->find(AsInt(ptr)); |
| 1549 | if (object != ignored_objects->end() && object_size == object->second) { |
| 1550 | ignored_objects->erase(object); |
| 1551 | found = true; |
| 1552 | RAW_VLOG(10, "Now not going to ignore live object " |
| 1553 | "at %p of %" PRIuS " bytes", ptr, object_size); |
| 1554 | } |
| 1555 | } |
| 1556 | if (!found) RAW_LOG(FATAL, "Object at %p has not been ignored", ptr); |
| 1557 | } |
| 1558 | } |
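// Typical use of the public wrappers around the two functions above
// (a sketch; g_intentional is a hypothetical global):
//   Foo* g_intentional = new Foo;
//   HeapLeakChecker::IgnoreObject(g_intentional); // it and what it points
//                                                 // to won't be reported
//   ...
//   HeapLeakChecker::UnIgnoreObject(g_intentional); // back to normal checking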
| 1559 | |
| 1560 | //---------------------------------------------------------------------- |
| 1561 | // HeapLeakChecker non-static functions |
| 1562 | //---------------------------------------------------------------------- |
| 1563 | |
| 1564 | char* HeapLeakChecker::MakeProfileNameLocked() { |
| 1565 | RAW_DCHECK(lock_->IsHeld(), ""); |
| 1566 | RAW_DCHECK(heap_checker_lock.IsHeld(), ""); |
| 1567 | const int len = profile_name_prefix->size() + strlen(name_) + 5 + |
| 1568 | strlen(HeapProfileTable::kFileExt) + 1; |
| 1569 | char* file_name = reinterpret_cast<char*>(Allocator::Allocate(len)); |
| 1570 | snprintf(file_name, len, "%s.%s-end%s", |
| 1571 | profile_name_prefix->c_str(), name_, |
| 1572 | HeapProfileTable::kFileExt); |
| 1573 | return file_name; |
| 1574 | } |
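// E.g. (assuming HeapProfileTable::kFileExt == ".heap" and the default
// /tmp dump directory): a checker named "mytest" in binary "server" with
// main-thread pid 1234 produces "/tmp/server.1234.mytest-end.heap".
// In the length computation above, "+ 5" covers the '.' and "-end",
// and "+ 1" the terminating NUL.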
| 1575 | |
| 1576 | void HeapLeakChecker::Create(const char *name, bool make_start_snapshot) { |
| 1577 | SpinLockHolder l(lock_); |
| 1578 | name_ = NULL; // checker is inactive |
| 1579 | start_snapshot_ = NULL; |
| 1580 | has_checked_ = false; |
| 1581 | inuse_bytes_increase_ = 0; |
| 1582 | inuse_allocs_increase_ = 0; |
| 1583 | keep_profiles_ = false; |
| 1584 | char* n = new char[strlen(name) + 1]; // do this before we lock |
| 1585 | IgnoreObject(n); // otherwise it might be treated as live due to our stack |
| 1586 | { // Heap activity in other threads is paused for this whole scope. |
| 1587 | SpinLockHolder al(&alignment_checker_lock); |
| 1588 | SpinLockHolder hl(&heap_checker_lock); |
| 1589 | MemoryRegionMap::LockHolder ml; |
| 1590 | if (heap_checker_on && profile_name_prefix != NULL) { |
| 1591 | RAW_DCHECK(strchr(name, '/') == NULL, "must be a simple name"); |
| 1592 | memcpy(n, name, strlen(name) + 1); |
| 1593 | name_ = n; // checker is active |
| 1594 | if (make_start_snapshot) { |
| 1595 | start_snapshot_ = heap_profile->TakeSnapshot(); |
| 1596 | } |
| 1597 | |
| 1598 | const HeapProfileTable::Stats& t = heap_profile->total(); |
| 1599 | const size_t start_inuse_bytes = t.alloc_size - t.free_size; |
| 1600 | const size_t start_inuse_allocs = t.allocs - t.frees; |
| 1601 | RAW_VLOG(10, "Start check \"%s\" profile: %" PRIuS " bytes " |
| 1602 | "in %" PRIuS " objects", |
| 1603 | name_, start_inuse_bytes, start_inuse_allocs); |
| 1604 | } else { |
| 1605 | RAW_LOG(WARNING, "Heap checker is not active, " |
| 1606 | "hence checker \"%s\" will do nothing!", name); |
| 1607 | RAW_LOG(WARNING, "To activate set the HEAPCHECK environment variable.\n"); |
| 1608 | } |
| 1609 | } |
| 1610 | if (name_ == NULL) { |
| 1611 | UnIgnoreObject(n); |
| 1612 | delete[] n; // must be done after we unlock |
| 1613 | } |
| 1614 | } |
| 1615 | |
| 1616 | HeapLeakChecker::HeapLeakChecker(const char *name) : lock_(new SpinLock) { |
| 1617 | RAW_DCHECK(strcmp(name, "_main_") != 0, "_main_ is reserved"); |
| 1618 | Create(name, true/*create start_snapshot_*/); |
| 1619 | } |
| 1620 | |
| 1621 | HeapLeakChecker::HeapLeakChecker() : lock_(new SpinLock) { |
| 1622 | if (FLAGS_heap_check_before_constructors) { |
| 1623 | // We want to check for leaks of objects allocated during global |
| 1624 | // constructors (i.e., objects allocated already). So we do not |
| 1625 | // create a baseline snapshot and hence check for leaks of objects |
| 1626 | // that may have already been created. |
| 1627 | Create("_main_", false); |
| 1628 | } else { |
| 1629 | // We want to ignore leaks of objects allocated during global |
| 1630 | // constructors (i.e., objects allocated already). So we snapshot |
| 1631 | // the current heap contents and use them as a baseline that is |
| 1632 | // not reported by the leak checker. |
| 1633 | Create("_main_", true); |
| 1634 | } |
| 1635 | } |
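// Canonical use of the named constructor above (a sketch; RunSearch is a
// hypothetical function under test):
//   HeapLeakChecker checker("search_leaks"); // takes a baseline snapshot
//   RunSearch();
//   if (!checker.NoLeaks()) exit(1); // diffs the heap against the snapshot
// Some *NoLeaks|SameHeap method must run before the checker is destroyed
// (see ~HeapLeakChecker below); the parameterless constructor is reserved
// for the whole-program "_main_" check.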
| 1636 | |
| 1637 | ssize_t HeapLeakChecker::BytesLeaked() const { |
| 1638 | SpinLockHolder l(lock_); |
| 1639 | if (!has_checked_) { |
| 1640 | RAW_LOG(FATAL, "*NoLeaks|SameHeap must execute before this call"); |
| 1641 | } |
| 1642 | return inuse_bytes_increase_; |
| 1643 | } |
| 1644 | |
| 1645 | ssize_t HeapLeakChecker::ObjectsLeaked() const { |
| 1646 | SpinLockHolder l(lock_); |
| 1647 | if (!has_checked_) { |
| 1648 | RAW_LOG(FATAL, "*NoLeaks|SameHeap must execute before this call"); |
| 1649 | } |
| 1650 | return inuse_allocs_increase_; |
| 1651 | } |
| 1652 | |
| 1653 | // Save pid of main thread for using in naming dump files |
| 1654 | static int32 main_thread_pid = getpid(); |
| 1655 | #ifdef HAVE_PROGRAM_INVOCATION_NAME |
| 1656 | #ifdef __UCLIBC__ |
| 1657 | extern const char* program_invocation_name; |
| 1658 | extern const char* program_invocation_short_name; |
| 1659 | #else |
| 1660 | extern char* program_invocation_name; |
| 1661 | extern char* program_invocation_short_name; |
| 1662 | #endif |
| 1663 | static const char* invocation_name() { return program_invocation_short_name; } |
| 1664 | static string invocation_path() { return program_invocation_name; } |
| 1665 | #else |
| 1666 | static const char* invocation_name() { return "<your binary>"; } |
| 1667 | static string invocation_path() { return "<your binary>"; } |
| 1668 | #endif |
| 1669 | |
| 1670 | // Prints commands that users can run to get more information |
| 1671 | // about the reported leaks. |
| 1672 | static void SuggestPprofCommand(const char* pprof_file_arg) { |
| 1673 | // Extra help information to print for the user when the test is |
| 1674 | // being run in a way where the straightforward pprof command will |
| 1675 | // not suffice. |
| 1676 | string extra_help; |
| 1677 | |
| 1678 | // Common header info to print for remote runs |
| 1679 | const string remote_header = |
| 1680 | "This program is being executed remotely and therefore the pprof\n" |
| 1681 | "command printed above will not work. Either run this program\n" |
| 1682 | "locally, or adjust the pprof command as follows to allow it to\n" |
| 1683 | "work on your local machine:\n"; |
| 1684 | |
| 1685 | // Extra command for fetching remote data |
| 1686 | string fetch_cmd; |
| 1687 | |
| 1688 | RAW_LOG(WARNING, |
| 1689 | "\n\n" |
| 1690 | "If the preceding stack traces are not enough to find " |
| 1691 | "the leaks, try running THIS shell command:\n\n" |
| 1692 | "%s%s %s \"%s\" --inuse_objects --lines --heapcheck " |
| 1693 | " --edgefraction=1e-10 --nodefraction=1e-10 --gv\n" |
| 1694 | "\n" |
| 1695 | "%s" |
| 1696 | "If you are still puzzled about why the leaks are " |
| 1697 | "there, try rerunning this program with " |
| 1698 | "HEAP_CHECK_TEST_POINTER_ALIGNMENT=1 and/or with " |
| 1699 | "HEAP_CHECK_MAX_POINTER_OFFSET=-1\n" |
| 1700 | "If the leak report occurs in a small fraction of runs, " |
| 1701 | "try running with TCMALLOC_MAX_FREE_QUEUE_SIZE of few hundred MB " |
| 1702 | "or with TCMALLOC_RECLAIM_MEMORY=false, " // only works for debugalloc |
| 1703 | "it might help find leaks more repeatably\n", |
| 1704 | fetch_cmd.c_str(), |
| 1705 | "pprof", // works as long as pprof is on your path |
| 1706 | invocation_path().c_str(), |
| 1707 | pprof_file_arg, |
| 1708 | extra_help.c_str() |
| 1709 | ); |
| 1710 | } |
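// With the format string above, a local run prints a suggestion roughly like
// (illustrative paths):
//   pprof /path/to/server "/tmp/server.1234._main_-end.heap" --inuse_objects
//       --lines --heapcheck --edgefraction=1e-10 --nodefraction=1e-10 --gv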
| 1711 | |
| 1712 | bool HeapLeakChecker::DoNoLeaks(ShouldSymbolize should_symbolize) { |
| 1713 | SpinLockHolder l(lock_); |
| 1714 | // The locking also helps us keep the messages |
| 1715 | // for the two checks close together. |
| 1716 | SpinLockHolder al(&alignment_checker_lock); |
| 1717 | |
| 1718 | // thread-safe: protected by alignment_checker_lock |
| 1719 | static bool have_disabled_hooks_for_symbolize = false; |
| 1720 | // Once we've checked for leaks and symbolized the results once, it's |
| 1721 | // not safe to do it again. This is because in order to symbolize |
| 1722 | // safely, we had to disable all the malloc hooks here, so we no |
| 1723 | // longer can be confident we've collected all the data we need. |
| 1724 | if (have_disabled_hooks_for_symbolize) { |
| 1725 | RAW_LOG(FATAL, "Must not call heap leak checker manually after " |
| 1726 | " program-exit's automatic check."); |
| 1727 | } |
| 1728 | |
| 1729 | HeapProfileTable::Snapshot* leaks = NULL; |
| 1730 | char* pprof_file = NULL; |
| 1731 | |
| 1732 | { |
| 1733 | // Heap activity in other threads is paused during this function |
| 1734 | // (i.e. until we got all profile difference info). |
| 1735 | SpinLockHolder hl(&heap_checker_lock); |
if (!heap_checker_on) {
if (name_ != NULL) { // leak checking was enabled when the checker was created
| 1738 | RAW_LOG(WARNING, "Heap leak checker got turned off after checker " |
| 1739 | "\"%s\" has been created, no leak check is being done for it!", |
| 1740 | name_); |
| 1741 | } |
| 1742 | return true; |
| 1743 | } |
| 1744 | |
| 1745 | // Update global_region_caller_ranges. They may need to change since |
| 1746 | // e.g. initialization because shared libraries might have been loaded or |
| 1747 | // unloaded. |
| 1748 | Allocator::DeleteAndNullIfNot(&global_region_caller_ranges); |
| 1749 | ProcMapsResult pm_result = UseProcMapsLocked(DISABLE_LIBRARY_ALLOCS); |
| 1750 | RAW_CHECK(pm_result == PROC_MAPS_USED, ""); |
| 1751 | |
| 1752 | // Keep track of number of internally allocated objects so we |
// can detect leaks in the heap-leak-checker itself
| 1754 | const int initial_allocs = Allocator::alloc_count(); |
| 1755 | |
| 1756 | if (name_ == NULL) { |
| 1757 | RAW_LOG(FATAL, "Heap leak checker must not be turned on " |
| 1758 | "after construction of a HeapLeakChecker"); |
| 1759 | } |
| 1760 | |
| 1761 | MemoryRegionMap::LockHolder ml; |
| 1762 | int a_local_var; // Use our stack ptr to make stack data live: |
| 1763 | |
| 1764 | // Make the heap profile, other threads are locked out. |
| 1765 | HeapProfileTable::Snapshot* base = |
| 1766 | reinterpret_cast<HeapProfileTable::Snapshot*>(start_snapshot_); |
| 1767 | RAW_DCHECK(FLAGS_heap_check_pointer_source_alignment > 0, ""); |
| 1768 | pointer_source_alignment = FLAGS_heap_check_pointer_source_alignment; |
| 1769 | IgnoreAllLiveObjectsLocked(&a_local_var); |
| 1770 | leaks = heap_profile->NonLiveSnapshot(base); |
| 1771 | |
| 1772 | inuse_bytes_increase_ = static_cast<ssize_t>(leaks->total().alloc_size); |
| 1773 | inuse_allocs_increase_ = static_cast<ssize_t>(leaks->total().allocs); |
| 1774 | if (leaks->Empty()) { |
| 1775 | heap_profile->ReleaseSnapshot(leaks); |
| 1776 | leaks = NULL; |
| 1777 | |
| 1778 | // We can only check for internal leaks along the no-user-leak |
| 1779 | // path since in the leak path we temporarily release |
| 1780 | // heap_checker_lock and another thread can come in and disturb |
| 1781 | // allocation counts. |
| 1782 | if (Allocator::alloc_count() != initial_allocs) { |
| 1783 | RAW_LOG(FATAL, "Internal HeapChecker leak of %d objects ; %d -> %d", |
| 1784 | Allocator::alloc_count() - initial_allocs, |
| 1785 | initial_allocs, Allocator::alloc_count()); |
| 1786 | } |
| 1787 | } else if (FLAGS_heap_check_test_pointer_alignment) { |
| 1788 | if (pointer_source_alignment == 1) { |
| 1789 | RAW_LOG(WARNING, "--heap_check_test_pointer_alignment has no effect: " |
| 1790 | "--heap_check_pointer_source_alignment was already set to 1"); |
| 1791 | } else { |
// Try with reduced pointer alignment
| 1793 | pointer_source_alignment = 1; |
| 1794 | IgnoreAllLiveObjectsLocked(&a_local_var); |
| 1795 | HeapProfileTable::Snapshot* leaks_wo_align = |
| 1796 | heap_profile->NonLiveSnapshot(base); |
| 1797 | pointer_source_alignment = FLAGS_heap_check_pointer_source_alignment; |
| 1798 | if (leaks_wo_align->Empty()) { |
| 1799 | RAW_LOG(WARNING, "Found no leaks without pointer alignment: " |
| 1800 | "something might be placing pointers at " |
| 1801 | "unaligned addresses! This needs to be fixed."); |
| 1802 | } else { |
| 1803 | RAW_LOG(INFO, "Found leaks without pointer alignment as well: " |
| 1804 | "unaligned pointers must not be the cause of leaks."); |
| 1805 | RAW_LOG(INFO, "--heap_check_test_pointer_alignment did not help " |
| 1806 | "to diagnose the leaks."); |
| 1807 | } |
| 1808 | heap_profile->ReleaseSnapshot(leaks_wo_align); |
| 1809 | } |
| 1810 | } |
| 1811 | |
| 1812 | if (leaks != NULL) { |
| 1813 | pprof_file = MakeProfileNameLocked(); |
| 1814 | } |
| 1815 | } |
| 1816 | |
| 1817 | has_checked_ = true; |
| 1818 | if (leaks == NULL) { |
| 1819 | if (FLAGS_heap_check_max_pointer_offset == -1) { |
| 1820 | RAW_LOG(WARNING, |
| 1821 | "Found no leaks without max_pointer_offset restriction: " |
| 1822 | "it's possible that the default value of " |
| 1823 | "heap_check_max_pointer_offset flag is too low. " |
| 1824 | "Do you use pointers with larger than that offsets " |
| 1825 | "pointing in the middle of heap-allocated objects?"); |
| 1826 | } |
| 1827 | const HeapProfileTable::Stats& stats = heap_profile->total(); |
| 1828 | RAW_VLOG(heap_checker_info_level, |
| 1829 | "No leaks found for check \"%s\" " |
| 1830 | "(but no 100%% guarantee that there aren't any): " |
| 1831 | "found %" PRId64 " reachable heap objects of %" PRId64 " bytes", |
| 1832 | name_, |
| 1833 | int64(stats.allocs - stats.frees), |
| 1834 | int64(stats.alloc_size - stats.free_size)); |
| 1835 | } else { |
| 1836 | if (should_symbolize == SYMBOLIZE) { |
| 1837 | // To turn addresses into symbols, we need to fork, which is a |
| 1838 | // problem if both parent and child end up trying to call the |
| 1839 | // same malloc-hooks we've set up, at the same time. To avoid |
| 1840 | // trouble, we turn off the hooks before symbolizing. Note that |
| 1841 | // this makes it unsafe to ever leak-report again! Luckily, we |
| 1842 | // typically only want to report once in a program's run, at the |
| 1843 | // very end. |
| 1844 | if (MallocHook::GetNewHook() == NewHook) |
| 1845 | MallocHook::SetNewHook(NULL); |
| 1846 | if (MallocHook::GetDeleteHook() == DeleteHook) |
| 1847 | MallocHook::SetDeleteHook(NULL); |
| 1848 | MemoryRegionMap::Shutdown(); |
| 1849 | // Make sure all the hooks really got unset: |
| 1850 | RAW_CHECK(MallocHook::GetNewHook() == NULL, ""); |
| 1851 | RAW_CHECK(MallocHook::GetDeleteHook() == NULL, ""); |
| 1852 | RAW_CHECK(MallocHook::GetMmapHook() == NULL, ""); |
| 1853 | RAW_CHECK(MallocHook::GetSbrkHook() == NULL, ""); |
| 1854 | have_disabled_hooks_for_symbolize = true; |
| 1855 | leaks->ReportLeaks(name_, pprof_file, true); // true = should_symbolize |
| 1856 | } else { |
| 1857 | leaks->ReportLeaks(name_, pprof_file, false); |
| 1858 | } |
| 1859 | if (FLAGS_heap_check_identify_leaks) { |
| 1860 | leaks->ReportIndividualObjects(); |
| 1861 | } |
| 1862 | |
| 1863 | SuggestPprofCommand(pprof_file); |
| 1864 | |
| 1865 | { |
| 1866 | SpinLockHolder hl(&heap_checker_lock); |
| 1867 | heap_profile->ReleaseSnapshot(leaks); |
| 1868 | Allocator::Free(pprof_file); |
| 1869 | } |
| 1870 | } |
| 1871 | |
| 1872 | return (leaks == NULL); |
| 1873 | } |
| 1874 | |
| 1875 | HeapLeakChecker::~HeapLeakChecker() { |
if (name_ != NULL) { // leak checking was enabled when the checker was created
| 1877 | if (!has_checked_) { |
| 1878 | RAW_LOG(FATAL, "Some *NoLeaks|SameHeap method" |
| 1879 | " must be called on any created HeapLeakChecker"); |
| 1880 | } |
| 1881 | |
| 1882 | // Deallocate any snapshot taken at start |
| 1883 | if (start_snapshot_ != NULL) { |
| 1884 | SpinLockHolder l(&heap_checker_lock); |
| 1885 | heap_profile->ReleaseSnapshot( |
| 1886 | reinterpret_cast<HeapProfileTable::Snapshot*>(start_snapshot_)); |
| 1887 | } |
| 1888 | |
| 1889 | UnIgnoreObject(name_); |
| 1890 | delete[] name_; |
| 1891 | name_ = NULL; |
| 1892 | } |
| 1893 | delete lock_; |
| 1894 | } |
| 1895 | |
| 1896 | //---------------------------------------------------------------------- |
| 1897 | // HeapLeakChecker overall heap check components |
| 1898 | //---------------------------------------------------------------------- |
| 1899 | |
| 1900 | // static |
| 1901 | bool HeapLeakChecker::IsActive() { |
| 1902 | SpinLockHolder l(&heap_checker_lock); |
| 1903 | return heap_checker_on; |
| 1904 | } |
| 1905 | |
| 1906 | vector<HeapCleaner::void_function>* HeapCleaner::heap_cleanups_ = NULL; |
| 1907 | |
// When a HeapCleaner object is initialized, add its function to the static list
| 1909 | // of cleaners to be run before leaks checking. |
| 1910 | HeapCleaner::HeapCleaner(void_function f) { |
| 1911 | if (heap_cleanups_ == NULL) |
| 1912 | heap_cleanups_ = new vector<HeapCleaner::void_function>; |
| 1913 | heap_cleanups_->push_back(f); |
| 1914 | } |
| 1915 | |
| 1916 | // Run all of the cleanup functions and delete the vector. |
| 1917 | void HeapCleaner::RunHeapCleanups() { |
| 1918 | if (!heap_cleanups_) |
| 1919 | return; |
for (size_t i = 0; i < heap_cleanups_->size(); i++) {
| 1921 | void (*f)(void) = (*heap_cleanups_)[i]; |
| 1922 | f(); |
| 1923 | } |
| 1924 | delete heap_cleanups_; |
| 1925 | heap_cleanups_ = NULL; |
| 1926 | } |
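// Registering a cleanup is just defining a static HeapCleaner (a sketch;
// MyCleanup and g_cache are hypothetical):
//   static void MyCleanup() { delete g_cache; g_cache = NULL; }
//   static HeapCleaner my_cleanup_runner(&MyCleanup);
// The REGISTER_HEAPCHECK_CLEANUP macro in heap-checker.h wraps this pattern.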
| 1927 | |
| 1928 | // Program exit heap cleanup registered as a module object destructor. |
| 1929 | // Will not get executed when we crash on a signal. |
| 1930 | // |
| 1931 | void HeapLeakChecker_RunHeapCleanups() { |
| 1932 | if (FLAGS_heap_check == "local") // don't check heap in this mode |
| 1933 | return; |
| 1934 | { SpinLockHolder l(&heap_checker_lock); |
| 1935 | // can get here (via forks?) with other pids |
| 1936 | if (heap_checker_pid != getpid()) return; |
| 1937 | } |
| 1938 | HeapCleaner::RunHeapCleanups(); |
| 1939 | if (!FLAGS_heap_check_after_destructors) HeapLeakChecker::DoMainHeapCheck(); |
| 1940 | } |
| 1941 | |
| 1942 | static bool internal_init_start_has_run = false; |
| 1943 | |
// Called exactly once, sometime before main() (ideally just before it).
| 1945 | // This picks a good unique name for the dumped leak checking heap profiles. |
| 1946 | // |
| 1947 | // Because we crash when InternalInitStart is called more than once, |
| 1948 | // it's fine that we hold heap_checker_lock only around pieces of |
| 1949 | // this function: this is still enough for thread-safety w.r.t. other functions |
| 1950 | // of this module. |
| 1951 | // We can't hold heap_checker_lock throughout because it would deadlock |
| 1952 | // on a memory allocation since our new/delete hooks can be on. |
| 1953 | // |
| 1954 | void HeapLeakChecker_InternalInitStart() { |
| 1955 | { SpinLockHolder l(&heap_checker_lock); |
| 1956 | RAW_CHECK(!internal_init_start_has_run, |
| 1957 | "Heap-check constructor called twice. Perhaps you both linked" |
| 1958 | " in the heap checker, and also used LD_PRELOAD to load it?"); |
| 1959 | internal_init_start_has_run = true; |
| 1960 | |
| 1961 | #ifdef ADDRESS_SANITIZER |
| 1962 | // AddressSanitizer's custom malloc conflicts with HeapChecker. |
| 1963 | FLAGS_heap_check = ""; |
| 1964 | #endif |
| 1965 | |
| 1966 | if (FLAGS_heap_check.empty()) { |
| 1967 | // turns out we do not need checking in the end; can stop profiling |
| 1968 | HeapLeakChecker::TurnItselfOffLocked(); |
| 1969 | return; |
| 1970 | } else if (RunningOnValgrind()) { |
| 1971 | // There is no point in trying -- we'll just fail. |
| 1972 | RAW_LOG(WARNING, "Can't run under Valgrind; will turn itself off"); |
| 1973 | HeapLeakChecker::TurnItselfOffLocked(); |
| 1974 | return; |
| 1975 | } |
| 1976 | } |
| 1977 | |
| 1978 | // Changing this to false can be useful when debugging heap-checker itself: |
| 1979 | if (!FLAGS_heap_check_run_under_gdb && IsDebuggerAttached()) { |
| 1980 | RAW_LOG(WARNING, "Someone is ptrace()ing us; will turn itself off"); |
| 1981 | SpinLockHolder l(&heap_checker_lock); |
| 1982 | HeapLeakChecker::TurnItselfOffLocked(); |
| 1983 | return; |
| 1984 | } |
| 1985 | |
| 1986 | { SpinLockHolder l(&heap_checker_lock); |
| 1987 | if (!constructor_heap_profiling) { |
| 1988 | RAW_LOG(FATAL, "Can not start so late. You have to enable heap checking " |
| 1989 | "with HEAPCHECK=<mode>."); |
| 1990 | } |
| 1991 | } |
| 1992 | |
| 1993 | // Set all flags |
| 1994 | RAW_DCHECK(FLAGS_heap_check_pointer_source_alignment > 0, ""); |
| 1995 | if (FLAGS_heap_check == "minimal") { |
| 1996 | // The least we can check. |
| 1997 | FLAGS_heap_check_before_constructors = false; // from after main |
| 1998 | // (ignore more) |
| 1999 | FLAGS_heap_check_after_destructors = false; // to after cleanup |
| 2000 | // (most data is live) |
| 2001 | FLAGS_heap_check_ignore_thread_live = true; // ignore all live |
| 2002 | FLAGS_heap_check_ignore_global_live = true; // ignore all live |
| 2003 | } else if (FLAGS_heap_check == "normal") { |
| 2004 | // Faster than 'minimal' and not much stricter. |
| 2005 | FLAGS_heap_check_before_constructors = true; // from no profile (fast) |
| 2006 | FLAGS_heap_check_after_destructors = false; // to after cleanup |
| 2007 | // (most data is live) |
| 2008 | FLAGS_heap_check_ignore_thread_live = true; // ignore all live |
| 2009 | FLAGS_heap_check_ignore_global_live = true; // ignore all live |
| 2010 | } else if (FLAGS_heap_check == "strict") { |
| 2011 | // A bit stricter than 'normal': global destructors must fully clean up |
| 2012 | // after themselves if they are present. |
| 2013 | FLAGS_heap_check_before_constructors = true; // from no profile (fast) |
| 2014 | FLAGS_heap_check_after_destructors = true; // to after destructors |
| 2015 | // (less data live) |
| 2016 | FLAGS_heap_check_ignore_thread_live = true; // ignore all live |
| 2017 | FLAGS_heap_check_ignore_global_live = true; // ignore all live |
| 2018 | } else if (FLAGS_heap_check == "draconian") { |
// Drop the not-very-portable and not-very-exact live heap flooding.
| 2020 | FLAGS_heap_check_before_constructors = true; // from no profile (fast) |
| 2021 | FLAGS_heap_check_after_destructors = true; // to after destructors |
| 2022 | // (need them) |
| 2023 | FLAGS_heap_check_ignore_thread_live = false; // no live flood (stricter) |
| 2024 | FLAGS_heap_check_ignore_global_live = false; // no live flood (stricter) |
| 2025 | } else if (FLAGS_heap_check == "as-is") { |
| 2026 | // do nothing: use other flags as is |
| 2027 | } else if (FLAGS_heap_check == "local") { |
| 2028 | // do nothing |
| 2029 | } else { |
| 2030 | RAW_LOG(FATAL, "Unsupported heap_check flag: %s", |
| 2031 | FLAGS_heap_check.c_str()); |
| 2032 | } |
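// Summary of the presets above:
//   HEAPCHECK=   before_constructors  after_destructors  ignore_thread  ignore_global
//   minimal      false                false              true           true
//   normal       true                 false              true           true
//   strict       true                 true               true           true
//   draconian    true                 true               false          false
//   as-is/local  (all four flags left as they are)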
| 2033 | // FreeBSD doesn't seem to honor atexit execution order: |
| 2034 | // http://code.google.com/p/gperftools/issues/detail?id=375 |
| 2035 | // Since heap-checking before destructors depends on atexit running |
| 2036 | // at the right time, on FreeBSD we always check after, even in the |
| 2037 | // less strict modes. This just means FreeBSD is always a bit |
| 2038 | // stricter in its checking than other OSes. |
// This now appears to be the case on other OSes as well,
// so we always check afterwards.
| 2041 | FLAGS_heap_check_after_destructors = true; |
| 2042 | |
| 2043 | { SpinLockHolder l(&heap_checker_lock); |
| 2044 | RAW_DCHECK(heap_checker_pid == getpid(), ""); |
| 2045 | heap_checker_on = true; |
| 2046 | RAW_DCHECK(heap_profile, ""); |
HeapLeakChecker::ProcMapsResult pm_result =
HeapLeakChecker::UseProcMapsLocked(HeapLeakChecker::DISABLE_LIBRARY_ALLOCS);
// might need to do this more than once
// if one later dynamically loads libraries that we want disabled
| 2050 | if (pm_result != HeapLeakChecker::PROC_MAPS_USED) { // can't function |
| 2051 | HeapLeakChecker::TurnItselfOffLocked(); |
| 2052 | return; |
| 2053 | } |
| 2054 | } |
| 2055 | |
| 2056 | // make a good place and name for heap profile leak dumps |
| 2057 | string* profile_prefix = |
| 2058 | new string(FLAGS_heap_check_dump_directory + "/" + invocation_name()); |
| 2059 | |
| 2060 | // Finalize prefix for dumping leak checking profiles. |
| 2061 | const int32 our_pid = getpid(); // safest to call getpid() outside lock |
| 2062 | { SpinLockHolder l(&heap_checker_lock); |
| 2063 | // main_thread_pid might still be 0 if this function is being called before |
| 2064 | // global constructors. In that case, our pid *is* the main pid. |
| 2065 | if (main_thread_pid == 0) |
| 2066 | main_thread_pid = our_pid; |
| 2067 | } |
| 2068 | char pid_buf[15]; |
| 2069 | snprintf(pid_buf, sizeof(pid_buf), ".%d", main_thread_pid); |
| 2070 | *profile_prefix += pid_buf; |
| 2071 | { SpinLockHolder l(&heap_checker_lock); |
| 2072 | RAW_DCHECK(profile_name_prefix == NULL, ""); |
| 2073 | profile_name_prefix = profile_prefix; |
| 2074 | } |
| 2075 | |
| 2076 | // Make sure new/delete hooks are installed properly |
| 2077 | // and heap profiler is indeed able to keep track |
| 2078 | // of the objects being allocated. |
| 2079 | // We test this to make sure we are indeed checking for leaks. |
| 2080 | char* test_str = new char[5]; |
| 2081 | size_t size; |
| 2082 | { SpinLockHolder l(&heap_checker_lock); |
| 2083 | RAW_CHECK(heap_profile->FindAlloc(test_str, &size), |
| 2084 | "our own new/delete not linked?"); |
| 2085 | } |
| 2086 | delete[] test_str; |
| 2087 | { SpinLockHolder l(&heap_checker_lock); |
| 2088 | // This check can fail when it should not if another thread allocates |
| 2089 | // into this same spot right this moment, |
| 2090 | // which is unlikely since this code runs in InitGoogle. |
| 2091 | RAW_CHECK(!heap_profile->FindAlloc(test_str, &size), |
| 2092 | "our own new/delete not linked?"); |
| 2093 | } |
| 2094 | // If we crash in the above code, it probably means that |
| 2095 | // "nm <this_binary> | grep new" will show that tcmalloc's new/delete |
| 2096 | // implementation did not get linked-in into this binary |
| 2097 | // (i.e. nm will list __builtin_new and __builtin_vec_new as undefined). |
| 2098 | // If this happens, it is a BUILD bug to be fixed. |
| 2099 | |
| 2100 | RAW_VLOG(heap_checker_info_level, |
| 2101 | "WARNING: Perftools heap leak checker is active " |
| 2102 | "-- Performance may suffer"); |
| 2103 | |
| 2104 | if (FLAGS_heap_check != "local") { |
| 2105 | HeapLeakChecker* main_hc = new HeapLeakChecker(); |
| 2106 | SpinLockHolder l(&heap_checker_lock); |
| 2107 | RAW_DCHECK(main_heap_checker == NULL, |
| 2108 | "Repeated creation of main_heap_checker"); |
| 2109 | main_heap_checker = main_hc; |
| 2110 | do_main_heap_check = true; |
| 2111 | } |
| 2112 | |
| 2113 | { SpinLockHolder l(&heap_checker_lock); |
| 2114 | RAW_CHECK(heap_checker_on && constructor_heap_profiling, |
| 2115 | "Leak checking is expected to be fully turned on now"); |
| 2116 | } |
| 2117 | |
| 2118 | // For binaries built in debug mode, this will set release queue of |
| 2119 | // debugallocation.cc to 100M to make it less likely for real leaks to |
| 2120 | // be hidden due to reuse of heap memory object addresses. |
| 2121 | // Running a test with --malloc_reclaim_memory=0 would help find leaks even |
| 2122 | // better, but the test might run out of memory as a result. |
| 2123 | // The scenario is that a heap object at address X is allocated and freed, |
| 2124 | // but some other data-structure still retains a pointer to X. |
| 2125 | // Then the same heap memory is used for another object, which is leaked, |
| 2126 | // but the leak is not noticed due to the pointer to the original object at X. |
| 2127 | // TODO(csilvers): support this in some manner. |
| 2128 | #if 0 |
| 2129 | SetCommandLineOptionWithMode("max_free_queue_size", "104857600", // 100M |
| 2130 | SET_FLAG_IF_DEFAULT); |
| 2131 | #endif |
| 2132 | } |
| 2133 | |
| 2134 | // We want this to run early as well, but not so early as |
| 2135 | // ::BeforeConstructors (we want flag assignments to have already |
| 2136 | // happened, for instance). Initializer-registration does the trick. |
| 2137 | REGISTER_MODULE_INITIALIZER(init_start, HeapLeakChecker_InternalInitStart()); |
| 2138 | REGISTER_MODULE_DESTRUCTOR(init_start, HeapLeakChecker_RunHeapCleanups()); |
| 2139 | |
| 2140 | // static |
| 2141 | bool HeapLeakChecker::NoGlobalLeaksMaybeSymbolize( |
| 2142 | ShouldSymbolize should_symbolize) { |
| 2143 | // we never delete or change main_heap_checker once it's set: |
| 2144 | HeapLeakChecker* main_hc = GlobalChecker(); |
| 2145 | if (main_hc) { |
| 2146 | RAW_VLOG(10, "Checking for whole-program memory leaks"); |
| 2147 | return main_hc->DoNoLeaks(should_symbolize); |
| 2148 | } |
| 2149 | return true; |
| 2150 | } |
| 2151 | |
| 2152 | // static |
| 2153 | bool HeapLeakChecker::DoMainHeapCheck() { |
| 2154 | if (FLAGS_heap_check_delay_seconds > 0) { |
| 2155 | sleep(FLAGS_heap_check_delay_seconds); |
| 2156 | } |
| 2157 | { SpinLockHolder l(&heap_checker_lock); |
| 2158 | if (!do_main_heap_check) return false; |
| 2159 | RAW_DCHECK(heap_checker_pid == getpid(), ""); |
| 2160 | do_main_heap_check = false; // will do it now; no need to do it more |
| 2161 | } |
| 2162 | |
| 2163 | // The program is over, so it's safe to symbolize addresses (which |
| 2164 | // requires a fork) because no serious work is expected to be done |
| 2165 | // after this. Symbolizing is really useful -- knowing what |
| 2166 | // function has a leak is better than knowing just an address -- |
| 2167 | // and while we can only safely symbolize once in a program run, |
| 2168 | // now is the time (after all, there's no "later" that would be better). |
| 2169 | if (!NoGlobalLeaksMaybeSymbolize(SYMBOLIZE)) { |
| 2170 | if (FLAGS_heap_check_identify_leaks) { |
| 2171 | RAW_LOG(FATAL, "Whole-program memory leaks found."); |
| 2172 | } |
| 2173 | RAW_LOG(ERROR, "Exiting with error code (instead of crashing) " |
| 2174 | "because of whole-program memory leaks"); |
| 2175 | _exit(1); // we don't want to call atexit() routines! |
| 2176 | } |
| 2177 | return true; |
| 2178 | } |
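// The at-exit check above is enabled via the HEAPCHECK environment variable,
// e.g.:
//   HEAPCHECK=normal ./server # run "server" with the whole-program check on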
| 2179 | |
| 2180 | // static |
| 2181 | HeapLeakChecker* HeapLeakChecker::GlobalChecker() { |
| 2182 | SpinLockHolder l(&heap_checker_lock); |
| 2183 | return main_heap_checker; |
| 2184 | } |
| 2185 | |
| 2186 | // static |
| 2187 | bool HeapLeakChecker::NoGlobalLeaks() { |
| 2188 | // symbolizing requires a fork, which isn't safe to do in general. |
| 2189 | return NoGlobalLeaksMaybeSymbolize(DO_NOT_SYMBOLIZE); |
| 2190 | } |
| 2191 | |
| 2192 | // static |
| 2193 | void HeapLeakChecker::CancelGlobalCheck() { |
| 2194 | SpinLockHolder l(&heap_checker_lock); |
| 2195 | if (do_main_heap_check) { |
| 2196 | RAW_VLOG(heap_checker_info_level, |
| 2197 | "Canceling the automatic at-exit whole-program memory leak check"); |
| 2198 | do_main_heap_check = false; |
| 2199 | } |
| 2200 | } |
| 2201 | |
| 2202 | // static |
| 2203 | void HeapLeakChecker::BeforeConstructorsLocked() { |
| 2204 | RAW_DCHECK(heap_checker_lock.IsHeld(), ""); |
| 2205 | RAW_CHECK(!constructor_heap_profiling, |
| 2206 | "BeforeConstructorsLocked called multiple times"); |
| 2207 | #ifdef ADDRESS_SANITIZER |
| 2208 | // AddressSanitizer's custom malloc conflicts with HeapChecker. |
| 2209 | return; |
| 2210 | #endif |
| 2211 | // Set hooks early to crash if 'new' gets called before we make heap_profile, |
| 2212 | // and make sure no other hooks existed: |
| 2213 | RAW_CHECK(MallocHook::AddNewHook(&NewHook), ""); |
| 2214 | RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook), ""); |
| 2215 | constructor_heap_profiling = true; |
| 2216 | MemoryRegionMap::Init(1, /* use_buckets */ false); |
| 2217 | // Set up MemoryRegionMap with (at least) one caller stack frame to record |
| 2218 | // (important that it's done before HeapProfileTable creation below). |
| 2219 | Allocator::Init(); |
| 2220 | RAW_CHECK(heap_profile == NULL, ""); |
| 2221 | heap_profile = new(Allocator::Allocate(sizeof(HeapProfileTable))) |
| 2222 | HeapProfileTable(&Allocator::Allocate, &Allocator::Free, |
| 2223 | /* profile_mmap */ false); |
| 2224 | RAW_VLOG(10, "Starting tracking the heap"); |
| 2225 | heap_checker_on = true; |
| 2226 | } |
| 2227 | |
| 2228 | // static |
| 2229 | void HeapLeakChecker::TurnItselfOffLocked() { |
| 2230 | RAW_DCHECK(heap_checker_lock.IsHeld(), ""); |
| 2231 | // Set FLAGS_heap_check to "", for users who test for it |
| 2232 | if (!FLAGS_heap_check.empty()) // be a noop in the common case |
| 2233 | FLAGS_heap_check.clear(); // because clear() could allocate memory |
| 2234 | if (constructor_heap_profiling) { |
| 2235 | RAW_CHECK(heap_checker_on, ""); |
| 2236 | RAW_VLOG(heap_checker_info_level, "Turning perftools heap leak checking off"); |
| 2237 | heap_checker_on = false; |
| 2238 | // Unset our hooks checking they were set: |
| 2239 | RAW_CHECK(MallocHook::RemoveNewHook(&NewHook), ""); |
| 2240 | RAW_CHECK(MallocHook::RemoveDeleteHook(&DeleteHook), ""); |
| 2241 | Allocator::DeleteAndNull(&heap_profile); |
| 2242 | // free our optional global data: |
| 2243 | Allocator::DeleteAndNullIfNot(&ignored_objects); |
| 2244 | Allocator::DeleteAndNullIfNot(&disabled_ranges); |
| 2245 | Allocator::DeleteAndNullIfNot(&global_region_caller_ranges); |
| 2246 | Allocator::Shutdown(); |
| 2247 | MemoryRegionMap::Shutdown(); |
| 2248 | } |
| 2249 | RAW_CHECK(!heap_checker_on, ""); |
| 2250 | } |
| 2251 | |
| 2252 | extern bool heap_leak_checker_bcad_variable; // in heap-checker-bcad.cc |
| 2253 | |
| 2254 | static bool has_called_before_constructors = false; |
| 2255 | |
| 2256 | // TODO(maxim): inline this function with |
| 2257 | // MallocHook_InitAtFirstAllocation_HeapLeakChecker, and also rename |
| 2258 | // HeapLeakChecker::BeforeConstructorsLocked. |
| 2259 | void HeapLeakChecker_BeforeConstructors() { |
| 2260 | SpinLockHolder l(&heap_checker_lock); |
| 2261 | // We can be called from several places: the first mmap/sbrk/alloc call |
| 2262 | // or the first global c-tor from heap-checker-bcad.cc: |
| 2263 | // Do not re-execute initialization: |
| 2264 | if (has_called_before_constructors) return; |
| 2265 | has_called_before_constructors = true; |
| 2266 | |
| 2267 | heap_checker_pid = getpid(); // set it always |
| 2268 | heap_leak_checker_bcad_variable = true; |
| 2269 | // just to reference it, so that heap-checker-bcad.o is linked in |
| 2270 | |
| 2271 | // This function can be called *very* early, before the normal |
| 2272 | // global-constructor that sets FLAGS_verbose. Set it manually now, |
| 2273 | // so the RAW_LOG messages here are controllable. |
| 2274 | const char* verbose_str = GetenvBeforeMain("PERFTOOLS_VERBOSE"); |
| 2275 | if (verbose_str && atoi(verbose_str)) { // different than the default of 0? |
| 2276 | FLAGS_verbose = atoi(verbose_str); |
| 2277 | } |
| 2278 | |
| 2279 | bool need_heap_check = true; |
| 2280 | // The user indicates a desire for heap-checking via the HEAPCHECK |
| 2281 | // environment variable. If it's not set, there's no way to do |
| 2282 | // heap-checking. |
| 2283 | if (!GetenvBeforeMain("HEAPCHECK")) { |
| 2284 | need_heap_check = false; |
| 2285 | } |
| 2286 | #ifdef HAVE_GETEUID |
| 2287 | if (need_heap_check && getuid() != geteuid()) { |
| 2288 | // heap-checker writes out files. Thus, for security reasons, we don't |
| 2289 | // recognize the env. var. to turn on heap-checking if we're setuid. |
| 2290 | RAW_LOG(WARNING, ("HeapChecker: ignoring HEAPCHECK because " |
| 2291 | "program seems to be setuid\n")); |
| 2292 | need_heap_check = false; |
| 2293 | } |
| 2294 | #endif |
| 2295 | if (need_heap_check) { |
| 2296 | HeapLeakChecker::BeforeConstructorsLocked(); |
| 2297 | } |
| 2298 | } |
| 2299 | |
| 2300 | // This function overrides the weak function defined in malloc_hook.cc and |
| 2301 | // called by one of the initial malloc hooks (malloc_hook.cc) when the very |
| 2302 | // first memory allocation or an mmap/sbrk happens. This ensures that |
| 2303 | // HeapLeakChecker is initialized and installs all its hooks early enough to |
| 2304 | // track absolutely all memory allocations and all memory region acquisitions |
| 2305 | // via mmap and sbrk. |
| 2306 | extern "C" void MallocHook_InitAtFirstAllocation_HeapLeakChecker() { |
| 2307 | HeapLeakChecker_BeforeConstructors(); |
| 2308 | } |
| 2309 | |
| 2310 | // This function is executed after all global object destructors run. |
| 2311 | void HeapLeakChecker_AfterDestructors() { |
| 2312 | { SpinLockHolder l(&heap_checker_lock); |
| 2313 | // can get here (via forks?) with other pids |
| 2314 | if (heap_checker_pid != getpid()) return; |
| 2315 | } |
| 2316 | if (FLAGS_heap_check_after_destructors) { |
| 2317 | if (HeapLeakChecker::DoMainHeapCheck()) { |
| 2318 | const struct timespec sleep_time = { 0, 500000000 }; // 500 ms |
| 2319 | nanosleep(&sleep_time, NULL); |
| 2320 | // Need this hack to wait for other pthreads to exit. |
// Otherwise tcmalloc finds errors
| 2322 | // on a free() call from pthreads. |
| 2323 | } |
| 2324 | } |
| 2325 | SpinLockHolder l(&heap_checker_lock); |
| 2326 | RAW_CHECK(!do_main_heap_check, "should have done it"); |
| 2327 | } |
| 2328 | |
| 2329 | //---------------------------------------------------------------------- |
| 2330 | // HeapLeakChecker disabling helpers |
| 2331 | //---------------------------------------------------------------------- |
| 2332 | |
| 2333 | // These functions are at the end of the file to prevent their inlining: |
| 2334 | |
| 2335 | // static |
| 2336 | void HeapLeakChecker::DisableChecksFromToLocked(const void* start_address, |
| 2337 | const void* end_address, |
| 2338 | int max_depth) { |
| 2339 | RAW_DCHECK(heap_checker_lock.IsHeld(), ""); |
| 2340 | RAW_DCHECK(start_address < end_address, ""); |
| 2341 | if (disabled_ranges == NULL) { |
| 2342 | disabled_ranges = new(Allocator::Allocate(sizeof(DisabledRangeMap))) |
| 2343 | DisabledRangeMap; |
| 2344 | } |
| 2345 | RangeValue value; |
| 2346 | value.start_address = AsInt(start_address); |
| 2347 | value.max_depth = max_depth; |
| 2348 | if (disabled_ranges->insert(make_pair(AsInt(end_address), value)).second) { |
| 2349 | RAW_VLOG(10, "Disabling leak checking in stack traces " |
| 2350 | "under frame addresses between %p..%p", |
| 2351 | start_address, end_address); |
| 2352 | } else { // check that this is just a verbatim repetition |
| 2353 | RangeValue const& val = disabled_ranges->find(AsInt(end_address))->second; |
| 2354 | if (val.max_depth != value.max_depth || |
| 2355 | val.start_address != value.start_address) { |
| 2356 | RAW_LOG(FATAL, "Two DisableChecksToHereFrom calls conflict: " |
| 2357 | "(%p, %p, %d) vs. (%p, %p, %d)", |
| 2358 | AsPtr(val.start_address), end_address, val.max_depth, |
| 2359 | start_address, end_address, max_depth); |
| 2360 | } |
| 2361 | } |
| 2362 | } |
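// Note on the map layout used above (made-up addresses): disabled_ranges is
// keyed by the range's *end* address, so disabling [0x1000, 0x2000) with
// max_depth 5 stores
//   0x2000 -> { start_address = 0x1000, max_depth = 5 }
// which lets callers locate the covering range for a stack-frame address
// with a single upper_bound lookup.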
| 2363 | |
| 2364 | // static |
| 2365 | inline bool HeapLeakChecker::HaveOnHeapLocked(const void** ptr, |
| 2366 | size_t* object_size) { |
| 2367 | // Commented-out because HaveOnHeapLocked is very performance-critical: |
| 2368 | // RAW_DCHECK(heap_checker_lock.IsHeld(), ""); |
| 2369 | const uintptr_t addr = AsInt(*ptr); |
| 2370 | if (heap_profile->FindInsideAlloc( |
| 2371 | *ptr, max_heap_object_size, ptr, object_size)) { |
| 2372 | RAW_VLOG(16, "Got pointer into %p at +%" PRIuPTR " offset", |
| 2373 | *ptr, addr - AsInt(*ptr)); |
| 2374 | return true; |
| 2375 | } |
| 2376 | return false; |
| 2377 | } |
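// E.g. (illustrative): for a 64-byte heap object starting at some address
// base, a call with *ptr == base + 8 rewrites *ptr to base, sets
// *object_size to 64, and the RAW_VLOG above reports a "+8" offset;
// pointers more than max_heap_object_size bytes into an object are not
// matched.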
| 2378 | |
| 2379 | // static |
| 2380 | const void* HeapLeakChecker::GetAllocCaller(void* ptr) { |
| 2381 | // this is used only in the unittest, so the heavy checks are fine |
| 2382 | HeapProfileTable::AllocInfo info; |
| 2383 | { SpinLockHolder l(&heap_checker_lock); |
| 2384 | RAW_CHECK(heap_profile->FindAllocDetails(ptr, &info), ""); |
| 2385 | } |
| 2386 | RAW_CHECK(info.stack_depth >= 1, ""); |
| 2387 | return info.call_stack[0]; |
| 2388 | } |