// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat
//         Maxim Lifantsev (refactoring)
//

#ifndef BASE_HEAP_PROFILE_TABLE_H_
#define BASE_HEAP_PROFILE_TABLE_H_

#include "addressmap-inl.h"
#include "base/basictypes.h"
#include "base/logging.h"  // for RawFD
#include "heap-profile-stats.h"

// Table to maintain heap profile data, i.e. the set of currently
// active heap memory allocations.
// This code is thread-unsafe and non-reentrant: each instance
// must be used by one thread at a time, without self-recursion.
//
// TODO(maxim): add a unittest for this class.
class HeapProfileTable {
 public:

  // Extension to be used for heap profile files.
  static const char kFileExt[];

  // Longest stack trace we record.
  static const int kMaxStackDepth = 32;

  // data types ----------------------------

  // Profile stats.
  typedef HeapProfileStats Stats;

  // Info we can return about an allocation.
  struct AllocInfo {
    size_t object_size;             // size of the allocation
    const void* const* call_stack;  // call stack that made the allocation call
    int stack_depth;                // depth of call_stack
    bool live;
    bool ignored;
  };

  // Info we return about an allocation context.
  // An allocation context is a unique caller stack trace
  // of an allocation operation.
  struct AllocContextInfo : public Stats {
    int stack_depth;                // Depth of stack trace
    const void* const* call_stack;  // Stack trace
  };

  // Memory (de)allocator interface we'll use.
  typedef void* (*Allocator)(size_t size);
  typedef void (*DeAllocator)(void* ptr);

  // interface ---------------------------

  HeapProfileTable(Allocator alloc, DeAllocator dealloc, bool profile_mmap);
  ~HeapProfileTable();

  // Collect the stack trace for the function that asked to do the
  // allocation for passing to RecordAlloc() below.
  //
  // The stack trace is stored in 'stack'. The stack depth is returned.
  //
  // 'skip_count' gives the number of stack frames between this call
  // and the memory allocation function.
  static int GetCallerStackTrace(int skip_count, void* stack[kMaxStackDepth]);

  // Record an allocation at 'ptr' of 'bytes' bytes. 'stack_depth'
  // and 'call_stack' identify the function that requested the
  // allocation; they can be generated using GetCallerStackTrace() above.
  void RecordAlloc(const void* ptr, size_t bytes,
                   int stack_depth, const void* const call_stack[]);

  // Record the deallocation of memory at 'ptr'.
  void RecordFree(const void* ptr);
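
  // Example: a minimal sketch of how an allocation hook might use
  // GetCallerStackTrace(), RecordAlloc(), and RecordFree(). The names
  // 'table' and 'do_alloc' and the skip_count of 0 are illustrative
  // assumptions, not part of this interface:
  //
  //   void* ptr = do_alloc(size);  // underlying allocator (hypothetical)
  //   void* stack[kMaxStackDepth];
  //   int depth = HeapProfileTable::GetCallerStackTrace(0, stack);
  //   table->RecordAlloc(ptr, size, depth, stack);
  //   ...
  //   table->RecordFree(ptr);  // later, from the matching free hook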

  // Return true iff we have recorded an allocation at 'ptr'.
  // If yes, fill *object_size with the allocation byte size.
  bool FindAlloc(const void* ptr, size_t* object_size) const;
  // Same as FindAlloc, but fills all of *info.
  bool FindAllocDetails(const void* ptr, AllocInfo* info) const;

  // Return true iff "ptr" points into a recorded allocation.
  // If yes, fill *object_ptr with the actual allocation address
  // and *object_size with the allocation byte size.
  // 'max_size' specifies the largest currently possible allocation size.
  bool FindInsideAlloc(const void* ptr, size_t max_size,
                       const void** object_ptr, size_t* object_size) const;

  // If "ptr" points to a recorded allocation that is not marked as live,
  // mark it as live and return true. Else return false.
  // All allocations start as non-live.
  bool MarkAsLive(const void* ptr);
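
  // Example (a sketch): a leak checker can use the mark bit for a
  // mark-and-sweep style pass; 'table' and 'obj' are hypothetical names:
  //
  //   if (table->MarkAsLive(obj)) {
  //     // First visit: scan obj's contents for more heap pointers.
  //   }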

  // If "ptr" points to a recorded allocation, mark it as "ignored".
  // Ignored objects are treated like other objects, except that they
  // are skipped in heap checking reports.
  void MarkAsIgnored(const void* ptr);

  // Return the current total (de)allocation statistics.
  // These stats do not include mmap'ed regions.
  const Stats& total() const { return total_; }

  // Allocation data iteration callback: gets passed the object pointer
  // and a fully-filled AllocInfo.
  typedef void (*AllocIterator)(const void* ptr, const AllocInfo& info);

  // Iterate over the allocation profile data, calling "callback"
  // for every allocation.
  void IterateAllocs(AllocIterator callback) const {
    address_map_->Iterate(MapArgsAllocIterator, callback);
  }
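
  // Example: a minimal AllocIterator callback that sums live bytes
  // (a sketch; 'CountLive' and 'live_bytes' are hypothetical names):
  //
  //   static size_t live_bytes = 0;
  //   static void CountLive(const void* ptr, const AllocInfo& info) {
  //     if (info.live) live_bytes += info.object_size;
  //   }
  //   ...
  //   table->IterateAllocs(CountLive);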

  // Allocation context profile data iteration callback
  typedef void (*AllocContextIterator)(const AllocContextInfo& info);

  // Iterate over the allocation context profile data, calling "callback"
  // for every allocation context. Allocation contexts are ordered by the
  // size of allocated space.
  void IterateOrderedAllocContexts(AllocContextIterator callback) const;

  // Fill profile data into buffer 'buf' of size 'size'
  // and return the actual size occupied by the dump in 'buf'.
  // The profile buckets are dumped in decreasing order
  // of currently allocated bytes.
  // We do not 0-terminate 'buf'.
  int FillOrderedProfile(char buf[], int size) const;
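
  // Example usage (a sketch; the buffer size is illustrative, and the
  // RawWrite() call assumes the low-level writer from base/logging.h):
  //
  //   char buf[1 << 20];
  //   int used = table->FillOrderedProfile(buf, sizeof(buf));
  //   RawWrite(fd, buf, used);  // remember: 'buf' is not 0-terminated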

  // Clean up any old profile files matching prefix + ".*" + kFileExt.
  static void CleanupOldProfiles(const char* prefix);

  // Return a snapshot of the current contents of *this.
  // Caller must call ReleaseSnapshot() on result when no longer needed.
  // The result is only valid while this exists and until
  // the snapshot is discarded by calling ReleaseSnapshot().
  class Snapshot;
  Snapshot* TakeSnapshot();

  // Release a previously taken snapshot. snapshot must not
  // be used after this call.
  void ReleaseSnapshot(Snapshot* snapshot);

  // Return a snapshot of every non-live, non-ignored object in *this.
  // If "base" is non-NULL, skip any objects present in "base".
  // As a side-effect, clears the "live" bit on every live object in *this.
  // Caller must call ReleaseSnapshot() on result when no longer needed.
  Snapshot* NonLiveSnapshot(Snapshot* base);
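
  // Example snapshot lifecycle (a sketch; the checker name and file
  // name are illustrative):
  //
  //   Snapshot* base = table->TakeSnapshot();
  //   ... run the code under test; mark reachable objects live ...
  //   Snapshot* leaks = table->NonLiveSnapshot(base);
  //   if (!leaks->Empty())
  //     leaks->ReportLeaks("my-checker", "leaks.heap", false);
  //   table->ReleaseSnapshot(leaks);
  //   table->ReleaseSnapshot(base);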

 private:

  // data types ----------------------------

  // Hash table bucket to hold (de)allocation stats
  // for a given allocation call stack trace.
  typedef HeapProfileBucket Bucket;

  // Info stored in the address map
  struct AllocValue {
    // Access to the stack-trace bucket
    Bucket* bucket() const {
      return reinterpret_cast<Bucket*>(bucket_rep & ~uintptr_t(kMask));
    }
    // This also does set_live(false).
    void set_bucket(Bucket* b) { bucket_rep = reinterpret_cast<uintptr_t>(b); }
    size_t bytes;  // Number of bytes in this allocation

    // Access to the allocation liveness flag (for leak checking)
    bool live() const { return bucket_rep & kLive; }
    void set_live(bool l) {
      bucket_rep = (bucket_rep & ~uintptr_t(kLive)) | (l ? kLive : 0);
    }

    // Should this allocation be ignored if it looks like a leak?
    bool ignore() const { return bucket_rep & kIgnore; }
    void set_ignore(bool r) {
      bucket_rep = (bucket_rep & ~uintptr_t(kIgnore)) | (r ? kIgnore : 0);
    }

   private:
    // We store a few bits in the bottom bits of bucket_rep.
    // (Alignment is at least four, so we have at least two bits.)
    static const int kLive = 1;
    static const int kIgnore = 2;
    static const int kMask = kLive | kIgnore;

    uintptr_t bucket_rep;
  };
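
  // The bit-packing above relies on Bucket pointers being at least
  // 4-byte aligned, which leaves the two low bits of bucket_rep free
  // for the flags. A sketch of the round trip (values illustrative):
  //
  //   Bucket* b = ...;                               // low 2 bits are 0
  //   uintptr_t rep = reinterpret_cast<uintptr_t>(b) | 1;  // set kLive
  //   Bucket* back = reinterpret_cast<Bucket*>(rep & ~uintptr_t(3));
  //   // back == b, and (rep & 1) still records liveness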

  // helper for FindInsideAlloc
  static size_t AllocValueSize(const AllocValue& v) { return v.bytes; }

  typedef AddressMap<AllocValue> AllocationMap;

  // Arguments that need to be passed to the DumpBucketIterator callback below.
  struct BufferArgs {
    BufferArgs(char* buf_arg, int buflen_arg, int bufsize_arg)
        : buf(buf_arg),
          buflen(buflen_arg),
          bufsize(bufsize_arg) {
    }

    char* buf;
    int buflen;
    int bufsize;

    DISALLOW_COPY_AND_ASSIGN(BufferArgs);
  };

  // Arguments that need to be passed to the DumpNonLiveIterator callback below.
  struct DumpArgs {
    DumpArgs(RawFD fd_arg, Stats* profile_stats_arg)
        : fd(fd_arg),
          profile_stats(profile_stats_arg) {
    }

    RawFD fd;              // file to write to
    Stats* profile_stats;  // stats to update (may be NULL)
  };

  // helpers ----------------------------

  // Unparse bucket 'b' and print its portion of the profile dump into
  // 'buf'. We return the amount of space in 'buf' that we use. We start
  // printing at buf + buflen, and promise not to go beyond buf + bufsize.
  // We do not 0-terminate 'buf'.
  //
  // If 'profile_stats' is non-NULL, we update *profile_stats by
  // counting bucket 'b'.
  //
  // "extra" is appended to the unparsed bucket. Typically it is empty,
  // but may be set to something like " heapprofile" for the total
  // bucket to indicate the type of the profile.
  static int UnparseBucket(const Bucket& b,
                           char* buf, int buflen, int bufsize,
                           const char* extra,
                           Stats* profile_stats);
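
  // For reference, each unparsed bucket is one text line of roughly
  // this shape (a sketch; field widths and addresses are illustrative):
  //
  //   <live objs>: <live bytes> [<total allocs>: <total bytes>] @ 0x4006f0 0x400710
  //
  // where the addresses after '@' are the bucket's call stack.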

  // Get the bucket for the caller stack trace 'key' of depth 'depth',
  // creating the bucket if needed.
  Bucket* GetBucket(int depth, const void* const key[]);

  // Helper for IterateAllocs to do callback signature conversion
  // from AllocationMap::Iterate to AllocIterator.
  static void MapArgsAllocIterator(const void* ptr, AllocValue* v,
                                   AllocIterator callback) {
    AllocInfo info;
    info.object_size = v->bytes;
    info.call_stack = v->bucket()->stack;
    info.stack_depth = v->bucket()->depth;
    info.live = v->live();
    info.ignored = v->ignore();
    callback(ptr, info);
  }

  // Helper to dump a bucket.
  inline static void DumpBucketIterator(const Bucket* bucket,
                                        BufferArgs* args);

  // Helper for DumpNonLiveProfile to do object-granularity
  // heap profile dumping. It gets passed to AllocationMap::Iterate.
  inline static void DumpNonLiveIterator(const void* ptr, AllocValue* v,
                                         const DumpArgs& args);

  // Helper for IterateOrderedAllocContexts and FillOrderedProfile.
  // Creates a sorted list of Buckets whose length is num_buckets_.
  // The caller is responsible for deallocating the returned list.
  Bucket** MakeSortedBucketList() const;
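
  // A sketch of one plausible ownership convention (assuming the list
  // is allocated with the table's alloc_; 'list' is a hypothetical name):
  //
  //   Bucket** list = MakeSortedBucketList();
  //   for (int i = 0; i < num_buckets_; i++) { /* ... list[i] ... */ }
  //   dealloc_(list);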

  // Helper for TakeSnapshot. Saves object to snapshot.
  static void AddToSnapshot(const void* ptr, AllocValue* v, Snapshot* s);

  // Arguments passed to AddIfNonLive
  struct AddNonLiveArgs {
    Snapshot* dest;
    Snapshot* base;
  };

  // Helper for NonLiveSnapshot. Adds the object to the destination
  // snapshot if it is non-live.
  static void AddIfNonLive(const void* ptr, AllocValue* v,
                           AddNonLiveArgs* arg);

  // Write contents of "*allocations" as a heap profile to
  // "file_name". "total" must contain the total of all entries in
  // "*allocations".
  static bool WriteProfile(const char* file_name,
                           const Bucket& total,
                           AllocationMap* allocations);

  // data ----------------------------

  // Memory (de)allocator that we use.
  Allocator alloc_;
  DeAllocator dealloc_;

  // Overall profile stats; we use only the Stats part,
  // but make it a Bucket to pass to UnparseBucket.
  Bucket total_;

  bool profile_mmap_;

  // Bucket hash table for malloc.
  // We hand-craft one instead of using one of the pre-written
  // ones because we do not want to use malloc when operating on the table.
  // It is only a few lines of code, so no big deal.
  Bucket** bucket_table_;
  int num_buckets_;

  // Map of all currently allocated objects and mapped regions we know about.
  AllocationMap* address_map_;

  DISALLOW_COPY_AND_ASSIGN(HeapProfileTable);
};

class HeapProfileTable::Snapshot {
 public:
  const Stats& total() const { return total_; }

  // Report anything in this snapshot as a leak.
  // May use new/delete for temporary storage.
  // If 'should_symbolize' is true, will fork (which is not thread-safe)
  // to turn addresses into symbol names. Set it to false for maximum safety.
  // Also writes a heap profile to "filename" that contains
  // all of the objects in this snapshot.
  void ReportLeaks(const char* checker_name, const char* filename,
                   bool should_symbolize);

  // Report the addresses of all leaked objects.
  // May use new/delete for temporary storage.
  void ReportIndividualObjects();

  bool Empty() const {
    return (total_.allocs == 0) && (total_.alloc_size == 0);
  }

 private:
  friend class HeapProfileTable;

  // Total count/size are stored in a Bucket so we can reuse UnparseBucket.
  Bucket total_;

  // We share the Buckets managed by the parent table, but have our
  // own object->bucket map.
  AllocationMap map_;

  Snapshot(Allocator alloc, DeAllocator dealloc) : map_(alloc, dealloc) {
    memset(&total_, 0, sizeof(total_));
  }

  // Callback used to populate a Snapshot object with entries found
  // in another allocation map.
  inline void Add(const void* ptr, const AllocValue& v) {
    map_.Insert(ptr, v);
    total_.allocs++;
    total_.alloc_size += v.bytes;
  }

  // Helpers for sorting and generating leak reports
  struct Entry;
  struct ReportState;
  static void ReportCallback(const void* ptr, AllocValue* v, ReportState*);
  static void ReportObject(const void* ptr, AllocValue* v, char*);

  DISALLOW_COPY_AND_ASSIGN(Snapshot);
};

#endif  // BASE_HEAP_PROFILE_TABLE_H_