// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat <opensource@google.com>

#ifndef TCMALLOC_PAGE_HEAP_H_
#define TCMALLOC_PAGE_HEAP_H_

#include <config.h>
#include <stddef.h>                     // for size_t
#ifdef HAVE_STDINT_H
#include <stdint.h>                     // for uint64_t, int64_t, uint16_t
#endif
#include <gperftools/malloc_extension.h>
#include "base/basictypes.h"
#include "common.h"
#include "packed-cache-inl.h"
#include "pagemap.h"
#include "span.h"

// We need to dllexport PageHeap just for the unittest.  MSVC complains
// that we don't dllexport the PageHeap members, but we don't need to
// test those, so I just suppress this warning.
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4251)
#endif

// This #ifdef should almost never be set.  Set NO_TCMALLOC_SAMPLES if
// you're porting to a system where you really can't get a stacktrace.
// Because we control the definition of GetStackTrace, all clients of
// GetStackTrace should #include us rather than stacktrace.h.
#ifdef NO_TCMALLOC_SAMPLES
  // We use #define so code compiles even if you #include stacktrace.h somehow.
# define GetStackTrace(stack, depth, skip) (0)
#else
# include <gperftools/stacktrace.h>
#endif

namespace base {
struct MallocRange;
}

namespace tcmalloc {

// -------------------------------------------------------------------------
// Map from page-id to per-page data
// -------------------------------------------------------------------------

// We use a two-level map for 32-bit machines (and for 64-bit machines
// with a 48-bit usable address space; see below), and a three-level map
// otherwise.
// We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
// because sometimes the sizeclass is all the information we need.

// Selector class -- general selector uses 3-level map
template <int BITS> class MapSelector {
 public:
  typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
};

#ifndef TCMALLOC_SMALL_BUT_SLOW
// x86-64 and arm64 use 48 bits of address space, so we can get away with
// a two-level map.  But since the initial RAM consumption of this mode is
// a bit on the higher side, we opt out of it in TCMALLOC_SMALL_BUT_SLOW
// mode.
template <> class MapSelector<48> {
 public:
  typedef TCMalloc_PageMap2<48-kPageShift> Type;
};

#endif // TCMALLOC_SMALL_BUT_SLOW

// A two-level map for 32-bit machines
template <> class MapSelector<32> {
 public:
  typedef TCMalloc_PageMap2<32-kPageShift> Type;
};
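
// Illustrative sketch (comment only, not part of the header): how the
// selector is consumed.  Assuming kAddressBits == 48 and kPageShift == 13
// (8KiB pages), the selector yields TCMalloc_PageMap2<35>, a two-level
// radix map over 2^35 page ids.  'ptr' and 'span' are assumed names:
//
//   typedef MapSelector<kAddressBits>::Type PageMap;
//   PageMap pagemap(MetaDataAlloc);  // metadata allocator from common.h
//   PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
//   pagemap.set(p, span);                                // store a Span*
//   Span* s = reinterpret_cast<Span*>(pagemap.get(p));   // and fetch it back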

// -------------------------------------------------------------------------
// Page-level allocator
//  * Eager coalescing
//
// Heap for page-level allocation.  We allow allocating and freeing a
// contiguous run of pages (called a "span").
// -------------------------------------------------------------------------

class PERFTOOLS_DLL_DECL PageHeap {
 public:
  PageHeap();

  // Allocate a run of "n" pages.  Returns NULL if out of memory.
  // Caller should not pass "n == 0" -- instead, n should have
  // been rounded up already.
  Span* New(Length n);

  // Delete the span "[p, p+n-1]".
  // REQUIRES: span was returned by earlier call to New() and
  //           has not yet been deleted.
  void Delete(Span* span);

  // Mark an allocated span as being used for small objects of the
  // specified size-class.
  // REQUIRES: span was returned by an earlier call to New()
  //           and has not yet been deleted.
  void RegisterSizeClass(Span* span, uint32 sc);

  // Split an allocated span into two spans: one of length "n" pages
  // followed by another span of length "span->length - n" pages.
  // Modifies "*span" to point to the first span of length "n" pages.
  // Returns a pointer to the second span.
  //
  // REQUIRES: "0 < n < span->length"
  // REQUIRES: span->location == IN_USE
  // REQUIRES: span->sizeclass == 0
  Span* Split(Span* span, Length n);

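  // Illustrative sketch (comment only, not part of the header) of the
  // allocation API above; 'heap' and 'cl' are hypothetical names, and
  // error handling is elided:
  //
  //   Span* span = heap->New(4);              // grab a 4-page run
  //   if (span != NULL) {
  //     Span* rest = heap->Split(span, 1);    // span: 1 page, rest: 3 pages
  //     heap->RegisterSizeClass(span, cl);    // 'span' now holds small objects
  //     heap->Delete(rest);                   // give the tail back
  //   }
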
  // Return the descriptor for the specified page.  Returns NULL if
  // this PageID was not allocated previously.
  inline ATTRIBUTE_ALWAYS_INLINE
  Span* GetDescriptor(PageID p) const {
    return reinterpret_cast<Span*>(pagemap_.get(p));
  }

  // If this page heap is managing a range with starting page # >= start,
  // store info about the range in *r and return true.  Else return false.
  bool GetNextRange(PageID start, base::MallocRange* r);

  // Page heap statistics
  struct Stats {
    Stats() : system_bytes(0), free_bytes(0), unmapped_bytes(0), committed_bytes(0),
              scavenge_count(0), commit_count(0), total_commit_bytes(0),
              decommit_count(0), total_decommit_bytes(0),
              reserve_count(0), total_reserve_bytes(0) {}
    uint64_t system_bytes;    // Total bytes allocated from system
    uint64_t free_bytes;      // Total bytes on normal freelists
    uint64_t unmapped_bytes;  // Total bytes on returned freelists
    uint64_t committed_bytes; // Bytes committed, always <= system_bytes.

    uint64_t scavenge_count;  // Number of times scavenging released pages to the OS

    uint64_t commit_count;          // Number of virtual memory commits
    uint64_t total_commit_bytes;    // Bytes committed in lifetime of process
    uint64_t decommit_count;        // Number of virtual memory decommits
    uint64_t total_decommit_bytes;  // Bytes decommitted in lifetime of process

    uint64_t reserve_count;         // Number of virtual memory reserves
    uint64_t total_reserve_bytes;   // Bytes reserved in lifetime of process
  };
  inline Stats stats() const { return stats_; }

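  // Illustrative sketch (comment only, not part of the header): the
  // counters are plain uint64_t, so a caller can snapshot them and derive
  // quantities from them; 'heap' is a hypothetical PageHeap*:
  //
  //   PageHeap::Stats s = heap->stats();
  //   uint64_t still_mapped = s.system_bytes - s.unmapped_bytes;
  //   uint64_t idle_bytes   = s.free_bytes + s.unmapped_bytes;
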
  struct SmallSpanStats {
    // For each free list of small spans, the length (in spans) of the
    // normal and returned free lists for that size.
    //
    // NOTE: index 'i' counts spans of length 'i + 1'.
    int64 normal_length[kMaxPages];
    int64 returned_length[kMaxPages];
  };
  void GetSmallSpanStats(SmallSpanStats* result);

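  // Illustrative sketch (comment only, not part of the header): because
  // slot 'i' counts spans of 'i + 1' pages, totals must be weighted
  // accordingly; 'heap' is a hypothetical PageHeap*:
  //
  //   PageHeap::SmallSpanStats small;
  //   heap->GetSmallSpanStats(&small);
  //   int64 free_small_pages = 0;
  //   for (int i = 0; i < kMaxPages; i++) {
  //     free_small_pages += (i + 1) * (small.normal_length[i] +
  //                                    small.returned_length[i]);
  //   }
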
  // Stats for free large spans (i.e., spans with more than kMaxPages pages).
  struct LargeSpanStats {
    int64 spans;           // Number of such spans
    int64 normal_pages;    // Combined page length of normal large spans
    int64 returned_pages;  // Combined page length of unmapped spans
  };
  void GetLargeSpanStats(LargeSpanStats* result);

  bool Check();
  // Like Check() but does some more comprehensive checking.
  bool CheckExpensive();
  bool CheckList(Span* list, Length min_pages, Length max_pages,
                 int freelist);  // ON_NORMAL_FREELIST or ON_RETURNED_FREELIST
  bool CheckSet(SpanSet* s, Length min_pages, int freelist);

  // Try to release at least num_pages for reuse by the OS.  Returns
  // the actual number of pages released, which may be less than
  // num_pages if there weren't enough pages to release.  The result
  // may also be larger than num_pages since the page heap might decide
  // to release one large range instead of fragmenting it into two
  // smaller released and unreleased ranges.
  Length ReleaseAtLeastNPages(Length num_pages);

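  // Illustrative sketch (comment only, not part of the header): this is
  // the hook a periodic memory-releaser would drive; 'heap' and
  // 'extra_bytes' are hypothetical, and the return value says how much
  // actually went back to the OS:
  //
  //   Length released = heap->ReleaseAtLeastNPages(extra_bytes >> kPageShift);
  //   if (released == 0) { /* nothing was eligible for release */ }
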
  // Reads and writes to pagemap_cache_ do not require locking.
  bool TryGetSizeClass(PageID p, uint32* out) const {
    return pagemap_cache_.TryGet(p, out);
  }
  void SetCachedSizeClass(PageID p, uint32 cl) {
    ASSERT(cl != 0);
    pagemap_cache_.Put(p, cl);
  }
  void InvalidateCachedSizeClass(PageID p) { pagemap_cache_.Invalidate(p); }
  uint32 GetSizeClassOrZero(PageID p) const {
    uint32 cached_value;
    if (!TryGetSizeClass(p, &cached_value)) {
      cached_value = 0;
    }
    return cached_value;
  }

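  // Illustrative sketch (comment only, not part of the header): the
  // deallocation fast path consults this cache before walking the full
  // pagemap; 'ptr' and 'heap' are hypothetical:
  //
  //   const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  //   uint32 cl;
  //   if (heap->TryGetSizeClass(p, &cl)) {
  //     // Hit: size class known without touching the pagemap.
  //   } else {
  //     Span* span = heap->GetDescriptor(p);  // miss: take the slow path
  //   }
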
  bool GetAggressiveDecommit(void) { return aggressive_decommit_; }
  void SetAggressiveDecommit(bool aggressive_decommit) {
    aggressive_decommit_ = aggressive_decommit;
  }

 private:
  // Allocates a big block of memory for the pagemap once we reach more than
  // 128MB
  static const size_t kPageMapBigAllocationThreshold = 128 << 20;

  // Minimum number of pages to fetch from system at a time.  Must be
  // significantly bigger than kBlockSize to amortize system-call
  // overhead, and also to reduce external fragmentation.  Also, we
  // should keep this value big because various incarnations of Linux
  // have small limits on the number of mmap() regions per
  // address-space.
  // REQUIRED: kMinSystemAlloc <= kMaxPages;
  static const int kMinSystemAlloc = kMaxPages;

  // Never delay scavenging for more than the following number of
  // deallocated pages.  With 4K pages, this comes to 4GB of
  // deallocation.
  static const int kMaxReleaseDelay = 1 << 20;

  // If there is nothing to release, wait for so many pages before
  // scavenging again.  With 4K pages, this comes to 1GB of memory.
  static const int kDefaultReleaseDelay = 1 << 18;

  // Pick the appropriate map and cache types based on pointer size
  typedef MapSelector<kAddressBits>::Type PageMap;
  typedef PackedCache<kAddressBits - kPageShift> PageMapCache;
  mutable PageMapCache pagemap_cache_;
  PageMap pagemap_;

  // We segregate spans of a given size into two circular linked
  // lists: one for normal spans, and one for spans whose memory
  // has been returned to the system.
  struct SpanList {
    Span normal;
    Span returned;
  };

  // Sets of spans with length > kMaxPages.
  //
  // Rather than using a linked list, we use sets here for efficient
  // best-fit search.
  SpanSet large_normal_;
  SpanSet large_returned_;

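  // Illustrative sketch (comment only, not part of the header) of the
  // idea: keeping free spans ordered by (length, start) makes the
  // smallest span that still fits one ordered lookup away.  The
  // 'key_for_length' helper is hypothetical, not the actual SpanSet API:
  //
  //   // Find the first free span with length >= n pages:
  //   SpanSet::iterator it = large_normal_.lower_bound(key_for_length(n));
  //   if (it != large_normal_.end()) { /* best fit found */ }
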
  // Array mapping from span length to a doubly linked list of free spans
  //
  // NOTE: index 'i' stores spans of length 'i + 1'.
  SpanList free_[kMaxPages];

  // Statistics on system, free, and unmapped bytes
  Stats stats_;

  Span* SearchFreeAndLargeLists(Length n);

  bool GrowHeap(Length n);

  // REQUIRES: span->length >= n
  // REQUIRES: span->location != IN_USE
  // Remove span from its free list, and move any leftover part of
  // span into appropriate free lists.  Also update "span" to have
  // length exactly "n" and mark it as non-free so it can be returned
  // to the client.  After all that, decrease free_pages_ by n and
  // return span.
  Span* Carve(Span* span, Length n);

  void RecordSpan(Span* span) {
    pagemap_.set(span->start, span);
    if (span->length > 1) {
      pagemap_.set(span->start + span->length - 1, span);
    }
  }

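  // Why RecordSpan() only maps the first and last page: coalescing only
  // ever probes the page just before a span's start and the page just
  // after its end, so boundary entries suffice for free spans (interior
  // pages are filled in by RegisterSizeClass() for spans holding small
  // objects).  A sketch of that probe (comment only, not part of the
  // header):
  //
  //   Span* prev = GetDescriptor(span->start - 1);            // ends just before us?
  //   Span* next = GetDescriptor(span->start + span->length); // starts just after us?
  //   // If prev/next are free, MergeIntoFreeList() can absorb them.
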
  // Allocate a large span of length == n.  If successful, returns a
  // span of exactly the specified length.  Else, returns NULL.
  Span* AllocLarge(Length n);

  // Coalesce span with neighboring spans if possible, prepend to
  // appropriate free list, and adjust stats.
  void MergeIntoFreeList(Span* span);

  // Commit the span.
  void CommitSpan(Span* span);

  // Decommit the span.
  bool DecommitSpan(Span* span);

  // Prepends span to appropriate free list, and adjusts stats.
  void PrependToFreeList(Span* span);

  // Removes span from its free list, and adjusts stats.
  void RemoveFromFreeList(Span* span);

  // Incrementally release some memory to the system.
  // IncrementalScavenge(n) is called whenever n pages are freed.
  void IncrementalScavenge(Length n);

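  // Illustrative sketch (comment only, not part of the header) of the
  // intended pacing under the constants above; the real logic lives in
  // page_heap.cc and may differ:
  //
  //   scavenge_counter_ -= n;
  //   if (scavenge_counter_ >= 0) return;    // not yet time to scavenge
  //   if (ReleaseAtLeastNPages(1) == 0) {
  //     // Nothing could be released; back off before trying again.
  //     scavenge_counter_ = kDefaultReleaseDelay;
  //   }
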
  // Attempts to decommit 's' and move it to the returned freelist.
  //
  // Returns the length of the Span or zero if release failed.
  //
  // REQUIRES: 's' must be on the NORMAL freelist.
  Length ReleaseSpan(Span* s);

  // Checks if we are allowed to take more memory from the system.
  // If limit is reached and allowRelease is true, tries to release
  // some unused spans.
  bool EnsureLimit(Length n, bool allowRelease = true);

  // Helper for coalescing in MergeIntoFreeList(): if "other" can be
  // merged with "span" (it is on the same freelist, or can be moved
  // there by decommitting under aggressive decommit), removes "other"
  // from its free list and returns it; otherwise returns NULL.
  Span* CheckAndHandlePreMerge(Span* span, Span* other);

  // Number of pages to deallocate before doing more scavenging
  int64_t scavenge_counter_;

  // Index of last free list where we released memory to the OS.
  int release_index_;

  bool aggressive_decommit_;
};

}  // namespace tcmalloc

#ifdef _MSC_VER
#pragma warning(pop)
#endif

#endif  // TCMALLOC_PAGE_HEAP_H_