// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat <opensource@google.com>
#ifndef TCMALLOC_PAGE_HEAP_H_
#define TCMALLOC_PAGE_HEAP_H_

#include <config.h>
#include <stddef.h>                     // for size_t
#ifdef HAVE_STDINT_H
#include <stdint.h>                     // for uint64_t, int64_t, uint16_t
#endif
#include <gperftools/malloc_extension.h>
#include "base/basictypes.h"
#include "common.h"
#include "packed-cache-inl.h"
#include "pagemap.h"
#include "span.h"

// We need to dllexport PageHeap just for the unittest.  MSVC complains
// that we don't dllexport the PageHeap members, but we don't need to
// test those, so I just suppress this warning.
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4251)
#endif

// This #ifdef should almost never be set.  Set NO_TCMALLOC_SAMPLES if
// you're porting to a system where you really can't get a stacktrace.
// Because we control the definition of GetStackTrace, all clients of
// GetStackTrace should #include us rather than stacktrace.h.
#ifdef NO_TCMALLOC_SAMPLES
  // We use #define so code compiles even if you #include stacktrace.h somehow.
# define GetStackTrace(stack, depth, skip) (0)
#else
# include <gperftools/stacktrace.h>
#endif

namespace base {
struct MallocRange;
}

namespace tcmalloc {

// -------------------------------------------------------------------------
// Map from page-id to per-page data
// -------------------------------------------------------------------------

// We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
// We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
// because sometimes the sizeclass is all the information we need.

// Selector class -- general selector uses 3-level map
template <int BITS> class MapSelector {
 public:
  typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
  typedef PackedCache<BITS-kPageShift, uint64_t> CacheType;
};

// A two-level map for 32-bit machines
template <> class MapSelector<32> {
 public:
  typedef TCMalloc_PageMap2<32-kPageShift> Type;
  typedef PackedCache<32-kPageShift, uint16_t> CacheType;
};
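
// For example, on a 64-bit build where common.h defines kAddressBits == 48
// and kPageShift == 13, MapSelector<kAddressBits>::Type resolves to
// TCMalloc_PageMap3<35>, a three-level radix tree keyed by 35-bit page
// numbers, and CacheType to PackedCache<35, uint64_t>.  (Illustrative only;
// the actual constants depend on the build configuration.)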

// -------------------------------------------------------------------------
// Page-level allocator
//  * Eager coalescing
//
// Heap for page-level allocation.  We allow allocating and freeing
// contiguous runs of pages (called a "span").
// -------------------------------------------------------------------------

class PERFTOOLS_DLL_DECL PageHeap {
 public:
  PageHeap();

  // Allocate a run of "n" pages.  Returns NULL if out of memory.
  // Caller should not pass "n == 0" -- instead, n should have
  // been rounded up already.
  Span* New(Length n);

  // Delete the span "[p, p+n-1]".
  // REQUIRES: span was returned by earlier call to New() and
  //           has not yet been deleted.
  void Delete(Span* span);
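
  // A minimal usage sketch (illustrative only; "heap" is assumed to point
  // at the process-wide PageHeap, with the page heap lock assumed held):
  //
  //   Span* span = heap->New(4);            // allocate a 4-page run
  //   if (span != NULL) {
  //     // ... use pages [span->start, span->start + span->length) ...
  //     heap->Delete(span);                 // return the run to the heap
  //   }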

  // Mark an allocated span as being used for small objects of the
  // specified size-class.
  // REQUIRES: span was returned by an earlier call to New()
  //           and has not yet been deleted.
  void RegisterSizeClass(Span* span, size_t sc);

  // Split an allocated span into two spans: one of length "n" pages
  // followed by another span of length "span->length - n" pages.
  // Modifies "*span" to point to the first span of length "n" pages.
  // Returns a pointer to the second span.
  //
  // REQUIRES: "0 < n < span->length"
  // REQUIRES: span->location == IN_USE
  // REQUIRES: span->sizeclass == 0
  Span* Split(Span* span, Length n);
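
  // Illustrative sketch (assuming "span" is an IN_USE span of 10 pages):
  //
  //   Span* rest = heap->Split(span, 3);
  //   // "span" now covers the first 3 pages and "rest" the remaining 7;
  //   // both stay IN_USE and must eventually be passed to Delete().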

  // Return the descriptor for the specified page.  Returns NULL if
  // this PageID was not allocated previously.
  inline Span* GetDescriptor(PageID p) const {
    return reinterpret_cast<Span*>(pagemap_.get(p));
  }

  // If this page heap is managing a range with starting page # >= start,
  // store info about the range in *r and return true.  Else return false.
  bool GetNextRange(PageID start, base::MallocRange* r);
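
  // Illustrative sketch: walk every range this heap manages (assumes the
  // "address" and "length" fields of base::MallocRange as declared in
  // gperftools/malloc_extension.h):
  //
  //   base::MallocRange r;
  //   for (PageID p = 0; heap->GetNextRange(p, &r);
  //        p = (r.address + r.length) >> kPageShift) {
  //     // ... inspect r.type, r.address, r.length ...
  //   }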

  // Page heap statistics
  struct Stats {
    Stats() : system_bytes(0), free_bytes(0), unmapped_bytes(0),
              committed_bytes(0) {}
    uint64_t system_bytes;    // Total bytes allocated from system
    uint64_t free_bytes;      // Total bytes on normal freelists
    uint64_t unmapped_bytes;  // Total bytes on returned freelists
    uint64_t committed_bytes; // Bytes committed, always <= system_bytes.
  };
  inline Stats stats() const { return stats_; }
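
  // The counters above are roughly related as follows (illustrative; the
  // exact accounting lives in page_heap.cc):
  //
  //   committed_bytes ~= system_bytes - unmapped_bytes
  //   free_bytes + unmapped_bytes <= system_bytes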

  struct SmallSpanStats {
    // For each free list of small spans, the length (in spans) of the
    // normal and returned free lists for that size.
    int64 normal_length[kMaxPages];
    int64 returned_length[kMaxPages];
  };
  void GetSmallSpanStats(SmallSpanStats* result);

  // Stats for free large spans (i.e., spans with >= kMaxPages pages).
  struct LargeSpanStats {
    int64 spans;           // Number of such spans
    int64 normal_pages;    // Combined page length of normal large spans
    int64 returned_pages;  // Combined page length of unmapped spans
  };
  void GetLargeSpanStats(LargeSpanStats* result);
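
  // Illustrative sketch: total pages sitting on the small normal free
  // lists (assumes slot "len" counts spans of exactly "len" pages,
  // matching the free_[] layout below):
  //
  //   PageHeap::SmallSpanStats small;
  //   heap->GetSmallSpanStats(&small);
  //   int64 normal_pages = 0;
  //   for (int len = 0; len < kMaxPages; len++) {
  //     normal_pages += len * small.normal_length[len];
  //   }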

  bool Check();
  // Like Check() but does some more comprehensive checking.
  bool CheckExpensive();
  bool CheckList(Span* list, Length min_pages, Length max_pages,
                 int freelist);  // ON_NORMAL_FREELIST or ON_RETURNED_FREELIST

  // Try to release at least num_pages for reuse by the OS.  Returns
  // the actual number of pages released, which may be less than
  // num_pages if there weren't enough pages to release.  The result
  // may also be larger than num_pages since page_heap might decide to
  // release one large range instead of fragmenting it into two
  // smaller released and unreleased ranges.
  Length ReleaseAtLeastNPages(Length num_pages);
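
  // Illustrative sketch: hand roughly 1MB back to the OS on 4K pages.
  //
  //   Length released = heap->ReleaseAtLeastNPages(256);
  //   // "released" may be smaller (nothing left to release) or larger
  //   // (a big free span was released whole rather than split).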

  // Return 0 if we have no information, or else the correct sizeclass for p.
  // Reads and writes to pagemap_cache_ do not require locking.
  // The entries are 64 bits on 64-bit hardware and 16 bits on
  // 32-bit hardware, and we don't mind raciness as long as each read of
  // an entry yields a valid entry, not a partially updated entry.
  size_t GetSizeClassIfCached(PageID p) const {
    return pagemap_cache_.GetOrDefault(p, 0);
  }
  void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }
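
  // Illustrative sketch of the intended fast path for size lookup
  // (assumes "p" is a page known to be allocated):
  //
  //   size_t cl = heap->GetSizeClassIfCached(p);
  //   if (cl == 0) {                              // cache miss
  //     cl = heap->GetDescriptor(p)->sizeclass;   // slow path: radix tree
  //     heap->CacheSizeClass(p, cl);
  //   }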

  bool GetAggressiveDecommit(void) { return aggressive_decommit_; }
  void SetAggressiveDecommit(bool aggressive_decommit) {
    aggressive_decommit_ = aggressive_decommit;
  }

 private:
  // Allocates a big block of memory for the pagemap once we reach more than
  // 128MB
  static const size_t kPageMapBigAllocationThreshold = 128 << 20;

  // Minimum number of pages to fetch from system at a time.  Must be
  // significantly bigger than kBlockSize to amortize system-call
  // overhead, and also to reduce external fragmentation.  Also, we
  // should keep this value big because various incarnations of Linux
  // have small limits on the number of mmap() regions per
  // address-space.
  // REQUIRED: kMinSystemAlloc <= kMaxPages
  static const int kMinSystemAlloc = kMaxPages;

  // Never delay scavenging for more than the following number of
  // deallocated pages.  With 4K pages, this comes to 4GB of
  // deallocation.
  static const int kMaxReleaseDelay = 1 << 20;

  // If there is nothing to release, wait for so many pages before
  // scavenging again.  With 4K pages, this comes to 1GB of memory.
  static const int kDefaultReleaseDelay = 1 << 18;

  // Pick the appropriate map and cache types based on pointer size
  typedef MapSelector<kAddressBits>::Type PageMap;
  typedef MapSelector<kAddressBits>::CacheType PageMapCache;
  PageMap pagemap_;
  mutable PageMapCache pagemap_cache_;

  // We segregate spans of a given size into two circular linked
  // lists: one for normal spans, and one for spans whose memory
  // has been returned to the system.
  struct SpanList {
    Span normal;
    Span returned;
  };
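
  // Each member above is the sentinel head of a doubly linked circular
  // list, so an empty list is one whose head points at itself.
  // Illustrative sketch using the helpers declared in span.h:
  //
  //   DLL_Init(&list->normal);                   // make the list empty
  //   DLL_Prepend(&list->normal, span);          // push a span
  //   bool empty = DLL_IsEmpty(&list->normal);   // false now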

  // List of free spans of length >= kMaxPages
  SpanList large_;

  // Array mapping from span length to a doubly linked list of free spans
  SpanList free_[kMaxPages];

  // Statistics on system, free, and unmapped bytes
  Stats stats_;

  Span* SearchFreeAndLargeLists(Length n);

  bool GrowHeap(Length n);

  // REQUIRES: span->length >= n
  // REQUIRES: span->location != IN_USE
  // Remove span from its free list, and move any leftover part of
  // span into appropriate free lists.  Also update "span" to have
  // length exactly "n" and mark it as non-free so it can be returned
  // to the client.  After all that, decrease the free stats by n
  // pages and return span.
  Span* Carve(Span* span, Length n);

  void RecordSpan(Span* span) {
    pagemap_.set(span->start, span);
    if (span->length > 1) {
      pagemap_.set(span->start + span->length - 1, span);
    }
  }
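
  // Note: RecordSpan only maps a span's first and last pages; that is
  // all Delete() needs to locate neighboring spans when coalescing.
  // Interior pages are only filled in by RegisterSizeClass().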

  // Allocate a large span of length == n.  If successful, returns a
  // span of exactly the specified length.  Else, returns NULL.
  Span* AllocLarge(Length n);

  // Coalesce span with neighboring spans if possible, prepend to
  // appropriate free list, and adjust stats.
  void MergeIntoFreeList(Span* span);

  // Commit the span.
  void CommitSpan(Span* span);

  // Decommit the span.
  bool DecommitSpan(Span* span);

  // Prepends span to appropriate free list, and adjusts stats.
  void PrependToFreeList(Span* span);

  // Removes span from its free list, and adjusts stats.
  void RemoveFromFreeList(Span* span);

  // Incrementally release some memory to the system.
  // IncrementalScavenge(n) is called whenever n pages are freed.
  void IncrementalScavenge(Length n);

  // Release the last span on the normal portion of this list.
  // Return the length of that span or zero if release failed.
  Length ReleaseLastNormalSpan(SpanList* slist);

  // Checks if we are allowed to take more memory from the system.
  // If limit is reached and allowRelease is true, tries to release
  // some unused spans.
  bool EnsureLimit(Length n, bool allowRelease = true);

  bool MayMergeSpans(Span* span, Span* other);

  // Number of pages to deallocate before doing more scavenging
  int64_t scavenge_counter_;

  // Index of last free list where we released memory to the OS.
  int release_index_;

  bool aggressive_decommit_;
};

}  // namespace tcmalloc

#ifdef _MSC_VER
#pragma warning(pop)
#endif

#endif  // TCMALLOC_PAGE_HEAP_H_