// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat <opensource@google.com>

#include <config.h>
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>                     // for PRIuPTR
#endif
#include <errno.h>                        // for ENOMEM, errno
#include <gperftools/malloc_extension.h>  // for MallocRange, etc
#include "base/basictypes.h"
#include "base/commandlineflags.h"
#include "internal_logging.h"     // for ASSERT, TCMalloc_Printer, etc
#include "page_heap_allocator.h"  // for PageHeapAllocator
#include "static_vars.h"          // for Static
#include "system-alloc.h"         // for TCMalloc_SystemAlloc, etc

DEFINE_double(tcmalloc_release_rate,
              EnvToDouble("TCMALLOC_RELEASE_RATE", 1.0),
              "Rate at which we release unused memory to the system. "
              "Zero means we never release memory back to the system. "
              "Increase this flag to return memory faster; decrease it "
              "to return memory slower. Reasonable rates are in the "
              "range [0,10]");

DEFINE_int64(tcmalloc_heap_limit_mb,
             EnvToInt("TCMALLOC_HEAP_LIMIT_MB", 0),
             "Limit the total size of the process heap to the "
             "specified number of MiB. "
             "When we approach the limit, memory is released "
             "to the system more aggressively (more minor page faults). "
             "Zero means to allocate as long as the system allows.");

namespace tcmalloc {

PageHeap::PageHeap()
    : pagemap_(MetaDataAlloc),
      scavenge_counter_(0),
      // Start scavenging at the kMaxPages list
      release_index_(kMaxPages),
      aggressive_decommit_(false) {
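  // Size-class indices are stored in the pagemap cache; check that the largest
  // index fits in the cache's value field.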
  COMPILE_ASSERT(kClassSizesMax <= (1 << PageMapCache::kValuebits), valuebits);
  for (int i = 0; i < kMaxPages; i++) {
    DLL_Init(&free_[i].normal);
    DLL_Init(&free_[i].returned);
  }
}

Span* PageHeap::SearchFreeAndLargeLists(Length n) {
  ASSERT(Check());
  ASSERT(n > 0);

  // Find first size >= n that has a non-empty list
  for (Length s = n; s <= kMaxPages; s++) {
    Span* ll = &free_[s - 1].normal;
    // If we're lucky, ll is non-empty, meaning it has a suitable span.
    if (!DLL_IsEmpty(ll)) {
      ASSERT(ll->next->location == Span::ON_NORMAL_FREELIST);
      return Carve(ll->next, n);
    }
    // Alternatively, maybe there's a usable returned span.
    ll = &free_[s - 1].returned;
    if (!DLL_IsEmpty(ll)) {
      // We did not call EnsureLimit earlier, to avoid releasing a span
      // that would be taken back immediately.
      // Calling EnsureLimit here is not very expensive, as it fails only if
      // there are no more normal spans (and it fails efficiently)
      // or SystemRelease does not work (then there are probably no returned
      // spans).
      if (EnsureLimit(n)) {
        // ll may have become empty due to coalescing
        if (!DLL_IsEmpty(ll)) {
          ASSERT(ll->next->location == Span::ON_RETURNED_FREELIST);
          return Carve(ll->next, n);
        }
      }
    }
  }
  // No luck in free lists, our last chance is in a larger class.
  return AllocLarge(n);  // May be NULL
}
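// Minimum amount of heap growth between forced-coalesce passes in
// PageHeap::New() below.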
static const size_t kForcedCoalesceInterval = 128*1024*1024;

Span* PageHeap::New(Length n) {
  ASSERT(Check());
  ASSERT(n > 0);

  Span* result = SearchFreeAndLargeLists(n);
  if (result != NULL)
    return result;

  if (stats_.free_bytes != 0 && stats_.unmapped_bytes != 0
      && stats_.free_bytes + stats_.unmapped_bytes >= stats_.system_bytes / 4
      && (stats_.system_bytes / kForcedCoalesceInterval
          != (stats_.system_bytes + (n << kPageShift)) / kForcedCoalesceInterval)) {
    // We're about to grow the heap, but there are lots of free pages.
    // tcmalloc's design decision to keep unmapped and free spans
    // separate and never coalesce them means that sometimes there
    // can be a free run of pages of sufficient size, but it consists of
    // "segments" of different types, so the page heap search cannot find
    // it. To avoid growing the heap and wasting memory in such a case,
    // we unmap all free pages, so that all free spans are maximally
    // coalesced.
    //
    // We also limit the rate of taking this path to at most once per
    // 128 MiB of heap growth. Otherwise programs that grow the heap
    // frequently (which means by small amounts) could be penalized with
    // a higher count of minor page faults.
    //
    // See also large_heap_fragmentation_unittest.cc and
    // https://code.google.com/p/gperftools/issues/detail?id=368
    ReleaseAtLeastNPages(static_cast<Length>(0x7fffffff));

    // Then try again. If we are forced to grow the heap because of
    // large-span fragmentation and not because of the problem described
    // above, then at the very least we've just unmapped free but
    // insufficiently big large spans back to the OS. So in case of really
    // unlucky memory fragmentation we'll be consuming virtual address
    // space, but not real memory.
    result = SearchFreeAndLargeLists(n);
    if (result != NULL) return result;
  }

  // Grow the heap and try again.
  if (!GrowHeap(n)) {
    ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
    ASSERT(Check());
    // The underlying SysAllocator likely set ENOMEM, but we can get here
    // due to EnsureLimit, so we set it here too.
    //
    // Setting errno to ENOMEM here allows us to avoid dealing with it
    // in the fast path.
    errno = ENOMEM;
    return NULL;
  }
  return SearchFreeAndLargeLists(n);
}

Span* PageHeap::AllocLarge(Length n) {
  Span *best = NULL;
  Span *best_normal = NULL;

  // Create a Span to use as an upper bound.
  Span bound;
  bound.start = 0;
  bound.length = n;
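
  // Note: SpanSet is expected to order spans by (length, starting page), so
  // upper_bound() on this zero-start bound finds the smallest (best-fit)
  // span with length >= n in each set.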

  // First search the NORMAL spans.
  SpanSet::iterator place = large_normal_.upper_bound(SpanPtrWithLength(&bound));
  if (place != large_normal_.end()) {
    best = place->span;
    best_normal = best;
    ASSERT(best->location == Span::ON_NORMAL_FREELIST);
  }

  // Try to find a better fit among the RETURNED spans.
  place = large_returned_.upper_bound(SpanPtrWithLength(&bound));
  if (place != large_returned_.end()) {
    Span *c = place->span;
    ASSERT(c->location == Span::ON_RETURNED_FREELIST);
    if (best_normal == NULL
        || c->length < best->length
        || (c->length == best->length && c->start < best->start))
      best = place->span;
  }

  if (best == best_normal) {
    return best == NULL ? NULL : Carve(best, n);
  }

  // best comes from the RETURNED set.

  if (EnsureLimit(n, false)) {
    return Carve(best, n);
  }

  if (EnsureLimit(n, true)) {
    // best could have been destroyed by coalescing.
    // best_normal is not a best fit, and it could have been destroyed as well.
    // We retry; the limit is already ensured:
    return AllocLarge(n);
  }

  // If best_normal existed, EnsureLimit would have succeeded:
  ASSERT(best_normal == NULL);
  // We are not allowed to take best from the returned list.
  return NULL;
}

Span* PageHeap::Split(Span* span, Length n) {
  ASSERT(0 < n);
  ASSERT(n < span->length);
  ASSERT(span->location == Span::IN_USE);
  ASSERT(span->sizeclass == 0);

  const int extra = span->length - n;
  Span* leftover = NewSpan(span->start + n, extra);
  ASSERT(leftover->location == Span::IN_USE);
  RecordSpan(leftover);
  pagemap_.set(span->start + n - 1, span);  // Update map from pageid to span
  span->length = n;

  return leftover;
}

void PageHeap::CommitSpan(Span* span) {
  ++stats_.commit_count;

  TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift),
                        static_cast<size_t>(span->length << kPageShift));
  stats_.committed_bytes += span->length << kPageShift;
  stats_.total_commit_bytes += (span->length << kPageShift);
}

bool PageHeap::DecommitSpan(Span* span) {
  ++stats_.decommit_count;

  bool rv = TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift),
                                   static_cast<size_t>(span->length << kPageShift));
  if (rv) {
    stats_.committed_bytes -= span->length << kPageShift;
    stats_.total_decommit_bytes += (span->length << kPageShift);
  }

  return rv;
}

Span* PageHeap::Carve(Span* span, Length n) {
  ASSERT(n > 0);
  ASSERT(span->location != Span::IN_USE);
  const int old_location = span->location;
  RemoveFromFreeList(span);
  span->location = Span::IN_USE;

  const int extra = span->length - n;
  ASSERT(extra >= 0);
  if (extra > 0) {
    Span* leftover = NewSpan(span->start + n, extra);
    leftover->location = old_location;
    RecordSpan(leftover);

    // The previous span of |leftover| was just split off -- no need to
    // coalesce them. The next span of |leftover| was not previously coalesced
    // with |span|, i.e. it is NULL or has a location other than |old_location|.
#ifndef NDEBUG
    const PageID p = leftover->start;
    const Length len = leftover->length;
    Span* next = GetDescriptor(p+len);
    ASSERT (next == NULL ||
            next->location == Span::IN_USE ||
            next->location != leftover->location);
#endif

    PrependToFreeList(leftover);  // Skip coalescing - no candidates possible
    span->length = n;
    pagemap_.set(span->start + n - 1, span);
  }
  ASSERT(Check());
  if (old_location == Span::ON_RETURNED_FREELIST) {
    // We need to recommit this address space.
    CommitSpan(span);
  }
  ASSERT(span->location == Span::IN_USE);
  ASSERT(span->length == n);
  ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
  return span;
}

void PageHeap::Delete(Span* span) {
  ASSERT(Check());
  ASSERT(span->location == Span::IN_USE);
  ASSERT(span->length > 0);
  ASSERT(GetDescriptor(span->start) == span);
  ASSERT(GetDescriptor(span->start + span->length - 1) == span);
  const Length n = span->length;
  span->sizeclass = 0;
  span->sample = 0;
  span->location = Span::ON_NORMAL_FREELIST;
  MergeIntoFreeList(span);  // Coalesces if possible
  IncrementalScavenge(n);
  ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
  ASSERT(Check());
}
313
Brian Silverman20350ac2021-11-17 18:19:55 -0800314// Given span we're about to free and other span (still on free list),
315// checks if 'other' span is mergable with 'span'. If it is, removes
316// other span from free list, performs aggressive decommit if
317// necessary and returns 'other' span. Otherwise 'other' span cannot
318// be merged and is left untouched. In that case NULL is returned.
319Span* PageHeap::CheckAndHandlePreMerge(Span* span, Span* other) {
320 if (other == NULL) {
321 return other;
Austin Schuh745610d2015-09-06 18:19:50 -0700322 }
Brian Silverman20350ac2021-11-17 18:19:55 -0800323 // if we're in aggressive decommit mode and span is decommitted,
324 // then we try to decommit adjacent span.
325 if (aggressive_decommit_ && other->location == Span::ON_NORMAL_FREELIST
326 && span->location == Span::ON_RETURNED_FREELIST) {
327 bool worked = DecommitSpan(other);
328 if (!worked) {
329 return NULL;
330 }
331 } else if (other->location != span->location) {
332 return NULL;
333 }
334
335 RemoveFromFreeList(other);
336 return other;
Austin Schuh745610d2015-09-06 18:19:50 -0700337}

void PageHeap::MergeIntoFreeList(Span* span) {
  ASSERT(span->location != Span::IN_USE);

  // Coalesce -- we guarantee that "p" != 0, so no bounds checking
  // necessary.  We do not bother resetting the stale pagemap
  // entries for the pieces we are merging together because we only
  // care about the pagemap entries for the boundaries.
  //
  // Note: depending on aggressive_decommit_ mode we allow only
  // similar spans to be coalesced.
  //
  // The following applies if aggressive_decommit_ is enabled:
  //
  // TODO(jar): "Always decommit" causes some extra calls to commit when we are
  // called in GrowHeap() during an allocation :-/.  We need to eval the cost of
  // that oscillation, and possibly do something to reduce it.

  // TODO(jar): We need a better strategy for deciding to commit, or decommit,
  // based on memory usage and free heap sizes.

  const PageID p = span->start;
  const Length n = span->length;

  if (aggressive_decommit_ && span->location == Span::ON_NORMAL_FREELIST) {
    if (DecommitSpan(span)) {
      span->location = Span::ON_RETURNED_FREELIST;
    }
  }

  Span* prev = CheckAndHandlePreMerge(span, GetDescriptor(p-1));
  if (prev != NULL) {
    // Merge preceding span into this span
    ASSERT(prev->start + prev->length == p);
    const Length len = prev->length;
    DeleteSpan(prev);
    span->start -= len;
    span->length += len;
    pagemap_.set(span->start, span);
  }
  Span* next = CheckAndHandlePreMerge(span, GetDescriptor(p+n));
  if (next != NULL) {
    // Merge next span into this span
    ASSERT(next->start == p+n);
    const Length len = next->length;
    DeleteSpan(next);
    span->length += len;
    pagemap_.set(span->start + span->length - 1, span);
  }

  PrependToFreeList(span);
}

void PageHeap::PrependToFreeList(Span* span) {
  ASSERT(span->location != Span::IN_USE);
  if (span->location == Span::ON_NORMAL_FREELIST)
    stats_.free_bytes += (span->length << kPageShift);
  else
    stats_.unmapped_bytes += (span->length << kPageShift);

  if (span->length > kMaxPages) {
    SpanSet *set = &large_normal_;
    if (span->location == Span::ON_RETURNED_FREELIST)
      set = &large_returned_;
    std::pair<SpanSet::iterator, bool> p =
        set->insert(SpanPtrWithLength(span));
    ASSERT(p.second);  // We never have duplicates since span->start is unique.
    span->SetSpanSetIterator(p.first);
    return;
  }

  SpanList* list = &free_[span->length - 1];
  if (span->location == Span::ON_NORMAL_FREELIST) {
    DLL_Prepend(&list->normal, span);
  } else {
    DLL_Prepend(&list->returned, span);
  }
}

void PageHeap::RemoveFromFreeList(Span* span) {
  ASSERT(span->location != Span::IN_USE);
  if (span->location == Span::ON_NORMAL_FREELIST) {
    stats_.free_bytes -= (span->length << kPageShift);
  } else {
    stats_.unmapped_bytes -= (span->length << kPageShift);
  }
  if (span->length > kMaxPages) {
    SpanSet *set = &large_normal_;
    if (span->location == Span::ON_RETURNED_FREELIST)
      set = &large_returned_;
    SpanSet::iterator iter = span->ExtractSpanSetIterator();
    ASSERT(iter->span == span);
    ASSERT(set->find(SpanPtrWithLength(span)) == iter);
    set->erase(iter);
  } else {
    DLL_Remove(span);
  }
}

void PageHeap::IncrementalScavenge(Length n) {
  // Fast path; not yet time to release memory
  scavenge_counter_ -= n;
  if (scavenge_counter_ >= 0) return;  // Not yet time to scavenge

  const double rate = FLAGS_tcmalloc_release_rate;
  if (rate <= 1e-6) {
    // Tiny release rate means that releasing is disabled.
    scavenge_counter_ = kDefaultReleaseDelay;
    return;
  }

  ++stats_.scavenge_count;

  Length released_pages = ReleaseAtLeastNPages(1);

  if (released_pages == 0) {
    // Nothing to scavenge, delay for a while.
    scavenge_counter_ = kDefaultReleaseDelay;
  } else {
    // Compute how long to wait until we return memory.
    // FLAGS_tcmalloc_release_rate==1 means wait for 1000 pages
    // after releasing one page.
    const double mult = 1000.0 / rate;
    double wait = mult * static_cast<double>(released_pages);
    if (wait > kMaxReleaseDelay) {
      // Avoid overflow and bound to reasonable range.
      wait = kMaxReleaseDelay;
    }
    scavenge_counter_ = static_cast<int64_t>(wait);
  }
}

Length PageHeap::ReleaseSpan(Span* s) {
  ASSERT(s->location == Span::ON_NORMAL_FREELIST);

  if (DecommitSpan(s)) {
    RemoveFromFreeList(s);
    const Length n = s->length;
    s->location = Span::ON_RETURNED_FREELIST;
    MergeIntoFreeList(s);  // Coalesces if possible.
    return n;
  }

  return 0;
}

Length PageHeap::ReleaseAtLeastNPages(Length num_pages) {
  Length released_pages = 0;

  // Round robin through the lists of free spans, releasing a
  // span from each list.  Stop after releasing at least num_pages
  // or when there is nothing more to release.
  while (released_pages < num_pages && stats_.free_bytes > 0) {
    for (int i = 0; i < kMaxPages+1 && released_pages < num_pages;
         i++, release_index_++) {
      Span *s;
      if (release_index_ > kMaxPages) release_index_ = 0;

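      // Index kMaxPages denotes the set of large spans; smaller indices
      // select the corresponding fixed-size free list in free_[].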
      if (release_index_ == kMaxPages) {
        if (large_normal_.empty()) {
          continue;
        }
        s = (large_normal_.begin())->span;
      } else {
        SpanList* slist = &free_[release_index_];
        if (DLL_IsEmpty(&slist->normal)) {
          continue;
        }
        s = slist->normal.prev;
      }
      // TODO(todd) if the remaining number of pages to release
      // is significantly smaller than s->length, and s is on the
      // large freelist, should we carve s instead of releasing
      // the whole thing?
      Length released_len = ReleaseSpan(s);
      // Some systems do not support release
      if (released_len == 0) return released_pages;
      released_pages += released_len;
    }
  }
  return released_pages;
}

bool PageHeap::EnsureLimit(Length n, bool withRelease)
{
  Length limit = (FLAGS_tcmalloc_heap_limit_mb*1024*1024) >> kPageShift;
  if (limit == 0) return true;  // there is no limit

  // We do not use stats_.system_bytes because it does not take
  // MetaDataAllocs into account.
  Length takenPages = TCMalloc_SystemTaken >> kPageShift;
  // XXX takenPages may be slightly bigger than limit for two reasons:
  // * MetaDataAllocs ignore the limit (it is not easy to handle
  //   out of memory there)
  // * sys_alloc may round an allocation up to the huge page size,
  //   although a smaller limit was ensured
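  //
  // Example (assuming the default 8 KiB pages, i.e. kPageShift == 13):
  // TCMALLOC_HEAP_LIMIT_MB=64 gives limit = 64*1024*1024 >> 13 = 8192 pages;
  // if takenPages works out to 8000 and n is 300, then 8300 > 8192, so with
  // withRelease we first ask ReleaseAtLeastNPages(108) before re-checking.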

  ASSERT(takenPages >= stats_.unmapped_bytes >> kPageShift);
  takenPages -= stats_.unmapped_bytes >> kPageShift;

  if (takenPages + n > limit && withRelease) {
    takenPages -= ReleaseAtLeastNPages(takenPages + n - limit);
  }

  return takenPages + n <= limit;
}

void PageHeap::RegisterSizeClass(Span* span, uint32 sc) {
  // Associate span object with all interior pages as well
  ASSERT(span->location == Span::IN_USE);
  ASSERT(GetDescriptor(span->start) == span);
  ASSERT(GetDescriptor(span->start+span->length-1) == span);
  span->sizeclass = sc;
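  // RecordSpan() already mapped the first and last pages to |span|, so only
  // the interior pages need to be set here.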
  for (Length i = 1; i < span->length-1; i++) {
    pagemap_.set(span->start+i, span);
  }
}

void PageHeap::GetSmallSpanStats(SmallSpanStats* result) {
  for (int i = 0; i < kMaxPages; i++) {
    result->normal_length[i] = DLL_Length(&free_[i].normal);
    result->returned_length[i] = DLL_Length(&free_[i].returned);
  }
}

void PageHeap::GetLargeSpanStats(LargeSpanStats* result) {
  result->spans = 0;
  result->normal_pages = 0;
  result->returned_pages = 0;
  for (SpanSet::iterator it = large_normal_.begin(); it != large_normal_.end(); ++it) {
    result->normal_pages += it->length;
    result->spans++;
  }
  for (SpanSet::iterator it = large_returned_.begin(); it != large_returned_.end(); ++it) {
    result->returned_pages += it->length;
    result->spans++;
  }
}

bool PageHeap::GetNextRange(PageID start, base::MallocRange* r) {
  Span* span = reinterpret_cast<Span*>(pagemap_.Next(start));
  if (span == NULL) {
    return false;
  }
  r->address = span->start << kPageShift;
  r->length = span->length << kPageShift;
  r->fraction = 0;
  switch (span->location) {
    case Span::IN_USE:
      r->type = base::MallocRange::INUSE;
      r->fraction = 1;
      if (span->sizeclass > 0) {
        // Only some of the objects in this span may be in use.
        const size_t osize = Static::sizemap()->class_to_size(span->sizeclass);
        r->fraction = (1.0 * osize * span->refcount) / r->length;
      }
      break;
    case Span::ON_NORMAL_FREELIST:
      r->type = base::MallocRange::FREE;
      break;
    case Span::ON_RETURNED_FREELIST:
      r->type = base::MallocRange::UNMAPPED;
      break;
    default:
      r->type = base::MallocRange::UNKNOWN;
      break;
  }
  return true;
}

static void RecordGrowth(size_t growth) {
  StackTrace* t = Static::stacktrace_allocator()->New();
  t->depth = GetStackTrace(t->stack, kMaxStackDepth-1, 3);
  t->size = growth;
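  // Storing the previous list head in the last stack slot threads the growth
  // samples into a singly linked list headed at Static::growth_stacks().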
  t->stack[kMaxStackDepth-1] = reinterpret_cast<void*>(Static::growth_stacks());
  Static::set_growth_stacks(t);
}

bool PageHeap::GrowHeap(Length n) {
  ASSERT(kMaxPages >= kMinSystemAlloc);
  if (n > kMaxValidPages) return false;
  Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
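  // Ask for at least kMinSystemAlloc pages at a time (presumably to amortize
  // system-allocation overhead); if that larger request fails, we retry below
  // with exactly n pages.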
  size_t actual_size;
  void* ptr = NULL;
  if (EnsureLimit(ask)) {
    ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
  }
  if (ptr == NULL) {
    if (n < ask) {
      // Try growing just "n" pages
      ask = n;
      if (EnsureLimit(ask)) {
        ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
      }
    }
    if (ptr == NULL) return false;
  }
  ask = actual_size >> kPageShift;
  RecordGrowth(ask << kPageShift);

  ++stats_.reserve_count;
  ++stats_.commit_count;

  uint64_t old_system_bytes = stats_.system_bytes;
  stats_.system_bytes += (ask << kPageShift);
  stats_.committed_bytes += (ask << kPageShift);

  stats_.total_commit_bytes += (ask << kPageShift);
  stats_.total_reserve_bytes += (ask << kPageShift);

  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  ASSERT(p > 0);

  // If we already have a lot of pages allocated, just preallocate a bunch of
  // memory for the page map.  This prevents fragmentation by pagemap metadata
  // when a program keeps allocating and freeing large blocks.

  if (old_system_bytes < kPageMapBigAllocationThreshold
      && stats_.system_bytes >= kPageMapBigAllocationThreshold) {
    pagemap_.PreallocateMoreMemory();
  }

  // Make sure pagemap_ has entries for all of the new pages.
  // Plus ensure one before and one after so coalescing code
  // does not need bounds-checking.
  if (pagemap_.Ensure(p-1, ask+2)) {
    // Pretend the new area is allocated and then Delete() it to cause
    // any necessary coalescing to occur.
    Span* span = NewSpan(p, ask);
    RecordSpan(span);
    Delete(span);
    ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
    ASSERT(Check());
    return true;
  } else {
    // We could not allocate memory within "pagemap_"
    // TODO: Once we can return memory to the system, return the new span
    return false;
  }
}

bool PageHeap::Check() {
  return true;
}

bool PageHeap::CheckExpensive() {
  bool result = Check();
  CheckSet(&large_normal_, kMaxPages + 1, Span::ON_NORMAL_FREELIST);
  CheckSet(&large_returned_, kMaxPages + 1, Span::ON_RETURNED_FREELIST);
  for (int s = 1; s <= kMaxPages; s++) {
    CheckList(&free_[s - 1].normal, s, s, Span::ON_NORMAL_FREELIST);
    CheckList(&free_[s - 1].returned, s, s, Span::ON_RETURNED_FREELIST);
  }
  return result;
}

bool PageHeap::CheckList(Span* list, Length min_pages, Length max_pages,
                         int freelist) {
  for (Span* s = list->next; s != list; s = s->next) {
    CHECK_CONDITION(s->location == freelist);  // NORMAL or RETURNED
    CHECK_CONDITION(s->length >= min_pages);
    CHECK_CONDITION(s->length <= max_pages);
    CHECK_CONDITION(GetDescriptor(s->start) == s);
    CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
  }
  return true;
}

bool PageHeap::CheckSet(SpanSet* spanset, Length min_pages, int freelist) {
  for (SpanSet::iterator it = spanset->begin(); it != spanset->end(); ++it) {
    Span* s = it->span;
    CHECK_CONDITION(s->length == it->length);
    CHECK_CONDITION(s->location == freelist);  // NORMAL or RETURNED
    CHECK_CONDITION(s->length >= min_pages);
    CHECK_CONDITION(GetDescriptor(s->start) == s);
    CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
  }
  return true;
}

}  // namespace tcmalloc