#include "absl/base/internal/low_level_alloc.h"

#include <type_traits>

#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/internal/direct_mmap.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/macros.h"
#include "absl/base/thread_annotations.h"

#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING

#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"

#if defined(__APPLE__)

#if !defined MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif  // !MAP_ANONYMOUS

namespace base_internal {
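// Choose a random skiplist height with a geometric distribution: step the
// linear congruential generator until the sampled bit comes up 1, so higher
// levels are reached with probability about 1/2 per level.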
  while ((((r = r * 1103515245 + 12345) >> 30) & 1) == 0) {
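// Clamp the level so the next[] pointer array still fits within a block of
// `size` bytes; offsetof(AllocList, next) accounts for the header prefix.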
  size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *);
  if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
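// Skiplist search: descend level by level, recording in prev[] the last node
// at each level whose address precedes e, so callers can splice around e.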
static AllocList *LLA_SkiplistSearch(AllocList *head, AllocList *e,
                                     AllocList **prev) {
    for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) {
  return (head->levels == 0) ? nullptr : prev[0]->next[0];
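// Skiplist insertion: raise head's level count if e is taller than any
// existing node, then link e in after prev[i] at each of its levels.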
  for (; head->levels < e->levels; head->levels++) {
    prev[head->levels] = head;
  }
  for (int i = 0; i != e->levels; i++) {
    e->next[i] = prev[i]->next[i];
    prev[i]->next[i] = e;
  }
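// Skiplist deletion: unsplice e at every level where prev[i] still points to
// it, then lower head->levels while the topmost level is empty.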
  for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
    prev[i]->next[i] = e->next[i];
  }
  while (head->levels > 0 && head->next[head->levels - 1] == nullptr) {
    head->levels--;
  }
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
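// The global arenas live in static storage and are constructed lazily via
// placement new, avoiding dynamic allocation and static destructors.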
void CreateGlobalArenas() {
  new (&default_arena_storage)
      LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook);
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  new (&unhooked_async_sig_safe_arena_storage)
      LowLevelAlloc::Arena(LowLevelAlloc::kAsyncSignalSafe);
#endif
}
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
      &unhooked_async_sig_safe_arena_storage);
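// ArenaLock pairs the arena spinlock with signal blocking: for
// kAsyncSignalSafe arenas all signals are masked inside the critical section,
// so a signal handler cannot re-enter the allocator mid-operation.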
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
      const int err = pthread_sigmask(SIG_SETMASK, &mask_, nullptr);
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  ArenaLock(const ArenaLock &) = delete;
  ArenaLock &operator=(const ArenaLock &) = delete;
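// Returns the system page size. The Windows branch also honors the
// allocation granularity, since VirtualAlloc reservations align to it.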
size_t GetPageSize() {
#ifdef _WIN32
  SYSTEM_INFO system_info;
  GetSystemInfo(&system_info);
  return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
#elif defined(__wasm__) || defined(__asmjs__)
  return getpagesize();
#else
  return static_cast<size_t>(sysconf(_SC_PAGESIZE));
#endif
}
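// Compute the allocation quantum: start at 16 bytes and double until the
// quantum can hold an AllocList::Header, yielding a power of two.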
size_t RoundedUpBlockSize() {
  size_t round_up = 16;
  while (round_up < sizeof(AllocList::Header)) {
    round_up += round_up;
  }
  return round_up;
}
      pagesize(GetPageSize()),
      round_up(RoundedUpBlockSize()),
      min_size(2 * round_up),
  freelist.header.size = 0;
  freelist.header.magic = Magic(kMagicUnallocated, &freelist.header);
  freelist.header.arena = this;
  memset(freelist.next, 0, sizeof(freelist.next));
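// NewArena chooses which arena holds the new arena's own metadata: an
// unhooked (and, if requested, async-signal-safe) arena, so that metadata
// allocations never re-enter user malloc hooks.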
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    meta_data_arena = UnhookedAsyncSigSafeArena();
    meta_data_arena = UnhookedArena();
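// A minimal caller-side sketch of the arena lifecycle these helpers support
// (hypothetical usage, not part of this file; assumes the public
// LowLevelAlloc interface from low_level_alloc.h):
//
//   LowLevelAlloc::Arena *a = LowLevelAlloc::NewArena(0);
//   void *p = LowLevelAlloc::AllocWithArena(64, a);
//   LowLevelAlloc::Free(p);
//   LowLevelAlloc::DeleteArena(a);  // checks that no allocations remain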
386 "may not delete default arena");
  if (arena->allocation_count != 0) {
  while (arena->freelist.next[0] != nullptr) {
    AllocList *region = arena->freelist.next[0];
    size_t size = region->header.size;
    arena->freelist.next[0] = region->next[0];
398 "bad magic number in DeleteArena()");
400 "bad arena pointer in DeleteArena()");
402 "empty arena has non-page-aligned block size");
404 "empty arena has non-page-aligned block");
    munmap_result = VirtualFree(region, 0, MEM_RELEASE);
    ABSL_RAW_CHECK(munmap_result != 0,
                   "LowLevelAlloc::DeleteArena: VirtualFree failed");
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
      munmap_result = munmap(region, size);
    } else {
      munmap_result = base_internal::DirectMunmap(region, size);
    }
#else
    munmap_result = munmap(region, size);
#endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if (munmap_result != 0) {
  AllocList *next = prev->next[i];
  if (next != nullptr) {
459 "bad magic number in Next()");
    if (prev != &arena->freelist) {
      ABSL_RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
                         reinterpret_cast<char *>(next),
                     "malformed freelist");
  AllocList *n = a->next[0];
  if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size ==
                          reinterpret_cast<char *>(n)) {
    a->header.size += n->header.size;
    n->header.arena = nullptr;
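// AddToFreelist recovers the block header, which lives immediately before
// the user pointer, and validates its magic and arena fields first.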
  AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
                                               sizeof(f->header));
495 "bad magic number in AddToFreelist()");
497 "bad arena pointer in AddToFreelist()");
    AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
                                                 sizeof(f->header));
    arena->allocation_count--;
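// Allocation rounds the request up to the block quantum, then walks the
// freelist for the first block large enough, starting at the skiplist level
// such a block must reach.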
    if (i < arena->freelist.levels) {
      AllocList *before = &arena->freelist;
      while ((s = Next(i, before, arena)) != nullptr &&
             s->header.size < req_rnd) {
      size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
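// Ask the OS for the rounded-up region; the platform branches below use
// VirtualAlloc, DirectMmap (async-signal-safe arenas), or plain mmap.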
      new_pages = VirtualAlloc(0, new_pages_size,
                               MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
        new_pages = base_internal::DirectMmap(nullptr, new_pages_size,
                                              PROT_WRITE | PROT_READ,
                                              MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      } else {
        new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      }
#else
      new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
#endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
      if (new_pages == MAP_FAILED) {
      s = reinterpret_cast<AllocList *>(new_pages);
      s->header.size = new_pages_size;
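// If the region is larger than needed, split it: the tail becomes a separate
// block n that is returned to the freelist, and s keeps the rounded request.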
      AllocList *n =
          reinterpret_cast<AllocList *>(req_rnd + reinterpret_cast<char *>(s));
      n->header.size = s->header.size - req_rnd;
      s->header.size = req_rnd;
    arena->allocation_count++;
#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING