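// A low-level allocator that can be used by other low-level
// modules without introducing dependency cycles.
// This allocator is slow and wasteful of memory;
// it should not be used when performance is key.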
#include "absl/base/internal/low_level_alloc.h"

#include <type_traits>

#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/internal/direct_mmap.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/macros.h"
#include "absl/base/thread_annotations.h"
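// LowLevelAlloc requires that the platform support low-level
// allocation of virtual memory; platforms lacking this cannot use
// LowLevelAlloc.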
#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING

#ifndef _WIN32
#include <pthread.h>
#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>
#else
#include <windows.h>
#endif

#include <string.h>
#include <algorithm>
#include <atomic>
#include <cerrno>
#include <cstddef>
#include <new>

#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"
// MAP_ANONYMOUS
#if defined(__APPLE__)
// For mmap, Linux defines both MAP_ANONYMOUS and MAP_ANON and says MAP_ANON
// is deprecated.  In Darwin, MAP_ANON is all there is.
#if !defined MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif  // !MAP_ANONYMOUS
#endif  // __APPLE__
namespace absl {
namespace base_internal {
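// A first-fit allocator with amortized logarithmic free() time.

// ---------------------------------------------------------------------------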
static const int kMaxLevel = 30;

namespace {
// This struct describes one allocated block, or one free block.
struct AllocList {
  struct Header {
    // Size of entire region, including this field.  Must be
    // first.  Valid in both allocated and unallocated blocks.
    uintptr_t size;

    // kMagicAllocated or kMagicUnallocated xor this.
    uintptr_t magic;

    // Pointer to parent arena.
    LowLevelAlloc::Arena *arena;

    // Aligns regions to 0 mod 2*sizeof(void*).
    void *dummy_for_alignment;
  } header;

  // Next two fields: in unallocated blocks: freelist skiplist data
  //                  in allocated blocks: overlaps with client data

  // Levels in skiplist used.
  int levels;

  // Actually has levels elements.  The AllocList node may not have room
  // for all kMaxLevel entries.  See max_fit in LLA_SkiplistLevels().
  AllocList *next[kMaxLevel];
};
}  // namespace
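// ---------------------------------------------------------------------------
// A trivial skiplist implementation.  This is used to keep the freelist
// in address order while taking only logarithmic time per insert and delete.

// An integer approximation of log2(size/base).
// Requires size >= base.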
static int IntLog2(size_t size, size_t base) {
  int result = 0;
  for (size_t i = size; i > base; i >>= 1) {  // i == floor(size/2**result)
    result++;
  }
  //    floor(size / 2**result) <= base < floor(size / 2**(result-1))
  // => log2(size/(base+1)) <= result < 1+log2(size/base)
  // => result ~= log2(size/base)
  return result;
}
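// Return a random integer n:  p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.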
static int Random(uint32_t *state) {
  uint32_t r = *state;
  int result = 1;
  while ((((r = r * 1103515245 + 12345) >> 30) & 1) == 0) {
    result++;
  }
  *state = r;
  return result;
}
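// Return a number of skiplist levels for a node of size bytes, where
// base is the minimum node size.  Compute level=log2(size/base)+n
// where n is 1 if random is nullptr and otherwise a random number generated
// with the standard distribution for a skiplist: see Random() above.
// Bigger nodes tend to have more skiplist levels due to the log2(size/base)
// term, so first-fit searches touch fewer nodes.  "level" is clipped so that
// level < kMaxLevel and next[level-1] will fit in the node.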
static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
  // max_fit is the maximum number of levels that will fit in a node for the
  // given size.  We can't return more than max_fit, no matter what the
  // random number generator says.
  size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *);
  int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1);
  if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
  if (level > kMaxLevel - 1) level = kMaxLevel - 1;
  ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level");
  return level;
}
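// Return the first element of AllocList *head whose address is >= *e.
// For 0 <= i < head->levels, set prev[i] to the last element at level i
// whose address is less than *e, or to head if no such element exists.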
static AllocList *LLA_SkiplistSearch(AllocList *head, AllocList *e,
                                     AllocList **prev) {
  AllocList *p = head;
  for (int level = head->levels - 1; level >= 0; level--) {
    for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) {
    }
    prev[level] = p;
  }
  return (head->levels == 0) ? nullptr : prev[0]->next[0];
}
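// Insert element *e into AllocList *head.  Sets prev[] as LLA_SkiplistSearch().
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels()).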
static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
                               AllocList **prev) {
  LLA_SkiplistSearch(head, e, prev);
  for (; head->levels < e->levels; head->levels++) {
    prev[head->levels] = head;  // don't leave prev[] uninitialized
  }
  for (int i = 0; i != e->levels; i++) {
    e->next[i] = prev[i]->next[i];
    prev[i]->next[i] = e;
  }
}
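// Remove element *e from AllocList *head.  Sets prev[] as LLA_SkiplistSearch().
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels()).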
static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
                               AllocList **prev) {
  AllocList *found = LLA_SkiplistSearch(head, e, prev);
  ABSL_RAW_CHECK(e == found, "element not in freelist");
  for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
    prev[i]->next[i] = e->next[i];
  }
  while (head->levels > 0 && head->next[head->levels - 1] == nullptr) {
    head->levels--;  // reduce head->levels if level unused
  }
}
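// ---------------------------------------------------------------------------
// Arena implementation

// Metadata for a LowLevelAlloc arena instance.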
struct LowLevelAlloc::Arena {
  // Constructs an arena with the given LowLevelAlloc flags.
  explicit Arena(uint32_t flags_value);

  // Protects freelist, allocation_count, and random.
  base_internal::SpinLock mu;
  // Head of free list; sorted by address.
  AllocList freelist GUARDED_BY(mu);
  // Count of allocated blocks.
  int32_t allocation_count GUARDED_BY(mu);
  // Flags passed to NewArena().
  const uint32_t flags;
  // Result of GetPageSize().
  const size_t pagesize;
  // Lowest power of two >= max(16, sizeof(AllocList::Header)).
  const size_t roundup;
  // Smallest allocation block size.
  const size_t min_size;
  // PRNG state for skiplist level selection.
  uint32_t random GUARDED_BY(mu);
};
namespace {
using ArenaStorage = std::aligned_storage<sizeof(LowLevelAlloc::Arena),
                                          alignof(LowLevelAlloc::Arena)>::type;

// Static storage space for the lazily-constructed, default global arena
// instances.  We require this space because the whole point of LowLevelAlloc
// is to avoid relying on malloc/new.
ArenaStorage default_arena_storage;
ArenaStorage unhooked_arena_storage;
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
ArenaStorage unhooked_async_sig_safe_arena_storage;
#endif
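// We must use LowLevelCallOnce here to construct the global arenas, rather
// than function-level statics, to avoid recursively invoking the scheduler.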
absl::once_flag create_globals_once;

void CreateGlobalArenas() {
  new (&default_arena_storage)
      LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook);
  new (&unhooked_arena_storage) LowLevelAlloc::Arena(0);
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  new (&unhooked_async_sig_safe_arena_storage)
      LowLevelAlloc::Arena(LowLevelAlloc::kAsyncSignalSafe);
#endif
}
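// Returns a global arena that does not call into hooks.  Used by NewArena()
// when kCallMallocHook is not set.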
LowLevelAlloc::Arena *UnhookedArena() {
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
  return reinterpret_cast<LowLevelAlloc::Arena *>(&unhooked_arena_storage);
}

#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
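// Returns a global arena that is async-signal-safe.  Used by NewArena() when
// kAsyncSignalSafe is set.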
LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() {
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
  return reinterpret_cast<LowLevelAlloc::Arena *>(
      &unhooked_async_sig_safe_arena_storage);
}
#endif

}  // namespace
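// Returns the default arena, as used by LowLevelAlloc::Alloc() and other
// allocation functions.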
LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
  return reinterpret_cast<LowLevelAlloc::Arena *>(&default_arena_storage);
}
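// magic numbers to identify allocated and unallocated blocks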
static const uintptr_t kMagicAllocated = 0x4c833e95U;
static const uintptr_t kMagicUnallocated = ~kMagicAllocated;

namespace {
class SCOPED_LOCKABLE ArenaLock {
 public:
  explicit ArenaLock(LowLevelAlloc::Arena *arena)
      EXCLUSIVE_LOCK_FUNCTION(arena->mu)
      : arena_(arena) {
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
      sigset_t all;
      sigfillset(&all);
      mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0;
    }
#endif
    arena_->mu.Lock();
  }
  ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); }
  void Leave() UNLOCK_FUNCTION() {
    arena_->mu.Unlock();
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if (mask_valid_) {
      const int err = pthread_sigmask(SIG_SETMASK, &mask_, nullptr);
      if (err != 0) {
        ABSL_RAW_LOG(FATAL, "pthread_sigmask failed: %d", err);
      }
    }
#endif
    left_ = true;
  }

 private:
  bool left_ = false;  // whether left region
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  bool mask_valid_ = false;
  sigset_t mask_;  // old mask of blocked signals
#endif
  LowLevelAlloc::Arena *arena_;
  ArenaLock(const ArenaLock &) = delete;
  ArenaLock &operator=(const ArenaLock &) = delete;
};
}  // namespace
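// create an appropriate magic number for an object at "ptr"
// "magic" should be kMagicAllocated or kMagicUnallocated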
inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) {
  return magic ^ reinterpret_cast<uintptr_t>(ptr);
}

namespace {
size_t GetPageSize() {
#ifdef _WIN32
  SYSTEM_INFO system_info;
  GetSystemInfo(&system_info);
  return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
#elif defined(__wasm__) || defined(__asmjs__)
  return getpagesize();
#else
  return sysconf(_SC_PAGESIZE);
#endif
}

size_t RoundedUpBlockSize() {
  // Round up block sizes to a power of two close to the header size.
  size_t roundup = 16;
  while (roundup < sizeof(AllocList::Header)) {
    roundup += roundup;
  }
  return roundup;
}

}  // namespace
LowLevelAlloc::Arena::Arena(uint32_t flags_value)
    : mu(base_internal::SCHEDULE_KERNEL_ONLY),
      allocation_count(0),
      flags(flags_value),
      pagesize(GetPageSize()),
      roundup(RoundedUpBlockSize()),
      min_size(2 * roundup),
      random(0) {
  freelist.header.size = 0;
  freelist.header.magic = Magic(kMagicUnallocated, &freelist.header);
  freelist.header.arena = this;
  freelist.levels = 0;
  memset(freelist.next, 0, sizeof(freelist.next));
}
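// L < meta_data_arena->mu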
LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32_t flags) {
  Arena *meta_data_arena = DefaultArena();
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
    meta_data_arena = UnhookedAsyncSigSafeArena();
  } else
#endif
  if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
    meta_data_arena = UnhookedArena();
  }
  Arena *result =
      new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(flags);
  return result;
}
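// L < arena->mu, L < arena->arena->mu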
bool LowLevelAlloc::DeleteArena(Arena *arena) {
  ABSL_RAW_CHECK(
      arena != nullptr && arena != DefaultArena() && arena != UnhookedArena(),
      "may not delete default arena");
  ArenaLock section(arena);
  if (arena->allocation_count != 0) {
    section.Leave();
    return false;
  }
  while (arena->freelist.next[0] != nullptr) {
    AllocList *region = arena->freelist.next[0];
    size_t size = region->header.size;
    arena->freelist.next[0] = region->next[0];
    ABSL_RAW_CHECK(
        region->header.magic == Magic(kMagicUnallocated, &region->header),
        "bad magic number in DeleteArena()");
    ABSL_RAW_CHECK(region->header.arena == arena,
                   "bad arena pointer in DeleteArena()");
    ABSL_RAW_CHECK(size % arena->pagesize == 0,
                   "empty arena has non-page-aligned block size");
    ABSL_RAW_CHECK(reinterpret_cast<uintptr_t>(region) % arena->pagesize == 0,
                   "empty arena has non-page-aligned block");
    int munmap_result;
#ifdef _WIN32
    munmap_result = VirtualFree(region, 0, MEM_RELEASE);
    ABSL_RAW_CHECK(munmap_result != 0,
                   "LowLevelAlloc::DeleteArena: VirtualFree failed");
#else
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
      munmap_result = munmap(region, size);
    } else {
      munmap_result = base_internal::DirectMunmap(region, size);
    }
#else
    munmap_result = munmap(region, size);
#endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if (munmap_result != 0) {
      ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d",
                   errno);
    }
#endif  // _WIN32
  }
  section.Leave();
  arena->~Arena();
  Free(arena);
  return true;
}
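// ---------------------------------------------------------------------------

// Addition, checking for overflow.  The intent is to die if an external
// client provides an adversarial value that would trigger overflow.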
static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) {
  uintptr_t sum = a + b;
  ABSL_RAW_CHECK(sum >= a, "LowLevelAlloc arithmetic overflow");
  return sum;
}
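// Return value rounded up to next multiple of align.
// align must be a power of two.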
static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) {
  return CheckedAdd(addr, align - 1) & ~(align - 1);
}
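// Equivalent to "return prev->next[i]" but with sanity checking
// that the freelist is in the correct order, that it
// consists of regions marked "unallocated", and that no two regions
// are adjacent in memory (they should have been coalesced).
// L >= arena->mu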
static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
  ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()");
  AllocList *next = prev->next[i];
  if (next != nullptr) {
    ABSL_RAW_CHECK(
        next->header.magic == Magic(kMagicUnallocated, &next->header),
        "bad magic number in Next()");
    ABSL_RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()");
    if (prev != &arena->freelist) {
      ABSL_RAW_CHECK(prev < next, "unordered freelist");
      ABSL_RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
                         reinterpret_cast<char *>(next),
                     "malformed freelist");
    }
  }
  return next;
}
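// Coalesce list item "a" with its successor if they are adjacent.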
static void Coalesce(AllocList *a) {
  AllocList *n = a->next[0];
  if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size ==
                          reinterpret_cast<char *>(n)) {
    LowLevelAlloc::Arena *arena = a->header.arena;
    a->header.size += n->header.size;
    n->header.magic = 0;
    n->header.arena = nullptr;
    AllocList *prev[kMaxLevel];
    LLA_SkiplistDelete(&arena->freelist, n, prev);
    LLA_SkiplistDelete(&arena->freelist, a, prev);
    a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size,
                                   &arena->random);
    LLA_SkiplistInsert(&arena->freelist, a, prev);
  }
}
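// Adds block at location "v" to the free list
// L >= arena->mu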
static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
  AllocList *f = reinterpret_cast<AllocList *>(
      reinterpret_cast<char *>(v) - sizeof (f->header));
  ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
                 "bad magic number in AddToFreelist()");
  ABSL_RAW_CHECK(f->header.arena == arena,
                 "bad arena pointer in AddToFreelist()");
  f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size,
                                 &arena->random);
  AllocList *prev[kMaxLevel];
  LLA_SkiplistInsert(&arena->freelist, f, prev);
  f->header.magic = Magic(kMagicUnallocated, &f->header);
  Coalesce(f);        // maybe coalesce with successor
  Coalesce(prev[0]);  // maybe coalesce with predecessor
}
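// Frees storage allocated by LowLevelAlloc::Alloc()
// L < arena->mu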
void LowLevelAlloc::Free(void *v) {
  if (v != nullptr) {
    AllocList *f = reinterpret_cast<AllocList *>(
        reinterpret_cast<char *>(v) - sizeof (f->header));
    ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
                   "bad magic number in Free()");
    LowLevelAlloc::Arena *arena = f->header.arena;
    ArenaLock section(arena);
    AddToFreelist(v, arena);
    ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
    arena->allocation_count--;
    section.Leave();
  }
}
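// allocates and returns a block of size bytes, to be freed with Free()
// L < arena->mu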
static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
  void *result = nullptr;
  if (request != 0) {
    AllocList *s;  // will point to region that satisfies request
    ArenaLock section(arena);
    // round up with header
    size_t req_rnd = RoundUp(CheckedAdd(request, sizeof (s->header)),
                             arena->roundup);
    for (;;) {  // loop until we find a suitable region
      // find the minimum levels that a block of this size must have
      int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1;
      if (i < arena->freelist.levels) {        // potential blocks exist
        AllocList *before = &arena->freelist;  // predecessor of s
        while ((s = Next(i, before, arena)) != nullptr &&
               s->header.size < req_rnd) {
          before = s;
        }
        if (s != nullptr) {  // we found a region
          break;
        }
      }
      // we unlock before mmap() both because mmap() may call a callback hook,
      // and because it may be slow
      arena->mu.Unlock();
      // mmap generous 16*pagesize chunks to decrease
      // the chances/impact of fragmentation:
      size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
      void *new_pages;
#ifdef _WIN32
      new_pages = VirtualAlloc(0, new_pages_size,
                               MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
      ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
#else
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
        new_pages = base_internal::DirectMmap(nullptr, new_pages_size,
            PROT_WRITE | PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      } else {
        new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      }
#else
      new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
#endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
      if (new_pages == MAP_FAILED) {
        ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
      }
#endif  // _WIN32
      arena->mu.Lock();
      s = reinterpret_cast<AllocList *>(new_pages);
      s->header.size = new_pages_size;
      // Pretend the block is allocated; call AddToFreelist() to free it.
      s->header.magic = Magic(kMagicAllocated, &s->header);
      s->header.arena = arena;
      AddToFreelist(&s->levels, arena);  // insert new region into free list
    }
    AllocList *prev[kMaxLevel];
    LLA_SkiplistDelete(&arena->freelist, s, prev);  // remove from free list
    // s points to the first free region that's big enough
    if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) {
      // big enough to split
      AllocList *n = reinterpret_cast<AllocList *>
                        (req_rnd + reinterpret_cast<char *>(s));
      n->header.size = s->header.size - req_rnd;
      n->header.magic = Magic(kMagicAllocated, &n->header);
      n->header.arena = arena;
      s->header.size = req_rnd;
      AddToFreelist(&n->levels, arena);  // free the part we're not using
    }
    s->header.magic = Magic(kMagicAllocated, &s->header);
    ABSL_RAW_CHECK(s->header.arena == arena, "");
    arena->allocation_count++;
    section.Leave();
    result = &s->levels;
  }
  ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
  return result;
}
void *LowLevelAlloc::Alloc(size_t request) {
  void *result = DoAllocWithArena(request, DefaultArena());
  return result;
}

void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
  ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena");
  void *result = DoAllocWithArena(request, arena);
  return result;
}

}  // namespace base_internal
}  // namespace absl

#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING