low_level_alloc.cc
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// A low-level allocator that can be used by other low-level
// modules without introducing dependency cycles.
// This allocator is slow and wasteful of memory;
// it should not be used when performance is key.

#include "absl/base/internal/low_level_alloc.h"

#include <type_traits>

#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/internal/direct_mmap.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/macros.h"
#include "absl/base/thread_annotations.h"

// LowLevelAlloc requires that the platform support low-level
// allocation of virtual memory. Platforms lacking this cannot use
// LowLevelAlloc.
#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING

#ifndef _WIN32
#include <pthread.h>
#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>
#else
#include <windows.h>
#endif

#include <string.h>
#include <algorithm>
#include <atomic>
#include <cerrno>
#include <cstddef>
#include <new>                   // for placement-new

#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"

// MAP_ANONYMOUS
#if defined(__APPLE__)
// For mmap, Linux defines both MAP_ANONYMOUS and MAP_ANON and says MAP_ANON is
// deprecated. In Darwin, MAP_ANON is all there is.
#if !defined MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif  // !MAP_ANONYMOUS
#endif  // __APPLE__

namespace absl {
namespace base_internal {

// A first-fit allocator with amortized logarithmic free() time.

// ---------------------------------------------------------------------------
static const int kMaxLevel = 30;

namespace {
// This struct describes one allocated block, or one free block.
struct AllocList {
  struct Header {
    // Size of entire region, including this field. Must be
    // first. Valid in both allocated and unallocated blocks.
    uintptr_t size;

    // kMagicAllocated or kMagicUnallocated xor this.
    uintptr_t magic;

    // Pointer to parent arena.
    LowLevelAlloc::Arena *arena;

    // Aligns regions to 0 mod 2*sizeof(void*).
    void *dummy_for_alignment;
  } header;

  // Next two fields: in unallocated blocks: freelist skiplist data
  //                  in allocated blocks: overlaps with client data

  // Levels in skiplist used.
  int levels;

  // Actually has levels elements. The AllocList node may not have room
  // for all kMaxLevel entries. See max_fit in LLA_SkiplistLevels().
  AllocList *next[kMaxLevel];
};
}  // namespace

// ---------------------------------------------------------------------------
// A trivial skiplist implementation.  This is used to keep the freelist
// in address order while taking only logarithmic time per insert and delete.

// An integer approximation of log2(size/base)
// Requires size >= base.
static int IntLog2(size_t size, size_t base) {
  int result = 0;
  for (size_t i = size; i > base; i >>= 1) {  // i == floor(size/2**result)
    result++;
  }
  //    floor(size / 2**result) <= base < floor(size / 2**(result-1))
  // =>     log2(size/(base+1)) <= result < 1+log2(size/base)
  // => result ~= log2(size/base)
  return result;
}
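// Worked example: IntLog2(4096, 16) halves 4096 eight times (2048, 1024, ...,
// 16) before the loop condition fails, so it returns 8 == log2(4096/16).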

// Return a random integer n:  p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
static int Random(uint32_t *state) {
  uint32_t r = *state;
  int result = 1;
  while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) {
    result++;
  }
  *state = r;
  return result;
}
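// Editor's note: r*1103515245 + 12345 is the classic POSIX/ANSI example
// rand() linear congruential generator; bit 30 of each successive draw is
// used as a roughly fair coin, giving the geometric distribution described
// above (result == n with probability about 1/2**n).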

// Return a number of skiplist levels for a node of size bytes, where
// base is the minimum node size.  Compute level=log2(size / base)+n
// where n is 1 if random is nullptr and otherwise a random number generated
// with the standard distribution for a skiplist:  See Random() above.
// Bigger nodes tend to have more skiplist levels due to the log2(size / base)
// term, so first-fit searches touch fewer nodes.  "level" is clipped so
// level<kMaxLevel and next[level-1] will fit in the node.
// 0 < LLA_SkiplistLevels(x,y,nullptr) <= LLA_SkiplistLevels(x,y,random) < kMaxLevel
static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
  // max_fit is the maximum number of levels that will fit in a node for the
  // given size.  We can't return more than max_fit, no matter what the
  // random number generator says.
  size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *);
  int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1);
  if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
  if (level > kMaxLevel-1) level = kMaxLevel - 1;
  ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level");
  return level;
}
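// Worked example (assuming an LP64 platform, where the Header is 32 bytes and
// next[] starts at offset 40): for size == 256 and base == 64,
// IntLog2(256, 64) == 2, so the deterministic level is 3; max_fit ==
// (256 - 40) / 8 == 27, so neither clipping step applies.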

// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
// points to the last element at level i in the AllocList less than *e, or is
// head if no such element exists.
static AllocList *LLA_SkiplistSearch(AllocList *head,
                                     AllocList *e, AllocList **prev) {
  AllocList *p = head;
  for (int level = head->levels - 1; level >= 0; level--) {
    for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) {
    }
    prev[level] = p;
  }
  return (head->levels == 0) ? nullptr : prev[0]->next[0];
}

// Insert element *e into AllocList *head.  Set prev[] as LLA_SkiplistSearch.
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
                               AllocList **prev) {
  LLA_SkiplistSearch(head, e, prev);
  for (; head->levels < e->levels; head->levels++) {  // extend prev pointers
    prev[head->levels] = head;                        // to all *e's levels
  }
  for (int i = 0; i != e->levels; i++) {  // add element to list
    e->next[i] = prev[i]->next[i];
    prev[i]->next[i] = e;
  }
}
// Remove element *e from AllocList *head.  Set prev[] as LLA_SkiplistSearch().
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
                               AllocList **prev) {
  AllocList *found = LLA_SkiplistSearch(head, e, prev);
  ABSL_RAW_CHECK(e == found, "element not in freelist");
  for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
    prev[i]->next[i] = e->next[i];
  }
  while (head->levels > 0 && head->next[head->levels - 1] == nullptr) {
    head->levels--;   // reduce head->levels if level unused
  }
}
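// Editor's note (illustrative): with free blocks at addresses A < B < C, the
// freelist might look like
//
//   level 2:  head ---------------> C
//   level 1:  head ------> B -----> C
//   level 0:  head -> A -> B -----> C
//
// LLA_SkiplistSearch(head, e, prev) fills prev[i] with the last node at level
// i whose address is below e, so Insert() and Delete() can splice e into or
// out of every level it participates in.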

// ---------------------------------------------------------------------------
// Arena implementation

// Metadata for a LowLevelAlloc arena instance.
struct LowLevelAlloc::Arena {
  // Constructs an arena with the given LowLevelAlloc flags.
  explicit Arena(uint32_t flags_value);

  base_internal::SpinLock mu;
  // Head of free list, sorted by address
  AllocList freelist GUARDED_BY(mu);
  // Count of allocated blocks
  int32_t allocation_count GUARDED_BY(mu);
  // flags passed to NewArena
  const uint32_t flags;
  // Result of sysconf(_SC_PAGESIZE)
  const size_t pagesize;
  // Lowest power of two >= max(16, sizeof(AllocList::Header))
  const size_t roundup;
  // Smallest allocation block size
  const size_t min_size;
  // PRNG state
  uint32_t random GUARDED_BY(mu);
};

namespace {
using ArenaStorage = std::aligned_storage<sizeof(LowLevelAlloc::Arena),
                                          alignof(LowLevelAlloc::Arena)>::type;

// Static storage space for the lazily-constructed, default global arena
// instances.  We require this space because the whole point of LowLevelAlloc
// is to avoid relying on malloc/new.
ArenaStorage default_arena_storage;
ArenaStorage unhooked_arena_storage;
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
ArenaStorage unhooked_async_sig_safe_arena_storage;
#endif

// We must use LowLevelCallOnce here to construct the global arenas, rather than
// using function-level statics, to avoid recursively invoking the scheduler.
absl::once_flag create_globals_once;

void CreateGlobalArenas() {
  new (&default_arena_storage)
      LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook);
  new (&unhooked_arena_storage) LowLevelAlloc::Arena(0);
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  new (&unhooked_async_sig_safe_arena_storage)
      LowLevelAlloc::Arena(LowLevelAlloc::kAsyncSignalSafe);
#endif
}

// Returns a global arena that does not call into hooks.  Used by NewArena()
// when kCallMallocHook is not set.
LowLevelAlloc::Arena* UnhookedArena() {
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
  return reinterpret_cast<LowLevelAlloc::Arena*>(&unhooked_arena_storage);
}

#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
// Returns a global arena that is async-signal safe.  Used by NewArena() when
// kAsyncSignalSafe is set.
LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() {
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
  return reinterpret_cast<LowLevelAlloc::Arena *>(
      &unhooked_async_sig_safe_arena_storage);
}
#endif

}  // namespace

// Returns the default arena, as used by LowLevelAlloc::Alloc() and friends.
LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
  return reinterpret_cast<LowLevelAlloc::Arena*>(&default_arena_storage);
}

// magic numbers to identify allocated and unallocated blocks
static const uintptr_t kMagicAllocated = 0x4c833e95U;
static const uintptr_t kMagicUnallocated = ~kMagicAllocated;

namespace {
class SCOPED_LOCKABLE ArenaLock {
 public:
  explicit ArenaLock(LowLevelAlloc::Arena *arena)
      EXCLUSIVE_LOCK_FUNCTION(arena->mu)
      : arena_(arena) {
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
      sigset_t all;
      sigfillset(&all);
      mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0;
    }
#endif
    arena_->mu.Lock();
  }
  ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); }
  void Leave() UNLOCK_FUNCTION() {
    arena_->mu.Unlock();
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if (mask_valid_) {
      const int err = pthread_sigmask(SIG_SETMASK, &mask_, nullptr);
      if (err != 0) {
        ABSL_RAW_LOG(FATAL, "pthread_sigmask failed: %d", err);
      }
    }
#endif
    left_ = true;
  }

 private:
  bool left_ = false;  // whether left region
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  bool mask_valid_ = false;
  sigset_t mask_;  // old mask of blocked signals
#endif
  LowLevelAlloc::Arena *arena_;
  ArenaLock(const ArenaLock &) = delete;
  ArenaLock &operator=(const ArenaLock &) = delete;
};
}  // namespace

// create an appropriate magic number for an object at "ptr"
// "magic" should be kMagicAllocated or kMagicUnallocated
inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) {
  return magic ^ reinterpret_cast<uintptr_t>(ptr);
}
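// For example, the Header at address 0x1000 stores kMagicAllocated ^ 0x1000
// while its block is allocated and kMagicUnallocated ^ 0x1000 while it sits
// on the freelist.  Because the stored value depends on the header's own
// address, the checks in Free(), Next(), and AddToFreelist() also catch
// headers that have been copied or handed to the wrong routine.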

namespace {
size_t GetPageSize() {
#ifdef _WIN32
  SYSTEM_INFO system_info;
  GetSystemInfo(&system_info);
  return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
#elif defined(__wasm__) || defined(__asmjs__)
  return getpagesize();
#else
  return sysconf(_SC_PAGESIZE);
#endif
}

size_t RoundedUpBlockSize() {
  // Round up block sizes to a power of two close to the header size.
  size_t roundup = 16;
  while (roundup < sizeof(AllocList::Header)) {
    roundup += roundup;
  }
  return roundup;
}

}  // namespace

LowLevelAlloc::Arena::Arena(uint32_t flags_value)
    : mu(base_internal::SCHEDULE_KERNEL_ONLY),
      allocation_count(0),
      flags(flags_value),
      pagesize(GetPageSize()),
      roundup(RoundedUpBlockSize()),
      min_size(2 * roundup),
      random(0) {
  freelist.header.size = 0;
  freelist.header.magic =
      Magic(kMagicUnallocated, &freelist.header);
  freelist.header.arena = this;
  freelist.levels = 0;
  memset(freelist.next, 0, sizeof(freelist.next));
}
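// Editor's note (illustrative values, assuming a typical 64-bit Linux target):
// pagesize == 4096 and sizeof(AllocList::Header) == 32, hence roundup == 32
// and min_size == 64.  Every block the arena carves out is then a multiple of
// 32 bytes and at least 64 bytes, counting its 32-byte header.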

// L < meta_data_arena->mu
LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32_t flags) {
  Arena *meta_data_arena = DefaultArena();
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
    meta_data_arena = UnhookedAsyncSigSafeArena();
  } else  // NOLINT(readability/braces)
#endif
      if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
    meta_data_arena = UnhookedArena();
  }
  Arena *result =
    new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(flags);
  return result;
}

// L < arena->mu, L < arena->arena->mu
bool LowLevelAlloc::DeleteArena(Arena *arena) {
  ABSL_RAW_CHECK(
      arena != nullptr && arena != DefaultArena() && arena != UnhookedArena(),
      "may not delete default arena");
  ArenaLock section(arena);
  if (arena->allocation_count != 0) {
    section.Leave();
    return false;
  }
  while (arena->freelist.next[0] != nullptr) {
    AllocList *region = arena->freelist.next[0];
    size_t size = region->header.size;
    arena->freelist.next[0] = region->next[0];
    ABSL_RAW_CHECK(
        region->header.magic == Magic(kMagicUnallocated, &region->header),
        "bad magic number in DeleteArena()");
    ABSL_RAW_CHECK(region->header.arena == arena,
                   "bad arena pointer in DeleteArena()");
    ABSL_RAW_CHECK(size % arena->pagesize == 0,
                   "empty arena has non-page-aligned block size");
    ABSL_RAW_CHECK(reinterpret_cast<uintptr_t>(region) % arena->pagesize == 0,
                   "empty arena has non-page-aligned block");
    int munmap_result;
#ifdef _WIN32
    munmap_result = VirtualFree(region, 0, MEM_RELEASE);
    ABSL_RAW_CHECK(munmap_result != 0,
                   "LowLevelAlloc::DeleteArena: VirtualFree failed");
#else
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
      munmap_result = munmap(region, size);
    } else {
      munmap_result = base_internal::DirectMunmap(region, size);
    }
#else
    munmap_result = munmap(region, size);
#endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if (munmap_result != 0) {
      ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d",
                   errno);
    }
#endif  // _WIN32
  }
  section.Leave();
  arena->~Arena();
  Free(arena);
  return true;
}

// ---------------------------------------------------------------------------

// Addition, checking for overflow.  The intent is to die if an external client
// manages to push through a request that would cause arithmetic to fail.
static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) {
  uintptr_t sum = a + b;
  ABSL_RAW_CHECK(sum >= a, "LowLevelAlloc arithmetic overflow");
  return sum;
}

// Return value rounded up to next multiple of align.
// align must be a power of two.
static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) {
  return CheckedAdd(addr, align - 1) & ~(align - 1);
}
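// For example, RoundUp(33, 32) == (33 + 31) & ~31 == 64, while
// RoundUp(64, 32) == 64; CheckedAdd() makes the process die rather than let
// addr + align - 1 silently wrap around uintptr_t.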

// Equivalent to "return prev->next[i]" but with sanity checking
// that the freelist is in the correct order, that it
// consists of regions marked "unallocated", and that no two regions
// are adjacent in memory (they should have been coalesced).
// L < arena->mu
static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
  ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()");
  AllocList *next = prev->next[i];
  if (next != nullptr) {
    ABSL_RAW_CHECK(
        next->header.magic == Magic(kMagicUnallocated, &next->header),
        "bad magic number in Next()");
    ABSL_RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()");
    if (prev != &arena->freelist) {
      ABSL_RAW_CHECK(prev < next, "unordered freelist");
      ABSL_RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
                         reinterpret_cast<char *>(next),
                     "malformed freelist");
    }
  }
  return next;
}

// Coalesce list item "a" with its successor if they are adjacent.
static void Coalesce(AllocList *a) {
  AllocList *n = a->next[0];
  if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size ==
                          reinterpret_cast<char *>(n)) {
    LowLevelAlloc::Arena *arena = a->header.arena;
    a->header.size += n->header.size;
    n->header.magic = 0;
    n->header.arena = nullptr;
    AllocList *prev[kMaxLevel];
    LLA_SkiplistDelete(&arena->freelist, n, prev);
    LLA_SkiplistDelete(&arena->freelist, a, prev);
    a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size,
                                   &arena->random);
    LLA_SkiplistInsert(&arena->freelist, a, prev);
  }
}

// Adds block at location "v" to the free list
// L >= arena->mu
static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
  AllocList *f = reinterpret_cast<AllocList *>(
                        reinterpret_cast<char *>(v) - sizeof (f->header));
  ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
                 "bad magic number in AddToFreelist()");
  ABSL_RAW_CHECK(f->header.arena == arena,
                 "bad arena pointer in AddToFreelist()");
  f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size,
                                 &arena->random);
  AllocList *prev[kMaxLevel];
  LLA_SkiplistInsert(&arena->freelist, f, prev);
  f->header.magic = Magic(kMagicUnallocated, &f->header);
  Coalesce(f);                  // maybe coalesce with successor
  Coalesce(prev[0]);            // maybe coalesce with predecessor
}
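// Editor's note: after the insert, prev[0] is the free block (or the list
// head) immediately preceding f in address order, so the two Coalesce() calls
// give a newly freed block the chance to merge with a free neighbour on
// either side; the head's zero-sized header can never satisfy the adjacency
// test, so passing it to Coalesce() is harmless.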

// Frees storage allocated by LowLevelAlloc::Alloc().
// L < arena->mu
void LowLevelAlloc::Free(void *v) {
  if (v != nullptr) {
    AllocList *f = reinterpret_cast<AllocList *>(
                        reinterpret_cast<char *>(v) - sizeof (f->header));
    ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
                   "bad magic number in Free()");
    LowLevelAlloc::Arena *arena = f->header.arena;
    ArenaLock section(arena);
    AddToFreelist(v, arena);
    ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
    arena->allocation_count--;
    section.Leave();
  }
}

// allocates and returns a block of size bytes, to be freed with Free()
// L < arena->mu
static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
  void *result = nullptr;
  if (request != 0) {
    AllocList *s;       // will point to region that satisfies request
    ArenaLock section(arena);
    // round up with header
    size_t req_rnd = RoundUp(CheckedAdd(request, sizeof (s->header)),
                             arena->roundup);
    for (;;) {      // loop until we find a suitable region
      // find the minimum levels that a block of this size must have
      int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1;
      if (i < arena->freelist.levels) {   // potential blocks exist
        AllocList *before = &arena->freelist;  // predecessor of s
        while ((s = Next(i, before, arena)) != nullptr &&
               s->header.size < req_rnd) {
          before = s;
        }
        if (s != nullptr) {       // we found a region
          break;
        }
      }
      // we unlock before mmap() both because mmap() may call a callback hook,
      // and because it may be slow.
      arena->mu.Unlock();
      // mmap generous 64K chunks to decrease
      // the chances/impact of fragmentation:
      size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
      void *new_pages;
#ifdef _WIN32
      new_pages = VirtualAlloc(0, new_pages_size,
                               MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
      ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
#else
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
        new_pages = base_internal::DirectMmap(nullptr, new_pages_size,
            PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
      } else {
        new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      }
#else
      new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
#endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
      if (new_pages == MAP_FAILED) {
        ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
      }

#endif  // _WIN32
      arena->mu.Lock();
      s = reinterpret_cast<AllocList *>(new_pages);
      s->header.size = new_pages_size;
      // Pretend the block is allocated; call AddToFreelist() to free it.
      s->header.magic = Magic(kMagicAllocated, &s->header);
      s->header.arena = arena;
      AddToFreelist(&s->levels, arena);  // insert new region into free list
    }
    AllocList *prev[kMaxLevel];
    LLA_SkiplistDelete(&arena->freelist, s, prev);    // remove from free list
    // s points to the first free region that's big enough
    if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) {
      // big enough to split
      AllocList *n = reinterpret_cast<AllocList *>
                        (req_rnd + reinterpret_cast<char *>(s));
      n->header.size = s->header.size - req_rnd;
      n->header.magic = Magic(kMagicAllocated, &n->header);
      n->header.arena = arena;
      s->header.size = req_rnd;
      AddToFreelist(&n->levels, arena);
    }
    s->header.magic = Magic(kMagicAllocated, &s->header);
    ABSL_RAW_CHECK(s->header.arena == arena, "");
    arena->allocation_count++;
    section.Leave();
    result = &s->levels;
  }
  ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
  return result;
}
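// Editor's note (worked example, using the illustrative 64-bit values above):
// a request for 100 bytes becomes req_rnd = RoundUp(100 + 32, 32) == 160.  If
// the first fitting free region is 4096 bytes, 160 + min_size <= 4096, so it
// is split: the caller receives the first 160 bytes (a 32-byte header plus
// 128 usable bytes starting at &s->levels) and the remaining 3936 bytes go
// back onto the freelist as a separate block.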

void *LowLevelAlloc::Alloc(size_t request) {
  void *result = DoAllocWithArena(request, DefaultArena());
  return result;
}

void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
  ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena");
  void *result = DoAllocWithArena(request, arena);
  return result;
}
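
// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the library): typical client
// usage of the public API declared in low_level_alloc.h, shown in comments so
// the source itself is unchanged.
//
//   using absl::base_internal::LowLevelAlloc;
//   LowLevelAlloc::Arena *arena = LowLevelAlloc::NewArena(0);
//   void *p = LowLevelAlloc::AllocWithArena(128, arena);  // >= 128 usable bytes
//   // ... use p ...
//   LowLevelAlloc::Free(p);               // returns the block to its arena
//   bool ok = LowLevelAlloc::DeleteArena(arena);
//   // DeleteArena() succeeds (returns true) only after every block allocated
//   // from the arena has been freed.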

}  // namespace base_internal
}  // namespace absl

#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING

