low_level_alloc.cc
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// A low-level allocator that can be used by other low-level
// modules without introducing dependency cycles.
// This allocator is slow and wasteful of memory;
// it should not be used when performance is key.

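// A rough usage sketch (illustrative only; it assumes the declarations in
// absl/base/internal/low_level_alloc.h):
//
//   using absl::base_internal::LowLevelAlloc;
//   LowLevelAlloc::Arena *arena = LowLevelAlloc::NewArena(0);
//   void *p = LowLevelAlloc::AllocWithArena(128, arena);
//   // ... use the 128 bytes at p ...
//   LowLevelAlloc::Free(p);             // returns the block to the freelist
//   LowLevelAlloc::DeleteArena(arena);  // legal once every block is freed
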
#include "absl/base/internal/low_level_alloc.h"

#include <type_traits>

#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/internal/direct_mmap.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/macros.h"
#include "absl/base/thread_annotations.h"

// LowLevelAlloc requires that the platform support low-level
// allocation of virtual memory. Platforms lacking this cannot use
// LowLevelAlloc.
#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING

#ifndef _WIN32
#include <pthread.h>
#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>
#else
#include <windows.h>
#endif

#include <string.h>
#include <algorithm>
#include <atomic>
#include <cerrno>
#include <cstddef>
#include <new>  // for placement-new

#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"

// MAP_ANONYMOUS
#if defined(__APPLE__)
// For mmap, Linux defines both MAP_ANONYMOUS and MAP_ANON and says MAP_ANON is
// deprecated. In Darwin, MAP_ANON is all there is.
#if !defined MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif  // !MAP_ANONYMOUS
#endif  // __APPLE__

namespace absl {
namespace base_internal {

// A first-fit allocator with amortized logarithmic free() time.

// ---------------------------------------------------------------------------
static const int kMaxLevel = 30;

namespace {
// This struct describes one allocated block, or one free block.
struct AllocList {
  struct Header {
    // Size of entire region, including this field. Must be
    // first. Valid in both allocated and unallocated blocks.
    uintptr_t size;

    // kMagicAllocated or kMagicUnallocated xor this.
    uintptr_t magic;

    // Pointer to parent arena.
    LowLevelAlloc::Arena *arena;

    // Aligns regions to 0 mod 2*sizeof(void*).
    void *dummy_for_alignment;
  } header;

  // Next two fields: in unallocated blocks: freelist skiplist data
  //                  in allocated blocks: overlaps with client data

  // Levels in skiplist used.
  int levels;

  // Actually has levels elements. The AllocList node may not have room
  // for all kMaxLevel entries. See max_fit in LLA_SkiplistLevels().
  AllocList *next[kMaxLevel];
};
}  // namespace

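// Memory layout note: the Header always sits at the start of a block. The
// client pointer handed out by Alloc() is &block->levels, so the levels and
// next[] fields overlap the caller's data and are only meaningful while the
// block is on the freelist; Free() recovers the Header by stepping back
// sizeof(Header) bytes from the client pointer.
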
// ---------------------------------------------------------------------------
// A trivial skiplist implementation. This is used to keep the freelist
// in address order while taking only logarithmic time per insert and delete.

// An integer approximation of log2(size/base)
// Requires size >= base.
static int IntLog2(size_t size, size_t base) {
  int result = 0;
  for (size_t i = size; i > base; i >>= 1) {  // i == floor(size/2**result)
    result++;
  }
  // floor(size / 2**result) <= base < floor(size / 2**(result-1))
  // => log2(size/(base+1)) <= result < 1+log2(size/base)
  // => result ~= log2(size/base)
  return result;
}

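// For example, IntLog2(4096, 16) == 8: the loop halves 4096 eight times
// before reaching the base, matching log2(4096/16) exactly.
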
// Return a random integer n: p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
static int Random(uint32_t *state) {
  uint32_t r = *state;
  int result = 1;
  while ((((r = r * 1103515245 + 12345) >> 30) & 1) == 0) {
    result++;
  }
  *state = r;
  return result;
}

// Return a number of skiplist levels for a node of size bytes, where
// base is the minimum node size. Compute level=log2(size / base)+n
// where n is 1 if random is null and otherwise a random number generated with
// the standard distribution for a skiplist: see Random() above.
// Bigger nodes tend to have more skiplist levels due to the log2(size / base)
// term, so first-fit searches touch fewer nodes. "level" is clipped so that
// level<kMaxLevel and next[level-1] will fit in the node.
// 0 < LLA_SkiplistLevels(x, y, nullptr)
//   <= LLA_SkiplistLevels(x, y, random) < kMaxLevel
static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
  // max_fit is the maximum number of levels that will fit in a node for the
  // given size. We can't return more than max_fit, no matter what the
  // random number generator says.
  size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *);
  int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1);
  if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
  if (level > kMaxLevel - 1) level = kMaxLevel - 1;
  ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level");
  return level;
}

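// Worked example (assuming a 64-bit build, where sizeof(AllocList::Header) is
// 32 bytes and the arena's min_size is therefore 64): a free block of 4096
// bytes gets IntLog2(4096, 64) + 1 == 7 levels when random is null, well
// under both kMaxLevel and the roughly 500 next[] slots that fit in 4096
// bytes.
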
151 // Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
152 // For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
153 // points to the last element at level i in the AllocList less than *e, or is
154 // head if no such element exists.
155 static AllocList *LLA_SkiplistSearch(AllocList *head,
156  AllocList *e, AllocList **prev) {
157  AllocList *p = head;
158  for (int level = head->levels - 1; level >= 0; level--) {
159  for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) {
160  }
161  prev[level] = p;
162  }
163  return (head->levels == 0) ? nullptr : prev[0]->next[0];
164 }
165 
166 // Insert element *e into AllocList *head. Set prev[] as LLA_SkiplistSearch.
167 // Requires that e->levels be previously set by the caller (using
168 // LLA_SkiplistLevels())
169 static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
170  AllocList **prev) {
171  LLA_SkiplistSearch(head, e, prev);
172  for (; head->levels < e->levels; head->levels++) { // extend prev pointers
173  prev[head->levels] = head; // to all *e's levels
174  }
175  for (int i = 0; i != e->levels; i++) { // add element to list
176  e->next[i] = prev[i]->next[i];
177  prev[i]->next[i] = e;
178  }
179 }
180 
// Remove element *e from AllocList *head. Set prev[] as LLA_SkiplistSearch().
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
                               AllocList **prev) {
  AllocList *found = LLA_SkiplistSearch(head, e, prev);
  ABSL_RAW_CHECK(e == found, "element not in freelist");
  for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
    prev[i]->next[i] = e->next[i];
  }
  while (head->levels > 0 && head->next[head->levels - 1] == nullptr) {
    head->levels--;  // reduce head->levels if level unused
  }
}

// ---------------------------------------------------------------------------
// Arena implementation

// Metadata for a LowLevelAlloc arena instance.
struct LowLevelAlloc::Arena {
  // Constructs an arena with the given LowLevelAlloc flags.
  explicit Arena(uint32_t flags_value);

  base_internal::SpinLock mu;
  // Head of free list, sorted by address
  AllocList freelist GUARDED_BY(mu);
  // Count of allocated blocks
  int32_t allocation_count GUARDED_BY(mu);
  // flags passed to NewArena
  const uint32_t flags;
  // Result of sysconf(_SC_PAGESIZE)
  const size_t pagesize;
  // Lowest power of two >= max(16, sizeof(AllocList))
  const size_t roundup;
  // Smallest allocation block size
  const size_t min_size;
  // PRNG state
  uint32_t random GUARDED_BY(mu);
};

namespace {
using ArenaStorage = std::aligned_storage<sizeof(LowLevelAlloc::Arena),
                                          alignof(LowLevelAlloc::Arena)>::type;

// Static storage space for the lazily-constructed, default global arena
// instances. We require this space because the whole point of LowLevelAlloc
// is to avoid relying on malloc/new.
ArenaStorage default_arena_storage;
ArenaStorage unhooked_arena_storage;
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
ArenaStorage unhooked_async_sig_safe_arena_storage;
#endif

// We must use LowLevelCallOnce here to construct the global arenas, rather than
// using function-level statics, to avoid recursively invoking the scheduler.
absl::once_flag create_globals_once;

void CreateGlobalArenas() {
  new (&default_arena_storage)
      LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook);
  new (&unhooked_arena_storage) LowLevelAlloc::Arena(0);
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  new (&unhooked_async_sig_safe_arena_storage)
      LowLevelAlloc::Arena(LowLevelAlloc::kAsyncSignalSafe);
#endif
}

// Returns a global arena that does not call into hooks. Used by NewArena()
// when kCallMallocHook is not set.
LowLevelAlloc::Arena* UnhookedArena() {
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
  return reinterpret_cast<LowLevelAlloc::Arena*>(&unhooked_arena_storage);
}

#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
// Returns a global arena that is async-signal safe. Used by NewArena() when
// kAsyncSignalSafe is set.
LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() {
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
  return reinterpret_cast<LowLevelAlloc::Arena *>(
      &unhooked_async_sig_safe_arena_storage);
}
#endif

}  // namespace

// Returns the default arena, as used by LowLevelAlloc::Alloc() and friends.
LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
  return reinterpret_cast<LowLevelAlloc::Arena*>(&default_arena_storage);
}

// magic numbers to identify allocated and unallocated blocks
static const uintptr_t kMagicAllocated = 0x4c833e95U;
static const uintptr_t kMagicUnallocated = ~kMagicAllocated;

namespace {
class SCOPED_LOCKABLE ArenaLock {
 public:
  explicit ArenaLock(LowLevelAlloc::Arena *arena)
      EXCLUSIVE_LOCK_FUNCTION(arena->mu)
      : arena_(arena) {
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
      sigset_t all;
      sigfillset(&all);
      mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0;
    }
#endif
    arena_->mu.Lock();
  }
  ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); }
  void Leave() UNLOCK_FUNCTION() {
    arena_->mu.Unlock();
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if (mask_valid_) {
      const int err = pthread_sigmask(SIG_SETMASK, &mask_, nullptr);
      if (err != 0) {
        ABSL_RAW_LOG(FATAL, "pthread_sigmask failed: %d", err);
      }
    }
#endif
    left_ = true;
  }

 private:
  bool left_ = false;  // whether left region
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  bool mask_valid_ = false;
  sigset_t mask_;  // old mask of blocked signals
#endif
  LowLevelAlloc::Arena *arena_;
  ArenaLock(const ArenaLock &) = delete;
  ArenaLock &operator=(const ArenaLock &) = delete;
};
}  // namespace

// create an appropriate magic number for an object at "ptr"
// "magic" should be kMagicAllocated or kMagicUnallocated
inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) {
  return magic ^ reinterpret_cast<uintptr_t>(ptr);
}
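
// Mixing the header's own address into the magic value makes it unlikely that
// a stale or misdirected pointer passes validation: e.g. Free() only accepts
// a pointer v if the header at v - sizeof(Header) stores
// Magic(kMagicAllocated, <address of that header>).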

namespace {
size_t GetPageSize() {
#ifdef _WIN32
  SYSTEM_INFO system_info;
  GetSystemInfo(&system_info);
  return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
#elif defined(__wasm__) || defined(__asmjs__)
  return getpagesize();
#else
  return sysconf(_SC_PAGESIZE);
#endif
}

size_t RoundedUpBlockSize() {
  // Round up block sizes to a power of two close to the header size.
  size_t roundup = 16;
  while (roundup < sizeof(AllocList::Header)) {
    roundup += roundup;
  }
  return roundup;
}
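
// On a typical 64-bit build sizeof(AllocList::Header) is 32, so roundup
// becomes 32 and the arena's min_size (2 * roundup, below) is 64; on a 32-bit
// build the 16-byte header leaves roundup at its starting value of 16.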

}  // namespace

LowLevelAlloc::Arena::Arena(uint32_t flags_value)
    : mu(base_internal::SCHEDULE_KERNEL_ONLY),
      allocation_count(0),
      flags(flags_value),
      pagesize(GetPageSize()),
      roundup(RoundedUpBlockSize()),
      min_size(2 * roundup),
      random(0) {
  freelist.header.size = 0;
  freelist.header.magic =
      Magic(kMagicUnallocated, &freelist.header);
  freelist.header.arena = this;
  freelist.levels = 0;
  memset(freelist.next, 0, sizeof(freelist.next));
}

// L < meta_data_arena->mu
LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32_t flags) {
  Arena *meta_data_arena = DefaultArena();
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
    meta_data_arena = UnhookedAsyncSigSafeArena();
  } else  // NOLINT(readability/braces)
#endif
      if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
    meta_data_arena = UnhookedArena();
  }
  Arena *result =
      new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(flags);
  return result;
}

// L < arena->mu, L < arena->arena->mu
bool LowLevelAlloc::DeleteArena(Arena *arena) {
  ABSL_RAW_CHECK(
      arena != nullptr && arena != DefaultArena() && arena != UnhookedArena(),
      "may not delete default arena");
  ArenaLock section(arena);
  if (arena->allocation_count != 0) {
    section.Leave();
    return false;
  }
  while (arena->freelist.next[0] != nullptr) {
    AllocList *region = arena->freelist.next[0];
    size_t size = region->header.size;
    arena->freelist.next[0] = region->next[0];
    ABSL_RAW_CHECK(
        region->header.magic == Magic(kMagicUnallocated, &region->header),
        "bad magic number in DeleteArena()");
    ABSL_RAW_CHECK(region->header.arena == arena,
                   "bad arena pointer in DeleteArena()");
    ABSL_RAW_CHECK(size % arena->pagesize == 0,
                   "empty arena has non-page-aligned block size");
    ABSL_RAW_CHECK(reinterpret_cast<uintptr_t>(region) % arena->pagesize == 0,
                   "empty arena has non-page-aligned block");
    int munmap_result;
#ifdef _WIN32
    munmap_result = VirtualFree(region, 0, MEM_RELEASE);
    ABSL_RAW_CHECK(munmap_result != 0,
                   "LowLevelAlloc::DeleteArena: VirtualFree failed");
#else
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
      munmap_result = munmap(region, size);
    } else {
      munmap_result = base_internal::DirectMunmap(region, size);
    }
#else
    munmap_result = munmap(region, size);
#endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
    if (munmap_result != 0) {
      ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d",
                   errno);
    }
#endif  // _WIN32
  }
  section.Leave();
  arena->~Arena();
  Free(arena);
  return true;
}

// ---------------------------------------------------------------------------

// Addition, checking for overflow. The intent is to die if an external client
// manages to push through a request that would cause arithmetic to fail.
static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) {
  uintptr_t sum = a + b;
  ABSL_RAW_CHECK(sum >= a, "LowLevelAlloc arithmetic overflow");
  return sum;
}

// Return value rounded up to next multiple of align.
// align must be a power of two.
static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) {
  return CheckedAdd(addr, align - 1) & ~(align - 1);
}
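
// For example, RoundUp(37, 16) == 48 and RoundUp(48, 16) == 48; routing the
// addition through CheckedAdd() means a request large enough to wrap around
// when align - 1 is added dies instead of silently rounding down.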

// Equivalent to "return prev->next[i]" but with sanity checking
// that the freelist is in the correct order, that it
// consists of regions marked "unallocated", and that no two regions
// are adjacent in memory (they should have been coalesced).
// L < arena->mu
static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
  ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()");
  AllocList *next = prev->next[i];
  if (next != nullptr) {
    ABSL_RAW_CHECK(
        next->header.magic == Magic(kMagicUnallocated, &next->header),
        "bad magic number in Next()");
    ABSL_RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()");
    if (prev != &arena->freelist) {
      ABSL_RAW_CHECK(prev < next, "unordered freelist");
      ABSL_RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
                         reinterpret_cast<char *>(next),
                     "malformed freelist");
    }
  }
  return next;
}

// Coalesce list item "a" with its successor if they are adjacent.
static void Coalesce(AllocList *a) {
  AllocList *n = a->next[0];
  if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size ==
                          reinterpret_cast<char *>(n)) {
    LowLevelAlloc::Arena *arena = a->header.arena;
    a->header.size += n->header.size;
    n->header.magic = 0;
    n->header.arena = nullptr;
    AllocList *prev[kMaxLevel];
    LLA_SkiplistDelete(&arena->freelist, n, prev);
    LLA_SkiplistDelete(&arena->freelist, a, prev);
    a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size,
                                   &arena->random);
    LLA_SkiplistInsert(&arena->freelist, a, prev);
  }
}

// Adds block at location "v" to the free list
// L >= arena->mu
static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
  AllocList *f = reinterpret_cast<AllocList *>(
      reinterpret_cast<char *>(v) - sizeof (f->header));
  ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
                 "bad magic number in AddToFreelist()");
  ABSL_RAW_CHECK(f->header.arena == arena,
                 "bad arena pointer in AddToFreelist()");
  f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size,
                                 &arena->random);
  AllocList *prev[kMaxLevel];
  LLA_SkiplistInsert(&arena->freelist, f, prev);
  f->header.magic = Magic(kMagicUnallocated, &f->header);
  Coalesce(f);        // maybe coalesce with successor
  Coalesce(prev[0]);  // maybe coalesce with predecessor
}

// Frees storage allocated by LowLevelAlloc::Alloc().
// L < arena->mu
void LowLevelAlloc::Free(void *v) {
  if (v != nullptr) {
    AllocList *f = reinterpret_cast<AllocList *>(
        reinterpret_cast<char *>(v) - sizeof (f->header));
    ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
                   "bad magic number in Free()");
    LowLevelAlloc::Arena *arena = f->header.arena;
    ArenaLock section(arena);
    AddToFreelist(v, arena);
    ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
    arena->allocation_count--;
    section.Leave();
  }
}

// allocates and returns a block of size bytes, to be freed with Free()
// L < arena->mu
static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
  void *result = nullptr;
  if (request != 0) {
    AllocList *s;  // will point to region that satisfies request
    ArenaLock section(arena);
    // round up with header
    size_t req_rnd = RoundUp(CheckedAdd(request, sizeof (s->header)),
                             arena->roundup);
    for (;;) {  // loop until we find a suitable region
      // find the minimum levels that a block of this size must have
      int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1;
      if (i < arena->freelist.levels) {        // potential blocks exist
        AllocList *before = &arena->freelist;  // predecessor of s
        while ((s = Next(i, before, arena)) != nullptr &&
               s->header.size < req_rnd) {
          before = s;
        }
        if (s != nullptr) {  // we found a region
          break;
        }
      }
      // we unlock before mmap() both because mmap() may call a callback hook,
      // and because it may be slow.
      arena->mu.Unlock();
      // mmap generous 64K chunks to decrease
      // the chances/impact of fragmentation:
      size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
      void *new_pages;
#ifdef _WIN32
      new_pages = VirtualAlloc(0, new_pages_size,
                               MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
      ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
#else
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
        new_pages = base_internal::DirectMmap(nullptr, new_pages_size,
            PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
      } else {
        new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      }
#else
      new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
#endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
      if (new_pages == MAP_FAILED) {
        ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
      }

#endif  // _WIN32
      arena->mu.Lock();
      s = reinterpret_cast<AllocList *>(new_pages);
      s->header.size = new_pages_size;
      // Pretend the block is allocated; call AddToFreelist() to free it.
      s->header.magic = Magic(kMagicAllocated, &s->header);
      s->header.arena = arena;
      AddToFreelist(&s->levels, arena);  // insert new region into free list
    }
    AllocList *prev[kMaxLevel];
    LLA_SkiplistDelete(&arena->freelist, s, prev);  // remove from free list
    // s points to the first free region that's big enough
    if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) {
      // big enough to split
      AllocList *n = reinterpret_cast<AllocList *>
                        (req_rnd + reinterpret_cast<char *>(s));
      n->header.size = s->header.size - req_rnd;
      n->header.magic = Magic(kMagicAllocated, &n->header);
      n->header.arena = arena;
      s->header.size = req_rnd;
      AddToFreelist(&n->levels, arena);
    }
    s->header.magic = Magic(kMagicAllocated, &s->header);
    ABSL_RAW_CHECK(s->header.arena == arena, "");
    arena->allocation_count++;
    section.Leave();
    result = &s->levels;
  }
  ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
  return result;
}

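// Worked example (assuming a 64-bit build with 4 KiB pages): a request of 100
// bytes becomes req_rnd = RoundUp(100 + 32, 32) == 160, the freelist search
// starts at skiplist level LLA_SkiplistLevels(160, 64, nullptr) - 1 == 2, and
// if no free region is large enough the arena mmaps
// RoundUp(160, 4096 * 16) == 64 KiB and splits the 160-byte block off it.
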
void *LowLevelAlloc::Alloc(size_t request) {
  void *result = DoAllocWithArena(request, DefaultArena());
  return result;
}

void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
  ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena");
  void *result = DoAllocWithArena(request, arena);
  return result;
}

}  // namespace base_internal
}  // namespace absl

#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING