dlmalloc.h
/*
  Default header file for malloc-2.8.x, written by Doug Lea
  and released to the public domain, as explained at
  http://creativecommons.org/licenses/publicdomain.

  last update: Wed May 27 14:25:17 2009  Doug Lea  (dl at gee)

  This header is for ANSI C/C++ only.  You can set any of
  the following #defines before including:

  * If USE_DL_PREFIX is defined, it is assumed that malloc.c
    was also compiled with this option, so all routines
    have names starting with "dl".

  * If HAVE_USR_INCLUDE_MALLOC_H is defined, it is assumed that this
    file will be #included AFTER <malloc.h>. This is needed only if
    your system defines a struct mallinfo that is incompatible with the
    standard one declared here.  Otherwise, you can include this file
    INSTEAD of your system <malloc.h>.  At least on ANSI, all
    declarations should be compatible with system versions.

  * If MSPACES is defined, declarations for mspace versions are included.
*/

#ifndef MALLOC_280_H
#define MALLOC_280_H

#define USE_DL_PREFIX

#ifdef __cplusplus
extern "C" {
#endif

#include <stddef.h>   /* for size_t */

#ifndef ONLY_MSPACES
#define ONLY_MSPACES 0     /* define to a value */
#endif  /* ONLY_MSPACES */
#ifndef NO_MALLINFO
#define NO_MALLINFO 0
#endif  /* NO_MALLINFO */


#if !ONLY_MSPACES

#ifndef USE_DL_PREFIX
#define dlcalloc               calloc
#define dlfree                 free
#define dlmalloc               malloc
#define dlmemalign             memalign
#define dlrealloc              realloc
#define dlvalloc               valloc
#define dlpvalloc              pvalloc
#define dlmallinfo             mallinfo
#define dlmallopt              mallopt
#define dlmalloc_trim          malloc_trim
#define dlmalloc_stats         malloc_stats
#define dlmalloc_usable_size   malloc_usable_size
#define dlmalloc_footprint     malloc_footprint
#define dlindependent_calloc   independent_calloc
#define dlindependent_comalloc independent_comalloc
#endif /* USE_DL_PREFIX */
#if !NO_MALLINFO
#ifndef HAVE_USR_INCLUDE_MALLOC_H
#ifndef _MALLOC_H
#ifndef MALLINFO_FIELD_TYPE
#define MALLINFO_FIELD_TYPE size_t
#endif /* MALLINFO_FIELD_TYPE */
#ifndef STRUCT_MALLINFO_DECLARED
#define STRUCT_MALLINFO_DECLARED 1
struct mallinfo {
  MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
  MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
  MALLINFO_FIELD_TYPE smblks;   /* always 0 */
  MALLINFO_FIELD_TYPE hblks;    /* always 0 */
  MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
  MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
  MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
  MALLINFO_FIELD_TYPE fordblks; /* total free space */
  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
};
#endif /* STRUCT_MALLINFO_DECLARED */
#endif  /* _MALLOC_H */
#endif  /* HAVE_USR_INCLUDE_MALLOC_H */
#endif  /* !NO_MALLINFO */

/*
  malloc(size_t n)
  Returns a pointer to a newly allocated chunk of at least n bytes, or
  null if no space is available, in which case errno is set to ENOMEM
  on ANSI C systems.

  If n is zero, malloc returns a minimum-sized chunk. (The minimum
  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
  systems.)  Note that size_t is an unsigned type, so calls with
  arguments that would be negative if signed are interpreted as
  requests for huge amounts of space, which will often fail. The
  maximum supported value of n differs across systems, but is in all
  cases less than the maximum representable value of a size_t.
*/
void* dlmalloc(size_t);
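
/*
  For example, a minimal allocate/check/release cycle using the
  dl-prefixed entry points (the 1024-byte request size is arbitrary):

  char* buf = (char*)dlmalloc(1024);
  if (buf == 0)
    handle_out_of_memory();  // hypothetical error handler; errno is ENOMEM
  else {
    // ... use up to 1024 bytes of buf ...
    dlfree(buf);
  }
*/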

/*
  free(void* p)
  Releases the chunk of memory pointed to by p, that had been previously
  allocated using malloc or a related routine such as realloc.
  It has no effect if p is null. If p was not malloced or already
  freed, free(p) will by default cause the current program to abort.
*/
void  dlfree(void*);

/*
  calloc(size_t n_elements, size_t element_size);
  Returns a pointer to n_elements * element_size bytes, with all locations
  set to zero.
*/
void* dlcalloc(size_t, size_t);

/*
  realloc(void* p, size_t n)
  Returns a pointer to a chunk of size n that contains the same data
  as does chunk p up to the minimum of (n, p's size) bytes, or null
  if no space is available.

  The returned pointer may or may not be the same as p. The algorithm
  prefers extending p in most cases when possible, otherwise it
  employs the equivalent of a malloc-copy-free sequence.

  If p is null, realloc is equivalent to malloc.

  If space is not available, realloc returns null, errno is set (if on
  ANSI) and p is NOT freed.

  If n is for fewer bytes than already held by p, the newly unused
  space is lopped off and freed if possible.  realloc with a size
  argument of zero (re)allocates a minimum-sized chunk.

  The old Unix realloc convention of allowing the last-freed chunk
  to be used as an argument to realloc is not supported.
*/

void* dlrealloc(void*, size_t);
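
/*
  For example, a common growth idiom that avoids leaking the original
  block when dlrealloc fails (since p is NOT freed on failure, the
  result must be captured in a separate pointer first):

  void* q = dlrealloc(p, newsize);
  if (q == 0) {
    // p is still valid here; report the error or free p explicitly
  }
  else
    p = q;
*/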

/*
  memalign(size_t alignment, size_t n);
  Returns a pointer to a newly allocated chunk of n bytes, aligned
  in accord with the alignment argument.

  The alignment argument should be a power of two. If the argument is
  not a power of two, the nearest greater power is used.
  8-byte alignment is guaranteed by normal malloc calls, so don't
  bother calling memalign with an argument of 8 or less.

  Overreliance on memalign is a sure way to fragment space.
*/
void* dlmemalign(size_t, size_t);
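
/*
  For example, to request a block aligned to a 64-byte boundary
  (the 64-byte figure is only an illustration, e.g. a cache line):

  double* v = (double*)dlmemalign(64, 1000 * sizeof(double));
  if (v != 0) {
    // ((size_t)v % 64) == 0 holds here
    dlfree(v);
  }
*/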

/*
  valloc(size_t n);
  Equivalent to memalign(pagesize, n), where pagesize is the page
  size of the system. If the pagesize is unknown, 4096 is used.
*/
void* dlvalloc(size_t);

/*
  mallopt(int parameter_number, int parameter_value)
  Sets tunable parameters. The format is to provide a
  (parameter-number, parameter-value) pair.  mallopt then sets the
  corresponding parameter to the argument value if it can (i.e., so
  long as the value is meaningful), and returns 1 if successful else
  0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
  normally defined in malloc.h.  None of these are used in this malloc,
  so setting them has no effect. But this malloc also supports other
  options in mallopt:

  Symbol            param #  default    allowed param values
  M_TRIM_THRESHOLD     -1   2*1024*1024   any   (-1U disables trimming)
  M_GRANULARITY        -2     page size   any power of 2 >= page size
  M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
*/
int dlmallopt(int, int);

#define M_TRIM_THRESHOLD     (-1)
#define M_GRANULARITY        (-2)
#define M_MMAP_THRESHOLD     (-3)
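
/*
  For example, to tune trimming and mmap use at startup (the values
  shown are purely illustrative):

  dlmallopt(M_TRIM_THRESHOLD, 1024*1024);  // trim when ~1MB is unused at the top
  dlmallopt(M_MMAP_THRESHOLD, 512*1024);   // mmap requests of 512KB and larger
*/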


/*
  malloc_footprint();
  Returns the number of bytes obtained from the system.  The total
  number of bytes allocated by malloc, realloc etc., is less than this
  value. Unlike mallinfo, this function returns only a precomputed
  result, so can be called frequently to monitor memory consumption.
  Even if locks are otherwise defined, this function does not use them,
  so results might not be up to date.
*/
size_t dlmalloc_footprint(void);
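
/*
  For example, a cheap periodic monitoring hook (the fprintf reporting
  format is just one possibility):

  fprintf(stderr, "heap footprint: %lu bytes\n",
          (unsigned long)dlmalloc_footprint());
*/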

#if !NO_MALLINFO
/*
  mallinfo()
  Returns (by copy) a struct containing various summary statistics:

  arena:     current total non-mmapped bytes allocated from system
  ordblks:   the number of free chunks
  smblks:    always zero.
  hblks:     current number of mmapped regions
  hblkhd:    total bytes held in mmapped regions
  usmblks:   the maximum total allocated space. This will be greater
                than current total if trimming has occurred.
  fsmblks:   always zero
  uordblks:  current total allocated space (normal or mmapped)
  fordblks:  total free space
  keepcost:  the maximum number of bytes that could ideally be released
               back to system via malloc_trim. ("ideally" means that
               it ignores page restrictions etc.)

  Because these fields are ints, but internal bookkeeping may
  be kept as longs, the reported values may wrap around zero and
  thus be inaccurate.
*/

struct mallinfo dlmallinfo(void);
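
/*
  For example, to report a few of these statistics (the choice of
  fields and output format is illustrative):

  struct mallinfo mi = dlmallinfo();
  fprintf(stderr, "bytes from system (non-mmapped): %lu\n", (unsigned long)mi.arena);
  fprintf(stderr, "bytes currently allocated:       %lu\n", (unsigned long)mi.uordblks);
  fprintf(stderr, "bytes currently free:            %lu\n", (unsigned long)mi.fordblks);
  fprintf(stderr, "bytes trimmable (keepcost):      %lu\n", (unsigned long)mi.keepcost);
*/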
#endif  /* NO_MALLINFO */

/*
  independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);

  independent_calloc is similar to calloc, but instead of returning a
  single cleared space, it returns an array of pointers to n_elements
  independent elements that can hold contents of size elem_size, each
  of which starts out cleared, and can be independently freed,
  realloc'ed etc. The elements are guaranteed to be adjacently
  allocated (this is not guaranteed to occur with multiple callocs or
  mallocs), which may also improve cache locality in some
  applications.

  The "chunks" argument is optional (i.e., may be null, which is
  probably the most typical usage). If it is null, the returned array
  is itself dynamically allocated and should also be freed when it is
  no longer needed. Otherwise, the chunks array must be of at least
  n_elements in length. It is filled in with the pointers to the
  chunks.

  In either case, independent_calloc returns this pointer array, or
  null if the allocation failed.  If n_elements is zero and "chunks"
  is null, it returns a chunk representing an array with zero elements
  (which should be freed if not wanted).

  Each element must be individually freed when it is no longer
  needed. If you'd like to instead be able to free all at once, you
  should instead use regular calloc and assign pointers into this
  space to represent elements.  (In this case though, you cannot
  independently free elements.)

  independent_calloc simplifies and speeds up implementations of many
  kinds of pools.  It may also be useful when constructing large data
  structures that initially have a fixed number of fixed-sized nodes,
  but the number is not known at compile time, and some of the nodes
  may later need to be freed. For example:

  struct Node { int item; struct Node* next; };

  struct Node* build_list() {
    struct Node** pool;
    int n = read_number_of_nodes_needed();
    if (n <= 0) return 0;
    pool = (struct Node**)independent_calloc(n, sizeof(struct Node), 0);
    if (pool == 0) die();
    // organize into a linked list...
    struct Node* first = pool[0];
    for (int i = 0; i < n-1; ++i)
      pool[i]->next = pool[i+1];
    free(pool);     // Can now free the array (or not, if it is needed later)
    return first;
  }
*/
void** dlindependent_calloc(size_t, size_t, void**);

/*
  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);

  independent_comalloc allocates, all at once, a set of n_elements
  chunks with sizes indicated in the "sizes" array.  It returns
  an array of pointers to these elements, each of which can be
  independently freed, realloc'ed etc. The elements are guaranteed to
  be adjacently allocated (this is not guaranteed to occur with
  multiple callocs or mallocs), which may also improve cache locality
  in some applications.

  The "chunks" argument is optional (i.e., may be null). If it is null
  the returned array is itself dynamically allocated and should also
  be freed when it is no longer needed. Otherwise, the chunks array
  must be of at least n_elements in length. It is filled in with the
  pointers to the chunks.

  In either case, independent_comalloc returns this pointer array, or
  null if the allocation failed.  If n_elements is zero and chunks is
  null, it returns a chunk representing an array with zero elements
  (which should be freed if not wanted).

  Each element must be individually freed when it is no longer
  needed. If you'd like to instead be able to free all at once, you
  should instead use a single regular malloc, and assign pointers at
  particular offsets in the aggregate space. (In this case though, you
  cannot independently free elements.)

  independent_comalloc differs from independent_calloc in that each
  element may have a different size, and also that it does not
  automatically clear elements.

  independent_comalloc can be used to speed up allocation in cases
  where several structs or objects must always be allocated at the
  same time.  For example:

  struct Head { ... };
  struct Foot { ... };

  void send_message(char* msg) {
    int msglen = strlen(msg);
    size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
    void* chunks[3];
    if (independent_comalloc(3, sizes, chunks) == 0)
      die();
    struct Head* head = (struct Head*)(chunks[0]);
    char*        body = (char*)(chunks[1]);
    struct Foot* foot = (struct Foot*)(chunks[2]);
    // ...
  }

  In general though, independent_comalloc is worth using only for
  larger values of n_elements. For small values, you probably won't
  detect enough difference from a series of malloc calls to bother.

  Overuse of independent_comalloc can increase overall memory usage,
  since it cannot reuse existing noncontiguous small chunks that
  might be available for some of the elements.
*/
void** dlindependent_comalloc(size_t, size_t*, void**);


/*
  pvalloc(size_t n);
  Equivalent to valloc(minimum-page-that-holds(n)), that is,
  round up n to nearest pagesize.
*/
void*  dlpvalloc(size_t);

/*
  malloc_trim(size_t pad);

  If possible, gives memory back to the system (via negative arguments
  to sbrk) if there is unused memory at the `high' end of the malloc
  pool or in unused MMAP segments. You can call this after freeing
  large blocks of memory to potentially reduce the system-level memory
  requirements of a program. However, it cannot guarantee to reduce
  memory. Under some allocation patterns, some large free blocks of
  memory will be locked between two used chunks, so they cannot be
  given back to the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed. If this argument is zero, only
  the minimum amount of memory to maintain internal data structures
  will be left. Non-zero arguments can be supplied to maintain enough
  trailing space to service future expected allocations without having
  to re-obtain memory from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.
*/
int  dlmalloc_trim(size_t);
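
/*
  For example, after freeing a large working set, ask the allocator to
  give unused memory back to the system while keeping 64KB of slack
  for future requests (the pad value is illustrative):

  dlfree(big_buffer);                    // big_buffer: a previously allocated block
  if (dlmalloc_trim(64 * 1024) == 1) {
    // some memory was actually returned to the system
  }
*/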

/*
  malloc_stats();
  Prints on stderr the amount of space obtained from the system (both
  via sbrk and mmap), the maximum amount (which may be more than
  current if malloc_trim and/or munmap got called), and the current
  number of bytes allocated via malloc (or realloc, etc) but not yet
  freed. Note that this is the number of bytes allocated, not the
  number requested. It will be larger than the number requested
  because of alignment and bookkeeping overhead. Because it includes
  alignment wastage as being in use, this figure may be greater than
  zero even when no user-level chunks are allocated.

  The reported current and maximum system memory can be inaccurate if
  a program makes other calls to system memory allocation functions
  (normally sbrk) outside of malloc.

  malloc_stats prints only the most commonly interesting statistics.
  More information can be obtained by calling mallinfo.
*/
void  dlmalloc_stats(void);

#endif /* !ONLY_MSPACES */

/*
  malloc_usable_size(void* p);

  Returns the number of bytes you can actually use in
  an allocated chunk, which may be more than you requested (although
  often not) due to alignment and minimum size constraints.
  You can use this many bytes without worrying about
  overwriting other allocated objects. This is not a particularly great
  programming practice. malloc_usable_size can be more useful in
  debugging and assertions, for example:

  p = malloc(n);
  assert(malloc_usable_size(p) >= 256);
*/
size_t dlmalloc_usable_size(void*);


#if MSPACES

/*
  mspace is an opaque type representing an independent
  region of space that supports mspace_malloc, etc.
*/
typedef void* mspace;

/*
  create_mspace creates and returns a new independent space with the
  given initial capacity, or, if 0, the default granularity size.  It
  returns null if there is no system memory available to create the
  space.  If argument locked is non-zero, the space uses a separate
  lock to control access. The capacity of the space will grow
  dynamically as needed to service mspace_malloc requests.  You can
  control the sizes of incremental increases of this space by
  compiling with a different DEFAULT_GRANULARITY or dynamically
  setting with mallopt(M_GRANULARITY, value).
*/
mspace create_mspace(size_t capacity, int locked);

/*
  destroy_mspace destroys the given space, and attempts to return all
  of its memory back to the system, returning the total number of
  bytes freed. After destruction, the results of access to all memory
  used by the space become undefined.
*/
size_t destroy_mspace(mspace msp);

/*
  create_mspace_with_base uses the memory supplied as the initial base
  of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
  space is used for bookkeeping, so the capacity must be at least this
  large. (Otherwise 0 is returned.) When this initial space is
  exhausted, additional memory will be obtained from the system.
  Destroying this space will deallocate all additionally allocated
  space (if possible) but not the initial base.
*/
mspace create_mspace_with_base(void* base, size_t capacity, int locked);
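
/*
  For example, to carve an mspace out of a caller-supplied static
  buffer (the 1MB size is arbitrary, but must exceed the bookkeeping
  overhead described above):

  static char arena[1024 * 1024];

  mspace ms = create_mspace_with_base(arena, sizeof(arena), 0);
  if (ms != 0) {
    void* p = mspace_malloc(ms, 256);
    // ...
    destroy_mspace(ms);   // releases any extra system memory, but not arena itself
  }
*/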

/*
  mspace_track_large_chunks controls whether requests for large chunks
  are allocated in their own untracked mmapped regions, separate from
  others in this mspace. By default large chunks are not tracked,
  which reduces fragmentation. However, such chunks are not
  necessarily released to the system upon destroy_mspace.  Enabling
  tracking by setting to true may increase fragmentation, but avoids
  leakage when relying on destroy_mspace to release all memory
  allocated using this space.  The function returns the previous
  setting.
*/
int mspace_track_large_chunks(mspace msp, int enable);

/*
  mspace_malloc behaves as malloc, but operates within
  the given space.
*/
void* mspace_malloc(mspace msp, size_t bytes);

/*
  mspace_free behaves as free, but operates within
  the given space.

  If compiled with FOOTERS==1, mspace_free is not actually needed.
  free may be called instead of mspace_free because freed chunks from
  any space are handled by their originating spaces.
*/
void mspace_free(mspace msp, void* mem);
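
/*
  For example, a self-contained per-task heap: everything allocated
  from the mspace can be discarded in a single destroy_mspace call
  (subject to the large-chunk tracking note above), so individual
  mspace_free calls are optional. Sizes and the locked flag are
  illustrative:

  void run_task(void) {
    mspace ms = create_mspace(0, 1);              // default capacity, locked
    if (ms == 0) return;
    int*  counters = (int*)mspace_malloc(ms, 100 * sizeof(int));
    char* scratch  = (char*)mspace_malloc(ms, 4096);
    // ... use counters and scratch ...
    mspace_free(ms, scratch);                     // optional; shown for symmetry
    destroy_mspace(ms);                           // returns the rest to the system
  }
*/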

/*
  mspace_realloc behaves as realloc, but operates within
  the given space.

  If compiled with FOOTERS==1, mspace_realloc is not actually
  needed.  realloc may be called instead of mspace_realloc because
  realloced chunks from any space are handled by their originating
  spaces.
*/
void* mspace_realloc(mspace msp, void* mem, size_t newsize);

/*
  mspace_calloc behaves as calloc, but operates within
  the given space.
*/
void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);

/*
  mspace_memalign behaves as memalign, but operates within
  the given space.
*/
void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);

/*
  mspace_independent_calloc behaves as independent_calloc, but
  operates within the given space.
*/
void** mspace_independent_calloc(mspace msp, size_t n_elements,
                                 size_t elem_size, void* chunks[]);

/*
  mspace_independent_comalloc behaves as independent_comalloc, but
  operates within the given space.
*/
void** mspace_independent_comalloc(mspace msp, size_t n_elements,
                                   size_t sizes[], void* chunks[]);

/*
  mspace_footprint() returns the number of bytes obtained from the
  system for this space.
*/
size_t mspace_footprint(mspace msp);


#if !NO_MALLINFO
/*
  mspace_mallinfo behaves as mallinfo, but reports properties of
  the given space.
*/
struct mallinfo mspace_mallinfo(mspace msp);
#endif /* NO_MALLINFO */

/*
  mspace_usable_size(void* p) behaves the same as malloc_usable_size.
*/
size_t mspace_usable_size(void* mem);

/*
  mspace_malloc_stats behaves as malloc_stats, but reports
  properties of the given space.
*/
void mspace_malloc_stats(mspace msp);

/*
  mspace_trim behaves as malloc_trim, but
  operates within the given space.
*/
int mspace_trim(mspace msp, size_t pad);

/*
  An alias for mallopt.
*/
int mspace_mallopt(int, int);

#endif  /* MSPACES */

#ifdef __cplusplus
};  /* end of extern "C" */
#endif

#endif /* MALLOC_280_H */

