/*
  dlmalloc -- Doug Lea's malloc/free/realloc implementation, version 2.8.4
  (DLMALLOC_VERSION 20804 below).  Compile-time configuration, the exported
  declarations, and the implementation itself follow.
*/
00483 #define USE_DL_PREFIX
00484
00485
00486 #define NO_SEGMENT_TRAVERSAL 1
00487
00488
00489 #ifndef DLMALLOC_VERSION
00490 #define DLMALLOC_VERSION 20804
00491 #endif
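/*
  Build sketch (illustrative, not part of the original sources): most of the
  configuration macros below are set only if not already defined, so a build
  can override the defaults from the compiler command line, for example

      cc -O2 -DUSE_LOCKS=1 -DFOOTERS=1 -DDEBUG -c malloc.c

  The macro names are the ones defined in this file; the particular
  combination shown is only an example.
*/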
00492
00493 #ifndef WIN32
00494 #ifdef _WIN32
00495 #define WIN32 1
00496 #endif
00497 #ifdef _WIN32_WCE
00498 #define LACKS_FCNTL_H
00499 #define WIN32 1
00500 #endif
00501 #endif
00502 #ifdef WIN32
00503 #define WIN32_LEAN_AND_MEAN
00504 #include <windows.h>
00505 #define HAVE_MMAP 1
00506 #define HAVE_MORECORE 0
00507 #define LACKS_UNISTD_H
00508 #define LACKS_SYS_PARAM_H
00509 #define LACKS_SYS_MMAN_H
00510 #define LACKS_STRING_H
00511 #define LACKS_STRINGS_H
00512 #define LACKS_SYS_TYPES_H
00513 #define LACKS_ERRNO_H
00514 #ifndef MALLOC_FAILURE_ACTION
00515 #define MALLOC_FAILURE_ACTION
00516 #endif
00517 #ifdef _WIN32_WCE
00518 #define MMAP_CLEARS 0
00519 #else
00520 #define MMAP_CLEARS 1
00521 #endif
00522 #endif
00523
00524 #if defined(DARWIN) || defined(_DARWIN)
00525
00526 #ifndef HAVE_MORECORE
00527 #define HAVE_MORECORE 0
00528 #define HAVE_MMAP 1
00529
00530 #ifndef MALLOC_ALIGNMENT
00531 #define MALLOC_ALIGNMENT ((size_t)16U)
00532 #endif
00533 #endif
00534 #endif
00535
00536 #ifndef LACKS_SYS_TYPES_H
00537 #include <sys/types.h>
00538 #endif
00539
00540 #if (defined(__GNUC__) && ((defined(__i386__) || defined(__x86_64__)))) || (defined(_MSC_VER) && _MSC_VER>=1310)
00541 #define SPIN_LOCKS_AVAILABLE 1
00542 #else
00543 #define SPIN_LOCKS_AVAILABLE 0
00544 #endif
00545
00546
00547 #define MAX_SIZE_T (~(size_t)0)
00548
00549 #ifndef ONLY_MSPACES
00550 #define ONLY_MSPACES 0
00551 #else
00552 #define ONLY_MSPACES 1
00553 #endif
00554 #ifndef MSPACES
00555 #if ONLY_MSPACES
00556 #define MSPACES 1
00557 #else
00558 #define MSPACES 0
00559 #endif
00560 #endif
00561 #ifndef MALLOC_ALIGNMENT
00562 #define MALLOC_ALIGNMENT ((size_t)8U)
00563 #endif
00564 #ifndef FOOTERS
00565 #define FOOTERS 0
00566 #endif
00567 #ifndef ABORT
00568 #define ABORT abort()
00569 #endif
00570 #ifndef ABORT_ON_ASSERT_FAILURE
00571 #define ABORT_ON_ASSERT_FAILURE 1
00572 #endif
00573 #ifndef PROCEED_ON_ERROR
00574 #define PROCEED_ON_ERROR 0
00575 #endif
00576 #ifndef USE_LOCKS
00577 #define USE_LOCKS 0
00578 #endif
00579 #ifndef USE_SPIN_LOCKS
00580 #if USE_LOCKS && SPIN_LOCKS_AVAILABLE
00581 #define USE_SPIN_LOCKS 1
00582 #else
00583 #define USE_SPIN_LOCKS 0
00584 #endif
00585 #endif
00586 #ifndef INSECURE
00587 #define INSECURE 0
00588 #endif
00589 #ifndef HAVE_MMAP
00590 #define HAVE_MMAP 1
00591 #endif
00592 #ifndef MMAP_CLEARS
00593 #define MMAP_CLEARS 1
00594 #endif
00595 #ifndef HAVE_MREMAP
00596 #ifdef linux
00597 #define HAVE_MREMAP 1
00598 #else
00599 #define HAVE_MREMAP 0
00600 #endif
00601 #endif
00602 #ifndef MALLOC_FAILURE_ACTION
00603 #define MALLOC_FAILURE_ACTION errno = ENOMEM;
00604 #endif
00605 #ifndef HAVE_MORECORE
00606 #if ONLY_MSPACES
00607 #define HAVE_MORECORE 0
00608 #else
00609 #define HAVE_MORECORE 1
00610 #endif
00611 #endif
00612 #if !HAVE_MORECORE
00613 #define MORECORE_CONTIGUOUS 0
00614 #else
00615 #define MORECORE_DEFAULT sbrk
00616 #ifndef MORECORE_CONTIGUOUS
00617 #define MORECORE_CONTIGUOUS 1
00618 #endif
00619 #endif
00620 #ifndef DEFAULT_GRANULARITY
00621 #if (MORECORE_CONTIGUOUS || defined(WIN32))
00622 #define DEFAULT_GRANULARITY (0)
00623 #else
00624 #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
00625 #endif
00626 #endif
00627 #ifndef DEFAULT_TRIM_THRESHOLD
00628 #ifndef MORECORE_CANNOT_TRIM
00629 #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
00630 #else
00631 #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
00632 #endif
00633 #endif
00634 #ifndef DEFAULT_MMAP_THRESHOLD
00635 #if HAVE_MMAP
00636 #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
00637 #else
00638 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
00639 #endif
00640 #endif
00641 #ifndef MAX_RELEASE_CHECK_RATE
00642 #if HAVE_MMAP
00643 #define MAX_RELEASE_CHECK_RATE 4095
00644 #else
00645 #define MAX_RELEASE_CHECK_RATE MAX_SIZE_T
00646 #endif
00647 #endif
00648 #ifndef USE_BUILTIN_FFS
00649 #define USE_BUILTIN_FFS 0
00650 #endif
00651 #ifndef USE_DEV_RANDOM
00652 #define USE_DEV_RANDOM 0
00653 #endif
00654 #ifndef NO_MALLINFO
00655 #define NO_MALLINFO 0
00656 #endif
00657 #ifndef MALLINFO_FIELD_TYPE
00658 #define MALLINFO_FIELD_TYPE size_t
00659 #endif
00660 #ifndef NO_SEGMENT_TRAVERSAL
00661 #define NO_SEGMENT_TRAVERSAL 0
00662 #endif
00671 #define M_TRIM_THRESHOLD (-1)
00672 #define M_GRANULARITY (-2)
00673 #define M_MMAP_THRESHOLD (-3)
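/*
  Usage sketch (illustrative): these symbols are the parameter numbers
  accepted by dlmallopt()/mspace_mallopt() declared below.  As implemented in
  change_mparam(), a value of -1 is treated as the largest possible size for
  the size-valued parameters.

      dlmallopt(M_TRIM_THRESHOLD, -1);         // never release memory back
      dlmallopt(M_MMAP_THRESHOLD, 1024*1024);  // mmap requests of 1MB and up
*/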
00677 #if !NO_MALLINFO
00702 #ifdef HAVE_USR_INCLUDE_MALLOC_H
00703 #include "/usr/include/malloc.h"
00704 #else
00705 #ifndef STRUCT_MALLINFO_DECLARED
00706 #define STRUCT_MALLINFO_DECLARED 1
00707 struct mallinfo {
00708 MALLINFO_FIELD_TYPE arena;
00709 MALLINFO_FIELD_TYPE ordblks;
00710 MALLINFO_FIELD_TYPE smblks;
00711 MALLINFO_FIELD_TYPE hblks;
00712 MALLINFO_FIELD_TYPE hblkhd;
00713 MALLINFO_FIELD_TYPE usmblks;
00714 MALLINFO_FIELD_TYPE fsmblks;
00715 MALLINFO_FIELD_TYPE uordblks;
00716 MALLINFO_FIELD_TYPE fordblks;
00717 MALLINFO_FIELD_TYPE keepcost;
00718 };
00719 #endif
00720 #endif
00721 #endif
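/*
  Roughly, as filled in by internal_mallinfo() in this file: arena is the
  space held in ordinary segments, ordblks the number of free chunks, hblkhd
  the space in directly mmapped regions, usmblks the peak footprint, uordblks
  the total allocated space, fordblks the total free space, and keepcost the
  releasable size of the topmost chunk.  The remaining fields are unused by
  this allocator and reported as zero.
*/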
00728 #ifndef FORCEINLINE
00729 #if defined(__GNUC__)
00730 #define FORCEINLINE __inline __attribute__ ((always_inline))
00731 #elif defined(_MSC_VER)
00732 #define FORCEINLINE __forceinline
00733 #endif
00734 #endif
00735 #ifndef NOINLINE
00736 #if defined(__GNUC__)
00737 #define NOINLINE __attribute__ ((noinline))
00738 #elif defined(_MSC_VER)
00739 #define NOINLINE __declspec(noinline)
00740 #else
00741 #define NOINLINE
00742 #endif
00743 #endif
00744
00745 #ifdef __cplusplus
00746 extern "C" {
00747 #ifndef FORCEINLINE
00748 #define FORCEINLINE inline
00749 #endif
00750 #endif
00751 #ifndef FORCEINLINE
00752 #define FORCEINLINE
00753 #endif
00754
00755 #if !ONLY_MSPACES
00759 #ifndef USE_DL_PREFIX
00760 #define dlcalloc calloc
00761 #define dlfree free
00762 #define dlmalloc malloc
00763 #define dlmemalign memalign
00764 #define dlrealloc realloc
00765 #define dlvalloc valloc
00766 #define dlpvalloc pvalloc
00767 #define dlmallinfo mallinfo
00768 #define dlmallopt mallopt
00769 #define dlmalloc_trim malloc_trim
00770 #define dlmalloc_stats malloc_stats
00771 #define dlmalloc_usable_size malloc_usable_size
00772 #define dlmalloc_footprint malloc_footprint
00773 #define dlmalloc_max_footprint malloc_max_footprint
00774 #define dlindependent_calloc independent_calloc
00775 #define dlindependent_comalloc independent_comalloc
00776 #endif
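/*
  Usage sketch (illustrative): because USE_DL_PREFIX is defined at the top of
  this file, the entry points are exported under the dl-prefixed names
  declared below, with their usual C library semantics; without it, the
  defines above would map each dl name back onto the standard one.

      void* p = dlmalloc(100);
      p = dlrealloc(p, 200);
      dlfree(p);
*/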
00793 void* dlmalloc(size_t);
00802 void dlfree(void*);
00809 void* dlcalloc(size_t, size_t);
00834 void* dlrealloc(void*, size_t);
00848 void* dlmemalign(size_t, size_t);
00855 void* dlvalloc(size_t);
00879 int dlmallopt(int, int);
00890 size_t dlmalloc_footprint(void);
00903 size_t dlmalloc_max_footprint(void);
00904
00905 #if !NO_MALLINFO
00928 struct mallinfo dlmallinfo(void);
00929 #endif
00983 void** dlindependent_calloc(size_t, size_t, void**);
01044 void** dlindependent_comalloc(size_t, size_t*, void**);
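/*
  Usage sketch (illustrative; the struct names are hypothetical): allocating
  the pieces of an aggregate in a single call, with the resulting pointers
  written into a caller-supplied array.

      void*  parts[3];
      size_t sizes[3];
      sizes[0] = sizeof(struct header);
      sizes[1] = sizeof(struct body);
      sizes[2] = 100;
      if (dlindependent_comalloc(3, sizes, parts) != 0) {
        // parts[0..2] now point to independently freeable chunks of the
        // requested sizes.
      }
*/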
01052 void* dlpvalloc(size_t);
01075 int dlmalloc_trim(size_t);
01096 void dlmalloc_stats(void);
01097
01098 #endif
01114 size_t dlmalloc_usable_size(void*);
01115
01116
01117 #if MSPACES
01123 typedef void* mspace;
01136 mspace create_mspace(size_t capacity, int locked);
01144 size_t destroy_mspace(mspace msp);
01155 mspace create_mspace_with_base(void* base, size_t capacity, int locked);
01168 int mspace_track_large_chunks(mspace msp, int enable);
01175 void* mspace_malloc(mspace msp, size_t bytes);
01185 void mspace_free(mspace msp, void* mem);
01196 void* mspace_realloc(mspace msp, void* mem, size_t newsize);
01202 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
01208 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
01214 void** mspace_independent_calloc(mspace msp, size_t n_elements,
01215 size_t elem_size, void* chunks[]);
01221 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
01222 size_t sizes[], void* chunks[]);
01228 size_t mspace_footprint(mspace msp);
01234 size_t mspace_max_footprint(mspace msp);
01235
01236
01237 #if !NO_MALLINFO
01238
01239
01240
01241
01242 struct mallinfo mspace_mallinfo(mspace msp);
01243 #endif
01244
01245
01246
01247
01248 size_t mspace_usable_size(void* mem);
01254 void mspace_malloc_stats(mspace msp);
01260 int mspace_trim(mspace msp, size_t pad);
01261
01262
01263
01264
01265 int mspace_mallopt(int, int);
01266
01267 #endif
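/*
  Usage sketch (illustrative): an independent allocation arena, created from
  the default system source and torn down in one step.

      mspace ms = create_mspace(0, 0);  // 0: default initial capacity, 0: no locking
      void*  p  = mspace_malloc(ms, 128);
      mspace_free(ms, p);
      destroy_mspace(ms);               // releases everything still allocated in ms
*/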
01268
01269 #ifdef __cplusplus
01270 };
01271 #endif
01285 #ifdef WIN32
01286 #pragma warning( disable : 4146 )
01287 #endif
01288
01289 #include <stdio.h>
01290
01291 #ifndef LACKS_ERRNO_H
01292 #include <errno.h>
01293 #endif
01294
01295
01296 #include <time.h>
01297
01298 #ifndef LACKS_STDLIB_H
01299 #include <stdlib.h>
01300 #endif
01301 #ifdef DEBUG
01302 #if ABORT_ON_ASSERT_FAILURE
01303 #undef assert
01304 #define assert(x) if(!(x)) ABORT
01305 #else
01306 #include <assert.h>
01307 #endif
01308 #else
01309 #ifndef assert
01310 #define assert(x)
01311 #endif
01312 #define DEBUG 0
01313 #endif
01314 #ifndef LACKS_STRING_H
01315 #include <string.h>
01316 #endif
01317 #if USE_BUILTIN_FFS
01318 #ifndef LACKS_STRINGS_H
01319 #include <strings.h>
01320 #endif
01321 #endif
01322 #if HAVE_MMAP
01323 #ifndef LACKS_SYS_MMAN_H
01324
01325 #if (defined(linux) && !defined(__USE_GNU))
01326 #define __USE_GNU 1
01327 #include <sys/mman.h>
01328 #undef __USE_GNU
01329 #else
01330 #include <sys/mman.h>
01331 #endif
01332 #endif
01333 #ifndef LACKS_FCNTL_H
01334 #include <fcntl.h>
01335 #endif
01336 #endif
01337 #ifndef LACKS_UNISTD_H
01338 #include <unistd.h>
01339 #else
01340 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
01341 extern void* sbrk(ptrdiff_t);
01342 #endif
01343 #endif
01344
01345
01346 #if USE_LOCKS
01347 #ifndef WIN32
01348 #include <pthread.h>
01349 #if defined (__SVR4) && defined (__sun)
01350 #include <thread.h>
01351 #endif
01352 #else
01353 #ifndef _M_AMD64
01354
01355 #ifdef __cplusplus
01356 extern "C" {
01357 #endif
01358 LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
01359 LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
01360 #ifdef __cplusplus
01361 }
01362 #endif
01363 #endif
01364 #pragma intrinsic (_InterlockedCompareExchange)
01365 #pragma intrinsic (_InterlockedExchange)
01366 #define interlockedcompareexchange _InterlockedCompareExchange
01367 #define interlockedexchange _InterlockedExchange
01368 #endif
01369 #endif
01370
01371
01372 #if defined(_MSC_VER) && _MSC_VER>=1300
01373 #ifndef BitScanForward
01374 #ifdef __cplusplus
01375 extern "C" {
01376 #endif
01377 unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
01378 unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
01379 #ifdef __cplusplus
01380 }
01381 #endif
01382
01383 #define BitScanForward _BitScanForward
01384 #define BitScanReverse _BitScanReverse
01385 #pragma intrinsic(_BitScanForward)
01386 #pragma intrinsic(_BitScanReverse)
01387 #endif
01388 #endif
01389
01390 #ifndef WIN32
01391 #ifndef malloc_getpagesize
01392 # ifdef _SC_PAGESIZE
01393 # ifndef _SC_PAGE_SIZE
01394 # define _SC_PAGE_SIZE _SC_PAGESIZE
01395 # endif
01396 # endif
01397 # ifdef _SC_PAGE_SIZE
01398 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
01399 # else
01400 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
01401 extern size_t getpagesize();
01402 # define malloc_getpagesize getpagesize()
01403 # else
01404 # ifdef WIN32
01405 # define malloc_getpagesize getpagesize()
01406 # else
01407 # ifndef LACKS_SYS_PARAM_H
01408 # include <sys/param.h>
01409 # endif
01410 # ifdef EXEC_PAGESIZE
01411 # define malloc_getpagesize EXEC_PAGESIZE
01412 # else
01413 # ifdef NBPG
01414 # ifndef CLSIZE
01415 # define malloc_getpagesize NBPG
01416 # else
01417 # define malloc_getpagesize (NBPG * CLSIZE)
01418 # endif
01419 # else
01420 # ifdef NBPC
01421 # define malloc_getpagesize NBPC
01422 # else
01423 # ifdef PAGESIZE
01424 # define malloc_getpagesize PAGESIZE
01425 # else
01426 # define malloc_getpagesize ((size_t)4096U)
01427 # endif
01428 # endif
01429 # endif
01430 # endif
01431 # endif
01432 # endif
01433 # endif
01434 #endif
01435 #endif
01442 #define SIZE_T_SIZE (sizeof(size_t))
01443 #define SIZE_T_BITSIZE (sizeof(size_t) << 3)
01444
01445
01446
01447 #define SIZE_T_ZERO ((size_t)0)
01448 #define SIZE_T_ONE ((size_t)1)
01449 #define SIZE_T_TWO ((size_t)2)
01450 #define SIZE_T_FOUR ((size_t)4)
01451 #define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
01452 #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
01453 #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
01454 #define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
01455
01456
01457 #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
01458
01459
01460 #define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
01461
01462
01463 #define align_offset(A)\
01464 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
01465 ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
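/* For example, with the default MALLOC_ALIGNMENT of 8, align_offset(20) is 4
   and align_offset(24) is 0: the macro yields the distance to the next
   alignment boundary, or zero when the address is already aligned. */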
01477 #define MFAIL ((void*)(MAX_SIZE_T))
01478 #define CMFAIL ((char*)(MFAIL))
01479
01480 #if HAVE_MMAP
01481
01482 #ifndef WIN32
01483 #define MUNMAP_DEFAULT(a, s) munmap((a), (s))
01484 #define MMAP_PROT (PROT_READ|PROT_WRITE)
01485 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
01486 #define MAP_ANONYMOUS MAP_ANON
01487 #endif
01488 #ifdef MAP_ANONYMOUS
01489 #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
01490 #define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
01491 #else
01492
01493
01494
01495
01496 #define MMAP_FLAGS (MAP_PRIVATE)
01497 static int dev_zero_fd = -1;
01498 #define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
01499 (dev_zero_fd = open("/dev/zero", O_RDWR), \
01500 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
01501 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
01502 #endif
01503
01504 #define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
01505
01506 #else
01507
01508
01509 static FORCEINLINE void* win32mmap(size_t size) {
01510 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
01511 return (ptr != 0)? ptr: MFAIL;
01512 }
01513
01514
01515 static FORCEINLINE void* win32direct_mmap(size_t size) {
01516 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
01517 PAGE_READWRITE);
01518 return (ptr != 0)? ptr: MFAIL;
01519 }
01520
01521
01522 static FORCEINLINE int win32munmap(void* ptr, size_t size) {
01523 MEMORY_BASIC_INFORMATION minfo;
01524 char* cptr = (char*)ptr;
01525 while (size) {
01526 if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
01527 return -1;
01528 if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
01529 minfo.State != MEM_COMMIT || minfo.RegionSize > size)
01530 return -1;
01531 if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
01532 return -1;
01533 cptr += minfo.RegionSize;
01534 size -= minfo.RegionSize;
01535 }
01536 return 0;
01537 }
01538
01539 #define MMAP_DEFAULT(s) win32mmap(s)
01540 #define MUNMAP_DEFAULT(a, s) win32munmap((a), (s))
01541 #define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s)
01542 #endif
01543 #endif
01544
01545 #if HAVE_MREMAP
01546 #ifndef WIN32
01547 #define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
01548 #endif
01549 #endif
01550
01551
01555 #if HAVE_MORECORE
01556 #ifdef MORECORE
01557 #define CALL_MORECORE(S) MORECORE(S)
01558 #else
01559 #define CALL_MORECORE(S) MORECORE_DEFAULT(S)
01560 #endif
01561 #else
01562 #define CALL_MORECORE(S) MFAIL
01563 #endif
01564
01568 #if HAVE_MMAP
01569 #define USE_MMAP_BIT (SIZE_T_ONE)
01570
01571 #ifdef MMAP
01572 #define CALL_MMAP(s) MMAP(s)
01573 #else
01574 #define CALL_MMAP(s) MMAP_DEFAULT(s)
01575 #endif
01576 #ifdef MUNMAP
01577 #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
01578 #else
01579 #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s))
01580 #endif
01581 #ifdef DIRECT_MMAP
01582 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
01583 #else
01584 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
01585 #endif
01586 #else
01587 #define USE_MMAP_BIT (SIZE_T_ZERO)
01588
01589 #define MMAP(s) MFAIL
01590 #define MUNMAP(a, s) (-1)
01591 #define DIRECT_MMAP(s) MFAIL
01592 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
01593 #define CALL_MMAP(s) MMAP(s)
01594 #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
01595 #endif
01596
01600 #if HAVE_MMAP && HAVE_MREMAP
01601 #ifdef MREMAP
01602 #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
01603 #else
01604 #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
01605 #endif
01606 #else
01607 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
01608 #endif
01609
01610
01611 #define USE_NONCONTIGUOUS_BIT (4U)
01612
01613
01614 #define EXTERN_BIT (8U)
01649 #if USE_LOCKS == 1
01650
01651 #if USE_SPIN_LOCKS && SPIN_LOCKS_AVAILABLE
01652 #ifndef WIN32
01653
01654
01655 struct pthread_mlock_t {
01656 volatile unsigned int l;
01657 unsigned int c;
01658 pthread_t threadid;
01659 };
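/* A recursive spin lock: l is the spin word, threadid records the current
   owner, and c counts recursive acquisitions by that owner. */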
01660 #define MLOCK_T struct pthread_mlock_t
01661 #define CURRENT_THREAD pthread_self()
01662 #define INITIAL_LOCK(sl) ((sl)->threadid = 0, (sl)->l = (sl)->c = 0, 0)
01663 #define ACQUIRE_LOCK(sl) pthread_acquire_lock(sl)
01664 #define RELEASE_LOCK(sl) pthread_release_lock(sl)
01665 #define TRY_LOCK(sl) pthread_try_lock(sl)
01666 #define SPINS_PER_YIELD 63
01667
01668 static MLOCK_T malloc_global_mutex = { 0, 0, 0};
01669
01670 static FORCEINLINE int pthread_acquire_lock (MLOCK_T *sl) {
01671 int spins = 0;
01672 volatile unsigned int* lp = &sl->l;
01673 for (;;) {
01674 if (*lp != 0) {
01675 if (sl->threadid == CURRENT_THREAD) {
01676 ++sl->c;
01677 return 0;
01678 }
01679 }
01680 else {
01681
01682 int cmp = 0;
01683 int val = 1;
01684 int ret;
01685 __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
01686 : "=a" (ret)
01687 : "r" (val), "m" (*(lp)), "0"(cmp)
01688 : "memory", "cc");
01689 if (!ret) {
01690 assert(!sl->threadid);
01691 sl->threadid = CURRENT_THREAD;
01692 sl->c = 1;
01693 return 0;
01694 }
01695 }
01696 if ((++spins & SPINS_PER_YIELD) == 0) {
01697 #if defined (__SVR4) && defined (__sun)
01698 thr_yield();
01699 #else
01700 #if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__)
01701 sched_yield();
01702 #else
01703 ;
01704 #endif
01705 #endif
01706 }
01707 }
01708 }
01709
01710 static FORCEINLINE void pthread_release_lock (MLOCK_T *sl) {
01711 volatile unsigned int* lp = &sl->l;
01712 assert(*lp != 0);
01713 assert(sl->threadid == CURRENT_THREAD);
01714 if (--sl->c == 0) {
01715 sl->threadid = 0;
01716 int prev = 0;
01717 int ret;
01718 __asm__ __volatile__ ("lock; xchgl %0, %1"
01719 : "=r" (ret)
01720 : "m" (*(lp)), "0"(prev)
01721 : "memory");
01722 }
01723 }
01724
01725 static FORCEINLINE int pthread_try_lock (MLOCK_T *sl) {
01726 volatile unsigned int* lp = &sl->l;
01727 if (*lp != 0) {
01728 if (sl->threadid == CURRENT_THREAD) {
01729 ++sl->c;
01730 return 1;
01731 }
01732 }
01733 else {
01734 int cmp = 0;
01735 int val = 1;
01736 int ret;
01737 __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
01738 : "=a" (ret)
01739 : "r" (val), "m" (*(lp)), "0"(cmp)
01740 : "memory", "cc");
01741 if (!ret) {
01742 assert(!sl->threadid);
01743 sl->threadid = CURRENT_THREAD;
01744 sl->c = 1;
01745 return 1;
01746 }
01747 }
01748 return 0;
01749 }
01750
01751
01752 #else
01753
01754 struct win32_mlock_t {
01755 volatile long l;
01756 unsigned int c;
01757 long threadid;
01758 };
01759
01760 #define MLOCK_T struct win32_mlock_t
01761 #define CURRENT_THREAD GetCurrentThreadId()
01762 #define INITIAL_LOCK(sl) ((sl)->threadid = 0, (sl)->l = (sl)->c = 0, 0)
01763 #define ACQUIRE_LOCK(sl) win32_acquire_lock(sl)
01764 #define RELEASE_LOCK(sl) win32_release_lock(sl)
01765 #define TRY_LOCK(sl) win32_try_lock(sl)
01766 #define SPINS_PER_YIELD 63
01767
01768 static MLOCK_T malloc_global_mutex = { 0, 0, 0};
01769
01770 static FORCEINLINE int win32_acquire_lock (MLOCK_T *sl) {
01771 int spins = 0;
01772 for (;;) {
01773 if (sl->l != 0) {
01774 if (sl->threadid == CURRENT_THREAD) {
01775 ++sl->c;
01776 return 0;
01777 }
01778 }
01779 else {
01780 if (!interlockedexchange(&sl->l, 1)) {
01781 assert(!sl->threadid);
01782 sl->threadid = CURRENT_THREAD;
01783 sl->c = 1;
01784 return 0;
01785 }
01786 }
01787 if ((++spins & SPINS_PER_YIELD) == 0)
01788 SleepEx(0, FALSE);
01789 }
01790 }
01791
01792 static FORCEINLINE void win32_release_lock (MLOCK_T *sl) {
01793 assert(sl->threadid == CURRENT_THREAD);
01794 assert(sl->l != 0);
01795 if (--sl->c == 0) {
01796 sl->threadid = 0;
01797 interlockedexchange (&sl->l, 0);
01798 }
01799 }
01800
01801 static FORCEINLINE int win32_try_lock (MLOCK_T *sl) {
01802 if (sl->l != 0) {
01803 if (sl->threadid == CURRENT_THREAD) {
01804 ++sl->c;
01805 return 1;
01806 }
01807 }
01808 else {
01809 if (!interlockedexchange(&sl->l, 1)){
01810 assert(!sl->threadid);
01811 sl->threadid = CURRENT_THREAD;
01812 sl->c = 1;
01813 return 1;
01814 }
01815 }
01816 return 0;
01817 }
01818
01819 #endif
01820 #else
01821
01822 #ifndef WIN32
01823
01824
01825 #define MLOCK_T pthread_mutex_t
01826 #define CURRENT_THREAD pthread_self()
01827 #define INITIAL_LOCK(sl) pthread_init_lock(sl)
01828 #define ACQUIRE_LOCK(sl) pthread_mutex_lock(sl)
01829 #define RELEASE_LOCK(sl) pthread_mutex_unlock(sl)
01830 #define TRY_LOCK(sl) (!pthread_mutex_trylock(sl))
01831
01832 static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
01833
01834
01835
01836 #ifdef linux
01837 #ifndef PTHREAD_MUTEX_RECURSIVE
01838 extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
01839 int __kind));
01840 #define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
01841 #define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
01842 #endif
01843 #endif
01844
01845 static int pthread_init_lock (MLOCK_T *sl) {
01846 pthread_mutexattr_t attr;
01847 if (pthread_mutexattr_init(&attr)) return 1;
01848 if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
01849 if (pthread_mutex_init(sl, &attr)) return 1;
01850 if (pthread_mutexattr_destroy(&attr)) return 1;
01851 return 0;
01852 }
01853
01854 #else
01855
01856 #define MLOCK_T CRITICAL_SECTION
01857 #define CURRENT_THREAD GetCurrentThreadId()
01858 #define INITIAL_LOCK(s) (!InitializeCriticalSectionAndSpinCount((s), 0x80000000|4000))
#define ACQUIRE_LOCK(s) (EnterCriticalSection((s)), 0)
#define RELEASE_LOCK(s) LeaveCriticalSection((s))
#define TRY_LOCK(s) TryEnterCriticalSection((s))
01862 #define NEED_GLOBAL_LOCK_INIT
01863
01864 static MLOCK_T malloc_global_mutex;
01865 static volatile long malloc_global_mutex_status;
01866
01867
01868 static void init_malloc_global_mutex() {
01869 for (;;) {
01870 long stat = malloc_global_mutex_status;
01871 if (stat > 0)
01872 return;
01873
01874 if (stat == 0 &&
01875 interlockedcompareexchange(&malloc_global_mutex_status, -1, 0) == 0) {
01876 InitializeCriticalSection(&malloc_global_mutex);
01877 interlockedexchange(&malloc_global_mutex_status,1);
01878 return;
01879 }
01880 SleepEx(0, FALSE);
01881 }
01882 }
01883
01884 #endif
01885 #endif
01886 #endif
01887
01888
01889
01890 #if USE_LOCKS > 1
/* When USE_LOCKS is defined to a value greater than 1, no lock implementation
   is supplied here: the user is expected to provide MLOCK_T, INITIAL_LOCK,
   ACQUIRE_LOCK, RELEASE_LOCK, TRY_LOCK and a static malloc_global_mutex. */
01897 #endif
01898
01899
01900
01901 #if USE_LOCKS
01902 #define USE_LOCK_BIT (2U)
01903 #else
01904 #define USE_LOCK_BIT (0U)
01905 #define INITIAL_LOCK(l)
01906 #endif
01907
01908 #if USE_LOCKS
01909 #ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
01910 #define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);
01911 #endif
01912 #ifndef RELEASE_MALLOC_GLOBAL_LOCK
01913 #define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);
01914 #endif
01915 #else
01916 #define ACQUIRE_MALLOC_GLOBAL_LOCK()
01917 #define RELEASE_MALLOC_GLOBAL_LOCK()
01918 #endif
02058 struct malloc_chunk {
02059 size_t prev_foot;
02060 size_t head;
02061 struct malloc_chunk* fd;
02062 struct malloc_chunk* bk;
02063 };
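/* Boundary-tag layout: head holds this chunk's size together with the status
   bits defined below (PINUSE_BIT, CINUSE_BIT); prev_foot replicates the size
   of the preceding chunk while that chunk is free.  The fd/bk links are
   meaningful only while this chunk is itself free and linked into a bin.
   When FOOTERS is enabled, the prev_foot of an in-use chunk instead carries
   an encoded pointer to the owning malloc_state (see mark_inuse_foot). */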
02064
02065 typedef struct malloc_chunk mchunk;
02066 typedef struct malloc_chunk* mchunkptr;
02067 typedef struct malloc_chunk* sbinptr;
02068 typedef unsigned int bindex_t;
02069 typedef unsigned int binmap_t;
02070 typedef unsigned int flag_t;
02071
02072
02073
02074 #define MCHUNK_SIZE (sizeof(mchunk))
02075
02076 #if FOOTERS
02077 #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
02078 #else
02079 #define CHUNK_OVERHEAD (SIZE_T_SIZE)
02080 #endif
02081
02082
02083 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
02084
02085 #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
02086
02087
02088 #define MIN_CHUNK_SIZE\
02089 ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
02090
02091
02092 #define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
02093 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
02094
02095 #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
02096
02097
02098 #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
02099 #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
02100
02101
02102 #define pad_request(req) \
02103 (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
02104
02105
02106 #define request2size(req) \
02107 (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
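/* Worked example (assuming 32-bit size_t, 8-byte alignment and no FOOTERS):
   CHUNK_OVERHEAD is 4, so request2size(20) = (20+4+7) & ~7 = 24, while any
   request below MIN_REQUEST (11 here) is rounded up to MIN_CHUNK_SIZE (16). */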
02120 #define PINUSE_BIT (SIZE_T_ONE)
02121 #define CINUSE_BIT (SIZE_T_TWO)
02122 #define FLAG4_BIT (SIZE_T_FOUR)
02123 #define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
02124 #define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
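/* PINUSE_BIT records whether the previous chunk is in use and CINUSE_BIT
   whether this one is; a chunk with neither bit set is treated as mmapped
   (see is_mmapped below).  FLAG4_BIT is a spare bit not used by the core
   allocator. */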
02125
02126
02127 #define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
02128
02129
02130 #define cinuse(p) ((p)->head & CINUSE_BIT)
02131 #define pinuse(p) ((p)->head & PINUSE_BIT)
02132 #define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT)
02133 #define is_mmapped(p) (((p)->head & INUSE_BITS) == 0)
02134
02135 #define chunksize(p) ((p)->head & ~(FLAG_BITS))
02136
02137 #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
02138
02139
02140 #define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
02141 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
02142
02143
02144 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
02145 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
02146
02147
02148 #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
02149
02150
02151 #define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
02152 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
02153
02154
02155 #define set_size_and_pinuse_of_free_chunk(p, s)\
02156 ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
02157
02158
02159 #define set_free_with_pinuse(p, s, n)\
02160 (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
02161
02162
02163 #define overhead_for(p)\
02164 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
02165
02166
02167 #if MMAP_CLEARS
02168 #define calloc_must_clear(p) (!is_mmapped(p))
02169 #else
02170 #define calloc_must_clear(p) (1)
02171 #endif
02264 struct malloc_tree_chunk {
02265
02266 size_t prev_foot;
02267 size_t head;
02268 struct malloc_tree_chunk* fd;
02269 struct malloc_tree_chunk* bk;
02270
02271 struct malloc_tree_chunk* child[2];
02272 struct malloc_tree_chunk* parent;
02273 bindex_t index;
02274 };
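/* Tree chunks represent free chunks large enough for the tree bins.  Their
   first four fields overlay those of malloc_chunk; child, parent and index
   place the chunk in a bitwise trie keyed on chunk size, and chunks of equal
   size are chained through fd/bk. */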
02275
02276 typedef struct malloc_tree_chunk tchunk;
02277 typedef struct malloc_tree_chunk* tchunkptr;
02278 typedef struct malloc_tree_chunk* tbinptr;
02279
02280
02281 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
02340 struct malloc_segment {
02341 char* base;
02342 size_t size;
02343 struct malloc_segment* next;
02344 flag_t sflags;
02345 };
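/* Each segment records one contiguous region obtained from the system (via
   MORECORE or MMAP).  Segments form a singly linked list headed by the seg
   field of malloc_state, and sflags marks mmapped or externally supplied
   regions. */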
02346
02347 #define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)
02348 #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
02349
02350 typedef struct malloc_segment msegment;
02351 typedef struct malloc_segment* msegmentptr;
02438 #define NSMALLBINS (32U)
02439 #define NTREEBINS (32U)
02440 #define SMALLBIN_SHIFT (3U)
02441 #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
02442 #define TREEBIN_SHIFT (8U)
02443 #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
02444 #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
02445 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
02446
02447 struct malloc_state {
02448 binmap_t smallmap;
02449 binmap_t treemap;
02450 size_t dvsize;
02451 size_t topsize;
02452 char* least_addr;
02453 mchunkptr dv;
02454 mchunkptr top;
02455 size_t trim_check;
02456 size_t release_checks;
02457 size_t magic;
02458 mchunkptr smallbins[(NSMALLBINS+1)*2];
02459 tbinptr treebins[NTREEBINS];
02460 size_t footprint;
02461 size_t max_footprint;
02462 flag_t mflags;
02463 #if USE_LOCKS
02464 MLOCK_T mutex;
02465 #endif
02466 msegment seg;
02467 void* extp;
02468 size_t exts;
02469 };
02470
02471 typedef struct malloc_state* mstate;
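/* Notes on malloc_state: smallmap and treemap are bitmaps of the non-empty
   bins; dv (the "designated victim") is the preferred chunk for servicing
   small requests, top the topmost chunk with topsize its size; least_addr is
   the lowest address obtained from the system, used by the ok_address check;
   magic must match mparams.magic when FOOTERS is in use; and smallbins is
   laid out as two words per bin that serve as the fd/bk of a list header. */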
02482 struct malloc_params {
02483 volatile size_t magic;
02484 size_t page_size;
02485 size_t granularity;
02486 size_t mmap_threshold;
02487 size_t trim_threshold;
02488 flag_t default_mflags;
02489 };
02490
02491 static struct malloc_params mparams;
02492
02493
02494 #define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
02495
02496 #if !ONLY_MSPACES
02497
02498
02499 static struct malloc_state _gm_;
02500 #define gm (&_gm_)
02501 #define is_global(M) ((M) == &_gm_)
02502
02503 #endif
02504
02505 #define is_initialized(M) ((M)->top != 0)
02511 #define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
02512 #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
02513 #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
02514
02515 #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
02516 #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
02517 #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
02518
02519 #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
02520 #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
02521
02522 #define set_lock(M,L)\
02523 ((M)->mflags = (L)?\
02524 ((M)->mflags | USE_LOCK_BIT) :\
02525 ((M)->mflags & ~USE_LOCK_BIT))
02526
02527
02528 #define page_align(S)\
02529 (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
02530
02531
02532 #define granularity_align(S)\
02533 (((S) + (mparams.granularity - SIZE_T_ONE))\
02534 & ~(mparams.granularity - SIZE_T_ONE))
02535
02536
02537
02538 #ifdef WIN32
02539 #define mmap_align(S) granularity_align(S)
02540 #else
02541 #define mmap_align(S) page_align(S)
02542 #endif
02543
02544
02545 #define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
02546
02547 #define is_page_aligned(S)\
02548 (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
02549 #define is_granularity_aligned(S)\
02550 (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
02551
02552
02553 #define segment_holds(S, A)\
02554 ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
02555
02556
02557 static msegmentptr segment_holding(mstate m, char* addr) {
02558 msegmentptr sp = &m->seg;
02559 for (;;) {
02560 if (addr >= sp->base && addr < sp->base + sp->size)
02561 return sp;
02562 if ((sp = sp->next) == 0)
02563 return 0;
02564 }
02565 }
02566
02567
02568 static int has_segment_link(mstate m, msegmentptr ss) {
02569 msegmentptr sp = &m->seg;
02570 for (;;) {
02571 if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
02572 return 1;
02573 if ((sp = sp->next) == 0)
02574 return 0;
02575 }
02576 }
02577
02578 #ifndef MORECORE_CANNOT_TRIM
02579 #define should_trim(M,s) ((s) > (M)->trim_check)
02580 #else
02581 #define should_trim(M,s) (0)
02582 #endif
02589 #define TOP_FOOT_SIZE\
02590 (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
02601 #if USE_LOCKS
02602
02603 #define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
02604 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
02605 #else
02606
02607 #ifndef PREACTION
02608 #define PREACTION(M) (0)
02609 #endif
02610
02611 #ifndef POSTACTION
02612 #define POSTACTION(M)
02613 #endif
02614
02615 #endif
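/* Every public entry point brackets its work as

       if (!PREACTION(m)) {
         ...
         POSTACTION(m);
       }

   so that, when locking is enabled, the state is locked for the duration and
   unlocked on exit (internal_mallinfo below follows this pattern). */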
02625 #if PROCEED_ON_ERROR
02626
02627
02628 int malloc_corruption_error_count;
02629
02630
02631 static void reset_on_error(mstate m);
02632
02633 #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
02634 #define USAGE_ERROR_ACTION(m, p)
02635
02636 #else
02637
02638 #ifndef CORRUPTION_ERROR_ACTION
02639 #define CORRUPTION_ERROR_ACTION(m) ABORT
02640 #endif
02641
02642 #ifndef USAGE_ERROR_ACTION
02643 #define USAGE_ERROR_ACTION(m,p) ABORT
02644 #endif
02645
02646 #endif
02647
02648
02649
02650 #if ! DEBUG
02651
02652 #define check_free_chunk(M,P)
02653 #define check_inuse_chunk(M,P)
02654 #define check_malloced_chunk(M,P,N)
02655 #define check_mmapped_chunk(M,P)
02656 #define check_malloc_state(M)
02657 #define check_top_chunk(M,P)
02658
02659 #else
02660 #define check_free_chunk(M,P) do_check_free_chunk(M,P)
02661 #define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
02662 #define check_top_chunk(M,P) do_check_top_chunk(M,P)
02663 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
02664 #define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
02665 #define check_malloc_state(M) do_check_malloc_state(M)
02666
02667 static void do_check_any_chunk(mstate m, mchunkptr p);
02668 static void do_check_top_chunk(mstate m, mchunkptr p);
02669 static void do_check_mmapped_chunk(mstate m, mchunkptr p);
02670 static void do_check_inuse_chunk(mstate m, mchunkptr p);
02671 static void do_check_free_chunk(mstate m, mchunkptr p);
02672 static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
02673 static void do_check_tree(mstate m, tchunkptr t);
02674 static void do_check_treebin(mstate m, bindex_t i);
02675 static void do_check_smallbin(mstate m, bindex_t i);
02676 static void do_check_malloc_state(mstate m);
02677 static int bin_find(mstate m, mchunkptr x);
02678 static size_t traverse_and_check(mstate m);
02679 #endif
02680
02681
02682
02683 #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
02684 #define small_index(s) ((s) >> SMALLBIN_SHIFT)
02685 #define small_index2size(i) ((i) << SMALLBIN_SHIFT)
02686 #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
02687
02688
02689 #define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
02690 #define treebin_at(M,i) (&((M)->treebins[i]))
02691
02692
02693 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
02694 #define compute_tree_index(S, I)\
02695 {\
02696 unsigned int X = S >> TREEBIN_SHIFT;\
02697 if (X == 0)\
02698 I = 0;\
02699 else if (X > 0xFFFF)\
02700 I = NTREEBINS-1;\
02701 else {\
02702 unsigned int K;\
02703 __asm__("bsrl\t%1, %0\n\t" : "=r" (K) : "g" (X));\
02704 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
02705 }\
02706 }
02707
02708 #elif defined (__INTEL_COMPILER)
02709 #define compute_tree_index(S, I)\
02710 {\
02711 size_t X = S >> TREEBIN_SHIFT;\
02712 if (X == 0)\
02713 I = 0;\
02714 else if (X > 0xFFFF)\
02715 I = NTREEBINS-1;\
02716 else {\
02717 unsigned int K = _bit_scan_reverse (X); \
02718 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
02719 }\
02720 }
02721
02722 #elif defined(_MSC_VER) && _MSC_VER>=1300
02723 #define compute_tree_index(S, I)\
02724 {\
02725 size_t X = S >> TREEBIN_SHIFT;\
02726 if (X == 0)\
02727 I = 0;\
02728 else if (X > 0xFFFF)\
02729 I = NTREEBINS-1;\
02730 else {\
02731 unsigned int K;\
02732 _BitScanReverse((DWORD *) &K, X);\
02733 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
02734 }\
02735 }
02736
02737 #else
02738 #define compute_tree_index(S, I)\
02739 {\
02740 size_t X = S >> TREEBIN_SHIFT;\
02741 if (X == 0)\
02742 I = 0;\
02743 else if (X > 0xFFFF)\
02744 I = NTREEBINS-1;\
02745 else {\
02746 unsigned int Y = (unsigned int)X;\
02747 unsigned int N = ((Y - 0x100) >> 16) & 8;\
02748 unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
02749 N += K;\
02750 N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
02751 K = 14 - N + ((Y <<= K) >> 15);\
02752 I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
02753 }\
02754 }
02755 #endif
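/* Worked example (illustrative): for a chunk size of 768, X = 768 >> 8 = 3,
   the highest set bit of X is K = 1, so I = (K << 1) + ((768 >> (K+7)) & 1)
   = 3.  Sizes map to two tree bins per power of two; consistently,
   minsize_for_tree_index(3) below evaluates to 768. */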
02756
02757
02758 #define bit_for_tree_index(i) \
02759 (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
02760
02761
02762 #define leftshift_for_tree_index(i) \
02763 ((i == NTREEBINS-1)? 0 : \
02764 ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
02765
02766
02767 #define minsize_for_tree_index(i) \
02768 ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
02769 (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
02775 #define idx2bit(i) ((binmap_t)(1) << (i))
02776
02777
02778 #define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
02779 #define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
02780 #define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
02781
02782 #define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
02783 #define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
02784 #define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
02785
02786
02787 #define least_bit(x) ((x) & -(x))
02788
02789
02790 #define left_bits(x) ((x<<1) | -(x<<1))
02791
02792
02793 #define same_or_left_bits(x) ((x) | -(x))
02794
02795
02796
02797 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
02798 #define compute_bit2idx(X, I)\
02799 {\
02800 unsigned int J;\
02801 __asm__("bsfl\t%1, %0\n\t" : "=r" (J) : "g" (X));\
02802 I = (bindex_t)J;\
02803 }
02804
02805 #elif defined (__INTEL_COMPILER)
02806 #define compute_bit2idx(X, I)\
02807 {\
02808 unsigned int J;\
02809 J = _bit_scan_forward (X); \
02810 I = (bindex_t)J;\
02811 }
02812
02813 #elif defined(_MSC_VER) && _MSC_VER>=1300
02814 #define compute_bit2idx(X, I)\
02815 {\
02816 unsigned int J;\
02817 _BitScanForward((DWORD *) &J, X);\
02818 I = (bindex_t)J;\
02819 }
02820
02821 #elif USE_BUILTIN_FFS
02822 #define compute_bit2idx(X, I) I = ffs(X)-1
02823
02824 #else
02825 #define compute_bit2idx(X, I)\
02826 {\
02827 unsigned int Y = X - 1;\
02828 unsigned int K = Y >> (16-4) & 16;\
02829 unsigned int N = K; Y >>= K;\
02830 N += K = Y >> (8-3) & 8; Y >>= K;\
02831 N += K = Y >> (4-2) & 4; Y >>= K;\
02832 N += K = Y >> (2-1) & 2; Y >>= K;\
02833 N += K = Y >> (1-0) & 1; Y >>= K;\
02834 I = (bindex_t)(N + Y);\
02835 }
02836 #endif
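/* Example (illustrative): for a bin map value of 0x14 (bits 2 and 4 set),
   least_bit gives 0x04 and compute_bit2idx turns that into index 2, i.e. the
   lowest-numbered non-empty bin selected by the mask. */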
02867 #if !INSECURE
02868
02869 #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
02870
02871 #define ok_next(p, n) ((char*)(p) < (char*)(n))
02872
02873 #define ok_inuse(p) is_inuse(p)
02874
02875 #define ok_pinuse(p) pinuse(p)
02876
02877 #else
02878 #define ok_address(M, a) (1)
02879 #define ok_next(b, n) (1)
02880 #define ok_inuse(p) (1)
02881 #define ok_pinuse(p) (1)
02882 #endif
02883
02884 #if (FOOTERS && !INSECURE)
02885
02886 #define ok_magic(M) ((M)->magic == mparams.magic)
02887 #else
02888 #define ok_magic(M) (1)
02889 #endif
02890
02891
02892
02893 #if !INSECURE
02894 #if defined(__GNUC__) && __GNUC__ >= 3
02895 #define RTCHECK(e) __builtin_expect(e, 1)
02896 #else
02897 #define RTCHECK(e) (e)
02898 #endif
02899 #else
02900 #define RTCHECK(e) (1)
02901 #endif
02902
02903
02904
02905 #if !FOOTERS
02906
02907 #define mark_inuse_foot(M,p,s)
02908
02909
02910
02911
02912 #define set_inuse(M,p,s)\
02913 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
02914 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
02915
02916
02917 #define set_inuse_and_pinuse(M,p,s)\
02918 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
02919 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
02920
02921
02922 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
02923 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
02924
02925 #else
02926
02927
02928 #define mark_inuse_foot(M,p,s)\
02929 (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
02930
02931 #define get_mstate_for(p)\
02932 ((mstate)(((mchunkptr)((char*)(p) +\
02933 (chunksize(p))))->prev_foot ^ mparams.magic))
02934
02935 #define set_inuse(M,p,s)\
02936 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
02937 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
02938 mark_inuse_foot(M,p,s))
02939
02940 #define set_inuse_and_pinuse(M,p,s)\
02941 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
02942 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
02943 mark_inuse_foot(M,p,s))
02944
02945 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
02946 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
02947 mark_inuse_foot(M, p, s))
02948
02949 #endif
02950
02951
02952
02953
02954 static int init_mparams(void) {
02955 #ifdef NEED_GLOBAL_LOCK_INIT
02956 if (malloc_global_mutex_status <= 0)
02957 init_malloc_global_mutex();
02958 #endif
02959
02960 ACQUIRE_MALLOC_GLOBAL_LOCK();
02961 if (mparams.magic == 0) {
02962 size_t magic;
02963 size_t psize;
02964 size_t gsize;
02965
02966 #ifndef WIN32
02967 psize = malloc_getpagesize;
02968 gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize);
02969 #else
02970 {
02971 SYSTEM_INFO system_info;
02972 GetSystemInfo(&system_info);
02973 psize = system_info.dwPageSize;
02974 gsize = ((DEFAULT_GRANULARITY != 0)?
02975 DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);
02976 }
02977 #endif
02985 if ((sizeof(size_t) != sizeof(char*)) ||
02986 (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
02987 (sizeof(int) < 4) ||
02988 (MALLOC_ALIGNMENT < (size_t)8U) ||
02989 ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
02990 ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||
02991 ((gsize & (gsize-SIZE_T_ONE)) != 0) ||
02992 ((psize & (psize-SIZE_T_ONE)) != 0))
02993 ABORT;
02994
02995 mparams.granularity = gsize;
02996 mparams.page_size = psize;
02997 mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
02998 mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
02999 #if MORECORE_CONTIGUOUS
03000 mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
03001 #else
03002 mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
03003 #endif
03004
03005 #if !ONLY_MSPACES
03006
03007 gm->mflags = mparams.default_mflags;
03008 INITIAL_LOCK(&gm->mutex);
03009 #endif
03010
03011 {
03012 #if USE_DEV_RANDOM
03013 int fd;
03014 unsigned char buf[sizeof(size_t)];
03015
03016 if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
03017 read(fd, buf, sizeof(buf)) == sizeof(buf)) {
03018 magic = *((size_t *) buf);
03019 close(fd);
03020 }
03021 else
03022 #endif
03023 #ifdef WIN32
03024 magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);
03025 #else
03026 magic = (size_t)(time(0) ^ (size_t)0x55555555U);
03027 #endif
03028 magic |= (size_t)8U;
03029 magic &= ~(size_t)7U;
03030 mparams.magic = magic;
03031 }
03032 }
03033
03034 RELEASE_MALLOC_GLOBAL_LOCK();
03035 return 1;
03036 }
03037
03038
03039 static int change_mparam(int param_number, int value) {
03040 size_t val;
03041 ensure_initialization();
03042 val = (value == -1)? MAX_SIZE_T : (size_t)value;
03043 switch(param_number) {
03044 case M_TRIM_THRESHOLD:
03045 mparams.trim_threshold = val;
03046 return 1;
03047 case M_GRANULARITY:
03048 if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
03049 mparams.granularity = val;
03050 return 1;
03051 }
03052 else
03053 return 0;
03054 case M_MMAP_THRESHOLD:
03055 mparams.mmap_threshold = val;
03056 return 1;
03057 default:
03058 return 0;
03059 }
03060 }
03061
03062 #if DEBUG
03063
03064
03065
03066 static void do_check_any_chunk(mstate m, mchunkptr p) {
03067 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
03068 assert(ok_address(m, p));
03069 }
03070
03071
03072 static void do_check_top_chunk(mstate m, mchunkptr p) {
03073 msegmentptr sp = segment_holding(m, (char*)p);
03074 size_t sz = p->head & ~INUSE_BITS;
03075 assert(sp != 0);
03076 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
03077 assert(ok_address(m, p));
03078 assert(sz == m->topsize);
03079 assert(sz > 0);
03080 assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
03081 assert(pinuse(p));
03082 assert(!pinuse(chunk_plus_offset(p, sz)));
03083 }
03084
03085
03086 static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
03087 size_t sz = chunksize(p);
03088 size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);
03089 assert(is_mmapped(p));
03090 assert(use_mmap(m));
03091 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
03092 assert(ok_address(m, p));
03093 assert(!is_small(sz));
03094 assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
03095 assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
03096 assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
03097 }
03098
03099
03100 static void do_check_inuse_chunk(mstate m, mchunkptr p) {
03101 do_check_any_chunk(m, p);
03102 assert(is_inuse(p));
03103 assert(next_pinuse(p));
03104
03105 assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
03106 if (is_mmapped(p))
03107 do_check_mmapped_chunk(m, p);
03108 }
03109
03110
03111 static void do_check_free_chunk(mstate m, mchunkptr p) {
03112 size_t sz = chunksize(p);
03113 mchunkptr next = chunk_plus_offset(p, sz);
03114 do_check_any_chunk(m, p);
03115 assert(!is_inuse(p));
03116 assert(!next_pinuse(p));
03117 assert (!is_mmapped(p));
03118 if (p != m->dv && p != m->top) {
03119 if (sz >= MIN_CHUNK_SIZE) {
03120 assert((sz & CHUNK_ALIGN_MASK) == 0);
03121 assert(is_aligned(chunk2mem(p)));
03122 assert(next->prev_foot == sz);
03123 assert(pinuse(p));
03124 assert (next == m->top || is_inuse(next));
03125 assert(p->fd->bk == p);
03126 assert(p->bk->fd == p);
03127 }
03128 else
03129 assert(sz == SIZE_T_SIZE);
03130 }
03131 }
03132
03133
03134 static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
03135 if (mem != 0) {
03136 mchunkptr p = mem2chunk(mem);
03137 size_t sz = p->head & ~INUSE_BITS;
03138 do_check_inuse_chunk(m, p);
03139 assert((sz & CHUNK_ALIGN_MASK) == 0);
03140 assert(sz >= MIN_CHUNK_SIZE);
03141 assert(sz >= s);
03142
03143 assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
03144 }
03145 }
03146
03147
03148 static void do_check_tree(mstate m, tchunkptr t) {
03149 tchunkptr head = 0;
03150 tchunkptr u = t;
03151 bindex_t tindex = t->index;
03152 size_t tsize = chunksize(t);
03153 bindex_t idx;
03154 compute_tree_index(tsize, idx);
03155 assert(tindex == idx);
03156 assert(tsize >= MIN_LARGE_SIZE);
03157 assert(tsize >= minsize_for_tree_index(idx));
03158 assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
03159
03160 do {
03161 do_check_any_chunk(m, ((mchunkptr)u));
03162 assert(u->index == tindex);
03163 assert(chunksize(u) == tsize);
03164 assert(!is_inuse(u));
03165 assert(!next_pinuse(u));
03166 assert(u->fd->bk == u);
03167 assert(u->bk->fd == u);
03168 if (u->parent == 0) {
03169 assert(u->child[0] == 0);
03170 assert(u->child[1] == 0);
03171 }
03172 else {
03173 assert(head == 0);
03174 head = u;
03175 assert(u->parent != u);
03176 assert (u->parent->child[0] == u ||
03177 u->parent->child[1] == u ||
03178 *((tbinptr*)(u->parent)) == u);
03179 if (u->child[0] != 0) {
03180 assert(u->child[0]->parent == u);
03181 assert(u->child[0] != u);
03182 do_check_tree(m, u->child[0]);
03183 }
03184 if (u->child[1] != 0) {
03185 assert(u->child[1]->parent == u);
03186 assert(u->child[1] != u);
03187 do_check_tree(m, u->child[1]);
03188 }
03189 if (u->child[0] != 0 && u->child[1] != 0) {
03190 assert(chunksize(u->child[0]) < chunksize(u->child[1]));
03191 }
03192 }
03193 u = u->fd;
03194 } while (u != t);
03195 assert(head != 0);
03196 }
03197
03198
03199 static void do_check_treebin(mstate m, bindex_t i) {
03200 tbinptr* tb = treebin_at(m, i);
03201 tchunkptr t = *tb;
03202 int empty = (m->treemap & (1U << i)) == 0;
03203 if (t == 0)
03204 assert(empty);
03205 if (!empty)
03206 do_check_tree(m, t);
03207 }
03208
03209
03210 static void do_check_smallbin(mstate m, bindex_t i) {
03211 sbinptr b = smallbin_at(m, i);
03212 mchunkptr p = b->bk;
03213 unsigned int empty = (m->smallmap & (1U << i)) == 0;
03214 if (p == b)
03215 assert(empty);
03216 if (!empty) {
03217 for (; p != b; p = p->bk) {
03218 size_t size = chunksize(p);
03219 mchunkptr q;
03220
03221 do_check_free_chunk(m, p);
03222
03223 assert(small_index(size) == i);
03224 assert(p->bk == b || chunksize(p->bk) == chunksize(p));
03225
03226 q = next_chunk(p);
03227 if (q->head != FENCEPOST_HEAD)
03228 do_check_inuse_chunk(m, q);
03229 }
03230 }
03231 }
03232
03233 /* Find x in a bin. Used in other check functions. */
03234 static int bin_find(mstate m, mchunkptr x) {
03235 size_t size = chunksize(x);
03236 if (is_small(size)) {
03237 bindex_t sidx = small_index(size);
03238 sbinptr b = smallbin_at(m, sidx);
03239 if (smallmap_is_marked(m, sidx)) {
03240 mchunkptr p = b;
03241 do {
03242 if (p == x)
03243 return 1;
03244 } while ((p = p->fd) != b);
03245 }
03246 }
03247 else {
03248 bindex_t tidx;
03249 compute_tree_index(size, tidx);
03250 if (treemap_is_marked(m, tidx)) {
03251 tchunkptr t = *treebin_at(m, tidx);
03252 size_t sizebits = size << leftshift_for_tree_index(tidx);
03253 while (t != 0 && chunksize(t) != size) {
03254 t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
03255 sizebits <<= 1;
03256 }
03257 if (t != 0) {
03258 tchunkptr u = t;
03259 do {
03260 if (u == (tchunkptr)x)
03261 return 1;
03262 } while ((u = u->fd) != t);
03263 }
03264 }
03265 }
03266 return 0;
03267 }
03268
03269 /* Traverse each chunk and check it; return total */
03270 static size_t traverse_and_check(mstate m) {
03271 size_t sum = 0;
03272 if (is_initialized(m)) {
03273 msegmentptr s = &m->seg;
03274 sum += m->topsize + TOP_FOOT_SIZE;
03275 while (s != 0) {
03276 mchunkptr q = align_as_chunk(s->base);
03277 mchunkptr lastq = 0;
03278 assert(pinuse(q));
03279 while (segment_holds(s, q) &&
03280 q != m->top && q->head != FENCEPOST_HEAD) {
03281 sum += chunksize(q);
03282 if (is_inuse(q)) {
03283 assert(!bin_find(m, q));
03284 do_check_inuse_chunk(m, q);
03285 }
03286 else {
03287 assert(q == m->dv || bin_find(m, q));
03288 assert(lastq == 0 || is_inuse(lastq));
03289 do_check_free_chunk(m, q);
03290 }
03291 lastq = q;
03292 q = next_chunk(q);
03293 }
03294 s = s->next;
03295 }
03296 }
03297 return sum;
03298 }
03299
03300 /* Check all properties of malloc_state. */
03301 static void do_check_malloc_state(mstate m) {
03302 bindex_t i;
03303 size_t total;
03304
03305 for (i = 0; i < NSMALLBINS; ++i)
03306 do_check_smallbin(m, i);
03307 for (i = 0; i < NTREEBINS; ++i)
03308 do_check_treebin(m, i);
03309
03310 if (m->dvsize != 0) {
03311 do_check_any_chunk(m, m->dv);
03312 assert(m->dvsize == chunksize(m->dv));
03313 assert(m->dvsize >= MIN_CHUNK_SIZE);
03314 assert(bin_find(m, m->dv) == 0);
03315 }
03316
03317 if (m->top != 0) {
03318 do_check_top_chunk(m, m->top);
03319
03320 assert(m->topsize > 0);
03321 assert(bin_find(m, m->top) == 0);
03322 }
03323
03324 total = traverse_and_check(m);
03325 assert(total <= m->footprint);
03326 assert(m->footprint <= m->max_footprint);
03327 }
03328 #endif
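/*
  Annotation (not part of the original listing): the do_check_* routines
  above are heap-consistency checks compiled in only when the enclosing
  DEBUG conditional is enabled; in ordinary builds the check_* wrappers
  used throughout this file (check_malloc_state, check_inuse_chunk,
  check_free_chunk, check_top_chunk, check_malloced_chunk,
  check_mmapped_chunk) are expected to expand to nothing.  A minimal
  sketch of exercising them, assuming the file is built with -DDEBUG=1:

    void* p = dlmalloc(128);   // every call re-verifies bins, top and dv
    dlfree(p);                 // corruption trips an assert() here

  The build flag and call sites above are illustrative assumptions only.
*/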
03329
03330
03331 /* Walk all segments and chunks to fill in the mallinfo fields */
03332 #if !NO_MALLINFO
03333 static struct mallinfo internal_mallinfo(mstate m) {
03334 struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
03335 ensure_initialization();
03336 if (!PREACTION(m)) {
03337 check_malloc_state(m);
03338 if (is_initialized(m)) {
03339 size_t nfree = SIZE_T_ONE; /* top always free */
03340 size_t mfree = m->topsize + TOP_FOOT_SIZE;
03341 size_t sum = mfree;
03342 msegmentptr s = &m->seg;
03343 while (s != 0) {
03344 mchunkptr q = align_as_chunk(s->base);
03345 while (segment_holds(s, q) &&
03346 q != m->top && q->head != FENCEPOST_HEAD) {
03347 size_t sz = chunksize(q);
03348 sum += sz;
03349 if (!is_inuse(q)) {
03350 mfree += sz;
03351 ++nfree;
03352 }
03353 q = next_chunk(q);
03354 }
03355 s = s->next;
03356 }
03357
03358 nm.arena = sum;
03359 nm.ordblks = nfree;
03360 nm.hblkhd = m->footprint - sum;
03361 nm.usmblks = m->max_footprint;
03362 nm.uordblks = m->footprint - mfree;
03363 nm.fordblks = mfree;
03364 nm.keepcost = m->topsize;
03365 }
03366
03367 POSTACTION(m);
03368 }
03369 return nm;
03370 }
03371 #endif
03372 /* Print footprint and in-use byte counts to stderr */
03373 static void internal_malloc_stats(mstate m) {
03374 ensure_initialization();
03375 if (!PREACTION(m)) {
03376 size_t maxfp = 0;
03377 size_t fp = 0;
03378 size_t used = 0;
03379 check_malloc_state(m);
03380 if (is_initialized(m)) {
03381 msegmentptr s = &m->seg;
03382 maxfp = m->max_footprint;
03383 fp = m->footprint;
03384 used = fp - (m->topsize + TOP_FOOT_SIZE);
03385
03386 while (s != 0) {
03387 mchunkptr q = align_as_chunk(s->base);
03388 while (segment_holds(s, q) &&
03389 q != m->top && q->head != FENCEPOST_HEAD) {
03390 if (!is_inuse(q))
03391 used -= chunksize(q);
03392 q = next_chunk(q);
03393 }
03394 s = s->next;
03395 }
03396 }
03397
03398 fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
03399 fprintf(stderr, "system bytes     = %10lu\n", (unsigned long)(fp));
03400 fprintf(stderr, "in use bytes     = %10lu\n", (unsigned long)(used));
03401
03402 POSTACTION(m);
03403 }
03404 }
03405
03406
03407
03408
03409
03410
03411
03412
03413
03414
03415 /* Link a free chunk into a smallbin */
03416 #define insert_small_chunk(M, P, S) {\
03417 bindex_t I = small_index(S);\
03418 mchunkptr B = smallbin_at(M, I);\
03419 mchunkptr F = B;\
03420 assert(S >= MIN_CHUNK_SIZE);\
03421 if (!smallmap_is_marked(M, I))\
03422 mark_smallmap(M, I);\
03423 else if (RTCHECK(ok_address(M, B->fd)))\
03424 F = B->fd;\
03425 else {\
03426 CORRUPTION_ERROR_ACTION(M);\
03427 }\
03428 B->fd = P;\
03429 F->bk = P;\
03430 P->fd = F;\
03431 P->bk = B;\
03432 }
03433
03434 /* Unlink a chunk from a smallbin */
03435 #define unlink_small_chunk(M, P, S) {\
03436 mchunkptr F = P->fd;\
03437 mchunkptr B = P->bk;\
03438 bindex_t I = small_index(S);\
03439 assert(P != B);\
03440 assert(P != F);\
03441 assert(chunksize(P) == small_index2size(I));\
03442 if (F == B)\
03443 clear_smallmap(M, I);\
03444 else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
03445 (B == smallbin_at(M,I) || ok_address(M, B)))) {\
03446 F->bk = B;\
03447 B->fd = F;\
03448 }\
03449 else {\
03450 CORRUPTION_ERROR_ACTION(M);\
03451 }\
03452 }
03453
03454 /* Unlink the first chunk from a smallbin */
03455 #define unlink_first_small_chunk(M, B, P, I) {\
03456 mchunkptr F = P->fd;\
03457 assert(P != B);\
03458 assert(P != F);\
03459 assert(chunksize(P) == small_index2size(I));\
03460 if (B == F)\
03461 clear_smallmap(M, I);\
03462 else if (RTCHECK(ok_address(M, F))) {\
03463 B->fd = F;\
03464 F->bk = B;\
03465 }\
03466 else {\
03467 CORRUPTION_ERROR_ACTION(M);\
03468 }\
03469 }
03470
03471
03472
03473
03474 /* Replace dv node, binning the old one. Used only when dvsize is known to be small. */
03475 #define replace_dv(M, P, S) {\
03476 size_t DVS = M->dvsize;\
03477 if (DVS != 0) {\
03478 mchunkptr DV = M->dv;\
03479 assert(is_small(DVS));\
03480 insert_small_chunk(M, DV, DVS);\
03481 }\
03482 M->dvsize = S;\
03483 M->dv = P;\
03484 }
03485
03486
03487
03488 /* Insert chunk into tree */
03489 #define insert_large_chunk(M, X, S) {\
03490 tbinptr* H;\
03491 bindex_t I;\
03492 compute_tree_index(S, I);\
03493 H = treebin_at(M, I);\
03494 X->index = I;\
03495 X->child[0] = X->child[1] = 0;\
03496 if (!treemap_is_marked(M, I)) {\
03497 mark_treemap(M, I);\
03498 *H = X;\
03499 X->parent = (tchunkptr)H;\
03500 X->fd = X->bk = X;\
03501 }\
03502 else {\
03503 tchunkptr T = *H;\
03504 size_t K = S << leftshift_for_tree_index(I);\
03505 for (;;) {\
03506 if (chunksize(T) != S) {\
03507 tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
03508 K <<= 1;\
03509 if (*C != 0)\
03510 T = *C;\
03511 else if (RTCHECK(ok_address(M, C))) {\
03512 *C = X;\
03513 X->parent = T;\
03514 X->fd = X->bk = X;\
03515 break;\
03516 }\
03517 else {\
03518 CORRUPTION_ERROR_ACTION(M);\
03519 break;\
03520 }\
03521 }\
03522 else {\
03523 tchunkptr F = T->fd;\
03524 if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
03525 T->fd = F->bk = X;\
03526 X->fd = F;\
03527 X->bk = T;\
03528 X->parent = 0;\
03529 break;\
03530 }\
03531 else {\
03532 CORRUPTION_ERROR_ACTION(M);\
03533 break;\
03534 }\
03535 }\
03536 }\
03537 }\
03538 }
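/*
  Annotation: insert_large_chunk places X in the treebin for its size
  class.  Chunks of equal size hang off a tree node in a circular fd/bk
  ring (those chained nodes carry parent == 0); distinct sizes descend
  the bitwise trie, selecting child[0] or child[1] from the current
  most-significant bit of the shifted size K, so inserts touch at most
  one node per remaining size bit.
*/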
03539
03540 /*
03541 Unlink steps for a tree chunk X:
03542 1. If X is a chained (same-size) node, unlink it from its fd/bk links and
03543 use its bk node as the replacement.
03544 2. If X was the last node of its size but not a leaf, replace it with its
03545 rightmost descendant (a leaf), so that the lefts and rights of remaining
03546 nodes still correspond to the size bit masks.
03547 3. If X has a parent link, relink its parent and children to the
03548 replacement (or to null if there is none).
03549 */
03557 #define unlink_large_chunk(M, X) {\
03558 tchunkptr XP = X->parent;\
03559 tchunkptr R;\
03560 if (X->bk != X) {\
03561 tchunkptr F = X->fd;\
03562 R = X->bk;\
03563 if (RTCHECK(ok_address(M, F))) {\
03564 F->bk = R;\
03565 R->fd = F;\
03566 }\
03567 else {\
03568 CORRUPTION_ERROR_ACTION(M);\
03569 }\
03570 }\
03571 else {\
03572 tchunkptr* RP;\
03573 if (((R = *(RP = &(X->child[1]))) != 0) ||\
03574 ((R = *(RP = &(X->child[0]))) != 0)) {\
03575 tchunkptr* CP;\
03576 while ((*(CP = &(R->child[1])) != 0) ||\
03577 (*(CP = &(R->child[0])) != 0)) {\
03578 R = *(RP = CP);\
03579 }\
03580 if (RTCHECK(ok_address(M, RP)))\
03581 *RP = 0;\
03582 else {\
03583 CORRUPTION_ERROR_ACTION(M);\
03584 }\
03585 }\
03586 }\
03587 if (XP != 0) {\
03588 tbinptr* H = treebin_at(M, X->index);\
03589 if (X == *H) {\
03590 if ((*H = R) == 0) \
03591 clear_treemap(M, X->index);\
03592 }\
03593 else if (RTCHECK(ok_address(M, XP))) {\
03594 if (XP->child[0] == X) \
03595 XP->child[0] = R;\
03596 else \
03597 XP->child[1] = R;\
03598 }\
03599 else\
03600 CORRUPTION_ERROR_ACTION(M);\
03601 if (R != 0) {\
03602 if (RTCHECK(ok_address(M, R))) {\
03603 tchunkptr C0, C1;\
03604 R->parent = XP;\
03605 if ((C0 = X->child[0]) != 0) {\
03606 if (RTCHECK(ok_address(M, C0))) {\
03607 R->child[0] = C0;\
03608 C0->parent = R;\
03609 }\
03610 else\
03611 CORRUPTION_ERROR_ACTION(M);\
03612 }\
03613 if ((C1 = X->child[1]) != 0) {\
03614 if (RTCHECK(ok_address(M, C1))) {\
03615 R->child[1] = C1;\
03616 C1->parent = R;\
03617 }\
03618 else\
03619 CORRUPTION_ERROR_ACTION(M);\
03620 }\
03621 }\
03622 else\
03623 CORRUPTION_ERROR_ACTION(M);\
03624 }\
03625 }\
03626 }
03627
03628
03629
03630 #define insert_chunk(M, P, S)\
03631 if (is_small(S)) insert_small_chunk(M, P, S)\
03632 else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
03633
03634 #define unlink_chunk(M, P, S)\
03635 if (is_small(S)) unlink_small_chunk(M, P, S)\
03636 else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
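/*
  Annotation: insert_chunk/unlink_chunk dispatch on is_small(S): small
  chunks go to the doubly-linked smallbins, larger ones to the treebins.
  The TP temporary exists only to cast the mchunkptr argument to
  tchunkptr for the tree macros.
*/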
03637
03638
03639
03640
03641 #if ONLY_MSPACES
03642 #define internal_malloc(m, b) mspace_malloc(m, b)
03643 #define internal_free(m, mem) mspace_free(m,mem);
03644 #else
03645 #if MSPACES
03646 #define internal_malloc(m, b)\
03647 (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
03648 #define internal_free(m, mem)\
03649 if (m == gm) dlfree(mem); else mspace_free(m,mem);
03650 #else
03651 #define internal_malloc(m, b) dlmalloc(b)
03652 #define internal_free(m, mem) dlfree(mem)
03653 #endif
03654 #endif
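/*
  Annotation: internal_malloc/internal_free route requests made on behalf
  of other routines (realloc, memalign, ialloc) to the right allocator:
  the mspace_* entry points when ONLY_MSPACES is set, dlmalloc/dlfree
  otherwise, with a runtime check against the global space gm when both
  MSPACES and the global space are compiled in.
*/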
03655
03656
03657
03658
03659
03660
03661
03662
03663
03664
03665
03666
03667 static void* mmap_alloc(mstate m, size_t nb) {
03668 size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
03669 if (mmsize > nb) {
03670 char* mm = (char*)(CALL_DIRECT_MMAP(mmsize));
03671 if (mm != CMFAIL) {
03672 size_t offset = align_offset(chunk2mem(mm));
03673 size_t psize = mmsize - offset - MMAP_FOOT_PAD;
03674 mchunkptr p = (mchunkptr)(mm + offset);
03675 p->prev_foot = offset;
03676 p->head = psize;
03677 mark_inuse_foot(m, p, psize);
03678 chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
03679 chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
03680
03681 if (m->least_addr == 0 || mm < m->least_addr)
03682 m->least_addr = mm;
03683 if ((m->footprint += mmsize) > m->max_footprint)
03684 m->max_footprint = m->footprint;
03685 assert(is_aligned(chunk2mem(p)));
03686 check_mmapped_chunk(m, p);
03687 return chunk2mem(p);
03688 }
03689 }
03690 return 0;
03691 }
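/*
  Annotation: layout of a directly mmapped chunk as set up above.
  prev_foot holds the distance from the start of the mapped region to the
  aligned chunk, so the original base can be recovered for munmap/mremap,
  and two trailing fencepost headers (FENCEPOST_HEAD, then 0) mark the end
  of usable space.  Such chunks are never placed in any bin; freeing them
  unmaps the region directly.
*/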
03692
03693 /* Resize a directly-mmapped chunk, using mremap when available */
03694 static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) {
03695 size_t oldsize = chunksize(oldp);
03696 if (is_small(nb))
03697 return 0;
03698
03699 if (oldsize >= nb + SIZE_T_SIZE &&
03700 (oldsize - nb) <= (mparams.granularity << 1))
03701 return oldp;
03702 else {
03703 size_t offset = oldp->prev_foot;
03704 size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
03705 size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
03706 char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
03707 oldmmsize, newmmsize, 1);
03708 if (cp != CMFAIL) {
03709 mchunkptr newp = (mchunkptr)(cp + offset);
03710 size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
03711 newp->head = psize;
03712 mark_inuse_foot(m, newp, psize);
03713 chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
03714 chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
03715
03716 if (cp < m->least_addr)
03717 m->least_addr = cp;
03718 if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
03719 m->max_footprint = m->footprint;
03720 check_mmapped_chunk(m, newp);
03721 return newp;
03722 }
03723 }
03724 return 0;
03725 }
03726
03727
03728
03729 /* Initialize top chunk and its size */
03730 static void init_top(mstate m, mchunkptr p, size_t psize) {
03731 /* Ensure alignment */
03732 size_t offset = align_offset(chunk2mem(p));
03733 p = (mchunkptr)((char*)p + offset);
03734 psize -= offset;
03735
03736 m->top = p;
03737 m->topsize = psize;
03738 p->head = psize | PINUSE_BIT;
03739
03740 chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
03741 m->trim_check = mparams.trim_threshold;
03742 }
03743
03744 /* Initialize bins for a new mstate that is otherwise zeroed out */
03745 static void init_bins(mstate m) {
03746 /* Establish circular links for smallbins */
03747 bindex_t i;
03748 for (i = 0; i < NSMALLBINS; ++i) {
03749 sbinptr bin = smallbin_at(m,i);
03750 bin->fd = bin->bk = bin;
03751 }
03752 }
03753
03754 #if PROCEED_ON_ERROR
03755
03756 /* Default corruption action: reset state and continue */
03757 static void reset_on_error(mstate m) {
03758 int i;
03759 ++malloc_corruption_error_count;
03760
03761 m->smallbins = m->treebins = 0;
03762 m->dvsize = m->topsize = 0;
03763 m->seg.base = 0;
03764 m->seg.size = 0;
03765 m->seg.next = 0;
03766 m->top = m->dv = 0;
03767 for (i = 0; i < NTREEBINS; ++i)
03768 *treebin_at(m, i) = 0;
03769 init_bins(m);
03770 }
03771 #endif
03772
03773 /* Allocate chunk and prepend remainder with chunk in successor base. */
03774 static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
03775 size_t nb) {
03776 mchunkptr p = align_as_chunk(newbase);
03777 mchunkptr oldfirst = align_as_chunk(oldbase);
03778 size_t psize = (char*)oldfirst - (char*)p;
03779 mchunkptr q = chunk_plus_offset(p, nb);
03780 size_t qsize = psize - nb;
03781 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
03782
03783 assert((char*)oldfirst > (char*)q);
03784 assert(pinuse(oldfirst));
03785 assert(qsize >= MIN_CHUNK_SIZE);
03786
03787
03788 if (oldfirst == m->top) {
03789 size_t tsize = m->topsize += qsize;
03790 m->top = q;
03791 q->head = tsize | PINUSE_BIT;
03792 check_top_chunk(m, q);
03793 }
03794 else if (oldfirst == m->dv) {
03795 size_t dsize = m->dvsize += qsize;
03796 m->dv = q;
03797 set_size_and_pinuse_of_free_chunk(q, dsize);
03798 }
03799 else {
03800 if (!is_inuse(oldfirst)) {
03801 size_t nsize = chunksize(oldfirst);
03802 unlink_chunk(m, oldfirst, nsize);
03803 oldfirst = chunk_plus_offset(oldfirst, nsize);
03804 qsize += nsize;
03805 }
03806 set_free_with_pinuse(q, qsize, oldfirst);
03807 insert_chunk(m, q, qsize);
03808 check_free_chunk(m, q);
03809 }
03810
03811 check_malloced_chunk(m, chunk2mem(p), nb);
03812 return chunk2mem(p);
03813 }
03814
03815 /* Add a segment to hold a new noncontiguous region */
03816 static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
03817 /* Determine locations and sizes of segment, fenceposts, old top */
03818 char* old_top = (char*)m->top;
03819 msegmentptr oldsp = segment_holding(m, old_top);
03820 char* old_end = oldsp->base + oldsp->size;
03821 size_t ssize = pad_request(sizeof(struct malloc_segment));
03822 char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
03823 size_t offset = align_offset(chunk2mem(rawsp));
03824 char* asp = rawsp + offset;
03825 char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
03826 mchunkptr sp = (mchunkptr)csp;
03827 msegmentptr ss = (msegmentptr)(chunk2mem(sp));
03828 mchunkptr tnext = chunk_plus_offset(sp, ssize);
03829 mchunkptr p = tnext;
03830 int nfences = 0;
03831
03832
03833 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
03834
03835 /* Set up segment record */
03836 assert(is_aligned(ss));
03837 set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
03838 *ss = m->seg;
03839 m->seg.base = tbase;
03840 m->seg.size = tsize;
03841 m->seg.sflags = mmapped;
03842 m->seg.next = ss;
03843
03844 /* Insert trailing fenceposts */
03845 for (;;) {
03846 mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
03847 p->head = FENCEPOST_HEAD;
03848 ++nfences;
03849 if ((char*)(&(nextp->head)) < old_end)
03850 p = nextp;
03851 else
03852 break;
03853 }
03854 assert(nfences >= 2);
03855
03856 /* Insert the rest of old top into a bin as an ordinary free chunk */
03857 if (csp != old_top) {
03858 mchunkptr q = (mchunkptr)old_top;
03859 size_t psize = csp - old_top;
03860 mchunkptr tn = chunk_plus_offset(q, psize);
03861 set_free_with_pinuse(q, psize, tn);
03862 insert_chunk(m, q, psize);
03863 }
03864
03865 check_top_chunk(m, m->top);
03866 }
03867
03868
03869
03870 /* Get memory from system using MORECORE or MMAP */
03871 static void* sys_alloc(mstate m, size_t nb) {
03872 char* tbase = CMFAIL;
03873 size_t tsize = 0;
03874 flag_t mmap_flag = 0;
03875
03876 ensure_initialization();
03877
03878 /* Directly map large chunks, but only if already initialized */
03879 if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {
03880 void* mem = mmap_alloc(m, nb);
03881 if (mem != 0)
03882 return mem;
03883 }
03884
03885
03886 /*
03887 Try getting memory in any of three ways (in most-preferred to
03888 least-preferred order):
03889 1. A call to MORECORE that can normally contiguously extend memory
03890 (disabled if not MORECORE_CONTIGUOUS or a previous contiguous call failed).
03891 2. A call to MMAP for new space (disabled if not HAVE_MMAP); this also
03892 serves as the noncontiguous backup when MORECORE cannot extend the heap.
03893 3. A call to MORECORE that cannot usually contiguously extend memory
03894 (disabled if not HAVE_MORECORE).
03895 In all cases, enough is requested to ensure nb bytes can be malloced on
03896 success: padding for the top foot and alignment, rounded to a granularity unit.
03897 */
03907 if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
03908 char* br = CMFAIL;
03909 msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
03910 size_t asize = 0;
03911 ACQUIRE_MALLOC_GLOBAL_LOCK();
03912
03913 if (ss == 0) {
03914 char* base = (char*)CALL_MORECORE(0);
03915 if (base != CMFAIL) {
03916 asize = granularity_align(nb + SYS_ALLOC_PADDING);
03917
03918 if (!is_page_aligned(base))
03919 asize += (page_align((size_t)base) - (size_t)base);
03920
03921 if (asize < HALF_MAX_SIZE_T &&
03922 (br = (char*)(CALL_MORECORE(asize))) == base) {
03923 tbase = base;
03924 tsize = asize;
03925 }
03926 }
03927 }
03928 else {
03929
03930 asize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING);
03931
03932 if (asize < HALF_MAX_SIZE_T &&
03933 (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
03934 tbase = br;
03935 tsize = asize;
03936 }
03937 }
03938
03939 if (tbase == CMFAIL) {
03940 if (br != CMFAIL) {
03941 if (asize < HALF_MAX_SIZE_T &&
03942 asize < nb + SYS_ALLOC_PADDING) {
03943 size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - asize);
03944 if (esize < HALF_MAX_SIZE_T) {
03945 char* end = (char*)CALL_MORECORE(esize);
03946 if (end != CMFAIL)
03947 asize += esize;
03948 else {
03949 (void) CALL_MORECORE(-asize);
03950 br = CMFAIL;
03951 }
03952 }
03953 }
03954 }
03955 if (br != CMFAIL) {
03956 tbase = br;
03957 tsize = asize;
03958 }
03959 else
03960 disable_contiguous(m);
03961 }
03962
03963 RELEASE_MALLOC_GLOBAL_LOCK();
03964 }
03965
03966 if (HAVE_MMAP && tbase == CMFAIL) {
03967 size_t rsize = granularity_align(nb + SYS_ALLOC_PADDING);
03968 if (rsize > nb) {
03969 char* mp = (char*)(CALL_MMAP(rsize));
03970 if (mp != CMFAIL) {
03971 tbase = mp;
03972 tsize = rsize;
03973 mmap_flag = USE_MMAP_BIT;
03974 }
03975 }
03976 }
03977
03978 if (HAVE_MORECORE && tbase == CMFAIL) {
03979 size_t asize = granularity_align(nb + SYS_ALLOC_PADDING);
03980 if (asize < HALF_MAX_SIZE_T) {
03981 char* br = CMFAIL;
03982 char* end = CMFAIL;
03983 ACQUIRE_MALLOC_GLOBAL_LOCK();
03984 br = (char*)(CALL_MORECORE(asize));
03985 end = (char*)(CALL_MORECORE(0));
03986 RELEASE_MALLOC_GLOBAL_LOCK();
03987 if (br != CMFAIL && end != CMFAIL && br < end) {
03988 size_t ssize = end - br;
03989 if (ssize > nb + TOP_FOOT_SIZE) {
03990 tbase = br;
03991 tsize = ssize;
03992 }
03993 }
03994 }
03995 }
03996
03997 if (tbase != CMFAIL) {
03998
03999 if ((m->footprint += tsize) > m->max_footprint)
04000 m->max_footprint = m->footprint;
04001
04002 if (!is_initialized(m)) {
04003 if (m->least_addr == 0 || tbase < m->least_addr)
04004 m->least_addr = tbase;
04005 m->seg.base = tbase;
04006 m->seg.size = tsize;
04007 m->seg.sflags = mmap_flag;
04008 m->magic = mparams.magic;
04009 m->release_checks = MAX_RELEASE_CHECK_RATE;
04010 init_bins(m);
04011 #if !ONLY_MSPACES
04012 if (is_global(m))
04013 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
04014 else
04015 #endif
04016 {
04017
04018 mchunkptr mn = next_chunk(mem2chunk(m));
04019 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
04020 }
04021 }
04022
04023 else {
04024
04025 msegmentptr sp = &m->seg;
04026
04027 while (sp != 0 && tbase != sp->base + sp->size)
04028 sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
04029 if (sp != 0 &&
04030 !is_extern_segment(sp) &&
04031 (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
04032 segment_holds(sp, m->top)) {
04033 sp->size += tsize;
04034 init_top(m, m->top, m->topsize + tsize);
04035 }
04036 else {
04037 if (tbase < m->least_addr)
04038 m->least_addr = tbase;
04039 sp = &m->seg;
04040 while (sp != 0 && sp->base != tbase + tsize)
04041 sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
04042 if (sp != 0 &&
04043 !is_extern_segment(sp) &&
04044 (sp->sflags & USE_MMAP_BIT) == mmap_flag) {
04045 char* oldbase = sp->base;
04046 sp->base = tbase;
04047 sp->size += tsize;
04048 return prepend_alloc(m, tbase, oldbase, nb);
04049 }
04050 else
04051 add_segment(m, tbase, tsize, mmap_flag);
04052 }
04053 }
04054
04055 if (nb < m->topsize) {
04056 size_t rsize = m->topsize -= nb;
04057 mchunkptr p = m->top;
04058 mchunkptr r = m->top = chunk_plus_offset(p, nb);
04059 r->head = rsize | PINUSE_BIT;
04060 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
04061 check_top_chunk(m, m->top);
04062 check_malloced_chunk(m, chunk2mem(p), nb);
04063 return chunk2mem(p);
04064 }
04065 }
04066
04067 MALLOC_FAILURE_ACTION;
04068 return 0;
04069 }
04070
04071
04072
04073 /* Unmap and unlink any mmapped segments that don't contain used chunks */
04074 static size_t release_unused_segments(mstate m) {
04075 size_t released = 0;
04076 int nsegs = 0;
04077 msegmentptr pred = &m->seg;
04078 msegmentptr sp = pred->next;
04079 while (sp != 0) {
04080 char* base = sp->base;
04081 size_t size = sp->size;
04082 msegmentptr next = sp->next;
04083 ++nsegs;
04084 if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
04085 mchunkptr p = align_as_chunk(base);
04086 size_t psize = chunksize(p);
04087
04088 if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
04089 tchunkptr tp = (tchunkptr)p;
04090 assert(segment_holds(sp, (char*)sp));
04091 if (p == m->dv) {
04092 m->dv = 0;
04093 m->dvsize = 0;
04094 }
04095 else {
04096 unlink_large_chunk(m, tp);
04097 }
04098 if (CALL_MUNMAP(base, size) == 0) {
04099 released += size;
04100 m->footprint -= size;
04101
04102 sp = pred;
04103 sp->next = next;
04104 }
04105 else {
04106 insert_large_chunk(m, tp, psize);
04107 }
04108 }
04109 }
04110 if (NO_SEGMENT_TRAVERSAL)
04111 break;
04112 pred = sp;
04113 sp = next;
04114 }
04115
04116 m->release_checks = ((nsegs > MAX_RELEASE_CHECK_RATE)?
04117 nsegs : MAX_RELEASE_CHECK_RATE);
04118 return released;
04119 }
04120
04121 static int sys_trim(mstate m, size_t pad) {
04122 size_t released = 0;
04123 ensure_initialization();
04124 if (pad < MAX_REQUEST && is_initialized(m)) {
04125 pad += TOP_FOOT_SIZE;
04126
04127 if (m->topsize > pad) {
04128
04129 size_t unit = mparams.granularity;
04130 size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
04131 SIZE_T_ONE) * unit;
04132 msegmentptr sp = segment_holding(m, (char*)m->top);
04133
04134 if (!is_extern_segment(sp)) {
04135 if (is_mmapped_segment(sp)) {
04136 if (HAVE_MMAP &&
04137 sp->size >= extra &&
04138 !has_segment_link(m, sp)) {
04139 size_t newsize = sp->size - extra;
04140
04141 if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
04142 (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
04143 released = extra;
04144 }
04145 }
04146 }
04147 else if (HAVE_MORECORE) {
04148 if (extra >= HALF_MAX_SIZE_T)
04149 extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
04150 ACQUIRE_MALLOC_GLOBAL_LOCK();
04151 {
04152
04153 char* old_br = (char*)(CALL_MORECORE(0));
04154 if (old_br == sp->base + sp->size) {
04155 char* rel_br = (char*)(CALL_MORECORE(-extra));
04156 char* new_br = (char*)(CALL_MORECORE(0));
04157 if (rel_br != CMFAIL && new_br < old_br)
04158 released = old_br - new_br;
04159 }
04160 }
04161 RELEASE_MALLOC_GLOBAL_LOCK();
04162 }
04163 }
04164
04165 if (released != 0) {
04166 sp->size -= released;
04167 m->footprint -= released;
04168 init_top(m, m->top, m->topsize - released);
04169 check_top_chunk(m, m->top);
04170 }
04171 }
04172
04173
04174 if (HAVE_MMAP)
04175 released += release_unused_segments(m);
04176
04177
04178 if (released == 0 && m->topsize > m->trim_check)
04179 m->trim_check = MAX_SIZE_T;
04180 }
04181
04182 return (released != 0)? 1 : 0;
04183 }
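/*
  Annotation: sys_trim gives memory back to the system when top exceeds
  the requested pad.  An mmapped top segment is shrunk by mremap or by
  munmapping its tail, an sbrk-style segment by a negative MORECORE call,
  and mmapped segments that no longer hold live chunks are released
  through release_unused_segments.
*/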
04184
04185
04186
04187
04188 /* Allocate a large request from the best fitting chunk in a treebin */
04189 static void* tmalloc_large(mstate m, size_t nb) {
04190 tchunkptr v = 0;
04191 size_t rsize = -nb; /* Unsigned negation */
04192 tchunkptr t;
04193 bindex_t idx;
04194 compute_tree_index(nb, idx);
04195 if ((t = *treebin_at(m, idx)) != 0) {
04196
04197 size_t sizebits = nb << leftshift_for_tree_index(idx);
04198 tchunkptr rst = 0;
04199 for (;;) {
04200 tchunkptr rt;
04201 size_t trem = chunksize(t) - nb;
04202 if (trem < rsize) {
04203 v = t;
04204 if ((rsize = trem) == 0)
04205 break;
04206 }
04207 rt = t->child[1];
04208 t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
04209 if (rt != 0 && rt != t)
04210 rst = rt;
04211 if (t == 0) {
04212 t = rst;
04213 break;
04214 }
04215 sizebits <<= 1;
04216 }
04217 }
04218 if (t == 0 && v == 0) {
04219 binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
04220 if (leftbits != 0) {
04221 bindex_t i;
04222 binmap_t leastbit = least_bit(leftbits);
04223 compute_bit2idx(leastbit, i);
04224 t = *treebin_at(m, i);
04225 }
04226 }
04227
04228 while (t != 0) {
04229 size_t trem = chunksize(t) - nb;
04230 if (trem < rsize) {
04231 rsize = trem;
04232 v = t;
04233 }
04234 t = leftmost_child(t);
04235 }
04236
04237
04238 if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
04239 if (RTCHECK(ok_address(m, v))) {
04240 mchunkptr r = chunk_plus_offset(v, nb);
04241 assert(chunksize(v) == rsize + nb);
04242 if (RTCHECK(ok_next(v, r))) {
04243 unlink_large_chunk(m, v);
04244 if (rsize < MIN_CHUNK_SIZE)
04245 set_inuse_and_pinuse(m, v, (rsize + nb));
04246 else {
04247 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
04248 set_size_and_pinuse_of_free_chunk(r, rsize);
04249 insert_chunk(m, r, rsize);
04250 }
04251 return chunk2mem(v);
04252 }
04253 }
04254 CORRUPTION_ERROR_ACTION(m);
04255 }
04256 return 0;
04257 }
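/*
  Annotation: tmalloc_large is a best-fit search.  It walks the treebin
  for the request's size class guided by the size bits, remembers the
  smallest chunk seen that still fits (v, with remainder rsize), then
  falls back to the leftmost chunk of the next nonempty larger bin.  The
  winner is used only if it fits better than the current dv chunk, and a
  remainder of at least MIN_CHUNK_SIZE is split off and re-binned.
*/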
04258
04259 /* Allocate a small request from the best fitting chunk in a treebin */
04260 static void* tmalloc_small(mstate m, size_t nb) {
04261 tchunkptr t, v;
04262 size_t rsize;
04263 bindex_t i;
04264 binmap_t leastbit = least_bit(m->treemap);
04265 compute_bit2idx(leastbit, i);
04266 v = t = *treebin_at(m, i);
04267 rsize = chunksize(t) - nb;
04268
04269 while ((t = leftmost_child(t)) != 0) {
04270 size_t trem = chunksize(t) - nb;
04271 if (trem < rsize) {
04272 rsize = trem;
04273 v = t;
04274 }
04275 }
04276
04277 if (RTCHECK(ok_address(m, v))) {
04278 mchunkptr r = chunk_plus_offset(v, nb);
04279 assert(chunksize(v) == rsize + nb);
04280 if (RTCHECK(ok_next(v, r))) {
04281 unlink_large_chunk(m, v);
04282 if (rsize < MIN_CHUNK_SIZE)
04283 set_inuse_and_pinuse(m, v, (rsize + nb));
04284 else {
04285 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
04286 set_size_and_pinuse_of_free_chunk(r, rsize);
04287 replace_dv(m, r, rsize);
04288 }
04289 return chunk2mem(v);
04290 }
04291 }
04292
04293 CORRUPTION_ERROR_ACTION(m);
04294 return 0;
04295 }
04296
04297
04298 /* Try to resize in place (or via mremap for mmapped chunks); else malloc-copy-free */
04299 static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
04300 if (bytes >= MAX_REQUEST) {
04301 MALLOC_FAILURE_ACTION;
04302 return 0;
04303 }
04304 if (!PREACTION(m)) {
04305 mchunkptr oldp = mem2chunk(oldmem);
04306 size_t oldsize = chunksize(oldp);
04307 mchunkptr next = chunk_plus_offset(oldp, oldsize);
04308 mchunkptr newp = 0;
04309 void* extra = 0;
04310
04311
04312 /* Try to either shrink or extend into top. Else malloc-copy-free */
04313 if (RTCHECK(ok_address(m, oldp) && ok_inuse(oldp) &&
04314 ok_next(oldp, next) && ok_pinuse(next))) {
04315 size_t nb = request2size(bytes);
04316 if (is_mmapped(oldp))
04317 newp = mmap_resize(m, oldp, nb);
04318 else if (oldsize >= nb) {
04319 size_t rsize = oldsize - nb;
04320 newp = oldp;
04321 if (rsize >= MIN_CHUNK_SIZE) {
04322 mchunkptr remainder = chunk_plus_offset(newp, nb);
04323 set_inuse(m, newp, nb);
04324 set_inuse_and_pinuse(m, remainder, rsize);
04325 extra = chunk2mem(remainder);
04326 }
04327 }
04328 else if (next == m->top && oldsize + m->topsize > nb) {
04329
04330 size_t newsize = oldsize + m->topsize;
04331 size_t newtopsize = newsize - nb;
04332 mchunkptr newtop = chunk_plus_offset(oldp, nb);
04333 set_inuse(m, oldp, nb);
04334 newtop->head = newtopsize |PINUSE_BIT;
04335 m->top = newtop;
04336 m->topsize = newtopsize;
04337 newp = oldp;
04338 }
04339 }
04340 else {
04341 USAGE_ERROR_ACTION(m, oldmem);
04342 POSTACTION(m);
04343 return 0;
04344 }
04345 #if DEBUG
04346 if (newp != 0) {
04347 check_inuse_chunk(m, newp);
04348 }
04349 #endif
04350
04351 POSTACTION(m);
04352
04353 if (newp != 0) {
04354 if (extra != 0) {
04355 internal_free(m, extra);
04356 }
04357 return chunk2mem(newp);
04358 }
04359 else {
04360 void* newmem = internal_malloc(m, bytes);
04361 if (newmem != 0) {
04362 size_t oc = oldsize - overhead_for(oldp);
04363 memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
04364 internal_free(m, oldmem);
04365 }
04366 return newmem;
04367 }
04368 }
04369 return 0;
04370 }
04371
04372
04373 /* Allocate a chunk whose payload is aligned to at least the given alignment */
04374 static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
04375 if (alignment <= MALLOC_ALIGNMENT)
04376 return internal_malloc(m, bytes);
04377 if (alignment < MIN_CHUNK_SIZE)
04378 alignment = MIN_CHUNK_SIZE;
04379 if ((alignment & (alignment-SIZE_T_ONE)) != 0) {
04380 size_t a = MALLOC_ALIGNMENT << 1;
04381 while (a < alignment) a <<= 1;
04382 alignment = a;
04383 }
04384
04385 if (bytes >= MAX_REQUEST - alignment) {
04386 if (m != 0) {
04387 MALLOC_FAILURE_ACTION;
04388 }
04389 }
04390 else {
04391 size_t nb = request2size(bytes);
04392 size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
04393 char* mem = (char*)internal_malloc(m, req);
04394 if (mem != 0) {
04395 void* leader = 0;
04396 void* trailer = 0;
04397 mchunkptr p = mem2chunk(mem);
04398
04399 if (PREACTION(m)) return 0;
04400 if ((((size_t)(mem)) % alignment) != 0) {
04401 /*
04402 Find an aligned spot inside the chunk. Since we need to give back leading
04403 space in a chunk of at least MIN_CHUNK_SIZE, if the first calculation
04404 places us at a spot with less than MIN_CHUNK_SIZE leader we can move to
04405 the next aligned spot; enough total room was requested to make this
04406 always possible.
04407 */
04409 char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
04410 alignment -
04411 SIZE_T_ONE)) &
04412 -alignment));
04413 char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
04414 br : br+alignment;
04415 mchunkptr newp = (mchunkptr)pos;
04416 size_t leadsize = pos - (char*)(p);
04417 size_t newsize = chunksize(p) - leadsize;
04418
04419 if (is_mmapped(p)) {
04420 newp->prev_foot = p->prev_foot + leadsize;
04421 newp->head = newsize;
04422 }
04423 else {
04424 set_inuse(m, newp, newsize);
04425 set_inuse(m, p, leadsize);
04426 leader = chunk2mem(p);
04427 }
04428 p = newp;
04429 }
04430
04431
04432 if (!is_mmapped(p)) {
04433 size_t size = chunksize(p);
04434 if (size > nb + MIN_CHUNK_SIZE) {
04435 size_t remainder_size = size - nb;
04436 mchunkptr remainder = chunk_plus_offset(p, nb);
04437 set_inuse(m, p, nb);
04438 set_inuse(m, remainder, remainder_size);
04439 trailer = chunk2mem(remainder);
04440 }
04441 }
04442
04443 assert (chunksize(p) >= nb);
04444 assert((((size_t)(chunk2mem(p))) % alignment) == 0);
04445 check_inuse_chunk(m, p);
04446 POSTACTION(m);
04447 if (leader != 0) {
04448 internal_free(m, leader);
04449 }
04450 if (trailer != 0) {
04451 internal_free(m, trailer);
04452 }
04453 return chunk2mem(p);
04454 }
04455 }
04456 return 0;
04457 }
04458
04459
04460
04461 static void** ialloc(mstate m,
04462 size_t n_elements,
04463 size_t* sizes,
04464 int opts,
04465 void* chunks[]) {
04466
04467 /*
04468 ialloc provides common support for the independent_X routines, handling
04469 all of the combinations that can result. The opts arg has:
04470 bit 0 set if all elements are the same size (using sizes[0]),
04471 bit 1 set if elements should be zeroed.
04472 */
04475 size_t element_size;
04476 size_t contents_size;
04477 size_t array_size;
04478 void* mem;
04479 mchunkptr p;
04480 size_t remainder_size;
04481 void** marray;
04482 mchunkptr array_chunk;
04483 flag_t was_enabled;
04484 size_t size;
04485 size_t i;
04486
04487 ensure_initialization();
04488
04489 if (chunks != 0) {
04490 if (n_elements == 0)
04491 return chunks;
04492 marray = chunks;
04493 array_size = 0;
04494 }
04495 else {
04496
04497 if (n_elements == 0)
04498 return (void**)internal_malloc(m, 0);
04499 marray = 0;
04500 array_size = request2size(n_elements * (sizeof(void*)));
04501 }
04502
04503
04504 if (opts & 0x1) {
04505 element_size = request2size(*sizes);
04506 contents_size = n_elements * element_size;
04507 }
04508 else {
04509 element_size = 0;
04510 contents_size = 0;
04511 for (i = 0; i != n_elements; ++i)
04512 contents_size += request2size(sizes[i]);
04513 }
04514
04515 size = contents_size + array_size;
04516
04517
04518
04519
04520
04521
04522 was_enabled = use_mmap(m);
04523 disable_mmap(m);
04524 mem = internal_malloc(m, size - CHUNK_OVERHEAD);
04525 if (was_enabled)
04526 enable_mmap(m);
04527 if (mem == 0)
04528 return 0;
04529
04530 if (PREACTION(m)) return 0;
04531 p = mem2chunk(mem);
04532 remainder_size = chunksize(p);
04533
04534 assert(!is_mmapped(p));
04535
04536 if (opts & 0x2) {
04537 memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
04538 }
04539
04540
04541 if (marray == 0) {
04542 size_t array_chunk_size;
04543 array_chunk = chunk_plus_offset(p, contents_size);
04544 array_chunk_size = remainder_size - contents_size;
04545 marray = (void**) (chunk2mem(array_chunk));
04546 set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
04547 remainder_size = contents_size;
04548 }
04549
04550
04551 for (i = 0; ; ++i) {
04552 marray[i] = chunk2mem(p);
04553 if (i != n_elements-1) {
04554 if (element_size != 0)
04555 size = element_size;
04556 else
04557 size = request2size(sizes[i]);
04558 remainder_size -= size;
04559 set_size_and_pinuse_of_inuse_chunk(m, p, size);
04560 p = chunk_plus_offset(p, size);
04561 }
04562 else {
04563 set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
04564 break;
04565 }
04566 }
04567
04568 #if DEBUG
04569 if (marray != chunks) {
04570
04571 if (element_size != 0) {
04572 assert(remainder_size == element_size);
04573 }
04574 else {
04575 assert(remainder_size == request2size(sizes[i]));
04576 }
04577 check_inuse_chunk(m, mem2chunk(marray));
04578 }
04579 for (i = 0; i != n_elements; ++i)
04580 check_inuse_chunk(m, mem2chunk(marray[i]));
04581
04582 #endif
04583
04584 POSTACTION(m);
04585 return marray;
04586 }
04587
04588
04589
04590
04591 #if !ONLY_MSPACES
04592
04593 void* dlmalloc(size_t bytes) {
04594
04595 /*
04596 Basic algorithm:
04597 If a small request (< 256 bytes minus per-chunk overhead):
04598 1. If one exists, use a remainderless chunk in the associated smallbin.
04599 2. If it is big enough, use the dv chunk, which is normally the chunk
04600 adjacent to the one used for the most recent small request.
04601 3. If one exists, split the smallest available chunk in a bin, saving the
04602 remainder in dv.
04603 4. If it is big enough, use the top chunk.
04604 5. If available, get memory from the system and use it.
04605 Otherwise, for a large request:
04606 1. Find the smallest available binned chunk that fits, and use it if it is
04607 better fitting than the dv chunk, splitting if necessary.
04608 2. If better fitting than any binned chunk, use the dv chunk.
04609 3. If it is big enough, use the top chunk.
04610 4. If the request size >= mmap threshold, try to directly mmap this chunk.
04611 5. If available, get memory from the system and use it.
04612 The goto's ensure that POSTACTION occurs along all paths.
04613 */
04616
04617 #if USE_LOCKS
04618 ensure_initialization();
04619 #endif
04620
04621 if (!PREACTION(gm)) {
04622 void* mem;
04623 size_t nb;
04624 if (bytes <= MAX_SMALL_REQUEST) {
04625 bindex_t idx;
04626 binmap_t smallbits;
04627 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
04628 idx = small_index(nb);
04629 smallbits = gm->smallmap >> idx;
04630
04631 if ((smallbits & 0x3U) != 0) {
04632 mchunkptr b, p;
04633 idx += ~smallbits & 1;
04634 b = smallbin_at(gm, idx);
04635 p = b->fd;
04636 assert(chunksize(p) == small_index2size(idx));
04637 unlink_first_small_chunk(gm, b, p, idx);
04638 set_inuse_and_pinuse(gm, p, small_index2size(idx));
04639 mem = chunk2mem(p);
04640 check_malloced_chunk(gm, mem, nb);
04641 goto postaction;
04642 }
04643
04644 else if (nb > gm->dvsize) {
04645 if (smallbits != 0) {
04646 mchunkptr b, p, r;
04647 size_t rsize;
04648 bindex_t i;
04649 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
04650 binmap_t leastbit = least_bit(leftbits);
04651 compute_bit2idx(leastbit, i);
04652 b = smallbin_at(gm, i);
04653 p = b->fd;
04654 assert(chunksize(p) == small_index2size(i));
04655 unlink_first_small_chunk(gm, b, p, i);
04656 rsize = small_index2size(i) - nb;
04657 /* Fit here cannot be remainderless if 4byte sizes */
04658 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
04659 set_inuse_and_pinuse(gm, p, small_index2size(i));
04660 else {
04661 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
04662 r = chunk_plus_offset(p, nb);
04663 set_size_and_pinuse_of_free_chunk(r, rsize);
04664 replace_dv(gm, r, rsize);
04665 }
04666 mem = chunk2mem(p);
04667 check_malloced_chunk(gm, mem, nb);
04668 goto postaction;
04669 }
04670
04671 else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
04672 check_malloced_chunk(gm, mem, nb);
04673 goto postaction;
04674 }
04675 }
04676 }
04677 else if (bytes >= MAX_REQUEST)
04678 nb = MAX_SIZE_T;
04679 else {
04680 nb = pad_request(bytes);
04681 if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
04682 check_malloced_chunk(gm, mem, nb);
04683 goto postaction;
04684 }
04685 }
04686
04687 if (nb <= gm->dvsize) {
04688 size_t rsize = gm->dvsize - nb;
04689 mchunkptr p = gm->dv;
04690 if (rsize >= MIN_CHUNK_SIZE) {
04691 mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
04692 gm->dvsize = rsize;
04693 set_size_and_pinuse_of_free_chunk(r, rsize);
04694 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
04695 }
04696 else {
04697 size_t dvs = gm->dvsize;
04698 gm->dvsize = 0;
04699 gm->dv = 0;
04700 set_inuse_and_pinuse(gm, p, dvs);
04701 }
04702 mem = chunk2mem(p);
04703 check_malloced_chunk(gm, mem, nb);
04704 goto postaction;
04705 }
04706
04707 else if (nb < gm->topsize) {
04708 size_t rsize = gm->topsize -= nb;
04709 mchunkptr p = gm->top;
04710 mchunkptr r = gm->top = chunk_plus_offset(p, nb);
04711 r->head = rsize | PINUSE_BIT;
04712 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
04713 mem = chunk2mem(p);
04714 check_top_chunk(gm, gm->top);
04715 check_malloced_chunk(gm, mem, nb);
04716 goto postaction;
04717 }
04718
04719 mem = sys_alloc(gm, nb);
04720
04721 postaction:
04722 POSTACTION(gm);
04723 return mem;
04724 }
04725
04726 return 0;
04727 }
04728
04729 void dlfree(void* mem) {
04730
04731 /*
04732 Consolidate freed chunks with preceding or succeeding bordering free
04733 chunks, if they exist, and then place in a bin. Intermixed with special
04734 cases for top, dv, mmapped chunks, and usage errors.
04735 */
04736 if (mem != 0) {
04737 mchunkptr p = mem2chunk(mem);
04738 #if FOOTERS
04739 mstate fm = get_mstate_for(p);
04740 if (!ok_magic(fm)) {
04741 USAGE_ERROR_ACTION(fm, p);
04742 return;
04743 }
04744 #else
04745 #define fm gm
04746 #endif
04747 if (!PREACTION(fm)) {
04748 check_inuse_chunk(fm, p);
04749 if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
04750 size_t psize = chunksize(p);
04751 mchunkptr next = chunk_plus_offset(p, psize);
04752 if (!pinuse(p)) {
04753 size_t prevsize = p->prev_foot;
04754 if (is_mmapped(p)) {
04755 psize += prevsize + MMAP_FOOT_PAD;
04756 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
04757 fm->footprint -= psize;
04758 goto postaction;
04759 }
04760 else {
04761 mchunkptr prev = chunk_minus_offset(p, prevsize);
04762 psize += prevsize;
04763 p = prev;
04764 if (RTCHECK(ok_address(fm, prev))) {
04765 if (p != fm->dv) {
04766 unlink_chunk(fm, p, prevsize);
04767 }
04768 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
04769 fm->dvsize = psize;
04770 set_free_with_pinuse(p, psize, next);
04771 goto postaction;
04772 }
04773 }
04774 else
04775 goto erroraction;
04776 }
04777 }
04778
04779 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
04780 if (!cinuse(next)) {
04781 if (next == fm->top) {
04782 size_t tsize = fm->topsize += psize;
04783 fm->top = p;
04784 p->head = tsize | PINUSE_BIT;
04785 if (p == fm->dv) {
04786 fm->dv = 0;
04787 fm->dvsize = 0;
04788 }
04789 if (should_trim(fm, tsize))
04790 sys_trim(fm, 0);
04791 goto postaction;
04792 }
04793 else if (next == fm->dv) {
04794 size_t dsize = fm->dvsize += psize;
04795 fm->dv = p;
04796 set_size_and_pinuse_of_free_chunk(p, dsize);
04797 goto postaction;
04798 }
04799 else {
04800 size_t nsize = chunksize(next);
04801 psize += nsize;
04802 unlink_chunk(fm, next, nsize);
04803 set_size_and_pinuse_of_free_chunk(p, psize);
04804 if (p == fm->dv) {
04805 fm->dvsize = psize;
04806 goto postaction;
04807 }
04808 }
04809 }
04810 else
04811 set_free_with_pinuse(p, psize, next);
04812
04813 if (is_small(psize)) {
04814 insert_small_chunk(fm, p, psize);
04815 check_free_chunk(fm, p);
04816 }
04817 else {
04818 tchunkptr tp = (tchunkptr)p;
04819 insert_large_chunk(fm, tp, psize);
04820 check_free_chunk(fm, p);
04821 if (--fm->release_checks == 0)
04822 release_unused_segments(fm);
04823 }
04824 goto postaction;
04825 }
04826 }
04827 erroraction:
04828 USAGE_ERROR_ACTION(fm, p);
04829 postaction:
04830 POSTACTION(fm);
04831 }
04832 }
04833 #if !FOOTERS
04834 #undef fm
04835 #endif
04836 }
04837
04838 void* dlcalloc(size_t n_elements, size_t elem_size) {
04839 void* mem;
04840 size_t req = 0;
04841 if (n_elements != 0) {
04842 req = n_elements * elem_size;
04843 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
04844 (req / n_elements != elem_size))
04845 req = MAX_SIZE_T;
04846 }
04847 mem = dlmalloc(req);
04848 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
04849 memset(mem, 0, req);
04850 return mem;
04851 }
04852
04853 void* dlrealloc(void* oldmem, size_t bytes) {
04854 if (oldmem == 0)
04855 return dlmalloc(bytes);
04856 #ifdef REALLOC_ZERO_BYTES_FREES
04857 if (bytes == 0) {
04858 dlfree(oldmem);
04859 return 0;
04860 }
04861 #endif
04862 else {
04863 #if ! FOOTERS
04864 mstate m = gm;
04865 #else
04866 mstate m = get_mstate_for(mem2chunk(oldmem));
04867 if (!ok_magic(m)) {
04868 USAGE_ERROR_ACTION(m, oldmem);
04869 return 0;
04870 }
04871 #endif
04872 return internal_realloc(m, oldmem, bytes);
04873 }
04874 }
04875
04876 void* dlmemalign(size_t alignment, size_t bytes) {
04877 return internal_memalign(gm, alignment, bytes);
04878 }
04879
04880 void** dlindependent_calloc(size_t n_elements, size_t elem_size,
04881 void* chunks[]) {
04882 size_t sz = elem_size;
04883 return ialloc(gm, n_elements, &sz, 3, chunks);
04884 }
04885
04886 void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
04887 void* chunks[]) {
04888 return ialloc(gm, n_elements, sizes, 0, chunks);
04889 }
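/*
  Annotation: illustrative use of dlindependent_comalloc (a sketch, not
  part of the original sources).  It allocates several arrays from one
  underlying request so they sit adjacently yet can be freed
  independently.  The element counts below are made-up examples.

    size_t sizes[3] = { 100 * sizeof(int), 40 * sizeof(double), 256 };
    void*  parts[3];
    if (dlindependent_comalloc(3, sizes, parts) != 0) {
      int*    counts = (int*)    parts[0];
      double* values = (double*) parts[1];
      char*   names  = (char*)   parts[2];
      // ... use the three arrays, then release each one with dlfree() ...
    }
*/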
04890
04891 void* dlvalloc(size_t bytes) {
04892 size_t pagesz;
04893 ensure_initialization();
04894 pagesz = mparams.page_size;
04895 return dlmemalign(pagesz, bytes);
04896 }
04897
04898 void* dlpvalloc(size_t bytes) {
04899 size_t pagesz;
04900 ensure_initialization();
04901 pagesz = mparams.page_size;
04902 return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
04903 }
04904
04905 int dlmalloc_trim(size_t pad) {
04906 int result = 0;
04907 ensure_initialization();
04908 if (!PREACTION(gm)) {
04909 result = sys_trim(gm, pad);
04910 POSTACTION(gm);
04911 }
04912 return result;
04913 }
04914
04915 size_t dlmalloc_footprint(void) {
04916 return gm->footprint;
04917 }
04918
04919 size_t dlmalloc_max_footprint(void) {
04920 return gm->max_footprint;
04921 }
04922
04923 #if !NO_MALLINFO
04924 struct mallinfo dlmallinfo(void) {
04925 return internal_mallinfo(gm);
04926 }
04927 #endif
04928
04929 void dlmalloc_stats() {
04930 internal_malloc_stats(gm);
04931 }
04932
04933 int dlmallopt(int param_number, int value) {
04934 return change_mparam(param_number, value);
04935 }
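/*
  Annotation: typical use of the dl-prefixed entry points defined above
  (an illustrative sketch only; the sizes and trim pad are arbitrary):

    void* p = dlmalloc(1000);
    p = dlrealloc(p, 4000);
    dlfree(p);
    dlmalloc_trim(0);            // return unused top space to the system
    dlmalloc_stats();            // print footprint/in-use totals to stderr
*/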
04936
04937 #endif
04938
04939 size_t dlmalloc_usable_size(void* mem) {
04940 if (mem != 0) {
04941 mchunkptr p = mem2chunk(mem);
04942 if (is_inuse(p))
04943 return chunksize(p) - overhead_for(p);
04944 }
04945 return 0;
04946 }
04947
04948
04949
04950 #if MSPACES
04951
04952 static mstate init_user_mstate(char* tbase, size_t tsize) {
04953 size_t msize = pad_request(sizeof(struct malloc_state));
04954 mchunkptr mn;
04955 mchunkptr msp = align_as_chunk(tbase);
04956 mstate m = (mstate)(chunk2mem(msp));
04957 memset(m, 0, msize);
04958 INITIAL_LOCK(&m->mutex);
04959 msp->head = (msize|INUSE_BITS);
04960 m->seg.base = m->least_addr = tbase;
04961 m->seg.size = m->footprint = m->max_footprint = tsize;
04962 m->magic = mparams.magic;
04963 m->release_checks = MAX_RELEASE_CHECK_RATE;
04964 m->mflags = mparams.default_mflags;
04965 m->extp = 0;
04966 m->exts = 0;
04967 disable_contiguous(m);
04968 init_bins(m);
04969 mn = next_chunk(mem2chunk(m));
04970 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
04971 check_top_chunk(m, m->top);
04972 return m;
04973 }
04974
04975 mspace create_mspace(size_t capacity, int locked) {
04976 mstate m = 0;
04977 size_t msize;
04978 ensure_initialization();
04979 msize = pad_request(sizeof(struct malloc_state));
04980 if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
04981 size_t rs = ((capacity == 0)? mparams.granularity :
04982 (capacity + TOP_FOOT_SIZE + msize));
04983 size_t tsize = granularity_align(rs);
04984 char* tbase = (char*)(CALL_MMAP(tsize));
04985 if (tbase != CMFAIL) {
04986 m = init_user_mstate(tbase, tsize);
04987 m->seg.sflags = USE_MMAP_BIT;
04988 set_lock(m, locked);
04989 }
04990 }
04991 return (mspace)m;
04992 }
04993
04994 mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
04995 mstate m = 0;
04996 size_t msize;
04997 ensure_initialization();
04998 msize = pad_request(sizeof(struct malloc_state));
04999 if (capacity > msize + TOP_FOOT_SIZE &&
05000 capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
05001 m = init_user_mstate((char*)base, capacity);
05002 m->seg.sflags = EXTERN_BIT;
05003 set_lock(m, locked);
05004 }
05005 return (mspace)m;
05006 }
05007
05008 int mspace_track_large_chunks(mspace msp, int enable) {
05009 int ret = 0;
05010 mstate ms = (mstate)msp;
05011 if (!PREACTION(ms)) {
05012 if (!use_mmap(ms))
05013 ret = 1;
05014 if (!enable)
05015 enable_mmap(ms);
05016 else
05017 disable_mmap(ms);
05018 POSTACTION(ms);
05019 }
05020 return ret;
05021 }
05022
05023 size_t destroy_mspace(mspace msp) {
05024 size_t freed = 0;
05025 mstate ms = (mstate)msp;
05026 if (ok_magic(ms)) {
05027 msegmentptr sp = &ms->seg;
05028 while (sp != 0) {
05029 char* base = sp->base;
05030 size_t size = sp->size;
05031 flag_t flag = sp->sflags;
05032 sp = sp->next;
05033 if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&
05034 CALL_MUNMAP(base, size) == 0)
05035 freed += size;
05036 }
05037 }
05038 else {
05039 USAGE_ERROR_ACTION(ms,ms);
05040 }
05041 return freed;
05042 }
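/*
  Annotation: illustrative mspace lifecycle (a sketch; the capacity and
  request sizes are arbitrary):

    mspace ms = create_mspace(0, 1);          // default capacity, locked
    void*  p  = mspace_malloc(ms, 512);
    mspace_free(ms, p);
    size_t released = destroy_mspace(ms);     // unmaps the space's segments

  Chunks obtained from one mspace must be freed back to that same mspace;
  they are not interchangeable with the global malloc space.
*/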
05043
05044
05045 /*
05046 The mspace versions of these routines are near-clones of the global (gm)
05047 versions above, differing mainly in the magic/ownership checks.
05048 */
05049
05050 void* mspace_malloc(mspace msp, size_t bytes) {
05051 mstate ms = (mstate)msp;
05052 if (!ok_magic(ms)) {
05053 USAGE_ERROR_ACTION(ms,ms);
05054 return 0;
05055 }
05056 if (!PREACTION(ms)) {
05057 void* mem;
05058 size_t nb;
05059 if (bytes <= MAX_SMALL_REQUEST) {
05060 bindex_t idx;
05061 binmap_t smallbits;
05062 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
05063 idx = small_index(nb);
05064 smallbits = ms->smallmap >> idx;
05065
05066 if ((smallbits & 0x3U) != 0) {
05067 mchunkptr b, p;
05068 idx += ~smallbits & 1;
05069 b = smallbin_at(ms, idx);
05070 p = b->fd;
05071 assert(chunksize(p) == small_index2size(idx));
05072 unlink_first_small_chunk(ms, b, p, idx);
05073 set_inuse_and_pinuse(ms, p, small_index2size(idx));
05074 mem = chunk2mem(p);
05075 check_malloced_chunk(ms, mem, nb);
05076 goto postaction;
05077 }
05078
05079 else if (nb > ms->dvsize) {
05080 if (smallbits != 0) {
05081 mchunkptr b, p, r;
05082 size_t rsize;
05083 bindex_t i;
05084 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
05085 binmap_t leastbit = least_bit(leftbits);
05086 compute_bit2idx(leastbit, i);
05087 b = smallbin_at(ms, i);
05088 p = b->fd;
05089 assert(chunksize(p) == small_index2size(i));
05090 unlink_first_small_chunk(ms, b, p, i);
05091 rsize = small_index2size(i) - nb;
05092 /* Fit here cannot be remainderless if 4byte sizes */
05093 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
05094 set_inuse_and_pinuse(ms, p, small_index2size(i));
05095 else {
05096 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
05097 r = chunk_plus_offset(p, nb);
05098 set_size_and_pinuse_of_free_chunk(r, rsize);
05099 replace_dv(ms, r, rsize);
05100 }
05101 mem = chunk2mem(p);
05102 check_malloced_chunk(ms, mem, nb);
05103 goto postaction;
05104 }
05105
05106 else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
05107 check_malloced_chunk(ms, mem, nb);
05108 goto postaction;
05109 }
05110 }
05111 }
05112 else if (bytes >= MAX_REQUEST)
05113 nb = MAX_SIZE_T;
05114 else {
05115 nb = pad_request(bytes);
05116 if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
05117 check_malloced_chunk(ms, mem, nb);
05118 goto postaction;
05119 }
05120 }
05121
05122 if (nb <= ms->dvsize) {
05123 size_t rsize = ms->dvsize - nb;
05124 mchunkptr p = ms->dv;
05125 if (rsize >= MIN_CHUNK_SIZE) {
05126 mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
05127 ms->dvsize = rsize;
05128 set_size_and_pinuse_of_free_chunk(r, rsize);
05129 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
05130 }
05131 else {
05132 size_t dvs = ms->dvsize;
05133 ms->dvsize = 0;
05134 ms->dv = 0;
05135 set_inuse_and_pinuse(ms, p, dvs);
05136 }
05137 mem = chunk2mem(p);
05138 check_malloced_chunk(ms, mem, nb);
05139 goto postaction;
05140 }
05141
05142 else if (nb < ms->topsize) {
05143 size_t rsize = ms->topsize -= nb;
05144 mchunkptr p = ms->top;
05145 mchunkptr r = ms->top = chunk_plus_offset(p, nb);
05146 r->head = rsize | PINUSE_BIT;
05147 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
05148 mem = chunk2mem(p);
05149 check_top_chunk(ms, ms->top);
05150 check_malloced_chunk(ms, mem, nb);
05151 goto postaction;
05152 }
05153
05154 mem = sys_alloc(ms, nb);
05155
05156 postaction:
05157 POSTACTION(ms);
05158 return mem;
05159 }
05160
05161 return 0;
05162 }
05163
05164 void mspace_free(mspace msp, void* mem) {
05165 if (mem != 0) {
05166 mchunkptr p = mem2chunk(mem);
05167 #if FOOTERS
05168 mstate fm = get_mstate_for(p);
05169 msp = msp; /* placate people compiling -Wunused */
05170 #else
05171 mstate fm = (mstate)msp;
05172 #endif
05173 if (!ok_magic(fm)) {
05174 USAGE_ERROR_ACTION(fm, p);
05175 return;
05176 }
05177 if (!PREACTION(fm)) {
05178 check_inuse_chunk(fm, p);
05179 if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
05180 size_t psize = chunksize(p);
05181 mchunkptr next = chunk_plus_offset(p, psize);
05182 if (!pinuse(p)) {
05183 size_t prevsize = p->prev_foot;
05184 if (is_mmapped(p)) {
05185 psize += prevsize + MMAP_FOOT_PAD;
05186 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
05187 fm->footprint -= psize;
05188 goto postaction;
05189 }
05190 else {
05191 mchunkptr prev = chunk_minus_offset(p, prevsize);
05192 psize += prevsize;
05193 p = prev;
05194 if (RTCHECK(ok_address(fm, prev))) {
05195 if (p != fm->dv) {
05196 unlink_chunk(fm, p, prevsize);
05197 }
05198 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
05199 fm->dvsize = psize;
05200 set_free_with_pinuse(p, psize, next);
05201 goto postaction;
05202 }
05203 }
05204 else
05205 goto erroraction;
05206 }
05207 }
05208
05209 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
05210 if (!cinuse(next)) {
05211 if (next == fm->top) {
05212 size_t tsize = fm->topsize += psize;
05213 fm->top = p;
05214 p->head = tsize | PINUSE_BIT;
05215 if (p == fm->dv) {
05216 fm->dv = 0;
05217 fm->dvsize = 0;
05218 }
05219 if (should_trim(fm, tsize))
05220 sys_trim(fm, 0);
05221 goto postaction;
05222 }
05223 else if (next == fm->dv) {
05224 size_t dsize = fm->dvsize += psize;
05225 fm->dv = p;
05226 set_size_and_pinuse_of_free_chunk(p, dsize);
05227 goto postaction;
05228 }
05229 else {
05230 size_t nsize = chunksize(next);
05231 psize += nsize;
05232 unlink_chunk(fm, next, nsize);
05233 set_size_and_pinuse_of_free_chunk(p, psize);
05234 if (p == fm->dv) {
05235 fm->dvsize = psize;
05236 goto postaction;
05237 }
05238 }
05239 }
05240 else
05241 set_free_with_pinuse(p, psize, next);
05242
05243 if (is_small(psize)) {
05244 insert_small_chunk(fm, p, psize);
05245 check_free_chunk(fm, p);
05246 }
05247 else {
05248 tchunkptr tp = (tchunkptr)p;
05249 insert_large_chunk(fm, tp, psize);
05250 check_free_chunk(fm, p);
05251 if (--fm->release_checks == 0)
05252 release_unused_segments(fm);
05253 }
05254 goto postaction;
05255 }
05256 }
05257 erroraction:
05258 USAGE_ERROR_ACTION(fm, p);
05259 postaction:
05260 POSTACTION(fm);
05261 }
05262 }
05263 }
05264
05265 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
05266 void* mem;
05267 size_t req = 0;
05268 mstate ms = (mstate)msp;
05269 if (!ok_magic(ms)) {
05270 USAGE_ERROR_ACTION(ms,ms);
05271 return 0;
05272 }
05273 if (n_elements != 0) {
05274 req = n_elements * elem_size;
05275 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
05276 (req / n_elements != elem_size))
05277 req = MAX_SIZE_T;
05278 }
05279 mem = internal_malloc(ms, req);
05280 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
05281 memset(mem, 0, req);
05282 return mem;
05283 }
05284
05285 void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
05286 if (oldmem == 0)
05287 return mspace_malloc(msp, bytes);
05288 #ifdef REALLOC_ZERO_BYTES_FREES
05289 if (bytes == 0) {
05290 mspace_free(msp, oldmem);
05291 return 0;
05292 }
05293 #endif
05294 else {
05295 #if FOOTERS
05296 mchunkptr p = mem2chunk(oldmem);
05297 mstate ms = get_mstate_for(p);
05298 #else
05299 mstate ms = (mstate)msp;
05300 #endif
05301 if (!ok_magic(ms)) {
05302 USAGE_ERROR_ACTION(ms,ms);
05303 return 0;
05304 }
05305 return internal_realloc(ms, oldmem, bytes);
05306 }
05307 }
05308
05309 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
05310 mstate ms = (mstate)msp;
05311 if (!ok_magic(ms)) {
05312 USAGE_ERROR_ACTION(ms,ms);
05313 return 0;
05314 }
05315 return internal_memalign(ms, alignment, bytes);
05316 }
05317
05318 void** mspace_independent_calloc(mspace msp, size_t n_elements,
05319 size_t elem_size, void* chunks[]) {
05320 size_t sz = elem_size;
05321 mstate ms = (mstate)msp;
05322 if (!ok_magic(ms)) {
05323 USAGE_ERROR_ACTION(ms,ms);
05324 return 0;
05325 }
05326 return ialloc(ms, n_elements, &sz, 3, chunks);
05327 }
05328
05329 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
05330 size_t sizes[], void* chunks[]) {
05331 mstate ms = (mstate)msp;
05332 if (!ok_magic(ms)) {
05333 USAGE_ERROR_ACTION(ms,ms);
05334 return 0;
05335 }
05336 return ialloc(ms, n_elements, sizes, 0, chunks);
05337 }
05338
05339 int mspace_trim(mspace msp, size_t pad) {
05340 int result = 0;
05341 mstate ms = (mstate)msp;
05342 if (ok_magic(ms)) {
05343 if (!PREACTION(ms)) {
05344 result = sys_trim(ms, pad);
05345 POSTACTION(ms);
05346 }
05347 }
05348 else {
05349 USAGE_ERROR_ACTION(ms,ms);
05350 }
05351 return result;
05352 }
05353
05354 void mspace_malloc_stats(mspace msp) {
05355 mstate ms = (mstate)msp;
05356 if (ok_magic(ms)) {
05357 internal_malloc_stats(ms);
05358 }
05359 else {
05360 USAGE_ERROR_ACTION(ms,ms);
05361 }
05362 }
05363
05364 size_t mspace_footprint(mspace msp) {
05365 size_t result = 0;
05366 mstate ms = (mstate)msp;
05367 if (ok_magic(ms)) {
05368 result = ms->footprint;
05369 }
05370 else {
05371 USAGE_ERROR_ACTION(ms,ms);
05372 }
05373 return result;
05374 }
05375
05376
05377 size_t mspace_max_footprint(mspace msp) {
05378 size_t result = 0;
05379 mstate ms = (mstate)msp;
05380 if (ok_magic(ms)) {
05381 result = ms->max_footprint;
05382 }
05383 else {
05384 USAGE_ERROR_ACTION(ms,ms);
05385 }
05386 return result;
05387 }
05388
05389
05390 #if !NO_MALLINFO
05391 struct mallinfo mspace_mallinfo(mspace msp) {
05392 mstate ms = (mstate)msp;
05393 if (!ok_magic(ms)) {
05394 USAGE_ERROR_ACTION(ms,ms);
05395 }
05396 return internal_mallinfo(ms);
05397 }
05398 #endif
05399
05400 size_t mspace_usable_size(void* mem) {
05401 if (mem != 0) {
05402 mchunkptr p = mem2chunk(mem);
05403 if (is_inuse(p))
05404 return chunksize(p) - overhead_for(p);
05405 }
05406 return 0;
05407 }
05408
05409 int mspace_mallopt(int param_number, int value) {
05410 return change_mparam(param_number, value);
05411 }
05412
05413 #endif
05414