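/*
  This appears to be a version (aka dlmalloc) of malloc/free/realloc
  derived from Doug Lea's public-domain allocator (malloc-2.8.x,
  available from ftp://gee.cs.oswego.edu/pub/misc/malloc.c).  Unless
  noted otherwise, the configuration macros below keep their stock
  dlmalloc meanings and defaults.
*/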
#ifndef WIN32
#ifdef _WIN32
#define WIN32 1
#endif
#endif
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#define HAVE_MMAP 1
#define HAVE_MORECORE 0
#define LACKS_UNISTD_H
#define LACKS_SYS_PARAM_H
#define LACKS_SYS_MMAN_H
#define LACKS_STRING_H
#define LACKS_STRINGS_H
#define LACKS_SYS_TYPES_H
#define LACKS_ERRNO_H
#define MALLOC_FAILURE_ACTION
#ifdef _WIN32_WCE
#define MMAP_CLEARS 0
#else
#define MMAP_CLEARS 1
#endif
#endif

#if defined(DARWIN) || defined(_DARWIN)
#ifndef HAVE_MORECORE
#define HAVE_MORECORE 0
#define HAVE_MMAP 1
#endif
#endif

#ifndef LACKS_SYS_TYPES_H
#include <sys/types.h>
#endif

#define MAX_SIZE_T (~(size_t)0)

#ifndef ONLY_MSPACES
#define ONLY_MSPACES 0
#endif
#ifndef MSPACES
#if ONLY_MSPACES
#define MSPACES 1
#else
#define MSPACES 0
#endif
#endif
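/*
  Note: MALLOC_ALIGNMENT here is taken from ALIGN_BYTES, which this copy
  evidently expects the build configuration to supply (stock dlmalloc
  defaults the alignment to at least 8 bytes; it must be a power of two).
*/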
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT ((size_t)(ALIGN_BYTES))
#endif
#ifndef FOOTERS
#define FOOTERS 0
#endif
#ifndef ABORT
#define ABORT abort()
#endif
#ifndef ABORT_ON_ASSERT_FAILURE
#define ABORT_ON_ASSERT_FAILURE 1
#endif
#ifndef PROCEED_ON_ERROR
#define PROCEED_ON_ERROR 0
#endif
#ifndef USE_LOCKS
#define USE_LOCKS 0
#endif
#ifndef USE_SPIN_LOCKS
#if USE_LOCKS && ((defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) || (defined(_MSC_VER) && _MSC_VER>=1310))
#define USE_SPIN_LOCKS 1
#else
#define USE_SPIN_LOCKS 0
#endif
#endif
#ifndef INSECURE
#define INSECURE 0
#endif
#ifndef HAVE_MMAP
#define HAVE_MMAP 1
#endif
#ifndef MMAP_CLEARS
#define MMAP_CLEARS 1
#endif
#ifndef HAVE_MREMAP
#ifdef linux
#define HAVE_MREMAP 1
#else
#define HAVE_MREMAP 0
#endif
#endif
#ifndef MALLOC_FAILURE_ACTION
#define MALLOC_FAILURE_ACTION errno = ENOMEM;
#endif
#ifndef HAVE_MORECORE
#if ONLY_MSPACES
#define HAVE_MORECORE 0
#else
#define HAVE_MORECORE 1
#endif
#endif
#if !HAVE_MORECORE
#define MORECORE_CONTIGUOUS 0
#else
#ifndef MORECORE
#define MORECORE sbrk
#endif
#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 1
#endif
#endif
#ifndef DEFAULT_GRANULARITY
#if MORECORE_CONTIGUOUS
#define DEFAULT_GRANULARITY (0)
#else
#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
#endif
#endif
#ifndef DEFAULT_TRIM_THRESHOLD
#ifndef MORECORE_CANNOT_TRIM
#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
#else
#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
#endif
#endif
#ifndef DEFAULT_MMAP_THRESHOLD
#if HAVE_MMAP
#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
#else
#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
#endif
#endif
#ifndef MAX_RELEASE_CHECK_RATE
#if HAVE_MMAP
#define MAX_RELEASE_CHECK_RATE 255
#else
#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T
#endif
#endif
#ifndef USE_BUILTIN_FFS
#define USE_BUILTIN_FFS 0
#endif
#ifndef USE_DEV_RANDOM
#define USE_DEV_RANDOM 0
#endif
#ifndef NO_MALLINFO
#define NO_MALLINFO 0
#endif
#ifndef MALLINFO_FIELD_TYPE
#define MALLINFO_FIELD_TYPE size_t
#endif
#ifndef NO_SEGMENT_TRAVERSAL
#define NO_SEGMENT_TRAVERSAL 0
#endif

#define M_TRIM_THRESHOLD (-1)
#define M_GRANULARITY (-2)
#define M_MMAP_THRESHOLD (-3)

#if !NO_MALLINFO

#ifdef HAVE_USR_INCLUDE_MALLOC_H
#include "/usr/include/malloc.h"
#else
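/*
  Minimal SVID/XPG-style mallinfo, used when the system malloc.h is not
  available.  Field meanings (as in stock dlmalloc):
    arena      total non-mmapped bytes obtained from the system
    ordblks    number of free chunks
    smblks     always 0
    hblks      always 0
    hblkhd     total bytes held in mmapped regions
    usmblks    maximum total allocated space
    fsmblks    always 0
    uordblks   total allocated space
    fordblks   total free space
    keepcost   releasable (via malloc_trim) space
*/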
struct mallinfo {
  MALLINFO_FIELD_TYPE arena;
  MALLINFO_FIELD_TYPE ordblks;
  MALLINFO_FIELD_TYPE smblks;
  MALLINFO_FIELD_TYPE hblks;
  MALLINFO_FIELD_TYPE hblkhd;
  MALLINFO_FIELD_TYPE usmblks;
  MALLINFO_FIELD_TYPE fsmblks;
  MALLINFO_FIELD_TYPE uordblks;
  MALLINFO_FIELD_TYPE fordblks;
  MALLINFO_FIELD_TYPE keepcost;
};

#endif
#endif
#ifndef FORCEINLINE
#if defined(__GNUC__)
#define FORCEINLINE __inline __attribute__ ((always_inline))
#elif defined(_MSC_VER)
#define FORCEINLINE __forceinline
#endif
#endif
#ifndef NOINLINE
#if defined(__GNUC__)
#define NOINLINE __attribute__ ((noinline))
#elif defined(_MSC_VER)
#define NOINLINE __declspec(noinline)
#else
#define NOINLINE
#endif
#endif

#ifdef __cplusplus
extern "C" {
#ifndef FORCEINLINE
#define FORCEINLINE inline
#endif
#endif
#ifndef FORCEINLINE
#define FORCEINLINE
#endif

#if !ONLY_MSPACES

#ifndef USE_DL_PREFIX
#define dlcalloc               calloc
#define dlfree                 free
#define dlmalloc               malloc
#define dlmemalign             memalign
#define dlrealloc              realloc
#define dlvalloc               valloc
#define dlpvalloc              pvalloc
#define dlmallinfo             mallinfo
#define dlmallopt              mallopt
#define dlmalloc_trim          malloc_trim
#define dlmalloc_stats         malloc_stats
#define dlmalloc_usable_size   malloc_usable_size
#define dlmalloc_footprint     malloc_footprint
#define dlmalloc_max_footprint malloc_max_footprint
#define dlindependent_calloc   independent_calloc
#define dlindependent_comalloc independent_comalloc
#endif
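/*
  Core allocation API.  These behave like their ANSI C counterparts:
  dlmalloc returns a pointer to a newly allocated chunk of at least the
  requested size (or null on failure); dlfree releases a chunk obtained
  from dlmalloc/dlcalloc/dlrealloc; dlcalloc allocates and zeroes an
  array; dlrealloc resizes a chunk, preserving its contents; dlmemalign
  and dlvalloc return alignment-constrained chunks.
*/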
void* dlmalloc(size_t);

void  dlfree(void*);

void* dlcalloc(size_t, size_t);

void* dlrealloc(void*, size_t);

void* dlmemalign(size_t, size_t);

void* dlvalloc(size_t);

int dlmallopt(int, int);

size_t dlmalloc_footprint(void);

size_t dlmalloc_max_footprint(void);

#if !NO_MALLINFO
struct mallinfo dlmallinfo(void);
#endif

void** dlindependent_calloc(size_t, size_t, void**);

void** dlindependent_comalloc(size_t, size_t*, void**);
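/*
  Illustrative sketch (adapted from the stock dlmalloc documentation;
  struct Head/Foot are hypothetical): independent_comalloc allocates n
  chunks with independently given sizes in a single call, storing the
  pointers into the supplied array.  For example, to allocate a header,
  a message body, and a trailer in one shot:

    size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
    void*  chunks[3];
    if (dlindependent_comalloc(3, sizes, chunks) != 0) {
      struct Head* head = (struct Head*)chunks[0];
      char*        body = (char*)chunks[1];
      struct Foot* foot = (struct Foot*)chunks[2];
      ...
    }

  Each element may later be freed individually with dlfree.
*/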

void* dlpvalloc(size_t);

int dlmalloc_trim(size_t);

size_t dlmalloc_usable_size(void*);

void dlmalloc_stats(void);

#endif

#if MSPACES

typedef void* mspace;

mspace create_mspace(size_t capacity, int locked);

size_t destroy_mspace(mspace msp);

mspace create_mspace_with_base(void* base, size_t capacity, int locked);

void* mspace_malloc(mspace msp, size_t bytes);

void mspace_free(mspace msp, void* mem);
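/*
  A minimal usage sketch (illustrative only): an mspace is an opaque
  handle to an independent heap.

    mspace ms = create_mspace(0, 0);    // default capacity, no locking
    void*  p  = mspace_malloc(ms, 128);
    mspace_free(ms, p);
    destroy_mspace(ms);                 // releases all space held by ms
*/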

void* mspace_realloc(mspace msp, void* mem, size_t newsize);

void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);

void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);

void** mspace_independent_calloc(mspace msp, size_t n_elements,
                                 size_t elem_size, void* chunks[]);

void** mspace_independent_comalloc(mspace msp, size_t n_elements,
                                   size_t sizes[], void* chunks[]);

size_t mspace_footprint(mspace msp);

size_t mspace_max_footprint(mspace msp);

#if !NO_MALLINFO
struct mallinfo mspace_mallinfo(mspace msp);
#endif

void mspace_malloc_stats(mspace msp);

int mspace_trim(mspace msp, size_t pad);

int mspace_mallopt(int, int);

#endif

#ifdef __cplusplus
}
#endif

#ifdef WIN32
#pragma warning( disable : 4146 )
#endif

#include <stdio.h>
#ifndef LACKS_STDINT_H
#include <stdint.h> /* for uintptr_t, used by chunk2mem_int below */
#endif

#ifndef LACKS_ERRNO_H
#include <errno.h>
#endif
#if FOOTERS
#include <time.h>
#endif
#ifndef LACKS_STDLIB_H
#include <stdlib.h>
#endif
#ifdef DEBUG
#if ABORT_ON_ASSERT_FAILURE
#define assert(x) if(!(x)) ABORT
#else
#include <assert.h>
#endif
#else
#define assert(x)
#endif
#ifndef LACKS_STRING_H
#include <string.h>
#endif
#if USE_BUILTIN_FFS
#ifndef LACKS_STRINGS_H
#include <strings.h>
#endif
#endif
#if HAVE_MMAP
#ifndef LACKS_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifndef LACKS_FCNTL_H
#include <fcntl.h>
#endif
#endif
#if HAVE_MORECORE
#ifndef LACKS_UNISTD_H
#include <unistd.h>
#else
#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
extern void* sbrk(ptrdiff_t);
#endif
#endif
#endif

#if USE_LOCKS
#ifndef WIN32
#include <pthread.h>
#if defined (__SVR4) && defined (__sun)
#include <thread.h>
#endif
#else
#ifndef _M_AMD64
#ifdef __cplusplus
extern "C" {
#endif
LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
#ifdef __cplusplus
}
#endif
#endif
#pragma intrinsic (_InterlockedCompareExchange)
#pragma intrinsic (_InterlockedExchange)
#define interlockedcompareexchange _InterlockedCompareExchange
#define interlockedexchange _InterlockedExchange
#endif
#endif

#if defined(_MSC_VER) && _MSC_VER>=1300
#ifndef BitScanForward
#ifdef __cplusplus
extern "C" {
#endif
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#ifdef __cplusplus
}
#endif

#define BitScanForward _BitScanForward
#define BitScanReverse _BitScanReverse
#pragma intrinsic(_BitScanForward)
#pragma intrinsic(_BitScanReverse)
#endif
#endif

#ifndef WIN32
#ifndef malloc_getpagesize
#  ifdef _SC_PAGESIZE
#    ifndef _SC_PAGE_SIZE
#      define _SC_PAGE_SIZE _SC_PAGESIZE
#    endif
#  endif
#  ifdef _SC_PAGE_SIZE
#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
#  else
#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
       extern size_t getpagesize();
#      define malloc_getpagesize getpagesize()
#    else
#      ifdef WIN32
#        define malloc_getpagesize getpagesize()
#      else
#        ifndef LACKS_SYS_PARAM_H
#          include <sys/param.h>
#        endif
#        ifdef EXEC_PAGESIZE
#          define malloc_getpagesize EXEC_PAGESIZE
#        else
#          ifdef NBPG
#            ifndef CLSIZE
#              define malloc_getpagesize NBPG
#            else
#              define malloc_getpagesize (NBPG * CLSIZE)
#            endif
#          else
#            ifdef NBPC
#              define malloc_getpagesize NBPC
#            else
#              ifdef PAGESIZE
#                define malloc_getpagesize PAGESIZE
#              else
#                define malloc_getpagesize ((size_t)4096U)
#              endif
#            endif
#          endif
#        endif
#      endif
#    endif
#  endif
#endif
#endif

#define SIZE_T_SIZE (sizeof(size_t))
#define SIZE_T_BITSIZE (sizeof(size_t) << 3)

#define SIZE_T_ZERO ((size_t)0)
#define SIZE_T_ONE ((size_t)1)
#define SIZE_T_TWO ((size_t)2)
#define SIZE_T_FOUR ((size_t)4)
#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)

#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)

#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)

#define align_offset(A)\
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))

#define MFAIL ((void*)(MAX_SIZE_T))
#define CMFAIL ((char*)(MFAIL))

#if !HAVE_MMAP
#define IS_MMAPPED_BIT (SIZE_T_ZERO)
#define USE_MMAP_BIT (SIZE_T_ZERO)
#define CALL_MMAP(s) MFAIL
#define CALL_MUNMAP(a, s) (-1)
#define DIRECT_MMAP(s) MFAIL

#else
#define IS_MMAPPED_BIT (SIZE_T_ONE)
#define USE_MMAP_BIT (SIZE_T_ONE)

#ifndef WIN32
#define CALL_MUNMAP(a, s) munmap((a), (s))
#define MMAP_PROT (PROT_READ|PROT_WRITE)
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif
#ifdef MAP_ANONYMOUS
#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
#define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
#else
#define MMAP_FLAGS (MAP_PRIVATE)
static int dev_zero_fd = -1;
#define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
           (dev_zero_fd = open("/dev/zero", O_RDWR), \
            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
#endif

#define DIRECT_MMAP(s) CALL_MMAP(s)
#else

static FORCEINLINE void* win32mmap(size_t size) {
  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
  return (ptr != 0)? ptr: MFAIL;
}

static FORCEINLINE void* win32direct_mmap(size_t size) {
  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
                           PAGE_READWRITE);
  return (ptr != 0)? ptr: MFAIL;
}

static FORCEINLINE int win32munmap(void* ptr, size_t size) {
  MEMORY_BASIC_INFORMATION minfo;
  char* cptr = (char*)ptr;
  while (size) {
    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
      return -1;
    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
      return -1;
    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
      return -1;
    cptr += minfo.RegionSize;
    size -= minfo.RegionSize;
  }
  return 0;
}

#define CALL_MMAP(s) win32mmap(s)
#define CALL_MUNMAP(a, s) win32munmap((a), (s))
#define DIRECT_MMAP(s) win32direct_mmap(s)
#endif
#endif

#if HAVE_MMAP && HAVE_MREMAP
void *mremap(void *old_address, size_t old_size,
             size_t new_size, int flags, ... );
#define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
#else
#define CALL_MREMAP(addr, osz, nsz, mv) ((void)(addr),(void)(osz), \
                                         (void)(nsz), (void)(mv),MFAIL)
#endif

#if HAVE_MORECORE
#define CALL_MORECORE(S) MORECORE(S)
#else
#define CALL_MORECORE(S) MFAIL
#endif

#define USE_NONCONTIGUOUS_BIT (4U)

#define EXTERN_BIT (8U)
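
/* --------------------------- Lock preliminaries ------------------------ */

/*
  When locks are enabled, the global arena and each mspace are protected
  by a recursive lock.  With USE_SPIN_LOCKS, the lock is a spin lock
  built on an atomic exchange/compare-exchange plus a per-thread
  recursion count; otherwise a recursive pthread mutex (or a Win32
  CRITICAL_SECTION) is used.  magic_init_mutex serializes one-time
  parameter initialization, and morecore_mutex serializes MORECORE
  calls.
*/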
#if USE_LOCKS == 1

#if USE_SPIN_LOCKS
#ifndef WIN32

struct pthread_mlock_t
{
  volatile pthread_t threadid;
  volatile unsigned int c;
  volatile unsigned int l;
};
#define MLOCK_T struct pthread_mlock_t
#define CURRENT_THREAD pthread_self()
#define SPINS_PER_YIELD 63
static FORCEINLINE int pthread_acquire_lock (MLOCK_T *sl) {
  if(CURRENT_THREAD==sl->threadid)
    ++sl->c;
  else {
    int spins = 0;
    for (;;) {
      int ret;
      __asm__ __volatile__ ("lock cmpxchgl %2,(%1)" : "=a" (ret) : "r" (&sl->l), "r" (1), "a" (0));
      if(!ret) {
        assert(!sl->threadid);
        sl->threadid=CURRENT_THREAD;
        sl->c=1;
        break;
      }
      if ((++spins & SPINS_PER_YIELD) == 0) {
#if defined (__SVR4) && defined (__sun)
        thr_yield();
#else
#ifdef linux
        sched_yield();
#else
        ;
#endif
#endif
      }
    }
  }

  return 0;
}

static FORCEINLINE void pthread_release_lock (MLOCK_T *sl) {
  int ret;
  assert(CURRENT_THREAD==sl->threadid);
  if (!--sl->c) {
    sl->threadid=0;
    __asm__ __volatile__ ("xchgl %2,(%1)" : "=r" (ret) : "r" (&sl->l), "0" (0));
  }
}

static FORCEINLINE int pthread_try_lock (MLOCK_T *sl) {
  int ret;
  __asm__ __volatile__ ("lock cmpxchgl %2,(%1)" : "=a" (ret) : "r" (&sl->l), "r" (1), "a" (0));
  if(!ret){
    assert(!sl->threadid);
    sl->threadid=CURRENT_THREAD;
    sl->c=1;
    return 1;
  }
  return 0;
}

#define INITIAL_LOCK(sl) (memset((sl), 0, sizeof(MLOCK_T)), 0)
#define ACQUIRE_LOCK(sl) pthread_acquire_lock(sl)
#define RELEASE_LOCK(sl) pthread_release_lock(sl)
#define TRY_LOCK(sl) pthread_try_lock(sl)
#define IS_LOCKED(sl) ((sl)->l)

static MLOCK_T magic_init_mutex = {0, 0, 0 };
#if HAVE_MORECORE
static MLOCK_T morecore_mutex = {0, 0, 0 };
#endif

#else

struct win32_mlock_t
{
  volatile long threadid;
  volatile unsigned int c;
  long l;
};
#define MLOCK_T struct win32_mlock_t
#define CURRENT_THREAD GetCurrentThreadId()
#define SPINS_PER_YIELD 63
static FORCEINLINE int win32_acquire_lock (MLOCK_T *sl) {
  long mythreadid=CURRENT_THREAD;
  if(mythreadid==sl->threadid)
    ++sl->c;
  else {
    int spins = 0;
    for (;;) {
      if (!interlockedexchange(&sl->l, 1)) {
        assert(!sl->threadid);
        sl->threadid=mythreadid;
        sl->c=1;
        break;
      }
      if ((++spins & SPINS_PER_YIELD) == 0)
        SleepEx(0, FALSE);
    }
  }
  return 0;
}

static FORCEINLINE void win32_release_lock (MLOCK_T *sl) {
  assert(CURRENT_THREAD==sl->threadid);
  if (!--sl->c) {
    sl->threadid=0;
    interlockedexchange (&sl->l, 0);
  }
}

static FORCEINLINE int win32_try_lock (MLOCK_T *sl) {
  if (!interlockedexchange(&sl->l, 1)){
    assert(!sl->threadid);
    sl->threadid=CURRENT_THREAD;
    sl->c=1;
    return 1;
  }
  return 0;
}

#define INITIAL_LOCK(sl) (memset(sl, 0, sizeof(MLOCK_T)), 0)
#define ACQUIRE_LOCK(sl) win32_acquire_lock(sl)
#define RELEASE_LOCK(sl) win32_release_lock(sl)
#define TRY_LOCK(sl) win32_try_lock(sl)
#define IS_LOCKED(sl) ((sl)->l)

static MLOCK_T magic_init_mutex = {0, 0 };
#if HAVE_MORECORE
static MLOCK_T morecore_mutex = {0, 0 };
#endif

#endif
#else

#ifndef WIN32

struct pthread_mlock_t
{
  volatile unsigned int c;
  pthread_mutex_t l;
};
#define MLOCK_T struct pthread_mlock_t
#define CURRENT_THREAD pthread_self()
static FORCEINLINE int pthread_acquire_lock (MLOCK_T *sl) {
  if(!pthread_mutex_lock(&(sl)->l)){
    sl->c++;
    return 0;
  }
  return 1;
}

static FORCEINLINE void pthread_release_lock (MLOCK_T *sl) {
  --sl->c;
  pthread_mutex_unlock(&(sl)->l);
}

static FORCEINLINE int pthread_try_lock (MLOCK_T *sl) {
  if(!pthread_mutex_trylock(&(sl)->l)){
    sl->c++;
    return 1;
  }
  return 0;
}

static FORCEINLINE int pthread_init_lock (MLOCK_T *sl) {
  pthread_mutexattr_t attr;
  sl->c=0;
  if(pthread_mutexattr_init(&attr)) return 1;
  if(pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
  if(pthread_mutex_init(&sl->l, &attr)) return 1;
  pthread_mutexattr_destroy(&attr);
  return 0;
}

static FORCEINLINE int pthread_islocked (MLOCK_T *sl) {
  if (pthread_try_lock(sl)) {
    /* acquired: it was locked only if we already held it recursively */
    int ret = (sl->c > 1);
    pthread_release_lock(sl);
    return ret;
  }
  return 1; /* could not acquire: held by another thread */
}

#define INITIAL_LOCK(sl) pthread_init_lock(sl)
#define ACQUIRE_LOCK(sl) pthread_acquire_lock(sl)
#define RELEASE_LOCK(sl) pthread_release_lock(sl)
#define TRY_LOCK(sl) pthread_try_lock(sl)
#define IS_LOCKED(sl) pthread_islocked(sl)

static MLOCK_T magic_init_mutex = {0, PTHREAD_MUTEX_INITIALIZER };
#if HAVE_MORECORE
static MLOCK_T morecore_mutex = {0, PTHREAD_MUTEX_INITIALIZER };
#endif

#else

#define MLOCK_T CRITICAL_SECTION
#define CURRENT_THREAD GetCurrentThreadId()
#define INITIAL_LOCK(s) (!InitializeCriticalSectionAndSpinCount((s), 4000))
#define ACQUIRE_LOCK(s) ( (!((s))->DebugInfo ? INITIAL_LOCK((s)) : 0), EnterCriticalSection((s)), 0)
#define RELEASE_LOCK(s) ( LeaveCriticalSection((s)), 0 )
#define TRY_LOCK(s) ( TryEnterCriticalSection((s)) )
#define IS_LOCKED(s) ( (s)->LockCount >= 0 )
#define NULL_LOCK_INITIALIZER
static MLOCK_T magic_init_mutex;
#if HAVE_MORECORE
static MLOCK_T morecore_mutex;
#endif
#endif
#endif
#endif

#if USE_LOCKS > 1

static MLOCK_T magic_init_mutex = NULL_LOCK_INITIALIZER;
#if HAVE_MORECORE
static MLOCK_T morecore_mutex = NULL_LOCK_INITIALIZER;
#endif
#endif

#if USE_LOCKS
#define USE_LOCK_BIT (2U)
#else
#define USE_LOCK_BIT (0U)
#define INITIAL_LOCK(l)
#endif

#if USE_LOCKS && HAVE_MORECORE
#define ACQUIRE_MORECORE_LOCK() ACQUIRE_LOCK(&morecore_mutex);
#define RELEASE_MORECORE_LOCK() RELEASE_LOCK(&morecore_mutex);
#else
#define ACQUIRE_MORECORE_LOCK()
#define RELEASE_MORECORE_LOCK()
#endif

#if USE_LOCKS
#define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex);
#define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex);
#else
#define ACQUIRE_MAGIC_INIT_LOCK()
#define RELEASE_MAGIC_INIT_LOCK()
#endif
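
/* -----------------------  Chunk representations ------------------------ */

/*
  Chunks are maintained with a boundary-tag method: each chunk carries
  its size in the `head` field, along with PINUSE and CINUSE bits
  recording whether the previous and current chunk are in use.
  `prev_foot` replicates the size of the previous chunk, but is valid
  only while that chunk is free (its space otherwise belongs to the
  user).  The `fd`/`bk` links are present only in free chunks, which
  are threaded onto circular doubly-linked bin lists.  The smallest
  allocatable chunk is MIN_CHUNK_SIZE bytes.
*/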
struct malloc_chunk {
  size_t prev_foot;
  size_t head;
  struct malloc_chunk* fd;
  struct malloc_chunk* bk;
};

typedef struct malloc_chunk mchunk;
typedef struct malloc_chunk* mchunkptr;
typedef struct malloc_chunk* sbinptr;
typedef unsigned int bindex_t;
typedef unsigned int binmap_t;
typedef unsigned int flag_t;

#define MCHUNK_SIZE (sizeof(mchunk))

#if FOOTERS
#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
#else
#define CHUNK_OVERHEAD (SIZE_T_SIZE)
#endif

#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)

#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)

#define MIN_CHUNK_SIZE\
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
#define chunk2mem_int(p) ((uintptr_t)(p) + TWO_SIZE_T_SIZES)
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))

#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))

#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)

#define pad_request(req) \
  (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

#define request2size(req) \
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
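
/*
  A worked example (assuming 4-byte size_t, 8-byte alignment, and
  FOOTERS off, so CHUNK_OVERHEAD == 4 and CHUNK_ALIGN_MASK == 7):
  request2size(13) = pad_request(13) = (13 + 4 + 7) & ~7 = 24 bytes,
  i.e. the 13-byte request plus one size_t of overhead, rounded up to
  the next multiple of the alignment.
*/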
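
/*
  Head-field bits: PINUSE_BIT marks the previous chunk in use,
  CINUSE_BIT marks the current chunk in use, and FLAG4_BIT is spare.
  FENCEPOST_HEAD is the head value written into the pseudo-chunks that
  terminate each segment.
*/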
#define PINUSE_BIT (SIZE_T_ONE)
#define CINUSE_BIT (SIZE_T_TWO)
#define FLAG4_BIT (SIZE_T_FOUR)
#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
#define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)

#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)

#define cinuse(p) ((p)->head & CINUSE_BIT)
#define pinuse(p) ((p)->head & PINUSE_BIT)
#define chunksize(p) ((p)->head & ~(FLAG_BITS))

#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)

#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))

#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))

#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)

#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))

#define set_size_and_pinuse_of_free_chunk(p, s)\
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))

#define set_free_with_pinuse(p, s, n)\
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))

#define is_mmapped(p)\
  (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))

#define overhead_for(p)\
  (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)

#if MMAP_CLEARS
#define calloc_must_clear(p) (!is_mmapped(p))
#else
#define calloc_must_clear(p) (1)
#endif
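
/* ---------------------- Overlaid data structures ----------------------- */

/*
  Free chunks too large for the small bins are kept in bitwise digital
  trees ("tries") keyed on chunk size, one tree per power-of-two size
  range.  The tree fields below overlay the user-data area of a free
  chunk (the first four fields match malloc_chunk); chunks of the same
  size hang off a tree node on its fd/bk ring.
*/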
struct malloc_tree_chunk {
  size_t prev_foot;
  size_t head;
  struct malloc_tree_chunk* fd;
  struct malloc_tree_chunk* bk;

  struct malloc_tree_chunk* child[2];
  struct malloc_tree_chunk* parent;
  bindex_t index;
};

typedef struct malloc_tree_chunk tchunk;
typedef struct malloc_tree_chunk* tchunkptr;
typedef struct malloc_tree_chunk* tbinptr;

#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
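
/* ----------------------------- Segments -------------------------------- */

/*
  Each non-contiguous region of memory obtained from the system is
  recorded as a segment: its base address, size, a link to the next
  segment, and flags noting whether it was mmapped and/or supplied
  externally (EXTERN_BIT).  The segment list is headed by the arena's
  malloc_state.
*/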
struct malloc_segment {
  char* base;
  size_t size;
  struct malloc_segment* next;
  flag_t sflags;
};

#define is_mmapped_segment(S) ((S)->sflags & IS_MMAPPED_BIT)
#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)

typedef struct malloc_segment msegment;
typedef struct malloc_segment* msegmentptr;

#define NSMALLBINS (32U)
#define NTREEBINS (32U)
#define SMALLBIN_SHIFT (3U)
#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
#define TREEBIN_SHIFT (8U)
#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
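
/*
  A malloc_state holds all bookkeeping for one independent arena:
  smallmap/treemap are bitmaps of non-empty small bins and tree bins;
  dv is the "designated victim" chunk preferentially carved up for
  small requests; top is the topmost chunk of the active segment;
  magic is an integrity/ownership check (xored into chunk footers when
  FOOTERS is on); footprint/max_footprint track bytes obtained from
  the system; seg heads the segment list.
*/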
struct malloc_state {
  binmap_t smallmap;
  binmap_t treemap;
  size_t dvsize;
  size_t topsize;
  char* least_addr;
  mchunkptr dv;
  mchunkptr top;
  size_t trim_check;
  size_t release_checks;
  size_t magic;
  mchunkptr smallbins[(NSMALLBINS+1)*2];
  tbinptr treebins[NTREEBINS];
  size_t footprint;
  size_t max_footprint;
  flag_t mflags;
#if USE_LOCKS
  MLOCK_T mutex;
#endif
  msegment seg;
  void* extp;
  size_t exts;
};

typedef struct malloc_state* mstate;

struct malloc_params {
  size_t magic;
  size_t page_size;
  size_t granularity;
  size_t mmap_threshold;
  size_t trim_threshold;
  flag_t default_mflags;
};

static struct malloc_params mparams;

#if !ONLY_MSPACES

static struct malloc_state _gm_;
#define gm (&_gm_)
#define is_global(M) ((M) == &_gm_)

#endif

#define is_initialized(M) ((M)->top != 0)

#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)

#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)

#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)

#define set_lock(M,L)\
  ((M)->mflags = (L)?\
   ((M)->mflags | USE_LOCK_BIT) :\
   ((M)->mflags & ~USE_LOCK_BIT))

#define page_align(S)\
  (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))

#define granularity_align(S)\
  (((S) + (mparams.granularity - SIZE_T_ONE))\
   & ~(mparams.granularity - SIZE_T_ONE))

#ifdef WIN32
#define mmap_align(S) granularity_align(S)
#else
#define mmap_align(S) page_align(S)
#endif

#define is_page_aligned(S)\
  (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
#define is_granularity_aligned(S)\
  (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)

#define segment_holds(S, A)\
  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)

static msegmentptr segment_holding(mstate m, char* addr) {
  msegmentptr sp = &m->seg;
  for (;;) {
    if (addr >= sp->base && addr < sp->base + sp->size)
      return sp;
    if ((sp = sp->next) == 0)
      return 0;
  }
}

static int has_segment_link(mstate m, msegmentptr ss) {
  msegmentptr sp = &m->seg;
  for (;;) {
    if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
      return 1;
    if ((sp = sp->next) == 0)
      return 0;
  }
}

#ifndef MORECORE_CANNOT_TRIM
#define should_trim(M,s) ((s) > (M)->trim_check)
#else
#define should_trim(M,s) (0)
#endif
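
/*
  TOP_FOOT_SIZE is the space reserved at the end of each segment for a
  segment record and a fencepost chunk, so the top chunk can never run
  into the bookkeeping that follows it.
*/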
#define TOP_FOOT_SIZE\
  (align_offset(chunk2mem_int(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
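
/*
  PREACTION/POSTACTION bracket operations that need the arena lock.
  PREACTION also triggers one-time global initialization; it should
  return 0 on success and nonzero on failure, in which case the
  bracketed operation is skipped.
*/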
#if USE_LOCKS

#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())

#define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
#else

#ifndef PREACTION
#define PREACTION(M) (0)
#endif

#ifndef POSTACTION
#define POSTACTION(M)
#endif

#endif

#if PROCEED_ON_ERROR

int malloc_corruption_error_count;

static void reset_on_error(mstate m);

#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
#define USAGE_ERROR_ACTION(m, p)

#else

#ifndef CORRUPTION_ERROR_ACTION
#define CORRUPTION_ERROR_ACTION(m) ABORT
#endif

#ifndef USAGE_ERROR_ACTION
#define USAGE_ERROR_ACTION(m,p) ABORT
#endif

#endif
#if ! DEBUG

#define check_free_chunk(M,P)
#define check_inuse_chunk(M,P)
#define check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P)
#define check_malloc_state(M)
#define check_top_chunk(M,P)

#else
#define check_free_chunk(M,P) do_check_free_chunk(M,P)
#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
#define check_top_chunk(M,P) do_check_top_chunk(M,P)
#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
#define check_malloc_state(M) do_check_malloc_state(M)

static void do_check_any_chunk(mstate m, mchunkptr p);
static void do_check_top_chunk(mstate m, mchunkptr p);
static void do_check_mmapped_chunk(mstate m, mchunkptr p);
static void do_check_inuse_chunk(mstate m, mchunkptr p);
static void do_check_free_chunk(mstate m, mchunkptr p);
static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
static void do_check_tree(mstate m, tchunkptr t);
static void do_check_treebin(mstate m, bindex_t i);
static void do_check_smallbin(mstate m, bindex_t i);
static void do_check_malloc_state(mstate m);
static int bin_find(mstate m, mchunkptr x);
static size_t traverse_and_check(mstate m);
#endif

#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
#define small_index(s) ((s) >> SMALLBIN_SHIFT)
#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))

#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
#define treebin_at(M,i) (&((M)->treebins[i]))
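
/*
  compute_tree_index(S, I) maps a chunk size S to its tree bin index I.
  Tree bins are log-spaced, two bins per power of two: the highest set
  bit of S above TREEBIN_SHIFT picks the power-of-two range and the
  next bit picks the half of that range.  The GCC and MSVC variants use
  a bit-scan instruction; the portable fallback computes floor(log2)
  with shifts and masks.
*/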
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#define compute_tree_index(S, I)\
{\
  unsigned int X = S >> TREEBIN_SHIFT;\
  if (X == 0)\
    I = 0;\
  else if (X > 0xFFFF)\
    I = NTREEBINS-1;\
  else {\
    unsigned int K;\
    __asm__("bsrl\t%1, %0\n\t" : "=r" (K) : "rm" (X));\
    I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  }\
}

#elif defined(_MSC_VER) && _MSC_VER>=1300
#define compute_tree_index(S, I)\
{\
  size_t X = S >> TREEBIN_SHIFT;\
  if (X == 0)\
    I = 0;\
  else if (X > 0xFFFF)\
    I = NTREEBINS-1;\
  else {\
    unsigned int K;\
    _BitScanReverse((DWORD *) &K, X);\
    I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  }\
}
#else
#define compute_tree_index(S, I)\
{\
  size_t X = S >> TREEBIN_SHIFT;\
  if (X == 0)\
    I = 0;\
  else if (X > 0xFFFF)\
    I = NTREEBINS-1;\
  else {\
    unsigned int Y = (unsigned int)X;\
    unsigned int N = ((Y - 0x100) >> 16) & 8;\
    unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
    N += K;\
    N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
    K = 14 - N + ((Y <<= K) >> 15);\
    I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
  }\
}
#endif

#define bit_for_tree_index(i) \
   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)

#define leftshift_for_tree_index(i) \
   ((i == NTREEBINS-1)? 0 : \
    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))

#define minsize_for_tree_index(i) \
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
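
/*
  Bin bitmaps: bit i of smallmap/treemap is set exactly when bin i is
  non-empty, so finding a usable bin is a couple of bit tricks
  (least_bit, left_bits) plus a find-first-set (compute_bit2idx).
*/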
#define idx2bit(i) ((binmap_t)(1) << (i))

#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))

#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))

#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#define compute_bit2idx(X, I)\
{\
  unsigned int J;\
  __asm__("bsfl\t%1, %0\n\t" : "=r" (J) : "rm" (X));\
  I = (bindex_t)J;\
}
#elif defined(_MSC_VER) && _MSC_VER>=1300
#define compute_bit2idx(X, I)\
{\
  unsigned int J;\
  _BitScanForward((DWORD *) &J, X);\
  I = (bindex_t)J;\
}

#else
#if USE_BUILTIN_FFS
#define compute_bit2idx(X, I) I = ffs(X)-1

#else
#define compute_bit2idx(X, I)\
{\
  unsigned int Y = X - 1;\
  unsigned int K = Y >> (16-4) & 16;\
  unsigned int N = K;        Y >>= K;\
  N += K = Y >> (8-3) &  8;  Y >>= K;\
  N += K = Y >> (4-2) &  4;  Y >>= K;\
  N += K = Y >> (2-1) &  2;  Y >>= K;\
  N += K = Y >> (1-0) &  1;  Y >>= K;\
  I = (bindex_t)(N + Y);\
}
#endif
#endif

#define least_bit(x) ((x) & -(x))

#define left_bits(x) ((x<<1) | -(x<<1))

#define same_or_left_bits(x) ((x) | -(x))
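
/*
  Unless INSECURE is set, the macros below sanity-check addresses and
  in-use bits before trusting them, turning many heap corruptions into
  a call to CORRUPTION_ERROR_ACTION rather than silent damage.
  RTCHECK marks these tests as expected-true for branch prediction.
*/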
#if !INSECURE

#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)

#define ok_next(p, n) ((char*)(p) < (char*)(n))

#define ok_cinuse(p) cinuse(p)

#define ok_pinuse(p) pinuse(p)

#else
#define ok_address(M, a) (1)
#define ok_next(b, n) (1)
#define ok_cinuse(p) (1)
#define ok_pinuse(p) (1)
#endif

#if (FOOTERS && !INSECURE)

#define ok_magic(M) ((M)->magic == mparams.magic)
#else
#define ok_magic(M) (1)
#endif

#if !INSECURE
#if defined(__GNUC__) && __GNUC__ >= 3
#define RTCHECK(e) __builtin_expect(e, 1)
#else
#define RTCHECK(e) (e)
#endif
#else
#define RTCHECK(e) (1)
#endif
#if !FOOTERS

#define mark_inuse_foot(M,p,s)

#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))

#else

#define mark_inuse_foot(M,p,s)\
  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))

#define get_mstate_for(p)\
  ((mstate)(((mchunkptr)((char*)(p) +\
    (chunksize(p))))->prev_foot ^ mparams.magic))

#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
  mark_inuse_foot(M,p,s))

#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
  mark_inuse_foot(M,p,s))

#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  mark_inuse_foot(M, p, s))

#endif
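
/*
  init_mparams initializes global parameters on first use: the footer
  xor key (mparams.magic), page size and allocation granularity, and
  the default thresholds; it also sanity-checks the size_t/pointer
  assumptions the allocator depends on, aborting if they fail.
*/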
static int init_mparams(void) {
  if (mparams.page_size == 0) {
    size_t s;

    mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
    mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
#if MORECORE_CONTIGUOUS
    mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
#else
    mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
#endif

#if (FOOTERS && !INSECURE)
    {
#if USE_DEV_RANDOM
      int fd;
      unsigned char buf[sizeof(size_t)];

      if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
          read(fd, buf, sizeof(buf)) == sizeof(buf)) {
        s = *((size_t *) buf);
        close(fd);
      }
      else
#endif
        s = (size_t)(time(0) ^ (size_t)0x55555555U);

      s |= (size_t)8U;
      s &= ~(size_t)7U;
    }
#else
    s = (size_t)0x58585858U;
#endif
    ACQUIRE_MAGIC_INIT_LOCK();
    if (mparams.magic == 0) {
      mparams.magic = s;
#if !ONLY_MSPACES
      INITIAL_LOCK(&gm->mutex);
      gm->mflags = mparams.default_mflags;
#endif
    }
    RELEASE_MAGIC_INIT_LOCK();

#ifndef WIN32
    mparams.page_size = malloc_getpagesize;
    mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
                           DEFAULT_GRANULARITY : mparams.page_size);
#else
    {
      SYSTEM_INFO system_info;
      GetSystemInfo(&system_info);
      mparams.page_size = system_info.dwPageSize;
      mparams.granularity = system_info.dwAllocationGranularity;
    }
#endif

    if ((sizeof(size_t) != sizeof(char*)) ||
        (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
        (sizeof(int) < 4) ||
        (MALLOC_ALIGNMENT < (size_t)8U) ||
        ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
        ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||
        ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||
        ((mparams.page_size & (mparams.page_size-SIZE_T_ONE)) != 0))
      ABORT;
  }
  return 0;
}
static int change_mparam(int param_number, int value) {
  size_t val = (size_t)value;
  init_mparams();
  switch(param_number) {
  case M_TRIM_THRESHOLD:
    mparams.trim_threshold = val;
    return 1;
  case M_GRANULARITY:
    if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
      mparams.granularity = val;
      return 1;
    }
    else
      return 0;
  case M_MMAP_THRESHOLD:
    mparams.mmap_threshold = val;
    return 1;
  default:
    return 0;
  }
}
02914 #if DEBUG
02915
02916
02917
02918 static void do_check_any_chunk(mstate m, mchunkptr p) {
02919 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
02920 assert(ok_address(m, p));
02921 }
02922
02923
02924 static void do_check_top_chunk(mstate m, mchunkptr p) {
02925 msegmentptr sp = segment_holding(m, (char*)p);
02926 size_t sz = p->head & ~INUSE_BITS;
02927 assert(sp != 0);
02928 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
02929 assert(ok_address(m, p));
02930 assert(sz == m->topsize);
02931 assert(sz > 0);
02932 assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
02933 assert(pinuse(p));
02934 assert(!pinuse(chunk_plus_offset(p, sz)));
02935 }
02936
02937
02938 static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
02939 size_t sz = chunksize(p);
02940 size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
02941 assert(is_mmapped(p));
02942 assert(use_mmap(m));
02943 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
02944 assert(ok_address(m, p));
02945 assert(!is_small(sz));
02946 assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
02947 assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
02948 assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
02949 }
02950
02951
02952 static void do_check_inuse_chunk(mstate m, mchunkptr p) {
02953 do_check_any_chunk(m, p);
02954 assert(cinuse(p));
02955 assert(next_pinuse(p));
02956
02957 assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
02958 if (is_mmapped(p))
02959 do_check_mmapped_chunk(m, p);
02960 }
02961
02962
02963 static void do_check_free_chunk(mstate m, mchunkptr p) {
02964 size_t sz = chunksize(p);
02965 mchunkptr next = chunk_plus_offset(p, sz);
02966 do_check_any_chunk(m, p);
02967 assert(!cinuse(p));
02968 assert(!next_pinuse(p));
02969 assert (!is_mmapped(p));
02970 if (p != m->dv && p != m->top) {
02971 if (sz >= MIN_CHUNK_SIZE) {
02972 assert((sz & CHUNK_ALIGN_MASK) == 0);
02973 assert(is_aligned(chunk2mem(p)));
02974 assert(next->prev_foot == sz);
02975 assert(pinuse(p));
02976 assert (next == m->top || cinuse(next));
02977 assert(p->fd->bk == p);
02978 assert(p->bk->fd == p);
02979 }
02980 else
02981 assert(sz == SIZE_T_SIZE);
02982 }
02983 }
02984
02985
02986 static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
02987 if (mem != 0) {
02988 mchunkptr p = mem2chunk(mem);
02989 size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
02990 do_check_inuse_chunk(m, p);
02991 assert((sz & CHUNK_ALIGN_MASK) == 0);
02992 assert(sz >= MIN_CHUNK_SIZE);
02993 assert(sz >= s);
02994
02995 assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
02996 }
02997 }
02998
02999
03000 static void do_check_tree(mstate m, tchunkptr t) {
03001 tchunkptr head = 0;
03002 tchunkptr u = t;
03003 bindex_t tindex = t->index;
03004 size_t tsize = chunksize(t);
03005 bindex_t idx;
03006 compute_tree_index(tsize, idx);
03007 assert(tindex == idx);
03008 assert(tsize >= MIN_LARGE_SIZE);
03009 assert(tsize >= minsize_for_tree_index(idx));
03010 assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
03011
03012 do {
03013 do_check_any_chunk(m, ((mchunkptr)u));
03014 assert(u->index == tindex);
03015 assert(chunksize(u) == tsize);
03016 assert(!cinuse(u));
03017 assert(!next_pinuse(u));
03018 assert(u->fd->bk == u);
03019 assert(u->bk->fd == u);
03020 if (u->parent == 0) {
03021 assert(u->child[0] == 0);
03022 assert(u->child[1] == 0);
03023 }
03024 else {
03025 assert(head == 0);
03026 head = u;
03027 assert(u->parent != u);
03028 assert (u->parent->child[0] == u ||
03029 u->parent->child[1] == u ||
03030 *((tbinptr*)(u->parent)) == u);
03031 if (u->child[0] != 0) {
03032 assert(u->child[0]->parent == u);
03033 assert(u->child[0] != u);
03034 do_check_tree(m, u->child[0]);
03035 }
03036 if (u->child[1] != 0) {
03037 assert(u->child[1]->parent == u);
03038 assert(u->child[1] != u);
03039 do_check_tree(m, u->child[1]);
03040 }
03041 if (u->child[0] != 0 && u->child[1] != 0) {
03042 assert(chunksize(u->child[0]) < chunksize(u->child[1]));
03043 }
03044 }
03045 u = u->fd;
03046 } while (u != t);
03047 assert(head != 0);
03048 }
03049
03050
03051 static void do_check_treebin(mstate m, bindex_t i) {
03052 tbinptr* tb = treebin_at(m, i);
03053 tchunkptr t = *tb;
03054 int empty = (m->treemap & (1U << i)) == 0;
03055 if (t == 0)
03056 assert(empty);
03057 if (!empty)
03058 do_check_tree(m, t);
03059 }
03060
03061
03062 static void do_check_smallbin(mstate m, bindex_t i) {
03063 sbinptr b = smallbin_at(m, i);
03064 mchunkptr p = b->bk;
03065 unsigned int empty = (m->smallmap & (1U << i)) == 0;
03066 if (p == b)
03067 assert(empty);
03068 if (!empty) {
03069 for (; p != b; p = p->bk) {
03070 size_t size = chunksize(p);
03071 mchunkptr q;
03072
03073 do_check_free_chunk(m, p);
03074
03075 assert(small_index(size) == i);
03076 assert(p->bk == b || chunksize(p->bk) == chunksize(p));
03077
03078 q = next_chunk(p);
03079 if (q->head != FENCEPOST_HEAD)
03080 do_check_inuse_chunk(m, q);
03081 }
03082 }
03083 }
03084
03085
03086 static int bin_find(mstate m, mchunkptr x) {
03087 size_t size = chunksize(x);
03088 if (is_small(size)) {
03089 bindex_t sidx = small_index(size);
03090 sbinptr b = smallbin_at(m, sidx);
03091 if (smallmap_is_marked(m, sidx)) {
03092 mchunkptr p = b;
03093 do {
03094 if (p == x)
03095 return 1;
03096 } while ((p = p->fd) != b);
03097 }
03098 }
03099 else {
03100 bindex_t tidx;
03101 compute_tree_index(size, tidx);
03102 if (treemap_is_marked(m, tidx)) {
03103 tchunkptr t = *treebin_at(m, tidx);
03104 size_t sizebits = size << leftshift_for_tree_index(tidx);
03105 while (t != 0 && chunksize(t) != size) {
03106 t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
03107 sizebits <<= 1;
03108 }
03109 if (t != 0) {
03110 tchunkptr u = t;
03111 do {
03112 if (u == (tchunkptr)x)
03113 return 1;
03114 } while ((u = u->fd) != t);
03115 }
03116 }
03117 }
03118 return 0;
03119 }
03120
03121
03122 static size_t traverse_and_check(mstate m) {
03123 size_t sum = 0;
03124 if (is_initialized(m)) {
03125 msegmentptr s = &m->seg;
03126 sum += m->topsize + TOP_FOOT_SIZE;
03127 while (s != 0) {
03128 mchunkptr q = align_as_chunk(s->base);
03129 mchunkptr lastq = 0;
03130 assert(pinuse(q));
03131 while (segment_holds(s, q) &&
03132 q != m->top && q->head != FENCEPOST_HEAD) {
03133 sum += chunksize(q);
03134 if (cinuse(q)) {
03135 assert(!bin_find(m, q));
03136 do_check_inuse_chunk(m, q);
03137 }
03138 else {
03139 assert(q == m->dv || bin_find(m, q));
03140 assert(lastq == 0 || cinuse(lastq));
03141 do_check_free_chunk(m, q);
03142 }
03143 lastq = q;
03144 q = next_chunk(q);
03145 }
03146 s = s->next;
03147 }
03148 }
03149 return sum;
03150 }
03151
03152
03153 static void do_check_malloc_state(mstate m) {
03154 bindex_t i;
03155 size_t total;
03156
03157 for (i = 0; i < NSMALLBINS; ++i)
03158 do_check_smallbin(m, i);
03159 for (i = 0; i < NTREEBINS; ++i)
03160 do_check_treebin(m, i);
03161
03162 if (m->dvsize != 0) {
03163 do_check_any_chunk(m, m->dv);
03164 assert(m->dvsize == chunksize(m->dv));
03165 assert(m->dvsize >= MIN_CHUNK_SIZE);
03166 assert(bin_find(m, m->dv) == 0);
03167 }
03168
03169 if (m->top != 0) {
03170 do_check_top_chunk(m, m->top);
03171
03172 assert(m->topsize > 0);
03173 assert(bin_find(m, m->top) == 0);
03174 }
03175
03176 total = traverse_and_check(m);
03177 assert(total <= m->footprint);
03178 assert(m->footprint <= m->max_footprint);
03179 }
03180 #endif /* DEBUG */
03181
03182
03183
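/*
  Statistics support: internal_mallinfo and internal_malloc_stats
  traverse all segments, totalling free and in-use space.
*/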
03184 #if !NO_MALLINFO
03185 static struct mallinfo internal_mallinfo(mstate m) {
03186 struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
03187 if (!PREACTION(m)) {
03188 check_malloc_state(m);
03189 if (is_initialized(m)) {
03190 size_t nfree = SIZE_T_ONE;
03191 size_t mfree = m->topsize + TOP_FOOT_SIZE;
03192 size_t sum = mfree;
03193 msegmentptr s = &m->seg;
03194 while (s != 0) {
03195 mchunkptr q = align_as_chunk(s->base);
03196 while (segment_holds(s, q) &&
03197 q != m->top && q->head != FENCEPOST_HEAD) {
03198 size_t sz = chunksize(q);
03199 sum += sz;
03200 if (!cinuse(q)) {
03201 mfree += sz;
03202 ++nfree;
03203 }
03204 q = next_chunk(q);
03205 }
03206 s = s->next;
03207 }
03208
03209 nm.arena = sum;
03210 nm.ordblks = nfree;
03211 nm.hblkhd = m->footprint - sum;
03212 nm.usmblks = m->max_footprint;
03213 nm.uordblks = m->footprint - mfree;
03214 nm.fordblks = mfree;
03215 nm.keepcost = m->topsize;
03216 }
03217
03218 POSTACTION(m);
03219 }
03220 return nm;
03221 }
03222 #endif /* !NO_MALLINFO */
03223
03224 static void internal_malloc_stats(mstate m) {
03225 if (!PREACTION(m)) {
03226 size_t maxfp = 0;
03227 size_t fp = 0;
03228 size_t used = 0;
03229 check_malloc_state(m);
03230 if (is_initialized(m)) {
03231 msegmentptr s = &m->seg;
03232 maxfp = m->max_footprint;
03233 fp = m->footprint;
03234 used = fp - (m->topsize + TOP_FOOT_SIZE);
03235
03236 while (s != 0) {
03237 mchunkptr q = align_as_chunk(s->base);
03238 while (segment_holds(s, q) &&
03239 q != m->top && q->head != FENCEPOST_HEAD) {
03240 if (!cinuse(q))
03241 used -= chunksize(q);
03242 q = next_chunk(q);
03243 }
03244 s = s->next;
03245 }
03246 }
03247
03248 fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
03249 fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp));
03250 fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used));
03251
03252 POSTACTION(m);
03253 }
03254 }
03255
03256
03257
03258
03259
03260
03261
03262
03263
03264
03265
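/* Link a free chunk into a smallbin. */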
03266 #define insert_small_chunk(M, P, S) {\
03267 bindex_t I = small_index(S);\
03268 mchunkptr B = smallbin_at(M, I);\
03269 mchunkptr F = B;\
03270 assert(S >= MIN_CHUNK_SIZE);\
03271 if (!smallmap_is_marked(M, I))\
03272 mark_smallmap(M, I);\
03273 else if (RTCHECK(ok_address(M, B->fd)))\
03274 F = B->fd;\
03275 else {\
03276 CORRUPTION_ERROR_ACTION(M);\
03277 }\
03278 B->fd = P;\
03279 F->bk = P;\
03280 P->fd = F;\
03281 P->bk = B;\
03282 }
03283
03284
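/* Unlink a chunk from a smallbin. */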
03285 #define unlink_small_chunk(M, P, S) {\
03286 mchunkptr F = P->fd;\
03287 mchunkptr B = P->bk;\
03288 bindex_t I = small_index(S);\
03289 assert(P != B);\
03290 assert(P != F);\
03291 assert(chunksize(P) == small_index2size(I));\
03292 if (F == B)\
03293 clear_smallmap(M, I);\
03294 else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
03295 (B == smallbin_at(M,I) || ok_address(M, B)))) {\
03296 F->bk = B;\
03297 B->fd = F;\
03298 }\
03299 else {\
03300 CORRUPTION_ERROR_ACTION(M);\
03301 }\
03302 }
03303
03304
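/* Unlink the first chunk from a smallbin. */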
03305 #define unlink_first_small_chunk(M, B, P, I) {\
03306 mchunkptr F = P->fd;\
03307 assert(P != B);\
03308 assert(P != F);\
03309 assert(chunksize(P) == small_index2size(I));\
03310 if (B == F)\
03311 clear_smallmap(M, I);\
03312 else if (RTCHECK(ok_address(M, F))) {\
03313 B->fd = F;\
03314 F->bk = B;\
03315 }\
03316 else {\
03317 CORRUPTION_ERROR_ACTION(M);\
03318 }\
03319 }
03320
03321
03322
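/* Replace dv node, binning the old one. Used only when dvsize is known to be small. */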
03323 #define replace_dv(M, P, S) {\
03324 size_t DVS = M->dvsize;\
03325 if (DVS != 0) {\
03326 mchunkptr DV = M->dv;\
03327 assert(is_small(DVS));\
03328 insert_small_chunk(M, DV, DVS);\
03329 }\
03330 M->dvsize = S;\
03331 M->dv = P;\
03332 }
03333
03334
03335
03336
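/* Insert chunk X of size S into the tree bin for its size class. */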
03337 #define insert_large_chunk(M, X, S) {\
03338 tbinptr* H;\
03339 bindex_t I;\
03340 compute_tree_index(S, I);\
03341 H = treebin_at(M, I);\
03342 X->index = I;\
03343 X->child[0] = X->child[1] = 0;\
03344 if (!treemap_is_marked(M, I)) {\
03345 mark_treemap(M, I);\
03346 *H = X;\
03347 X->parent = (tchunkptr)H;\
03348 X->fd = X->bk = X;\
03349 }\
03350 else {\
03351 tchunkptr T = *H;\
03352 size_t K = S << leftshift_for_tree_index(I);\
03353 for (;;) {\
03354 if (chunksize(T) != S) {\
03355 tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
03356 K <<= 1;\
03357 if (*C != 0)\
03358 T = *C;\
03359 else if (RTCHECK(ok_address(M, C))) {\
03360 *C = X;\
03361 X->parent = T;\
03362 X->fd = X->bk = X;\
03363 break;\
03364 }\
03365 else {\
03366 CORRUPTION_ERROR_ACTION(M);\
03367 break;\
03368 }\
03369 }\
03370 else {\
03371 tchunkptr F = T->fd;\
03372 if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
03373 T->fd = F->bk = X;\
03374 X->fd = F;\
03375 X->bk = T;\
03376 X->parent = 0;\
03377 break;\
03378 }\
03379 else {\
03380 CORRUPTION_ERROR_ACTION(M);\
03381 break;\
03382 }\
03383 }\
03384 }\
03385 }\
03386 }
03387
03388
03389
03390
03391
03392
03393
03394
03395
03396
03397
03398
03399
03400
03401
03402
03403
03404
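/*
  Unlink steps:
  1. If x is a chained node, unlink it from its same-sized fd/bk links
     and choose its bk node as its replacement.
  2. If x was the last node of its size, but not a leaf node, it must
     be replaced with a leaf node (not merely one of its children),
     so descend to the rightmost surviving leaf and splice it out.
  3. If x has a parent link, relink the parent (or the treebin head)
     and any children of x to x's replacement, clearing the treemap
     bit if the bin becomes empty.
*/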
03405 #define unlink_large_chunk(M, X) {\
03406 tchunkptr XP = X->parent;\
03407 tchunkptr R;\
03408 if (X->bk != X) {\
03409 tchunkptr F = X->fd;\
03410 R = X->bk;\
03411 if (RTCHECK(ok_address(M, F))) {\
03412 F->bk = R;\
03413 R->fd = F;\
03414 }\
03415 else {\
03416 CORRUPTION_ERROR_ACTION(M);\
03417 }\
03418 }\
03419 else {\
03420 tchunkptr* RP;\
03421 if (((R = *(RP = &(X->child[1]))) != 0) ||\
03422 ((R = *(RP = &(X->child[0]))) != 0)) {\
03423 tchunkptr* CP;\
03424 while ((*(CP = &(R->child[1])) != 0) ||\
03425 (*(CP = &(R->child[0])) != 0)) {\
03426 R = *(RP = CP);\
03427 }\
03428 if (RTCHECK(ok_address(M, RP)))\
03429 *RP = 0;\
03430 else {\
03431 CORRUPTION_ERROR_ACTION(M);\
03432 }\
03433 }\
03434 }\
03435 if (XP != 0) {\
03436 tbinptr* H = treebin_at(M, X->index);\
03437 if (X == *H) {\
03438 if ((*H = R) == 0) \
03439 clear_treemap(M, X->index);\
03440 }\
03441 else if (RTCHECK(ok_address(M, XP))) {\
03442 if (XP->child[0] == X) \
03443 XP->child[0] = R;\
03444 else \
03445 XP->child[1] = R;\
03446 }\
03447 else\
03448 CORRUPTION_ERROR_ACTION(M);\
03449 if (R != 0) {\
03450 if (RTCHECK(ok_address(M, R))) {\
03451 tchunkptr C0, C1;\
03452 R->parent = XP;\
03453 if ((C0 = X->child[0]) != 0) {\
03454 if (RTCHECK(ok_address(M, C0))) {\
03455 R->child[0] = C0;\
03456 C0->parent = R;\
03457 }\
03458 else\
03459 CORRUPTION_ERROR_ACTION(M);\
03460 }\
03461 if ((C1 = X->child[1]) != 0) {\
03462 if (RTCHECK(ok_address(M, C1))) {\
03463 R->child[1] = C1;\
03464 C1->parent = R;\
03465 }\
03466 else\
03467 CORRUPTION_ERROR_ACTION(M);\
03468 }\
03469 }\
03470 else\
03471 CORRUPTION_ERROR_ACTION(M);\
03472 }\
03473 }\
03474 }
03475
03476
03477
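/* Relays to large vs small bin operations. */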
03478 #define insert_chunk(M, P, S)\
03479 if (is_small(S)) insert_small_chunk(M, P, S)\
03480 else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
03481
03482 #define unlink_chunk(M, P, S)\
03483 if (is_small(S)) unlink_small_chunk(M, P, S)\
03484 else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
03485
03486
03487
03488
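/* Relays to internal calls to malloc/free from realloc, memalign etc. */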
03489 #if ONLY_MSPACES
03490 #define internal_malloc(m, b) mspace_malloc(m, b)
03491 #define internal_free(m, mem) mspace_free(m,mem);
03492 #else
03493 #if MSPACES
03494 #define internal_malloc(m, b)\
03495 (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
03496 #define internal_free(m, mem)\
03497 if (m == gm) dlfree(mem); else mspace_free(m,mem);
03498 #else
03499 #define internal_malloc(m, b) dlmalloc(b)
03500 #define internal_free(m, mem) dlfree(mem)
03501 #endif /* MSPACES */
03502 #endif /* ONLY_MSPACES */
03503
03504
03505
03506
03507
03508
03509
03510
03511
03512
03513
03514
03515
03516
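/*
  Directly mmapped chunks are set up with an offset to the start of
  the mmapped region stored in the prev_foot field of the chunk. This
  allows reconstruction of the required argument to MUNMAP when freed,
  and also allows adjustment of the returned chunk to meet alignment
  requirements (especially in memalign).
*/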
03517 static void* mmap_alloc(mstate m, size_t nb) {
03518 size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
03519 if (mmsize > nb) {
03520 char* mm = (char*)(DIRECT_MMAP(mmsize));
03521 if (mm != CMFAIL) {
03522 size_t offset = align_offset(chunk2mem(mm));
03523 size_t psize = mmsize - offset - MMAP_FOOT_PAD;
03524 mchunkptr p = (mchunkptr)(mm + offset);
03525 p->prev_foot = offset | IS_MMAPPED_BIT;
03526 (p)->head = (psize|CINUSE_BIT);
03527 mark_inuse_foot(m, p, psize);
03528 chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
03529 chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
03530
03531 if (mm < m->least_addr)
03532 m->least_addr = mm;
03533 if ((m->footprint += mmsize) > m->max_footprint)
03534 m->max_footprint = m->footprint;
03535 assert(is_aligned(chunk2mem(p)));
03536 check_mmapped_chunk(m, p);
03537 return chunk2mem(p);
03538 }
03539 }
03540 return 0;
03541 }
03542
03543
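/*
  Try to resize a direct-mmapped chunk: keep it when the existing
  mapping is already close enough in size, else attempt CALL_MREMAP.
*/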
03544 static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) {
03545 size_t oldsize = chunksize(oldp);
03546 if (is_small(nb))
03547 return 0;
03548
03549 if (oldsize >= nb + SIZE_T_SIZE &&
03550 (oldsize - nb) <= (mparams.granularity << 1))
03551 return oldp;
03552 else {
03553 size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
03554 size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
03555 size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
03556 char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
03557 oldmmsize, newmmsize, 1);
03558 if (cp != CMFAIL) {
03559 mchunkptr newp = (mchunkptr)(cp + offset);
03560 size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
03561 newp->head = (psize|CINUSE_BIT);
03562 mark_inuse_foot(m, newp, psize);
03563 chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
03564 chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
03565
03566 if (cp < m->least_addr)
03567 m->least_addr = cp;
03568 if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
03569 m->max_footprint = m->footprint;
03570 check_mmapped_chunk(m, newp);
03571 return newp;
03572 }
03573 }
03574 return 0;
03575 }
03576
03577
03578
03579
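/* Initialize top chunk and its size. */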
03580 static void init_top(mstate m, mchunkptr p, size_t psize) {
03581
03582 size_t offset = align_offset(chunk2mem(p));
03583 p = (mchunkptr)((char*)p + offset);
03584 psize -= offset;
03585
03586 m->top = p;
03587 m->topsize = psize;
03588 p->head = psize | PINUSE_BIT;
03589
03590 chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
03591 m->trim_check = mparams.trim_threshold;
03592 }
03593
03594
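/* Initialize bins for a new mstate that is otherwise zeroed out. */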
03595 static void init_bins(mstate m) {
03596
03597 bindex_t i;
03598 for (i = 0; i < NSMALLBINS; ++i) {
03599 sbinptr bin = smallbin_at(m,i);
03600 bin->fd = bin->bk = bin;
03601 }
03602 }
03603
03604 #if PROCEED_ON_ERROR
03605
03606
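/* Reset all bookkeeping after a detected corruption so allocation can proceed. */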
03607 static void reset_on_error(mstate m) {
03608 int i;
03609 ++malloc_corruption_error_count;
03610
03611 m->smallbins = m->treebins = 0;
03612 m->dvsize = m->topsize = 0;
03613 m->seg.base = 0;
03614 m->seg.size = 0;
03615 m->seg.next = 0;
03616 m->top = m->dv = 0;
03617 for (i = 0; i < NTREEBINS; ++i)
03618 *treebin_at(m, i) = 0;
03619 init_bins(m);
03620 }
03621 #endif /* PROCEED_ON_ERROR */
03622
03623
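/* Allocate chunk and prepend remainder with chunk in successor base. */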
03624 static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
03625 size_t nb) {
03626 mchunkptr p = align_as_chunk(newbase);
03627 mchunkptr oldfirst = align_as_chunk(oldbase);
03628 size_t psize = (char*)oldfirst - (char*)p;
03629 mchunkptr q = chunk_plus_offset(p, nb);
03630 size_t qsize = psize - nb;
03631 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
03632
03633 assert((char*)oldfirst > (char*)q);
03634 assert(pinuse(oldfirst));
03635 assert(qsize >= MIN_CHUNK_SIZE);
03636
03637
03638 if (oldfirst == m->top) {
03639 size_t tsize = m->topsize += qsize;
03640 m->top = q;
03641 q->head = tsize | PINUSE_BIT;
03642 check_top_chunk(m, q);
03643 }
03644 else if (oldfirst == m->dv) {
03645 size_t dsize = m->dvsize += qsize;
03646 m->dv = q;
03647 set_size_and_pinuse_of_free_chunk(q, dsize);
03648 }
03649 else {
03650 if (!cinuse(oldfirst)) {
03651 size_t nsize = chunksize(oldfirst);
03652 unlink_chunk(m, oldfirst, nsize);
03653 oldfirst = chunk_plus_offset(oldfirst, nsize);
03654 qsize += nsize;
03655 }
03656 set_free_with_pinuse(q, qsize, oldfirst);
03657 insert_chunk(m, q, qsize);
03658 check_free_chunk(m, q);
03659 }
03660
03661 check_malloced_chunk(m, chunk2mem(p), nb);
03662 return chunk2mem(p);
03663 }
03664
03665
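/* Add a segment to hold a new noncontiguous region. */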
03666 static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
03667
03668 char* old_top = (char*)m->top;
03669 msegmentptr oldsp = segment_holding(m, old_top);
03670 char* old_end = oldsp->base + oldsp->size;
03671 size_t ssize = pad_request(sizeof(struct malloc_segment));
03672 char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
03673 size_t offset = align_offset(chunk2mem(rawsp));
03674 char* asp = rawsp + offset;
03675 char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
03676 mchunkptr sp = (mchunkptr)csp;
03677 msegmentptr ss = (msegmentptr)(chunk2mem(sp));
03678 mchunkptr tnext = chunk_plus_offset(sp, ssize);
03679 mchunkptr p = tnext;
03680 int nfences = 0;
03681
03682
03683 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
03684
03685
03686 assert(is_aligned(ss));
03687 set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
03688 *ss = m->seg;
03689 m->seg.base = tbase;
03690 m->seg.size = tsize;
03691 m->seg.sflags = mmapped;
03692 m->seg.next = ss;
03693
03694
03695 for (;;) {
03696 mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
03697 p->head = FENCEPOST_HEAD;
03698 ++nfences;
03699 if ((char*)(&(nextp->head)) < old_end)
03700 p = nextp;
03701 else
03702 break;
03703 }
03704 assert(nfences >= 2);
03705
03706
03707 if (csp != old_top) {
03708 mchunkptr q = (mchunkptr)old_top;
03709 size_t psize = csp - old_top;
03710 mchunkptr tn = chunk_plus_offset(q, psize);
03711 set_free_with_pinuse(q, psize, tn);
03712 insert_chunk(m, q, psize);
03713 }
03714
03715 check_top_chunk(m, m->top);
03716 }
03717
03718
03719
03720
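/* Get memory from system using MORECORE or MMAP. */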
03721 static void* sys_alloc(mstate m, size_t nb) {
03722 char* tbase = CMFAIL;
03723 size_t tsize = 0;
03724 flag_t mmap_flag = 0;
03725
03726 init_mparams();
03727
03728
03729 if (use_mmap(m) && nb >= mparams.mmap_threshold) {
03730 void* mem = mmap_alloc(m, nb);
03731 if (mem != 0)
03732 return mem;
03733 }
03734
03735
03736
03737
03738
03739
03740
03741
03742
03743
03744
03745
03746
03747
03748
03749
03750
03751
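/*
  Try getting memory in any of three ways (in most-preferred to
  least-preferred order):
  1. A call to MORECORE that can normally contiguously extend memory
     (disabled if not MORECORE_CONTIGUOUS or a previous contiguous
     call failed).
  2. A call to MMAP new space (disabled if not HAVE_MMAP). This is a
     useful backup when sbrk cannot contiguously extend the heap.
  3. A call to MORECORE that cannot usually contiguously extend memory
     (disabled if not HAVE_MORECORE).
*/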
03752 if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
03753 char* br = CMFAIL;
03754 msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
03755 size_t asize = 0;
03756 ACQUIRE_MORECORE_LOCK();
03757
03758 if (ss == 0) {
03759 char* base = (char*)CALL_MORECORE(0);
03760 if (base != CMFAIL) {
03761 asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
03762
03763 if (!is_page_aligned(base))
03764 asize += (page_align((size_t)base) - (size_t)base);
03765
03766 if (asize < HALF_MAX_SIZE_T &&
03767 (br = (char*)(CALL_MORECORE(asize))) == base) {
03768 tbase = base;
03769 tsize = asize;
03770 }
03771 }
03772 }
03773 else {
03774
03775 asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE);
03776
03777 if (asize < HALF_MAX_SIZE_T &&
03778 (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
03779 tbase = br;
03780 tsize = asize;
03781 }
03782 }
03783
03784 if (tbase == CMFAIL) {
03785 if (br != CMFAIL) {
03786 if (asize < HALF_MAX_SIZE_T &&
03787 asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
03788 size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize);
03789 if (esize < HALF_MAX_SIZE_T) {
03790 char* end = (char*)CALL_MORECORE(esize);
03791 if (end != CMFAIL)
03792 asize += esize;
03793 else {
03794 (void) CALL_MORECORE(-asize);
03795 br = CMFAIL;
03796 }
03797 }
03798 }
03799 }
03800 if (br != CMFAIL) {
03801 tbase = br;
03802 tsize = asize;
03803 }
03804 else
03805 disable_contiguous(m);
03806 }
03807
03808 RELEASE_MORECORE_LOCK();
03809 }
03810
03811 if (HAVE_MMAP && tbase == CMFAIL) {
03812 size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
03813 size_t rsize = granularity_align(req);
03814 if (rsize > nb) {
03815 char* mp = (char*)(CALL_MMAP(rsize));
03816 if (mp != CMFAIL) {
03817 tbase = mp;
03818 tsize = rsize;
03819 mmap_flag = IS_MMAPPED_BIT;
03820 }
03821 }
03822 }
03823
03824 if (HAVE_MORECORE && tbase == CMFAIL) {
03825 size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
03826 if (asize < HALF_MAX_SIZE_T) {
03827 char* br = CMFAIL;
03828 char* end = CMFAIL;
03829 ACQUIRE_MORECORE_LOCK();
03830 br = (char*)(CALL_MORECORE(asize));
03831 end = (char*)(CALL_MORECORE(0));
03832 RELEASE_MORECORE_LOCK();
03833 if (br != CMFAIL && end != CMFAIL && br < end) {
03834 size_t ssize = end - br;
03835 if (ssize > nb + TOP_FOOT_SIZE) {
03836 tbase = br;
03837 tsize = ssize;
03838 }
03839 }
03840 }
03841 }
03842
03843 if (tbase != CMFAIL) {
03844
03845 if ((m->footprint += tsize) > m->max_footprint)
03846 m->max_footprint = m->footprint;
03847
03848 if (!is_initialized(m)) {
03849 m->seg.base = m->least_addr = tbase;
03850 m->seg.size = tsize;
03851 m->seg.sflags = mmap_flag;
03852 m->magic = mparams.magic;
03853 m->release_checks = MAX_RELEASE_CHECK_RATE;
03854 init_bins(m);
03855 #if !ONLY_MSPACES
03856 if (is_global(m))
03857 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
03858 else
03859 #endif
03860 {
03861
03862 mchunkptr mn = next_chunk(mem2chunk(m));
03863 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
03864 }
03865 }
03866
03867 else {
03868
03869 msegmentptr sp = &m->seg;
03870
03871 while (sp != 0 && tbase != sp->base + sp->size)
03872 sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
03873 if (sp != 0 &&
03874 !is_extern_segment(sp) &&
03875 (sp->sflags & IS_MMAPPED_BIT) == mmap_flag &&
03876 segment_holds(sp, m->top)) {
03877 sp->size += tsize;
03878 init_top(m, m->top, m->topsize + tsize);
03879 }
03880 else {
03881 if (tbase < m->least_addr)
03882 m->least_addr = tbase;
03883 sp = &m->seg;
03884 while (sp != 0 && sp->base != tbase + tsize)
03885 sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
03886 if (sp != 0 &&
03887 !is_extern_segment(sp) &&
03888 (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
03889 char* oldbase = sp->base;
03890 sp->base = tbase;
03891 sp->size += tsize;
03892 return prepend_alloc(m, tbase, oldbase, nb);
03893 }
03894 else
03895 add_segment(m, tbase, tsize, mmap_flag);
03896 }
03897 }
03898
03899 if (nb < m->topsize) {
03900 size_t rsize = m->topsize -= nb;
03901 mchunkptr p = m->top;
03902 mchunkptr r = m->top = chunk_plus_offset(p, nb);
03903 r->head = rsize | PINUSE_BIT;
03904 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
03905 check_top_chunk(m, m->top);
03906 check_malloced_chunk(m, chunk2mem(p), nb);
03907 return chunk2mem(p);
03908 }
03909 }
03910
03911 MALLOC_FAILURE_ACTION;
03912 return 0;
03913 }
03914
03915
03916
03917
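/* Unmap and unlink any mmapped segments that don't contain used chunks. */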
03918 static size_t release_unused_segments(mstate m) {
03919 size_t released = 0;
03920 int nsegs = 0;
03921 msegmentptr pred = &m->seg;
03922 msegmentptr sp = pred->next;
03923 while (sp != 0) {
03924 char* base = sp->base;
03925 size_t size = sp->size;
03926 msegmentptr next = sp->next;
03927 ++nsegs;
03928 if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
03929 mchunkptr p = align_as_chunk(base);
03930 size_t psize = chunksize(p);
03931
03932 if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
03933 tchunkptr tp = (tchunkptr)p;
03934 assert(segment_holds(sp, (char*)sp));
03935 if (p == m->dv) {
03936 m->dv = 0;
03937 m->dvsize = 0;
03938 }
03939 else {
03940 unlink_large_chunk(m, tp);
03941 }
03942 if (CALL_MUNMAP(base, size) == 0) {
03943 released += size;
03944 m->footprint -= size;
03945
03946 sp = pred;
03947 sp->next = next;
03948 }
03949 else {
03950 insert_large_chunk(m, tp, psize);
03951 }
03952 }
03953 }
03954 if (NO_SEGMENT_TRAVERSAL)
03955 break;
03956 pred = sp;
03957 sp = next;
03958 }
03959
03960 m->release_checks = ((nsegs > MAX_RELEASE_CHECK_RATE)?
03961 nsegs : MAX_RELEASE_CHECK_RATE);
03962 return released;
03963 }
03964
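/*
  Give back unused memory at the top of the heap to the system,
  and release any unused mmapped segments.
*/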
03965 static int sys_trim(mstate m, size_t pad) {
03966 size_t released = 0;
03967 if (pad < MAX_REQUEST && is_initialized(m)) {
03968 pad += TOP_FOOT_SIZE;
03969
03970 if (m->topsize > pad) {
03971
03972 size_t unit = mparams.granularity;
03973 size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
03974 SIZE_T_ONE) * unit;
03975 msegmentptr sp = segment_holding(m, (char*)m->top);
03976
03977 if (!is_extern_segment(sp)) {
03978 if (is_mmapped_segment(sp)) {
03979 if (HAVE_MMAP &&
03980 sp->size >= extra &&
03981 !has_segment_link(m, sp)) {
03982 size_t newsize = sp->size - extra;
03983
03984 if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
03985 (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
03986 released = extra;
03987 }
03988 }
03989 }
03990 else if (HAVE_MORECORE) {
03991 if (extra >= HALF_MAX_SIZE_T)
03992 extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
03993 ACQUIRE_MORECORE_LOCK();
03994 {
03995
03996 char* old_br = (char*)(CALL_MORECORE(0));
03997 if (old_br == sp->base + sp->size) {
03998 char* rel_br = (char*)(CALL_MORECORE(-extra));
03999 char* new_br = (char*)(CALL_MORECORE(0));
04000 if (rel_br != CMFAIL && new_br < old_br)
04001 released = old_br - new_br;
04002 }
04003 }
04004 RELEASE_MORECORE_LOCK();
04005 }
04006 }
04007
04008 if (released != 0) {
04009 sp->size -= released;
04010 m->footprint -= released;
04011 init_top(m, m->top, m->topsize - released);
04012 check_top_chunk(m, m->top);
04013 }
04014 }
04015
04016
04017 if (HAVE_MMAP)
04018 released += release_unused_segments(m);
04019
04020
04021 if (released == 0 && m->topsize > m->trim_check)
04022 m->trim_check = MAX_SIZE_T;
04023 }
04024
04025 return (released != 0)? 1 : 0;
04026 }
04027
04028
04029
04030
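/* Allocate a large request from the best-fitting chunk in a treebin. */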
04031 static void* tmalloc_large(mstate m, size_t nb) {
04032 tchunkptr v = 0;
04033 size_t rsize = -nb;
04034 tchunkptr t;
04035 bindex_t idx;
04036 compute_tree_index(nb, idx);
04037
04038 if ((t = *treebin_at(m, idx)) != 0) {
04039
04040 size_t sizebits = nb << leftshift_for_tree_index(idx);
04041 tchunkptr rst = 0;
04042 for (;;) {
04043 tchunkptr rt;
04044 size_t trem = chunksize(t) - nb;
04045 if (trem < rsize) {
04046 v = t;
04047 if ((rsize = trem) == 0)
04048 break;
04049 }
04050 rt = t->child[1];
04051 t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
04052 if (rt != 0 && rt != t)
04053 rst = rt;
04054 if (t == 0) {
04055 t = rst;
04056 break;
04057 }
04058 sizebits <<= 1;
04059 }
04060 }
04061
04062 if (t == 0 && v == 0) {
04063 binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
04064 if (leftbits != 0) {
04065 bindex_t i;
04066 binmap_t leastbit = least_bit(leftbits);
04067 compute_bit2idx(leastbit, i);
04068 t = *treebin_at(m, i);
04069 }
04070 }
04071
04072 while (t != 0) {
04073 size_t trem = chunksize(t) - nb;
04074 if (trem < rsize) {
04075 rsize = trem;
04076 v = t;
04077 }
04078 t = leftmost_child(t);
04079 }
04080
04081
04082 if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
04083 if (RTCHECK(ok_address(m, v))) {
04084 mchunkptr r = chunk_plus_offset(v, nb);
04085 assert(chunksize(v) == rsize + nb);
04086 if (RTCHECK(ok_next(v, r))) {
04087 unlink_large_chunk(m, v);
04088 if (rsize < MIN_CHUNK_SIZE)
04089 set_inuse_and_pinuse(m, v, (rsize + nb));
04090 else {
04091 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
04092 set_size_and_pinuse_of_free_chunk(r, rsize);
04093 insert_chunk(m, r, rsize);
04094 }
04095 return chunk2mem(v);
04096 }
04097 }
04098 CORRUPTION_ERROR_ACTION(m);
04099 }
04100 return 0;
04101 }
04102
04103
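/* Allocate a small request from the best-fitting chunk in a treebin. */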
04104 static void* tmalloc_small(mstate m, size_t nb) {
04105 tchunkptr t, v;
04106 size_t rsize;
04107 bindex_t i;
04108 binmap_t leastbit = least_bit(m->treemap);
04109 compute_bit2idx(leastbit, i);
04110
04111 v = t = *treebin_at(m, i);
04112 rsize = chunksize(t) - nb;
04113
04114 while ((t = leftmost_child(t)) != 0) {
04115 size_t trem = chunksize(t) - nb;
04116 if (trem < rsize) {
04117 rsize = trem;
04118 v = t;
04119 }
04120 }
04121
04122 if (RTCHECK(ok_address(m, v))) {
04123 mchunkptr r = chunk_plus_offset(v, nb);
04124 assert(chunksize(v) == rsize + nb);
04125 if (RTCHECK(ok_next(v, r))) {
04126 unlink_large_chunk(m, v);
04127 if (rsize < MIN_CHUNK_SIZE)
04128 set_inuse_and_pinuse(m, v, (rsize + nb));
04129 else {
04130 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
04131 set_size_and_pinuse_of_free_chunk(r, rsize);
04132 replace_dv(m, r, rsize);
04133 }
04134 return chunk2mem(v);
04135 }
04136 }
04137
04138 CORRUPTION_ERROR_ACTION(m);
04139 return 0;
04140 }
04141
04142
04143
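/* Realloc implementation shared by dlrealloc and mspace_realloc. */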
04144 static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
04145 if (bytes >= MAX_REQUEST) {
04146 MALLOC_FAILURE_ACTION;
04147 return 0;
04148 }
04149 if (!PREACTION(m)) {
04150 mchunkptr oldp = mem2chunk(oldmem);
04151 size_t oldsize = chunksize(oldp);
04152 mchunkptr next = chunk_plus_offset(oldp, oldsize);
04153 mchunkptr newp = 0;
04154 void* extra = 0;
04155
04156
04157
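/* Try to either shrink or extend into top. Else malloc-copy-free. */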
04158 if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
04159 ok_next(oldp, next) && ok_pinuse(next))) {
04160 size_t nb = request2size(bytes);
04161 if (is_mmapped(oldp))
04162 newp = mmap_resize(m, oldp, nb);
04163 else if (oldsize >= nb) {
04164 size_t rsize = oldsize - nb;
04165 newp = oldp;
04166 if (rsize >= MIN_CHUNK_SIZE) {
04167 mchunkptr remainder = chunk_plus_offset(newp, nb);
04168 set_inuse(m, newp, nb);
04169 set_inuse(m, remainder, rsize);
04170 extra = chunk2mem(remainder);
04171 }
04172 }
04173 else if (next == m->top && oldsize + m->topsize > nb) {
04174
04175 size_t newsize = oldsize + m->topsize;
04176 size_t newtopsize = newsize - nb;
04177 mchunkptr newtop = chunk_plus_offset(oldp, nb);
04178 set_inuse(m, oldp, nb);
04179 newtop->head = newtopsize |PINUSE_BIT;
04180 m->top = newtop;
04181 m->topsize = newtopsize;
04182 newp = oldp;
04183 }
04184 }
04185 else {
04186 USAGE_ERROR_ACTION(m, oldmem);
04187 POSTACTION(m);
04188 return 0;
04189 }
04190
04191 POSTACTION(m);
04192
04193 if (newp != 0) {
04194 if (extra != 0) {
04195 internal_free(m, extra);
04196 }
04197 check_inuse_chunk(m, newp);
04198 return chunk2mem(newp);
04199 }
04200 else {
04201 void* newmem = internal_malloc(m, bytes);
04202 if (newmem != 0) {
04203 size_t oc = oldsize - overhead_for(oldp);
04204 memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
04205 internal_free(m, oldmem);
04206 }
04207 return newmem;
04208 }
04209 }
04210 return 0;
04211 }
04212
04213
04214
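/* Memalign implementation shared by dlmemalign and mspace_memalign. */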
04215 static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
04216 if (alignment <= MALLOC_ALIGNMENT)
04217 return internal_malloc(m, bytes);
04218 if (alignment < MIN_CHUNK_SIZE)
04219 alignment = MIN_CHUNK_SIZE;
04220 if ((alignment & (alignment-SIZE_T_ONE)) != 0) {
04221 size_t a = MALLOC_ALIGNMENT << 1;
04222 while (a < alignment) a <<= 1;
04223 alignment = a;
04224 }
04225
04226 if (bytes >= MAX_REQUEST - alignment) {
04227 if (m != 0) {
04228 MALLOC_FAILURE_ACTION;
04229 }
04230 }
04231 else {
04232 size_t nb = request2size(bytes);
04233 size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
04234 char* mem = (char*)internal_malloc(m, req);
04235 if (mem != 0) {
04236 void* leader = 0;
04237 void* trailer = 0;
04238 mchunkptr p = mem2chunk(mem);
04239
04240 if (PREACTION(m)) return 0;
04241 if ((((size_t)(mem)) % alignment) != 0) {
04242
04243
04244
04245
04246
04247
04248
04249
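/*
  Find an aligned spot inside the chunk. Since we need to give back
  leading space of at least MIN_CHUNK_SIZE, if the first calculation
  places us at a spot with less than MIN_CHUNK_SIZE leader, we can
  move to the next aligned spot; enough total room was requested to
  make this always possible.
*/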
04250 char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
04251 alignment -
04252 SIZE_T_ONE)) &
04253 -alignment));
04254 char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
04255 br : br+alignment;
04256 mchunkptr newp = (mchunkptr)pos;
04257 size_t leadsize = pos - (char*)(p);
04258 size_t newsize = chunksize(p) - leadsize;
04259
04260 if (is_mmapped(p)) {
04261 newp->prev_foot = p->prev_foot + leadsize;
04262 newp->head = (newsize|CINUSE_BIT);
04263 }
04264 else {
04265 set_inuse(m, newp, newsize);
04266 set_inuse(m, p, leadsize);
04267 leader = chunk2mem(p);
04268 }
04269 p = newp;
04270 }
04271
04272
04273 if (!is_mmapped(p)) {
04274 size_t size = chunksize(p);
04275 if (size > nb + MIN_CHUNK_SIZE) {
04276 size_t remainder_size = size - nb;
04277 mchunkptr remainder = chunk_plus_offset(p, nb);
04278 set_inuse(m, p, nb);
04279 set_inuse(m, remainder, remainder_size);
04280 trailer = chunk2mem(remainder);
04281 }
04282 }
04283
04284 assert (chunksize(p) >= nb);
04285 assert((((size_t)(chunk2mem(p))) % alignment) == 0);
04286 check_inuse_chunk(m, p);
04287 POSTACTION(m);
04288 if (leader != 0) {
04289 internal_free(m, leader);
04290 }
04291 if (trailer != 0) {
04292 internal_free(m, trailer);
04293 }
04294 return chunk2mem(p);
04295 }
04296 }
04297 return 0;
04298 }
04299
04300
04301
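/*
  Common support for independent_X routines, handling all of the
  combinations that can result. The opts arg has:
    bit 0 set if all elements are same size (using sizes[0])
    bit 1 set if elements should be zeroed
*/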
04302 static void** ialloc(mstate m,
04303 size_t n_elements,
04304 size_t* sizes,
04305 int opts,
04306 void* chunks[]) {
04307
04308
04309
04310
04311
04312
04313
04314
04315
04316 size_t element_size;
04317 size_t contents_size;
04318 size_t array_size;
04319 void* mem;
04320 mchunkptr p;
04321 size_t remainder_size;
04322 void** marray;
04323 mchunkptr array_chunk;
04324 flag_t was_enabled;
04325 size_t size;
04326 size_t i;
04327
04328
04329 if (chunks != 0) {
04330 if (n_elements == 0)
04331 return chunks;
04332 marray = chunks;
04333 array_size = 0;
04334 }
04335 else {
04336
04337 if (n_elements == 0)
04338 return (void**)internal_malloc(m, 0);
04339 marray = 0;
04340 array_size = request2size(n_elements * (sizeof(void*)));
04341 }
04342
04343
04344 if (opts & 0x1) {
04345 element_size = request2size(*sizes);
04346 contents_size = n_elements * element_size;
04347 }
04348 else {
04349 element_size = 0;
04350 contents_size = 0;
04351 for (i = 0; i != n_elements; ++i)
04352 contents_size += request2size(sizes[i]);
04353 }
04354
04355 size = contents_size + array_size;
04356
04357
04358
04359
04360
04361
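/*
  Allocate the aggregate chunk. First disable direct-mmapping so
  malloc won't use it, since we would not be able to later
  free/realloc space internal to a segregated mmap region.
*/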
04362 was_enabled = use_mmap(m);
04363 disable_mmap(m);
04364 mem = internal_malloc(m, size - CHUNK_OVERHEAD);
04365 if (was_enabled)
04366 enable_mmap(m);
04367 if (mem == 0)
04368 return 0;
04369
04370 if (PREACTION(m)) return 0;
04371 p = mem2chunk(mem);
04372 remainder_size = chunksize(p);
04373
04374 assert(!is_mmapped(p));
04375
04376 if (opts & 0x2) {
04377 memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
04378 }
04379
04380
04381 if (marray == 0) {
04382 size_t array_chunk_size;
04383 array_chunk = chunk_plus_offset(p, contents_size);
04384 array_chunk_size = remainder_size - contents_size;
04385 marray = (void**) (chunk2mem(array_chunk));
04386 set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
04387 remainder_size = contents_size;
04388 }
04389
04390
04391 for (i = 0; ; ++i) {
04392 marray[i] = chunk2mem(p);
04393 if (i != n_elements-1) {
04394 if (element_size != 0)
04395 size = element_size;
04396 else
04397 size = request2size(sizes[i]);
04398 remainder_size -= size;
04399 set_size_and_pinuse_of_inuse_chunk(m, p, size);
04400 p = chunk_plus_offset(p, size);
04401 }
04402 else {
04403 set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
04404 break;
04405 }
04406 }
04407
04408 #if DEBUG
04409 if (marray != chunks) {
04410
04411 if (element_size != 0) {
04412 assert(remainder_size == element_size);
04413 }
04414 else {
04415 assert(remainder_size == request2size(sizes[i]));
04416 }
04417 check_inuse_chunk(m, mem2chunk(marray));
04418 }
04419 for (i = 0; i != n_elements; ++i)
04420 check_inuse_chunk(m, mem2chunk(marray[i]));
04421
04422 #endif
04423
04424 POSTACTION(m);
04425 return marray;
04426 }
04427
04428
04429
04430
04431 #if !ONLY_MSPACES
04432
04433 void* dlmalloc(size_t bytes) {
04434
04435
04436
04437
04438
04439
04440
04441
04442
04443
04444
04445
04446
04447
04448
04449
04450
04451
04452
04453
04454
04455
04456
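/*
  Basic algorithm:
  If a small request (<= MAX_SMALL_REQUEST):
    1. If one exists, use a remainderless chunk in the associated smallbin.
    2. If it is big enough, use the dv chunk, which is normally the
       chunk adjacent to the one used for the most recent small request.
    3. If one exists, split the smallest available chunk in a bin,
       the remainder becoming the new dv.
    4. If it is big enough, use the top chunk.
    5. If available, get memory from the system and use it.
  Otherwise, for a large request:
    1. Find the smallest available binned chunk that fits, and use it
       if it is better fitting than the dv chunk, splitting if necessary.
    2. If better fitting than any binned chunk, use the dv chunk.
    3. If it is big enough, use the top chunk.
    4. If the request size >= mmap threshold, try to directly mmap this chunk.
    5. If available, get memory from the system and use it.
*/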
04457 if (!PREACTION(gm)) {
04458 void* mem;
04459 size_t nb;
04460 if (bytes <= MAX_SMALL_REQUEST) {
04461 bindex_t idx;
04462 binmap_t smallbits;
04463 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
04464 idx = small_index(nb);
04465 smallbits = gm->smallmap >> idx;
04466
04467 if ((smallbits & 0x3U) != 0) {
04468 mchunkptr b, p;
04469 idx += ~smallbits & 1;
04470 b = smallbin_at(gm, idx);
04471 p = b->fd;
04472 assert(chunksize(p) == small_index2size(idx));
04473 unlink_first_small_chunk(gm, b, p, idx);
04474 set_inuse_and_pinuse(gm, p, small_index2size(idx));
04475 mem = chunk2mem(p);
04476 check_malloced_chunk(gm, mem, nb);
04477 goto postaction;
04478 }
04479
04480 else if (nb > gm->dvsize) {
04481 if (smallbits != 0) {
04482 mchunkptr b, p, r;
04483 size_t rsize;
04484 bindex_t i;
04485 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
04486 binmap_t leastbit = least_bit(leftbits);
04487 compute_bit2idx(leastbit, i);
04488 b = smallbin_at(gm, i);
04489 p = b->fd;
04490 assert(chunksize(p) == small_index2size(i));
04491 unlink_first_small_chunk(gm, b, p, i);
04492 rsize = small_index2size(i) - nb;
04493
04494 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
04495 set_inuse_and_pinuse(gm, p, small_index2size(i));
04496 else {
04497 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
04498 r = chunk_plus_offset(p, nb);
04499 set_size_and_pinuse_of_free_chunk(r, rsize);
04500 replace_dv(gm, r, rsize);
04501 }
04502 mem = chunk2mem(p);
04503 check_malloced_chunk(gm, mem, nb);
04504 goto postaction;
04505 }
04506
04507 else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
04508 check_malloced_chunk(gm, mem, nb);
04509 goto postaction;
04510 }
04511 }
04512 }
04513 else if (bytes >= MAX_REQUEST)
04514 nb = MAX_SIZE_T;
04515 else {
04516 nb = pad_request(bytes);
04517 if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
04518 check_malloced_chunk(gm, mem, nb);
04519 goto postaction;
04520 }
04521 }
04522
04523 if (nb <= gm->dvsize) {
04524 size_t rsize = gm->dvsize - nb;
04525 mchunkptr p = gm->dv;
04526 if (rsize >= MIN_CHUNK_SIZE) {
04527 mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
04528 gm->dvsize = rsize;
04529 set_size_and_pinuse_of_free_chunk(r, rsize);
04530 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
04531 }
04532 else {
04533 size_t dvs = gm->dvsize;
04534 gm->dvsize = 0;
04535 gm->dv = 0;
04536 set_inuse_and_pinuse(gm, p, dvs);
04537 }
04538 mem = chunk2mem(p);
04539 check_malloced_chunk(gm, mem, nb);
04540 goto postaction;
04541 }
04542
04543 else if (nb < gm->topsize) {
04544 size_t rsize = gm->topsize -= nb;
04545 mchunkptr p = gm->top;
04546 mchunkptr r = gm->top = chunk_plus_offset(p, nb);
04547 r->head = rsize | PINUSE_BIT;
04548 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
04549 mem = chunk2mem(p);
04550 check_top_chunk(gm, gm->top);
04551 check_malloced_chunk(gm, mem, nb);
04552 goto postaction;
04553 }
04554
04555 mem = sys_alloc(gm, nb);
04556
04557 postaction:
04558 POSTACTION(gm);
04559 return mem;
04560 }
04561
04562 return 0;
04563 }
04564
04565 void dlfree(void* mem) {
04566
04567
04568
04569
04570
04571
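/*
  Consolidate freed chunks with preceding or succeeding bordering
  free chunks, if they exist, and then place in a bin. Intermixed
  with special cases for top, dv, mmapped chunks, and usage errors.
*/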
04572 if (mem != 0) {
04573 mchunkptr p = mem2chunk(mem);
04574 #if FOOTERS
04575 mstate fm = get_mstate_for(p);
04576 if (!ok_magic(fm)) {
04577 USAGE_ERROR_ACTION(fm, p);
04578 return;
04579 }
04580 #else
04581 #define fm gm
04582 #endif
04583 if (!PREACTION(fm)) {
04584 check_inuse_chunk(fm, p);
04585 if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
04586 size_t psize = chunksize(p);
04587 mchunkptr next = chunk_plus_offset(p, psize);
04588 if (!pinuse(p)) {
04589 size_t prevsize = p->prev_foot;
04590 if ((prevsize & IS_MMAPPED_BIT) != 0) {
04591 prevsize &= ~IS_MMAPPED_BIT;
04592 psize += prevsize + MMAP_FOOT_PAD;
04593 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
04594 fm->footprint -= psize;
04595 goto postaction;
04596 }
04597 else {
04598 mchunkptr prev = chunk_minus_offset(p, prevsize);
04599 psize += prevsize;
04600 p = prev;
04601 if (RTCHECK(ok_address(fm, prev))) {
04602 if (p != fm->dv) {
04603 unlink_chunk(fm, p, prevsize);
04604 }
04605 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
04606 fm->dvsize = psize;
04607 set_free_with_pinuse(p, psize, next);
04608 goto postaction;
04609 }
04610 }
04611 else
04612 goto erroraction;
04613 }
04614 }
04615
04616 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
04617 if (!cinuse(next)) {
04618 if (next == fm->top) {
04619 size_t tsize = fm->topsize += psize;
04620 fm->top = p;
04621 p->head = tsize | PINUSE_BIT;
04622 if (p == fm->dv) {
04623 fm->dv = 0;
04624 fm->dvsize = 0;
04625 }
04626 if (should_trim(fm, tsize))
04627 sys_trim(fm, 0);
04628 goto postaction;
04629 }
04630 else if (next == fm->dv) {
04631 size_t dsize = fm->dvsize += psize;
04632 fm->dv = p;
04633 set_size_and_pinuse_of_free_chunk(p, dsize);
04634 goto postaction;
04635 }
04636 else {
04637 size_t nsize = chunksize(next);
04638 psize += nsize;
04639 unlink_chunk(fm, next, nsize);
04640 set_size_and_pinuse_of_free_chunk(p, psize);
04641 if (p == fm->dv) {
04642 fm->dvsize = psize;
04643 goto postaction;
04644 }
04645 }
04646 }
04647 else
04648 set_free_with_pinuse(p, psize, next);
04649
04650 if (is_small(psize)) {
04651 insert_small_chunk(fm, p, psize);
04652 check_free_chunk(fm, p);
04653 }
04654 else {
04655 tchunkptr tp = (tchunkptr)p;
04656 insert_large_chunk(fm, tp, psize);
04657 check_free_chunk(fm, p);
04658 if (--fm->release_checks == 0)
04659 release_unused_segments(fm);
04660 }
04661 goto postaction;
04662 }
04663 }
04664 erroraction:
04665 USAGE_ERROR_ACTION(fm, p);
04666 postaction:
04667 POSTACTION(fm);
04668 }
04669 }
04670 #if !FOOTERS
04671 #undef fm
04672 #endif
04673 }
04674
04675 void* dlcalloc(size_t n_elements, size_t elem_size) {
04676 void* mem;
04677 size_t req = 0;
04678 if (n_elements != 0) {
04679 req = n_elements * elem_size;
04680 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
04681 (req / n_elements != elem_size))
04682 req = MAX_SIZE_T;
04683 }
04684 mem = dlmalloc(req);
04685 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
04686 memset(mem, 0, req);
04687 return mem;
04688 }
04689
04690 void* dlrealloc(void* oldmem, size_t bytes) {
04691 if (oldmem == 0)
04692 return dlmalloc(bytes);
04693 #ifdef REALLOC_ZERO_BYTES_FREES
04694 if (bytes == 0) {
04695 dlfree(oldmem);
04696 return 0;
04697 }
04698 #endif
04699 else {
04700 #if ! FOOTERS
04701 mstate m = gm;
04702 #else
04703 mstate m = get_mstate_for(mem2chunk(oldmem));
04704 if (!ok_magic(m)) {
04705 USAGE_ERROR_ACTION(m, oldmem);
04706 return 0;
04707 }
04708 #endif
04709 return internal_realloc(m, oldmem, bytes);
04710 }
04711 }
04712
04713 void* dlmemalign(size_t alignment, size_t bytes) {
04714 return internal_memalign(gm, alignment, bytes);
04715 }
04716
04717 void** dlindependent_calloc(size_t n_elements, size_t elem_size,
04718 void* chunks[]) {
04719 size_t sz = elem_size;
04720 return ialloc(gm, n_elements, &sz, 3, chunks);
04721 }
04722
04723 void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
04724 void* chunks[]) {
04725 return ialloc(gm, n_elements, sizes, 0, chunks);
04726 }
04727
04728 void* dlvalloc(size_t bytes) {
04729 size_t pagesz;
04730 init_mparams();
04731 pagesz = mparams.page_size;
04732 return dlmemalign(pagesz, bytes);
04733 }
04734
04735 void* dlpvalloc(size_t bytes) {
04736 size_t pagesz;
04737 init_mparams();
04738 pagesz = mparams.page_size;
04739 return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
04740 }
04741
04742 int dlmalloc_trim(size_t pad) {
04743 int result = 0;
04744 if (!PREACTION(gm)) {
04745 result = sys_trim(gm, pad);
04746 POSTACTION(gm);
04747 }
04748 return result;
04749 }
04750
04751 size_t dlmalloc_footprint(void) {
04752 return gm->footprint;
04753 }
04754
04755 size_t dlmalloc_max_footprint(void) {
04756 return gm->max_footprint;
04757 }
04758
04759 #if !NO_MALLINFO
04760 struct mallinfo dlmallinfo(void) {
04761 return internal_mallinfo(gm);
04762 }
04763 #endif
04764
04765 void dlmalloc_stats(void) {
04766 internal_malloc_stats(gm);
04767 }
04768
04769 size_t dlmalloc_usable_size(void* mem) {
04770 if (mem != 0) {
04771 mchunkptr p = mem2chunk(mem);
04772 if (cinuse(p))
04773 return chunksize(p) - overhead_for(p);
04774 }
04775 return 0;
04776 }
04777
04778 int dlmallopt(int param_number, int value) {
04779 return change_mparam(param_number, value);
04780 }
04781
04782 #endif /* !ONLY_MSPACES */
04783
04784
04785
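/* User mspaces: separately managed allocation spaces created via create_mspace. */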
04786 #if MSPACES
04787
04788 static mstate init_user_mstate(char* tbase, size_t tsize) {
04789 size_t msize = pad_request(sizeof(struct malloc_state));
04790 mchunkptr mn;
04791 mchunkptr msp = align_as_chunk(tbase);
04792 mstate m = (mstate)(chunk2mem(msp));
04793 memset(m, 0, msize);
04794 INITIAL_LOCK(&m->mutex);
04795 msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
04796 m->seg.base = m->least_addr = tbase;
04797 m->seg.size = m->footprint = m->max_footprint = tsize;
04798 m->magic = mparams.magic;
04799 m->release_checks = MAX_RELEASE_CHECK_RATE;
04800 m->mflags = mparams.default_mflags;
04801 m->extp = 0;
04802 m->exts = 0;
04803 disable_contiguous(m);
04804 init_bins(m);
04805 mn = next_chunk(mem2chunk(m));
04806 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
04807 check_top_chunk(m, m->top);
04808 return m;
04809 }
04810
04811 mspace create_mspace(size_t capacity, int locked) {
04812 mstate m = 0;
04813 size_t msize = pad_request(sizeof(struct malloc_state));
04814 init_mparams();
04815
04816 if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
04817 size_t rs = ((capacity == 0)? mparams.granularity :
04818 (capacity + TOP_FOOT_SIZE + msize));
04819 size_t tsize = granularity_align(rs);
04820 char* tbase = (char*)(CALL_MMAP(tsize));
04821 if (tbase != CMFAIL) {
04822 m = init_user_mstate(tbase, tsize);
04823 m->seg.sflags = IS_MMAPPED_BIT;
04824 set_lock(m, locked);
04825 }
04826 }
04827 return (mspace)m;
04828 }
04829
04830 mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
04831 mstate m = 0;
04832 size_t msize = pad_request(sizeof(struct malloc_state));
04833 init_mparams();
04834
04835 if (capacity > msize + TOP_FOOT_SIZE &&
04836 capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
04837 m = init_user_mstate((char*)base, capacity);
04838 m->seg.sflags = EXTERN_BIT;
04839 set_lock(m, locked);
04840 }
04841 return (mspace)m;
04842 }
04843
04844 size_t destroy_mspace(mspace msp) {
04845 size_t freed = 0;
04846 mstate ms = (mstate)msp;
04847 if (ok_magic(ms)) {
04848 msegmentptr sp = &ms->seg;
04849 while (sp != 0) {
04850 char* base = sp->base;
04851 size_t size = sp->size;
04852 flag_t flag = sp->sflags;
04853 sp = sp->next;
04854 if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
04855 CALL_MUNMAP(base, size) == 0)
04856 freed += size;
04857 }
04858 }
04859 else {
04860 USAGE_ERROR_ACTION(ms,ms);
04861 }
04862 return freed;
04863 }
04864
04865
04866
04867
04868
04869
04870
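/*
  mspace versions of routines are near-clones of the global
  versions. This is not so nice but better than the alternatives.
*/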
04871 void* mspace_malloc(mspace msp, size_t bytes) {
04872 mstate ms = (mstate)msp;
04873 if (!ok_magic(ms)) {
04874 USAGE_ERROR_ACTION(ms,ms);
04875 return 0;
04876 }
04877 if (!PREACTION(ms)) {
04878 void* mem;
04879 size_t nb;
04880 if (bytes <= MAX_SMALL_REQUEST) {
04881 bindex_t idx;
04882 binmap_t smallbits;
04883 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
04884 idx = small_index(nb);
04885 smallbits = ms->smallmap >> idx;
04886
04887 if ((smallbits & 0x3U) != 0) {
04888 mchunkptr b, p;
04889 idx += ~smallbits & 1;
04890 b = smallbin_at(ms, idx);
04891 p = b->fd;
04892 assert(chunksize(p) == small_index2size(idx));
04893 unlink_first_small_chunk(ms, b, p, idx);
04894 set_inuse_and_pinuse(ms, p, small_index2size(idx));
04895 mem = chunk2mem(p);
04896 check_malloced_chunk(ms, mem, nb);
04897 goto postaction;
04898 }
04899
04900 else if (nb > ms->dvsize) {
04901 if (smallbits != 0) {
04902 mchunkptr b, p, r;
04903 size_t rsize;
04904 bindex_t i;
04905 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
04906 binmap_t leastbit = least_bit(leftbits);
04907 compute_bit2idx(leastbit, i);
04908 b = smallbin_at(ms, i);
04909 p = b->fd;
04910 assert(chunksize(p) == small_index2size(i));
04911 unlink_first_small_chunk(ms, b, p, i);
04912 rsize = small_index2size(i) - nb;
04913
04914 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
04915 set_inuse_and_pinuse(ms, p, small_index2size(i));
04916 else {
04917 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
04918 r = chunk_plus_offset(p, nb);
04919 set_size_and_pinuse_of_free_chunk(r, rsize);
04920 replace_dv(ms, r, rsize);
04921 }
04922 mem = chunk2mem(p);
04923 check_malloced_chunk(ms, mem, nb);
04924 goto postaction;
04925 }
04926
04927 else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
04928 check_malloced_chunk(ms, mem, nb);
04929 goto postaction;
04930 }
04931 }
04932 }
04933 else if (bytes >= MAX_REQUEST)
04934 nb = MAX_SIZE_T;
04935 else {
04936 nb = pad_request(bytes);
04937 if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
04938 check_malloced_chunk(ms, mem, nb);
04939 goto postaction;
04940 }
04941 }
04942
04943 if (nb <= ms->dvsize) {
04944 size_t rsize = ms->dvsize - nb;
04945 mchunkptr p = ms->dv;
04946 if (rsize >= MIN_CHUNK_SIZE) {
04947 mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
04948 ms->dvsize = rsize;
04949 set_size_and_pinuse_of_free_chunk(r, rsize);
04950 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
04951 }
04952 else {
04953 size_t dvs = ms->dvsize;
04954 ms->dvsize = 0;
04955 ms->dv = 0;
04956 set_inuse_and_pinuse(ms, p, dvs);
04957 }
04958 mem = chunk2mem(p);
04959 check_malloced_chunk(ms, mem, nb);
04960 goto postaction;
04961 }
04962
04963 else if (nb < ms->topsize) {
04964 size_t rsize = ms->topsize -= nb;
04965 mchunkptr p = ms->top;
04966 mchunkptr r = ms->top = chunk_plus_offset(p, nb);
04967 r->head = rsize | PINUSE_BIT;
04968 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
04969 mem = chunk2mem(p);
04970 check_top_chunk(ms, ms->top);
04971 check_malloced_chunk(ms, mem, nb);
04972 goto postaction;
04973 }
04974
04975 mem = sys_alloc(ms, nb);
04976
04977 postaction:
04978 POSTACTION(ms);
04979 return mem;
04980 }
04981
04982 return 0;
04983 }
04984
04985 void mspace_free(mspace msp, void* mem) {
04986 if (mem != 0) {
04987 mchunkptr p = mem2chunk(mem);
04988 #if FOOTERS
04989 mstate fm = get_mstate_for(p);
04990 #else
04991 mstate fm = (mstate)msp;
04992 #endif
04993 if (!ok_magic(fm)) {
04994 USAGE_ERROR_ACTION(fm, p);
04995 return;
04996 }
04997 if (!PREACTION(fm)) {
04998 check_inuse_chunk(fm, p);
04999 if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
05000 size_t psize = chunksize(p);
05001 mchunkptr next = chunk_plus_offset(p, psize);
05002 if (!pinuse(p)) {
05003 size_t prevsize = p->prev_foot;
05004 if ((prevsize & IS_MMAPPED_BIT) != 0) {
05005 prevsize &= ~IS_MMAPPED_BIT;
05006 psize += prevsize + MMAP_FOOT_PAD;
05007 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
05008 fm->footprint -= psize;
05009 goto postaction;
05010 }
05011 else {
05012 mchunkptr prev = chunk_minus_offset(p, prevsize);
05013 psize += prevsize;
05014 p = prev;
05015 if (RTCHECK(ok_address(fm, prev))) {
05016 if (p != fm->dv) {
05017 unlink_chunk(fm, p, prevsize);
05018 }
05019 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
05020 fm->dvsize = psize;
05021 set_free_with_pinuse(p, psize, next);
05022 goto postaction;
05023 }
05024 }
05025 else
05026 goto erroraction;
05027 }
05028 }
05029
05030 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
05031 if (!cinuse(next)) {
05032 if (next == fm->top) {
05033 size_t tsize = fm->topsize += psize;
05034 fm->top = p;
05035 p->head = tsize | PINUSE_BIT;
05036 if (p == fm->dv) {
05037 fm->dv = 0;
05038 fm->dvsize = 0;
05039 }
05040 if (should_trim(fm, tsize))
05041 sys_trim(fm, 0);
05042 goto postaction;
05043 }
05044 else if (next == fm->dv) {
05045 size_t dsize = fm->dvsize += psize;
05046 fm->dv = p;
05047 set_size_and_pinuse_of_free_chunk(p, dsize);
05048 goto postaction;
05049 }
05050 else {
05051 size_t nsize = chunksize(next);
05052 psize += nsize;
05053 unlink_chunk(fm, next, nsize);
05054 set_size_and_pinuse_of_free_chunk(p, psize);
05055 if (p == fm->dv) {
05056 fm->dvsize = psize;
05057 goto postaction;
05058 }
05059 }
05060 }
05061 else
05062 set_free_with_pinuse(p, psize, next);
05063
05064 if (is_small(psize)) {
05065 insert_small_chunk(fm, p, psize);
05066 check_free_chunk(fm, p);
05067 }
05068 else {
05069 tchunkptr tp = (tchunkptr)p;
05070 insert_large_chunk(fm, tp, psize);
05071 check_free_chunk(fm, p);
05072 if (--fm->release_checks == 0)
05073 release_unused_segments(fm);
05074 }
05075 goto postaction;
05076 }
05077 }
05078 erroraction:
05079 USAGE_ERROR_ACTION(fm, p);
05080 postaction:
05081 POSTACTION(fm);
05082 }
05083 }
05084 }
05085
05086 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
05087 void* mem;
05088 size_t req = 0;
05089 mstate ms = (mstate)msp;
05090 if (!ok_magic(ms)) {
05091 USAGE_ERROR_ACTION(ms,ms);
05092 return 0;
05093 }
05094 if (n_elements != 0) {
05095 req = n_elements * elem_size;
05096 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
05097 (req / n_elements != elem_size))
05098 req = MAX_SIZE_T;
05099 }
05100 mem = internal_malloc(ms, req);
05101 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
05102 memset(mem, 0, req);
05103 return mem;
05104 }
05105
05106 void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
05107 if (oldmem == 0)
05108 return mspace_malloc(msp, bytes);
05109 #ifdef REALLOC_ZERO_BYTES_FREES
05110 if (bytes == 0) {
05111 mspace_free(msp, oldmem);
05112 return 0;
05113 }
05114 #endif
05115 else {
05116 #if FOOTERS
05117 mchunkptr p = mem2chunk(oldmem);
05118 mstate ms = get_mstate_for(p);
05119 #else
05120 mstate ms = (mstate)msp;
05121 #endif
05122 if (!ok_magic(ms)) {
05123 USAGE_ERROR_ACTION(ms,ms);
05124 return 0;
05125 }
05126 return internal_realloc(ms, oldmem, bytes);
05127 }
05128 }
05129
05130 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
05131 mstate ms = (mstate)msp;
05132 if (!ok_magic(ms)) {
05133 USAGE_ERROR_ACTION(ms,ms);
05134 return 0;
05135 }
05136 return internal_memalign(ms, alignment, bytes);
05137 }
05138
05139 void** mspace_independent_calloc(mspace msp, size_t n_elements,
05140 size_t elem_size, void* chunks[]) {
05141 size_t sz = elem_size;
05142 mstate ms = (mstate)msp;
05143 if (!ok_magic(ms)) {
05144 USAGE_ERROR_ACTION(ms,ms);
05145 return 0;
05146 }
05147 return ialloc(ms, n_elements, &sz, 3, chunks);
05148 }
05149
05150 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
05151 size_t sizes[], void* chunks[]) {
05152 mstate ms = (mstate)msp;
05153 if (!ok_magic(ms)) {
05154 USAGE_ERROR_ACTION(ms,ms);
05155 return 0;
05156 }
05157 return ialloc(ms, n_elements, sizes, 0, chunks);
05158 }
05159
05160 int mspace_trim(mspace msp, size_t pad) {
05161 int result = 0;
05162 mstate ms = (mstate)msp;
05163 if (ok_magic(ms)) {
05164 if (!PREACTION(ms)) {
05165 result = sys_trim(ms, pad);
05166 POSTACTION(ms);
05167 }
05168 }
05169 else {
05170 USAGE_ERROR_ACTION(ms,ms);
05171 }
05172 return result;
05173 }
05174
05175 void mspace_malloc_stats(mspace msp) {
05176 mstate ms = (mstate)msp;
05177 if (ok_magic(ms)) {
05178 internal_malloc_stats(ms);
05179 }
05180 else {
05181 USAGE_ERROR_ACTION(ms,ms);
05182 }
05183 }
05184
05185 size_t mspace_footprint(mspace msp) {
05186 size_t result = 0;
05187 mstate ms = (mstate)msp;
05188 if (ok_magic(ms)) {
05189 result = ms->footprint;
05190 }
05191 else {
05192 USAGE_ERROR_ACTION(ms,ms);
05193 }
05194 return result;
05195 }
05196
05197
05198 size_t mspace_max_footprint(mspace msp) {
05199 size_t result = 0;
05200 mstate ms = (mstate)msp;
05201 if (ok_magic(ms)) {
05202 result = ms->max_footprint;
05203 }
05204 else {
05205 USAGE_ERROR_ACTION(ms,ms);
05206 }
05207 return result;
05208 }
05209
05210
05211 #if !NO_MALLINFO
05212 struct mallinfo mspace_mallinfo(mspace msp) {
05213 mstate ms = (mstate)msp;
05214 if (!ok_magic(ms)) {
05215 USAGE_ERROR_ACTION(ms,ms);
05216 }
05217 return internal_mallinfo(ms);
05218 }
05219 #endif
05220
05221 size_t mspace_usable_size(void* mem) {
05222 if (mem != 0) {
05223 mchunkptr p = mem2chunk(mem);
05224 if (cinuse(p))
05225 return chunksize(p) - overhead_for(p);
05226 }
05227 return 0;
05228 }
05229
05230 int mspace_mallopt(int param_number, int value) {
05231 return change_mparam(param_number, value);
05232 }
05233
05234 #endif /* MSPACES */
05235