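/* Converse "gnu" memory layer: malloc() and friends are served by
   per-thread dlmalloc mspaces ("arenas"); the arena management below
   is derived from ptmalloc2. */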
#define CMI_MEMORY_GNU

#define malloc mm_malloc
#define free mm_free
#define calloc mm_calloc
#define cfree mm_cfree
#define realloc mm_realloc
#define memalign mm_memalign
#define posix_memalign mm_posix_memalign
#define aligned_alloc mm_aligned_alloc
#define valloc mm_valloc
#define pvalloc mm_pvalloc

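/* Running totals (in bytes of chunk space) maintained by the public_*
   wrappers below; the high/low water marks are updated via UPDATE_MEMUSAGE. */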
extern CMK_TYPEDEF_UINT8 _memory_allocated;
extern CMK_TYPEDEF_UINT8 _memory_allocated_max;
extern CMK_TYPEDEF_UINT8 _memory_allocated_min;

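/* Fold the current _memory_allocated value into the max/min water marks. */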
#define UPDATE_MEMUSAGE \
  if(_memory_allocated > _memory_allocated_max) \
    _memory_allocated_max=_memory_allocated; \
  if(_memory_allocated < _memory_allocated_min) \
    _memory_allocated_min=_memory_allocated;

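/* Configure dlmalloc (memory-gnu-internal.C, included below): mspaces
   only, with dlmalloc's internal locking disabled -- each arena is
   protected by its own mutex instead. */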
#define ONLY_MSPACES 1
#define MSPACES 1
#define USE_LOCKS 0

#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "converse.h"

#include "memory-gnu-threads.h"

#include "memory-gnu-internal.C"

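/* Flag bit set in a chunk's head word when it was allocated from a
   non-main arena; such chunks carry a pointer to their arena in a footer. */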
#define NON_MAIN_ARENA (SIZE_T_FOUR)

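/* Unmap an mmap()ed chunk directly and credit its size back to the
   owning mspace's footprint. */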
#define munmap_chunk(mst, p) do { \
  size_t prevsize = (p)->prev_foot & ~IS_MMAPPED_BIT; \
  size_t psize = chunksize(p) + prevsize + MMAP_FOOT_PAD; \
  if (CALL_MUNMAP((char*)(p) - prevsize, psize) == 0) \
    ((struct malloc_state*)(mst))->footprint -= psize; \
} while (0)

#ifndef ARENA_SIZE_MIN
# define ARENA_SIZE_MIN (128*1024)
#endif
#define HAVE_MEMCPY 1

#ifndef THREAD_STATS
# define THREAD_STATS 0
#endif

#ifndef MALLOC_DEBUG
# define MALLOC_DEBUG 0
#endif

#define my_powerof2(x) ((((x)-1)&(x))==0)

int __malloc_initialized = -1;

#ifndef RETURN_ADDRESS
# define RETURN_ADDRESS(X_) (NULL)
#endif

#if THREAD_STATS
# define THREAD_STAT(x) x
#else
# define THREAD_STAT(x) do ; while(0)
#endif

#ifdef _LIBC

#define public_cALLOc __libc_calloc
#define public_fREe __libc_free
#define public_cFREe __libc_cfree
#define public_mALLOc __libc_malloc
#define public_mEMALIGn __libc_memalign
#define public_rEALLOc __libc_realloc
#define public_vALLOc __libc_valloc
#define public_pVALLOc __libc_pvalloc
#define public_pMEMALIGn __posix_memalign
#define public_mALLINFo __libc_mallinfo
#define public_mALLOPt __libc_mallopt
#define public_mTRIm __malloc_trim
#define public_mSTATs __malloc_stats
#define public_mUSABLe __malloc_usable_size
#define public_iCALLOc __libc_independent_calloc
#define public_iCOMALLOc __libc_independent_comalloc
#define public_gET_STATe __malloc_get_state
#define public_sET_STATe __malloc_set_state
#define public_aLIGNED_ALLOc aligned_alloc
#define malloc_getpagesize __getpagesize()
#define open __open
#define mmap __mmap
#define munmap __munmap
#define mremap __mremap
#define mprotect __mprotect
#define MORECORE (*__morecore)
#define MORECORE_FAILURE 0

void * __default_morecore (ptrdiff_t);
void *(*__morecore)(ptrdiff_t) = __default_morecore;

#else

#define public_cALLOc calloc
#define public_fREe free
#define public_cFREe cfree
#define public_mALLOc malloc
#define public_mEMALIGn memalign
#define public_rEALLOc realloc
#define public_vALLOc valloc
#define public_pVALLOc pvalloc
#define public_pMEMALIGn posix_memalign
#define public_mALLINFo mallinfo
#define public_mALLOPt mallopt
#define public_mTRIm malloc_trim
#define public_mSTATs malloc_stats
#define public_mUSABLe malloc_usable_size
#define public_iCALLOc independent_calloc
#define public_iCOMALLOc independent_comalloc
#define public_gET_STATe malloc_get_state
#define public_sET_STATe malloc_set_state
#define public_aLIGNED_ALLOc aligned_alloc

#endif

#ifdef __cplusplus
extern "C" {
#endif

void* public_mALLOc(size_t bytes);
void public_fREe(void* mem);
void* public_rEALLOc(void* oldmem, size_t bytes);
void* public_mEMALIGn(size_t alignment, size_t bytes);
void* public_aLIGNED_ALLOc(size_t alignment, size_t bytes);
void* public_vALLOc(size_t bytes);
int public_pMEMALIGn (void **memptr, size_t alignment, size_t size) CMK_THROW;
void* public_cALLOc(size_t n_elements, size_t elem_size);
void** public_iCALLOc(size_t n, size_t elem_size, void* chunks[]);
void** public_iCOMALLOc(size_t n, size_t sizes[], void* chunks[]);
int public_mTRIm(size_t s);
size_t public_mUSABLe(void* mem);
int public_mALLOPt(int p, int v);
void public_mSTATs(void);

#ifdef __cplusplus
}
#endif

#if !defined _LIBC && (!defined __GNUC__ || __GNUC__<3)
#define __builtin_expect(expr, val) (expr)
#endif

#if MALLOC_DEBUG
#include <assert.h>
#else
#undef assert
#define assert(x) ((void)0)
#endif

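/* USE_STARTER controls whether the malloc_starter/free_starter bootstrap
   hooks below serve allocations made while the thread library is still
   initializing (values follow the ptmalloc2 convention). */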
#ifndef USE_STARTER
# ifndef _LIBC
#  define USE_STARTER 1
# else
#  if USE___THREAD || (defined USE_TLS && !defined SHARED)
#   define USE_STARTER 0
#  else
#   define USE_STARTER (USE_TLS ? 4 : 1)
#  endif
# endif
#endif

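/* Thread-specific key naming each thread's current arena, and the lock
   protecting the global arena list. */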
static tsd_key_t arena_key;
static mutex_t list_lock;

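/* An arena: a mutex, lock-contention statistics, a link in the circular
   arena list, and an embedded dlmalloc mspace carved out of buf_. */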
#ifdef __APPLE__
struct alignas(16) malloc_arena {
#else
struct malloc_arena {
#endif
  mutex_t mutex;

  long stat_lock_direct, stat_lock_loop, stat_lock_wait;
  long stat_starter;

  struct malloc_arena *next;

  char buf_[pad_request(sizeof(struct malloc_state)) + TOP_FOOT_SIZE +
            CHUNK_ALIGN_MASK + 1];
};
#define MSPACE_OFFSET (((offsetof(struct malloc_arena, buf_) \
                         + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK))
#define arena_to_mspace(a) ((void *)chunk2mem((char*)(a) + MSPACE_OFFSET))

#define chunk_non_main_arena(p) ((p)->head & NON_MAIN_ARENA)

static struct malloc_arena* _int_new_arena(size_t size);

static struct malloc_arena main_arena;

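/* FOOTER_OVERHEAD is the extra space reserved at the end of each
   non-main-arena chunk for a pointer back to its owning arena;
   set_non_main_arena() stores the pointer and arena_for_chunk()/
   arena_for_mmap_chunk() recover it. */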
#define FOOTER_OVERHEAD \
  (2*sizeof(struct malloc_arena*) - SIZE_T_SIZE)

#define arena_for_chunk(ptr) \
  (chunk_non_main_arena(ptr) ? *(struct malloc_arena**) \
   ((char*)(ptr) + chunksize(ptr) - (FOOTER_OVERHEAD - SIZE_T_SIZE)) \
   : &main_arena)

#define arena_for_mmap_chunk(ptr) \
  (chunk_non_main_arena(ptr) ? *(struct malloc_arena**) \
   ((char*)(ptr) + chunksize(ptr) - sizeof(struct malloc_arena*)) \
   : &main_arena)

#define set_non_main_arena(mem, ar_ptr) do { \
  mchunkptr P = mem2chunk(mem); \
  size_t SZ = chunksize(P) - (is_mmapped(P) ? sizeof(struct malloc_arena*) \
                              : (FOOTER_OVERHEAD - SIZE_T_SIZE)); \
  assert((unsigned long)((char*)(P) + SZ)%sizeof(struct malloc_arena*) == 0); \
  *(struct malloc_arena**)((char*)(P) + SZ) = (ar_ptr); \
  P->head |= NON_MAIN_ARENA; \
} while (0)

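/* arena_get(ptr, size): grab this thread's cached arena if its lock is
   immediately available, otherwise fall back to arena_get2() to find or
   create one.  The arena is returned locked. */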
#define arena_get(ptr, size) do { \
  void *vptr = NULL; \
  ptr = (struct malloc_arena*)tsd_getspecific(arena_key, vptr); \
  if(ptr && !mutex_trylock(&ptr->mutex)) { \
    THREAD_STAT(++(ptr->stat_lock_direct)); \
  } else \
    ptr = arena_get2(ptr, (size)); \
} while(0)

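/* Slow path of arena_get(): scan the circular arena list for one whose
   lock can be taken without blocking; if every arena is busy and no
   other thread is mid-creation, create a fresh arena and link it in. */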
static struct malloc_arena*
arena_get2(struct malloc_arena* a_tsd, size_t size)
{
  struct malloc_arena* a;
  int err;

  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      THREAD_STAT(++(a->stat_lock_loop));
      tsd_setspecific(arena_key, (void *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  if(mutex_trylock(&list_lock)) {
    a = a_tsd;
    goto repeat;
  }
  (void)mutex_unlock(&list_lock);

  a = _int_new_arena(size);
  if(!a)
    return 0;

  tsd_setspecific(arena_key, (void *)a);
  mutex_init(&a->mutex);
  err = mutex_lock(&a->mutex);

  (void)mutex_lock(&list_lock);
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;
  (void)mutex_unlock(&list_lock);

  if(err)
    return 0;

  THREAD_STAT(++(a->stat_lock_loop));
  return a;
}

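/* mmap() a fresh region (at least ARENA_SIZE_MIN, rounded up to an 8K
   boundary) and carve an mspace out of it, leaving room for the arena
   header at the front. */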
static struct malloc_arena*
_int_new_arena(size_t size)
{
  struct malloc_arena* a;
  size_t mmap_sz = sizeof(*a) + pad_request(size);
  void *m;

  if (mmap_sz < ARENA_SIZE_MIN)
    mmap_sz = ARENA_SIZE_MIN;

  mmap_sz = (mmap_sz + 8191) & ~(size_t)8191;
  a = (struct malloc_arena *)CALL_MMAP(mmap_sz);
  if ((char*)a == (char*)-1)
    return 0;

  m = create_mspace_with_base((char*)a + MSPACE_OFFSET,
                              mmap_sz - MSPACE_OFFSET,
                              0);

  if (!m) {
    CALL_MUNMAP(a, mmap_sz);
    a = 0;
  }

  return a;
}

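/* Hook machinery (glibc-compatible): __malloc_hook and friends start out
   pointing at one-shot initializers that run ptmalloc_init() on first use
   and then clear themselves. */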
#ifndef weak_variable
#ifndef _LIBC
#define weak_variable
#else
#define weak_variable weak_function
#endif
#endif

#if !(USE_STARTER & 2)
# define free_hook_ini NULL

static void* malloc_hook_ini (size_t sz, const void *caller);
static void* realloc_hook_ini (void* ptr, size_t sz, const void* caller);
static void* memalign_hook_ini (size_t alignment, size_t sz,
                                const void* caller);
#else
# define free_hook_ini free_starter
# define malloc_hook_ini malloc_starter
# define realloc_hook_ini NULL
# define memalign_hook_ini memalign_starter
#endif

#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif

void weak_variable (*__malloc_initialize_hook) (void) = NULL;
void weak_variable (*__free_hook) (void * __ptr, const void *)
     = free_hook_ini;
void * weak_variable (*__malloc_hook) (size_t __size, const void *)
     = malloc_hook_ini;
void * weak_variable (*__realloc_hook)
     (void * __ptr, size_t __size, const void *) = realloc_hook_ini;
void * weak_variable (*__memalign_hook)
     (size_t __alignment, size_t __size, const void *) = memalign_hook_ini;

#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif

#if !(USE_STARTER & 2)
static
#endif
void ptmalloc_init(void);

#if !(USE_STARTER & 2)

static void*
malloc_hook_ini(size_t sz, const void * caller)
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return public_mALLOc(sz);
}

static void *
realloc_hook_ini(void *ptr, size_t sz, const void * caller)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return public_rEALLOc(ptr, sz);
}

static void*
memalign_hook_ini(size_t alignment, size_t sz, const void * caller)
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return public_mEMALIGn(alignment, sz);
}

#endif

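/* "Starter" hooks: minimal allocation routines that serve requests made
   by the thread library while it is still initializing; they use the
   main arena without taking any locks. */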
#if !defined NO_THREADS && USE_STARTER

static void*
malloc_starter(size_t sz, const void *caller)
{
  void* victim;

  victim = mspace_malloc(arena_to_mspace(&main_arena), sz);
  THREAD_STAT(++main_arena.stat_starter);

  return victim;
}

static void*
memalign_starter(size_t align, size_t sz, const void *caller)
{
  void* victim;

  victim = mspace_memalign(arena_to_mspace(&main_arena), align, sz);
  THREAD_STAT(++main_arena.stat_starter);

  return victim;
}

static void
free_starter(void* mem, const void *caller)
{
  if (mem) {
    mchunkptr p = mem2chunk(mem);
    void *msp = arena_to_mspace(&main_arena);
    if (is_mmapped(p))
      munmap_chunk(msp, p);
    else
      mspace_free(msp, mem);
  }
  THREAD_STAT(++main_arena.stat_starter);
}

#endif

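/* fork() support: around a fork, every arena is locked and
   malloc_atfork/free_atfork are swapped in so allocations made in the
   window stay consistent; on Linux the child reinitializes all mutexes
   afterwards (see ptmalloc_unlock_all2). */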
#ifndef NO_THREADS

static void * (*save_malloc_hook) (size_t __size, const void *);
# if !defined _LIBC || !defined USE_TLS || (defined SHARED && !USE___THREAD)
static void * (*save_memalign_hook) (size_t __align, size_t __size,
                                     const void *);
# endif
static void (*save_free_hook) (void * __ptr, const void *);
static void* save_arena;

#define ATFORK_ARENA_PTR ((void*)-1)

static void*
malloc_atfork(size_t sz, const void *caller)
{
  void *vptr = NULL;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    return mspace_malloc(arena_to_mspace(&main_arena), sz);
  } else {
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return public_mALLOc(sz);
  }
}

static void
free_atfork(void* mem, const void *caller)
{
  void *vptr = NULL;
  struct malloc_arena *ar_ptr;
  mchunkptr p;

  if (mem == 0)
    return;

  p = mem2chunk(mem);

  if (is_mmapped(p)) {
    ar_ptr = arena_for_mmap_chunk(p);
    munmap_chunk(arena_to_mspace(ar_ptr), p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_lock(&ar_ptr->mutex);
  mspace_free(arena_to_mspace(ar_ptr), mem);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_unlock(&ar_ptr->mutex);
}

static void
ptmalloc_lock_all (void)
{
  struct malloc_arena* ar_ptr;

  if(__malloc_initialized < 1)
    return;
  (void)mutex_lock(&list_lock);
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena)
      break;
  }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;

  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
}

static void
ptmalloc_unlock_all (void)
{
  struct malloc_arena *ar_ptr;

  if(__malloc_initialized < 1)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}

#ifdef __linux__

static void
ptmalloc_unlock_all2(void)
{
  struct malloc_arena *ar_ptr;

  if(__malloc_initialized < 1)
    return;
#if defined _LIBC || 1
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#endif
  for(ar_ptr = &main_arena;;) {
    (void)mutex_init(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_init(&list_lock);
}

#else

#define ptmalloc_unlock_all2 ptmalloc_unlock_all

#endif /* __linux__ */

#endif /* !NO_THREADS */

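/* One-time initialization: set up the main arena and its mspace, the
   arena TSD key, the fork handlers, and environment-driven mallopt
   tuning. */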
#if !(USE_STARTER & 2)
static
#endif
void
ptmalloc_init(void)
{
  const char* s = NULL;
  int secure = 0;
  void *mspace;

  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

#ifndef NO_THREADS
# if USE_STARTER & 1
  save_malloc_hook = __malloc_hook;
  save_memalign_hook = __memalign_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_starter;
  __memalign_hook = memalign_starter;
  __free_hook = free_starter;
#  ifdef _LIBC
  if (__pthread_initialize != NULL)
    __pthread_initialize();
#  endif
# endif
#endif
  mutex_init(&main_arena.mutex);
  main_arena.next = &main_arena;
  mspace = create_mspace_with_base((char*)&main_arena + MSPACE_OFFSET,
                                   sizeof(main_arena) - MSPACE_OFFSET,
                                   0);
  assert(mspace == arena_to_mspace(&main_arena));

  mutex_init(&list_lock);
  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (void *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
#ifndef NO_THREADS
# if USE_STARTER & 1
  __malloc_hook = save_malloc_hook;
  __memalign_hook = save_memalign_hook;
  __free_hook = save_free_hook;
# endif
# if USE_STARTER & 2
  __malloc_hook = 0;
  __memalign_hook = 0;
  __free_hook = 0;
# endif
#endif
#ifdef _LIBC
  secure = __libc_enable_secure;
#else
  if (! secure) {
    if ((s = getenv("MALLOC_TRIM_THRESHOLD_")))
      public_mALLOPt(M_TRIM_THRESHOLD, atoi(s));
    if ((s = getenv("MALLOC_TOP_PAD_")) ||
        (s = getenv("MALLOC_GRANULARITY_")))
      public_mALLOPt(M_GRANULARITY, atoi(s));
    if ((s = getenv("MALLOC_MMAP_THRESHOLD_")))
      public_mALLOPt(M_MMAP_THRESHOLD, atoi(s));
  }
  s = getenv("MALLOC_CHECK_");
#endif
  if (s) {
    /* MALLOC_CHECK_ is accepted but currently ignored in this layer. */
  }
  if (__malloc_initialize_hook != NULL)
    (*__malloc_initialize_hook)();
  __malloc_initialized = 1;
}

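/* malloc() entry point: defer to any installed hook, otherwise allocate
   from this thread's arena, tag non-main-arena chunks with their owner,
   and account the chunk size in _memory_allocated.  The other public_*
   wrappers below follow the same pattern. */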
void*
public_mALLOc(size_t bytes)
{
  struct malloc_arena* ar_ptr;
  void *victim;

  void * (*hook) (size_t, const void *) = __malloc_hook;
  if (hook != NULL)
    return (*hook)(bytes, RETURN_ADDRESS (0));

  arena_get(ar_ptr, bytes + FOOTER_OVERHEAD);
  if (!ar_ptr)
    return 0;
  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  victim = mspace_malloc(arena_to_mspace(ar_ptr), bytes);
  if (victim && ar_ptr != &main_arena)
    set_non_main_arena(victim, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);
  assert(!victim || is_mmapped(mem2chunk(victim)) ||
         ar_ptr == arena_for_chunk(mem2chunk(victim)));

  if (victim != NULL) {
    _memory_allocated += chunksize(mem2chunk(victim));
    UPDATE_MEMUSAGE
  }

  return victim;
}
#ifdef libc_hidden_def
libc_hidden_def(public_mALLOc)
#endif

void
public_fREe(void* mem)
{
  struct malloc_arena* ar_ptr;
  mchunkptr p;

  void (*hook) (void *, const void *) = __free_hook;
  if (hook != NULL) {
    (*hook)(mem, RETURN_ADDRESS (0));
    return;
  }

  if (mem == 0)
    return;

  p = mem2chunk(mem);

  if (is_mmapped(p)) {
    ar_ptr = arena_for_mmap_chunk(p);
    munmap_chunk(arena_to_mspace(ar_ptr), p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
#if THREAD_STATS
  if(!mutex_trylock(&ar_ptr->mutex))
    ++(ar_ptr->stat_lock_direct);
  else {
    (void)mutex_lock(&ar_ptr->mutex);
    ++(ar_ptr->stat_lock_wait);
  }
#else
  (void)mutex_lock(&ar_ptr->mutex);
#endif
  mspace_free(arena_to_mspace(ar_ptr), mem);
  (void)mutex_unlock(&ar_ptr->mutex);
}
#ifdef libc_hidden_def
libc_hidden_def (public_fREe)
#endif

void*
public_rEALLOc(void* oldmem, size_t bytes)
{
  struct malloc_arena* ar_ptr;
  mchunkptr oldp;
  void* newp;

  void * (*hook) (void *, size_t, const void *) = __realloc_hook;
  if (hook != NULL)
    return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));

#if REALLOC_ZERO_BYTES_FREES
  if (bytes == 0 && oldmem != NULL) { public_fREe(oldmem); return 0; }
#endif

  if (oldmem == 0)
    return public_mALLOc(bytes);

  oldp = mem2chunk(oldmem);

  _memory_allocated -= chunksize(oldp);

  if (is_mmapped(oldp))
    ar_ptr = arena_for_mmap_chunk(oldp);
  else
    ar_ptr = arena_for_chunk(oldp);
#if THREAD_STATS
  if(!mutex_trylock(&ar_ptr->mutex))
    ++(ar_ptr->stat_lock_direct);
  else {
    (void)mutex_lock(&ar_ptr->mutex);
    ++(ar_ptr->stat_lock_wait);
  }
#else
  (void)mutex_lock(&ar_ptr->mutex);
#endif

#ifndef NO_THREADS
  tsd_setspecific(arena_key, (void *)ar_ptr);
#endif

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  newp = mspace_realloc(arena_to_mspace(ar_ptr), oldmem, bytes);

  if (newp && ar_ptr != &main_arena)
    set_non_main_arena(newp, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  assert(!newp || is_mmapped(mem2chunk(newp)) ||
         ar_ptr == arena_for_chunk(mem2chunk(newp)));

  if (newp != NULL) {
    _memory_allocated += chunksize(mem2chunk(newp));
    UPDATE_MEMUSAGE
  }

  return newp;
}
#ifdef libc_hidden_def
libc_hidden_def (public_rEALLOc)
#endif

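/* memalign(): alignments no larger than MALLOC_ALIGNMENT fall through
   to plain malloc(); otherwise the alignment is raised to at least
   MIN_CHUNK_SIZE and the request is served by mspace_memalign(). */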
void*
public_mEMALIGn(size_t alignment, size_t bytes)
{
  struct malloc_arena* ar_ptr;
  void *p;

  void * (*hook) (size_t, size_t, const void *) = __memalign_hook;
  if (hook != NULL)
    return (*hook)(alignment, bytes, RETURN_ADDRESS (0));

  if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(bytes);

  if (alignment < MIN_CHUNK_SIZE)
    alignment = MIN_CHUNK_SIZE;

  arena_get(ar_ptr,
            bytes + FOOTER_OVERHEAD + alignment + MIN_CHUNK_SIZE);
  if(!ar_ptr)
    return 0;

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  p = mspace_memalign(arena_to_mspace(ar_ptr), alignment, bytes);

  if (p && ar_ptr != &main_arena)
    set_non_main_arena(p, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  assert(!p || is_mmapped(mem2chunk(p)) ||
         ar_ptr == arena_for_chunk(mem2chunk(p)));

  if (p != NULL) {
    _memory_allocated += chunksize(mem2chunk(p));
    UPDATE_MEMUSAGE
  }

  return p;
}
#ifdef libc_hidden_def
libc_hidden_def (public_mEMALIGn)
#endif

void*
public_aLIGNED_ALLOc(size_t alignment, size_t bytes)
{
  return public_mEMALIGn(alignment, bytes);
}
#ifdef libc_hidden_def
libc_hidden_def (public_aLIGNED_ALLOc)
#endif

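/* valloc()/pvalloc(): page-aligned allocation; pvalloc() additionally
   rounds the size up to a whole number of pages. */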
void*
public_vALLOc(size_t bytes)
{
  struct malloc_arena* ar_ptr;
  void *p;

  if(__malloc_initialized < 0)
    ptmalloc_init ();
  arena_get(ar_ptr, bytes + FOOTER_OVERHEAD + MIN_CHUNK_SIZE);
  if(!ar_ptr)
    return 0;
  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  p = mspace_memalign(arena_to_mspace(ar_ptr), CmiGetPageSize(), bytes);

  if (p && ar_ptr != &main_arena)
    set_non_main_arena(p, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  if (p != NULL) {
    _memory_allocated += chunksize(mem2chunk(p));
    UPDATE_MEMUSAGE
  }

  return p;
}

void*
public_pVALLOc(size_t bytes)
{
  struct malloc_arena* ar_ptr;
  void *p;
  size_t pagesize;

  if(__malloc_initialized < 0)
    ptmalloc_init ();
  arena_get(ar_ptr, bytes + FOOTER_OVERHEAD + MIN_CHUNK_SIZE);
  if(!ar_ptr)
    return 0;
  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  pagesize = CmiGetPageSize();
  p = mspace_memalign(arena_to_mspace(ar_ptr), pagesize,
                      (bytes + pagesize - 1) & ~(pagesize - 1));

  if (p && ar_ptr != &main_arena)
    set_non_main_arena(p, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  if (p != NULL) {
    _memory_allocated += chunksize(mem2chunk(p));
    UPDATE_MEMUSAGE
  }

  return p;
}

int
public_pMEMALIGn (void **memptr, size_t alignment, size_t size) CMK_THROW
{
  void *mem;

  /* The alignment must be a nonzero power of two and a multiple of
     sizeof (void *). */
  if (alignment % sizeof (void *) != 0
      || !my_powerof2 (alignment / sizeof (void *))
      || alignment == 0)
    return EINVAL;

  mem = public_mEMALIGn (alignment, size);

  if (mem != NULL) {
    *memptr = mem;
    return 0;
  }

  return ENOMEM;
}

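/* calloc(): checks n_elements*elem_size for overflow before allocating
   zeroed memory; when a malloc hook is installed, the memory is zeroed
   manually after the hook returns. */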
void*
public_cALLOc(size_t n_elements, size_t elem_size)
{
  struct malloc_arena* ar_ptr;
  size_t bytes, sz;
  void* mem;
  void * (*hook) (size_t, const void *) = __malloc_hook;

  bytes = n_elements * elem_size;
#define HALF_INTERNAL_SIZE_T \
  (((size_t) 1) << (8 * sizeof (size_t) / 2))
  if (__builtin_expect ((n_elements | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
    if (elem_size != 0 && bytes / elem_size != n_elements) {
      return 0;
    }
  }

  if (hook != NULL) {
    sz = bytes;
    mem = (*hook)(sz, RETURN_ADDRESS (0));
    if(mem == 0)
      return 0;
#ifdef HAVE_MEMCPY
    return memset(mem, 0, sz);
#else
    while(sz > 0) ((char*)mem)[--sz] = 0;
    return mem;
#endif
  }

  arena_get(ar_ptr, bytes + FOOTER_OVERHEAD);
  if(!ar_ptr)
    return 0;

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  mem = mspace_calloc(arena_to_mspace(ar_ptr), bytes, 1);

  if (mem && ar_ptr != &main_arena)
    set_non_main_arena(mem, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  assert(!mem || is_mmapped(mem2chunk(mem)) ||
         ar_ptr == arena_for_chunk(mem2chunk(mem)));

  if (mem != NULL) {
    _memory_allocated += chunksize(mem2chunk(mem));
    UPDATE_MEMUSAGE
  }

  return mem;
}

void**
public_iCALLOc(size_t n, size_t elem_size, void* chunks[])
{
  struct malloc_arena* ar_ptr;
  void** m;

  arena_get(ar_ptr, n*(elem_size + FOOTER_OVERHEAD));
  if (!ar_ptr)
    return 0;

  if (ar_ptr != &main_arena)
    elem_size += FOOTER_OVERHEAD;
  m = mspace_independent_calloc(arena_to_mspace(ar_ptr), n, elem_size, chunks);

  if (m && ar_ptr != &main_arena) {
    while (n > 0)
      set_non_main_arena(m[--n], ar_ptr);
  }
  (void)mutex_unlock(&ar_ptr->mutex);

  if (m != NULL) {
    _memory_allocated += chunksize(mem2chunk(m));
    UPDATE_MEMUSAGE
  }

  return m;
}

void**
public_iCOMALLOc(size_t n, size_t sizes[], void* chunks[])
{
  struct malloc_arena* ar_ptr;
  size_t* m_sizes;
  size_t i;
  void** m;

  arena_get(ar_ptr, n*sizeof(size_t));
  if (!ar_ptr)
    return 0;

  if (ar_ptr != &main_arena) {
    m_sizes = (size_t *)mspace_malloc(arena_to_mspace(ar_ptr), n*sizeof(size_t));
    if (!m_sizes) {
      (void)mutex_unlock(&ar_ptr->mutex);
      return 0;
    }
    for (i=0; i<n; ++i)
      m_sizes[i] = sizes[i] + FOOTER_OVERHEAD;
    if (!chunks) {
      chunks = (void **)mspace_malloc(arena_to_mspace(ar_ptr),
                                      n*sizeof(void*)+FOOTER_OVERHEAD);
      if (!chunks) {
        mspace_free(arena_to_mspace(ar_ptr), m_sizes);
        (void)mutex_unlock(&ar_ptr->mutex);
        return 0;
      }
      set_non_main_arena(chunks, ar_ptr);
    }
  } else
    m_sizes = sizes;

  m = mspace_independent_comalloc(arena_to_mspace(ar_ptr), n, m_sizes, chunks);

  if (ar_ptr != &main_arena) {
    mspace_free(arena_to_mspace(ar_ptr), m_sizes);
    if (m)
      for (i=0; i<n; ++i)
        set_non_main_arena(m[i], ar_ptr);
  }
  (void)mutex_unlock(&ar_ptr->mutex);

  if (m != NULL) {
    _memory_allocated += chunksize(mem2chunk(m));
    UPDATE_MEMUSAGE
  }

  return m;
}

void
public_cFREe(void* m)
{
  public_fREe(m);
}

int
public_mTRIm(size_t s)
{
  int result;

  (void)mutex_lock(&main_arena.mutex);
  result = mspace_trim(arena_to_mspace(&main_arena), s);
  (void)mutex_unlock(&main_arena.mutex);
  return result;
}

size_t
public_mUSABLe(void* mem)
{
  if (mem != 0) {
    mchunkptr p = mem2chunk(mem);
    if (cinuse(p))
      return chunksize(p) - overhead_for(p);
  }
  return 0;
}

int
public_mALLOPt(int p, int v)
{
  int result;
  result = mspace_mallopt(p, v);
  return result;
}

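/* malloc_stats(): walk the arena ring, printing per-arena mspace
   statistics (and, when MALLOC_DEBUG > 1, each arena's segment map). */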
void
public_mSTATs(void)
{
  int i;
  struct malloc_arena* ar_ptr;

#if THREAD_STATS
  long stat_lock_direct = 0, stat_lock_loop = 0, stat_lock_wait = 0;
#endif

  if(__malloc_initialized < 0)
    ptmalloc_init ();
  for (i=0, ar_ptr = &main_arena;; ++i) {
    struct malloc_state* msp = (struct malloc_state *)arena_to_mspace(ar_ptr);

    fprintf(stderr, "Arena %d:\n", i);
    mspace_malloc_stats(msp);
#if THREAD_STATS
    stat_lock_direct += ar_ptr->stat_lock_direct;
    stat_lock_loop += ar_ptr->stat_lock_loop;
    stat_lock_wait += ar_ptr->stat_lock_wait;
#endif
    if (MALLOC_DEBUG > 1) {
      struct malloc_segment* mseg = &msp->seg;
      while (mseg) {
        fprintf(stderr, " seg %08lx-%08lx\n", (unsigned long)mseg->base,
                (unsigned long)(mseg->base + mseg->size));
        mseg = mseg->next;
      }
    }
    ar_ptr = ar_ptr->next;
    if (ar_ptr == &main_arena)
      break;
  }
#if THREAD_STATS
  fprintf(stderr, "locked directly = %10ld\n", stat_lock_direct);
  fprintf(stderr, "locked in loop = %10ld\n", stat_lock_loop);
  fprintf(stderr, "locked waiting = %10ld\n", stat_lock_wait);
  fprintf(stderr, "locked total = %10ld\n",
          stat_lock_direct + stat_lock_loop + stat_lock_wait);
  if (main_arena.stat_starter > 0)
    fprintf(stderr, "starter hooks = %10ld\n", main_arena.stat_starter);
#endif
}

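/* Undo the malloc -> mm_malloc renaming applied at the top of this file. */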
#undef malloc
#undef free
#undef calloc
#undef cfree
#undef realloc
#undef memalign
#undef posix_memalign
#undef aligned_alloc
#undef valloc
#undef pvalloc