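/*
  Thread abstraction layer for the accompanying malloc implementation.
  It defines the mutex_t, thread-specific-data (tsd_*), thread_atfork and
  memory-barrier primitives the allocator relies on.  When the build sets
  CMK_SHARED_VARS_POSIX_THREADS_SMP these map onto POSIX threads (with an
  optional x86 spinlock fast path); otherwise single-threaded dummy
  definitions are provided.
*/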
#ifndef __MALLOC_MACHINE_H
#define __MALLOC_MACHINE_H

#if CMK_SHARED_VARS_POSIX_THREADS_SMP

#define USE_TSD_DATA_HACK 1
#include <pthread.h>

#undef thread_atfork_static

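/* Fast path: inline x86/x86_64 spinlocks when building with GCC.
   mutex_lock spins on an atomic xchg; under contention it first yields the
   CPU and, after 50 failed attempts, sleeps for roughly 2 ms before
   retrying.  Define USE_NO_SPINLOCKS to force the plain pthread_mutex_t
   path below. */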
#if (defined __i386__ || defined __x86_64__) && defined __GNUC__ && \
    !defined USE_NO_SPINLOCKS

#include <time.h>
#include <sched.h>

typedef struct {
  volatile unsigned int lock;
  int pad0_;
} mutex_t;

#define MUTEX_INITIALIZER   { 0 }
#define mutex_init(m)       ((m)->lock = 0)
static inline int mutex_lock(mutex_t *m) {
  int cnt = 0, r;
  struct timespec tm;

  for(;;) {
    __asm__ __volatile__
      ("xchgl %0, %1"
       : "=r"(r), "=m"(m->lock)
       : "0"(1), "m"(m->lock)
       : "memory");
    if(!r)
      return 0;
    if(cnt < 50) {
      sched_yield();
      cnt++;
    } else {
      tm.tv_sec = 0;
      tm.tv_nsec = 2000001;
      nanosleep(&tm, NULL);
      cnt = 0;
    }
  }
}
static inline int mutex_trylock(mutex_t *m) {
  int r;

  __asm__ __volatile__
    ("xchgl %0, %1"
     : "=r"(r), "=m"(m->lock)
     : "0"(1), "m"(m->lock)
     : "memory");
  return r;
}
static inline int mutex_unlock(mutex_t *m) {
  /* A plain store releases the lock on x86; the "ir" constraint keeps the
     zero in a register or immediate so the move is always valid. */
  __asm__ __volatile__ ("movl %1, %0" : "=m" (m->lock) : "ir"(0) : "memory");
  return 0;
}

#else

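/* Portable fallback: plain POSIX mutexes. */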
typedef pthread_mutex_t mutex_t;

#define MUTEX_INITIALIZER   PTHREAD_MUTEX_INITIALIZER
#define mutex_init(m)       pthread_mutex_init(m, NULL)
#define mutex_lock(m)       pthread_mutex_lock(m)
#define mutex_trylock(m)    pthread_mutex_trylock(m)
#define mutex_unlock(m)     pthread_mutex_unlock(m)

#endif

#if defined(__sgi) || defined(USE_TSD_DATA_HACK)

#include <stdint.h>   /* uintptr_t, used by the tsd_* macros below */

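/* Thread-specific data without pthread_{set,get}specific: each key is a
   256-entry table indexed by pthread_self() reduced modulo 256.  This keeps
   the allocator from calling back into the pthread TSD machinery, but it
   assumes pthread_t converts to an integer and that concurrently live
   threads land in distinct slots. */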
typedef void *tsd_key_t[256];
#define tsd_key_create(key, destr) do { \
  int i; \
  for(i=0; i<256; i++) (*key)[i] = 0; \
} while(0)
#define tsd_setspecific(key, data) \
  (key[(uintptr_t)pthread_self() % 256] = (data))
#define tsd_getspecific(key, vptr) \
  (vptr = key[(uintptr_t)pthread_self() % 256])

#else

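/* Standard POSIX thread-specific data. */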
typedef pthread_key_t tsd_key_t;

#define tsd_key_create(key, destr)   pthread_key_create(key, destr)
#define tsd_setspecific(key, data)   pthread_setspecific(key, data)
#define tsd_getspecific(key, vptr)   (vptr = pthread_getspecific(key))

#endif

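/* Fork handling is delegated to pthread_atfork. */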
#define thread_atfork(prepare, parent, child) \
  pthread_atfork(prepare, parent, child)

#endif  /* CMK_SHARED_VARS_POSIX_THREADS_SMP */

#ifndef mutex_init

# define NO_THREADS

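/* No thread support was selected above: provide single-threaded dummies.
   The dummy mutex still flips between a locked and an unlocked state, so
   code that uses a mutex as an "in use" flag (e.g. via mutex_trylock)
   continues to behave correctly. */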
typedef int mutex_t;

# define mutex_init(m)              (*(m) = 0)
# define mutex_lock(m)              ((*(m) = 1), 0)
# define mutex_trylock(m)           (*(m) ? 1 : ((*(m) = 1), 0))
# define mutex_unlock(m)            (*(m) = 0)

typedef void *tsd_key_t;
# define tsd_key_create(key, destr) do {} while(0)
# define tsd_setspecific(key, data) ((key) = (data))
# define tsd_getspecific(key, vptr) (vptr = (key))

# define thread_atfork(prepare, parent, child) do {} while(0)

#endif

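/* Memory-barrier fallbacks: platforms that do not supply their own
   atomic_*_barrier macros get a GCC compiler barrier (no CPU fence is
   emitted). */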
#ifndef atomic_full_barrier
# define atomic_full_barrier()  __asm ("" ::: "memory")
#endif

#ifndef atomic_read_barrier
# define atomic_read_barrier()  atomic_full_barrier ()
#endif

#ifndef atomic_write_barrier
# define atomic_write_barrier() atomic_full_barrier ()
#endif

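/* Extra padding (in bytes) the allocator requests beyond its immediate
   need whenever it extends the heap; 131072 = 128 KiB. */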
#ifndef DEFAULT_TOP_PAD
# define DEFAULT_TOP_PAD 131072
#endif

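/*
  Minimal usage sketch (illustrative only, not part of the definitions
  above): whichever branch was selected, callers see the same names and
  calling conventions.  The identifiers below (list_lock, arena_key,
  module_init, ...) are hypothetical.

      static mutex_t   list_lock;
      static tsd_key_t arena_key;

      void module_init(void) {
        mutex_init(&list_lock);
        tsd_key_create(&arena_key, NULL);
      }

      void remember_arena(void *a) {
        tsd_setspecific(arena_key, a);
      }

      void critical_update(void) {
        mutex_lock(&list_lock);
        ... update the shared structure ...
        mutex_unlock(&list_lock);
      }
*/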
#endif  /* __MALLOC_MACHINE_H */