/* Heap and GC tuning parameters. */
#define INITIAL_HEAP_SIZE 2500 // 10M
#define DEFAULT_MAX_RGCSTACK 32768
#define DEFAULT_EXPAND_SIZE_IDX 24
#define DEFAULT_GC_THRESHOLD 0.25
#define REALTIME_ALLOC_LIMIT_IDX 12
#define GC_ACTIVATE_CUSHION 15

#define AM_UNIT (M_UNIT)
#define AS_UNIT (S_UNIT)
#define GC_GRANULARITY 2
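/* Sketch only: one plausible reading of DEFAULT_GC_THRESHOLD, namely a GC
   trigger when the free fraction of the heap drops below 0.25. The counters
   free_words/total_words are hypothetical names, not from this file. */
static int gc_threshold_reached(long free_words, long total_words)
{
    return (double)free_words < DEFAULT_GC_THRESHOLD * (double)total_words;
}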
#if PTHREAD /* the same as those in eus_thr.h */
typedef pthread_t thread_t;
typedef pthread_mutex_t mutex_t;
typedef pthread_cond_t cond_t;
#define thr_exit(x) pthread_exit(x)
#define thr_join(x,y,z) pthread_join(x,z)
#define mutex_lock pthread_mutex_lock
#define mutex_trylock pthread_mutex_trylock
#define mutex_unlock pthread_mutex_unlock
#define cond_wait pthread_cond_wait
#define cond_signal pthread_cond_signal
#define cond_broadcast pthread_cond_broadcast
#define mutex_init(x,y) pthread_mutex_init(x,y)
#define mutex_destroy pthread_mutex_destroy
#define cond_init(x,y) pthread_cond_init(x,y)
#define cond_destroy pthread_cond_destroy
#endif /* PTHREAD */
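/* Usage sketch for the portability wrappers above (PTHREAD build): each
   expands directly to the corresponding pthread call. */
static mutex_t demo_lock;
static cond_t demo_cv;

static void wrapper_demo(void)
{
    mutex_init(&demo_lock, NULL);
    cond_init(&demo_cv, NULL);
    mutex_lock(&demo_lock);
    cond_signal(&demo_cv);      /* wakes one waiter, if any */
    mutex_unlock(&demo_lock);
    mutex_destroy(&demo_lock);
    cond_destroy(&demo_cv);
}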
/* Collector phase codes (stored in gc_phase). */
#define PHASE_PROLOGUE 1
#define PHASE_EPILOGUE 2
#define PHASE_MARK 3
#define PHASE_SWEEP 4
#define PHASE_ROOT_CORE 5
#define PHASE_ROOT_REM 6
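/* Sketch: the codes are ordered so that one comparison tells a mutator
   whether its barriers must be active; the macros further down test
   gc_phase >= PHASE_MARK in exactly this way. */
static int barriers_needed(int phase)
{
    return phase >= PHASE_MARK;  /* mark, sweep, or root-scan phase */
}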
#ifdef __RETURN_BARRIER
/* Return barrier: when a stack frame is popped past the barrier, scan the
   abandoned region of the Lisp stack and push anything that still looks
   like a heap pointer onto the GC stack. */
#define check_return_barrier(ctx) \
{ \
  if((ctx)->rbar.pointer){ \
    register pointer *p, *newbase; \
    if((ctx)->callfp) newbase = (pointer *)((ctx)->callfp); \
    else newbase = (ctx)->stack; \
    if(newbase < (ctx)->rbar.pointer){ \
      mutex_lock(&(ctx)->rbar.lock); \
      if(newbase < (ctx)->rbar.pointer){ \
        for(p = (ctx)->rbar.pointer - 1; p >= newbase; p--) { \
          if (*p == NULL) continue; \
          if (((int)(*p) & 3)) continue; /* immediate datum */ \
          if ((ctx->stack <= (pointer *)*p) && ((pointer *)*p <= ctx->stacklimit)) \
            continue; /* address within the Lisp stack */ \
          if ((pointer *)*p >= (pointer *)hmax) continue; \
          if ((pointer *)*p < (pointer *)hmin) continue; \
          pgcpush(*p); /* a possible heap reference: keep it alive */ \
        } \
        if(newbase == (ctx)->stack) \
          (ctx)->rbar.pointer = NULL; \
        else \
          (ctx)->rbar.pointer = newbase; \
      } \
      mutex_unlock(&(ctx)->rbar.lock); \
    } \
  } \
}
#endif /* __RETURN_BARRIER */
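/* Standalone restatement of the conservative pointer test in the scan loop
   above; parameter names mirror the macro's. A word is skipped if it is
   NULL, tagged/misaligned, points into the Lisp stack itself, or lies
   outside the heap bounds [hmin, hmax). */
#include <stdint.h>
static int looks_like_heap_pointer(void *w, void **stack, void **stacklimit,
                                   void *hmin, void *hmax)
{
    if (w == NULL) return 0;                      /* empty slot */
    if ((uintptr_t)w & 3) return 0;               /* immediate datum, not a cell */
    if ((void **)w >= stack && (void **)w <= stacklimit)
        return 0;                                 /* address within the stack */
    if (w < hmin || w >= hmax) return 0;          /* outside the GC heap */
    return 1;                                     /* candidate for pgcpush */
}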
/* Object color bits kept in the header's bix field. */
#define BLACKTAG 0x40
#define COLORTAG 0x60

#ifndef __USE_MARK_BITMAP
#define colored_map(p) colored_hd(p)
#define blacked_map(p) blacked_hd(p)
#define markon_map(p) markon_hd(p)
#define markoff_map(p) markoff_hd(p)
#define marked_map(p) marked_hd(p)
#endif

#define colored_hd(p) (p->h.bix & COLORTAG)
#define blacked_hd(p) (p->h.bix & BLACKTAG)
#define markon_hd(p) (p->h.bix |= BLACKTAG)
#define markoff_hd(p) (p->h.bix &= TAGMASK)
#define marked_hd(p) (p->h.bix & BLACKTAG)
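/* Sketch of how the header-bit macros compose into a mark step; `pointer`
   is EusLisp's object handle, and push_children() is a hypothetical
   stand-in for tracing the object's references. */
extern void push_children(pointer p);       /* hypothetical helper */
static void mark_object(pointer p)
{
    if (p == NULL || marked_hd(p)) return;  /* already marked black */
    markon_hd(p);                           /* sets BLACKTAG in p->h.bix */
    push_children(p);                       /* schedule referenced cells */
}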
struct _mut_stat_table {
  /* per-thread mutator status; field list elided in this listing */
};
/* members of the shared gc_data structure (its declaration is elided here;
   the aliases below give them their usual short names): */
volatile int gc_request_flag;
struct _mut_stat_table mut_stat_table[MAXTHREAD];
/* Short names for the members of the shared gc_data structure. */
#define collector_stack gc_data.collector_stack
#define collector_sp gc_data.collector_sp
#define collector_stacklimit gc_data.collector_stacklimit
#define gc_phase gc_data.gc_phase
#define active_mutator_num gc_data.active_mutator_num
#define gc_counter gc_data.gc_counter
#define gc_point_sync gc_data.gc_point_sync
#define gc_region_sync gc_data.gc_region_sync
#define ri_core_phase gc_data.ri_core_phase
#define mut_stat_phase gc_data.mut_stat_phase
#define gc_request_flag gc_data.gc_request_flag
#define mut_stat_table gc_data.mut_stat_table
#define gc_state_lock gc_data.gc_state_lock
#define collector_lock gc_data.collector_lock
#define ri_end_cv gc_data.ri_end_cv
#define gc_wakeup_cv gc_data.gc_wakeup_cv
#define gc_wakeup_cnt gc_data.gc_wakeup_cnt
#define gc_cmp_cnt gc_data.gc_cmp_cnt
#define gc_net_free gc_data.gc_net_free

#define lock_collector mutex_lock(&collector_lock)
#define unlock_collector mutex_unlock(&collector_lock)
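/* Sketch: collector-side code updates shared state under collector_lock
   using the short names defined above. */
static void bump_gc_counter(void)
{
    lock_collector;   /* mutex_lock(&gc_data.collector_lock) */
    gc_counter++;     /* i.e., gc_data.gc_counter */
    unlock_collector;
}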
extern int allocd_words;

/* Polling point: mutators invoke GC_POINT at safe points; scan_roots()
   runs only when the collector has raised gc_request_flag. */
#define GC_POINT _check_gc_request()
#define _check_gc_request() { \
  if (gc_request_flag) scan_roots(); \
}
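/* Sketch: a long-running mutator polls at each iteration; the cost is a
   single flag read unless a collection has actually been requested. */
static void mutator_loop_step(void)
{
    GC_POINT;  /* calls scan_roots() only if gc_request_flag is set */
    /* ... allocate and mutate ... */
}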
#define ENTER_GC_SAFE_REGION(id) enter_gc_region(id)
#define EXIT_GC_SAFE_REGION(id) exit_gc_region(id)
/* Run comp_statement inside a GC-safe region: the collector may proceed
   while this thread executes (and possibly blocks in) the statement. */
#define GC_REGION(comp_statement) \
{ \
  int _tmp_id = thr_self(); \
  ENTER_GC_SAFE_REGION(_tmp_id); \
  comp_statement; \
  EXIT_GC_SAFE_REGION(_tmp_id); \
}
#else /* guarding condition elided in this listing */
#define GC_REGION(cmp_statement) cmp_statement
#endif
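/* Sketch: wrapping a blocking system call in GC_REGION so the collector
   can make progress while this thread waits in the kernel. */
#include <unistd.h>
static ssize_t gc_safe_read(int fd, char *buf, size_t n)
{
    ssize_t r;
    GC_REGION(r = read(fd, buf, n));
    return r;
}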
#define pgpush(v) ( *ctx->gsp++ = ((pointer)v) )
#define pgcpush(v) ( ctx->gsp < ctx->gcstacklimit ? \
  pgpush(v) : error(E_GCSTACKOVER) )
#define pgcpop() ( *(--(ctx->gsp)) )
#define ppush(v) ( *psp++ = ((pointer)v) )
/* Busy-wait counting semaphore built on compare-and-swap; cas_int evidently
   returns nonzero on failure, so each loop retries until the swap succeeds. */
#define busy_sema_wait(k) { \
  int i; \
  do { \
    while ((i = read_volatile_int(k)) <= 0); \
  } while (cas_int(k, i, i - 1)); \
  start_access_after_write(); \
}
#define busy_sema_post(k) { \
  int i; \
  finish_access_before_read(); \
  do { \
    i = read_volatile_int(k); \
  } while (cas_int(k, i, i + 1)); \
}
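/* The same protocol restated with C11 atomics: spin until the count is
   positive, then try to CAS it down, retrying on any lost race. */
#include <stdatomic.h>
static void busy_wait_c11(atomic_int *k)
{
    int i;
    do {
        while ((i = atomic_load(k)) <= 0) ;  /* spin while unavailable */
    } while (!atomic_compare_exchange_weak(k, &i, i - 1));
}
static void busy_post_c11(atomic_int *k)
{
    int i;
    do { i = atomic_load(k); }
    while (!atomic_compare_exchange_weak(k, &i, i + 1));
}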
/* Pointer-stack push/pop, serialized by the ps_sem busy semaphore. */
#define pointerpush(v) { \
  busy_sema_wait(ps_sem); \
  psp < pstacklimit ? ppush(v) : (pointer)error(E_PSTACKOVER); \
  busy_sema_post(ps_sem); \
}
#define pointerpop(lv) { \
  busy_sema_wait(ps_sem); \
  (lv) = *(--psp); \
  busy_sema_post(ps_sem); \
}
/* Snapshot barrier: while marking, save p on the pointer stack so the
   collector still traces it. */
#define take_care(p) \
{ \
  if(gc_phase >= PHASE_MARK){ \
    mutex_lock(&pstack_lock); \
    ASSERT((p) == NULL || !ispointer(p) || \
      ((unsigned)(p) >= mingcheap && ((unsigned)(p) < maxgcheap))); \
    pointerpush(p); \
    mutex_unlock(&pstack_lock); \
  } \
}
/* Write barrier: record the slot's old value, then perform the store. */
#define pointer_update(l, r) \
{ \
  if(gc_phase >= PHASE_MARK){ \
    mutex_lock(&pstack_lock); \
    ASSERT((l) == NULL || !ispointer(l) || \
      ((unsigned)(l) >= mingcheap && ((unsigned)(l) < maxgcheap))); \
    pointerpush(l); \
    mutex_unlock(&pstack_lock); \
  } \
  (l) = (r); \
}
/* As take_care, but records two pointers at once. */
#define noticeCollector(p1, p2) \
{ \
  if (gc_phase >= PHASE_MARK) { \
    ASSERT((p1) == NULL || !ispointer(p1) || \
      ((unsigned)(p1) >= mingcheap && ((unsigned)(p1) < maxgcheap))); \
    ASSERT((p2) == NULL || !ispointer(p2) || \
      ((unsigned)(p2) >= mingcheap && ((unsigned)(p2) < maxgcheap))); \
    mutex_lock(&pstack_lock); \
    pointerpush(p1); \
    pointerpush(p2); \
    mutex_unlock(&pstack_lock); \
  } \
}
#define noticeCollector1(p) take_care(p)
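/* Sketch: every store of a pointer into a heap object goes through the
   write barrier rather than a raw assignment, so the overwritten value
   stays reachable for the marking snapshot. The cons-cell field follows
   EusLisp's layout; any heap slot works the same way. */
static void barrier_set_car(pointer cell, pointer newval)
{
    /* instead of: cell->c.cons.car = newval; */
    pointer_update(cell->c.cons.car, newval);
}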
/* Alternative take_care (its guarding conditional is elided in this
   listing): the same barrier, with an explicit range check on p. */
#define take_care(p){ \
  if((((unsigned)p) < mingcheap || ((unsigned)p >= maxgcheap)) \
      && (p) != 0 && ispointer(p)) \
    /* out-of-range pointer: the report call is elided in this listing */; \
  if(gc_phase >= PHASE_MARK){ \
    mutex_lock(&pstack_lock); \
    pointerpush(p); \
    mutex_unlock(&pstack_lock); \
  } \
}
/* Declarations indexed for this file: */
void sighandler(int);
void do_a_little_gc_work();
typedef struct barrier_struct *barrier_t;
barrier_t barrier_init(int);
void barrier_wait(barrier_t);
void barrier_reset(barrier_t, int);
unsigned int allocate_heap();
ms_entry *collector_stack;
ms_entry *collector_stacklimit;
volatile pointer *oldpsp;
barrier_t startup_barrier;
volatile int ri_core_phase;
volatile int gc_region_sync;
struct _sweeping_state sweeping_state;