/*
 * collector.h: R.Hanai
 * concurrent snapshot collector with return barrier for EusLisp
 */

#ifndef __COLLECTOR_H
#define __COLLECTOR_H

#include "rgc_utils.h"
#include "xccmem.h"

//#define INITIAL_HEAP_SIZE 520 // 2M
//#define INITIAL_HEAP_SIZE 800 // 3M
//#define INITIAL_HEAP_SIZE 1250 // 5M
//#define INITIAL_HEAP_SIZE 1800 //
#define INITIAL_HEAP_SIZE 2500 // 10M
//#define INITIAL_HEAP_SIZE 5000 // 20M
//#define INITIAL_HEAP_SIZE 12600 // 50M
//#define INITIAL_HEAP_SIZE 25200 // 100M

#define DEFAULT_MAX_RGCSTACK 32768 /* 16384 */

#define DEFAULT_EXPAND_SIZE_IDX 24 /* about ??? KB */

#define DEFAULT_GC_THRESHOLD 0.25

#define REALTIME_ALLOC_LIMIT_IDX 12 /* 699 words */
/* 12:699, 13:1131, 14:1830, 15:2961, 16:4791.. */

#define GC_ACTIVATE_CUSHION 15 /* 10...20 */

#define M_UNIT 8000 /* 4000 */
#define S_UNIT 256 /* 128, 256, 512 */
#define AM_UNIT (M_UNIT)
#define AS_UNIT (S_UNIT)

#define GC_GRANULARITY 2 /* 2, 4 */

extern char *minmemory;

#if PTHREAD // the same as those in eus_thr.h
typedef pthread_t thread_t;
typedef pthread_mutex_t mutex_t;
typedef pthread_cond_t cond_t;
#define thr_exit(x) pthread_exit(x)
#define thr_join(x,y,z) pthread_join(x,z)
#define mutex_lock pthread_mutex_lock
#define mutex_trylock pthread_mutex_trylock
#define mutex_unlock pthread_mutex_unlock
#define cond_wait pthread_cond_wait
#define cond_signal pthread_cond_signal
#define cond_broadcast pthread_cond_broadcast
#define mutex_init(x,y) pthread_mutex_init(x,y)
#define mutex_destroy pthread_mutex_destroy
#define cond_init(x,y) pthread_cond_init(x,y)
#define cond_destroy pthread_cond_destroy
#endif

/*
 * collector state
 *
 * Do not edit: the following inequality must be satisfied:
 *   PHASE_ROOT_* > PHASE_MARK > PHASE_SWEEP > PHASE_*
 *      write barrier <=|
 *     allocate black <=|
 * (the "<=|" marks indicate the range of phases in which the
 *  write barrier is active and new objects are allocated black)
 */
#define PHASE_NOGC 0
#define PHASE_PROLOGUE 1
#define PHASE_EPILOGUE 2
#define PHASE_ROOT_CORE 5
#define PHASE_ROOT_REM 6
#define PHASE_MARK 4
#define PHASE_SWEEP 3
extern struct _sweeping_state {
  struct chunk *chp;
  struct bcell *p; /* bpointer */
  struct bcell *tail;
} sweeping_state;

#ifdef __RETURN_BARRIER
typedef struct {
  pointer *pointer; /* stack scan boundary for the return barrier */
  mutex_t lock;
} rbar_t;

#define check_return_barrier(ctx) \
{ \
  if((ctx)->rbar.pointer){ \
    register pointer *p, *newbase; \
    if((ctx)->callfp) newbase = (pointer *)((ctx)->callfp); \
    else newbase = (ctx)->stack; \
    if(newbase < (ctx)->rbar.pointer){ \
      mutex_lock(&(ctx)->rbar.lock); \
      if(newbase < (ctx)->rbar.pointer){ \
        /* printf("thread ID inserting root \n"); */ \
        for(p = (ctx)->rbar.pointer - 1; p >= newbase; p--) { \
          if (*p == NULL) continue; \
          if (((int)(*p) & 3)) continue; \
          if ((ctx->stack <= (pointer *)*p) && ((pointer *)*p <= ctx->stacklimit)) \
            continue; \
          if ((pointer *)*p >= (pointer *)hmax) continue; \
          if ((pointer *)*p < (pointer *)hmin) continue; \
          pgcpush(*p); \
          /* ASSERT((pointer *)*p >= hmin); */ \
          /* ASSERT((pointer *)*p < hmax); */ \
        } \
        if(newbase == (ctx)->stack) \
          (ctx)->rbar.pointer = NULL; \
        else \
          (ctx)->rbar.pointer = newbase; \
      } \
      mutex_unlock(&(ctx)->rbar.lock); \
    } \
  } \
}
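
/*
 * check_return_barrier runs when control returns into a stack region
 * that the collector has not finished scanning: it pushes the heap
 * pointers found between the new frame base and the recorded scan
 * boundary onto the GC stack, then lowers (or clears) the boundary.
 */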

/*
 * old version code
 */
/*
#define check_return_barrier(ctx) \
{ \
  int id = thr_self(); \
  if(rbar[id].pointer){ \
    register pointer *p, *newbase = (ctx)->stack; \
    if((ctx)->callfp) newbase = (pointer *)((ctx)->callfp); \
    if(newbase < rbar[id].pointer){ \
      mutex_lock(&rbar[id].lock); \
      if(newbase < rbar[id].pointer){ \
        printf("thread %d inserting root \n",id); \
        for(p = rbar[id].pointer - 1; p >= newbase; p--) \
          if((((int)(*p) & 3)==0) && (((ctx)->stack > (pointer *)*p) || \
              ((pointer *)*p > (ctx)->stacklimit))) \
            pgcpush(*p); \
        if(newbase == (ctx)->stack) \
          rbar[id].pointer = NULL; \
        else \
          rbar[id].pointer = newbase; \
      } \
      mutex_unlock(&rbar[id].lock); \
    } \
  } \
}
*/
#endif /* __RETURN_BARRIER */

typedef struct {
  pointer addr;        /* object to be scanned */
  unsigned int offset; /* scan position within the object */
} ms_entry;

#define TAGMASK 0x1f
#define FREETAG 0x20
#define GRAYTAG 0x20
#define BLACKTAG 0x40
#define COLORTAG 0x60

#ifndef __USE_MARK_BITMAP
#define colored_map(p) colored_hd(p)
#define blacked_map(p) blacked_hd(p)
#define markon_map(p) markon_hd(p)
#define markoff_map(p) markoff_hd(p)
#define marked_map(p) marked_hd(p)
#endif
#define colored_hd(p) (p->h.bix & COLORTAG)
#define blacked_hd(p) (p->h.bix & BLACKTAG)
#define markon_hd(p) (p->h.bix |= BLACKTAG)
#define markoff_hd(p) (p->h.bix &= TAGMASK)
#define marked_hd(p) (p->h.bix & BLACKTAG)

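/*
 * Typical use in a mark loop (illustrative sketch, not part of the
 * API):
 *
 *   if (!marked_map(obj)) {
 *     markon_map(obj);   // paint the object black
 *     pgcpush(obj);      // and scan its children later
 *   }
 */
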
#ifdef __USE_POLLING
struct _mut_stat_table {
  unsigned int stat;
  /*
   * from the lowest bit:
   *  1: (may be) blocked
   *  2: needs to be scanned
   *  3: being scanned
   * (4: standby)
   */
  mutex_t lock;
};
#endif

struct _gc_data {
  ms_entry *collector_stack;
  ms_entry *collector_sp;
  ms_entry *collector_stacklimit;
  int gc_phase;
  int active_mutator_num;
  int gc_counter;
  int gc_point_sync;
  volatile int gc_region_sync;
  volatile int ri_core_phase;
  int mut_stat_phase;
#ifdef __USE_POLLING
  volatile int gc_request_flag;
  struct _mut_stat_table mut_stat_table[MAXTHREAD];
#endif
  mutex_t gc_state_lock;
  mutex_t collector_lock;
  cond_t ri_end_cv;
  cond_t gc_wakeup_cv;
  int gc_wakeup_cnt;
  int gc_cmp_cnt;
  int gc_net_free;
};

extern struct _gc_data gc_data;

#define collector_stack gc_data.collector_stack
#define collector_sp gc_data.collector_sp
#define collector_stacklimit gc_data.collector_stacklimit
#define gc_phase gc_data.gc_phase
#define active_mutator_num gc_data.active_mutator_num
#define gc_counter gc_data.gc_counter
#define gc_point_sync gc_data.gc_point_sync
#define gc_region_sync gc_data.gc_region_sync
#define ri_core_phase gc_data.ri_core_phase
#define mut_stat_phase gc_data.mut_stat_phase

#ifdef __USE_POLLING
#define gc_request_flag gc_data.gc_request_flag
#define mut_stat_table gc_data.mut_stat_table
#endif

#define gc_state_lock gc_data.gc_state_lock
#define collector_lock gc_data.collector_lock
#define ri_end_cv gc_data.ri_end_cv
#define gc_wakeup_cv gc_data.gc_wakeup_cv
#define gc_wakeup_cnt gc_data.gc_wakeup_cnt
#define gc_cmp_cnt gc_data.gc_cmp_cnt
#define gc_net_free gc_data.gc_net_free

#define lock_collector mutex_lock(&collector_lock)
#define unlock_collector mutex_unlock(&collector_lock)

extern mutex_t pstack_lock;
extern mutex_t gcstate_lock;
extern pointer pstack[];
extern volatile pointer *psp;
extern volatile pointer *oldpsp;
extern pointer *pstacklimit;

#ifdef __PROFILE_GC
extern int allocd_words;
#endif

#ifdef __USE_POLLING

#define GC_POINT _check_gc_request()
#define _check_gc_request() { \
  if (gc_request_flag) scan_roots(); \
}
/* <= memory barrier instructions may be needed */
#define ENTER_GC_SAFE_REGION(id) enter_gc_region(id)
#define EXIT_GC_SAFE_REGION(id) exit_gc_region(id)
#define GC_REGION(comp_statement) \
{ \
  int _tmp_id = thr_self(); \
  ENTER_GC_SAFE_REGION(_tmp_id); \
  comp_statement \
  EXIT_GC_SAFE_REGION(_tmp_id); \
}

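/*
 * Usage sketch (illustrative): a mutator wraps potentially blocking
 * operations in GC_REGION so the collector does not have to wait for
 * this thread while it is stuck in, say, a system call:
 *
 *   GC_REGION(n = read(fd, buf, len););
 */
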
#else /* __USE_SIGNAL */

#define GC_POINT
#define GC_REGION(cmp_statement) cmp_statement

#endif

#define pgpush(v) ( *ctx->gsp++ = ((pointer)v) )
#define pgcpush(v) ( ctx->gsp < ctx->gcstacklimit ? \
                     pgpush(v) : error(E_GCSTACKOVER) )
#define pgcpop() ( *(--(ctx->gsp)) )
#define ppush(v) ( *psp++ = ((pointer)v) )

extern int ps_sem;
#define busy_sema_wait(k) { \
  int i; \
  do { \
    while ((i = read_volatile_int(k)) <= 0); \
  } while (cas_int(k, i, i - 1)); \
  start_access_after_write(); \
}
#define busy_sema_post(k) { \
  int i; \
  finish_access_before_read(); \
  do { \
    i = read_volatile_int(k); \
  } while (cas_int(k, i, i + 1)); \
}

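/*
 * Note: both macros retry while cas_int returns nonzero, so this code
 * assumes cas_int(k, old, new) returns 0 on a successful swap (an
 * assumption about the primitives pulled in via rgc_utils.h above).
 * busy_sema_wait additionally spins until the counter is positive
 * before attempting to decrement it.
 */
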
#define pointerpush(v) { \
  busy_sema_wait(ps_sem); \
  psp < pstacklimit ? ppush(v) : (pointer)error(E_PSTACKOVER); \
  busy_sema_post(ps_sem); \
}
#define pointerpop(lv) { \
  busy_sema_wait(ps_sem); \
  lv = *(--psp); \
  busy_sema_post(ps_sem); \
}

/*
#define pgcpush(v, off) \
{ \
  register ms_entry *_ms_gsp = (ms_entry *)ctx->gsp; \
  if((pointer *)_ms_gsp < ctx->gcstacklimit) { \
    _ms_gsp->addr = (pointer)v; \
    _ms_gsp->offset = off; \
    ctx->gsp += (sizeof(ms_entry)/sizeof(pointer)); \
  }else{ \
    error(E_GCSTACKOVER); \
  } \
}

#define pointerpush(v) \
{ \
  if (psp < pstacklimit) { \
    *psp = v; \
    psp++; \
  } else { \
    error(E_PSTACKOVER); \
  } \
}

#define pointerpop() \
( \
  psp--, \
  *psp \
)
*/

#ifdef __USE_POLLING

#define take_care(p) \
{ \
  if(gc_phase >= PHASE_MARK){ \
    mutex_lock(&pstack_lock); \
    ASSERT((p) == NULL || !ispointer(p) || \
      ((unsigned)(p) >= mingcheap && ((unsigned)(p) < maxgcheap))); \
    pointerpush(p); \
    mutex_unlock(&pstack_lock); \
  } \
}
/*
 * 'l' must not have side effects,
 * because it is evaluated more than once.
 * 'r' may have side effects, because it is evaluated only once.
 */
#define pointer_update(l, r) \
{ \
  if(gc_phase >= PHASE_MARK){ \
    mutex_lock(&pstack_lock); \
    ASSERT((l) == NULL || !ispointer(l) || \
      ((unsigned)(l) >= mingcheap && ((unsigned)(l) < maxgcheap))); \
    pointerpush(l); \
    mutex_unlock(&pstack_lock); \
  } \
  (l)=(r); \
}

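/*
 * Example (illustrative): overwriting a cons slot during the mark
 * phase must record the overwritten value for the snapshot, so
 * instead of "c->c.cons.car = v;" mutator code writes
 *
 *   pointer_update(c->c.cons.car, v);
 *
 * The left-hand side is expanded twice by the macro, hence the
 * no-side-effect rule above.
 */
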
#define noticeCollector(p1, p2) \
{ \
  if (gc_phase >= PHASE_MARK) { \
    ASSERT((p1) == NULL || !ispointer(p1) || \
      ((unsigned)(p1) >= mingcheap && ((unsigned)(p1) < maxgcheap))); \
    ASSERT((p2) == NULL || !ispointer(p2) || \
      ((unsigned)(p2) >= mingcheap && ((unsigned)(p2) < maxgcheap))); \
    mutex_lock(&pstack_lock); \
    pointerpush(p1); \
    pointerpush(p2); \
    mutex_unlock(&pstack_lock); \
  } \
}
#define noticeCollector1(p) take_care(p)
#endif /* __USE_POLLING */

#ifdef __USE_SIGNAL
/* this is not safe, since a signal might cut in
 * during the execution of a write barrier. */
#define take_care(p){ \
  if((((unsigned)p) < mingcheap || ((unsigned)p >= maxgcheap)) \
      && (p) != 0 && ispointer(p)) \
    hoge(); \
  if(gc_phase >= PHASE_MARK){ \
    mutex_lock(&pstack_lock); \
    pointerpush(p); \
    mutex_unlock(&pstack_lock); \
  } \
}
/*
 * 'l' must not have side effects,
 * since it is evaluated more than once
 * ('r' is evaluated only once).
 */
#endif /* __USE_SIGNAL */

typedef struct barrier_struct {
  pthread_mutex_t lock;
  int n_clients; /* assumed: number of participating threads */
  int n_waiting; /* assumed: number of threads currently waiting */
  int phase;
  pthread_cond_t wait_cv;
} *barrier_t;

barrier_t barrier_init(int);
void barrier_reset(barrier_t, int);
void barrier_wait(barrier_t);
extern barrier_t startup_barrier;

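/*
 * Usage sketch (illustrative): one thread creates the barrier for a
 * given number of participants, and each participant then blocks in
 * barrier_wait until all of them have arrived:
 *
 *   startup_barrier = barrier_init(n_threads);
 *   ...
 *   barrier_wait(startup_barrier);   // in every participating thread
 */
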
#ifdef __USE_SIGNAL
void sighandler(int);
#endif

unsigned int allocate_heap();
extern volatile long sweepheap, newheap, pastfree;

void notify_gc();
void do_a_little_gc_work();

#endif /* __COLLECTOR_H */