/*
 * collector.h: R.Hanai
 * concurrent snapshot collector with return barrier for EusLisp
 */

#ifndef __COLLECTOR_H
#define __COLLECTOR_H

#include "rgc_utils.h"
#include "xccmem.h"

//#define INITIAL_HEAP_SIZE 520   //  2M
//#define INITIAL_HEAP_SIZE 800   //  3M
//#define INITIAL_HEAP_SIZE 1250  //  5M
//#define INITIAL_HEAP_SIZE 1800  //
#define INITIAL_HEAP_SIZE 2500  // 10M
//#define INITIAL_HEAP_SIZE 5000  // 20M
//#define INITIAL_HEAP_SIZE 12600 // 50M
//#define INITIAL_HEAP_SIZE 25200 // 100M
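// Note: the MB figures above are consistent with a count unit of roughly
// 4 KB (1 K 32-bit words) each, e.g. 2500 * 4 KB ~= 10 MB; the unit is
// inferred from these comments, not stated explicitly in the source.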

#define DEFAULT_MAX_RGCSTACK 32768 /* 16384 */

#define DEFAULT_EXPAND_SIZE_IDX 24 /* about ??? KB */

#define DEFAULT_GC_THRESHOLD 0.25

#define REALTIME_ALLOC_LIMIT_IDX 12 /* 699 words, per the table below */
/* 12:699, 13:1131, 14:1830, 15:2961, 16:4791.. */

#define GC_ACTIVATE_CUSHION 15 /* 10...20 */

#define M_UNIT 8000 /* 4000 */
#define S_UNIT 256  /* 128, 256, 512 */
#define AM_UNIT (M_UNIT)
#define AS_UNIT (S_UNIT)

#define GC_GRANULARITY 2 /* 2, 4 */

extern char *minmemory;

#if PTHREAD // the same as those in eus_thr.h
typedef pthread_t thread_t;
typedef pthread_mutex_t mutex_t;
typedef pthread_cond_t cond_t;
#define thr_exit(x) pthread_exit(x)
#define thr_join(x,y,z) pthread_join(x,z) /* the departed-thread arg y is dropped */
#define mutex_lock pthread_mutex_lock
#define mutex_trylock pthread_mutex_trylock
#define mutex_unlock pthread_mutex_unlock
#define cond_wait pthread_cond_wait
#define cond_signal pthread_cond_signal
#define cond_broadcast pthread_cond_broadcast
#define mutex_init(x,y) pthread_mutex_init(x,y)
#define mutex_destroy pthread_mutex_destroy
#define cond_init(x,y) pthread_cond_init(x,y)
#define cond_destroy pthread_cond_destroy
#endif

/*
 * collector state
 *
 * Don't edit:
 * the following ordering must be preserved:
 *   PHASE_ROOT_* > PHASE_MARK > PHASE_SWEEP > PHASE_{NOGC,PROLOGUE,EPILOGUE}
 * the write barrier is active while gc_phase >= PHASE_MARK,
 * and new objects are allocated black while gc_phase >= PHASE_SWEEP.
 */
#define PHASE_NOGC 0
#define PHASE_PROLOGUE 1
#define PHASE_EPILOGUE 2
#define PHASE_ROOT_CORE 5
#define PHASE_ROOT_REM 6
#define PHASE_MARK 4
#define PHASE_SWEEP 3

extern struct _sweeping_state {
  struct chunk *chp;
  struct bcell *p; /* bpointer */
  struct bcell *tail;
} sweeping_state;

#ifdef __RETURN_BARRIER
typedef struct {
  pointer *pointer;
  mutex_t lock;
} rbar_t;

#define check_return_barrier(ctx) \
{ \
  if((ctx)->rbar.pointer){\
    register pointer *p, *newbase; \
    if((ctx)->callfp) newbase = (pointer *)((ctx)->callfp); \
    else newbase = (ctx)->stack; \
    if(newbase < (ctx)->rbar.pointer){ \
      mutex_lock(&(ctx)->rbar.lock); \
      if(newbase < (ctx)->rbar.pointer){ \
        /* printf("thread ID inserting root \n"); */ \
        for(p = (ctx)->rbar.pointer - 1; p >= newbase; p--) { \
          if (*p == NULL) continue; \
          if (((int)(*p) & 3)) continue; /* skip immediate values */ \
          if ((ctx->stack <= (pointer *)*p) && ((pointer *)*p <= ctx->stacklimit)) \
            continue; /* skip pointers into this thread's own stack */ \
          if ((pointer *)*p >= (pointer *)hmax) continue; \
          if ((pointer *)*p < (pointer *)hmin) continue; \
          pgcpush(*p); \
          /* ASSERT((pointer *)*p >= hmin); */ \
          /* ASSERT((pointer *)*p < hmax); */ \
        } \
        if(newbase == (ctx)->stack) \
          (ctx)->rbar.pointer = NULL; \
        else \
          (ctx)->rbar.pointer = newbase; \
      } \
      mutex_unlock(&(ctx)->rbar.lock); \
    }\
  }\
}
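
/*
 * Sketch of intended use (hypothetical call site; the real ones live in the
 * EusLisp interpreter's function-return path): when the collector has scanned
 * a thread's stack only down to rbar.pointer, a return that exposes frames
 * below that watermark must push the newly uncovered roots itself:
 *
 *   pointer vm_return(context *ctx, pointer val)
 *   {
 *     check_return_barrier(ctx);   // rescan frames revealed by this return
 *     return val;
 *   }
 */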

/*
 * old version code
 */
/*
#define check_return_barrier(ctx) \
{ \
  int id = thr_self(); \
  if(rbar[id].pointer){\
    register pointer *p, *newbase = (ctx)->stack;\
    if((ctx)->callfp) newbase = (pointer *)((ctx)->callfp); \
    if(newbase < rbar[id].pointer){ \
      mutex_lock(&rbar[id].lock); \
      if(newbase < rbar[id].pointer){ \
        printf("thread %d inserting root \n",id); \
        for(p = rbar[id].pointer - 1; p >= newbase; p--) \
          if((((int)(*p) & 3)==0) && (((ctx)->stack > (pointer *)*p) || \
              ((pointer *)*p > (ctx)->stacklimit))) \
            pgcpush(*p);\
        if(newbase == (ctx)->stack) \
          rbar[id].pointer = NULL; \
        else \
          rbar[id].pointer = newbase; \
      } \
      mutex_unlock(&rbar[id].lock); \
    }\
  }\
}\
*/
#endif /* __RETURN_BARRIER */

typedef struct {
  pointer addr;
  unsigned int offset;
} ms_entry;
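
/*
 * A mark-stack entry records an object plus the offset at which scanning
 * should resume, so marking a large object can be split into bounded steps
 * (compare the commented-out pgcpush(v, off) variant further down). This
 * reading is inferred from the fields, not stated in the source.
 */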

#define TAGMASK  0x1f
#define FREETAG  0x20
#define GRAYTAG  0x20
#define BLACKTAG 0x40
#define COLORTAG 0x60

#ifndef __USE_MARK_BITMAP
#define colored_map(p) colored_hd(p)
#define blacked_map(p) blacked_hd(p)
#define markon_map(p) markon_hd(p)
#define markoff_map(p) markoff_hd(p)
#define marked_map(p) marked_hd(p)
#endif
#define colored_hd(p) (p->h.bix & COLORTAG)
#define blacked_hd(p) (p->h.bix & BLACKTAG)
#define markon_hd(p) (p->h.bix |= BLACKTAG)
#define markoff_hd(p) (p->h.bix &= TAGMASK)
#define marked_hd(p) (p->h.bix & BLACKTAG)
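
/*
 * Illustration (assuming the low 5 bits of h.bix hold the buddy index):
 *   markon_hd(p);        sets 0x40: p is black (marked live)
 *   marked_hd(p) != 0    true for black objects, which survive the sweep
 *   markoff_hd(p);       clears 0x60, both color bits, keeping the index
 * FREETAG and GRAYTAG share bit 0x20; the same bit apparently does double
 * duty for free-list cells and gray (pending-scan) objects.
 */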

#ifdef __USE_POLLING
struct _mut_stat_table {
  unsigned int stat;
  /*
     meaning, from the lowest bit:
     1: (may be) blocked
     2: needs to be scanned
     3: being scanned
     (4: standby)
  */
  mutex_t lock;
};
#endif

struct _gc_data {
  ms_entry *collector_stack;
  ms_entry *collector_sp;
  ms_entry *collector_stacklimit;
  int gc_phase;
  int active_mutator_num;
  int gc_counter;
  int gc_point_sync;
  volatile int gc_region_sync;
  volatile int ri_core_phase;
  int mut_stat_phase;
#ifdef __USE_POLLING
  volatile int gc_request_flag;
  struct _mut_stat_table mut_stat_table[MAXTHREAD];
#endif
  mutex_t gc_state_lock;
  mutex_t collector_lock;
  cond_t ri_end_cv;
  cond_t gc_wakeup_cv;
  int gc_wakeup_cnt;
  int gc_cmp_cnt;
  int gc_net_free;
};

extern struct _gc_data gc_data;

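/*
 * The macros below alias the historical global names onto fields of the
 * single gc_data struct, so existing code can keep writing e.g. gc_phase
 * while all collector state lives in one place.
 */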
#define collector_stack gc_data.collector_stack
#define collector_sp gc_data.collector_sp
#define collector_stacklimit gc_data.collector_stacklimit
#define gc_phase gc_data.gc_phase
#define active_mutator_num gc_data.active_mutator_num
#define gc_counter gc_data.gc_counter
#define gc_point_sync gc_data.gc_point_sync
#define gc_region_sync gc_data.gc_region_sync
#define ri_core_phase gc_data.ri_core_phase
#define mut_stat_phase gc_data.mut_stat_phase

#ifdef __USE_POLLING
#define gc_request_flag gc_data.gc_request_flag
#define mut_stat_table gc_data.mut_stat_table
#endif

#define gc_state_lock gc_data.gc_state_lock
#define collector_lock gc_data.collector_lock
#define ri_end_cv gc_data.ri_end_cv
#define gc_wakeup_cv gc_data.gc_wakeup_cv
#define gc_wakeup_cnt gc_data.gc_wakeup_cnt
#define gc_cmp_cnt gc_data.gc_cmp_cnt
#define gc_net_free gc_data.gc_net_free

#define lock_collector mutex_lock(&collector_lock)
#define unlock_collector mutex_unlock(&collector_lock)

extern mutex_t pstack_lock;
extern mutex_t gcstate_lock;
extern pointer pstack[];
extern volatile pointer *psp;
extern volatile pointer *oldpsp;
extern pointer *pstacklimit;

#ifdef __PROFILE_GC
extern int allocd_words;
#endif

#ifdef __USE_POLLING

#define GC_POINT _check_gc_request()
#define _check_gc_request() { \
  if (gc_request_flag) scan_roots(); \
}
/* memory barrier instructions may be needed in _check_gc_request above */
#define ENTER_GC_SAFE_REGION(id) enter_gc_region(id)
#define EXIT_GC_SAFE_REGION(id) exit_gc_region(id)
#define GC_REGION(comp_statement) \
  { \
    int _tmp_id = thr_self(); \
    ENTER_GC_SAFE_REGION(_tmp_id); \
    comp_statement \
    EXIT_GC_SAFE_REGION(_tmp_id); \
  }
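
/*
 * Usage sketch (hypothetical call sites): a mutator places GC_POINT at safe
 * points such as allocations, and wraps potentially blocking operations in
 * GC_REGION so the collector need not wait for them:
 *
 *   GC_POINT;                           // honor a pending scan request
 *   GC_REGION(n = read(fd, buf, len);)  // thread counts as GC-safe while blocked
 */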

#else /* __USE_SIGNAL */

#define GC_POINT
#define GC_REGION(cmp_statement) cmp_statement

#endif

#define pgpush(v) ( *ctx->gsp++ = ((pointer)v) )
#define pgcpush(v) ( ctx->gsp < ctx->gcstacklimit ? \
    pgpush(v) : error(E_GCSTACKOVER) )
#define pgcpop() ( *(--(ctx->gsp)) )
#define ppush(v) ( *psp++ = ((pointer)v) )

extern int ps_sem;
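/*
 * The busy-wait semaphore below guards the shared pointer stack. It assumes
 * the primitives from rgc_utils.h behave as follows (inferred from the loop
 * structure, not verified here): read_volatile_int(k) reads *k with volatile
 * semantics, and cas_int(k, old, new) attempts an atomic compare-and-swap,
 * returning nonzero on failure so the do/while retries until it succeeds.
 */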
#define busy_sema_wait(k) { \
  int i; \
  do { \
    while ((i = read_volatile_int(k)) <= 0); \
  } while (cas_int(k, i, i - 1)); \
  start_access_after_write(); \
}
#define busy_sema_post(k) { \
  int i; \
  finish_access_before_read(); \
  do { \
    i = read_volatile_int(k); \
  } while (cas_int(k, i, i + 1)); \
}

#define pointerpush(v) { \
  busy_sema_wait(ps_sem); \
  psp < pstacklimit ? ppush(v) : (pointer)error(E_PSTACKOVER); \
  busy_sema_post(ps_sem); \
}
#define pointerpop(lv) { \
  busy_sema_wait(ps_sem); \
  lv = *(--psp); \
  busy_sema_post(ps_sem); \
}

/*
#define pgcpush(v, off) \
{ \
  register ms_entry *_ms_gsp = (ms_entry *)ctx->gsp; \
  if((pointer *)_ms_gsp < ctx->gcstacklimit) { \
    _ms_gsp->addr = (pointer)v; \
    _ms_gsp->offset = off; \
    ctx->gsp += (sizeof(ms_entry)/sizeof(pointer)); \
  }else{ \
    error(E_GCSTACKOVER); \
  } \
}

#define pointerpush(v) \
{ \
  if (psp < pstacklimit) { \
    *psp = v; \
    psp++; \
  } else { \
    error(E_PSTACKOVER); \
  } \
}

#define pointerpop() \
( \
  psp--, \
  *psp \
)
*/

#ifdef __USE_POLLING

#define take_care(p) \
{ \
  if(gc_phase >= PHASE_MARK){ \
    mutex_lock(&pstack_lock); \
    ASSERT((p) == NULL || !ispointer(p) || \
      ((unsigned)(p) >= mingcheap && ((unsigned)(p) < maxgcheap))); \
    pointerpush(p); \
    mutex_unlock(&pstack_lock); \
  } \
}
/*
 * 'l' must not have side effects, because it is evaluated more than once.
 * 'r' may have side effects, because it is evaluated only once.
 */
#define pointer_update(l, r) \
{ \
  if(gc_phase >= PHASE_MARK){ \
    mutex_lock(&pstack_lock); \
    ASSERT((l) == NULL || !ispointer(l) || \
      ((unsigned)(l) >= mingcheap && ((unsigned)(l) < maxgcheap))); \
    pointerpush(l); \
    mutex_unlock(&pstack_lock); \
  } \
  (l)=(r); \
}
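
/*
 * Usage sketch: in a snapshot-at-the-beginning collector, every mutator
 * store into a heap pointer field goes through pointer_update so the old
 * value is recorded before being overwritten (field name is illustrative):
 *
 *   pointer_update(obj->c.cons.car, newval);  // not: obj->c.cons.car = newval;
 */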

#define noticeCollector(p1, p2) \
{ \
  if (gc_phase >= PHASE_MARK) { \
    ASSERT((p1) == NULL || !ispointer(p1) || \
      ((unsigned)(p1) >= mingcheap && ((unsigned)(p1) < maxgcheap))); \
    ASSERT((p2) == NULL || !ispointer(p2) || \
      ((unsigned)(p2) >= mingcheap && ((unsigned)(p2) < maxgcheap))); \
    mutex_lock(&pstack_lock); \
    pointerpush(p1); \
    pointerpush(p2); \
    mutex_unlock(&pstack_lock); \
  } \
}
#define noticeCollector1(p) take_care(p)
#endif /* __USE_POLLING */

#ifdef __USE_SIGNAL
/* this is not safe, since signals might cut into
 * the execution of write barriers. */
#define take_care(p){ \
  if((((unsigned)p) < mingcheap || ((unsigned)p >= maxgcheap)) \
      && (p) != 0 && ispointer(p)) \
    hoge(); /* presumably a debugging trap for out-of-heap pointers */ \
  if(gc_phase >= PHASE_MARK){ \
    mutex_lock(&pstack_lock); \
    pointerpush(p); \
    mutex_unlock(&pstack_lock); \
  } \
}
/*
 * 'l' must not have side effects, since it is evaluated more than once
 * ('r' is evaluated only once).
 */
#endif /* __USE_SIGNAL */


typedef struct barrier_struct {
  pthread_mutex_t lock;
  int n_clients;
  int n_waiting;
  int phase;
  pthread_cond_t wait_cv;
} *barrier_t;

barrier_t barrier_init(int);
void barrier_reset(barrier_t, int);
void barrier_wait(barrier_t);
extern barrier_t startup_barrier;
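
/*
 * Usage sketch (assumed semantics, inferred from the struct fields: all
 * n_clients threads block in barrier_wait until the last one arrives, the
 * phase then flips, and everyone proceeds):
 *
 *   barrier_t b = barrier_init(2);  // e.g. main thread + collector thread
 *   ...
 *   barrier_wait(b);                // rendezvous before GC startup
 */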

#ifdef __USE_SIGNAL
void sighandler(int);
#endif

unsigned int allocate_heap();
extern volatile long sweepheap, newheap, pastfree;

void notify_gc();
void do_a_little_gc_work();

#endif /* __COLLECTOR_H */

