collector.c
/*
 * 2003-
 * collector.c : R.Hanai
 * parallel root scanning, concurrent snapshot garbage collector
 * with return barrier
 *
 * memos
 * BUGS:
 *   FIXED: copyobj in leo.c
 * TODO:
 *   memory barrier instructions
 *   heap expansion function
 *   write barriers (copyobj() etc.)
 *   memory management (BIBOP/Lazy Buddy/etc.)
 *   polling / how to scan the stacks of suspended threads
 *   mutexes => real-time locks
 *   make thr_self() faster => cache ids
 *   scan large objects incrementally
 *
 *   mark stack overflow
 *   parallel marking (scalability)
 *
 * <problematic functions>
 * bindspecial: increment stack collectively
 */

#include <sys/times.h>
#include "eus.h"
#include <sys/param.h>
#include "time.h"
#include "rgc_utils.h"
#include "xccmem.h"


#ifdef __ART_LINUX
#include <linux/art_task.h>
#endif

#if Linux
char *minmemory=(char *)1000000;
#endif
extern pointer K_DISPOSE;
#define MAXDISPOSE 256
static pointer dispose[MAXDISPOSE];
static int gcmerge, dispose_count;

extern struct {
  char using;
  mutex_t *lock;
  thread_t tid;
} thread_table[]; /* defined in "mthread_posix.c" */

struct _gc_data gc_data;
barrier_t startup_barrier;

#define GCDEBUG
//#undef GCDEBUG
#ifdef GCDEBUG
static int free_cells = 0, white_cells = 0, black_cells = 0;
#endif

#ifdef __PROFILE_GC
static int gctime = 0;
int allocd_words = 0;
#endif

static void do_scan_roots();
static void init_sync_data();

#define gcpush(v, off) \
{ \
  lgcsp->addr = (pointer)v; \
  lgcsp->offset = off; \
  lgcsp++; \
}

static pnewgcstack(oldsp)
     register ms_entry *oldsp;
{
  register ms_entry *oldstack, *stk, *newstack, *newgcsp;
  long top, oldsize, newsize;

  oldstack=stk=collector_stack;
  oldsize=collector_stacklimit-oldstack;
  newsize=oldsize*2;
  top=oldsp-collector_stack;
//  newgcsp=newstack=(pointer *)malloc(newsize * sizeof(pointer)+16);
  newgcsp=newstack=(ms_entry *)malloc(newsize * sizeof(ms_entry)+16);
  fprintf(stderr, "\n\x1b[1;31m;; extending pgcstack 0x%x[%d] --> 0x%x[%d] top=%x\x1b[0m\n",
          oldstack, oldsize, newstack, newsize, top);
  while (stk<oldsp) *newgcsp++= *stk++;
  collector_stack=newstack;
  collector_stacklimit= &(collector_stack[newsize-10]);
  collector_sp = &(collector_stack[top]);
  cfree(oldstack);
}

static call_disposers()
{ int i;
  context *ctx=current_ctx;
  pointer p,a,curclass;
  /*if (debug) fprintf(stderr, ";; disposal call=%d\n", dispose_count);*/
  for (i=0; i<dispose_count; i++) {
    p=dispose[i];
    p->nodispose=0;
    a=(pointer)findmethod(ctx,K_DISPOSE,classof(p), &curclass);
    if (debug) fprintf(stderr, ";; (send %x :dispose)\n", p);
    if (a!=NIL) csend(ctx,p,K_DISPOSE,0);
  }}

static struct _marking_state {
  int is_checking_pstack;
  int cur_mut_num;
} marking_state;

struct _sweeping_state sweeping_state;

static inline void go_on_to_sweep_phase()
{
  numunion nu;
  DPRINT2("mark->sweep: free rate = %lf", (double)freeheap / totalheap);
  gcmerge = totalheap * min(1.0, fltval(speval(GCMARGIN)))
    * max(0.1, fltval(speval(GCMERGE)));
  /* default: GCMARGIN=0.25, GCMERGE=0.2
     ==> no merge if the heap occupancy rate is over 95% */
  dispose_count = 0; /* <= Is this O.K.? */

  marking_state.is_checking_pstack = 0;
  marking_state.cur_mut_num = 0;
  sweeping_state.chp = chunklist;
  sweeping_state.p = &sweeping_state.chp->rootcell;
  sweeping_state.tail = (bpointer)((int)sweeping_state.p
      + (buddysize[sweeping_state.chp->chunkbix] << 2));
  gc_phase = PHASE_SWEEP;
}
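/* A worked instance of the threshold above (derived from the source's own
 * comment, under the reconstructed min/max form): with the defaults
 * GCMARGIN=0.25 and GCMERGE=0.2,
 *   gcmerge = totalheap * 0.25 * 0.2 = 0.05 * totalheap,
 * and sweep_a_little() merges neighboring free cells only while
 * gcmerge <= freeheap, i.e. while at least 5% of the heap is free
 * (occupancy <= 95%).  When memory is tighter than that, cells are
 * reclaimed without merging, which costs less per sweep increment.
 */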

long marked_words = 0;

static int mark_a_little(int m_unit)
{
  extern _end();
  register ms_entry *lgcsp = collector_sp;
  register ms_entry *gcstack = collector_stack;
  register int credit = m_unit;
  unsigned int offset;
  register pointer p, p2;
  register bpointer bp;
  register int i, s;
  context *ctx;

 markloop:
  if(credit <= 0){
    /* write back the value of lgcsp */
    //fprintf(stderr, "GC stack size = %d\n", lgcsp - gcstack);
    collector_sp = lgcsp;
    marked_words -= m_unit - credit;
    return 1; /* marking work still remains */
  }
  if(lgcsp > gcstack){
    /* mark from the mark stack */
//    lgcsp -= (sizeof(ms_entry)/sizeof(pointer));
    lgcsp--;
    p = lgcsp->addr;
    offset = lgcsp->offset;

  start_mark:
    if(offset == 0){
//      if(!ispointer(p)) goto markloop; /* p may be an immediate */
      if(!ispointer(p) || !p) goto markloop;

      ASSERT((unsigned)p >= mingcheap);
      ASSERT((unsigned)p < maxgcheap);

      /* these checks aren't normally needed,
         since this is not a conservative collector */
//      if((int)p < (int)_end) goto markloop;
//
      if(maxmemory < (char *)p) goto markloop;
//      if((char *)p < minmemory) goto markloop;
    }

    /* here, p is a pointer to a live object */
    bp = bpointerof(p);

    if(marked(bp)) goto markloop; /* already marked */
//    if(blacked(bp)) goto markloop; /* already marked */

    markon(bp); /* mark it first to avoid endless marking */

    if(pisclosure(p)){
      /*
      if (p->c.clo.env1>minmemory && p->c.clo.env1<maxmemory)
        fprintf(stderr, "Mark: closure %x's env1 points into heap %x\n",
                p, p->c.clo.env1);
      if (p->c.clo.env2>minmemory && p->c.clo.env2<maxmemory)
        fprintf(stderr, "Mark: closure %x's env2 points into heap %x\n",
                p, p->c.clo.env2);
      */
      goto markloop; /* avoid marking the contents of a closure */
    }
    if(bp->h.elmtype == ELM_FIXED){ /* contents are all pointers */
      s = buddysize[bp->h.bix & TAGMASK] - 1;

      if(s > 300){
        fprintf(stderr, "do_mark: too big object s=%d, header=%x at %x\n",
                s, bp->h, bp);
        //goto markloop;
      }
      while(lgcsp + s > collector_stacklimit){
        pnewgcstack(lgcsp);
        gcstack = collector_stack;
        lgcsp = collector_sp;
      }
      credit -= (s + 1);
      for(i = 0; i < s; i++){
        p2 = p->c.obj.iv[i];
        if(ispointer(p2))
          gcpush(p2, 0);
      }
      goto markloop;
    } else if (bp->h.elmtype == ELM_POINTER) { /* varying number of pointers */
      s = buddysize[bp->h.bix & TAGMASK] - 2;
      while (lgcsp + s > collector_stacklimit) {
        pnewgcstack(lgcsp);
        gcstack = collector_stack;
        lgcsp = collector_sp; /* 961003 kagami */
      }
      credit -= (s + 2);
      for (i = 0; i < s; i++) {
        p2 = p->c.vec.v[i];
        if (ispointer(p2))
          gcpush(p2, 0);
      }
      goto markloop;
    }

    credit -= buddysize[bp->h.bix & TAGMASK];
    goto markloop;

  } else {

    /* get another root */
  next_root:
    credit--;
    if (!marking_state.is_checking_pstack) {
      for (i = marking_state.cur_mut_num; i < MAXTHREAD; i++) {
        ctx = euscontexts[i];
        if (ctx) {
          if (ctx->gsp > ctx->gcstack) {
            p = *--(ctx->gsp);

            ASSERT((unsigned)p >= mingcheap);
            ASSERT((unsigned)p < maxgcheap);

            offset = 0;
            marking_state.cur_mut_num = i;
/*
            if(credit <= 0){
              // write back the value of lgcsp
              gcpush(p, 0);
              collector_sp = lgcsp;
              marked_words -= m_unit - credit;
              return 1; // marking work still remains
            }
*/
            goto start_mark;
          }
        }
      }
      marking_state.is_checking_pstack = 1;
      goto next_root;
    } else {
      mutex_lock(&pstack_lock);
      if(psp > pstack) {
#ifdef COLLECTCACHE /* this is not yet correctly implemented */
#define COLCACHE 10
        int i, ii;
        pointer array[COLCACHE];
        for (i = 0; i < COLCACHE; i++) {
          pointerpop(array[i]);
          if(psp > pstack)
            continue;
          break;
        }
        pcount = pcount + COLCACHE;
        mutex_unlock(&pstack_lock);
        for(ii = 0; ii < i; ii++){
//          mark_a_little(array[ii], 0);
        }
        mutex_lock(&pstack_lock);
#else
        pointerpop(p);
        offset = 0;
        mutex_unlock(&pstack_lock);
/*
        if (credit <= 0) {
          // write back the value of lgcsp
          gcpush(p, 0);
          collector_sp = lgcsp;
          marked_words -= m_unit - credit;
          return 1; // marking work still remains
        }
*/
        goto start_mark;
#endif
      }
      mutex_unlock(&pstack_lock);
    }
  }

  /* marking finished, now we prepare for the following sweep */
  go_on_to_sweep_phase();
  return 0;
}
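/* Usage note (cf. do_a_little_gc_work below): each call to mark_a_little()
 * performs roughly m_unit words' worth of marking, draining gray objects
 * from the collector's mark stack first, then from the per-thread root
 * stacks (ctx->gcstack), and finally from the snapshot pointer stack
 * (pstack).  It returns 1 while marking work remains and 0 once it has
 * switched the collector to the sweep phase, so a caller could, as a
 * sketch, drive a whole mark phase with:
 *
 *   while (mark_a_little(M_UNIT))
 *     ;   // each iteration is one bounded increment of work
 */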


int reclaim(bpointer p)
{
  register int rbix, stat;
  register pointer s;

  s = makepointer(p);
  if(pisfilestream(s)){
    if(!isint(s->c.fstream.fname) && s->c.fstream.direction != NIL){
      if(s->c.fstream.fd == makeint(0)
         || s->c.fstream.fd == makeint(1)){
        fprintf(stderr, ";; gc! bogus stream at %x fd=%d\n",
                (int)s, intval(s->c.fstream.fd));
      }else if((closestream(s) == 0) && debug)
        fprintf(stderr,
                ";; gc: dangling stream(address=%x fd=%d) is closed\n",
                (int)s, intval(s->c.fstream.fd));
    }
  }
  p->h.cix = -1; /* free tag */
  rbix = p->h.bix & TAGMASK;

  mutex_lock(&alloc_lock);
  rw_rdlock(&gc_lock);

  p->b.nextbcell = buddy[rbix].bp;
  buddy[rbix].bp = p;
  buddy[rbix].count++;

  freeheap += buddysize[rbix];
//  sweepheap += buddysize[rbix];

  rw_unlock(&gc_lock);
  mutex_unlock(&alloc_lock);
  return 0;
}

static int rgc_credit = 0;

/* the cell pointed to by 'p' must not be marked */
/* mergecell returns the address of the next uncollectable cell */
static bpointer mergecell(register bpointer p, int cbix)
{
  register bpointer np, p2;

  np = nextbuddy(p);
  while (p->h.b == 0 && ((int)(p->h.bix & TAGMASK)) < cbix) {
//    if (colored(np)) return np;
//    if (marked(np)) return np;
    rgc_credit--;
    if (marked(np) || np->h.cix == -1) return np;
    if (np->h.nodispose == 1) return np;

    p2 = mergecell(np, cbix); /* merge neighbor cell */
    if (np->h.b == 1 && rgc_credit >= 0) { /* can be merged */
      p->h.b = p->h.m; /* merge them into a bigger cell */
      p->h.m = np->h.m;
      p->h.bix++;
      np = p2;
#ifdef GCDEBUG
      white_cells++;
#endif
    } else {
      reclaim(np);
      return p2;
    }
  }
  return np;
}
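/* A worked example of the merge step above (my reading of the header bits,
 * so treat the details as an assumption): EusLisp's heap is a Fibonacci
 * buddy system -- the buddysize[] table printed in allocate_heap() below
 * satisfies buddysize[k+1] = buddysize[k] + buddysize[k-1], e.g.
 * 225075 = 139104 + 85971 words.  When a garbage cell p is followed by its
 * unmarked buddy np, copying np's 'm' bit into p and bumping p->h.bix by
 * one fuses the pair into a single cell of the next Fibonacci size, which
 * reclaim() then threads onto the corresponding buddy[] free list.
 */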

/*
 * suppose that { sweeping_state.p,
 *                sweeping_state.chp,
 *                sweeping_state.tail } are correctly set.
 */
static int sweep_a_little(int gcmerge, int s_unit)
{
  register struct chunk *chp;
  register bpointer p, np, tail;

  rgc_credit = s_unit;
  /* restore the state of the sweeper */
  chp = sweeping_state.chp;
  p = sweeping_state.p;
  tail = sweeping_state.tail;

  if (p == NULL) {
    goto next_chunk;
  }
  //ASSERT( tail && chp );

cont_sweep:
  /* continue sweeping */
  while (p < tail) {
    if (rgc_credit <= 0) {
      sweeping_state.p = p;
      sweeping_state.tail = tail;
      return 1;
    }
//#ifndef __USE_MARK_BITMAP
//    sweeping_state.p = p;
//#endif
    rgc_credit--;
    if (p->h.cix == -1) { /* free object */
#ifdef GCDEBUG
      free_cells++;
#endif
      p = nextbuddy(p);
      continue;
    }
    if (marked(p)) { /* (possibly) live object */
//    if (blacked(p)) { /* (possibly) live object */
#ifdef GCDEBUG
      black_cells++;
#endif
      markoff(p);
      p = nextbuddy(p);
      continue;
    }
    if (p->h.nodispose == 1) {
      if (dispose_count >= MAXDISPOSE)
        fprintf(stderr, "no more space for disposal processing\n");
      else
        dispose[dispose_count++] = makepointer(p);
      p = nextbuddy(p);
    }
    if (gcmerge > freeheap) { /* reclaim and no merge */
#ifdef GCDEBUG
      white_cells++;
#endif
      np = nextbuddy(p);
      reclaim(p);
      p = np;
    } else { /* reclaim and merge */ /* update free buddy list */
      np = mergecell(p, chp->chunkbix);
      reclaim(p);
      p = np;
    }
  }

next_chunk:
  chp = chp->nextchunk;
  if (chp == NULL) {
    DPRINT2("sweeping finished: free rate = %lf", (double)freeheap / totalheap);
    DPRINT2("white: %d black: %d free: %d", white_cells, black_cells, free_cells);
    gc_phase = PHASE_EPILOGUE;
    return 0; /* sweeping finished */
  }
  sweeping_state.chp = chp;
  p = &chp->rootcell;
  tail = (bpointer)((int)p + (buddysize[chp->chunkbix] << 2));
  goto cont_sweep;

}

#ifdef __USE_SIGNAL
static void send_root_insertion_signals()
{
  int i;
  thread_t self = pthread_self();

  for (i = 0; i < MAXTHREAD; i++)
    if (!pthread_equal(thread_table[i].tid, self) && euscontexts[i]) {
      if (pthread_kill(thread_table[i].tid, SIGUSR1) != 0) {
        perror("pthread_kill");
      }
    }
}
#endif

void init_rgc(){
  void collect();
  unsigned int gc_thread;

  active_mutator_num = 1;
  gc_region_sync = 0;
  startup_barrier = barrier_init(2); /* mainthread and collector thread */
  gc_phase = PHASE_NOGC;
  ri_core_phase = 0;
  mut_stat_phase = 0x2;
#ifdef __USE_POLLING
  gc_request_flag = 0;
#endif
  init_sync_data();
  initmemory_rgc(); /* initialize object heap. defined in "eus.c" */
  init_utils();
#ifdef __USE_MARK_BITMAP
  allocate_bit_table(); /* allocate mark bit table */
  clear_bit_table();
#endif

  collector_stack = collector_sp =
    (ms_entry *)malloc(sizeof(ms_entry) * DEFAULT_MAX_RGCSTACK);
  collector_stacklimit = &(collector_stack[DEFAULT_MAX_RGCSTACK - 10]);

#ifdef __GC_SEPARATE_THREAD
  thr_create(0, 0, collect, 0, 0, &gc_thread);
  barrier_wait(startup_barrier);
#endif
}

static pointer rgc_classtable = NULL;

void rgc_add_to_classtable(pointer newclass) {
  static int clsidx = 0;
  int i;
  /* allocate the class table used for marking */
  if (rgc_classtable == NULL) {
    rgc_classtable =
      rgc_alloc((MAXCLASS + 1), ELM_POINTER, vectorcp.cix, MAXCLASS + 1);
    rgc_classtable->c.vec.size = makeint(MAXCLASS);
    for (i = 0; i < MAXCLASS; i++)
      rgc_classtable->c.vec.v[i] = NIL;
  }
  rgc_classtable->c.vec.v[clsidx++] = newclass;
}

static void scan_global_roots()
{
  int i;
  pointerpush(sysobj);
  pointerpush(pkglist);
  /* minimize scanning time for the class table */
  pointerpush(rgc_classtable);
  /*
  for(i = 0; i < MAXCLASS; i++){
    if(ispointer(classtab[i].def)){
      pointerpush(classtab[i].def);
//      ASSERT((unsigned)(classtab[i].def == 0) ||
//             (unsigned)(classtab[i].def) >= mingcheap);
//      ASSERT((unsigned)(classtab[i].def) < maxgcheap);
    }
  }
  */
}

static void scan_local_roots(int i)
{
  register pointer *p;
  register bpointer q;
  register context *ctx = euscontexts[i];

  pgcpush(ctx->threadobj);
  pgcpush(ctx->specials);

  q = bpointerof(ctx->lastalloc);
  if (q && ispointer(q)) {
    pgcpush(ctx->lastalloc);
    ASSERT((unsigned)q >= mingcheap);
    ASSERT((unsigned)q < maxgcheap);
  }

#ifdef __RETURN_BARRIER
  {
    pointer *frame_base, *p;

    //DPRINT3("start scanning current frame: %d ", i);
    mutex_lock(&ctx->rbar.lock); /* <-- this lock wouldn't be needed */

    if (ctx->callfp != NULL)
      frame_base = (pointer *)ctx->callfp;
    else
      frame_base = ctx->stack;

    for (p = ctx->vsp - 1; p >= frame_base; p--) {
      /*
       * a stack frame can contain:
       * 1. immediates
       * 2. references to words in this LISP stack
       *    (static links, dynamic links)
       * 3. references to words in a native stack -- this would be a bug
       *    (jmp_buf in blockframe and catchframe.
       *     See "makeblock", "eussetjmp", "funlambda", "mkcatchframe")
       */
      if (*p == NULL) continue;
      if (((int)(*p) & 3)) continue;
      if ((ctx->stack <= (pointer *)*p) && ((pointer *)*p <= ctx->stacklimit))
        continue;
      if ((pointer *)*p >= (pointer *)maxgcheap) continue;
      if ((pointer *)*p < (pointer *)mingcheap) continue;

      pgcpush(*p);
      ASSERT((unsigned)(*p) >= mingcheap);
      ASSERT((unsigned)(*p) < maxgcheap);
    }

    if (frame_base == ctx->stack) {
      ctx->rbar.pointer = NULL;
    } else {
      ctx->rbar.pointer = frame_base;
    }
    mutex_unlock(&ctx->rbar.lock); /* <-- this lock wouldn't be needed */
    //DPRINT3("scanning current frame completed");
  }

#else /* original snapshot gc */

  /* push roots in the thread's stack */
  for (p = ctx->vsp - 1; p >= ctx->stack; p--) {
//  for(p = ctx->stack; p < ctx->vsp; p++) {
    if (*p == NULL) continue;
    if (((int)(*p) & 3)) continue;
    if ((ctx->stack <= (pointer *)*p) && ((pointer *)*p <= ctx->stacklimit))
      continue;
    if ((pointer *)*p >= (pointer *)maxgcheap) continue;
    if ((pointer *)*p < (pointer *)mingcheap) continue;

    pgcpush(*p);
    ASSERT((unsigned)(*p) >= mingcheap);
    ASSERT((unsigned)(*p) < maxgcheap);
  }
#endif
}
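/* Note on the filters above: EusLisp heap objects are word-aligned, so a
 * stack word with either of its low two bits set is an immediate datum
 * (e.g. a fixnum) and cannot be a heap reference; words that point into
 * the LISP value stack itself are static/dynamic frame links; and only
 * words falling inside [mingcheap, maxgcheap) can refer to heap cells.
 * Every remaining word on the value stack is pushed with pgcpush as a
 * root.
 */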

#if 0
static void scan_suspending_thread_roots()
{
  int id, c;
  for(id = 0; id < MAXTHREAD; id++){
    if(thread_table[id].using){
      mutex_lock(&mut_stat_table[id].lock);
      if(mut_stat_table[id].stat == 0x3){ /* 'suspended' & 'need_scan' */
        mut_stat_table[id].stat = 0x5; /* 'suspended' & 'scanning' */
        mutex_unlock(&mut_stat_table[id].lock);
        scan_local_roots(id);
        do{
          c = read_volatile_int(frame_scan_sync);
        }while(cas_int(frame_scan_sync, c, c + 1));
        mutex_lock(&mut_stat_table[id].lock);
        mut_stat_table[id].stat = 0x1; /* 'suspended' */
      }
      mutex_unlock(&mut_stat_table[id].lock);
    }
  }
}
#endif

#ifdef __RETURN_BARRIER
#define INSERT_UNIT 4 /* 2 or 4 will be good */

static void scan_remaining_roots()
{
  int i, local_root_count, inserted_root_count;
  static char idx[MAXTHREAD];

  local_root_count = 0;

  for (i = 0; i < MAXTHREAD; i++) {
    if (euscontexts[i] && euscontexts[i]->rbar.pointer) {
      idx[local_root_count] = i;
      local_root_count++;
    }
  }

  inserted_root_count = local_root_count;

  do {
    for (i = 0; i < local_root_count; i++) {
      context *ctx;
      register pointer *p;
      int counter, tid;

      tid = idx[i];
      ctx = euscontexts[tid];
      if ((ctx)->rbar.pointer == NULL) continue;

      mutex_lock(&((ctx)->rbar.lock));
      //DPRINT3("scheduler inserting thread : %d's local roots", i);
      p = (ctx)->rbar.pointer - 1;
      counter = INSERT_UNIT;

      while (1) {
        if (p < ctx->stack) break;
        if ((((int)(*p) & 3) == 0)
            && ((ctx->stack > (pointer *)*p) || ((pointer *)*p > ctx->stacklimit))
            && (((pointer *)*p >= (pointer *)mingcheap && (pointer *)*p < (pointer *)maxgcheap))) {
          pgcpush(*p);
          ASSERT((unsigned)(*p) >= mingcheap);
          ASSERT((unsigned)(*p) < maxgcheap);
        }
        p--;
        counter--;
        if(counter == 0) break;
      }
      (ctx)->rbar.pointer = p + 1;

      if (p < ctx->stack) {
        (ctx)->rbar.pointer = NULL;
        inserted_root_count--;
      }
      mutex_unlock(&(ctx)->rbar.lock);
    }
  }
  while (inserted_root_count != 0);
}
#endif /* __RETURN_BARRIER */

unsigned int gs[MAXTHREAD];
/*
 * suppose that we don't have collector_lock
 */
void notify_gc()
{
  int id, phase, c;
  unsigned int s, e;
//  unlock_collector;
  /* reset synchronization variables */
//  lock_collector;
/*  if (gc_phase != PHASE_NOGC) {
      unlock_collector;
      return;
    }
*/
  id = thr_self();
//  gs[id] = current_utime();

  lock_collector;
  gc_point_sync = 0;
  phase = ri_core_phase;
  mut_stat_phase = mut_stat_phase ^ 0x2;
#ifdef __USE_POLLING
//  for(id = 0; id < MAXTHREAD; id++){
//    if(thread_table[id].using){
//      mutex_lock(&mut_stat_table[id].lock);
//      mut_stat_table[id].stat |= 0x2; /* set 'need_scan' flag */
//      if(mut_stat_table[id].stat & 0x1){ /* 'suspended'? */
//        do{
//          c = gc_point_sync;
//        }while(cas_int(gc_point_sync, c, c + 1));
//      }
//      mutex_unlock(&mut_stat_table[id].lock);
//    }
//  }
#endif
#ifdef __USE_SIGNAL
  send_root_insertion_signals();
#else /* __USE_POLLING */
  gc_request_flag = 1;
#endif
  gc_phase = PHASE_ROOT_CORE;
  marked_words = 0;
  unlock_collector;

  do {
    c = gc_point_sync;
  } while(cas_int(gc_point_sync, c, c + 1));

  if (gc_point_sync < active_mutator_num) {
    sched_yield(); // nanosleep(0) might be better
  } else {
    lock_collector;
    if (phase == ri_core_phase) {
      do_scan_roots();
    }
    unlock_collector;
  }

  /* wait until root scanning(core) is finished */
  lock_collector;
  while (phase == ri_core_phase)
    cond_wait(&ri_end_cv, &collector_lock);
//  unlock_collector;
//  e = current_utime();
//  if(id == 0)fprintf(stderr, " (%d:%d) ", e - gs[id], id);
}
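/* How the root-scan handshake above works (as reconstructed here): every
 * mutator arriving at a GC point increments gc_point_sync with a CAS.
 * Arrivals that are not the last simply yield; the thread whose increment
 * brings gc_point_sync up to active_mutator_num runs do_scan_roots(),
 * which flips ri_core_phase and broadcasts ri_end_cv, releasing all the
 * threads waiting for the root-scan core to finish.
 */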


/* suppose that we have collector lock */
static void do_scan_roots()
{
  int tid;
  unsigned int s, e;

  //s = current_utime();
  scan_global_roots();

  /* write barriers get activated */
  /* objects are allocated as black after here */
  gc_phase = PHASE_ROOT_REM;
  for (tid = 0; tid < MAXTHREAD; tid++) {
    if (euscontexts[tid]) {
      mutex_lock(&mut_stat_table[tid].lock);
      scan_local_roots(tid);
//      mut_stat_table[tid].stat = 0x0; /* 'running' */
      mut_stat_table[tid].stat =
        (mut_stat_table[tid].stat ^ 0x2) & 0xfffffffd; /* 'running' */
      mutex_unlock(&mut_stat_table[tid].lock);
    }
  }
  //e = current_utime();
  //fprintf(stderr, "stopped: %d\n", e - s);

  gc_request_flag = 0;
  ri_core_phase = 1 - ri_core_phase; /* release other mutator threads */
  cond_broadcast(&ri_end_cv);
  gc_wakeup_cnt++; /* now, we release collector threads */
  cond_broadcast(&gc_wakeup_cv);
  DPRINT2("root scan finished: free rate = %lf", (double)freeheap / totalheap);
}


static void wait_until_next_gc_cycle()
{
  /*
  numunion nu;
  int thr;
  double threshold;
  static long used;

  used += pastfree + newheap + sweepheap - freeheap;
  newheap = 0;
  threshold = max(DEFAULT_GC_THRESHOLD, fltval(speval(GCMARGIN)));
  thr = (int)((double)totalheap * threshold);
  used = freeheap;
  while(freeheap > thr && gc_counter >= gc_request_counter){
    nanosleep(&treq, NULL); // take a rest
  }
  used = used - freeheap;
  pastfree = freeheap;
  */

  /*
  mutex_lock(&gc_state_lock);
  while(gc_counter >= gc_request_counter){
    cond_wait(&wake_up_gc_thread_cv, &gc_state_lock);
  }
  mutex_unlock(&gc_state_lock);
  */
}

//#define myctx (euscontexts[thr_self()])
//static long rgc_marktime, rgc_sweeptime;

static int do_gc_epilogue()
{
  /*
  if (gc_net_free < 0.8) { // hard external fragmentation
    DPRINT1("\x1b[1;31mexpand heap(do_gc_epilogue, free/total=%d/%d)\x1b[0m",
            freeheap, totalheap);
    newchunk(DEFAULT_EXPAND_SIZE_IDX);
    //do_allocate_heap(totalheap * (0.9 - gc_net_free));
  }
  */
#ifdef __USE_MARK_BITMAP
  clear_bit_table();
#endif
  gc_cmp_cnt++;
  gc_phase = PHASE_NOGC;

  if (debug) {
    fprintf(stderr, " free/total=%d/%d\n",
            freeheap, totalheap);
//    fprintf(stderr, " mark=%d sweep=%d\n", rgc_marktime, rgc_sweeptime);
  }
/* GC thread doesn't have its own context.
  if (speval(QGCHOOK) != NIL) {
    pointer gchook=speval(QGCHOOK);
    vpush(makeint(freeheap)); vpush(makeint(totalheap));
    ufuncall(ctx,gchook,gchook,(pointer)(ctx->vsp-2),ctx->bindfp,2);
    ctx->vsp -= 2;
  }
  breakck;
*/

  DPRINT2("GC cycle finished: free rate = %lf", (double)freeheap / totalheap);
  return 0;
}

void do_a_little_gc_work(int m_unit, int s_unit)
{
  unsigned int s, e;
//  s = current_utime();
  switch (gc_phase) {
  case PHASE_ROOT_REM:
#ifdef __RETURN_BARRIER
    scan_remaining_roots();
#endif
    gc_phase = PHASE_MARK;
    break;
  case PHASE_MARK:
    mark_a_little(m_unit);
    break;
  case PHASE_SWEEP:
    sweep_a_little(gcmerge, s_unit);
    break;
  case PHASE_EPILOGUE:
    do_gc_epilogue();
  default:
    ;
  }
//  e = current_utime();
//  if(e-s > 100) printf("<<%d, %d::%d, %d>>\n", e-s, gc_phase,
//                       rgc_credit, marking_state.is_checking_pstack);
}
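/* The collector's phase machine, one bounded step per call (the exact
 * transition points are partly reconstructed from context):
 *
 *   PHASE_NOGC -> PHASE_ROOT_CORE   (notify_gc / scan_roots handshake)
 *     -> PHASE_ROOT_REM             (do_scan_roots; remaining stack frames
 *                                    are inserted lazily via the rbar data)
 *     -> PHASE_MARK                 (mark_a_little, m_unit words per step)
 *     -> PHASE_SWEEP                (sweep_a_little, s_unit cells per step)
 *     -> PHASE_EPILOGUE -> PHASE_NOGC  (do_gc_epilogue)
 */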

void collect()
{
  int i;
  unsigned s, e;

#ifdef __PROFILE_GC
  int tmp_free;
  reset_utime(); /* for rdtsc */
#endif

  /* synchronize with the main thread */
  barrier_wait(startup_barrier);

#ifdef __PROFILE_GC
//  times(&buf1);
#endif

  /* gc main loop */
  for (;;) {

    /* the gc thread waits until the core of root scanning is finished */
    lock_collector;
    while (gc_cmp_cnt == gc_wakeup_cnt) {
      cond_wait(&gc_wakeup_cv, &collector_lock);
    }

    while (gc_phase != PHASE_NOGC) {
//      printf(".");fflush(stdout);
      do_a_little_gc_work(M_UNIT, S_UNIT);
      //usleep(0);
    };
    unlock_collector;

#ifdef __PROFILE_GC
//    times(&buf2);
//    gctime = buf2.tms_utime+buf2.tms_stime-buf1.tms_utime-buf1.tms_stime;
//    fprintf(stderr, "gc thread time = %d\n", gctime*1000/HZ);
//    fprintf(stderr, "freeheap=%d\n", freeheap*4);
//    tmp_free = freeheap;
#endif

//    DPRINT3("took %d micro, not gc consump_rate %f",
//            e-s, (float)(tmp_free-freeheap)/(e-s));
  }

  /* never reached */
}
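/* The collector thread above is paced by two counters: do_scan_roots()
 * bumps gc_wakeup_cnt (and broadcasts gc_wakeup_cv) when a new cycle's
 * roots are ready, while do_gc_epilogue() bumps gc_cmp_cnt when a cycle
 * completes.  collect() therefore sleeps exactly while the two counts are
 * equal, and otherwise keeps calling do_a_little_gc_work until the phase
 * machine returns to PHASE_NOGC.
 */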


#ifdef __EAGER_GC_CONTROL
static int change_collector_thread_sched_policy(int t_sect_length)
{
#ifdef __ART_LINUX
  if(art_enter(ART_PRIO_MAX, ART_TASK_PERIODIC, t_sect_length) == -1){
    DPRINT2("collector error: art_enter");
    return -1;
  }
#else /* LINUX */
#endif
  return 0;
}
static int restore_collector_thread_sched_policy()
{
#ifdef __ART_LINUX
  if(art_exit() == -1){
    DPRINT2("collector error: art_exit");
    return -1;
  }
#else /* LINUX */
#endif
  return 0;
}
#endif /* __EAGER_GC_CONTROL */

#ifdef __USE_POLLING
void enter_gc_region(int id)
{
  int c, phase;
  mutex_lock(&mut_stat_table[id].lock);
//  mut_stat_table[id].stat |= 0x1; /* set 'suspended' flag */
//  if(mut_stat_table[id].stat & 0x2){ /* 'need_scan'? */
  do {
    c = gc_region_sync;
  } while (cas_int(gc_region_sync, c, c + 1));
  mutex_unlock(&mut_stat_table[id].lock);

  if (gc_request_flag) {
    phase = ri_core_phase;
    lock_collector;
    if (phase == ri_core_phase)
      do_scan_roots();
    unlock_collector;
  }
//  }
}

void exit_gc_region(int id)
{
  int c;
 try_exit:
  mutex_lock(&mut_stat_table[id].lock);
  if ((mut_stat_table[id].stat & 0x2) == mut_stat_phase) { /* 'need_scan'? */
//    mut_stat_table[id].stat = 0x4; /* set 'scanning' and clear 'need_scan' */
//    mutex_unlock(&mut_stat_table[id].lock);
//    insert_my_roots();
    mutex_unlock(&mut_stat_table[id].lock);
    sched_yield(); /* this wouldn't go well on ART-Linux */
    goto try_exit;
  } else {
//    mut_stat_table[id].stat &= 0x0; /* clear 'suspended' flag */
    do {
      c = gc_region_sync;
    } while (cas_int(gc_region_sync, c, c - 1));
    mutex_unlock(&mut_stat_table[id].lock);
  }
}

#endif /* __USE_POLLING */

int ps_sem = 1;
/* initialize data for synchronization */
static void init_sync_data()
{
  int i;
  mutex_init(&pstack_lock, NULL);
  mutex_init(&collector_lock, NULL);
  cond_init(&gc_wakeup_cv, NULL);
  cond_init(&ri_end_cv, NULL);
  mutex_init(&gc_state_lock, NULL);
  for (i = 0; i < MAXTHREAD; i++) {
    mutex_init(&mut_stat_table[i].lock, NULL);
  }
}

/**********************************************************
    mutator interface routines
*********************************************************/

#ifdef __USE_POLLING
void scan_roots()
{
  int c;
  unsigned int e;
  int myid = thr_self();
  int phase = ri_core_phase;

  myid = thr_self();
  //gs[myid] = current_utime();

  do {
    c = gc_point_sync;
  } while (cas_int(gc_point_sync, c, c + 1));

  if (gc_point_sync < active_mutator_num) {
    sched_yield(); // nanosleep(0) might be better
  } else {
    lock_collector;
    if (phase == ri_core_phase)
      do_scan_roots();
    unlock_collector;
  }

  /* wait until root scanning(core) is finished */
  lock_collector;
  while (phase == ri_core_phase)
    cond_wait(&ri_end_cv, &collector_lock);
  unlock_collector;
  //e = current_utime();
  //if(myid == 0)fprintf(stderr, " (%d:%d) ", e-gs[myid], myid);

  return;
}

/* function-version polling code */
int check_gc_request()
{
  if (!gc_request_flag)
    return 0;
  scan_roots();
  return 1;
}
#endif
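/* Polling sketch (an assumption for illustration; the real GC points are
 * placed by the interpreter/allocator): a mutator is expected to call
 * check_gc_request() at its GC points, for example at the top of a work
 * loop --
 *
 *   for (;;) {
 *     check_gc_request();  // joins the root-scan handshake if requested
 *     step();              // hypothetical unit of mutator work
 *   }
 *
 * so a pending collection waits only until every active mutator reaches
 * such a point.
 */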

#ifdef __USE_SIGNAL
/*
 * it is not recommended to use pthread primitives in signal handlers
 * (mutexes, condition variables, ...)
 */
void sighandler(int x)
{
  int idx;
  DPRINT2("start root scanning");
  notify_ri_start();
  idx = thr_self();
  scan_local_roots(idx);
  barrier_wait(end_ri_barrier);
  DPRINT2("mutators restart");
}
#endif

/* MAXSTACK 65536 */
#define PMAXSTACK (MAXSTACK * 110)
pointer pstack[PMAXSTACK];
volatile pointer *psp = pstack;
pointer *pstacklimit = pstack + PMAXSTACK;
mutex_t pstack_lock;
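/* pstack is the snapshot ("pointer") stack that mutators fill and that
 * mark_a_little() drains under pstack_lock.  A minimal sketch of how a
 * snapshot-at-beginning write barrier could feed it (an illustrative
 * assumption -- the real barrier macros are defined elsewhere; see the
 * write-barrier TODO in the header comment):
 *
 *   #define snapshot_write(slot, newval) {  \
 *     if (gc_phase != PHASE_NOGC) {         \
 *       mutex_lock(&pstack_lock);           \
 *       pointerpush(*(slot));               \  // save the overwritten ref
 *       mutex_unlock(&pstack_lock);         \
 *     }                                     \
 *     *(slot) = (newval); }
 */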

/***********************************************************
    barrier-synchronization functions
***********************************************************/

barrier_t barrier_init(int n_clients)
{
  barrier_t barrier = (barrier_t)malloc(sizeof(struct barrier_struct));
  if (barrier != NULL) {
    barrier->n_clients = n_clients;
    barrier->n_waiting = 0;
    barrier->phase = 0;
    mutex_init(&barrier->lock, NULL);
    cond_init(&barrier->wait_cv, NULL);
  }
  return barrier;
}

void barrier_reset(barrier_t barrier, int n_clients)
{
  /* called when active_mutator_num has changed */
  /* this implementation is not good */
  barrier->n_clients = n_clients;
}

void barrier_destroy(barrier_t barrier)
{
  mutex_destroy(&barrier->lock);
  cond_destroy(&barrier->wait_cv);
  free(barrier);
}

void barrier_wait(barrier_t barrier)
{
  int my_phase;
  mutex_lock(&barrier->lock);
  my_phase = barrier->phase;
  barrier->n_waiting++;
  if (barrier->n_waiting == barrier->n_clients) {
    barrier->n_waiting = 0;
    barrier->phase = 1 - my_phase;
    cond_broadcast(&barrier->wait_cv);
  }
  while (barrier->phase == my_phase) {
    cond_wait(&barrier->wait_cv, &barrier->lock);
  }
  mutex_unlock(&barrier->lock);
}
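/* This is a classic sense-reversing (phase-flipping) barrier: the last of
 * n_clients arrivals flips barrier->phase and wakes everyone, and because
 * each waiter compares against the phase it saw on entry, the barrier is
 * immediately reusable for the next rendezvous.  In this file it is used
 * with n_clients == 2 so that init_rgc() and the collector thread
 * (collect()) can synchronize at startup via startup_barrier.
 */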

/***********************************************************
    other functions
***********************************************************/


unsigned int do_allocate_heap(unsigned int req_words)
{
  int i, k;
  unsigned int rem_words = req_words;

  while (buddysize[MAXBUDDY-1] <= rem_words) {
    k = newchunk(MAXBUDDY-1);
    rem_words -= buddysize[k];
  }
  for (i = MAXBUDDY - 2; i >= 20 /* or DEFAULTCHUNKINDEX */; i--) {
    if (buddysize[i] < rem_words){
      k = newchunk(i);
      rem_words -= buddysize[k];
    }
  }
  return req_words - rem_words;
}

unsigned int allocate_heap()
{ /*
   *  k   buddy[k]
   * 22     85971 word    343884 byte
   * 23    139104 word    556416 byte
   * 24    225075 word    900300 byte
   * 25    364179 word   1456716 byte
   * 26    589254 word   2357016 byte
   * 27    953433 word   3813732 byte
   * 28   1542687 word   6170748 byte
   * 29   2496120 word   9984480 byte
   * 30   4038807 word  16155228 byte
   */
  return do_allocate_heap(INITIAL_HEAP_SIZE * 1000);
}
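/* The buddysize[] table above grows like a Fibonacci sequence (each entry
 * is the sum of the two before it, e.g. 85971 + 139104 = 225075), and
 * do_allocate_heap() carves a request greedily: whole chunks of the
 * largest index while the request still exceeds buddysize[MAXBUDDY-1],
 * then one chunk of each smaller index that still fits, down to index 20.
 * The return value is the number of words actually obtained, which may be
 * less than req_words since remainders smaller than the index-20 chunk
 * are not allocated.
 */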

extern long long values[];

pointer RGCCOUNT(register context *ctx, int n, pointer argv[])
{
  ckarg(0);
  return makeint(gc_cmp_cnt);
}

pointer RGC_GCTIME(register context *ctx, int n, pointer argv[])
{
  struct tms buf;
  ckarg(0);
  times(&buf);
  return makeint((buf.tms_utime + buf.tms_stime) * 1000/HZ);
}

#ifdef __PROFILE_GC
pointer RGCALLOCATED(register context *ctx, int n, pointer argv[])
{
  ckarg(0);
  return makeint(allocd_words);
}
#endif

void rgcfunc(register context *ctx, pointer mod)
{
  pointer p = Spevalof(PACKAGE);
  pointer_update(Spevalof(PACKAGE), syspkg);
  defun(ctx, "RGCCOUNT", mod, RGCCOUNT, NULL);
  defun(ctx, "RGCTIME", mod, RGC_GCTIME, NULL);
#ifdef __PROFILE_GC
  defun(ctx, "RGCALLOCATED", mod, RGCALLOCATED, NULL);
#endif
  pointer_update(Spevalof(PACKAGE), p);
}
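/* Lisp-level view (hedged: the exact package prefix depends on how syspkg
 * symbols are exported): rgcfunc() interns the functions above into the
 * system package, so from the EusLisp prompt something like
 *   (system::rgccount)  ; -> number of completed GC cycles (gc_cmp_cnt)
 *   (system::rgctime)   ; -> process user+system time in milliseconds
 * can be used to observe collector progress.
 */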