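/*
 * Incremental mark-and-sweep garbage collector (RGC) for EusLisp,
 * working on a buddy-allocated heap.  A collection cycle runs through
 * the phases PHASE_PROLOGUE, PHASE_ROOT_CORE, PHASE_ROOT_REM,
 * PHASE_MARK, PHASE_SWEEP and PHASE_EPILOGUE; mutator threads and the
 * collector synchronize through the shared pointer stack (pstack),
 * per-thread GC stacks and the mut_stat_table handshake.
 *
 * Note: the code assumes 32-bit pointers (see the "(int)p" casts below).
 */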
#include <sys/times.h>
#include "eus.h"
#include <sys/param.h>
#include "time.h"
#include "rgc_utils.h"
#include "xccmem.h"

#ifdef __ART_LINUX
#include <linux/art_task.h>
#endif

#if Linux
char *minmemory = (char *)1000000;
#endif
extern pointer K_DISPOSE;

/* dead objects whose :dispose method must still be called */
#define MAXDISPOSE 256
static pointer dispose[MAXDISPOSE];
static int gcmerge, dispose_count;

extern struct {
  char using;
  mutex_t *lock;
  thread_t tid;
} thread_table[];

struct _gc_data gc_data;
barrier_t startup_barrier;

#define GCDEBUG

#ifdef GCDEBUG
static int white_cells, black_cells, free_cells;
#endif

#ifdef __PROFILE_GC
static int gctime = 0;
int allocd_words = 0;
#endif

static void do_scan_roots();
static void init_sync_data();

/* push one entry (object pointer + scan offset) onto the collector's
   private mark stack; lgcsp is a local cursor cached by the caller */
#define gcpush(v, off) \
{ \
  lgcsp->addr = (pointer)v; \
  lgcsp->offset = off; \
  lgcsp++; \
}

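/* The mark stack is full: allocate a new one twice as large, copy the
   live entries and rebase collector_stack / collector_sp.  Called from
   mark_a_little whenever a push might overflow the current stack. */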
static void pnewgcstack(register ms_entry *oldsp)
{
  register ms_entry *oldstack, *stk, *newstack, *newgcsp;
  long top, oldsize, newsize;

  oldstack = stk = collector_stack;
  oldsize = collector_stacklimit - oldstack;
  newsize = oldsize * 2;
  top = oldsp - collector_stack;

  newgcsp = newstack = (ms_entry *)malloc(newsize * sizeof(ms_entry) + 16);
  fprintf(stderr, "\n\x1b[1;31m;; extending pgcstack 0x%x[%d] --> 0x%x[%d] top=%x\x1b[0m\n",
          oldstack, oldsize, newstack, newsize, top);
  while (stk < oldsp) *newgcsp++ = *stk++;
  collector_stack = newstack;
  collector_stacklimit = &(collector_stack[newsize - 10]);
  collector_sp = &(collector_stack[top]);
  free(oldstack);
}
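/* Send :dispose to every object queued in dispose[] during the sweep
   phase.  The nodispose flag is cleared first so the cell can be
   reclaimed normally on a later cycle. */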

static void call_disposers()
{
  int i;
  context *ctx = current_ctx;
  pointer p, a, curclass;

  for (i = 0; i < dispose_count; i++) {
    p = dispose[i];
    p->nodispose = 0;
    a = (pointer)findmethod(ctx, K_DISPOSE, classof(p), &curclass);
    if (debug) fprintf(stderr, ";; (send %x :dispose)\n", p);
    if (a != NIL) csend(ctx, p, K_DISPOSE, 0);
  }
}

static struct _marking_state {
  int is_checking_pstack;  /* 0: draining per-thread gcstacks, 1: draining the shared pstack */
  int cur_mut_num;         /* index of the mutator whose gcstack is currently drained */
} marking_state;

struct _sweeping_state sweeping_state;

static inline void go_on_to_sweep_phase()
{
  numunion nu;
  DPRINT2("mark->sweep: free rate = %lf", (double)freeheap / totalheap);
  /* merge threshold: while the free heap is below this many words,
     the sweeper reclaims cells without trying to merge buddies */
  gcmerge = totalheap * min(1.0, fltval(speval(GCMARGIN)))
    * max(0.1, fltval(speval(GCMERGE)));

  dispose_count = 0;

  /* start sweeping from the first chunk */
  sweeping_state.chp = chunklist;
  sweeping_state.p = &chunklist->rootcell;
  sweeping_state.tail =
    (bpointer)((int)sweeping_state.p + (buddysize[sweeping_state.chp->chunkbix] << 2));
  gc_phase = PHASE_SWEEP;
}

long marked_words = 0;

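/* Incremental marking step.  m_unit is the work budget ("credit"):
   roughly the number of words examined before control returns to the
   caller.  Entries are popped from the collector's mark stack; when it
   is empty, further roots are taken from each mutator's gcstack and
   then from the shared pstack of pointers pushed by the mutators.
   Returns 1 while marking is unfinished, 0 after the collector has
   switched to the sweep phase. */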
static int mark_a_little(int m_unit)
{
  extern _end();
  register ms_entry *lgcsp = collector_sp;
  register ms_entry *gcstack = collector_stack;
  register int credit = m_unit;
  unsigned int offset;
  register pointer p, p2;
  register bpointer bp;
  register int i, s;
  context *ctx;

 markloop:
  if (credit <= 0) {
    /* budget for this step is used up: save the stack pointer and
       come back later */
    collector_sp = lgcsp;
    marked_words -= m_unit - credit;
    return 1;
  }
  if (lgcsp > gcstack) {
    /* the mark stack is not empty: trace one object */
    lgcsp--;
    p = lgcsp->addr;
    offset = lgcsp->offset;

  start_mark:
    if (offset == 0) {
      /* filter out non-pointers and addresses outside the GC heap */
      if (!ispointer(p) || !p) goto markloop;

      ASSERT((unsigned)p >= mingcheap);
      ASSERT((unsigned)p < maxgcheap);

      if (maxmemory < (char *)p) goto markloop;
    }

    bp = bpointerof(p);

    if (marked(bp)) goto markloop;   /* already visited */

    markon(bp);                      /* paint it black */

    if (pisclosure(p)) {
      /* do not trace the contents of closures */
      goto markloop;
    }
    if (bp->h.elmtype == ELM_FIXED) {            /* fixed number of pointer slots */
      s = buddysize[bp->h.bix & TAGMASK] - 1;

      if (s > 300) {
        fprintf(stderr, "do_mark: too big object s=%d, header=%x at %x\n",
                s, bp->h, bp);
      }
      while (lgcsp + s > collector_stacklimit) {
        pnewgcstack(lgcsp);
        gcstack = collector_stack;
        lgcsp = collector_sp;
      }
      credit -= (s + 1);
      for (i = 0; i < s; i++) {
        p2 = p->c.obj.iv[i];
        if (ispointer(p2))
          gcpush(p2, 0);
      }
      goto markloop;
    } else if (bp->h.elmtype == ELM_POINTER) {   /* pointer vector */
      s = buddysize[bp->h.bix & TAGMASK] - 2;
      while (lgcsp + s > collector_stacklimit) {
        pnewgcstack(lgcsp);
        gcstack = collector_stack;
        lgcsp = collector_sp;
      }
      credit -= (s + 2);
      for (i = 0; i < s; i++) {
        p2 = p->c.vec.v[i];
        if (ispointer(p2))
          gcpush(p2, 0);
      }
      goto markloop;
    }
    /* other element types contain no pointers to trace */
    credit -= buddysize[bp->h.bix & TAGMASK];
    goto markloop;

  } else {
    /* the mark stack is empty: look for another root */
  next_root:
    credit--;
    if (!marking_state.is_checking_pstack) {
      /* first drain the per-thread gcstacks */
      for (i = marking_state.cur_mut_num; i < MAXTHREAD; i++) {
        ctx = euscontexts[i];
        if (ctx) {
          if (ctx->gsp > ctx->gcstack) {
            p = *--(ctx->gsp);

            ASSERT((unsigned)p >= mingcheap);
            ASSERT((unsigned)p < maxgcheap);

            offset = 0;
            marking_state.cur_mut_num = i;
            goto start_mark;
          }
        }
      }
      marking_state.is_checking_pstack = 1;
      goto next_root;
    } else {
      /* then the shared pointer stack */
      mutex_lock(&pstack_lock);
      if (psp > pstack) {
#ifdef COLLECTCACHE /* disabled: batched popping of pstack entries */
#define COLCACHE 10
        int i, ii;
        pointer array[COLCACHE];
        for (i = 0; i < COLCACHE; i++) {
          pointerpop(array[i]);
          if (psp > pstack)
            continue;
          break;
        }
        pcount = pcount + COLCACHE;
        mutex_unlock(&pstack_lock);
        for (ii = 0; ii < i; ii++) {
        }
        mutex_lock(&pstack_lock);
#else
        pointerpop(p);
        offset = 0;
        mutex_unlock(&pstack_lock);
        goto start_mark;
#endif
      }
      mutex_unlock(&pstack_lock);
    }
  }

  /* no roots left and the mark stack is empty: marking is complete */
  go_on_to_sweep_phase();
  return 0;
}

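/* Return a dead cell to the buddy free list of its size class.
   File streams get special treatment: a still-open stream that is being
   collected is closed here so its file descriptor is not leaked. */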
int reclaim(bpointer p)
{
  register int rbix, stat;
  register pointer s;

  s = makepointer(p);
  if (pisfilestream(s)) {
    if (!isint(s->c.fstream.fname) && s->c.fstream.direction != NIL) {
      if (s->c.fstream.fd == makeint(0)
          || s->c.fstream.fd == makeint(1)) {
        fprintf(stderr, ";; gc! bogus stream at %x fd=%d\n",
                (int)s, intval(s->c.fstream.fd));
      } else if ((closestream(s) == 0) && debug)
        fprintf(stderr,
                ";; gc: dangling stream(address=%x fd=%d) is closed\n",
                (int)s, intval(s->c.fstream.fd));
    }
  }
  p->h.cix = -1;                       /* mark the cell as free */
  rbix = p->h.bix & TAGMASK;

  mutex_lock(&alloc_lock);
  rw_rdlock(&gc_lock);

  p->b.nextbcell = buddy[rbix].bp;     /* push onto the free list */
  buddy[rbix].bp = p;
  buddy[rbix].count++;

  freeheap += buddysize[rbix];

  rw_unlock(&gc_lock);
  mutex_unlock(&alloc_lock);
  return 0;
}

/* work budget shared by sweep_a_little and mergecell */
static int rgc_credit = 0;

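/* Try to coalesce the free cell p with the free cells that follow it,
   recursively, so that buddies are merged back into larger cells.
   cbix bounds the size class; a neighbour that is marked, already on a
   free list, or still waiting for :dispose stops the merge.  Returns
   the cell that follows the area examined. */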
static bpointer mergecell(register bpointer p, int cbix)
{
  register bpointer np, p2;

  np = nextbuddy(p);
  while (p->h.b == 0 && ((int)(p->h.bix & TAGMASK)) < cbix) {
    rgc_credit--;
    if (marked(np) || np->h.cix == -1) return np;  /* neighbour is live or already free */
    if (np->h.nodispose == 1) return np;           /* neighbour still needs :dispose */

    p2 = mergecell(np, cbix);          /* merge the neighbour's own buddies first */
    if (np->h.b == 1 && rgc_credit >= 0) {
      p->h.b = p->h.m;                 /* absorb np: p grows to the next size class */
      p->h.m = np->h.m;
      p->h.bix++;
      np = p2;
#ifdef GCDEBUG
      white_cells++;
#endif
    } else {
      reclaim(np);
      return p2;
    }
  }
  return np;
}

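/* Incremental sweep step.  Walks the heap chunk by chunk, spending at
   most s_unit credits: marked cells survive (their mark bit is
   cleared), cells that still need :dispose are queued, and everything
   else is reclaimed, with buddy merging attempted only while the free
   heap is at or above the gcmerge threshold.  Returns 1 while sweeping
   is unfinished, 0 when the epilogue phase is entered. */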
static int sweep_a_little(int gcmerge, int s_unit)
{
  register struct chunk *chp;
  register bpointer p, np, tail;

  rgc_credit = s_unit;

  /* resume from where the previous step stopped */
  chp = sweeping_state.chp;
  p = sweeping_state.p;
  tail = sweeping_state.tail;

  if (p == NULL) {
    goto next_chunk;
  }

 cont_sweep:
  /* scan the current chunk */
  while (p < tail) {
    if (rgc_credit <= 0) {
      /* out of budget: remember the position and resume later */
      sweeping_state.p = p;
      sweeping_state.tail = tail;
      return 1;
    }

    rgc_credit--;
    if (p->h.cix == -1) {              /* already a free cell */
#ifdef GCDEBUG
      free_cells++;
#endif
      p = nextbuddy(p);
      continue;
    }
    if (marked(p)) {                   /* live cell: unmark and keep it */
#ifdef GCDEBUG
      black_cells++;
#endif
      markoff(p);
      p = nextbuddy(p);
      continue;
    }
    if (p->h.nodispose == 1) {
      /* dead, but :dispose must run first; the cell is reclaimed on a
         later cycle */
      if (dispose_count >= MAXDISPOSE)
        fprintf(stderr, "no more space for disposal processing\n");
      else
        dispose[dispose_count++] = makepointer(p);
      p = nextbuddy(p);
      continue;
    }
    if (gcmerge > freeheap) {          /* free heap below the merge threshold: reclaim as is */
#ifdef GCDEBUG
      white_cells++;
#endif
      np = nextbuddy(p);
      reclaim(p);
      p = np;
    } else {                           /* merge neighbouring free cells, then reclaim */
      np = mergecell(p, chp->chunkbix);
      reclaim(p);
      p = np;
    }
  }

 next_chunk:
  chp = sweeping_state.chp->nextchunk;
  if (chp == NULL) {
    DPRINT2("sweeping finished: free rate = %lf", (double)freeheap / totalheap);
    DPRINT2("white: %d black: %d free: %d", white_cells, black_cells, free_cells);
    gc_phase = PHASE_EPILOGUE;
    return 0;
  }
  sweeping_state.chp = chp;
  p = &chp->rootcell;
  tail = (bpointer)((int)p + (buddysize[chp->chunkbix] << 2));
  goto cont_sweep;
}

#ifdef __USE_SIGNAL
/* ask every other mutator thread to scan its own roots by sending SIGUSR1 */
static void send_root_insertion_signals()
{
  int i;
  thread_t self = pthread_self();

  for (i = 0; i < MAXTHREAD; i++)
    if (!pthread_equal(thread_table[i].tid, self) && euscontexts[i]) {
      if (pthread_kill(thread_table[i].tid, SIGUSR1) != 0) {
        perror("pthread_kill");
      }
    }
}
#endif

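/* One-time initialization of the collector: synchronization objects,
   heap, optional mark bit table, the collector's mark stack and, when
   __GC_SEPARATE_THREAD is defined, the collector thread itself. */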
void init_rgc()
{
  void collect();
  unsigned int gc_thread;

  active_mutator_num = 1;
  gc_region_sync = 0;
  startup_barrier = barrier_init(2);   /* collector thread + first mutator */
  gc_phase = PHASE_NOGC;
  ri_core_phase = 0;
  mut_stat_phase = 0x2;
  gc_wakeup_cnt = gc_cmp_cnt = 0;
#ifdef __USE_POLLING
  gc_request_flag = 0;
#endif
  init_sync_data();
  initmemory_rgc();
  init_utils();
#ifdef __USE_MARK_BITMAP
  allocate_bit_table();
  clear_bit_table();
#endif

  collector_stack = collector_sp =
    (ms_entry *)malloc(sizeof(ms_entry) * DEFAULT_MAX_RGCSTACK);
  collector_stacklimit = collector_stack + DEFAULT_MAX_RGCSTACK - 10;

#ifdef __GC_SEPARATE_THREAD
  thr_create(0, 0, collect, 0, 0, &gc_thread);
  barrier_wait(startup_barrier);
#endif
}

static pointer rgc_classtable = NULL;

/* Register a class object in a GC-visible vector; the vector is pushed
   as a global root, so registered classes are never collected. */
void rgc_add_to_classtable(pointer newclass) {
  static int clsidx = 0;
  int i;

  /* allocate the class table on first use */
  if (rgc_classtable == NULL) {
    rgc_classtable =
      rgc_alloc((MAXCLASS + 1), ELM_POINTER, vectorcp.cix, MAXCLASS + 1);
    rgc_classtable->c.vec.size = makeint(MAXCLASS);
    for (i = 0; i < MAXCLASS; i++)
      rgc_classtable->c.vec.v[i] = NIL;
  }
  rgc_classtable->c.vec.v[clsidx++] = newclass;
}

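/* Push the global roots (system object table, package list and the
   class table above) onto the shared pointer stack. */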
static void scan_global_roots()
{
  int i;
  pointerpush(sysobj);
  pointerpush(pkglist);

  pointerpush(rgc_classtable);
}

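/* Scan the roots owned by mutator thread i: its thread object, special
   bindings, the last allocated cell, and its value stack.  Stack words
   are treated conservatively: a word is pushed only if it is aligned,
   does not point back into the stack itself, and lies inside the GC
   heap.  With __RETURN_BARRIER only the portion of the stack above the
   newest call frame is scanned here; the remainder is scanned lazily by
   scan_remaining_roots through the per-thread return barrier. */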
static void scan_local_roots(int i)
{
  register pointer *p;
  register bpointer q;
  register context *ctx = euscontexts[i];

  pgcpush(ctx->threadobj);
  pgcpush(ctx->specials);

  q = bpointerof(ctx->lastalloc);
  if (q && ispointer(q)) {
    pgcpush(ctx->lastalloc);
    ASSERT((unsigned)q >= mingcheap);
    ASSERT((unsigned)q < maxgcheap);
  }

#ifdef __RETURN_BARRIER
  {
    pointer *frame_base, *p;

    mutex_lock(&ctx->rbar.lock);

    if (ctx->callfp != NULL)
      frame_base = (pointer *)ctx->callfp;
    else
      frame_base = ctx->stack;

    /* scan the stack down to the newest call frame */
    for (p = ctx->vsp - 1; p >= frame_base; p--) {
      /* conservative filtering: skip non-pointers and anything
         outside the GC heap */
      if (*p == NULL) continue;
      if (((int)(*p) & 3)) continue;
      if ((ctx->stack <= (pointer *)*p) && ((pointer *)*p <= ctx->stacklimit))
        continue;
      if ((pointer *)*p >= (pointer *)maxgcheap) continue;
      if ((pointer *)*p < (pointer *)mingcheap) continue;

      pgcpush(*p);
      ASSERT((unsigned)(*p) >= mingcheap);
      ASSERT((unsigned)(*p) < maxgcheap);
    }

    /* leave the rest of the stack to the return barrier */
    if (frame_base == ctx->stack) {
      ctx->rbar.pointer = NULL;
    } else {
      ctx->rbar.pointer = frame_base;
    }
    mutex_unlock(&ctx->rbar.lock);
  }

#else

  /* scan the whole value stack at once */
  for (p = ctx->vsp - 1; p >= ctx->stack; p--) {
    if (*p == NULL) continue;
    if (((int)(*p) & 3)) continue;
    if ((ctx->stack <= (pointer *)*p) && ((pointer *)*p <= ctx->stacklimit))
      continue;
    if ((pointer *)*p >= (pointer *)maxgcheap) continue;
    if ((pointer *)*p < (pointer *)mingcheap) continue;

    pgcpush(*p);
    ASSERT((unsigned)(*p) >= mingcheap);
    ASSERT((unsigned)(*p) < maxgcheap);
  }
#endif
}

#if 0
static void scan_suspending_thread_roots()
{
  int id, c;
  for (id = 0; id < MAXTHREAD; id++) {
    if (thread_table[id].using) {
      mutex_lock(&mut_stat_table[id].lock);
      if (mut_stat_table[id].stat == 0x3) {
        mut_stat_table[id].stat = 0x5;
        mutex_unlock(&mut_stat_table[id].lock);
        scan_local_roots(id);
        finish_access_before_read();
        do {
          c = read_volatile_int(frame_scan_sync);
        } while (cas_int(frame_scan_sync, c, c + 1));
        mutex_lock(&mut_stat_table[id].lock);
        mut_stat_table[id].stat = 0x1;
      }
      mutex_unlock(&mut_stat_table[id].lock);
    }
  }
}
#endif

#ifdef __RETURN_BARRIER
#define INSERT_UNIT 4

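/* Return-barrier half of the root scan: for every thread that still has
   unscanned stack frames (rbar.pointer != NULL), scan up to INSERT_UNIT
   words at a time, round robin, until every stack has been fully
   scanned. */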
static void scan_remaining_roots()
{
  int i, local_root_count, inserted_root_count;
  static char idx[MAXTHREAD];

  local_root_count = 0;

  /* collect the threads whose stacks are not yet completely scanned */
  for (i = 0; i < MAXTHREAD; i++) {
    if (euscontexts[i] && euscontexts[i]->rbar.pointer) {
      idx[local_root_count] = i;
      local_root_count++;
    }
  }

  inserted_root_count = local_root_count;

  do {
    for (i = 0; i < local_root_count; i++) {
      context *ctx;
      register pointer *p;
      int counter, tid;

      tid = idx[i];
      ctx = euscontexts[tid];
      if ((ctx)->rbar.pointer == NULL) continue;

      mutex_lock(&((ctx)->rbar.lock));

      p = (ctx)->rbar.pointer - 1;
      counter = INSERT_UNIT;

      while (1) {
        if (p < ctx->stack) break;
        /* same conservative filter as in scan_local_roots */
        if ((((int)(*p) & 3) == 0)
            && ((ctx->stack > (pointer *)*p) || ((pointer *)*p > ctx->stacklimit))
            && (((pointer *)*p >= (pointer *)mingcheap && (pointer *)*p < (pointer *)maxgcheap))) {
          pgcpush(*p);
          ASSERT((unsigned)(*p) >= mingcheap);
          ASSERT((unsigned)(*p) < maxgcheap);
        }
        p--;
        counter--;
        if (counter == 0) break;
      }
      (ctx)->rbar.pointer = p + 1;

      if (p < ctx->stack) {
        /* this stack is done */
        (ctx)->rbar.pointer = NULL;
        inserted_root_count--;
      }
      mutex_unlock(&(ctx)->rbar.lock);
    }
  } while (inserted_root_count != 0);
}
#endif

unsigned int gs[MAXTHREAD];

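/* Start a new GC cycle.  The collector lock is expected to be held on
   entry (it is released inside).  The function moves the collector to
   the prologue phase, raises gc_request_flag (or sends root-insertion
   signals), then joins the root-insertion rendezvous: the last thread
   to check in runs do_scan_roots(), and everybody waits on ri_end_cv
   until the root-core scan of this cycle is over. */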
void notify_gc()
{
  int id, phase, c;
  unsigned int s, e;

  id = thr_self();

  /* prologue: advertise the new cycle to every mutator */
  gc_phase = PHASE_PROLOGUE;
  gc_point_sync = 0;
  phase = ri_core_phase;
  mut_stat_phase = mut_stat_phase ^ 0x2;
#ifdef __USE_SIGNAL
  send_root_insertion_signals();
#else
  gc_request_flag = 1;
#endif
  marking_state.is_checking_pstack = 0;
  marking_state.cur_mut_num = 0;
  marked_words = 0;
  unlock_collector;

  /* join the root-insertion rendezvous */
  do {
    c = gc_point_sync;
  } while (cas_int(gc_point_sync, c, c + 1));

  if (gc_point_sync + gc_region_sync < active_mutator_num) {
    sched_yield();
  } else {
    lock_collector;
    if (phase == ri_core_phase) {
      do_scan_roots();
    }
    unlock_collector;
  }

  /* wait until the root-core scan of this cycle has finished */
  lock_collector;
  while (phase == ri_core_phase)
    cond_wait(&ri_end_cv, &collector_lock);
}

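/* Root-core scan, executed by the last thread to reach the rendezvous
   while the other mutators are stopped at GC points: push the global
   roots and every thread's local roots, then release the mutators and
   wake the collector thread up. */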
static void do_scan_roots()
{
  int tid;
  unsigned int s, e;

  scan_global_roots();

  /* scan every mutator's roots */
  gc_phase = PHASE_ROOT_CORE;
  for (tid = 0; tid < MAXTHREAD; tid++) {
    if (euscontexts[tid]) {
      mutex_lock(&mut_stat_table[tid].lock);
      scan_local_roots(tid);
      mut_stat_table[tid].stat =
        (mut_stat_table[tid].stat ^ 0x2) & 0xfffffffd;
      mutex_unlock(&mut_stat_table[tid].lock);
    }
  }

  /* release the mutators and wake the collector up */
  gc_request_flag = 0;
  ri_core_phase = 1 - ri_core_phase;
  gc_phase = PHASE_ROOT_REM;
  cond_broadcast(&ri_end_cv);
  gc_wakeup_cnt++;
  cond_broadcast(&gc_wakeup_cv);
  DPRINT2("root scan finished: free rate = %lf", (double)freeheap / totalheap);
}

static void wait_until_next_gc_cycle()
{
}

/* End of a GC cycle: clear the mark bitmap if one is used, count the
   cycle as completed, and return to PHASE_NOGC. */
static int do_gc_epilogue()
{
#ifdef __USE_MARK_BITMAP
  clear_bit_table();
#endif
  if (gc_cmp_cnt < gc_wakeup_cnt)
    gc_cmp_cnt++;
  gc_phase = PHASE_NOGC;

  if (debug) {
    fprintf(stderr, " free/total=%d/%d\n",
            freeheap, totalheap);
  }

  DPRINT2("GC cycle finished: free rate = %lf", (double)freeheap / totalheap);
  return 0;
}

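/* One bounded quantum of collector work, dispatched on the current GC
   phase: finish the root scan, mark a little, sweep a little, or run
   the epilogue.  m_unit / s_unit are the marking and sweeping budgets. */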
void do_a_little_gc_work(int m_unit, int s_unit)
{
  unsigned int s, e;

  switch (gc_phase) {
  case PHASE_ROOT_REM:
#ifdef __RETURN_BARRIER
    scan_remaining_roots();
#endif
    gc_phase = PHASE_MARK;
    break;
  case PHASE_MARK:
    mark_a_little(m_unit);
    break;
  case PHASE_SWEEP:
    sweep_a_little(gcmerge, s_unit);
    break;
  case PHASE_EPILOGUE:
    do_gc_epilogue();
    break;
  default:
    ;
  }
}

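/* Main loop of the dedicated collector thread (used when
   __GC_SEPARATE_THREAD is defined): sleep until a cycle is requested
   through gc_wakeup_cv, then repeatedly perform small quanta of GC work
   until the cycle completes. */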
void collect()
{
  int i;
  unsigned s, e;

#ifdef __PROFILE_GC
  int tmp_free;
  reset_utime();
#endif

  /* let init_rgc know the collector thread is up */
  barrier_wait(startup_barrier);

  for (;;) {
    /* sleep until a mutator starts a new GC cycle */
    lock_collector;
    while (gc_cmp_cnt == gc_wakeup_cnt) {
      cond_wait(&gc_wakeup_cv, &collector_lock);
    }

    while (gc_phase != PHASE_NOGC) {
      do_a_little_gc_work(M_UNIT, S_UNIT);
      unlock_collector;
      /* give the mutators a chance to take the lock */
      lock_collector;
    }
    unlock_collector;
  }
}

#ifdef __EAGER_GC_CONTROL
static int change_collector_thread_sched_policy(int t_sect_length)
{
#ifdef __ART_LINUX
  if (art_enter(ART_PRIO_MAX, ART_TASK_PERIODIC, t_sect_length) == -1) {
    DPRINT2("collector error: art_enter");
    return -1;
  }
#else
#endif
  return 0;
}
static int restore_collector_thread_sched_policy()
{
#ifdef __ART_LINUX
  if (art_exit() == -1) {
    DPRINT2("collector error: art_exit");
    return -1;
  }
#else
#endif
  return 0;
}
#endif

#ifdef __USE_POLLING
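/* GC regions (polling version): a mutator that cannot reach GC points
   for a while (e.g. around a blocking call) first enters a "GC region";
   gc_region_sync counts such mutators so that the root-insertion
   rendezvous does not wait for them. */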
void enter_gc_region(int id)
{
  int c, phase;
  mutex_lock(&mut_stat_table[id].lock);

  do {
    c = gc_region_sync;
  } while (cas_int(gc_region_sync, c, c + 1));
  mutex_unlock(&mut_stat_table[id].lock);

  /* if a cycle has been requested, this thread may be the last one
     needed for the root-insertion rendezvous */
  if (gc_request_flag) {
    phase = ri_core_phase;
    if (gc_point_sync + gc_region_sync == active_mutator_num) {
      lock_collector;
      if (phase == ri_core_phase)
        do_scan_roots();
      unlock_collector;
    }
  }
}

void exit_gc_region(int id)
{
  int c;
 try_exit:
  mutex_lock(&mut_stat_table[id].lock);
  if ((mut_stat_table[id].stat & 0x2) == mut_stat_phase) {
    /* this thread's GC status is still out of phase with the current
       cycle: yield and retry */
    mutex_unlock(&mut_stat_table[id].lock);
    sched_yield();
    goto try_exit;
  } else {
    do {
      c = gc_region_sync;
    } while (cas_int(gc_region_sync, c, c - 1));
    mutex_unlock(&mut_stat_table[id].lock);
  }
}

#endif

int ps_sem = 1;

/* initialize mutexes, condition variables and per-thread locks */
static void init_sync_data()
{
  int i;
  mutex_init(&pstack_lock, NULL);
  mutex_init(&collector_lock, NULL);
  cond_init(&gc_wakeup_cv, NULL);
  cond_init(&ri_end_cv, NULL);
  mutex_init(&gc_state_lock, NULL);
  for (i = 0; i < MAXTHREAD; i++) {
    mutex_init(&mut_stat_table[i].lock, NULL);
  }
}

#ifdef __USE_POLLING
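/* GC-point handler for the polling version: check_gc_request() is
   called at GC points; when a cycle has been requested, scan_roots()
   joins the root-insertion rendezvous (the last thread in runs
   do_scan_roots()) and then waits until the root-core scan finishes. */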
void scan_roots()
{
  int c;
  unsigned int e;
  int myid = thr_self();
  int phase = ri_core_phase;

  /* count this thread in at the GC point */
  do {
    c = gc_point_sync;
  } while (cas_int(gc_point_sync, c, c + 1));

  if (gc_point_sync + gc_region_sync < active_mutator_num) {
    sched_yield();
  } else {
    /* last thread in: scan the roots on behalf of everybody */
    lock_collector;
    if (phase == ri_core_phase)
      do_scan_roots();
    unlock_collector;
  }

  /* wait for the end of the root-core scan */
  lock_collector;
  while (phase == ri_core_phase)
    cond_wait(&ri_end_cv, &collector_lock);
  unlock_collector;

  return;
}

int check_gc_request()
{
  if (!gc_request_flag)
    return 0;
  scan_roots();
  return 1;
}
#endif

#ifdef __USE_SIGNAL
/* SIGUSR1 handler (signal version): each mutator scans its own local
   roots when asked to by send_root_insertion_signals(). */
void sighandler(int x)
{
  int idx;
  DPRINT2("start root scanning");
  notify_ri_start();
  idx = thr_self();
  scan_local_roots(thr_self());
  barrier_wait(end_ri_barrier);
  DPRINT2("mutators restart");
}
#endif

/* shared stack of root pointers handed to the collector,
   protected by pstack_lock */
#define PMAXSTACK (MAXSTACK * 110)
pointer pstack[PMAXSTACK];
volatile pointer *psp = pstack;
pointer *pstacklimit = pstack + PMAXSTACK;
mutex_t pstack_lock;
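/* A minimal reusable (sense-reversing) barrier built on a mutex and a
   condition variable.  Typical use in this file:
       startup_barrier = barrier_init(2);
       ...
       barrier_wait(startup_barrier);   // called in both threads
*/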
barrier_t barrier_init(int n_clients)
{
  barrier_t barrier = (barrier_t)malloc(sizeof(struct barrier_struct));
  if (barrier != NULL) {
    barrier->n_clients = n_clients;
    barrier->n_waiting = 0;
    barrier->phase = 0;
    mutex_init(&barrier->lock, NULL);
    cond_init(&barrier->wait_cv, NULL);
  }
  return barrier;
}

void barrier_reset(barrier_t barrier, int n_clients)
{
  /* not synchronized: the caller must ensure no thread is waiting */
  barrier->n_clients = n_clients;
}

void barrier_destroy(barrier_t barrier)
{
  mutex_destroy(&barrier->lock);
  cond_destroy(&barrier->wait_cv);
  free(barrier);
}

void barrier_wait(barrier_t barrier)
{
  int my_phase;
  mutex_lock(&barrier->lock);
  my_phase = barrier->phase;
  barrier->n_waiting++;
  if (barrier->n_waiting == barrier->n_clients) {
    /* last arrival: flip the phase and release everybody */
    barrier->n_waiting = 0;
    barrier->phase = 1 - my_phase;
    cond_broadcast(&barrier->wait_cv);
  }
  /* wait until the phase flips */
  while (barrier->phase == my_phase) {
    cond_wait(&barrier->wait_cv, &barrier->lock);
  }
  mutex_unlock(&barrier->lock);
}
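/* Build the initial heap out of buddy-allocated chunks: keep taking the
   largest chunk size while the request is big enough, then fill the
   remainder with progressively smaller chunks.  Returns the number of
   words actually allocated. */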
unsigned int do_allocate_heap(unsigned int req_words)
{
  int i, k;
  unsigned int rem_words = req_words;

  while (buddysize[MAXBUDDY - 1] <= rem_words) {
    k = newchunk(MAXBUDDY - 1);
    rem_words -= buddysize[k];
  }
  for (i = MAXBUDDY - 2; i >= 20; i--) {
    if (buddysize[i] < rem_words) {
      k = newchunk(i);
      rem_words -= buddysize[k];
    }
  }
  return req_words - rem_words;
}

unsigned int allocate_heap()
{
  return do_allocate_heap(INITIAL_HEAP_SIZE * 1000);
}

extern long long values[];

/* (RGCCOUNT) : number of completed GC cycles */
pointer RGCCOUNT(register context *ctx, int n, pointer argv[])
{
  ckarg(0);
  return makeint(gc_cmp_cnt);
}

/* (RGCTIME) : total user+system CPU time of the process in milliseconds */
pointer RGC_GCTIME(register context *ctx, int n, pointer argv[])
{
  struct tms buf;
  ckarg(0);
  times(&buf);
  return makeint((buf.tms_utime + buf.tms_stime) * 1000 / HZ);
}

#ifdef __PROFILE_GC
/* (RGCALLOCATED) : number of words allocated so far */
pointer RGCALLOCATED(register context *ctx, int n, pointer argv[])
{
  ckarg(0);
  return makeint(allocd_words);
}
#endif

/* register the Lisp-callable entry points above in the system package */
void rgcfunc(register context *ctx, pointer mod)
{
  pointer p = Spevalof(PACKAGE);
  pointer_update(Spevalof(PACKAGE), syspkg);
  defun(ctx, "RGCCOUNT", mod, RGCCOUNT, NULL);
  defun(ctx, "RGCTIME", mod, RGC_GCTIME, NULL);
#ifdef __PROFILE_GC
  defun(ctx, "RGCALLOCATED", mod, RGCALLOCATED, NULL);
#endif
  pointer_update(Spevalof(PACKAGE), p);
}