rgc_mem.c
/*
 * rgc_mem.c
 */

#include <sys/types.h>
#include <sys/times.h>
#include "eus.h"

#define myctx (euscontexts[thr_self()])

// defined in "memory.c"
extern long freeheap, totalheap, marked_words; /* size of heap left and allocated */

/* timers */
long alloccount[MAXBUDDY];

/* disposal processing */
//#define MAXDISPOSE 256
//static pointer dispose[MAXDISPOSE];
//static int dispose_count;

/*
 * assumes that we already hold the alloc lock
 */
static int rgc_expand_heap_or_complete_gc(int req)
{
  int idx = ERR;

#ifdef __HEAP_EXPANDABLE
  /* acquire locks again in the correct order */
  mutex_unlock(&alloc_lock);
  lock_collector;
  mutex_lock(&alloc_lock);

  idx = newchunk(req);
  unlock_collector;
#else
  /* this case is under construction */
  mutex_unlock(&alloc_lock);
  if (gc_phase == PHASE_NOGC) {
    DPRINT1("\x1b[0;32mstart GC: free rate = %lf\x1b[0m", (double)freeheap / (double)totalheap);
    notify_gc();
  }

  /* wait for the GC cycle to finish, contributing incremental work */
  while (gc_phase != PHASE_NOGC) {
    do_a_little_gc_work(AM_UNIT, AS_UNIT); /* assumed work units */
  }

  mutex_lock(&alloc_lock);
#endif

  return idx;
}
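
/*
 * Illustrative sketch, not part of the original file: the lock-ordering
 * idiom of the __HEAP_EXPANDABLE branch above.  A thread that holds
 * alloc_lock and then blocks on the collector lock can deadlock against
 * the collector, which acquires them in the opposite order.  Dropping
 * alloc_lock first and re-acquiring both in the fixed order "collector
 * lock, then alloc_lock" (the reading suggested by the comment above)
 * avoids that.  The helper name is hypothetical.
 */
#if 0
static void relock_in_canonical_order(void)
{
  mutex_unlock(&alloc_lock); /* give up the lock held out of order */
  lock_collector;            /* first: collector lock */
  mutex_lock(&alloc_lock);   /* second: allocator lock */
}
#endif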

static inline unsigned long net_free(int req) {
  unsigned long total = 0;
  register int idx;

  for (idx = (req < REALTIME_ALLOC_LIMIT_IDX ? req : REALTIME_ALLOC_LIMIT_IDX);
       idx < MAXBUDDY; idx++)
    total += buddysize[idx] * buddy[idx].count;

  return total;
}
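
/*
 * Worked example for net_free(), with assumed numbers: suppose the
 * Fibonacci-style table has buddysize[5] = 13, buddysize[6] = 21,
 * buddysize[7] = 34, and the free lists hold buddy[5].count = 4,
 * buddy[6].count = 2, buddy[7].count = 1.  A request of index 5 then
 * sees net_free = 13*4 + 21*2 + 34*1 = 128 words.  Blocks below the
 * requested index are ignored, since they cannot serve the request
 * without coalescing.
 */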

static int km[10] = {4096, 4800, 6000, 8192};
static int ks[10] = {256, 256, 384, 512};

static int recalc_gc_priority(int req) {
  /* returns a value between 0 and 3
   * 0: no gc
   * 1: gc thread
   * 2: normal thread
   * greater: realtime thread
   */
  long estimation;
  int i;

#if 0
  for (i = 0; i < 4; i++) {
    if (gc_phase == PHASE_NOGC || gc_phase >= PHASE_MARK) {
      estimation = (totalheap - freeheap - marked_words) / km[i]
        + (totalheap >> 4) / ks[i];
    } else {
      estimation = (totalheap >> 4) / ks[i];
    }
    estimation *= 1024;
    if (net_free(req) > estimation)
      break;
  }
//  DPRINT1("estimation = %ld", estimation);
  return i;
#endif

  {
    unsigned long nfree = net_free(req);
    for (i = 0; i < 4; i++) {
      if (nfree < totalheap * DEFAULT_GC_THRESHOLD)
        nfree *= 2;
      else
        break;
    }
  }

#if 0
  i = (net_free(req) < totalheap * DEFAULT_GC_THRESHOLD);
#endif

  return i;
}
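
/*
 * Worked example for the loop above, with assumed numbers: let
 * totalheap = 1,000,000 words and DEFAULT_GC_THRESHOLD = 0.25, so the
 * trip point is 250,000 words.
 *   net_free = 300,000 -> break immediately         -> priority 0
 *   net_free = 200,000 -> doubled once to 400,000   -> priority 1
 *   net_free =  80,000 -> doubled to 160k, 320k     -> priority 2
 *   net_free =  20,000 -> doubled four times, still
 *                         short at every test       -> priority 4
 * The scarcer the usable free space, the more doublings are needed to
 * clear the threshold, and the higher the resulting GC priority.
 */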

static volatile int rem_cnt = 0;
static volatile int gc_pri = 0;
#define my_gc_pri ctx->my_gc_pri

/* assumes that we hold the collector lock */
static int should_start_gc(int req) {
  static int gc_cushion = GC_ACTIVATE_CUSHION;

//  if (net_free(req) < (double)totalheap * DEFAULT_GC_THRESHOLD)
  if (recalc_gc_priority(req) > 0)
    gc_cushion--;

  return gc_cushion <= 0
    ? (gc_cushion = GC_ACTIVATE_CUSHION, 1) : 0;
}
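
/*
 * Example of the cushion, assuming GC_ACTIVATE_CUSHION = 3: three
 * pressure reports from recalc_gc_priority() are needed before a cycle
 * starts, after which the counter resets.
 *   call 1: pressure    -> cushion 2 -> no GC
 *   call 2: no pressure -> cushion stays 2 -> no GC
 *   call 3: pressure    -> cushion 1 -> no GC
 *   call 4: pressure    -> cushion 0 -> start GC, cushion back to 3
 * Since gc_cushion is static and never incremented on calm calls, the
 * pressure reports need not be consecutive; the cushion only debounces
 * short-lived dips below the threshold.
 */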

bpointer rgc_root_alloc_big(register context *ctx, register int req)
{ /* req: index to buddy: must be greater than 0 */
  register int i, k;
  register bpointer b, b2;
  numunion nu;
  pointer gcm;

  mutex_lock(&alloc_lock);
  ctx->alloc_big_count++;

alloc_again:

  for (k = req; buddy[k].bp == 0; )
    k++; /* find blocks of adequate size */

  if (k >= MAXBUDDY) { /* no bigger free cell */
    if (buddysize[req] < totalheap / 8) { /* relatively small cell is requested */
      gcm = speval(GCMARGIN);
      DPRINT1("\x1b[1;31mstarved(alloc_big:1, free/total=%d/%d)\x1b[0m",
          freeheap, totalheap);
      rgc_expand_heap_or_complete_gc(DEFAULT_EXPAND_SIZE_IDX); /* assumed argument */
      for (k = req; buddy[k].bp == 0;) k++;
    }
    if (k >= MAXBUDDY) {
      DPRINT1("\x1b[1;31mstarved(alloc_big:2, free/total=%d/%d)\x1b[0m",
          freeheap, totalheap);
      k = rgc_expand_heap_or_complete_gc(req);
      for (k = req; buddy[k].bp == 0;) k++;

      if (k == ERR) {
        mutex_unlock(&alloc_lock);
        error(E_ALLOCATION); /* assumed error code */
      }
    }
  }
  while (req < k) {
    splitheap(k--, buddy);
    if (k > req) k--;
  }
  k = buddysize[req] - 1;
  b = buddy[req].bp;
  b2 = b->b.nextbcell;
  for (i = 0; i < k; i++) b->b.c[i] = 0;
#ifdef RGC
  // take_care(ctx->lastalloc);
#endif
  ctx->lastalloc = makepointer(b);
  buddy[req].bp = b2;
  buddy[req].count--;
#ifdef DEBUG
  printf("root_alloc_big: alloc 1 block (%d), 0x%lx\n", req, b);
#endif
  freeheap -= buddysize[req];
  alloccount[req]++;

#if THREADED
  mutex_unlock(&alloc_lock);
#endif

  /* work other than allocation */
  rem_cnt--;
  if (rem_cnt < 0) {
    lock_collector;
    switch (gc_phase) {
    case PHASE_NOGC:
      /* start a new GC cycle or not? */
      if (should_start_gc(req)) {
        DPRINT2("\x1b[0;32mstart GC: free rate = %lf, frag rate[%d] = %lf\x1b[0m",
            (double)freeheap / (double)totalheap, req, (double)net_free(req) / (double)freeheap);

        notify_gc();
        gc_pri = 1;
      }
      break;
    default:
#ifdef __GC_ALLOC_DRIVEN
      /* change GC priority */
      gc_pri = recalc_gc_priority(req);
#endif
      ;
    }
    unlock_collector;
  } else {
#ifdef __GC_ALLOC_DRIVEN
    /* do a little gc work if needed */
    if (gc_phase != PHASE_NOGC) {
      lock_collector;
      if (gc_phase != PHASE_NOGC) {
        if (my_gc_pri <= gc_pri) {
//          DPRINT1("alloc gc[%d]", gc_pri);
          do_a_little_gc_work(GC_GRANULARITY, GC_GRANULARITY); /* assumed work units */
        }
      }
      unlock_collector;
    }
#endif
  }

  return b;
}
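
/*
 * Note on the split loop in rgc_root_alloc_big(): the buddy system here
 * is a Fibonacci one, with buddysize[k] = buddysize[k-1] + buddysize[k-2],
 * so splitting a block of index k yields children of index k-1 and k-2
 * rather than two equal halves; hence the extra decrement.  Trace with
 * req = 3 and the first free block at k = 7:
 *   split 7 -> children at 6 and 5, continue with k = 5
 *   split 5 -> children at 4 and 3, continue with k = 3
 *   k == req, loop ends; buddy[3] now has a block to hand out.
 */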

void rgc_root_alloc_small(register context *ctx, register int req)
{ /* index to buddy: must be greater than 0 */
  register int i, j, k, kk;
  register bpointer b, b2;
  register struct buddyfree *tb = ctx->thr_buddy;
  static long buddyfill[MAXTHRBUDDY + 1] = {0, 500, 300, 20, 15, 10, 0};
  numunion nu;

  mutex_lock(&alloc_lock);
  ctx->alloc_small_count++;

alloc_again:
  for (i = 1; i < MAXTHRBUDDY; i++) {
    k = kk = buddyfill[i] - tb[i].count; /* how many cells are needed? */
    while (buddy[i].count < k) { /* do we have enough free in the root? */
      /* fprintf(stderr, "free_count = %d; k = %d\n", buddy[i].count, k); */
      j = i + 1;
      while (buddy[j].bp == 0) j++;

      if (j >= MAXBUDDY) { /* no free cell */
        DPRINT1("\x1b[1;31mstarved(alloc_small:1, free/total=%d/%d)\x1b[0m",
            freeheap, totalheap);
        j = rgc_expand_heap_or_complete_gc(DEFAULT_EXPAND_SIZE_IDX); /* assumed argument */
        //unlock_collector;
        if (j == ERR) {
          mutex_unlock(&alloc_lock);
          error(E_ALLOCATION); /* assumed error code */
        }
      }
      splitheap(j, buddy);
    }

    /* sufficient free cells are collected in the root free list */
    if (k > 0) {
      b = buddy[i].bp;
      while (k > 0) {
        b2 = b;
        b->h.cix = -1;
        b = b->b.nextbcell;
        k--;
      }
      b2->b.nextbcell = tb[i].bp;
      tb[i].bp = buddy[i].bp;
      buddy[i].bp = b;
      buddy[i].count -= kk;
      tb[i].count = buddyfill[i];
      freeheap -= buddysize[i] * kk;
      alloccount[i] += kk;
#ifdef DEBUG
      printf("root_alloc_small: alloc %d block(s) (%d)\n", kk, i);
#endif
    }
  }

#if THREADED
  mutex_unlock(&alloc_lock);
#endif
  /*
  {
    int j;
    bpointer p;
    for (i = 1; i < MAXTHRBUDDY; i++) {
      // fprintf(stderr, "tb[i].count = %d\n", tb[i].count);
      for (j = 0, p = tb[i].bp; p != 0; p = p->b.nextbcell) j++;
      // fprintf(stderr, "real list length is = %d\n", j);
      ASSERT(tb[i].count == j);
    }
  }
  */

  /* work other than allocation */
  rem_cnt--;
  if (rem_cnt < 0) {
    lock_collector;
    switch (gc_phase) {
    case PHASE_NOGC:
      /* start a new GC cycle or not? */
      if (should_start_gc(req)) {
        DPRINT2("\x1b[0;32mstart GC: free rate = %lf, frag rate[%d] = %lf\x1b[0m",
            (double)freeheap / (double)totalheap, req, (double)net_free(req) / (double)freeheap);
        notify_gc();
        gc_pri = 1;
      }
      break;
    default:
#ifdef __GC_ALLOC_DRIVEN
      /* change GC priority */
      gc_pri = recalc_gc_priority(req);
#endif
      ;
    }
    unlock_collector;
  } else {
#ifdef __GC_ALLOC_DRIVEN
    /* do a little gc work if needed */
    if (gc_phase != PHASE_NOGC) {
      lock_collector;
      if (gc_phase != PHASE_NOGC) {
        if (my_gc_pri <= gc_pri) {
          do_a_little_gc_work(GC_GRANULARITY, GC_GRANULARITY); /* assumed work units */
        }
      }
      unlock_collector;
    }
#endif
  }

  /*return(b);*/
}
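
/*
 * Sketch of the list splice performed in rgc_root_alloc_small(): the
 * first n cells of the root free list are moved, in one piece, onto the
 * thread-local list.  The helper below is hypothetical and omits the
 * per-cell header tagging (h.cix = -1) done by the real loop.
 */
#if 0
static void splice_n(struct buddyfree *root, struct buddyfree *local, int n)
{
  bpointer b = root->bp, tail = 0;
  int k;
  for (k = 0; k < n; k++) { tail = b; b = b->b.nextbcell; }
  tail->b.nextbcell = local->bp; /* old local list hangs off the tail */
  local->bp = root->bp;          /* the moved run becomes the new head */
  root->bp = b;                  /* root list resumes after the run */
  root->count -= n;
  local->count += n;
}
#endif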

pointer rgc_alloc(register int s, int e, int cid, register int nils)
{ /* allocate heap of 's' longwords */
  register int req = 1, i, ss;
  register pointer p;
  register pointer *v;
  register bpointer b, b2;
  register context *ctx = myctx;
  register struct buddyfree *tb = ctx->thr_buddy;

#if defined(DEBUG) || defined(DEBUG_COUNT)
  static int count = 0;

  count++;

  if (nils > s) {
    printf("alloc:%d:nils(=%d) > s(=%d)!!\n", count, nils, s);
  }
#endif
  ss = max(3, s + 1); /* one more word for the allocation information */
  while (buddysize[req] < ss) req++;
#ifdef DEBUG
  printf("alloc:%d:s=%d, e=%d, cid=%d, nils=%d\n",
      count, s, e, cid, nils);
#endif
  if (req >= MAXTHRBUDDY)
    b = rgc_root_alloc_big(ctx, req);
  else { /* small cell is requested */
    if (tb[req].count == 0) { /* find a cell in the local free list */
      rgc_root_alloc_small(ctx, req);
#ifdef DEBUG
      printf("alloc:");
      dump_bcell(req, ctx->thr_buddy);
#endif
    }
    ASSERT(tb[req].bp != 0);
#if THREADED
    rw_rdlock(&gc_lock);
#endif
#ifdef DEBUG
    fflush(stdout);
    printf("alloc:%d:", count);
    dump_bcell(req, tb);
#endif
    b = tb[req].bp;
#ifdef RGC
    // take_care(ctx->lastalloc);
#endif
    ctx->lastalloc = makepointer(b);
    ss = buddysize[req] - 1;
    tb[req].bp = b->b.nextbcell;
#if defined(DEBUG) || defined(UALLOC_DEBUG)
    printf("alloc:%d:allocate for user[%d(buddysize=%d)] = 0x%lx: new list top = 0x%lx\n",
        count, req, buddysize[req], b, tb[req].bp);
#endif
    for (i = 0; i < ss; i++)
      b->b.c[i] = 0;
    tb[req].count--;
#if THREADED
    rw_unlock(&gc_lock);
#endif
  }

#ifdef __USE_MARK_BITMAP
  /* PHASE_ROOT_REM or PHASE_MARK or PHASE_SWEEP */
  if (gc_phase >= PHASE_SWEEP) {
    markon(b);
  } else {
    b->h.cix = cid;
  }
  // fprintf(stderr, "tag=%x\n", b->h.bix & 0xff);
#else
  /* sweeping_state.chp synchronization needs only memory barriers */
  /* sweeping_state.p synchronization needs collector_lock */
  if (gc_phase >= PHASE_SWEEP) {
//    lock_collector;
    if (gc_phase >= PHASE_MARK) {
      /*** critical section start ***/
      markon(b);
      b->h.cix = cid;
      /*** critical section end ***/
      if (gc_phase < PHASE_MARK) {
        /* the sweeper might have passed, because gc_phase has changed. */
        /* we have locked the collector, so sweeping_state is consistent. */
        if (sweeping_state.p > b) {
          markoff(b);
        }
      }
    } else if (gc_phase == PHASE_SWEEP) {
      if (b >= (bpointer)sweeping_state.chp) {
        /*** critical section start ***/
        markon(b);
        b->h.cix = cid;
        /*** critical section end ***/
        if (b <= (bpointer)sweeping_state.chp->nextchunk) {
          if (b < (bpointer)sweeping_state.chp) /* correct memory barriers are needed. */
            markoff(b); /* the sweeper has passed. */
          else { /* the sweeper may have passed, or may not. */
            lock_collector;
            if (b < sweeping_state.p) {
              markoff(b);
            }
            unlock_collector;
          }
        }
      }
//      unlock_collector;
      b->h.cix = cid;
    }
  } else {
    b->h.cix = cid;
  }
#endif

  b->h.elmtype = e;
#ifndef RGC
  b->h.extra = 0;
#endif
  b->h.nodispose = 0;
  p = makepointer(b);
  v = p->c.obj.iv;
#ifdef DEBUG
  printf("alloc:%d:fill NIL:nils = %d, s = %d\n",
      count, nils, s);
#endif
  i = 0;
  while (i < nils) v[i++] = NIL; /* fill NILs in user's slots */
  /* while (nils<s) v[nils++]=NIL; */
  i = buddysize[req] - 1;
  while (s < i) v[s++] = NULL; /* fill NULLs in the buddy cell's extra slots */
#ifdef DEBUG
  printf("alloc:%d:after filling NIL:", count);
  dump_bcell(req, tb);
#endif
#ifdef RGC
  // take_care(ctx->lastalloc);
#endif
#ifdef __PROFILE_GC
  allocd_words += buddysize[req];
#endif
  return (p);
}
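
/*
 * Usage sketch for rgc_alloc(), with placeholder arguments: one word is
 * reserved for the cell header, so a two-slot cell needs ss = 3 words
 * and maps to the smallest buddy index whose block size is at least 3.
 * Passing nils = 2 makes both user slots come back initialized to NIL.
 */
#if 0
pointer make_two_slot_cell(int elmt, int cid)
{
  /* elmt and cid are placeholders; real callers pass the element type
   * and class index of the object being created. */
  return rgc_alloc(2, elmt, cid, 2);
}
#endif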