Running mruby on Chokanji (超漢字)
Revision | 3ab2f9371e60039936356afaee9f509d782259fd (tree) |
---|---|
Time | 2015-10-20 05:29:43 |
Author | furunkel <julian@linu...> |
Committer | furunkel |
Clean up GC code
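The diff below moves all GC bookkeeping out of `mrb_state` into a dedicated `mrb_gc` struct declared in `mruby/gc.h`, and the internal GC routines now take an explicit `mrb_gc *` instead of reaching through `mrb_state`. For code that touched those fields directly, the visible change is that state such as `gc_disabled` or `arena_idx` is now reached through the embedded `mrb->gc` member. A minimal sketch of the new access pattern, based only on what this diff shows (the helper name is illustrative, not part of the commit):

```c
#include "mruby.h"
#include "mruby/gc.h"

/* Illustrative helper, not part of this commit: shows the field access
 * change from mrb->gc_disabled to the embedded mrb->gc member. */
static mrb_bool
gc_currently_disabled(mrb_state *mrb)
{
  return mrb->gc.disabled;   /* was: mrb->gc_disabled */
}
```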
@@ -35,6 +35,7 @@ | ||
35 | 35 | #include "mrbconf.h" |
36 | 36 | #include "mruby/common.h" |
37 | 37 | #include "mruby/value.h" |
38 | +#include "mruby/gc.h" | |
38 | 39 | #include "mruby/version.h" |
39 | 40 | |
40 | 41 | /** |
@@ -114,16 +115,11 @@ struct mrb_context { | ||
114 | 115 | struct RFiber *fib; |
115 | 116 | }; |
116 | 117 | |
117 | -enum gc_state { | |
118 | - GC_STATE_ROOT = 0, | |
119 | - GC_STATE_MARK, | |
120 | - GC_STATE_SWEEP | |
121 | -}; | |
122 | - | |
123 | 118 | struct mrb_jmpbuf; |
124 | 119 | |
125 | 120 | typedef void (*mrb_atexit_func)(struct mrb_state*); |
126 | 121 | |
122 | + | |
127 | 123 | typedef struct mrb_state { |
128 | 124 | struct mrb_jmpbuf *jmp; |
129 | 125 |
@@ -153,32 +149,8 @@ typedef struct mrb_state { | ||
153 | 149 | struct RClass *symbol_class; |
154 | 150 | struct RClass *kernel_module; |
155 | 151 | |
156 | - struct heap_page *heaps; /* heaps for GC */ | |
157 | - struct heap_page *sweeps; | |
158 | - struct heap_page *free_heaps; | |
159 | - size_t live; /* count of live objects */ | |
160 | -#ifdef MRB_GC_FIXED_ARENA | |
161 | - struct RBasic *arena[MRB_GC_ARENA_SIZE]; /* GC protection array */ | |
162 | -#else | |
163 | - struct RBasic **arena; /* GC protection array */ | |
164 | - int arena_capa; | |
165 | -#endif | |
166 | - int arena_idx; | |
167 | - | |
168 | - enum gc_state gc_state; /* state of gc */ | |
169 | - int current_white_part; /* make white object by white_part */ | |
170 | - struct RBasic *gray_list; /* list of gray objects to be traversed incrementally */ | |
171 | - struct RBasic *atomic_gray_list; /* list of objects to be traversed atomically */ | |
172 | - size_t gc_live_after_mark; | |
173 | - size_t gc_threshold; | |
174 | - int gc_interval_ratio; | |
175 | - int gc_step_ratio; | |
176 | - mrb_bool gc_disabled:1; | |
177 | - mrb_bool gc_full:1; | |
178 | - mrb_bool is_generational_gc_mode:1; | |
179 | - mrb_bool out_of_memory:1; | |
180 | - size_t majorgc_old_threshold; | |
181 | 152 | struct alloca_header *mems; |
153 | + mrb_gc gc; | |
182 | 154 | |
183 | 155 | mrb_sym symidx; |
184 | 156 | struct kh_n2s *name2sym; /* symbol hash */ |
@@ -14,9 +14,68 @@ | ||
14 | 14 | */ |
15 | 15 | MRB_BEGIN_DECL |
16 | 16 | |
17 | -typedef void (mrb_each_object_callback)(mrb_state *mrb, struct RBasic *obj, void *data); | |
18 | -void mrb_objspace_each_objects(mrb_state *mrb, mrb_each_object_callback *callback, void *data); | |
19 | -MRB_API void mrb_free_context(mrb_state *mrb, struct mrb_context *c); | |
17 | + | |
18 | +struct mrb_state; | |
19 | + | |
20 | +typedef void (mrb_each_object_callback)(struct mrb_state *mrb, struct RBasic *obj, void *data); | |
21 | +void mrb_objspace_each_objects(struct mrb_state *mrb, mrb_each_object_callback *callback, void *data); | |
22 | +MRB_API void mrb_free_context(struct mrb_state *mrb, struct mrb_context *c); | |
23 | + | |
24 | + | |
25 | +/* white: 011, black: 100, gray: 000 */ | |
26 | +#define MRB_GC_GRAY 0 | |
27 | +#define MRB_GC_WHITE_A 1 | |
28 | +#define MRB_GC_WHITE_B (1 << 1) | |
29 | +#define MRB_GC_BLACK (1 << 2) | |
30 | +#define MRB_GC_WHITES (MRB_GC_WHITE_A | MRB_GC_WHITE_B) | |
31 | +#define MRB_GC_COLOR_MASK 7 | |
32 | + | |
33 | +typedef enum { | |
34 | + GC_STATE_ROOT = 0, | |
35 | + GC_STATE_MARK, | |
36 | + GC_STATE_SWEEP | |
37 | +} mrb_gc_state; | |
38 | + | |
39 | +typedef struct mrb_heap_page { | |
40 | + struct RBasic *freelist; | |
41 | + struct mrb_heap_page *prev; | |
42 | + struct mrb_heap_page *next; | |
43 | + struct mrb_heap_page *free_next; | |
44 | + struct mrb_heap_page *free_prev; | |
45 | + mrb_bool old:1; | |
46 | + void *objects[]; | |
47 | +} mrb_heap_page; | |
48 | + | |
49 | +typedef struct mrb_gc { | |
50 | + mrb_heap_page *heaps; /* heaps for GC */ | |
51 | + mrb_heap_page *sweeps; | |
52 | + mrb_heap_page *free_heaps; | |
53 | + size_t live; /* count of live objects */ | |
54 | +#ifdef MRB_GC_FIXED_ARENA | |
55 | + struct RBasic *arena[MRB_GC_ARENA_SIZE]; /* GC protection array */ | |
56 | +#else | |
57 | + struct RBasic **arena; /* GC protection array */ | |
58 | + int arena_capa; | |
59 | +#endif | |
60 | + int arena_idx; | |
61 | + | |
62 | + mrb_gc_state gc_state; /* state of gc */ | |
63 | + int current_white_part; /* make white object by white_part */ | |
64 | + struct RBasic *gray_list; /* list of gray objects to be traversed incrementally */ | |
65 | + struct RBasic *atomic_gray_list; /* list of objects to be traversed atomically */ | |
66 | + size_t gc_live_after_mark; | |
67 | + size_t gc_threshold; | |
68 | + int gc_interval_ratio; | |
69 | + int gc_step_ratio; | |
70 | + mrb_bool disabled :1; | |
71 | + mrb_bool full :1; | |
72 | + mrb_bool generational :1; | |
73 | + mrb_bool out_of_memory :1; | |
74 | + size_t majorgc_old_threshold; | |
75 | +} mrb_gc; | |
76 | + | |
77 | +MRB_API mrb_bool | |
78 | +mrb_object_dead_p(struct mrb_state *mrb, struct RObject *object); | |
20 | 79 | |
21 | 80 | MRB_END_DECL |
22 | 81 |
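The `mruby/gc.h` hunk above also exposes the object-liveness check as a public function, `mrb_object_dead_p()`, alongside the existing object-space iterator. A small, illustrative way to use the two together, mirroring `os_count_object_type()` further down in this commit (the callback name is made up for the example):

```c
#include "mruby.h"
#include "mruby/gc.h"

/* Counts objects the GC still considers live; illustrative only,
 * mirroring os_count_object_type() in this commit. */
static void
count_live_cb(mrb_state *mrb, struct RBasic *obj, void *data)
{
  size_t *live = (size_t *)data;
  if (!mrb_object_dead_p(mrb, (struct RObject *)obj)) {
    (*live)++;
  }
}

/* usage:
 *   size_t live = 0;
 *   mrb_objspace_each_objects(mrb, count_live_cb, &live);
 */
```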
@@ -16,24 +16,6 @@ | ||
16 | 16 | |
17 | 17 | #define MRB_FLAG_TEST(obj, flag) ((obj)->flags & flag) |
18 | 18 | |
19 | -/* white: 011, black: 100, gray: 000 */ | |
20 | -#define MRB_GC_GRAY 0 | |
21 | -#define MRB_GC_WHITE_A 1 | |
22 | -#define MRB_GC_WHITE_B (1 << 1) | |
23 | -#define MRB_GC_BLACK (1 << 2) | |
24 | -#define MRB_GC_WHITES (MRB_GC_WHITE_A | MRB_GC_WHITE_B) | |
25 | -#define MRB_GC_COLOR_MASK 7 | |
26 | - | |
27 | -#define paint_gray(o) ((o)->color = MRB_GC_GRAY) | |
28 | -#define paint_black(o) ((o)->color = MRB_GC_BLACK) | |
29 | -#define paint_white(o) ((o)->color = MRB_GC_WHITES) | |
30 | -#define paint_partial_white(s, o) ((o)->color = (s)->current_white_part) | |
31 | -#define is_gray(o) ((o)->color == MRB_GC_GRAY) | |
32 | -#define is_white(o) ((o)->color & MRB_GC_WHITES) | |
33 | -#define is_black(o) ((o)->color & MRB_GC_BLACK) | |
34 | -#define is_dead(s, o) (((o)->color & other_white_part(s) & MRB_GC_WHITES) || (o)->tt == MRB_TT_FREE) | |
35 | -#define flip_white_part(s) ((s)->current_white_part = other_white_part(s)) | |
36 | -#define other_white_part(s) ((s)->current_white_part ^ MRB_GC_WHITES) | |
37 | 19 | |
38 | 20 | struct RBasic { |
39 | 21 | MRB_OBJECT_HEADER; |
@@ -17,7 +17,7 @@ os_count_object_type(mrb_state *mrb, struct RBasic *obj, void *data) | ||
17 | 17 | |
18 | 18 | obj_count->total++; |
19 | 19 | |
20 | - if (is_dead(mrb, obj)) { | |
20 | + if (mrb_object_dead_p(mrb, obj)) { | |
21 | 21 | obj_count->freed++; |
22 | 22 | } |
23 | 23 | else { |
@@ -115,7 +115,7 @@ os_each_object_cb(mrb_state *mrb, struct RBasic *obj, void *ud) | ||
115 | 115 | struct os_each_object_data *d = (struct os_each_object_data*)ud; |
116 | 116 | |
117 | 117 | /* filter dead objects */ |
118 | - if (is_dead(mrb, obj)) { | |
118 | + if (mrb_object_dead_p(mrb, obj)) { | |
119 | 119 | return; |
120 | 120 | } |
121 | 121 |
@@ -206,7 +206,7 @@ MRB_API mrb_noreturn void | ||
206 | 206 | mrb_exc_raise(mrb_state *mrb, mrb_value exc) |
207 | 207 | { |
208 | 208 | mrb->exc = mrb_obj_ptr(exc); |
209 | - if (!mrb->out_of_memory) { | |
209 | + if (!mrb->gc.out_of_memory) { | |
210 | 210 | exc_debug_info(mrb, mrb->exc); |
211 | 211 | } |
212 | 212 | if (!mrb->jmp) { |
@@ -101,6 +101,40 @@ typedef struct { | ||
101 | 101 | union { |
102 | 102 | struct free_obj free; |
103 | 103 | struct RBasic basic; |
104 | + struct RClass klass; | |
105 | + struct RProc proc; | |
106 | + struct RException exc; | |
107 | + } as; | |
108 | +} infreq_value; | |
109 | + | |
110 | +typedef struct { | |
111 | + union { | |
112 | + struct free_obj free; | |
113 | + struct RBasic basic; | |
114 | + struct RObject object; | |
115 | +#ifdef MRB_WORD_BOXING | |
116 | + struct RFloat floatv; | |
117 | + struct RCptr cptr; | |
118 | +#endif | |
119 | + } as; | |
120 | +} small_value; | |
121 | + | |
122 | +typedef struct { | |
123 | + union { | |
124 | + struct free_obj free; | |
125 | + struct RBasic basic; | |
126 | + struct RString string; | |
127 | + struct RArray array; | |
128 | + struct RHash hash; | |
129 | + struct RRange range; | |
130 | + struct RData data; | |
131 | + } as; | |
132 | +} large_value; | |
133 | + | |
134 | +typedef struct { | |
135 | + union { | |
136 | + struct free_obj free; | |
137 | + struct RBasic basic; | |
104 | 138 | struct RObject object; |
105 | 139 | struct RClass klass; |
106 | 140 | struct RString string; |
@@ -136,7 +170,7 @@ gettimeofday_time(void) | ||
136 | 170 | #define GC_INVOKE_TIME_REPORT(with) do {\ |
137 | 171 | fprintf(stderr, "%s\n", with);\ |
138 | 172 | fprintf(stderr, "gc_invoke: %19.3f\n", gettimeofday_time() - program_invoke_time);\ |
139 | - fprintf(stderr, "is_generational: %d\n", is_generational(mrb));\ | |
173 | + fprintf(stderr, "is_generational: %d\n", is_generational(gc));\ | |
140 | 174 | fprintf(stderr, "is_major_gc: %d\n", is_major_gc(mrb));\ |
141 | 175 | } while(0) |
142 | 176 |
@@ -147,10 +181,10 @@ gettimeofday_time(void) | ||
147 | 181 | #define GC_TIME_STOP_AND_REPORT do {\ |
148 | 182 | gc_time = gettimeofday_time() - gc_time;\ |
149 | 183 | gc_total_time += gc_time;\ |
150 | - fprintf(stderr, "gc_state: %d\n", mrb->gc_state);\ | |
151 | - fprintf(stderr, "live: %zu\n", mrb->live);\ | |
152 | - fprintf(stderr, "majorgc_old_threshold: %zu\n", mrb->majorgc_old_threshold);\ | |
153 | - fprintf(stderr, "gc_threshold: %zu\n", mrb->gc_threshold);\ | |
184 | + fprintf(stderr, "gc_state: %d\n", gc->gc_state);\ | |
185 | + fprintf(stderr, "live: %zu\n", gc->live);\ | |
186 | + fprintf(stderr, "majorgc_old_threshold: %zu\n", gc->majorgc_old_threshold);\ | |
187 | + fprintf(stderr, "gc_threshold: %zu\n", gc->gc_threshold);\ | |
154 | 188 | fprintf(stderr, "gc_time: %30.20f\n", gc_time);\ |
155 | 189 | fprintf(stderr, "gc_total_time: %30.20f\n\n", gc_total_time);\ |
156 | 190 | } while(0) |
@@ -166,8 +200,24 @@ gettimeofday_time(void) | ||
166 | 200 | #define DEBUG(x) |
167 | 201 | #endif |
168 | 202 | |
203 | +#ifndef MRB_HEAP_PAGE_SIZE | |
204 | +#define MRB_HEAP_PAGE_SIZE 1024 | |
205 | +#endif | |
206 | + | |
169 | 207 | #define GC_STEP_SIZE 1024 |
170 | 208 | |
209 | +#define paint_gray(o) ((o)->color = MRB_GC_GRAY) | |
210 | +#define paint_black(o) ((o)->color = MRB_GC_BLACK) | |
211 | +#define paint_white(o) ((o)->color = MRB_GC_WHITES) | |
212 | +#define paint_partial_white(s, o) ((o)->color = (s)->current_white_part) | |
213 | +#define is_gray(o) ((o)->color == MRB_GC_GRAY) | |
214 | +#define is_white(o) ((o)->color & MRB_GC_WHITES) | |
215 | +#define is_black(o) ((o)->color & MRB_GC_BLACK) | |
216 | +#define flip_white_part(s) ((s)->current_white_part = other_white_part(s)) | |
217 | +#define other_white_part(s) ((s)->current_white_part ^ MRB_GC_WHITES) | |
218 | +#define is_dead(s, o) (((o)->color & other_white_part(s) & MRB_GC_WHITES) || (o)->tt == MRB_TT_FREE) | |
219 | + | |
220 | +#define objects(p) ((RVALUE *)p->objects) | |
171 | 221 | |
172 | 222 | MRB_API void* |
173 | 223 | mrb_realloc_simple(mrb_state *mrb, void *p, size_t len) |
@@ -175,7 +225,7 @@ mrb_realloc_simple(mrb_state *mrb, void *p, size_t len) | ||
175 | 225 | void *p2; |
176 | 226 | |
177 | 227 | p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud); |
178 | - if (!p2 && len > 0 && mrb->heaps) { | |
228 | + if (!p2 && len > 0 && mrb->gc.heaps) { | |
179 | 229 | mrb_full_gc(mrb); |
180 | 230 | p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud); |
181 | 231 | } |
@@ -183,7 +233,6 @@ mrb_realloc_simple(mrb_state *mrb, void *p, size_t len) | ||
183 | 233 | return p2; |
184 | 234 | } |
185 | 235 | |
186 | - | |
187 | 236 | MRB_API void* |
188 | 237 | mrb_realloc(mrb_state *mrb, void *p, size_t len) |
189 | 238 | { |
@@ -191,16 +240,16 @@ mrb_realloc(mrb_state *mrb, void *p, size_t len) | ||
191 | 240 | |
192 | 241 | p2 = mrb_realloc_simple(mrb, p, len); |
193 | 242 | if (!p2 && len) { |
194 | - if (mrb->out_of_memory) { | |
243 | + if (mrb->gc.out_of_memory) { | |
195 | 244 | /* mrb_panic(mrb); */ |
196 | 245 | } |
197 | 246 | else { |
198 | - mrb->out_of_memory = TRUE; | |
247 | + mrb->gc.out_of_memory = TRUE; | |
199 | 248 | mrb_exc_raise(mrb, mrb_obj_value(mrb->nomem_err)); |
200 | 249 | } |
201 | 250 | } |
202 | 251 | else { |
203 | - mrb->out_of_memory = FALSE; | |
252 | + mrb->gc.out_of_memory = FALSE; | |
204 | 253 | } |
205 | 254 | |
206 | 255 | return p2; |
@@ -244,101 +293,98 @@ mrb_free(mrb_state *mrb, void *p) | ||
244 | 293 | (mrb->allocf)(mrb, p, 0, mrb->allocf_ud); |
245 | 294 | } |
246 | 295 | |
247 | -#ifndef MRB_HEAP_PAGE_SIZE | |
248 | -#define MRB_HEAP_PAGE_SIZE 1024 | |
249 | -#endif | |
250 | - | |
251 | -struct heap_page { | |
252 | - struct RBasic *freelist; | |
253 | - struct heap_page *prev; | |
254 | - struct heap_page *next; | |
255 | - struct heap_page *free_next; | |
256 | - struct heap_page *free_prev; | |
257 | - mrb_bool old:1; | |
258 | - RVALUE objects[MRB_HEAP_PAGE_SIZE]; | |
259 | -}; | |
296 | +MRB_API mrb_bool | |
297 | +mrb_object_dead_p(mrb_state *mrb, struct RObject *object) { | |
298 | + return is_dead(&mrb->gc, object); | |
299 | +} | |
260 | 300 | |
261 | 301 | static void |
262 | -link_heap_page(mrb_state *mrb, struct heap_page *page) | |
302 | +link_heap_page(mrb_gc *gc, mrb_heap_page *page) | |
263 | 303 | { |
264 | - page->next = mrb->heaps; | |
265 | - if (mrb->heaps) | |
266 | - mrb->heaps->prev = page; | |
267 | - mrb->heaps = page; | |
304 | + page->next = gc->heaps; | |
305 | + if (gc->heaps) | |
306 | + gc->heaps->prev = page; | |
307 | + gc->heaps = page; | |
268 | 308 | } |
269 | 309 | |
270 | 310 | static void |
271 | -unlink_heap_page(mrb_state *mrb, struct heap_page *page) | |
311 | +unlink_heap_page(mrb_gc *gc, mrb_heap_page *page) | |
272 | 312 | { |
273 | 313 | if (page->prev) |
274 | 314 | page->prev->next = page->next; |
275 | 315 | if (page->next) |
276 | 316 | page->next->prev = page->prev; |
277 | - if (mrb->heaps == page) | |
278 | - mrb->heaps = page->next; | |
317 | + if (gc->heaps == page) | |
318 | + gc->heaps = page->next; | |
279 | 319 | page->prev = NULL; |
280 | 320 | page->next = NULL; |
281 | 321 | } |
282 | 322 | |
283 | 323 | static void |
284 | -link_free_heap_page(mrb_state *mrb, struct heap_page *page) | |
324 | +link_free_heap_page(mrb_gc *gc, mrb_heap_page *page) | |
285 | 325 | { |
286 | - page->free_next = mrb->free_heaps; | |
287 | - if (mrb->free_heaps) { | |
288 | - mrb->free_heaps->free_prev = page; | |
326 | + page->free_next = gc->free_heaps; | |
327 | + if (gc->free_heaps) { | |
328 | + gc->free_heaps->free_prev = page; | |
289 | 329 | } |
290 | - mrb->free_heaps = page; | |
330 | + gc->free_heaps = page; | |
291 | 331 | } |
292 | 332 | |
293 | 333 | static void |
294 | -unlink_free_heap_page(mrb_state *mrb, struct heap_page *page) | |
334 | +unlink_free_heap_page(mrb_gc *gc, mrb_heap_page *page) | |
295 | 335 | { |
296 | 336 | if (page->free_prev) |
297 | 337 | page->free_prev->free_next = page->free_next; |
298 | 338 | if (page->free_next) |
299 | 339 | page->free_next->free_prev = page->free_prev; |
300 | - if (mrb->free_heaps == page) | |
301 | - mrb->free_heaps = page->free_next; | |
340 | + if (gc->free_heaps == page) | |
341 | + gc->free_heaps = page->free_next; | |
302 | 342 | page->free_prev = NULL; |
303 | 343 | page->free_next = NULL; |
304 | 344 | } |
305 | 345 | |
306 | 346 | static void |
307 | -add_heap(mrb_state *mrb) | |
347 | +add_heap(mrb_state *mrb, mrb_gc *gc) | |
308 | 348 | { |
309 | - struct heap_page *page = (struct heap_page *)mrb_calloc(mrb, 1, sizeof(struct heap_page)); | |
349 | + mrb_heap_page *page = (mrb_heap_page *)mrb_calloc(mrb, 1, sizeof(mrb_heap_page) + MRB_HEAP_PAGE_SIZE * sizeof(RVALUE)); | |
310 | 350 | RVALUE *p, *e; |
311 | 351 | struct RBasic *prev = NULL; |
312 | 352 | |
313 | - for (p = page->objects, e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) { | |
353 | + for (p = objects(page), e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) { | |
314 | 354 | p->as.free.tt = MRB_TT_FREE; |
315 | 355 | p->as.free.next = prev; |
316 | 356 | prev = &p->as.basic; |
317 | 357 | } |
318 | 358 | page->freelist = prev; |
319 | 359 | |
320 | - link_heap_page(mrb, page); | |
321 | - link_free_heap_page(mrb, page); | |
360 | + link_heap_page(gc, page); | |
361 | + link_free_heap_page(gc, page); | |
322 | 362 | } |
323 | 363 | |
324 | 364 | #define DEFAULT_GC_INTERVAL_RATIO 200 |
325 | 365 | #define DEFAULT_GC_STEP_RATIO 200 |
326 | 366 | #define DEFAULT_MAJOR_GC_INC_RATIO 200 |
327 | -#define is_generational(mrb) ((mrb)->is_generational_gc_mode) | |
328 | -#define is_major_gc(mrb) (is_generational(mrb) && (mrb)->gc_full) | |
329 | -#define is_minor_gc(mrb) (is_generational(mrb) && !(mrb)->gc_full) | |
367 | +#define is_generational(gc) ((gc)->generational) | |
368 | +#define is_major_gc(gc) (is_generational(gc) && (gc)->full) | |
369 | +#define is_minor_gc(gc) (is_generational(gc) && !(gc)->full) | |
330 | 370 | |
331 | 371 | void |
332 | -mrb_init_heap(mrb_state *mrb) | |
372 | +mrb_gc_init(mrb_state *mrb, mrb_gc *gc) | |
333 | 373 | { |
334 | - mrb->heaps = NULL; | |
335 | - mrb->free_heaps = NULL; | |
336 | - add_heap(mrb); | |
337 | - mrb->gc_interval_ratio = DEFAULT_GC_INTERVAL_RATIO; | |
338 | - mrb->gc_step_ratio = DEFAULT_GC_STEP_RATIO; | |
374 | +#ifndef MRB_GC_FIXED_ARENA | |
375 | + gc->arena = (struct RBasic**)mrb_malloc(mrb, sizeof(struct RBasic*)*MRB_GC_ARENA_SIZE); | |
376 | + gc->arena_capa = MRB_GC_ARENA_SIZE; | |
377 | +#endif | |
378 | + | |
379 | + gc->current_white_part = MRB_GC_WHITE_A; | |
380 | + gc->heaps = NULL; | |
381 | + gc->free_heaps = NULL; | |
382 | + add_heap(mrb, gc); | |
383 | + gc->gc_interval_ratio = DEFAULT_GC_INTERVAL_RATIO; | |
384 | + gc->gc_step_ratio = DEFAULT_GC_STEP_RATIO; | |
339 | 385 | #ifndef MRB_GC_TURN_OFF_GENERATIONAL |
340 | - mrb->is_generational_gc_mode = TRUE; | |
341 | - mrb->gc_full = TRUE; | |
386 | + gc->generational = TRUE; | |
387 | + gc->full = TRUE; | |
342 | 388 | #endif |
343 | 389 | |
344 | 390 | #ifdef GC_PROFILE |
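One structural change in the hunk above: `struct heap_page` used to embed a fixed `RVALUE objects[MRB_HEAP_PAGE_SIZE]` array, while the new `mrb_heap_page` ends in a flexible `void *objects[]` member, so `add_heap()` sizes the allocation explicitly and the `objects(p)` macro recovers the typed slot array. A sketch restating that allocation (not new behaviour; `MRB_HEAP_PAGE_SIZE` and `RVALUE` are private to gc.c, so the slot size is passed in here for illustration):

```c
#include "mruby.h"
#include "mruby/gc.h"

/* Sketch of the page layout used by add_heap() above: the page header and
 * its MRB_HEAP_PAGE_SIZE (default 1024) RVALUE slots live in one block. */
static mrb_heap_page *
alloc_page_sketch(mrb_state *mrb, size_t slot_size, size_t nslots)
{
  mrb_heap_page *page = (mrb_heap_page *)mrb_calloc(
      mrb, 1, sizeof(mrb_heap_page) + nslots * slot_size);
  /* gc.c reaches the slots via the objects(p) macro: (RVALUE *)p->objects */
  return page;
}
```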
@@ -349,16 +395,16 @@ mrb_init_heap(mrb_state *mrb) | ||
349 | 395 | static void obj_free(mrb_state *mrb, struct RBasic *obj); |
350 | 396 | |
351 | 397 | void |
352 | -mrb_free_heap(mrb_state *mrb) | |
398 | +free_heap(mrb_state *mrb, mrb_gc *gc) | |
353 | 399 | { |
354 | - struct heap_page *page = mrb->heaps; | |
355 | - struct heap_page *tmp; | |
400 | + mrb_heap_page *page = gc->heaps; | |
401 | + mrb_heap_page *tmp; | |
356 | 402 | RVALUE *p, *e; |
357 | 403 | |
358 | 404 | while (page) { |
359 | 405 | tmp = page; |
360 | 406 | page = page->next; |
361 | - for (p = tmp->objects, e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) { | |
407 | + for (p = objects(tmp), e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) { | |
362 | 408 | if (p->as.free.tt != MRB_TT_FREE) |
363 | 409 | obj_free(mrb, &p->as.basic); |
364 | 410 | } |
@@ -366,23 +412,32 @@ mrb_free_heap(mrb_state *mrb) | ||
366 | 412 | } |
367 | 413 | } |
368 | 414 | |
415 | +void | |
416 | +mrb_gc_destroy(mrb_state *mrb, mrb_gc *gc) | |
417 | +{ | |
418 | + free_heap(mrb, gc); | |
419 | +#ifndef MRB_GC_FIXED_ARENA | |
420 | + mrb_free(mrb, gc->arena); | |
421 | +#endif | |
422 | +} | |
423 | + | |
369 | 424 | static void |
370 | -gc_protect(mrb_state *mrb, struct RBasic *p) | |
425 | +gc_protect(mrb_state *mrb, mrb_gc *gc, struct RBasic *p) | |
371 | 426 | { |
372 | 427 | #ifdef MRB_GC_FIXED_ARENA |
373 | - if (mrb->arena_idx >= MRB_GC_ARENA_SIZE) { | |
428 | + if (gc->arena_idx >= MRB_GC_ARENA_SIZE) { | |
374 | 429 | /* arena overflow error */ |
375 | - mrb->arena_idx = MRB_GC_ARENA_SIZE - 4; /* force room in arena */ | |
376 | - mrb_raise(mrb, E_RUNTIME_ERROR, "arena overflow error"); | |
430 | + gc->arena_idx = MRB_GC_ARENA_SIZE - 4; /* force room in arena */ | |
431 | + mrb_raise(gc, E_RUNTIME_ERROR, "arena overflow error"); | |
377 | 432 | } |
378 | 433 | #else |
379 | - if (mrb->arena_idx >= mrb->arena_capa) { | |
434 | + if (gc->arena_idx >= gc->arena_capa) { | |
380 | 435 | /* extend arena */ |
381 | - mrb->arena_capa = (int)(mrb->arena_capa * 1.5); | |
382 | - mrb->arena = (struct RBasic**)mrb_realloc(mrb, mrb->arena, sizeof(struct RBasic*)*mrb->arena_capa); | |
436 | + gc->arena_capa = (int)(gc->arena_capa * 1.5); | |
437 | + gc->arena = (struct RBasic**)mrb_realloc(mrb, gc->arena, sizeof(struct RBasic*)*gc->arena_capa); | |
383 | 438 | } |
384 | 439 | #endif |
385 | - mrb->arena[mrb->arena_idx++] = p; | |
440 | + gc->arena[gc->arena_idx++] = p; | |
386 | 441 | } |
387 | 442 | |
388 | 443 | /* mrb_gc_protect() leaves the object in the arena */ |
@@ -390,7 +445,7 @@ MRB_API void | ||
390 | 445 | mrb_gc_protect(mrb_state *mrb, mrb_value obj) |
391 | 446 | { |
392 | 447 | if (mrb_immediate_p(obj)) return; |
393 | - gc_protect(mrb, mrb_basic_ptr(obj)); | |
448 | + gc_protect(mrb, &mrb->gc, mrb_basic_ptr(obj)); | |
394 | 449 | } |
395 | 450 | |
396 | 451 | #define GC_ROOT_NAME "_gc_root_" |
@@ -445,34 +500,35 @@ mrb_obj_alloc(mrb_state *mrb, enum mrb_vtype ttype, struct RClass *cls) | ||
445 | 500 | { |
446 | 501 | struct RBasic *p; |
447 | 502 | static const RVALUE RVALUE_zero = { { { MRB_TT_FALSE } } }; |
503 | + mrb_gc *gc = &mrb->gc; | |
448 | 504 | |
449 | 505 | #ifdef MRB_GC_STRESS |
450 | 506 | mrb_full_gc(mrb); |
451 | 507 | #endif |
452 | - if (mrb->gc_threshold < mrb->live) { | |
508 | + if (gc->gc_threshold < gc->live) { | |
453 | 509 | mrb_incremental_gc(mrb); |
454 | 510 | } |
455 | - if (mrb->free_heaps == NULL) { | |
456 | - add_heap(mrb); | |
511 | + if (gc->free_heaps == NULL) { | |
512 | + add_heap(mrb, gc); | |
457 | 513 | } |
458 | 514 | |
459 | - p = mrb->free_heaps->freelist; | |
460 | - mrb->free_heaps->freelist = ((struct free_obj*)p)->next; | |
461 | - if (mrb->free_heaps->freelist == NULL) { | |
462 | - unlink_free_heap_page(mrb, mrb->free_heaps); | |
515 | + p = gc->free_heaps->freelist; | |
516 | + gc->free_heaps->freelist = ((struct free_obj*)p)->next; | |
517 | + if (gc->free_heaps->freelist == NULL) { | |
518 | + unlink_free_heap_page(gc, gc->free_heaps); | |
463 | 519 | } |
464 | 520 | |
465 | - mrb->live++; | |
466 | - gc_protect(mrb, p); | |
521 | + gc->live++; | |
522 | + gc_protect(mrb, gc, p); | |
467 | 523 | *(RVALUE *)p = RVALUE_zero; |
468 | 524 | p->tt = ttype; |
469 | 525 | p->c = cls; |
470 | - paint_partial_white(mrb, p); | |
526 | + paint_partial_white(gc, p); | |
471 | 527 | return p; |
472 | 528 | } |
473 | 529 | |
474 | 530 | static inline void |
475 | -add_gray_list(mrb_state *mrb, struct RBasic *obj) | |
531 | +add_gray_list(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj) | |
476 | 532 | { |
477 | 533 | #ifdef MRB_GC_STRESS |
478 | 534 | if (obj->tt > MRB_TT_MAXDEFINE) { |
@@ -480,8 +536,8 @@ add_gray_list(mrb_state *mrb, struct RBasic *obj) | ||
480 | 536 | } |
481 | 537 | #endif |
482 | 538 | paint_gray(obj); |
483 | - obj->gcnext = mrb->gray_list; | |
484 | - mrb->gray_list = obj; | |
539 | + obj->gcnext = gc->gray_list; | |
540 | + gc->gray_list = obj; | |
485 | 541 | } |
486 | 542 | |
487 | 543 | static void |
@@ -538,11 +594,11 @@ mark_context(mrb_state *mrb, struct mrb_context *c) | ||
538 | 594 | } |
539 | 595 | |
540 | 596 | static void |
541 | -gc_mark_children(mrb_state *mrb, struct RBasic *obj) | |
597 | +gc_mark_children(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj) | |
542 | 598 | { |
543 | 599 | mrb_assert(is_gray(obj)); |
544 | 600 | paint_black(obj); |
545 | - mrb->gray_list = obj->gcnext; | |
601 | + gc->gray_list = obj->gcnext; | |
546 | 602 | mrb_gc_mark(mrb, (struct RBasic*)obj->c); |
547 | 603 | switch (obj->tt) { |
548 | 604 | case MRB_TT_ICLASS: |
@@ -644,7 +700,7 @@ mrb_gc_mark(mrb_state *mrb, struct RBasic *obj) | ||
644 | 700 | if (obj == 0) return; |
645 | 701 | if (!is_white(obj)) return; |
646 | 702 | mrb_assert((obj)->tt != MRB_TT_FREE); |
647 | - add_gray_list(mrb, obj); | |
703 | + add_gray_list(mrb, &mrb->gc, obj); | |
648 | 704 | } |
649 | 705 | |
650 | 706 | static void |
@@ -748,19 +804,19 @@ obj_free(mrb_state *mrb, struct RBasic *obj) | ||
748 | 804 | } |
749 | 805 | |
750 | 806 | static void |
751 | -root_scan_phase(mrb_state *mrb) | |
807 | +root_scan_phase(mrb_state *mrb, mrb_gc *gc) | |
752 | 808 | { |
753 | 809 | size_t i, e; |
754 | 810 | |
755 | - if (!is_minor_gc(mrb)) { | |
756 | - mrb->gray_list = NULL; | |
757 | - mrb->atomic_gray_list = NULL; | |
811 | + if (!is_minor_gc(gc)) { | |
812 | + gc->gray_list = NULL; | |
813 | + gc->atomic_gray_list = NULL; | |
758 | 814 | } |
759 | 815 | |
760 | 816 | mrb_gc_mark_gv(mrb); |
761 | 817 | /* mark arena */ |
762 | - for (i=0,e=mrb->arena_idx; i<e; i++) { | |
763 | - mrb_gc_mark(mrb, mrb->arena[i]); | |
818 | + for (i=0,e=gc->arena_idx; i<e; i++) { | |
819 | + mrb_gc_mark(mrb, gc->arena[i]); | |
764 | 820 | } |
765 | 821 | /* mark class hierarchy */ |
766 | 822 | mrb_gc_mark(mrb, (struct RBasic*)mrb->object_class); |
@@ -781,11 +837,11 @@ root_scan_phase(mrb_state *mrb) | ||
781 | 837 | } |
782 | 838 | |
783 | 839 | static size_t |
784 | -gc_gray_mark(mrb_state *mrb, struct RBasic *obj) | |
840 | +gc_gray_mark(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj) | |
785 | 841 | { |
786 | 842 | size_t children = 0; |
787 | 843 | |
788 | - gc_mark_children(mrb, obj); | |
844 | + gc_mark_children(mrb, gc, obj); | |
789 | 845 | |
790 | 846 | switch (obj->tt) { |
791 | 847 | case MRB_TT_ICLASS: |
@@ -864,68 +920,68 @@ gc_gray_mark(mrb_state *mrb, struct RBasic *obj) | ||
864 | 920 | |
865 | 921 | |
866 | 922 | static void |
867 | -gc_mark_gray_list(mrb_state *mrb) { | |
868 | - while (mrb->gray_list) { | |
869 | - if (is_gray(mrb->gray_list)) | |
870 | - gc_mark_children(mrb, mrb->gray_list); | |
923 | +gc_mark_gray_list(mrb_state *mrb, mrb_gc *gc) { | |
924 | + while (gc->gray_list) { | |
925 | + if (is_gray(gc->gray_list)) | |
926 | + gc_mark_children(mrb, gc, gc->gray_list); | |
871 | 927 | else |
872 | - mrb->gray_list = mrb->gray_list->gcnext; | |
928 | + gc->gray_list = gc->gray_list->gcnext; | |
873 | 929 | } |
874 | 930 | } |
875 | 931 | |
876 | 932 | |
877 | 933 | static size_t |
878 | -incremental_marking_phase(mrb_state *mrb, size_t limit) | |
934 | +incremental_marking_phase(mrb_state *mrb, mrb_gc *gc, size_t limit) | |
879 | 935 | { |
880 | 936 | size_t tried_marks = 0; |
881 | 937 | |
882 | - while (mrb->gray_list && tried_marks < limit) { | |
883 | - tried_marks += gc_gray_mark(mrb, mrb->gray_list); | |
938 | + while (gc->gray_list && tried_marks < limit) { | |
939 | + tried_marks += gc_gray_mark(mrb, gc, gc->gray_list); | |
884 | 940 | } |
885 | 941 | |
886 | 942 | return tried_marks; |
887 | 943 | } |
888 | 944 | |
889 | 945 | static void |
890 | -final_marking_phase(mrb_state *mrb) | |
946 | +final_marking_phase(mrb_state *mrb, mrb_gc *gc) | |
891 | 947 | { |
892 | 948 | mark_context_stack(mrb, mrb->root_c); |
893 | - gc_mark_gray_list(mrb); | |
894 | - mrb_assert(mrb->gray_list == NULL); | |
895 | - mrb->gray_list = mrb->atomic_gray_list; | |
896 | - mrb->atomic_gray_list = NULL; | |
897 | - gc_mark_gray_list(mrb); | |
898 | - mrb_assert(mrb->gray_list == NULL); | |
949 | + gc_mark_gray_list(mrb, gc); | |
950 | + mrb_assert(gc->gray_list == NULL); | |
951 | + gc->gray_list = gc->atomic_gray_list; | |
952 | + gc->atomic_gray_list = NULL; | |
953 | + gc_mark_gray_list(mrb, gc); | |
954 | + mrb_assert(gc->gray_list == NULL); | |
899 | 955 | } |
900 | 956 | |
901 | 957 | static void |
902 | -prepare_incremental_sweep(mrb_state *mrb) | |
958 | +prepare_incremental_sweep(mrb_state *mrb, mrb_gc *gc) | |
903 | 959 | { |
904 | - mrb->gc_state = GC_STATE_SWEEP; | |
905 | - mrb->sweeps = mrb->heaps; | |
906 | - mrb->gc_live_after_mark = mrb->live; | |
960 | + gc->gc_state = GC_STATE_SWEEP; | |
961 | + gc->sweeps = gc->heaps; | |
962 | + gc->gc_live_after_mark = gc->live; | |
907 | 963 | } |
908 | 964 | |
909 | 965 | static size_t |
910 | -incremental_sweep_phase(mrb_state *mrb, size_t limit) | |
966 | +incremental_sweep_phase(mrb_state *mrb, mrb_gc *gc, size_t limit) | |
911 | 967 | { |
912 | - struct heap_page *page = mrb->sweeps; | |
968 | + mrb_heap_page *page = gc->sweeps; | |
913 | 969 | size_t tried_sweep = 0; |
914 | 970 | |
915 | 971 | while (page && (tried_sweep < limit)) { |
916 | - RVALUE *p = page->objects; | |
972 | + RVALUE *p = objects(page); | |
917 | 973 | RVALUE *e = p + MRB_HEAP_PAGE_SIZE; |
918 | 974 | size_t freed = 0; |
919 | 975 | mrb_bool dead_slot = TRUE; |
920 | 976 | mrb_bool full = (page->freelist == NULL); |
921 | 977 | |
922 | - if (is_minor_gc(mrb) && page->old) { | |
978 | + if (is_minor_gc(gc) && page->old) { | |
923 | 979 | /* skip a slot which doesn't contain any young object */ |
924 | 980 | p = e; |
925 | 981 | dead_slot = FALSE; |
926 | 982 | } |
927 | 983 | while (p<e) { |
928 | - if (is_dead(mrb, &p->as.basic)) { | |
984 | + if (is_dead(gc, &p->as.basic)) { | |
929 | 985 | if (p->as.basic.tt != MRB_TT_FREE) { |
930 | 986 | obj_free(mrb, &p->as.basic); |
931 | 987 | p->as.free.next = page->freelist; |
@@ -934,8 +990,8 @@ incremental_sweep_phase(mrb_state *mrb, size_t limit) | ||
934 | 990 | } |
935 | 991 | } |
936 | 992 | else { |
937 | - if (!is_generational(mrb)) | |
938 | - paint_partial_white(mrb, &p->as.basic); /* next gc target */ | |
993 | + if (!is_generational(gc)) | |
994 | + paint_partial_white(gc, &p->as.basic); /* next gc target */ | |
939 | 995 | dead_slot = 0; |
940 | 996 | } |
941 | 997 | p++; |
@@ -943,54 +999,54 @@ incremental_sweep_phase(mrb_state *mrb, size_t limit) | ||
943 | 999 | |
944 | 1000 | /* free dead slot */ |
945 | 1001 | if (dead_slot && freed < MRB_HEAP_PAGE_SIZE) { |
946 | - struct heap_page *next = page->next; | |
1002 | + mrb_heap_page *next = page->next; | |
947 | 1003 | |
948 | - unlink_heap_page(mrb, page); | |
949 | - unlink_free_heap_page(mrb, page); | |
1004 | + unlink_heap_page(gc, page); | |
1005 | + unlink_free_heap_page(gc, page); | |
950 | 1006 | mrb_free(mrb, page); |
951 | 1007 | page = next; |
952 | 1008 | } |
953 | 1009 | else { |
954 | 1010 | if (full && freed > 0) { |
955 | - link_free_heap_page(mrb, page); | |
1011 | + link_free_heap_page(gc, page); | |
956 | 1012 | } |
957 | - if (page->freelist == NULL && is_minor_gc(mrb)) | |
1013 | + if (page->freelist == NULL && is_minor_gc(gc)) | |
958 | 1014 | page->old = TRUE; |
959 | 1015 | else |
960 | 1016 | page->old = FALSE; |
961 | 1017 | page = page->next; |
962 | 1018 | } |
963 | 1019 | tried_sweep += MRB_HEAP_PAGE_SIZE; |
964 | - mrb->live -= freed; | |
965 | - mrb->gc_live_after_mark -= freed; | |
1020 | + gc->live -= freed; | |
1021 | + gc->gc_live_after_mark -= freed; | |
966 | 1022 | } |
967 | - mrb->sweeps = page; | |
1023 | + gc->sweeps = page; | |
968 | 1024 | return tried_sweep; |
969 | 1025 | } |
970 | 1026 | |
971 | 1027 | static size_t |
972 | -incremental_gc(mrb_state *mrb, size_t limit) | |
1028 | +incremental_gc(mrb_state *mrb, mrb_gc *gc, size_t limit) | |
973 | 1029 | { |
974 | - switch (mrb->gc_state) { | |
1030 | + switch (gc->gc_state) { | |
975 | 1031 | case GC_STATE_ROOT: |
976 | - root_scan_phase(mrb); | |
977 | - mrb->gc_state = GC_STATE_MARK; | |
978 | - flip_white_part(mrb); | |
1032 | + root_scan_phase(mrb, gc); | |
1033 | + gc->gc_state = GC_STATE_MARK; | |
1034 | + flip_white_part(gc); | |
979 | 1035 | return 0; |
980 | 1036 | case GC_STATE_MARK: |
981 | - if (mrb->gray_list) { | |
982 | - return incremental_marking_phase(mrb, limit); | |
1037 | + if (gc->gray_list) { | |
1038 | + return incremental_marking_phase(mrb, gc, limit); | |
983 | 1039 | } |
984 | 1040 | else { |
985 | - final_marking_phase(mrb); | |
986 | - prepare_incremental_sweep(mrb); | |
1041 | + final_marking_phase(mrb, gc); | |
1042 | + prepare_incremental_sweep(mrb, gc); | |
987 | 1043 | return 0; |
988 | 1044 | } |
989 | 1045 | case GC_STATE_SWEEP: { |
990 | 1046 | size_t tried_sweep = 0; |
991 | - tried_sweep = incremental_sweep_phase(mrb, limit); | |
1047 | + tried_sweep = incremental_sweep_phase(mrb, gc, limit); | |
992 | 1048 | if (tried_sweep == 0) |
993 | - mrb->gc_state = GC_STATE_ROOT; | |
1049 | + gc->gc_state = GC_STATE_ROOT; | |
994 | 1050 | return tried_sweep; |
995 | 1051 | } |
996 | 1052 | default: |
@@ -1001,79 +1057,81 @@ incremental_gc(mrb_state *mrb, size_t limit) | ||
1001 | 1057 | } |
1002 | 1058 | |
1003 | 1059 | static void |
1004 | -incremental_gc_until(mrb_state *mrb, enum gc_state to_state) | |
1060 | +incremental_gc_until(mrb_state *mrb, mrb_gc *gc, mrb_gc_state to_state) | |
1005 | 1061 | { |
1006 | 1062 | do { |
1007 | - incremental_gc(mrb, SIZE_MAX); | |
1008 | - } while (mrb->gc_state != to_state); | |
1063 | + incremental_gc(mrb, gc, SIZE_MAX); | |
1064 | + } while (gc->gc_state != to_state); | |
1009 | 1065 | } |
1010 | 1066 | |
1011 | 1067 | static void |
1012 | -incremental_gc_step(mrb_state *mrb) | |
1068 | +incremental_gc_step(mrb_state *mrb, mrb_gc *gc) | |
1013 | 1069 | { |
1014 | 1070 | size_t limit = 0, result = 0; |
1015 | - limit = (GC_STEP_SIZE/100) * mrb->gc_step_ratio; | |
1071 | + limit = (GC_STEP_SIZE/100) * gc->gc_step_ratio; | |
1016 | 1072 | while (result < limit) { |
1017 | - result += incremental_gc(mrb, limit); | |
1018 | - if (mrb->gc_state == GC_STATE_ROOT) | |
1073 | + result += incremental_gc(mrb, gc, limit); | |
1074 | + if (gc->gc_state == GC_STATE_ROOT) | |
1019 | 1075 | break; |
1020 | 1076 | } |
1021 | 1077 | |
1022 | - mrb->gc_threshold = mrb->live + GC_STEP_SIZE; | |
1078 | + gc->gc_threshold = gc->live + GC_STEP_SIZE; | |
1023 | 1079 | } |
1024 | 1080 | |
1025 | 1081 | static void |
1026 | -clear_all_old(mrb_state *mrb) | |
1082 | +clear_all_old(mrb_state *mrb, mrb_gc *gc) | |
1027 | 1083 | { |
1028 | - mrb_bool origin_mode = mrb->is_generational_gc_mode; | |
1084 | + mrb_bool origin_mode = gc->generational; | |
1029 | 1085 | |
1030 | - mrb_assert(is_generational(mrb)); | |
1031 | - if (is_major_gc(mrb)) { | |
1086 | + mrb_assert(is_generational(gc)); | |
1087 | + if (is_major_gc(gc)) { | |
1032 | 1088 | /* finish the half baked GC */ |
1033 | - incremental_gc_until(mrb, GC_STATE_ROOT); | |
1089 | + incremental_gc_until(mrb, gc, GC_STATE_ROOT); | |
1034 | 1090 | } |
1035 | 1091 | |
1036 | 1092 | /* Sweep the dead objects, then reset all the live objects |
1037 | 1093 | * (including all the old objects, of course) to white. */ |
1038 | - mrb->is_generational_gc_mode = FALSE; | |
1039 | - prepare_incremental_sweep(mrb); | |
1040 | - incremental_gc_until(mrb, GC_STATE_ROOT); | |
1041 | - mrb->is_generational_gc_mode = origin_mode; | |
1094 | + gc->generational = FALSE; | |
1095 | + prepare_incremental_sweep(mrb, gc); | |
1096 | + incremental_gc_until(mrb, gc, GC_STATE_ROOT); | |
1097 | + gc->generational = origin_mode; | |
1042 | 1098 | |
1043 | 1099 | /* The gray objects have already been painted as white */ |
1044 | - mrb->atomic_gray_list = mrb->gray_list = NULL; | |
1100 | + gc->atomic_gray_list = gc->gray_list = NULL; | |
1045 | 1101 | } |
1046 | 1102 | |
1047 | 1103 | MRB_API void |
1048 | 1104 | mrb_incremental_gc(mrb_state *mrb) |
1049 | 1105 | { |
1050 | - if (mrb->gc_disabled) return; | |
1106 | + mrb_gc *gc = &mrb->gc; | |
1107 | + | |
1108 | + if (gc->disabled) return; | |
1051 | 1109 | |
1052 | 1110 | GC_INVOKE_TIME_REPORT("mrb_incremental_gc()"); |
1053 | 1111 | GC_TIME_START; |
1054 | 1112 | |
1055 | - if (is_minor_gc(mrb)) { | |
1056 | - incremental_gc_until(mrb, GC_STATE_ROOT); | |
1113 | + if (is_minor_gc(gc)) { | |
1114 | + incremental_gc_until(mrb, gc, GC_STATE_ROOT); | |
1057 | 1115 | } |
1058 | 1116 | else { |
1059 | - incremental_gc_step(mrb); | |
1117 | + incremental_gc_step(mrb, gc); | |
1060 | 1118 | } |
1061 | 1119 | |
1062 | - if (mrb->gc_state == GC_STATE_ROOT) { | |
1063 | - mrb_assert(mrb->live >= mrb->gc_live_after_mark); | |
1064 | - mrb->gc_threshold = (mrb->gc_live_after_mark/100) * mrb->gc_interval_ratio; | |
1065 | - if (mrb->gc_threshold < GC_STEP_SIZE) { | |
1066 | - mrb->gc_threshold = GC_STEP_SIZE; | |
1120 | + if (gc->gc_state == GC_STATE_ROOT) { | |
1121 | + mrb_assert(gc->live >= gc->gc_live_after_mark); | |
1122 | + gc->gc_threshold = (gc->gc_live_after_mark/100) * gc->gc_interval_ratio; | |
1123 | + if (gc->gc_threshold < GC_STEP_SIZE) { | |
1124 | + gc->gc_threshold = GC_STEP_SIZE; | |
1067 | 1125 | } |
1068 | 1126 | |
1069 | - if (is_major_gc(mrb)) { | |
1070 | - mrb->majorgc_old_threshold = mrb->gc_live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO; | |
1071 | - mrb->gc_full = FALSE; | |
1127 | + if (is_major_gc(gc)) { | |
1128 | + gc->majorgc_old_threshold = gc->gc_live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO; | |
1129 | + gc->full = FALSE; | |
1072 | 1130 | } |
1073 | - else if (is_minor_gc(mrb)) { | |
1074 | - if (mrb->live > mrb->majorgc_old_threshold) { | |
1075 | - clear_all_old(mrb); | |
1076 | - mrb->gc_full = TRUE; | |
1131 | + else if (is_minor_gc(gc)) { | |
1132 | + if (gc->live > gc->majorgc_old_threshold) { | |
1133 | + clear_all_old(mrb, gc); | |
1134 | + gc->full = TRUE; | |
1077 | 1135 | } |
1078 | 1136 | } |
1079 | 1137 | } |
@@ -1085,26 +1143,29 @@ mrb_incremental_gc(mrb_state *mrb) | ||
1085 | 1143 | MRB_API void |
1086 | 1144 | mrb_full_gc(mrb_state *mrb) |
1087 | 1145 | { |
1088 | - if (mrb->gc_disabled) return; | |
1146 | + mrb_gc *gc = &mrb->gc; | |
1147 | + | |
1148 | + if (gc->disabled) return; | |
1149 | + | |
1089 | 1150 | GC_INVOKE_TIME_REPORT("mrb_full_gc()"); |
1090 | 1151 | GC_TIME_START; |
1091 | 1152 | |
1092 | - if (is_generational(mrb)) { | |
1153 | + if (is_generational(gc)) { | |
1093 | 1154 | /* clear all the old objects back to young */ |
1094 | - clear_all_old(mrb); | |
1095 | - mrb->gc_full = TRUE; | |
1155 | + clear_all_old(mrb, gc); | |
1156 | + gc->full = TRUE; | |
1096 | 1157 | } |
1097 | - else if (mrb->gc_state != GC_STATE_ROOT) { | |
1158 | + else if (gc->gc_state != GC_STATE_ROOT) { | |
1098 | 1159 | /* finish half baked GC cycle */ |
1099 | - incremental_gc_until(mrb, GC_STATE_ROOT); | |
1160 | + incremental_gc_until(mrb, gc, GC_STATE_ROOT); | |
1100 | 1161 | } |
1101 | 1162 | |
1102 | - incremental_gc_until(mrb, GC_STATE_ROOT); | |
1103 | - mrb->gc_threshold = (mrb->gc_live_after_mark/100) * mrb->gc_interval_ratio; | |
1163 | + incremental_gc_until(mrb, gc, GC_STATE_ROOT); | |
1164 | + gc->gc_threshold = (gc->gc_live_after_mark/100) * gc->gc_interval_ratio; | |
1104 | 1165 | |
1105 | - if (is_generational(mrb)) { | |
1106 | - mrb->majorgc_old_threshold = mrb->gc_live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO; | |
1107 | - mrb->gc_full = FALSE; | |
1166 | + if (is_generational(gc)) { | |
1167 | + gc->majorgc_old_threshold = gc->gc_live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO; | |
1168 | + gc->full = FALSE; | |
1108 | 1169 | } |
1109 | 1170 | |
1110 | 1171 | GC_TIME_STOP_AND_REPORT; |
@@ -1119,27 +1180,29 @@ mrb_garbage_collect(mrb_state *mrb) | ||
1119 | 1180 | MRB_API int |
1120 | 1181 | mrb_gc_arena_save(mrb_state *mrb) |
1121 | 1182 | { |
1122 | - return mrb->arena_idx; | |
1183 | + return mrb->gc.arena_idx; | |
1123 | 1184 | } |
1124 | 1185 | |
1125 | 1186 | MRB_API void |
1126 | 1187 | mrb_gc_arena_restore(mrb_state *mrb, int idx) |
1127 | 1188 | { |
1189 | + mrb_gc *gc = &mrb->gc; | |
1190 | + | |
1128 | 1191 | #ifndef MRB_GC_FIXED_ARENA |
1129 | - int capa = mrb->arena_capa; | |
1192 | + int capa = gc->arena_capa; | |
1130 | 1193 | |
1131 | 1194 | if (idx < capa / 2) { |
1132 | 1195 | capa = (int)(capa * 0.66); |
1133 | 1196 | if (capa < MRB_GC_ARENA_SIZE) { |
1134 | 1197 | capa = MRB_GC_ARENA_SIZE; |
1135 | 1198 | } |
1136 | - if (capa != mrb->arena_capa) { | |
1137 | - mrb->arena = (struct RBasic**)mrb_realloc(mrb, mrb->arena, sizeof(struct RBasic*)*capa); | |
1138 | - mrb->arena_capa = capa; | |
1199 | + if (capa != gc->arena_capa) { | |
1200 | + gc->arena = (struct RBasic**)mrb_realloc(mrb, gc->arena, sizeof(struct RBasic*)*capa); | |
1201 | + gc->arena_capa = capa; | |
1139 | 1202 | } |
1140 | 1203 | } |
1141 | 1204 | #endif |
1142 | - mrb->arena_idx = idx; | |
1205 | + gc->arena_idx = idx; | |
1143 | 1206 | } |
1144 | 1207 | |
1145 | 1208 | /* |
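The arena save/restore pair above now reads and writes `mrb->gc.arena_idx` rather than fields on `mrb_state` directly, but the calling convention for embedders is unchanged. A typical, illustrative usage pattern of that API:

```c
#include "mruby.h"
#include "mruby/string.h"

/* Illustrative use of the arena API refactored above: temporaries created
 * between save and restore stay GC-protected only until the restore. */
void
build_scratch_string(mrb_state *mrb)
{
  int ai = mrb_gc_arena_save(mrb);          /* returns mrb->gc.arena_idx */
  mrb_value tmp = mrb_str_new_lit(mrb, "scratch");
  /* ... use tmp while it is still protected by the arena ... */
  (void)tmp;
  mrb_gc_arena_restore(mrb, ai);            /* resets mrb->gc.arena_idx */
}
```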
@@ -1150,18 +1213,20 @@ mrb_gc_arena_restore(mrb_state *mrb, int idx) | ||
1150 | 1213 | MRB_API void |
1151 | 1214 | mrb_field_write_barrier(mrb_state *mrb, struct RBasic *obj, struct RBasic *value) |
1152 | 1215 | { |
1216 | + mrb_gc *gc = &mrb->gc; | |
1217 | + | |
1153 | 1218 | if (!is_black(obj)) return; |
1154 | 1219 | if (!is_white(value)) return; |
1155 | 1220 | |
1156 | - mrb_assert(!is_dead(mrb, value) && !is_dead(mrb, obj)); | |
1157 | - mrb_assert(is_generational(mrb) || mrb->gc_state != GC_STATE_ROOT); | |
1221 | + mrb_assert(!is_dead(gc, value) && !is_dead(gc, obj)); | |
1222 | + mrb_assert(is_generational(gc) || mrb->gc.gc_state != GC_STATE_ROOT); | |
1158 | 1223 | |
1159 | - if (is_generational(mrb) || mrb->gc_state == GC_STATE_MARK) { | |
1160 | - add_gray_list(mrb, value); | |
1224 | + if (is_generational(gc) || mrb->gc.gc_state == GC_STATE_MARK) { | |
1225 | + add_gray_list(mrb, gc, value); | |
1161 | 1226 | } |
1162 | 1227 | else { |
1163 | - mrb_assert(mrb->gc_state == GC_STATE_SWEEP); | |
1164 | - paint_partial_white(mrb, obj); /* for never write barriers */ | |
1228 | + mrb_assert(mrb->gc.gc_state == GC_STATE_SWEEP); | |
1229 | + paint_partial_white(gc, obj); /* for never write barriers */ | |
1165 | 1230 | } |
1166 | 1231 | } |
1167 | 1232 |
@@ -1177,13 +1242,15 @@ mrb_field_write_barrier(mrb_state *mrb, struct RBasic *obj, struct RBasic *value | ||
1177 | 1242 | MRB_API void |
1178 | 1243 | mrb_write_barrier(mrb_state *mrb, struct RBasic *obj) |
1179 | 1244 | { |
1245 | + mrb_gc *gc = &mrb->gc; | |
1246 | + | |
1180 | 1247 | if (!is_black(obj)) return; |
1181 | 1248 | |
1182 | - mrb_assert(!is_dead(mrb, obj)); | |
1183 | - mrb_assert(is_generational(mrb) || mrb->gc_state != GC_STATE_ROOT); | |
1249 | + mrb_assert(!is_dead(gc, obj)); | |
1250 | + mrb_assert(is_generational(gc) || gc->gc_state != GC_STATE_ROOT); | |
1184 | 1251 | paint_gray(obj); |
1185 | - obj->gcnext = mrb->atomic_gray_list; | |
1186 | - mrb->atomic_gray_list = obj; | |
1252 | + obj->gcnext = gc->atomic_gray_list; | |
1253 | + gc->atomic_gray_list = obj; | |
1187 | 1254 | } |
1188 | 1255 | |
1189 | 1256 | /* |
@@ -1217,9 +1284,9 @@ gc_start(mrb_state *mrb, mrb_value obj) | ||
1217 | 1284 | static mrb_value |
1218 | 1285 | gc_enable(mrb_state *mrb, mrb_value obj) |
1219 | 1286 | { |
1220 | - mrb_bool old = mrb->gc_disabled; | |
1287 | + mrb_bool old = mrb->gc.disabled; | |
1221 | 1288 | |
1222 | - mrb->gc_disabled = FALSE; | |
1289 | + mrb->gc.disabled = FALSE; | |
1223 | 1290 | |
1224 | 1291 | return mrb_bool_value(old); |
1225 | 1292 | } |
@@ -1239,9 +1306,9 @@ gc_enable(mrb_state *mrb, mrb_value obj) | ||
1239 | 1306 | static mrb_value |
1240 | 1307 | gc_disable(mrb_state *mrb, mrb_value obj) |
1241 | 1308 | { |
1242 | - mrb_bool old = mrb->gc_disabled; | |
1309 | + mrb_bool old = mrb->gc.disabled; | |
1243 | 1310 | |
1244 | - mrb->gc_disabled = TRUE; | |
1311 | + mrb->gc.disabled = TRUE; | |
1245 | 1312 | |
1246 | 1313 | return mrb_bool_value(old); |
1247 | 1314 | } |
@@ -1257,7 +1324,7 @@ gc_disable(mrb_state *mrb, mrb_value obj) | ||
1257 | 1324 | static mrb_value |
1258 | 1325 | gc_interval_ratio_get(mrb_state *mrb, mrb_value obj) |
1259 | 1326 | { |
1260 | - return mrb_fixnum_value(mrb->gc_interval_ratio); | |
1327 | + return mrb_fixnum_value(mrb->gc.gc_interval_ratio); | |
1261 | 1328 | } |
1262 | 1329 | |
1263 | 1330 | /* |
@@ -1275,7 +1342,7 @@ gc_interval_ratio_set(mrb_state *mrb, mrb_value obj) | ||
1275 | 1342 | mrb_int ratio; |
1276 | 1343 | |
1277 | 1344 | mrb_get_args(mrb, "i", &ratio); |
1278 | - mrb->gc_interval_ratio = ratio; | |
1345 | + mrb->gc.gc_interval_ratio = ratio; | |
1279 | 1346 | return mrb_nil_value(); |
1280 | 1347 | } |
1281 | 1348 |
@@ -1290,7 +1357,7 @@ gc_interval_ratio_set(mrb_state *mrb, mrb_value obj) | ||
1290 | 1357 | static mrb_value |
1291 | 1358 | gc_step_ratio_get(mrb_state *mrb, mrb_value obj) |
1292 | 1359 | { |
1293 | - return mrb_fixnum_value(mrb->gc_step_ratio); | |
1360 | + return mrb_fixnum_value(mrb->gc.gc_step_ratio); | |
1294 | 1361 | } |
1295 | 1362 | |
1296 | 1363 | /* |
@@ -1308,24 +1375,24 @@ gc_step_ratio_set(mrb_state *mrb, mrb_value obj) | ||
1308 | 1375 | mrb_int ratio; |
1309 | 1376 | |
1310 | 1377 | mrb_get_args(mrb, "i", &ratio); |
1311 | - mrb->gc_step_ratio = ratio; | |
1378 | + mrb->gc.gc_step_ratio = ratio; | |
1312 | 1379 | return mrb_nil_value(); |
1313 | 1380 | } |
1314 | 1381 | |
1315 | 1382 | static void |
1316 | -change_gen_gc_mode(mrb_state *mrb, mrb_bool enable) | |
1383 | +change_gen_gc_mode(mrb_state *mrb, mrb_gc *gc, mrb_bool enable) | |
1317 | 1384 | { |
1318 | - if (is_generational(mrb) && !enable) { | |
1319 | - clear_all_old(mrb); | |
1320 | - mrb_assert(mrb->gc_state == GC_STATE_ROOT); | |
1321 | - mrb->gc_full = FALSE; | |
1385 | + if (is_generational(gc) && !enable) { | |
1386 | + clear_all_old(mrb, gc); | |
1387 | + mrb_assert(gc->gc_state == GC_STATE_ROOT); | |
1388 | + gc->full = FALSE; | |
1322 | 1389 | } |
1323 | - else if (!is_generational(mrb) && enable) { | |
1324 | - incremental_gc_until(mrb, GC_STATE_ROOT); | |
1325 | - mrb->majorgc_old_threshold = mrb->gc_live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO; | |
1326 | - mrb->gc_full = FALSE; | |
1390 | + else if (!is_generational(gc) && enable) { | |
1391 | + incremental_gc_until(mrb, gc, GC_STATE_ROOT); | |
1392 | + gc->majorgc_old_threshold = gc->gc_live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO; | |
1393 | + gc->full = FALSE; | |
1327 | 1394 | } |
1328 | - mrb->is_generational_gc_mode = enable; | |
1395 | + gc->generational = enable; | |
1329 | 1396 | } |
1330 | 1397 | |
1331 | 1398 | /* |
@@ -1339,7 +1406,7 @@ change_gen_gc_mode(mrb_state *mrb, mrb_bool enable) | ||
1339 | 1406 | static mrb_value |
1340 | 1407 | gc_generational_mode_get(mrb_state *mrb, mrb_value self) |
1341 | 1408 | { |
1342 | - return mrb_bool_value(mrb->is_generational_gc_mode); | |
1409 | + return mrb_bool_value(mrb->gc.generational); | |
1343 | 1410 | } |
1344 | 1411 | |
1345 | 1412 | /* |
@@ -1356,21 +1423,22 @@ gc_generational_mode_set(mrb_state *mrb, mrb_value self) | ||
1356 | 1423 | mrb_bool enable; |
1357 | 1424 | |
1358 | 1425 | mrb_get_args(mrb, "b", &enable); |
1359 | - if (mrb->is_generational_gc_mode != enable) | |
1360 | - change_gen_gc_mode(mrb, enable); | |
1426 | + if (mrb->gc.generational != enable) | |
1427 | + change_gen_gc_mode(mrb, &mrb->gc, enable); | |
1361 | 1428 | |
1362 | 1429 | return mrb_bool_value(enable); |
1363 | 1430 | } |
1364 | 1431 | |
1365 | -void | |
1366 | -mrb_objspace_each_objects(mrb_state *mrb, mrb_each_object_callback *callback, void *data) | |
1432 | + | |
1433 | +static void | |
1434 | +gc_each_objects(mrb_state *mrb, mrb_gc *gc, mrb_each_object_callback *callback, void *data) | |
1367 | 1435 | { |
1368 | - struct heap_page* page = mrb->heaps; | |
1436 | + mrb_heap_page* page = gc->heaps; | |
1369 | 1437 | |
1370 | 1438 | while (page != NULL) { |
1371 | 1439 | RVALUE *p, *pend; |
1372 | 1440 | |
1373 | - p = page->objects; | |
1441 | + p = objects(page); | |
1374 | 1442 | pend = p + MRB_HEAP_PAGE_SIZE; |
1375 | 1443 | for (;p < pend; p++) { |
1376 | 1444 | (*callback)(mrb, &p->as.basic, data); |
@@ -1380,6 +1448,12 @@ mrb_objspace_each_objects(mrb_state *mrb, mrb_each_object_callback *callback, vo | ||
1380 | 1448 | } |
1381 | 1449 | } |
1382 | 1450 | |
1451 | +void | |
1452 | +mrb_objspace_each_objects(mrb_state *mrb, mrb_each_object_callback *callback, void *data) | |
1453 | +{ | |
1454 | + return gc_each_objects(mrb, &mrb->gc, callback, data); | |
1455 | +} | |
1456 | + | |
1383 | 1457 | #ifdef GC_TEST |
1384 | 1458 | #ifdef GC_DEBUG |
1385 | 1459 | static mrb_value gc_test(mrb_state *, mrb_value); |
@@ -1416,9 +1490,10 @@ test_mrb_field_write_barrier(void) | ||
1416 | 1490 | { |
1417 | 1491 | mrb_state *mrb = mrb_open(); |
1418 | 1492 | struct RBasic *obj, *value; |
1493 | + mrb_gc *gc = &mrb->gc; | |
1419 | 1494 | |
1420 | 1495 | puts("test_mrb_field_write_barrier"); |
1421 | - mrb->is_generational_gc_mode = FALSE; | |
1496 | + gc->generational = FALSE; | |
1422 | 1497 | obj = mrb_basic_ptr(mrb_ary_new(mrb)); |
1423 | 1498 | value = mrb_basic_ptr(mrb_str_new_lit(mrb, "value")); |
1424 | 1499 | paint_black(obj); |
@@ -1426,7 +1501,7 @@ test_mrb_field_write_barrier(void) | ||
1426 | 1501 | |
1427 | 1502 | |
1428 | 1503 | puts(" in GC_STATE_MARK"); |
1429 | - mrb->gc_state = GC_STATE_MARK; | |
1504 | + gc->gc_state = GC_STATE_MARK; | |
1430 | 1505 | mrb_field_write_barrier(mrb, obj, value); |
1431 | 1506 | |
1432 | 1507 | mrb_assert(is_gray(value)); |
@@ -1434,24 +1509,24 @@ test_mrb_field_write_barrier(void) | ||
1434 | 1509 | |
1435 | 1510 | puts(" in GC_STATE_SWEEP"); |
1436 | 1511 | paint_partial_white(mrb, value); |
1437 | - mrb->gc_state = GC_STATE_SWEEP; | |
1512 | + gc->gc_state = GC_STATE_SWEEP; | |
1438 | 1513 | mrb_field_write_barrier(mrb, obj, value); |
1439 | 1514 | |
1440 | - mrb_assert(obj->color & mrb->current_white_part); | |
1441 | - mrb_assert(value->color & mrb->current_white_part); | |
1515 | + mrb_assert(obj->color & gc->current_white_part); | |
1516 | + mrb_assert(value->color & gc->current_white_part); | |
1442 | 1517 | |
1443 | 1518 | |
1444 | 1519 | puts(" fail with black"); |
1445 | - mrb->gc_state = GC_STATE_MARK; | |
1520 | + gc->gc_state = GC_STATE_MARK; | |
1446 | 1521 | paint_white(obj); |
1447 | 1522 | paint_partial_white(mrb, value); |
1448 | 1523 | mrb_field_write_barrier(mrb, obj, value); |
1449 | 1524 | |
1450 | - mrb_assert(obj->color & mrb->current_white_part); | |
1525 | + mrb_assert(obj->color & gc->current_white_part); | |
1451 | 1526 | |
1452 | 1527 | |
1453 | 1528 | puts(" fail with gray"); |
1454 | - mrb->gc_state = GC_STATE_MARK; | |
1529 | + gc->gc_state = GC_STATE_MARK; | |
1455 | 1530 | paint_black(obj); |
1456 | 1531 | paint_gray(value); |
1457 | 1532 | mrb_field_write_barrier(mrb, obj, value); |
@@ -1466,7 +1541,7 @@ test_mrb_field_write_barrier(void) | ||
1466 | 1541 | paint_black(obj); |
1467 | 1542 | paint_partial_white(mrb, mrb_basic_ptr(value)); |
1468 | 1543 | |
1469 | - mrb->gc_state = GC_STATE_MARK; | |
1544 | + gc->gc_state = GC_STATE_MARK; | |
1470 | 1545 | mrb_field_write_barrier_value(mrb, obj, value); |
1471 | 1546 | |
1472 | 1547 | mrb_assert(is_gray(mrb_basic_ptr(value))); |
@@ -1480,17 +1555,18 @@ test_mrb_write_barrier(void) | ||
1480 | 1555 | { |
1481 | 1556 | mrb_state *mrb = mrb_open(); |
1482 | 1557 | struct RBasic *obj; |
1558 | + mrb_gc *gc = &mrb->gc; | |
1483 | 1559 | |
1484 | 1560 | puts("test_mrb_write_barrier"); |
1485 | 1561 | obj = mrb_basic_ptr(mrb_ary_new(mrb)); |
1486 | 1562 | paint_black(obj); |
1487 | 1563 | |
1488 | 1564 | puts(" in GC_STATE_MARK"); |
1489 | - mrb->gc_state = GC_STATE_MARK; | |
1565 | + gc->gc_state = GC_STATE_MARK; | |
1490 | 1566 | mrb_write_barrier(mrb, obj); |
1491 | 1567 | |
1492 | 1568 | mrb_assert(is_gray(obj)); |
1493 | - mrb_assert(mrb->atomic_gray_list == obj); | |
1569 | + mrb_assert(gc->atomic_gray_list == obj); | |
1494 | 1570 | |
1495 | 1571 | |
1496 | 1572 | puts(" fail with gray"); |
@@ -1507,19 +1583,20 @@ test_add_gray_list(void) | ||
1507 | 1583 | { |
1508 | 1584 | mrb_state *mrb = mrb_open(); |
1509 | 1585 | struct RBasic *obj1, *obj2; |
1586 | + mrb_gc *gc = &mrb->gc; | |
1510 | 1587 | |
1511 | 1588 | puts("test_add_gray_list"); |
1512 | 1589 | change_gen_gc_mode(mrb, FALSE); |
1513 | - mrb_assert(mrb->gray_list == NULL); | |
1590 | + mrb_assert(gc->gray_list == NULL); | |
1514 | 1591 | obj1 = mrb_basic_ptr(mrb_str_new_lit(mrb, "test")); |
1515 | 1592 | add_gray_list(mrb, obj1); |
1516 | - mrb_assert(mrb->gray_list == obj1); | |
1593 | + mrb_assert(gc->gray_list == obj1); | |
1517 | 1594 | mrb_assert(is_gray(obj1)); |
1518 | 1595 | |
1519 | 1596 | obj2 = mrb_basic_ptr(mrb_str_new_lit(mrb, "test")); |
1520 | 1597 | add_gray_list(mrb, obj2); |
1521 | - mrb_assert(mrb->gray_list == obj2); | |
1522 | - mrb_assert(mrb->gray_list->gcnext == obj1); | |
1598 | + mrb_assert(gc->gray_list == obj2); | |
1599 | + mrb_assert(gc->gray_list->gcnext == obj1); | |
1523 | 1600 | mrb_assert(is_gray(obj2)); |
1524 | 1601 | |
1525 | 1602 | mrb_close(mrb); |
@@ -1532,6 +1609,7 @@ test_gc_gray_mark(void) | ||
1532 | 1609 | mrb_value obj_v, value_v; |
1533 | 1610 | struct RBasic *obj; |
1534 | 1611 | size_t gray_num = 0; |
1612 | + mrb_gc *gc = &mrb->gc; | |
1535 | 1613 | |
1536 | 1614 | puts("test_gc_gray_mark"); |
1537 | 1615 |
@@ -1562,7 +1640,8 @@ test_incremental_gc(void) | ||
1562 | 1640 | mrb_state *mrb = mrb_open(); |
1563 | 1641 | size_t max = ~0, live = 0, total = 0, freed = 0; |
1564 | 1642 | RVALUE *free; |
1565 | - struct heap_page *page; | |
1643 | + mrb_heap_page *page; | |
1644 | + mrb_gc *gc = &mrb->gc; | |
1566 | 1645 | |
1567 | 1646 | puts("test_incremental_gc"); |
1568 | 1647 | change_gen_gc_mode(mrb, FALSE); |
@@ -1570,18 +1649,18 @@ test_incremental_gc(void) | ||
1570 | 1649 | puts(" in mrb_full_gc"); |
1571 | 1650 | mrb_full_gc(mrb); |
1572 | 1651 | |
1573 | - mrb_assert(mrb->gc_state == GC_STATE_ROOT); | |
1652 | + mrb_assert(gc->gc_state == GC_STATE_ROOT); | |
1574 | 1653 | puts(" in GC_STATE_ROOT"); |
1575 | 1654 | incremental_gc(mrb, max); |
1576 | - mrb_assert(mrb->gc_state == GC_STATE_MARK); | |
1655 | + mrb_assert(gc->gc_state == GC_STATE_MARK); | |
1577 | 1656 | puts(" in GC_STATE_MARK"); |
1578 | 1657 | incremental_gc_until(mrb, GC_STATE_SWEEP); |
1579 | - mrb_assert(mrb->gc_state == GC_STATE_SWEEP); | |
1658 | + mrb_assert(gc->gc_state == GC_STATE_SWEEP); | |
1580 | 1659 | |
1581 | 1660 | puts(" in GC_STATE_SWEEP"); |
1582 | - page = mrb->heaps; | |
1661 | + page = gc->heaps; | |
1583 | 1662 | while (page) { |
1584 | - RVALUE *p = page->objects; | |
1663 | + RVALUE *p = objects(page); | |
1585 | 1664 | RVALUE *e = p + MRB_HEAP_PAGE_SIZE; |
1586 | 1665 | while (p<e) { |
1587 | 1666 | if (is_black(&p->as.basic)) { |
@@ -1596,44 +1675,44 @@ test_incremental_gc(void) | ||
1596 | 1675 | total += MRB_HEAP_PAGE_SIZE; |
1597 | 1676 | } |
1598 | 1677 | |
1599 | - mrb_assert(mrb->gray_list == NULL); | |
1678 | + mrb_assert(gc->gray_list == NULL); | |
1600 | 1679 | |
1601 | 1680 | incremental_gc(mrb, max); |
1602 | - mrb_assert(mrb->gc_state == GC_STATE_SWEEP); | |
1681 | + mrb_assert(gc->gc_state == GC_STATE_SWEEP); | |
1603 | 1682 | |
1604 | 1683 | incremental_gc(mrb, max); |
1605 | - mrb_assert(mrb->gc_state == GC_STATE_ROOT); | |
1684 | + mrb_assert(gc->gc_state == GC_STATE_ROOT); | |
1606 | 1685 | |
1607 | - free = (RVALUE*)mrb->heaps->freelist; | |
1686 | + free = (RVALUE*)gc->heaps->freelist; | |
1608 | 1687 | while (free) { |
1609 | 1688 | freed++; |
1610 | 1689 | free = (RVALUE*)free->as.free.next; |
1611 | 1690 | } |
1612 | 1691 | |
1613 | - mrb_assert(mrb->live == live); | |
1614 | - mrb_assert(mrb->live == total-freed); | |
1692 | + mrb_assert(gc->live == live); | |
1693 | + mrb_assert(gc->live == total-freed); | |
1615 | 1694 | |
1616 | 1695 | puts("test_incremental_gc(gen)"); |
1617 | 1696 | incremental_gc_until(mrb, GC_STATE_SWEEP); |
1618 | 1697 | change_gen_gc_mode(mrb, TRUE); |
1619 | 1698 | |
1620 | - mrb_assert(mrb->gc_full == FALSE); | |
1621 | - mrb_assert(mrb->gc_state == GC_STATE_ROOT); | |
1699 | + mrb_assert(gc->full == FALSE); | |
1700 | + mrb_assert(gc->gc_state == GC_STATE_ROOT); | |
1622 | 1701 | |
1623 | 1702 | puts(" in minor"); |
1624 | 1703 | mrb_assert(is_minor_gc(mrb)); |
1625 | - mrb_assert(mrb->majorgc_old_threshold > 0); | |
1626 | - mrb->majorgc_old_threshold = 0; | |
1704 | + mrb_assert(gc->majorgc_old_threshold > 0); | |
1705 | + gc->majorgc_old_threshold = 0; | |
1627 | 1706 | mrb_incremental_gc(mrb); |
1628 | - mrb_assert(mrb->gc_full == TRUE); | |
1629 | - mrb_assert(mrb->gc_state == GC_STATE_ROOT); | |
1707 | + mrb_assert(gc->full == TRUE); | |
1708 | + mrb_assert(gc->gc_state == GC_STATE_ROOT); | |
1630 | 1709 | |
1631 | 1710 | puts(" in major"); |
1632 | 1711 | mrb_assert(is_major_gc(mrb)); |
1633 | 1712 | do { |
1634 | 1713 | mrb_incremental_gc(mrb); |
1635 | - } while (mrb->gc_state != GC_STATE_ROOT); | |
1636 | - mrb_assert(mrb->gc_full == FALSE); | |
1714 | + } while (gc->gc_state != GC_STATE_ROOT); | |
1715 | + mrb_assert(gc->full == FALSE); | |
1637 | 1716 | |
1638 | 1717 | mrb_close(mrb); |
1639 | 1718 | } |
@@ -1642,18 +1721,19 @@ void | ||
1642 | 1721 | test_incremental_sweep_phase(void) |
1643 | 1722 | { |
1644 | 1723 | mrb_state *mrb = mrb_open(); |
1724 | + mrb_gc *gc = &mrb->gc; | |
1645 | 1725 | |
1646 | 1726 | puts("test_incremental_sweep_phase"); |
1647 | 1727 | |
1648 | 1728 | add_heap(mrb); |
1649 | - mrb->sweeps = mrb->heaps; | |
1729 | + gc->sweeps = gc->heaps; | |
1650 | 1730 | |
1651 | - mrb_assert(mrb->heaps->next->next == NULL); | |
1652 | - mrb_assert(mrb->free_heaps->next->next == NULL); | |
1731 | + mrb_assert(gc->heaps->next->next == NULL); | |
1732 | + mrb_assert(gc->free_heaps->next->next == NULL); | |
1653 | 1733 | incremental_sweep_phase(mrb, MRB_HEAP_PAGE_SIZE*3); |
1654 | 1734 | |
1655 | - mrb_assert(mrb->heaps->next == NULL); | |
1656 | - mrb_assert(mrb->heaps == mrb->free_heaps); | |
1735 | + mrb_assert(gc->heaps->next == NULL); | |
1736 | + mrb_assert(gc->heaps == gc->free_heaps); | |
1657 | 1737 | |
1658 | 1738 | mrb_close(mrb); |
1659 | 1739 | } |
@@ -12,10 +12,12 @@ | ||
12 | 12 | #include "mruby/debug.h" |
13 | 13 | #include "mruby/string.h" |
14 | 14 | |
15 | -void mrb_init_heap(mrb_state*); | |
16 | 15 | void mrb_init_core(mrb_state*); |
17 | 16 | void mrb_init_mrbgems(mrb_state*); |
18 | 17 | |
18 | +void mrb_gc_init(mrb_state*, mrb_gc *gc); | |
19 | +void mrb_gc_destroy(mrb_state*, mrb_gc *gc); | |
20 | + | |
19 | 21 | static mrb_value |
20 | 22 | inspect_main(mrb_state *mrb, mrb_value mod) |
21 | 23 | { |
@@ -35,15 +37,9 @@ mrb_open_core(mrb_allocf f, void *ud) | ||
35 | 37 | *mrb = mrb_state_zero; |
36 | 38 | mrb->allocf_ud = ud; |
37 | 39 | mrb->allocf = f; |
38 | - mrb->current_white_part = MRB_GC_WHITE_A; | |
39 | 40 | mrb->atexit_stack_len = 0; |
40 | 41 | |
41 | -#ifndef MRB_GC_FIXED_ARENA | |
42 | - mrb->arena = (struct RBasic**)mrb_malloc(mrb, sizeof(struct RBasic*)*MRB_GC_ARENA_SIZE); | |
43 | - mrb->arena_capa = MRB_GC_ARENA_SIZE; | |
44 | -#endif | |
45 | - | |
46 | - mrb_init_heap(mrb); | |
42 | + mrb_gc_init(mrb, &mrb->gc); | |
47 | 43 | mrb->c = (struct mrb_context*)mrb_malloc(mrb, sizeof(struct mrb_context)); |
48 | 44 | *mrb->c = mrb_context_zero; |
49 | 45 | mrb->root_c = mrb->c; |
@@ -122,7 +118,6 @@ mrb_open_allocf(mrb_allocf f, void *ud) | ||
122 | 118 | } |
123 | 119 | |
124 | 120 | void mrb_free_symtbl(mrb_state *mrb); |
125 | -void mrb_free_heap(mrb_state *mrb); | |
126 | 121 | |
127 | 122 | void |
128 | 123 | mrb_irep_incref(mrb_state *mrb, mrb_irep *irep) |
@@ -249,11 +244,8 @@ mrb_close(mrb_state *mrb) | ||
249 | 244 | mrb_gc_free_gv(mrb); |
250 | 245 | mrb_free_context(mrb, mrb->root_c); |
251 | 246 | mrb_free_symtbl(mrb); |
252 | - mrb_free_heap(mrb); | |
253 | 247 | mrb_alloca_free(mrb); |
254 | -#ifndef MRB_GC_FIXED_ARENA | |
255 | - mrb_free(mrb, mrb->arena); | |
256 | -#endif | |
248 | + mrb_gc_destroy(mrb, &mrb->gc); | |
257 | 249 | mrb_free(mrb, mrb); |
258 | 250 | } |
259 | 251 |
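With the state.c hunks above, heap setup and teardown are reached only through `mrb_gc_init()`/`mrb_gc_destroy()`, which `mrb_open_core()` and `mrb_close()` call internally; `mrb_init_heap()` and `mrb_free_heap()` are gone. The embedder-facing lifecycle is unchanged, roughly as in this minimal sketch:

```c
#include "mruby.h"

/* Minimal embedding sketch: mrb_open() ends up in mrb_open_core(), which now
 * calls mrb_gc_init(mrb, &mrb->gc); mrb_close() tears the GC down through
 * mrb_gc_destroy(mrb, &mrb->gc). */
int
main(void)
{
  mrb_state *mrb = mrb_open();
  if (!mrb) return 1;
  /* ... load and run code ... */
  mrb_close(mrb);
  return 0;
}
```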
@@ -52,7 +52,7 @@ The value below allows about 60000 recursive calls in the simplest case. */ | ||
52 | 52 | # define DEBUG(x) |
53 | 53 | #endif |
54 | 54 | |
55 | -#define ARENA_RESTORE(mrb,ai) (mrb)->arena_idx = (ai) | |
55 | +#define ARENA_RESTORE(mrb,ai) (mrb)->gc.arena_idx = (ai) | |
56 | 56 | |
57 | 57 | static inline void |
58 | 58 | stack_clear(mrb_value *from, size_t count) |