@@ -218,6 +218,7 @@ rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags
218218 if (alloc_size > 24 ) alloc_obj [3 ] = v2 ;
219219 if (alloc_size > 32 ) alloc_obj [4 ] = v3 ;
220220
221+ mmtk_post_alloc (cache_ptr , (void * )alloc_obj , alloc_size + 8 , MMTK_ALLOCATION_SEMANTICS_DEFAULT );
221222
222223 if (rb_gc_shutdown_call_finalizer_p ((VALUE )alloc_obj )) {
223224 mmtk_add_obj_free_candidate (alloc_obj );
@@ -337,7 +338,38 @@ void rb_gc_impl_writebarrier(void *objspace_ptr, VALUE a, VALUE b) { }
/* No-op in this MMTk backend: write-barrier notifications are ignored
 * (presumably barriers are unnecessary or handled by MMTk itself — TODO confirm). */
void rb_gc_impl_writebarrier_unprotect(void *objspace_ptr, VALUE obj) { }
/* No-op in this MMTk backend: remembered-set registration is ignored. */
void rb_gc_impl_writebarrier_remember(void *objspace_ptr, VALUE obj) { }
// Heap walking
340- void rb_gc_impl_each_objects (void * objspace_ptr , int (* callback )(void * , void * , size_t , void * ), void * data ) { }
/* State threaded through mmtk_enumerate_objects to the user callback. */
struct each_objects_data {
    bool stop;                                       /* set once the callback requests early termination */
    int (*callback)(void *, void *, size_t, void *); /* user callback: (start, end, stride, data) */
    void *data;                                      /* opaque user pointer forwarded to the callback */
};
346+
347+ static void
348+ each_objects_i (MMTk_ObjectReference obj , void * d )
349+ {
350+ struct each_objects_data * data = d ;
351+
352+ if (data -> stop ) return ;
353+
354+ size_t slot_size = rb_gc_impl_obj_slot_size ((VALUE )obj );
355+
356+ if (data -> callback (obj , (void * )((char * )obj + slot_size ), slot_size , data -> data ) != 0 ) {
357+ data -> stop = true;
358+ }
359+ }
360+
361+ void
362+ rb_gc_impl_each_objects (void * objspace_ptr , int (* callback )(void * , void * , size_t , void * ), void * data )
363+ {
364+ struct each_objects_data each_objects_data = {
365+ .stop = false,
366+ .callback = callback ,
367+ .data = data ,
368+ };
369+
370+ mmtk_enumerate_objects (each_objects_i , & each_objects_data );
371+ }
372+
/* No-op stub: per-object VALUE iteration is not implemented in this backend;
 * the supplied func is never invoked. */
void rb_gc_impl_each_object(void *objspace_ptr, void (*func)(VALUE obj, void *data), void *data) { }
342374// Finalizers
343375void
0 commit comments