14#define rb_data_object_alloc rb_data_object_alloc
15#define rb_data_typed_object_alloc rb_data_typed_object_alloc
17#include "ruby/internal/config.h"
24#define sighandler_t ruby_sighandler_t
31#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
32# include "wasm/setjmp.h"
33# include "wasm/machine.h"
41#ifndef HAVE_MALLOC_USABLE_SIZE
43# define HAVE_MALLOC_USABLE_SIZE
44# define malloc_usable_size(a) _msize(a)
45# elif defined HAVE_MALLOC_SIZE
46# define HAVE_MALLOC_USABLE_SIZE
47# define malloc_usable_size(a) malloc_size(a)
51#ifdef HAVE_MALLOC_USABLE_SIZE
52# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
54# elif defined(HAVE_MALLOC_H)
56# elif defined(HAVE_MALLOC_NP_H)
57# include <malloc_np.h>
58# elif defined(HAVE_MALLOC_MALLOC_H)
59# include <malloc/malloc.h>
63#if !defined(PAGE_SIZE) && defined(HAVE_SYS_USER_H)
73#ifdef HAVE_SYS_RESOURCE_H
74# include <sys/resource.h>
77#if defined _WIN32 || defined __CYGWIN__
79#elif defined(HAVE_POSIX_MEMALIGN)
80#elif defined(HAVE_MEMALIGN)
87#include <emscripten.h>
90#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
91# include <mach/task.h>
92# include <mach/mach_init.h>
93# include <mach/mach_port.h>
98#include "debug_counter.h"
99#include "eval_intern.h"
103#include "internal/class.h"
104#include "internal/complex.h"
105#include "internal/cont.h"
106#include "internal/error.h"
107#include "internal/eval.h"
108#include "internal/gc.h"
109#include "internal/hash.h"
110#include "internal/imemo.h"
111#include "internal/io.h"
112#include "internal/numeric.h"
113#include "internal/object.h"
114#include "internal/proc.h"
115#include "internal/rational.h"
116#include "internal/sanitizers.h"
117#include "internal/struct.h"
118#include "internal/symbol.h"
119#include "internal/thread.h"
120#include "internal/variable.h"
121#include "internal/warnings.h"
131#include "ruby_assert.h"
132#include "ruby_atomic.h"
134#include "transient_heap.h"
137#include "vm_callinfo.h"
138#include "ractor_core.h"
143#define rb_setjmp(env) RUBY_SETJMP(env)
144#define rb_jmp_buf rb_jmpbuf_t
145#undef rb_data_object_wrap
147#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
148#define MAP_ANONYMOUS MAP_ANON
static inline struct rbimpl_size_mul_overflow_tag
size_add_overflow(size_t x, size_t y)
#elif __has_builtin(__builtin_add_overflow)
    p = __builtin_add_overflow(x, y, &z);
#elif defined(DSIZE_T)
    return (struct rbimpl_size_mul_overflow_tag) { p, z, };

static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_overflow(size_t x, size_t y, size_t z)
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = size_add_overflow(t.right, z);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };

static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w)
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
    struct rbimpl_size_mul_overflow_tag v = size_add_overflow(t.right, u.right);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
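/*
 * Note: rbimpl_size_mul_overflow() and the helpers above all return a
 * struct rbimpl_size_mul_overflow_tag, where `left` is the "did it
 * overflow?" flag and `right` is the (possibly wrapped) arithmetic result.
 * Chaining helpers therefore ORs the overflow flags together; for example
 * size_mul_add_mul_overflow(x, y, z, w) computes x * y + z * w and reports
 * overflow if any intermediate step overflowed.
 */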
PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);
size_mul_or_raise(size_t x, size_t y, VALUE exc)
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    if (LIKELY(!t.left)) {
    else if (rb_during_gc()) {
            "integer overflow: %"PRIuSIZE
            x, y, (size_t)SIZE_MAX);
rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
    return size_mul_or_raise(x, y, exc);
size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
    if (LIKELY(!t.left)) {
    else if (rb_during_gc()) {
            "integer overflow: %"PRIuSIZE
            x, y, z, (size_t)SIZE_MAX);
rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
    return size_mul_add_or_raise(x, y, z, exc);
size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
    if (LIKELY(!t.left)) {
    else if (rb_during_gc()) {
            "integer overflow: %"PRIdSIZE
            x, y, z, w, (size_t)SIZE_MAX);
#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
volatile VALUE rb_gc_guarded_val;
rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
    rb_gc_guarded_val = val;
282#ifndef GC_HEAP_INIT_SLOTS
283#define GC_HEAP_INIT_SLOTS 10000
285#ifndef GC_HEAP_FREE_SLOTS
286#define GC_HEAP_FREE_SLOTS 4096
288#ifndef GC_HEAP_GROWTH_FACTOR
289#define GC_HEAP_GROWTH_FACTOR 1.8
291#ifndef GC_HEAP_GROWTH_MAX_SLOTS
292#define GC_HEAP_GROWTH_MAX_SLOTS 0
294#ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
295#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
298#ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
299#define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
301#ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
302#define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
304#ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
305#define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
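/*
 * These ratios are checked against a size pool's total slots after GC:
 * falling below GC_HEAP_FREE_SLOTS_MIN_RATIO of free slots triggers
 * allocating more pages, exceeding GC_HEAP_FREE_SLOTS_MAX_RATIO lets pages
 * be released, and GC_HEAP_FREE_SLOTS_GOAL_RATIO is the target used when
 * computing how far to grow (see the page-count sizing logic further below).
 */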
308#ifndef GC_MALLOC_LIMIT_MIN
309#define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 )
311#ifndef GC_MALLOC_LIMIT_MAX
312#define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 )
314#ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
315#define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
318#ifndef GC_OLDMALLOC_LIMIT_MIN
319#define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 )
321#ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
322#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
324#ifndef GC_OLDMALLOC_LIMIT_MAX
325#define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 )
328#ifndef PRINT_MEASURE_LINE
329#define PRINT_MEASURE_LINE 0
331#ifndef PRINT_ENTER_EXIT_TICK
332#define PRINT_ENTER_EXIT_TICK 0
334#ifndef PRINT_ROOT_TICKS
335#define PRINT_ROOT_TICKS 0
338#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
342 size_t heap_init_slots;
343 size_t heap_free_slots;
344 double growth_factor;
345 size_t growth_max_slots;
347 double heap_free_slots_min_ratio;
348 double heap_free_slots_goal_ratio;
349 double heap_free_slots_max_ratio;
350 double oldobject_limit_factor;
352 size_t malloc_limit_min;
353 size_t malloc_limit_max;
354 double malloc_limit_growth_factor;
356 size_t oldmalloc_limit_min;
357 size_t oldmalloc_limit_max;
358 double oldmalloc_limit_growth_factor;
366 GC_HEAP_GROWTH_FACTOR,
367 GC_HEAP_GROWTH_MAX_SLOTS,
369 GC_HEAP_FREE_SLOTS_MIN_RATIO,
370 GC_HEAP_FREE_SLOTS_GOAL_RATIO,
371 GC_HEAP_FREE_SLOTS_MAX_RATIO,
372 GC_HEAP_OLDOBJECT_LIMIT_FACTOR,
376 GC_MALLOC_LIMIT_GROWTH_FACTOR,
378 GC_OLDMALLOC_LIMIT_MIN,
379 GC_OLDMALLOC_LIMIT_MAX,
380 GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,
401#define RGENGC_DEBUG -1
403#define RGENGC_DEBUG 0
406#if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
407# define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
408#elif defined(HAVE_VA_ARGS_MACRO)
409# define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
411# define RGENGC_DEBUG_ENABLED(level) 0
413int ruby_rgengc_debug;
423#ifndef RGENGC_CHECK_MODE
424#define RGENGC_CHECK_MODE 0
428#define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)
436#ifndef RGENGC_OLD_NEWOBJ_CHECK
437#define RGENGC_OLD_NEWOBJ_CHECK 0
445#ifndef RGENGC_PROFILE
446#define RGENGC_PROFILE 0
455#ifndef RGENGC_ESTIMATE_OLDMALLOC
456#define RGENGC_ESTIMATE_OLDMALLOC 1
462#ifndef RGENGC_FORCE_MAJOR_GC
463#define RGENGC_FORCE_MAJOR_GC 0
466#ifndef GC_PROFILE_MORE_DETAIL
467#define GC_PROFILE_MORE_DETAIL 0
469#ifndef GC_PROFILE_DETAIL_MEMORY
470#define GC_PROFILE_DETAIL_MEMORY 0
472#ifndef GC_ENABLE_INCREMENTAL_MARK
473#define GC_ENABLE_INCREMENTAL_MARK USE_RINCGC
475#ifndef GC_ENABLE_LAZY_SWEEP
476#define GC_ENABLE_LAZY_SWEEP 1
478#ifndef CALC_EXACT_MALLOC_SIZE
479#define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
481#if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
482#ifndef MALLOC_ALLOCATED_SIZE
483#define MALLOC_ALLOCATED_SIZE 0
486#define MALLOC_ALLOCATED_SIZE 0
488#ifndef MALLOC_ALLOCATED_SIZE_CHECK
489#define MALLOC_ALLOCATED_SIZE_CHECK 0
492#ifndef GC_DEBUG_STRESS_TO_CLASS
493#define GC_DEBUG_STRESS_TO_CLASS 0
496#ifndef RGENGC_OBJ_INFO
497#define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
501 GPR_FLAG_NONE = 0x000,
503 GPR_FLAG_MAJOR_BY_NOFREE = 0x001,
504 GPR_FLAG_MAJOR_BY_OLDGEN = 0x002,
505 GPR_FLAG_MAJOR_BY_SHADY = 0x004,
506 GPR_FLAG_MAJOR_BY_FORCE = 0x008,
507#if RGENGC_ESTIMATE_OLDMALLOC
508 GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
510 GPR_FLAG_MAJOR_MASK = 0x0ff,
513 GPR_FLAG_NEWOBJ = 0x100,
514 GPR_FLAG_MALLOC = 0x200,
515 GPR_FLAG_METHOD = 0x400,
516 GPR_FLAG_CAPI = 0x800,
517 GPR_FLAG_STRESS = 0x1000,
520 GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
521 GPR_FLAG_HAVE_FINALIZE = 0x4000,
522 GPR_FLAG_IMMEDIATE_MARK = 0x8000,
523 GPR_FLAG_FULL_MARK = 0x10000,
524 GPR_FLAG_COMPACT = 0x20000,
527 (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
528 GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI),
529} gc_profile_record_flag;
535 double gc_invoke_time;
537 size_t heap_total_objects;
538 size_t heap_use_size;
539 size_t heap_total_size;
540 size_t moved_objects;
542#if GC_PROFILE_MORE_DETAIL
544 double gc_sweep_time;
546 size_t heap_use_pages;
547 size_t heap_live_objects;
548 size_t heap_free_objects;
550 size_t allocate_increase;
551 size_t allocate_limit;
554 size_t removing_objects;
555 size_t empty_objects;
556#if GC_PROFILE_DETAIL_MEMORY
562#if MALLOC_ALLOCATED_SIZE
563 size_t allocated_size;
566#if RGENGC_PROFILE > 0
568 size_t remembered_normal_objects;
569 size_t remembered_shady_objects;
577 shape_id_t original_shape_id;
580#define RMOVED(obj) ((struct RMoved *)(obj))
631 uint32_t _ractor_belonging_id;
640# define RVALUE_OVERHEAD (sizeof(RVALUE) - offsetof(RVALUE, _ractor_belonging_id))
642# define RVALUE_OVERHEAD (sizeof(RVALUE) - offsetof(RVALUE, file))
644# define RVALUE_OVERHEAD 0
typedef uintptr_t bits_t;
    BITS_SIZE = sizeof(bits_t),
    BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
#define popcount_bits rb_popcount_intptr
672#define STACK_CHUNK_SIZE 500
675 VALUE data[STACK_CHUNK_SIZE];
685 size_t unused_cache_size;
688#define SIZE_POOL_EDEN_HEAP(size_pool) (&(size_pool)->eden_heap)
689#define SIZE_POOL_TOMB_HEAP(size_pool) (&(size_pool)->tomb_heap)
696 uintptr_t compact_cursor_index;
697#if GC_ENABLE_INCREMENTAL_MARK
707 size_t allocatable_pages;
710 size_t total_allocated_pages;
711 size_t total_freed_pages;
712 size_t force_major_gc_count;
735#if MALLOC_ALLOCATED_SIZE
736 size_t allocated_size;
743 unsigned int mode : 2;
744 unsigned int immediate_sweep : 1;
745 unsigned int dont_gc : 1;
746 unsigned int dont_incremental : 1;
747 unsigned int during_gc : 1;
748 unsigned int during_compacting : 1;
749 unsigned int gc_stressful: 1;
750 unsigned int has_hook: 1;
751 unsigned int during_minor_gc : 1;
752#if GC_ENABLE_INCREMENTAL_MARK
753 unsigned int during_incremental_marking : 1;
755 unsigned int measure_gc : 1;
759 size_t total_allocated_objects;
760 VALUE next_object_id;
773 size_t allocated_pages;
774 size_t allocatable_pages;
775 size_t sorted_length;
777 size_t freeable_pages;
781 VALUE deferred_final;
788 unsigned int latest_gc_info;
794#if GC_PROFILE_MORE_DETAIL
799 size_t minor_gc_count;
800 size_t major_gc_count;
801 size_t compact_count;
802 size_t read_barrier_faults;
803#if RGENGC_PROFILE > 0
804 size_t total_generated_normal_object_count;
805 size_t total_generated_shady_object_count;
806 size_t total_shade_operation_count;
807 size_t total_promoted_count;
808 size_t total_remembered_normal_object_count;
809 size_t total_remembered_shady_object_count;
811#if RGENGC_PROFILE >= 2
    size_t generated_normal_object_count_types[RUBY_T_MASK];
    size_t generated_shady_object_count_types[RUBY_T_MASK];
    size_t remembered_normal_object_count_types[RUBY_T_MASK];
    size_t remembered_shady_object_count_types[RUBY_T_MASK];
822 double gc_sweep_start_time;
823 size_t total_allocated_objects_at_gc_start;
824 size_t heap_used_at_gc_start;
828 size_t total_freed_objects;
829 uint64_t total_time_ns;
834 VALUE gc_stress_mode;
839 size_t last_major_gc;
840 size_t uncollectible_wb_unprotected_objects;
841 size_t uncollectible_wb_unprotected_objects_limit;
843 size_t old_objects_limit;
845#if RGENGC_ESTIMATE_OLDMALLOC
846 size_t oldmalloc_increase;
847 size_t oldmalloc_increase_limit;
850#if RGENGC_CHECK_MODE >= 2
    size_t considered_count_table[T_MASK];
    size_t moved_count_table[T_MASK];
    size_t moved_up_count_table[T_MASK];
    size_t moved_down_count_table[T_MASK];
864#if GC_ENABLE_INCREMENTAL_MARK
874#if GC_DEBUG_STRESS_TO_CLASS
875 VALUE stress_to_class;
#ifndef HEAP_PAGE_ALIGN_LOG
#define HEAP_PAGE_ALIGN_LOG 16
#define BASE_SLOT_SIZE sizeof(RVALUE)
#define CEILDIV(i, mod) roomof(i, mod)
    HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
    HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
    HEAP_PAGE_SIZE = HEAP_PAGE_ALIGN,
    HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)) / BASE_SLOT_SIZE),
    HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, BASE_SLOT_SIZE), BITS_BITLENGTH),
    HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),
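/*
 * With HEAP_PAGE_ALIGN_LOG == 16, each heap page body is a 64 KiB region
 * aligned on a 64 KiB boundary, so GET_PAGE_BODY() can recover the page
 * from any interior object pointer by masking the low 16 bits.
 * HEAP_PAGE_OBJ_LIMIT is how many BASE_SLOT_SIZE slots fit after the
 * struct heap_page_header, and HEAP_PAGE_BITMAP_LIMIT is that slot count
 * rounded up to whole bits_t words for the per-page bitmaps.
 */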
#define HEAP_PAGE_ALIGN (1 << HEAP_PAGE_ALIGN_LOG)
#define HEAP_PAGE_SIZE HEAP_PAGE_ALIGN
#if GC_ENABLE_INCREMENTAL_MARK && !defined(INCREMENTAL_MARK_STEP_ALLOCATIONS)
# define INCREMENTAL_MARK_STEP_ALLOCATIONS 500
#undef INIT_HEAP_PAGE_ALLOC_USE_MMAP
static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
#elif defined(__wasm__)
static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
#elif HAVE_CONST_PAGE_SIZE
static const bool HEAP_PAGE_ALLOC_USE_MMAP = (PAGE_SIZE <= HEAP_PAGE_SIZE);
#elif defined(PAGE_MAX_SIZE) && (PAGE_MAX_SIZE <= HEAP_PAGE_SIZE)
static const bool HEAP_PAGE_ALLOC_USE_MMAP = true;
#elif defined(PAGE_SIZE)
# define INIT_HEAP_PAGE_ALLOC_USE_MMAP (PAGE_SIZE <= HEAP_PAGE_SIZE)
#elif defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
# define INIT_HEAP_PAGE_ALLOC_USE_MMAP (sysconf(_SC_PAGE_SIZE) <= HEAP_PAGE_SIZE)
static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
#ifdef INIT_HEAP_PAGE_ALLOC_USE_MMAP
# define HEAP_PAGE_ALLOC_USE_MMAP (heap_page_alloc_use_mmap != false)
static bool heap_page_alloc_use_mmap;
951 unsigned int before_sweep : 1;
952 unsigned int has_remembered_objects : 1;
953 unsigned int has_uncollectible_shady_objects : 1;
954 unsigned int in_tomb : 1;
964 bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
966 bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT];
967 bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT];
968 bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT];
971 bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT];
asan_lock_freelist(struct heap_page *page)
    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
asan_unlock_freelist(struct heap_page *page)
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
992#define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
993#define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
994#define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
996#define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK) / BASE_SLOT_SIZE)
997#define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
998#define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
999#define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
1002#define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
1003#define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
1004#define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
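/*
 * Bitmap addressing: NUM_IN_PAGE(p) is the slot index of p within its
 * page-aligned body (offset / BASE_SLOT_SIZE); BITMAP_INDEX/BITMAP_OFFSET
 * split that index into a bits_t word and a bit within the word, and
 * MARKED_IN_BITMAP/MARK_IN_BITMAP/CLEAR_IN_BITMAP test, set and clear the
 * corresponding bit.  Every per-object GC flag kept in struct heap_page
 * (mark, pin, uncollectible, WB-unprotected, marking) is stored this way.
 */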
1007#define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
1008#define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
1009#define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
1010#define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
1011#define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
1013#define GC_SWEEP_PAGES_FREEABLE_PER_STEP 3
1016#define rb_objspace (*rb_objspace_of(GET_VM()))
1017#define rb_objspace_of(vm) ((vm)->objspace)
1019#define ruby_initial_gc_stress gc_params.gc_stress
1021VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
1023#define malloc_limit objspace->malloc_params.limit
1024#define malloc_increase objspace->malloc_params.increase
1025#define malloc_allocated_size objspace->malloc_params.allocated_size
1026#define heap_pages_sorted objspace->heap_pages.sorted
1027#define heap_allocated_pages objspace->heap_pages.allocated_pages
1028#define heap_pages_sorted_length objspace->heap_pages.sorted_length
1029#define heap_pages_lomem objspace->heap_pages.range[0]
1030#define heap_pages_himem objspace->heap_pages.range[1]
1031#define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
1032#define heap_pages_final_slots objspace->heap_pages.final_slots
1033#define heap_pages_deferred_final objspace->heap_pages.deferred_final
1034#define size_pools objspace->size_pools
1035#define during_gc objspace->flags.during_gc
1036#define finalizing objspace->atomic_flags.finalizing
1037#define finalizer_table objspace->finalizer_table
1038#define global_list objspace->global_list
1039#define ruby_gc_stressful objspace->flags.gc_stressful
1040#define ruby_gc_stress_mode objspace->gc_stress_mode
1041#if GC_DEBUG_STRESS_TO_CLASS
1042#define stress_to_class objspace->stress_to_class
1044#define stress_to_class 0
1048#define dont_gc_on() (fprintf(stderr, "dont_gc_on@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 1)
1049#define dont_gc_off() (fprintf(stderr, "dont_gc_off@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 0)
1050#define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", __FILE__, __LINE__), (int)b), objspace->flags.dont_gc = (b))
1051#define dont_gc_val() (objspace->flags.dont_gc)
1053#define dont_gc_on() (objspace->flags.dont_gc = 1)
1054#define dont_gc_off() (objspace->flags.dont_gc = 0)
1055#define dont_gc_set(b) (((int)b), objspace->flags.dont_gc = (b))
1056#define dont_gc_val() (objspace->flags.dont_gc)
static inline enum gc_mode
gc_mode_verify(enum gc_mode mode)
#if RGENGC_CHECK_MODE > 0
      case gc_mode_marking:
      case gc_mode_sweeping:
      case gc_mode_compacting:
        rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        if (SIZE_POOL_EDEN_HEAP(&size_pools[i])->sweeping_page) {
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_pages;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_slots;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += SIZE_POOL_TOMB_HEAP(&size_pools[i])->total_pages;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += size_pools[i].allocatable_pages;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        int slot_size_multiple = size_pool->slot_size / BASE_SLOT_SIZE;
        count += size_pool->allocatable_pages * HEAP_PAGE_OBJ_LIMIT / slot_size_multiple;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += size_pool->total_allocated_pages;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += size_pool->total_freed_pages;
1161#define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
1162#define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode))
1164#define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
1165#define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
1166#define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
1167#if GC_ENABLE_INCREMENTAL_MARK
1168#define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
1170#define is_incremental_marking(objspace) FALSE
1172#if GC_ENABLE_INCREMENTAL_MARK
1173#define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
1175#define will_be_incremental_marking(objspace) FALSE
1177#if GC_ENABLE_INCREMENTAL_MARK
1178#define GC_INCREMENTAL_SWEEP_SLOT_COUNT 2048
1180#define is_lazy_sweeping(objspace) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(objspace))
1182#if SIZEOF_LONG == SIZEOF_VOIDP
1183# define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
1184# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG)
1185#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
1186# define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
1187# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
1188 ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
1190# error not supported
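/*
 * Object ids for non-special objects are derived from the object's address:
 * when long is pointer-sized, the address is tagged with FIXNUM_FLAG and
 * obj_id_to_ref() strips the tag again; when only long long is
 * pointer-sized, the address is halved into a Fixnum on encode and ids that
 * no longer fit a Fixnum are shifted back via NUM2PTR on decode.
 */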
1193#define RANY(o) ((RVALUE*)(o))
    void (*dfree)(void *);
1202#define RZOMBIE(o) ((struct RZombie *)(o))
1204#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
1206#if RUBY_MARK_FREE_DEBUG
1207int ruby_gc_debug_indent = 0;
1210int ruby_disable_gc = 0;
1211int ruby_enable_autocompact = 0;
void rb_iseq_mark(const rb_iseq_t *iseq);
void rb_iseq_update_references(rb_iseq_t *iseq);
void rb_iseq_free(const rb_iseq_t *iseq);
size_t rb_iseq_memsize(const rb_iseq_t *iseq);
void rb_vm_update_references(void *ptr);
void rb_gcdebug_print_obj_condition(VALUE obj);
NORETURN(static void *gc_vraise(void *ptr));
NORETURN(static void gc_raise(VALUE exc, const char *fmt, ...));
NORETURN(static void negative_size_allocation_error(const char *));
static int garbage_collect(rb_objspace_t *, unsigned int reason);
static int gc_start(rb_objspace_t *objspace, unsigned int reason);
enum gc_enter_event {
    gc_enter_event_start,
    gc_enter_event_mark_continue,
    gc_enter_event_sweep_continue,
    gc_enter_event_rest,
    gc_enter_event_finalizer,
    gc_enter_event_rb_memerror,
static inline void gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
static inline void gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
static void gc_marks(rb_objspace_t *objspace, int full_mark);
static void gc_marks_start(rb_objspace_t *objspace, int full);
static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
NO_SANITIZE("memory", static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr));
static void shrink_stack_chunk_cache(mark_stack_t *stack);
static size_t obj_memsize_of(VALUE obj, int use_all_types);
static void gc_verify_internal_consistency(rb_objspace_t *objspace);
static double getrusage_time(void);
static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason);
static inline void gc_prof_mark_timer_start(rb_objspace_t *);
static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
    if (gc_object_moved_p((_objspace), (VALUE)(_thing))) { \
        *(_type *)&(_thing) = (_type)RMOVED(_thing)->destination; \
#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
#define gc_prof_record(objspace) (objspace)->profile.current_record
#define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
#ifdef HAVE_VA_ARGS_MACRO
# define gc_report(level, objspace, ...) \
    if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
# define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
static const char *obj_info(VALUE obj);
static const char *obj_type_name(VALUE obj);
#if defined(__GNUC__) && defined(__i386__)
typedef unsigned long long tick_t;
#define PRItick "llu"
    unsigned long long int x;
    __asm__ __volatile__ ("rdtsc" : "=A" (x));
#elif defined(__GNUC__) && defined(__x86_64__)
typedef unsigned long long tick_t;
#define PRItick "llu"
static __inline__ tick_t
    unsigned long hi, lo;
    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
    return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
#elif defined(__powerpc64__) && GCC_VERSION_SINCE(4,8,0)
typedef unsigned long long tick_t;
#define PRItick "llu"
static __inline__ tick_t
    unsigned long long val = __builtin_ppc_get_timebase();
#elif defined(__POWERPC__) && defined(__APPLE__)
typedef unsigned long long tick_t;
#define PRItick "llu"
static __inline__ tick_t
    unsigned long int upper, lower, tmp;
    # define mftbu(r) __asm__ volatile("mftbu %0" : "=r"(r))
    # define mftb(r) __asm__ volatile("mftb %0" : "=r"(r))
    } while (tmp != upper);
    return ((tick_t)upper << 32) | lower;
#elif defined(__aarch64__) && defined(__GNUC__)
typedef unsigned long tick_t;
static __inline__ tick_t
    __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (val));
#elif defined(_WIN32) && defined(_MSC_VER)
typedef unsigned __int64 tick_t;
#define PRItick "llu"
typedef clock_t tick_t;
#define PRItick "llu"
typedef double tick_t;
#define PRItick "4.9f"
    return getrusage_time();
1441#error "choose tick type"
1444#define MEASURE_LINE(expr) do { \
1445 volatile tick_t start_time = tick(); \
1446 volatile tick_t end_time; \
1448 end_time = tick(); \
1449 fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
1453#define MEASURE_LINE(expr) expr
asan_unpoison_object_temporary(VALUE obj)
    void *ptr = asan_poisoned_object_p(obj);
    asan_unpoison_object(obj, false);
asan_poison_object_restore(VALUE obj, void *ptr)
        asan_poison_object(obj);
#define asan_unpoisoning_object(obj) \
    for (void *poisoned = asan_unpoison_object_temporary(obj), \
              *unpoisoning = &poisoned; \
         unpoisoning = asan_poison_object_restore(obj, poisoned))
1479#define FL_CHECK2(name, x, pred) \
1480 ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
1481 (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
1482#define FL_TEST2(x,f) FL_CHECK2("FL_TEST2", x, FL_TEST_RAW((x),(f)) != 0)
1483#define FL_SET2(x,f) FL_CHECK2("FL_SET2", x, RBASIC(x)->flags |= (f))
1484#define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))
1486#define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
1487#define RVALUE_PIN_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
1488#define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))
1490#define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
1491#define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
1492#define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
1494#define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
1495#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
1496#define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
#define RVALUE_OLD_AGE   3
#define RVALUE_AGE_SHIFT 5
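/*
 * An object's generational age lives in its RBASIC flags starting at bit
 * RVALUE_AGE_SHIFT (see RVALUE_FLAGS_AGE_SET and RVALUE_AGE_INC below).
 * Once the age reaches RVALUE_OLD_AGE the object is promoted to the old
 * generation and its uncollectible bit is set, so minor GCs will skip it.
 */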
check_rvalue_consistency_force(const VALUE obj, int terminate)
    RB_VM_LOCK_ENTER_NO_BARRIER();
        fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
    else if (!is_pointer_to_heap(objspace, (void *)obj)) {
        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            ccan_list_for_each(&size_pool->tomb_heap.pages, page, page_node) {
                if (page->start <= (uintptr_t)obj &&
                    (uintptr_t)obj < (page->start + (page->total_slots * size_pool->slot_size))) {
                    fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
                            (void *)obj, (void *)page);
        fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
        const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
        const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
        const int mark_bit = RVALUE_MARK_BITMAP(obj) != 0;
        const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0, remembered_bit = marking_bit;
        const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
        if (GET_HEAP_PAGE(obj)->flags.in_tomb) {
            fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));
            fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));
            fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));
        obj_memsize_of((VALUE)obj, FALSE);
        if (age > 0 && wb_unprotected_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);
        if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));
        if (!is_full_marking(objspace)) {
            if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
                fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
                        obj_info(obj), age);
            if (remembered_bit && age != RVALUE_OLD_AGE) {
                fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
                        obj_info(obj), age);
        if (is_incremental_marking(objspace) && marking_bit) {
            if (!is_marking(objspace) && !mark_bit) {
                fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));
    RB_VM_LOCK_LEAVE_NO_BARRIER();
    if (err > 0 && terminate) {
        rb_bug("check_rvalue_consistency_force: there are %d errors.", err);
#if RGENGC_CHECK_MODE == 0
check_rvalue_consistency(const VALUE obj)
check_rvalue_consistency(const VALUE obj)
    check_rvalue_consistency_force(obj, TRUE);
    if (RB_SPECIAL_CONST_P(obj)) {
        void *poisoned = asan_unpoison_object_temporary(obj);
            asan_poison_object(obj);
RVALUE_MARKED(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_MARK_BITMAP(obj) != 0;
RVALUE_PINNED(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_PIN_BITMAP(obj) != 0;
RVALUE_WB_UNPROTECTED(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
RVALUE_MARKING(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_MARKING_BITMAP(obj) != 0;
RVALUE_REMEMBERED(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_MARKING_BITMAP(obj) != 0;
RVALUE_UNCOLLECTIBLE(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
RVALUE_OLD_P_RAW(VALUE obj)
    return (RBASIC(obj)->flags & promoted) == promoted;
RVALUE_OLD_P(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_OLD_P_RAW(obj);
#if RGENGC_CHECK_MODE || GC_DEBUG
RVALUE_AGE(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
    MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
    objspace->rgengc.old_objects++;
    rb_transient_heap_promote(obj);
#if RGENGC_PROFILE >= 2
    objspace->profile.total_promoted_count++;
    RB_DEBUG_COUNTER_INC(obj_promote);
    RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
RVALUE_FLAGS_AGE_SET(VALUE flags, int age)
    flags |= (age << RVALUE_AGE_SHIFT);
    int age = RVALUE_FLAGS_AGE(flags);
    if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
        rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));
    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(flags, age);
    if (age == RVALUE_OLD_AGE) {
        RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
    check_rvalue_consistency(obj);
    check_rvalue_consistency(obj);
    GC_ASSERT(!RVALUE_OLD_P(obj));
    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE);
    RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
    check_rvalue_consistency(obj);
    check_rvalue_consistency(obj);
    GC_ASSERT(!RVALUE_OLD_P(obj));
    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE - 1);
    check_rvalue_consistency(obj);
    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
    CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
    check_rvalue_consistency(obj);
    GC_ASSERT(RVALUE_OLD_P(obj));
    if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(obj)) {
        CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
    RVALUE_DEMOTE_RAW(objspace, obj);
    if (RVALUE_MARKED(obj)) {
        objspace->rgengc.old_objects--;
    check_rvalue_consistency(obj);
RVALUE_AGE_RESET_RAW(VALUE obj)
    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
RVALUE_AGE_RESET(VALUE obj)
    check_rvalue_consistency(obj);
    GC_ASSERT(!RVALUE_OLD_P(obj));
    RVALUE_AGE_RESET_RAW(obj);
    check_rvalue_consistency(obj);
RVALUE_BLACK_P(VALUE obj)
    return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
RVALUE_GREY_P(VALUE obj)
    return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
RVALUE_WHITE_P(VALUE obj)
    return RVALUE_MARKED(obj) == FALSE;
    return calloc(1, n);
rb_objspace_alloc(void)
    objspace->flags.measure_gc = 1;
    malloc_limit = gc_params.malloc_limit_min;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        size_pool->slot_size = (1 << i) * BASE_SLOT_SIZE;
        ccan_list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
        ccan_list_head_init(&SIZE_POOL_TOMB_HEAP(size_pool)->pages);
    if (is_lazy_sweeping(objspace))
        rb_bug("lazy sweeping underway when freeing object space");
1893 if (objspace->profile.records) {
1894 free(objspace->profile.records);
1895 objspace->profile.records = 0;
1900 for (list = global_list; list; list = next) {
1905 if (heap_pages_sorted) {
1907 for (i = 0; i < heap_allocated_pages; ++i) {
1908 heap_page_free(objspace, heap_pages_sorted[i]);
1910 free(heap_pages_sorted);
1911 heap_allocated_pages = 0;
1912 heap_pages_sorted_length = 0;
1913 heap_pages_lomem = 0;
1914 heap_pages_himem = 0;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1918 SIZE_POOL_EDEN_HEAP(size_pool)->total_pages = 0;
1919 SIZE_POOL_EDEN_HEAP(size_pool)->total_slots = 0;
1922 st_free_table(objspace->id_to_obj_tbl);
1923 st_free_table(objspace->obj_to_id_tbl);
1925 free_stack_chunks(&objspace->mark_stack);
1926 mark_stack_free_cache(&objspace->mark_stack);
heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
    gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %"PRIdSIZE", size: %"PRIdSIZE"\n",
    if (heap_pages_sorted_length > 0) {
        sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
        if (sorted) heap_pages_sorted = sorted;
        sorted = heap_pages_sorted = (struct heap_page **)malloc(size);
1952 heap_pages_sorted_length = next_length;
1963 size_t next_length = heap_allocatable_pages(objspace);
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1966 next_length += SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
1967 next_length += SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
1970 if (next_length > heap_pages_sorted_length) {
1971 heap_pages_expand_sorted_to(objspace, next_length);
1974 GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
1975 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
1981 size_pool->allocatable_pages = s;
1982 heap_pages_expand_sorted(objspace);
    ASSERT_vm_locking();
    asan_unpoison_object(obj, false);
    asan_unlock_freelist(page);
    p->as.free.flags = 0;
    p->as.free.next = page->freelist;
    asan_lock_freelist(page);
    if (RGENGC_CHECK_MODE &&
        !(page->start <= (uintptr_t)obj &&
          (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * page->slot_size)) &&
          obj % BASE_SLOT_SIZE == 0)) {
        rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
    asan_poison_object(obj);
    gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
    asan_unlock_freelist(page);
    GC_ASSERT(page->free_slots != 0);
    GC_ASSERT(page->freelist != NULL);
    page->free_next = heap->free_pages;
    heap->free_pages = page;
    RUBY_DEBUG_LOG("page:%p freelist:%p", (void *)page, (void *)page->freelist);
    asan_lock_freelist(page);
2028#if GC_ENABLE_INCREMENTAL_MARK
2032 asan_unlock_freelist(page);
2033 GC_ASSERT(page->free_slots != 0);
2034 GC_ASSERT(page->freelist != NULL);
2036 page->free_next = heap->pooled_pages;
2037 heap->pooled_pages = page;
2038 objspace->rincgc.pooled_slots += page->free_slots;
2040 asan_lock_freelist(page);
2047 ccan_list_del(&page->page_node);
2048 heap->total_pages--;
2049 heap->total_slots -= page->total_slots;
static void rb_aligned_free(void *ptr, size_t size);
    GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
    if (HEAP_PAGE_ALLOC_USE_MMAP) {
        GC_ASSERT(HEAP_PAGE_SIZE % sysconf(_SC_PAGE_SIZE) == 0);
        if (munmap(page_body, HEAP_PAGE_SIZE)) {
            rb_bug("heap_page_body_free: munmap failed");
        rb_aligned_free(page_body, HEAP_PAGE_SIZE);
    heap_allocated_pages--;
    page->size_pool->total_freed_pages++;
    heap_page_body_free(GET_PAGE_BODY(page->start));
2086 bool has_pages_in_tomb_heap = FALSE;
2087 for (i = 0; i < SIZE_POOL_COUNT; i++) {
2088 if (!ccan_list_empty(&SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages)) {
2089 has_pages_in_tomb_heap = TRUE;
2094 if (has_pages_in_tomb_heap) {
2095 for (i = j = 1; j < heap_allocated_pages; i++) {
2096 struct heap_page *page = heap_pages_sorted[i];
2098 if (page->flags.in_tomb && page->free_slots == page->total_slots) {
2099 heap_unlink_page(objspace, SIZE_POOL_TOMB_HEAP(page->size_pool), page);
2100 heap_page_free(objspace, page);
2104 heap_pages_sorted[j] = page;
2110 struct heap_page *hipage = heap_pages_sorted[heap_allocated_pages - 1];
2111 uintptr_t himem = (uintptr_t)hipage->start + (hipage->total_slots * hipage->slot_size);
2112 GC_ASSERT(himem <= heap_pages_himem);
2113 heap_pages_himem = himem;
2115 GC_ASSERT(j == heap_allocated_pages);
heap_page_body_allocate(void)
    if (HEAP_PAGE_ALLOC_USE_MMAP) {
        GC_ASSERT(HEAP_PAGE_ALIGN % sysconf(_SC_PAGE_SIZE) == 0);
        char *ptr = mmap(NULL, HEAP_PAGE_ALIGN + HEAP_PAGE_SIZE,
                         PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ptr == MAP_FAILED) {
        char *aligned = ptr + HEAP_PAGE_ALIGN;
        aligned -= ((VALUE)aligned & (HEAP_PAGE_ALIGN - 1));
        GC_ASSERT(aligned > ptr);
        GC_ASSERT(aligned <= ptr + HEAP_PAGE_ALIGN);
        size_t start_out_of_range_size = aligned - ptr;
        GC_ASSERT(start_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
        if (start_out_of_range_size > 0) {
            if (munmap(ptr, start_out_of_range_size)) {
                rb_bug("heap_page_body_allocate: munmap failed for start");
        size_t end_out_of_range_size = HEAP_PAGE_ALIGN - start_out_of_range_size;
        GC_ASSERT(end_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
        if (end_out_of_range_size > 0) {
            if (munmap(aligned + HEAP_PAGE_SIZE, end_out_of_range_size)) {
                rb_bug("heap_page_body_allocate: munmap failed for end");
        page_body = rb_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
    GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
    uintptr_t start, end, p;
    uintptr_t hi, lo, mid;
    size_t stride = size_pool->slot_size;
    unsigned int limit = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)))/(int)stride;
    if (page_body == 0) {
    page = calloc1(sizeof(struct heap_page));
        heap_page_body_free(page_body);
    if (start % BASE_SLOT_SIZE != 0) {
        int delta = BASE_SLOT_SIZE - (start % BASE_SLOT_SIZE);
        start = start + delta;
        GC_ASSERT(NUM_IN_PAGE(start) == 0 || NUM_IN_PAGE(start) == 1);
        if (NUM_IN_PAGE(start) == 1) {
            start += stride - BASE_SLOT_SIZE;
        GC_ASSERT(NUM_IN_PAGE(start) * BASE_SLOT_SIZE % stride == 0);
    limit = (HEAP_PAGE_SIZE - (int)(start - (uintptr_t)page_body))/(int)stride;
    end = start + (limit * (int)stride);
    hi = (uintptr_t)heap_allocated_pages;
        mid = (lo + hi) / 2;
        mid_page = heap_pages_sorted[mid];
        if ((uintptr_t)mid_page->start < start) {
        else if ((uintptr_t)mid_page->start > start) {
            rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
    if (hi < (uintptr_t)heap_allocated_pages) {
        MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi],
                struct heap_page_header*, heap_allocated_pages - hi);
    heap_pages_sorted[hi] = page;
    heap_allocated_pages++;
    GC_ASSERT(heap_eden_total_pages(objspace) + heap_allocatable_pages(objspace) <= heap_pages_sorted_length);
    GC_ASSERT(heap_eden_total_pages(objspace) + heap_tomb_total_pages(objspace) == heap_allocated_pages - 1);
    GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
    size_pool->total_allocated_pages++;
    if (heap_allocated_pages > heap_pages_sorted_length) {
        rb_bug("heap_page_allocate: allocated(%"PRIdSIZE") > sorted(%"PRIdSIZE")",
               heap_allocated_pages, heap_pages_sorted_length);
    if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
    if (heap_pages_himem < end) heap_pages_himem = end;
    page->start = start;
    page->total_slots = limit;
    page->slot_size = size_pool->slot_size;
    page->size_pool = size_pool;
    page_body->header.page = page;
    for (p = start; p != end; p += stride) {
        gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
        heap_page_add_freeobj(objspace, page, (VALUE)p);
    page->free_slots = limit;
    asan_lock_freelist(page);
    ccan_list_for_each_safe(&SIZE_POOL_TOMB_HEAP(size_pool)->pages, page, next, page_node) {
        asan_unlock_freelist(page);
        if (page->freelist != NULL) {
            heap_unlink_page(objspace, &size_pool->tomb_heap, page);
            asan_lock_freelist(page);
    const char *method = "recycle";
    size_pool->allocatable_pages--;
    page = heap_page_resurrect(objspace, size_pool);
        page = heap_page_allocate(objspace, size_pool);
        method = "allocate";
    if (0) fprintf(stderr, "heap_page_create: %s - %p, "
                   "heap_pages_sorted_length: %"PRIdSIZE", "
                   "heap_allocated_pages: %"PRIdSIZE", "
                   "tomb->total_pages: %"PRIdSIZE"\n",
                   method, (void *)page, heap_pages_sorted_length, heap_allocated_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
    GC_ASSERT(!(heap == SIZE_POOL_EDEN_HEAP(size_pool) && heap->sweeping_page));
    page->flags.in_tomb = (heap == SIZE_POOL_TOMB_HEAP(size_pool));
    ccan_list_add_tail(&heap->pages, &page->page_node);
    heap->total_pages++;
    heap->total_slots += page->total_slots;
    struct heap_page *page = heap_page_create(objspace, size_pool);
    heap_add_page(objspace, size_pool, heap, page);
    heap_add_freepage(heap, page);
    size_pool_allocatable_pages_set(objspace, size_pool, add);
    for (i = 0; i < add; i++) {
        heap_assign_page(objspace, size_pool, heap);
    GC_ASSERT(size_pool->allocatable_pages == 0);
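/*
 * The page-count sizing logic below decides how many pages to add after a
 * GC.  A rough sketch of the rule, with G = gc_params.heap_free_slots_goal_ratio:
 * the heap is grown by a factor f = live_slots / ((1 - G) * total_slots),
 * capped at gc_params.growth_factor and bumped to 1.1 when it falls below
 * 1.0, so that roughly a G fraction of the resulting heap is free.
 */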
    double goal_ratio = gc_params.heap_free_slots_goal_ratio;
    if (goal_ratio == 0.0) {
        next_used = (size_t)(used * gc_params.growth_factor);
    else if (total_slots == 0) {
        int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
        next_used = (gc_params.heap_init_slots * multiple) / HEAP_PAGE_OBJ_LIMIT;
        double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);
        if (f > gc_params.growth_factor) f = gc_params.growth_factor;
        if (f < 1.0) f = 1.1;
        next_used = (size_t)(f * used);
                    "free_slots(%8"PRIuSIZE")/total_slots(%8"PRIuSIZE")=%1.2f,"
                    " G(%1.2f), f(%1.2f),"
                    " used(%8"PRIuSIZE") => next_used(%8"PRIuSIZE")\n",
                    free_slots, total_slots, free_slots/(double)total_slots,
                    goal_ratio, f, used, next_used);
    if (gc_params.growth_max_slots > 0) {
        size_t max_used = (size_t)(used + gc_params.growth_max_slots/HEAP_PAGE_OBJ_LIMIT);
        if (next_used > max_used) next_used = max_used;
    size_t extend_page_count = next_used - used;
    if (extend_page_count == 0) extend_page_count = 1;
    return extend_page_count;
    if (size_pool->allocatable_pages > 0) {
        gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %"PRIdSIZE", "
                  "heap_pages_inc: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n",
                  heap_pages_sorted_length, size_pool->allocatable_pages, heap->total_pages);
        GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
        GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
        heap_assign_page(objspace, size_pool, heap);
    if (heap->free_pages == NULL && is_incremental_marking(objspace)) {
        gc_marks_continue(objspace, size_pool, heap);
    if (heap->free_pages == NULL && is_lazy_sweeping(objspace)) {
        gc_sweep_continue(objspace, size_pool, heap);
    GC_ASSERT(heap->free_pages == NULL);
    gc_continue(objspace, size_pool, heap);
    if (heap->free_pages == NULL &&
        (will_be_incremental_marking(objspace) ||
         (heap_increment(objspace, size_pool, heap) == FALSE))) {
        if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
            gc_continue(objspace, size_pool, heap);
    if (heap->free_pages == NULL && !heap_increment(objspace, size_pool, heap)) {
        if (objspace->rgengc.need_major_gc == GPR_FLAG_NONE) {
            rb_bug("cannot create a new page after GC");
            if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
                gc_continue(objspace, size_pool, heap);
                if (heap->free_pages == NULL &&
                    !heap_increment(objspace, size_pool, heap)) {
                    rb_bug("cannot create a new page after major GC");
    GC_ASSERT(heap->free_pages != NULL);
2471 objspace->flags.has_hook = (objspace->hook_events != 0);
2477 const VALUE *pc = ec->cfp->pc;
2478 if (pc && VM_FRAME_RUBYFRAME_P(ec->cfp)) {
2482 EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, data);
2486#define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook)
2487#define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))
2489#define gc_event_hook_prep(objspace, event, data, prep) do { \
2490 if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
2492 gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
2496#define gc_event_hook(objspace, event, data) gc_event_hook_prep(objspace, event, data, (void)0)
2501#if !__has_feature(memory_sanitizer)
    p->as.basic.flags = flags;
2509#if RACTOR_CHECK_MODE
2510 rb_ractor_setup_belonging(obj);
2513#if RGENGC_CHECK_MODE
2514 p->as.values.v1 = p->as.values.v2 = p->as.values.v3 = 0;
2516 RB_VM_LOCK_ENTER_NO_BARRIER();
2518 check_rvalue_consistency(obj);
2520 GC_ASSERT(RVALUE_MARKED(obj) == FALSE);
2521 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
2522 GC_ASSERT(RVALUE_OLD_P(obj) == FALSE);
2523 GC_ASSERT(RVALUE_WB_UNPROTECTED(obj) == FALSE);
        if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
        if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
        if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
    RB_VM_LOCK_LEAVE_NO_BARRIER();
2536 if (UNLIKELY(wb_protected == FALSE)) {
2537 ASSERT_vm_locking();
2538 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
2542 objspace->total_allocated_objects++;
2546 objspace->profile.total_generated_normal_object_count++;
2547#if RGENGC_PROFILE >= 2
        objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
        objspace->profile.total_generated_shady_object_count++;
#if RGENGC_PROFILE >= 2
        objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
2560 RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
    gc_report(5, objspace, "newobj: %s\n", obj_info(obj));
2566#if RGENGC_OLD_NEWOBJ_CHECK > 0
2568 static int newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
2570 if (!is_incremental_marking(objspace) &&
2573 if (--newobj_cnt == 0) {
2574 newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
2576 gc_mark_set(objspace, obj);
2577 RVALUE_AGE_SET_OLD(objspace, obj);
2579 rb_gc_writebarrier_remember(obj);
rb_gc_obj_slot_size(VALUE obj)
    return GET_HEAP_PAGE(obj)->slot_size - RVALUE_OVERHEAD;
size_pool_slot_size(unsigned char pool_id)
    GC_ASSERT(pool_id < SIZE_POOL_COUNT);
    size_t slot_size = (1 << pool_id) * BASE_SLOT_SIZE;
#if RGENGC_CHECK_MODE
    GC_ASSERT(size_pools[pool_id].slot_size == (short)slot_size);
    slot_size -= RVALUE_OVERHEAD;
rb_size_pool_slot_size(unsigned char pool_id)
    return size_pool_slot_size(pool_id);
rb_gc_size_allocatable_p(size_t size)
    return size <= size_pool_slot_size(SIZE_POOL_COUNT - 1);
                            size_t size_pool_idx)
    RVALUE *p = size_pool_cache->freelist;
#if GC_ENABLE_INCREMENTAL_MARK
    if (is_incremental_marking(objspace)) {
        if (cache->incremental_mark_step_allocated_slots >= INCREMENTAL_MARK_STEP_ALLOCATIONS) {
            cache->incremental_mark_step_allocated_slots++;
        MAYBE_UNUSED(const size_t) stride = size_pool_slot_size(size_pool_idx);
        size_pool_cache->freelist = p->as.free.next;
        asan_unpoison_memory_region(p, stride, true);
        asan_unpoison_object(obj, true);
#if RGENGC_CHECK_MODE
        GC_ASSERT(rb_gc_obj_slot_size(obj) == stride);
        MEMZERO((char *)obj, char, stride);
    ASSERT_vm_locking();
    if (heap->free_pages == NULL) {
        heap_prepare(objspace, size_pool, heap);
    page = heap->free_pages;
    heap->free_pages = page->free_next;
    GC_ASSERT(page->free_slots != 0);
    RUBY_DEBUG_LOG("page:%p freelist:%p cnt:%d", (void *)page, (void *)page->freelist, page->free_slots);
    asan_unlock_freelist(page);
    gc_report(3, &rb_objspace, "ractor_set_cache: Using page %p\n", (void *)GET_PAGE_BODY(page->start));
    GC_ASSERT(size_pool_cache->freelist == NULL);
    GC_ASSERT(page->free_slots != 0);
    GC_ASSERT(page->freelist != NULL);
    size_pool_cache->using_page = page;
    size_pool_cache->freelist = page->freelist;
    page->free_slots = 0;
    page->freelist = NULL;
    asan_unpoison_object((VALUE)size_pool_cache->freelist, false);
    GC_ASSERT(RB_TYPE_P((VALUE)size_pool_cache->freelist, T_NONE));
    asan_poison_object((VALUE)size_pool_cache->freelist);
    p->as.values.v1 = v1;
    p->as.values.v2 = v2;
    p->as.values.v3 = v3;
size_pool_idx_for_size(size_t size)
    size += RVALUE_OVERHEAD;
    size_t slot_count = CEILDIV(size, BASE_SLOT_SIZE);
    size_t size_pool_idx = 64 - nlz_int64(slot_count - 1);
    if (size_pool_idx >= SIZE_POOL_COUNT) {
        rb_bug("size_pool_idx_for_size: allocation size too large");
#if RGENGC_CHECK_MODE
    GC_ASSERT(size <= (size_t)size_pools[size_pool_idx].slot_size);
    if (size_pool_idx > 0) GC_ASSERT(size > (size_t)size_pools[size_pool_idx - 1].slot_size);
    return size_pool_idx;
    GC_ASSERT(size <= sizeof(RVALUE));
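/*
 * size_pool_idx_for_size() maps an allocation size to the smallest size
 * pool whose slots can hold it.  Pool i uses slots of (1 << i) *
 * BASE_SLOT_SIZE bytes, so the index is the ceiling of log2 of the
 * required slot count, computed as 64 - nlz_int64(slot_count - 1): a
 * request of up to one slot maps to pool 0, up to two slots to pool 1,
 * up to four slots to pool 2, and so on.
 */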
2750 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
2753 VALUE obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
    if (UNLIKELY(obj == Qfalse)) {
        bool unlock_vm = false;
            RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
2766 ASSERT_vm_locking();
2768#if GC_ENABLE_INCREMENTAL_MARK
2769 if (is_incremental_marking(objspace)) {
2770 gc_marks_continue(objspace, size_pool, heap);
2771 cache->incremental_mark_step_allocated_slots = 0;
2774 obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
2780 struct heap_page *page = heap_next_free_page(objspace, size_pool, heap);
2781 ractor_cache_set_page(cache, size_pool_idx, page);
2784 obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
            GC_ASSERT(obj != Qfalse);
2791 RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
2806 RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
2808 if (UNLIKELY(during_gc || ruby_gc_stressful)) {
            rb_bug("object allocation during garbage collection phase");
        if (ruby_gc_stressful) {
            if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
        obj = newobj_alloc(objspace, cr, size_pool_idx, true);
#if SHAPE_IN_BASIC_FLAGS
        flags |= (VALUE)(size_pool_idx) << SHAPE_FLAG_SHIFT;
2826 newobj_init(klass, flags, wb_protected, objspace, obj);
2830 RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags,
NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags,
    return newobj_slowpath(klass, flags, objspace, cr, TRUE, size_pool_idx);
    return newobj_slowpath(klass, flags, objspace, cr, FALSE, size_pool_idx);
2858 RB_DEBUG_COUNTER_INC(obj_newobj);
2859 (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
2861#if GC_DEBUG_STRESS_TO_CLASS
    if (UNLIKELY(stress_to_class)) {
        for (i = 0; i < cnt; ++i) {
            if (klass == RARRAY_AREF(stress_to_class, i)) rb_memerror();
    size_t size_pool_idx = size_pool_idx_for_size(alloc_size);
    if (!UNLIKELY(during_gc ||
                  ruby_gc_stressful ||
                  gc_event_hook_available_p(objspace)) &&
        obj = newobj_alloc(objspace, cr, size_pool_idx, false);
#if SHAPE_IN_BASIC_FLAGS
        flags |= (VALUE)size_pool_idx << SHAPE_FLAG_SHIFT;
        newobj_init(klass, flags, wb_protected, objspace, obj);
2883 RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
2885 obj = wb_protected ?
2886 newobj_slowpath_wb_protected(klass, flags, objspace, cr, size_pool_idx) :
2887 newobj_slowpath_wb_unprotected(klass, flags, objspace, cr, size_pool_idx);
2896 VALUE obj = newobj_of0(klass, flags, wb_protected, GET_RACTOR(), alloc_size);
2897 return newobj_fill(obj, v1, v2, v3);
2903 VALUE obj = newobj_of0(klass, flags, wb_protected, cr, alloc_size);
2904 return newobj_fill(obj, v1, v2, v3);
rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, size_t size)
    return newobj_of(klass, flags, 0, 0, 0, FALSE, size);
rb_wb_protected_newobj_of(VALUE klass, VALUE flags, size_t size)
    return newobj_of(klass, flags, 0, 0, 0, TRUE, size);
    return newobj_of_cr(rb_ec_ractor_ptr(ec), klass, flags, 0, 0, 0, TRUE, size);
    return newobj_of(0, T_NONE, 0, 0, 0, FALSE, RVALUE_SIZE);
rb_obj_embedded_size(uint32_t numiv)
rb_class_instance_allocate_internal(VALUE klass, VALUE flags, bool wb_protected)
    GC_ASSERT(flags & ROBJECT_EMBED);
    uint32_t index_tbl_num_entries = RCLASS_EXT(klass)->max_iv_count;
    size = rb_obj_embedded_size(index_tbl_num_entries);
    if (!rb_gc_size_allocatable_p(size)) {
        size = sizeof(struct RObject);
        size = sizeof(struct RObject);
    VALUE obj = newobj_of(klass, flags, 0, 0, 0, wb_protected, size);
                rb_shape_get_shape(obj)->type == SHAPE_INITIAL_CAPACITY);
        ROBJECT_SET_SHAPE_ID(obj, ROBJECT_SHAPE_ID(obj) + SIZE_POOL_COUNT);
        VALUE *ptr = ROBJECT_IVPTR(obj);
        for (size_t i = 0; i < ROBJECT_IV_CAPACITY(obj); i++) {
2990#define UNEXPECTED_NODE(func) \
2991 rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
2992 BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
rb_imemo_name(enum imemo_type type)
2999#define IMEMO_NAME(x) case imemo_##x: return #x;
3003 IMEMO_NAME(throw_data);
3010 IMEMO_NAME(parser_strterm);
3011 IMEMO_NAME(callinfo);
3012 IMEMO_NAME(callcache);
3013 IMEMO_NAME(constcache);
3024 size_t size = RVALUE_SIZE;
3026 return newobj_of(v0, flags, v1, v2, v3, TRUE, size);
3034 return newobj_of(v0, flags, v1, v2, v3, FALSE, size);
rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt)
    return rb_imemo_tmpbuf_new((VALUE)buf, 0, (VALUE)cnt, 0);
imemo_memsize(VALUE obj)
    switch (imemo_type(obj)) {
        size += sizeof(RANY(obj)->as.imemo.ment.def);
        size += rb_iseq_memsize((rb_iseq_t *)obj);
        size += RANY(obj)->as.imemo.env.env_size * sizeof(VALUE);
        size += RANY(obj)->as.imemo.alloc.cnt * sizeof(VALUE);
        size += rb_ast_memsize(&RANY(obj)->as.imemo.ast);
3071 case imemo_throw_data:
3074 case imemo_parser_strterm:
    VALUE memo = rb_imemo_new(type, v1, v2, v3, v0);
    fprintf(stderr, "memo %p (type: %d) @ %s:%d\n", (void *)memo, imemo_type(memo), file, line);
MJIT_FUNC_EXPORTED VALUE
rb_class_allocate_instance(VALUE klass)
rb_data_object_check(VALUE klass)
        rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
3112 if (klass) rb_data_object_check(klass);
3119 VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
3127 RBIMPL_NONNULL_ARG(
type);
3128 if (klass) rb_data_object_check(klass);
3135 VALUE obj = rb_data_typed_object_wrap(klass, 0, type);
3141rb_objspace_data_type_memsize(VALUE obj)
3143 if (RTYPEDDATA_P(obj)) {
3146 if (ptr && type->function.dsize) {
3147 return type->function.dsize(ptr);
3154rb_objspace_data_type_name(VALUE obj)
3156 if (RTYPEDDATA_P(obj)) {
3157 return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
3165ptr_in_page_body_p(const void *ptr, const void *memb)
3168 uintptr_t p_body = (uintptr_t)GET_PAGE_BODY(page->start);
3170 if ((uintptr_t)ptr >= p_body) {
3171 return (uintptr_t)ptr < (p_body + HEAP_PAGE_SIZE) ? 0 : 1;
3184 if (ptr < (uintptr_t)heap_pages_lomem ||
3185 ptr > (uintptr_t)heap_pages_himem) {
3189 res = bsearch((void *)ptr, heap_pages_sorted,
3190 (size_t)heap_allocated_pages, sizeof(struct heap_page *),
3191 ptr_in_page_body_p);
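/* heap_page_for_ptr()/is_pointer_to_heap(): conservative pointer test used when
 * scanning machine stacks. A word is only treated as an object reference if it
 * falls inside a known heap page (binary search over heap_pages_sorted), the page
 * is not a tomb page, and the address is aligned to that page's slot size. */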
3201PUREFUNC(static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr);)
3205 register uintptr_t p = (uintptr_t)ptr;
3208 RB_DEBUG_COUNTER_INC(gc_isptr_trial);
3210 if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
3211 RB_DEBUG_COUNTER_INC(gc_isptr_range);
3213 if (p % BASE_SLOT_SIZE != 0) return FALSE;
3214 RB_DEBUG_COUNTER_INC(gc_isptr_align);
3216 page = heap_page_for_ptr(objspace, (uintptr_t)ptr);
3218 RB_DEBUG_COUNTER_INC(gc_isptr_maybe);
3219 if (page->flags.in_tomb) {
3223 if (p < page->start) return FALSE;
3224 if (p >= page->start + (page->total_slots * page->slot_size)) return FALSE;
3225 if ((NUM_IN_PAGE(p) * BASE_SLOT_SIZE) % page->slot_size != 0) return FALSE;
3233static enum rb_id_table_iterator_result
3234free_const_entry_i(VALUE value, void *data)
3238 return ID_TABLE_CONTINUE;
3244 rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
3245 rb_id_table_free(tbl);
3254 for (int i=0; i<ccs->len; i++) {
3257 void *ptr = asan_unpoison_object_temporary((VALUE)cc);
3259 if (is_pointer_to_heap(objspace, (void *)cc) &&
3260 IMEMO_TYPE_P(cc, imemo_callcache) &&
3261 cc->klass == klass) {
3266 asan_poison_object((VALUE)cc);
3271 asan_poison_object((VALUE)cc);
3274 vm_cc_invalidate(cc);
3276 ruby_xfree(ccs->entries);
3284 RB_DEBUG_COUNTER_INC(ccs_free);
3285 vm_ccs_free(ccs, TRUE, NULL, Qundef);
3294static enum rb_id_table_iterator_result
3295cc_table_mark_i(ID id, VALUE ccs_ptr, void *data_ptr)
3299 VM_ASSERT(vm_ccs_p(ccs));
3300 VM_ASSERT(id == ccs->cme->called_id);
3302 if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
3303 rb_vm_ccs_free(ccs);
3304 return ID_TABLE_DELETE;
3307 gc_mark(data->objspace, (VALUE)ccs->cme);
3309 for (int i=0; i<ccs->len; i++) {
3310 VM_ASSERT(data->klass == ccs->entries[i].cc->klass);
3311 VM_ASSERT(vm_cc_check_cme(ccs->entries[i].cc, ccs->cme));
3313 gc_mark(data->objspace, (VALUE)ccs->entries[i].ci);
3314 gc_mark(data->objspace, (VALUE)ccs->entries[i].cc);
3316 return ID_TABLE_CONTINUE;
3323 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
3326 .objspace = objspace,
3329 rb_id_table_foreach(cc_tbl, cc_table_mark_i, &data);
3333static enum rb_id_table_iterator_result
3334cc_table_free_i(
VALUE ccs_ptr,
void *data_ptr)
3338 VM_ASSERT(vm_ccs_p(ccs));
3339 vm_ccs_free(ccs, data->alive, data->objspace, data->klass);
3340 return ID_TABLE_CONTINUE;
3346 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
3350 .objspace = objspace,
3354 rb_id_table_foreach_values(cc_tbl, cc_table_free_i, &data);
3355 rb_id_table_free(cc_tbl);
3359static enum rb_id_table_iterator_result
3360cvar_table_free_i(VALUE value, void * ctx)
3362 xfree((void *) value);
3363 return ID_TABLE_CONTINUE;
3367rb_cc_table_free(VALUE klass)
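/* make_zombie(): turns a swept object into a T_ZOMBIE that carries its dfree
 * callback and data, and links it onto heap_pages_deferred_final so the actual
 * free runs later in finalize_list(). */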
3375 struct RZombie *zombie = RZOMBIE(obj);
3377 zombie->dfree = dfree;
3378 zombie->data = data;
3379 VALUE prev, next = heap_pages_deferred_final;
3381 zombie->next = prev = next;
3383 } while (next != prev);
3385 struct heap_page *page = GET_HEAP_PAGE(obj);
3386 page->final_slots++;
3387 heap_pages_final_slots++;
3393 rb_io_t *fptr = RANY(obj)->as.file.fptr;
3394 make_zombie(objspace, obj, rb_io_fptr_finalize_internal, fptr);
3400 ASSERT_vm_locking();
3401 st_data_t o = (st_data_t)obj, id;
3406 if (st_delete(objspace->obj_to_id_tbl, &o, &id)) {
3408 st_delete(objspace->id_to_obj_tbl, &id, NULL);
3411 rb_bug("Object ID seen, but not in mapping table: %s\n", obj_info(obj));
3418 RB_DEBUG_COUNTER_INC(obj_free);
3428 rb_bug("obj_free() called for broken object");
3440 obj_free_object_id(objspace, obj);
3443 if (RVALUE_WB_UNPROTECTED(obj)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
3445#if RGENGC_CHECK_MODE
3446#define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
3447 CHECK(RVALUE_WB_UNPROTECTED);
3448 CHECK(RVALUE_MARKED);
3449 CHECK(RVALUE_MARKING);
3450 CHECK(RVALUE_UNCOLLECTIBLE);
3456 if (rb_shape_obj_too_complex(obj)) {
3457 RB_DEBUG_COUNTER_INC(obj_obj_too_complex);
3458 rb_id_table_free(ROBJECT_IV_HASH(obj));
3460 else if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
3461 RB_DEBUG_COUNTER_INC(obj_obj_embed);
3463 else if (ROBJ_TRANSIENT_P(obj)) {
3464 RB_DEBUG_COUNTER_INC(obj_obj_transient);
3467 xfree(RANY(obj)->as.object.as.heap.ivptr);
3468 RB_DEBUG_COUNTER_INC(obj_obj_ptr);
3473 rb_id_table_free(RCLASS_M_TBL(obj));
3474 cc_table_free(objspace, obj, FALSE);
3475 if (RCLASS_IVPTR(obj)) {
3476 xfree(RCLASS_IVPTR(obj));
3478 if (RCLASS_CONST_TBL(obj)) {
3479 rb_free_const_table(RCLASS_CONST_TBL(obj));
3481 if (RCLASS_CVC_TBL(obj)) {
3482 rb_id_table_foreach_values(RCLASS_CVC_TBL(obj), cvar_table_free_i, NULL);
3483 rb_id_table_free(RCLASS_CVC_TBL(obj));
3485 rb_class_remove_subclass_head(obj);
3486 rb_class_remove_from_module_subclasses(obj);
3487 rb_class_remove_from_super_subclasses(obj);
3488 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
3489 xfree(RCLASS_SUPERCLASSES(obj));
3492#if SIZE_POOL_COUNT == 1
3493 if (RCLASS_EXT(obj))
3494 xfree(RCLASS_EXT(obj));
3507#if USE_DEBUG_COUNTER
3510 RB_DEBUG_COUNTER_INC(obj_hash_empty);
3513 RB_DEBUG_COUNTER_INC(obj_hash_1);
3516 RB_DEBUG_COUNTER_INC(obj_hash_2);
3519 RB_DEBUG_COUNTER_INC(obj_hash_3);
3522 RB_DEBUG_COUNTER_INC(obj_hash_4);
3528 RB_DEBUG_COUNTER_INC(obj_hash_5_8);
3532 RB_DEBUG_COUNTER_INC(obj_hash_g8);
3535 if (RHASH_AR_TABLE_P(obj)) {
3536 if (RHASH_AR_TABLE(obj) == NULL) {
3537 RB_DEBUG_COUNTER_INC(obj_hash_null);
3540 RB_DEBUG_COUNTER_INC(obj_hash_ar);
3544 RB_DEBUG_COUNTER_INC(obj_hash_st);
3551 if (RHASH_TRANSIENT_P(obj)) {
3552 RB_DEBUG_COUNTER_INC(obj_hash_transient);
3560 GC_ASSERT(RHASH_ST_TABLE_P(obj));
3561 st_free_table(RHASH(obj)->as.st);
3565 if (RANY(obj)->as.regexp.ptr) {
3566 onig_free(RANY(obj)->as.regexp.ptr);
3567 RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
3572 int free_immediately = FALSE;
3573 void (*dfree)(void *);
3576 if (RTYPEDDATA_P(obj)) {
3577 free_immediately = (RANY(obj)->as.typeddata.type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
3578 dfree = RANY(obj)->as.typeddata.type->function.dfree;
3579 if (0 && free_immediately == 0) {
3581 fprintf(stderr, "not immediate -> %s\n", RANY(obj)->as.typeddata.type->wrap_struct_name);
3585 dfree = RANY(obj)->as.data.dfree;
3591 RB_DEBUG_COUNTER_INC(obj_data_xfree);
3593 else if (free_immediately) {
3595 RB_DEBUG_COUNTER_INC(obj_data_imm_free);
3598 make_zombie(objspace, obj, dfree, data);
3599 RB_DEBUG_COUNTER_INC(obj_data_zombie);
3604 RB_DEBUG_COUNTER_INC(obj_data_empty);
3609 if (RANY(obj)->as.match.rmatch) {
3610 struct rmatch *rm = RANY(obj)->as.match.rmatch;
3611#if USE_DEBUG_COUNTER
3612 if (rm->regs.num_regs >= 8) {
3613 RB_DEBUG_COUNTER_INC(obj_match_ge8);
3615 else if (rm->regs.num_regs >= 4) {
3616 RB_DEBUG_COUNTER_INC(obj_match_ge4);
3618 else if (rm->regs.num_regs >= 1) {
3619 RB_DEBUG_COUNTER_INC(obj_match_under4);
3622 onig_region_free(&rm->regs, 0);
3627 RB_DEBUG_COUNTER_INC(obj_match_ptr);
3631 if (RANY(obj)->as.file.fptr) {
3632 make_io_zombie(objspace, obj);
3633 RB_DEBUG_COUNTER_INC(obj_file_ptr);
3638 RB_DEBUG_COUNTER_INC(obj_rational);
3641 RB_DEBUG_COUNTER_INC(obj_complex);
3647 if (RICLASS_OWNS_M_TBL_P(obj)) {
3649 rb_id_table_free(RCLASS_M_TBL(obj));
3651 if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
3652 rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj));
3654 rb_class_remove_subclass_head(obj);
3655 cc_table_free(objspace, obj, FALSE);
3656 rb_class_remove_from_module_subclasses(obj);
3657 rb_class_remove_from_super_subclasses(obj);
3659 xfree(RCLASS_EXT(obj));
3662 RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
3666 RB_DEBUG_COUNTER_INC(obj_float);
3670 if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
3671 xfree(BIGNUM_DIGITS(obj));
3672 RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
3675 RB_DEBUG_COUNTER_INC(obj_bignum_embed);
3680 UNEXPECTED_NODE(obj_free);
3684 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
3685 RANY(obj)->as.rstruct.as.heap.ptr == NULL) {
3686 RB_DEBUG_COUNTER_INC(obj_struct_embed);
3688 else if (RSTRUCT_TRANSIENT_P(obj)) {
3689 RB_DEBUG_COUNTER_INC(obj_struct_transient);
3692 xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
3693 RB_DEBUG_COUNTER_INC(obj_struct_ptr);
3699 rb_gc_free_dsymbol(obj);
3700 RB_DEBUG_COUNTER_INC(obj_symbol);
3705 switch (imemo_type(obj)) {
3707 rb_free_method_entry(&RANY(obj)->as.imemo.ment);
3708 RB_DEBUG_COUNTER_INC(obj_imemo_ment);
3711 rb_iseq_free(&RANY(obj)->as.imemo.iseq);
3712 RB_DEBUG_COUNTER_INC(obj_imemo_iseq);
3715 GC_ASSERT(VM_ENV_ESCAPED_P(RANY(obj)->as.imemo.env.ep));
3717 RB_DEBUG_COUNTER_INC(obj_imemo_env);
3720 xfree(RANY(obj)->as.imemo.alloc.ptr);
3721 RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
3724 rb_ast_free(&RANY(obj)->as.imemo.ast);
3725 RB_DEBUG_COUNTER_INC(obj_imemo_ast);
3728 RB_DEBUG_COUNTER_INC(obj_imemo_cref);
3731 RB_DEBUG_COUNTER_INC(obj_imemo_svar);
3733 case imemo_throw_data:
3734 RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);
3737 RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
3740 RB_DEBUG_COUNTER_INC(obj_imemo_memo);
3742 case imemo_parser_strterm:
3743 RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);
3745 case imemo_callinfo:
3746 RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);
3748 case imemo_callcache:
3749 RB_DEBUG_COUNTER_INC(obj_imemo_callcache);
3751 case imemo_constcache:
3752 RB_DEBUG_COUNTER_INC(obj_imemo_constcache);
3758 rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
3763 make_zombie(objspace, obj, 0, 0);
3772#define OBJ_ID_INCREMENT (sizeof(RVALUE) / 2)
3773#define OBJ_ID_INITIAL (OBJ_ID_INCREMENT * 2)
3776object_id_cmp(st_data_t x, st_data_t y)
3778 if (RB_BIGNUM_TYPE_P(x)) {
3779 return !rb_big_eql(x, y);
3787object_id_hash(st_data_t n)
3789 if (RB_BIGNUM_TYPE_P(n)) {
3793 return st_numhash(n);
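/* Object IDs are handed out lazily in OBJ_ID_INCREMENT steps from
 * objspace->next_object_id and tracked in two st tables (obj_to_id_tbl /
 * id_to_obj_tbl); the hash type below compares IDs that have outgrown Fixnum
 * as Bignums. */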
3796static const struct st_hash_type object_id_hash_type = {
3806#if defined(INIT_HEAP_PAGE_ALLOC_USE_MMAP)
3808 heap_page_alloc_use_mmap = INIT_HEAP_PAGE_ALLOC_USE_MMAP;
3811 objspace->next_object_id = INT2FIX(OBJ_ID_INITIAL);
3812 objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
3813 objspace->obj_to_id_tbl = st_init_numtable();
3815#if RGENGC_ESTIMATE_OLDMALLOC
3816 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
3819 heap_add_pages(objspace, &size_pools[0], SIZE_POOL_EDEN_HEAP(&size_pools[0]), gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT);
3822 for (int i = 1; i < SIZE_POOL_COUNT; i++) {
3824 int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
3825 size_pool->allocatable_pages = gc_params.heap_init_slots * multiple / HEAP_PAGE_OBJ_LIMIT;
3827 heap_pages_expand_sorted(objspace);
3829 init_mark_stack(&objspace->mark_stack);
3831 objspace->profile.invoke_time = getrusage_time();
3832 finalizer_table = st_init_numtable();
3840 gc_stress_set(objspace, ruby_initial_gc_stress);
3843typedef int each_obj_callback(void *, void *, size_t, void *);
3845static void objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected);
3846static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);
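/* objspace_each_objects(): the try/ensure pair below snapshots each size pool's
 * eden page list before iterating, so the callback may allocate (and thus add
 * pages) without invalidating the walk; the ensure part frees the snapshots and
 * re-enables incremental GC if it had been disabled for the iteration. */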
3850 bool reenable_incremental;
3852 each_obj_callback *callback;
3855 struct heap_page **pages[SIZE_POOL_COUNT];
3856 size_t pages_counts[SIZE_POOL_COUNT];
3860objspace_each_objects_ensure(VALUE arg)
3866 if (data->reenable_incremental) {
3867 objspace->flags.dont_incremental = FALSE;
3870 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3871 struct heap_page **pages = data->pages[i];
3883objspace_each_objects_try(VALUE arg)
3889 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3891 size_t size = size_mul_or_raise(SIZE_POOL_EDEN_HEAP(size_pool)->total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
3893 struct heap_page **pages = malloc(size);
3894 if (!pages) rb_memerror();
3902 size_t pages_count = 0;
3903 ccan_list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
3904 pages[pages_count] = page;
3907 data->pages[i] = pages;
3908 data->pages_counts[i] = pages_count;
3909 GC_ASSERT(pages_count == SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
3912 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3914 size_t pages_count = data->pages_counts[i];
3915 struct heap_page **pages = data->pages[i];
3917 struct heap_page *page = ccan_list_top(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, struct heap_page, page_node);
3918 for (size_t i = 0; i < pages_count; i++) {
3921 if (page == NULL) break;
3925 if (pages[i] != page) continue;
3927 uintptr_t pstart = (uintptr_t)page->start;
3928 uintptr_t pend = pstart + (page->total_slots * size_pool->slot_size);
3930 if (!__asan_region_is_poisoned((void *)pstart, pend - pstart) &&
3931 (*data->callback)((void *)pstart, (void *)pend, size_pool->slot_size, data->data)) {
3935 page = ccan_list_next(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node);
3979rb_objspace_each_objects(each_obj_callback *callback, void *data)
3981 objspace_each_objects(&rb_objspace, callback, data, TRUE);
3985objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected)
3988 bool reenable_incremental = FALSE;
3990 reenable_incremental = !objspace->flags.dont_incremental;
3993 objspace->flags.dont_incremental = TRUE;
3997 .objspace = objspace,
3998 .reenable_incremental = reenable_incremental,
4000 .callback = callback,
4004 .pages_counts = {0},
4011rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
4013 objspace_each_objects(&rb_objspace, callback, data, FALSE);
4022internal_object_p(VALUE obj)
4025 void *ptr = asan_unpoison_object_temporary(obj);
4026 bool used_p = p->as.basic.flags;
4031 UNEXPECTED_NODE(internal_object_p);
4040 if (!p->as.basic.klass) break;
4042 return rb_singleton_class_internal_p(obj);
4046 if (!p->as.basic.klass) break;
4050 if (ptr || ! used_p) {
4051 asan_poison_object(obj);
4057rb_objspace_internal_object_p(VALUE obj)
4059 return internal_object_p(obj);
4063os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
4068 for (; v != (VALUE)vend; v += stride) {
4069 if (!internal_object_p(v)) {
4089 rb_objspace_each_objects(os_obj_of_i, &oes);
4134 of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
4136 return os_obj_of(of);
4150 return rb_undefine_finalizer(obj);
4157 st_data_t data = obj;
4159 st_delete(finalizer_table, &data, 0);
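/* define_final0()/rb_define_finalizer(): finalizer blocks are stored in
 * finalizer_table keyed by the object, as a hidden (classless) Array of callable
 * objects; a finalizer whose receiver is the object itself only triggers a
 * warning, since such a reference keeps the object from being collected. */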
4165should_be_callable(VALUE block)
4174should_be_finalizable(VALUE obj)
4250 rb_scan_args(argc, argv, "11", &obj, &block);
4251 should_be_finalizable(obj);
4256 should_be_callable(block);
4259 if (rb_callable_receiver(block) == obj) {
4260 rb_warn("finalizer references object to be finalized");
4263 return define_final0(obj, block);
4275 if (st_lookup(finalizer_table, obj, &data)) {
4276 table = (VALUE)data;
4283 for (i = 0; i < len; i++) {
4292 rb_ary_push(table, block);
4296 RBASIC_CLEAR_CLASS(table);
4297 st_add_direct(finalizer_table, obj, table);
4308 should_be_finalizable(obj);
4309 should_be_callable(block);
4310 return define_final0(obj, block);
4321 if (st_lookup(finalizer_table, obj, &data)) {
4322 table = (VALUE)data;
4323 st_insert(finalizer_table, dest, table);
4338 VALUE errinfo = ec->errinfo;
4339 rb_warn("Exception in finalizer %+"PRIsVALUE, final);
4340 rb_ec_error_print(ec, errinfo);
4348 enum ruby_tag_type state;
4357#define RESTORE_FINALIZER() (\
4358 ec->cfp = saved.cfp, \
4359 ec->errinfo = saved.errinfo)
4361 saved.errinfo = ec->errinfo;
4362 saved.objid = rb_obj_id(obj);
4363 saved.cfp = ec->cfp;
4368 state = EC_EXEC_TAG();
4369 if (state != TAG_NONE) {
4371 warn_exception_in_finalizer(ec, ATOMIC_VALUE_EXCHANGE(saved.final, Qundef));
4373 for (i = saved.finished;
4375 saved.finished = ++i) {
4376 run_single_final(saved.final = RARRAY_AREF(table, i), saved.objid);
4379#undef RESTORE_FINALIZER
4385 st_data_t key, table;
4387 if (RZOMBIE(zombie)->dfree) {
4388 RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);
4391 key = (st_data_t)zombie;
4392 if (st_delete(finalizer_table, &key, &table)) {
4393 run_finalizer(objspace, zombie, (VALUE)table);
4403 asan_unpoison_object(zombie, false);
4404 next_zombie = RZOMBIE(zombie)->next;
4405 page = GET_HEAP_PAGE(zombie);
4407 run_final(objspace, zombie);
4413 obj_free_object_id(objspace, zombie);
4416 GC_ASSERT(heap_pages_final_slots > 0);
4417 GC_ASSERT(page->final_slots > 0);
4419 heap_pages_final_slots--;
4420 page->final_slots--;
4422 heap_page_add_freeobj(objspace, page, zombie);
4423 objspace->profile.total_freed_objects++;
4427 zombie = next_zombie;
4435 while ((zombie = ATOMIC_VALUE_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
4436 finalize_list(objspace, zombie);
4444 ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
4445 finalize_deferred_heap_pages(objspace);
4446 ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
4450gc_finalize_deferred(void *dmy)
4453 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
4455 finalize_deferred(objspace);
4456 ATOMIC_SET(finalizing, 0);
4463 rb_bug("gc_finalize_deferred_register: can't register finalizer.");
4474force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
4485bool rb_obj_is_main_ractor(VALUE gv);
4492#if RGENGC_CHECK_MODE >= 2
4493 gc_verify_internal_consistency(objspace);
4497 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
4500 finalize_deferred(objspace);
4501 GC_ASSERT(heap_pages_deferred_final == 0);
4505 objspace->flags.dont_incremental = 1;
4508 while (finalizer_table->num_entries) {
4510 st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
4513 st_data_t obj = (st_data_t)curr->obj;
4514 run_finalizer(objspace, curr->obj, curr->table);
4515 st_delete(finalizer_table, &obj, 0);
4525 unsigned int lock_lev;
4526 gc_enter(objspace, gc_enter_event_finalizer, &lock_lev);
4529 for (i = 0; i < heap_allocated_pages; i++) {
4530 struct heap_page *page = heap_pages_sorted[i];
4531 short stride = page->slot_size;
4533 uintptr_t p = (uintptr_t)page->start;
4534 uintptr_t pend = p + page->total_slots * stride;
4535 for (; p < pend; p += stride) {
4537 void *poisoned = asan_unpoison_object_temporary(vp);
4540 if (!DATA_PTR(p) || !RANY(p)->as.data.dfree) break;
4541 if (rb_obj_is_thread(vp)) break;
4542 if (rb_obj_is_mutex(vp)) break;
4543 if (rb_obj_is_fiber(vp)) break;
4544 if (rb_obj_is_main_ractor(vp)) break;
4545 if (RTYPEDDATA_P(vp)) {
4546 RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
4548 RANY(p)->as.free.flags = 0;
4552 else if (RANY(p)->as.data.dfree) {
4553 make_zombie(objspace, vp, RANY(p)->as.data.dfree, RANY(p)->as.data.data);
4557 if (RANY(p)->as.file.fptr) {
4558 make_io_zombie(objspace, vp);
4566 asan_poison_object(vp);
4571 gc_exit(objspace, gc_enter_event_finalizer, &lock_lev);
4573 finalize_deferred_heap_pages(objspace);
4575 st_free_table(finalizer_table);
4576 finalizer_table = 0;
4577 ATOMIC_SET(finalizing, 0);
4583 struct heap_page *page = GET_HEAP_PAGE(ptr);
4584 return page->flags.before_sweep ? FALSE : TRUE;
4591 if (!is_lazy_sweeping(objspace) ||
4592 is_swept_object(objspace, ptr) ||
4593 MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr)) {
4614 if (!is_garbage_object(objspace, ptr)) {
4626 check_rvalue_consistency(obj);
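/* is_garbage_object()/is_live_object(): during lazy sweeping an unmarked slot on
 * a page that has not been swept yet is already garbage even though it still
 * looks like an object; these predicates keep ObjectSpace iteration and id2ref
 * from resurrecting such objects. */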
4631rb_objspace_markable_object_p(VALUE obj)
4634 return is_markable_object(objspace, obj) && is_live_object(objspace, obj);
4638rb_objspace_garbage_object_p(VALUE obj)
4641 return is_garbage_object(objspace, obj);
4648 if (st_lookup(objspace->id_to_obj_tbl, objid, &orig)) {
4674#if SIZEOF_LONG == SIZEOF_VOIDP
4675#define NUM2PTR(x) NUM2ULONG(x)
4676#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
4677#define NUM2PTR(x) NUM2ULL(x)
4685 if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
4686 ptr = NUM2PTR(objid);
4693 ptr = obj_id_to_ref(objid);
4694 if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
4697 if (!rb_static_id_valid_p(symid))
4703 if (!UNDEF_P(orig = id2ref_obj_tbl(objspace, objid)) &&
4704 is_live_object(objspace, orig)) {
4710 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is id of the unshareable object on multi-ractor", rb_int2str(objid, 10));
4714 if (rb_int_ge(objid, objspace->next_object_id)) {
4726 return id2ref(objid);
4736#if SIZEOF_LONG == SIZEOF_VOIDP
4746 return get_heap_object_id(obj);
4750cached_object_id(VALUE obj)
4756 if (st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, &id)) {
4762 id = objspace->next_object_id;
4763 objspace->next_object_id = rb_int_plus(id, INT2FIX(OBJ_ID_INCREMENT));
4765 VALUE already_disabled = rb_gc_disable_no_rest();
4766 st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
4767 st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
4768 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
4777nonspecial_obj_id_(VALUE obj)
4779 return nonspecial_obj_id(obj);
4786 return rb_find_object_id(obj, nonspecial_obj_id_);
4848 return rb_find_object_id(obj, cached_object_id);
4851static enum rb_id_table_iterator_result
4852cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
4854 size_t *total_size = data_ptr;
4856 *total_size += sizeof(*ccs);
4857 *total_size += sizeof(ccs->entries[0]) * ccs->capa;
4858 return ID_TABLE_CONTINUE;
4864 size_t total = rb_id_table_memsize(cc_table);
4865 rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total);
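/* obj_memsize_of(): estimates the memory consumed by one object by adding its
 * out-of-line buffers (ivar arrays, class tables, string/array/hash storage,
 * typed-data dsize, ...) to its slot size; ObjectSpace.memsize_of is built on
 * top of this. */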
4870obj_memsize_of(VALUE obj, int use_all_types)
4879 size += rb_generic_ivar_memsize(obj);
4884 if (rb_shape_obj_too_complex(obj)) {
4885 size += rb_id_table_memsize(ROBJECT_IV_HASH(obj));
4887 else if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
4888 size += ROBJECT_IV_CAPACITY(obj) * sizeof(VALUE);
4893 if (RCLASS_EXT(obj)) {
4894 if (RCLASS_M_TBL(obj)) {
4895 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
4898 size += SIZEOF_VALUE << bit_length(RCLASS_IV_COUNT(obj));
4899 if (RCLASS_CVC_TBL(obj)) {
4900 size += rb_id_table_memsize(RCLASS_CVC_TBL(obj));
4902 if (RCLASS_EXT(obj)->const_tbl) {
4903 size += rb_id_table_memsize(RCLASS_EXT(obj)->const_tbl);
4905 if (RCLASS_CC_TBL(obj)) {
4906 size += cc_table_memsize(RCLASS_CC_TBL(obj));
4908 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
4909 size += (RCLASS_SUPERCLASS_DEPTH(obj) + 1) * sizeof(VALUE);
4911#if SIZE_POOL_COUNT == 1
4917 if (RICLASS_OWNS_M_TBL_P(obj)) {
4918 if (RCLASS_M_TBL(obj)) {
4919 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
4922 if (RCLASS_EXT(obj) && RCLASS_CC_TBL(obj)) {
4923 size += cc_table_memsize(RCLASS_CC_TBL(obj));
4927 size += rb_str_memsize(obj);
4930 size += rb_ary_memsize(obj);
4933 if (RHASH_AR_TABLE_P(obj)) {
4934 if (RHASH_AR_TABLE(obj) != NULL) {
4935 size_t rb_hash_ar_table_size(void);
4936 size += rb_hash_ar_table_size();
4940 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
4941 size += st_memsize(RHASH_ST_TABLE(obj));
4950 if (use_all_types) size += rb_objspace_data_type_memsize(obj);
4955 size += onig_region_memsize(&rm->regs);
4957 size += sizeof(struct rmatch);
4961 if (RFILE(obj)->fptr) {
4962 size += rb_io_memsize(RFILE(obj)->fptr);
4969 size += imemo_memsize(obj);
4977 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
4978 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
4983 UNEXPECTED_NODE(obj_memsize_of);
4987 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
4988 RSTRUCT(obj)->as.heap.ptr) {
4989 size += sizeof(VALUE) * RSTRUCT_LEN(obj);
4998 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
5002 return size + rb_gc_obj_slot_size(obj);
5006rb_obj_memsize_of(VALUE obj)
5008 return obj_memsize_of(obj, TRUE);
5012set_zero(st_data_t key, st_data_t val, st_data_t arg)
5016 rb_hash_aset(hash, k, INT2FIX(0));
5021type_sym(size_t type)
5024#define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
5093count_objects(int argc, VALUE *argv, VALUE os)
5102 if (rb_check_arity(argc, 0, 1) == 1) {
5104 if (!RB_TYPE_P(hash, T_HASH))
5108 for (i = 0; i <= T_MASK; i++) {
5112 for (i = 0; i < heap_allocated_pages; i++) {
5113 struct heap_page *page = heap_pages_sorted[i];
5114 short stride = page->slot_size;
5116 uintptr_t p = (uintptr_t)page->start;
5117 uintptr_t pend = p + page->total_slots * stride;
5118 for (;p < pend; p += stride) {
5120 GC_ASSERT((NUM_IN_PAGE(vp) * BASE_SLOT_SIZE) % page->slot_size == 0);
5122 void *poisoned = asan_unpoison_object_temporary(vp);
5123 if (RANY(p)->as.basic.flags) {
5131 asan_poison_object(vp);
5134 total += page->total_slots;
5138 hash = rb_hash_new();
5141 rb_hash_stlike_foreach(hash, set_zero, hash);
5146 for (i = 0; i <= T_MASK; i++) {
5164 size_t total_slots = 0;
5165 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5167 total_slots += SIZE_POOL_EDEN_HEAP(size_pool)->total_slots;
5168 total_slots += SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
5176 return (objspace->total_allocated_objects - objspace->profile.total_freed_objects) - heap_pages_final_slots;
5182 return objspace_available_slots(objspace) - objspace_live_slots(objspace) - heap_pages_final_slots;
5186gc_setup_mark_bits(struct heap_page *page)
5189 memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
5196enum {HEAP_PAGE_LOCK = PAGE_NOACCESS, HEAP_PAGE_UNLOCK = PAGE_READWRITE};
5202 return VirtualProtect(body, HEAP_PAGE_SIZE, protect, &old_protect) != 0;
5205enum {HEAP_PAGE_LOCK = PROT_NONE, HEAP_PAGE_UNLOCK = PROT_READ | PROT_WRITE};
5206#define protect_page_body(body, protect) !mprotect((body), HEAP_PAGE_SIZE, (protect))
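/* Compaction read barrier: moved-from pages are protected with
 * mprotect()/VirtualProtect(); any access to such a page faults into
 * read_barrier_handler(), which unprotects the page and invalidates the moves
 * out of it. */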
5212 if (!protect_page_body(body, HEAP_PAGE_LOCK)) {
5213 rb_bug("Couldn't protect page %p, errno: %s", (void *)body, strerror(errno));
5216 gc_report(5, objspace, "Protecting page in move %p\n", (void *)body);
5223 if (!protect_page_body(body, HEAP_PAGE_UNLOCK)) {
5224 rb_bug("Couldn't unprotect page %p, errno: %s", (void *)body, strerror(errno));
5227 gc_report(5, objspace, "Unprotecting page in move %p\n", (void *)body);
5234 GC_ASSERT(gc_is_moveable_obj(objspace, src));
5236 struct heap_page *src_page = GET_HEAP_PAGE(src);
5244 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(src), src));
5246 asan_unlock_freelist(free_page);
5248 asan_lock_freelist(free_page);
5249 asan_unpoison_object(dest, false);
5255 free_page->freelist = RANY(dest)->as.free.next;
5257 GC_ASSERT(RB_BUILTIN_TYPE(dest) == T_NONE);
5259 if (src_page->slot_size > free_page->slot_size) {
5260 objspace->rcompactor.moved_down_count_table[BUILTIN_TYPE(src)]++;
5262 else if (free_page->slot_size > src_page->slot_size) {
5263 objspace->rcompactor.moved_up_count_table[BUILTIN_TYPE(src)]++;
5265 objspace->rcompactor.moved_count_table[BUILTIN_TYPE(src)]++;
5266 objspace->rcompactor.total_moved++;
5268 gc_move(objspace, src, dest, src_page->slot_size, free_page->slot_size);
5269 gc_pin(objspace, src);
5270 free_page->free_slots--;
5278 struct heap_page *cursor = heap->compact_cursor;
5281 unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
5282 cursor = ccan_list_next(&heap->pages, cursor, page_node);
5289#ifndef GC_CAN_COMPILE_COMPACTION
5290#if defined(__wasi__)
5291# define GC_CAN_COMPILE_COMPACTION 0
5293# define GC_CAN_COMPILE_COMPACTION 1
5297#if defined(__MINGW32__) || defined(_WIN32)
5298# define GC_COMPACTION_SUPPORTED 1
5302# define GC_COMPACTION_SUPPORTED (GC_CAN_COMPILE_COMPACTION && HEAP_PAGE_ALLOC_USE_MMAP)
5305#if GC_CAN_COMPILE_COMPACTION
5307read_barrier_handler(uintptr_t original_address)
5313 uintptr_t address = original_address - (original_address % BASE_SLOT_SIZE);
5315 obj = (VALUE)address;
5321 if (page_body == NULL) {
5322 rb_bug("read_barrier_handler: segmentation fault at %p", (void *)original_address);
5327 unlock_page_body(objspace, page_body);
5329 objspace->profile.read_barrier_faults++;
5331 invalidate_moved_page(objspace, GET_HEAP_PAGE(obj));
5337#if !GC_CAN_COMPILE_COMPACTION
5339uninstall_handlers(void)
5345install_handlers(void)
5349#elif defined(_WIN32)
5350static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
5351typedef void (*signal_handler)(int);
5352static signal_handler old_sigsegv_handler;
5355read_barrier_signal(EXCEPTION_POINTERS * info)
5358 if (info->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
5363 read_barrier_handler((uintptr_t)info->ExceptionRecord->ExceptionInformation[1]);
5364 return EXCEPTION_CONTINUE_EXECUTION;
5367 return EXCEPTION_CONTINUE_SEARCH;
5372uninstall_handlers(void)
5374 signal(SIGSEGV, old_sigsegv_handler);
5375 SetUnhandledExceptionFilter(old_handler);
5379install_handlers(void)
5382 old_sigsegv_handler = signal(SIGSEGV, NULL);
5385 old_handler = SetUnhandledExceptionFilter(read_barrier_signal);
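/* On POSIX platforms the read barrier is driven by SIGBUS/SIGSEGV handlers; on
 * macOS the Mach EXC_BAD_ACCESS exception ports must also be swapped out
 * (disable_mach_bad_access_exc), otherwise the Mach exception would be handled
 * before the signal ever reaches the handler below. */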
5388static struct sigaction old_sigbus_handler;
5389static struct sigaction old_sigsegv_handler;
5391#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5392static exception_mask_t old_exception_masks[32];
5393static mach_port_t old_exception_ports[32];
5394static exception_behavior_t old_exception_behaviors[32];
5395static thread_state_flavor_t old_exception_flavors[32];
5396static mach_msg_type_number_t old_exception_count;
5399disable_mach_bad_access_exc(void)
5401 old_exception_count = sizeof(old_exception_masks) / sizeof(old_exception_masks[0]);
5402 task_swap_exception_ports(
5403 mach_task_self(), EXC_MASK_BAD_ACCESS,
5404 MACH_PORT_NULL, EXCEPTION_DEFAULT, 0,
5405 old_exception_masks, &old_exception_count,
5406 old_exception_ports, old_exception_behaviors, old_exception_flavors
5411restore_mach_bad_access_exc(void)
5413 for (mach_msg_type_number_t i = 0; i < old_exception_count; i++) {
5414 task_set_exception_ports(
5416 old_exception_masks[i], old_exception_ports[i],
5417 old_exception_behaviors[i], old_exception_flavors[i]
5424read_barrier_signal(int sig, siginfo_t * info, void * data)
5427 struct sigaction prev_sigbus, prev_sigsegv;
5428 sigaction(SIGBUS, &old_sigbus_handler, &prev_sigbus);
5429 sigaction(SIGSEGV, &old_sigsegv_handler, &prev_sigsegv);
5432 sigset_t set, prev_set;
5434 sigaddset(&set, SIGBUS);
5435 sigaddset(&set, SIGSEGV);
5436 sigprocmask(SIG_UNBLOCK, &set, &prev_set);
5437#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5438 disable_mach_bad_access_exc();
5441 read_barrier_handler((uintptr_t)info->si_addr);
5444#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5445 restore_mach_bad_access_exc();
5448 sigaction(SIGSEGV, &prev_sigsegv, NULL);
5449 sigprocmask(SIG_SETMASK, &prev_set, NULL);
5453uninstall_handlers(void)
5455#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5456 restore_mach_bad_access_exc();
5458 sigaction(SIGBUS, &old_sigbus_handler, NULL);
5459 sigaction(SIGSEGV, &old_sigsegv_handler, NULL);
5463install_handlers(void)
5466 memset(&action, 0, sizeof(struct sigaction));
5467 sigemptyset(&action.sa_mask);
5468 action.sa_sigaction = read_barrier_signal;
5469 action.sa_flags = SA_SIGINFO | SA_ONSTACK;
5471 sigaction(SIGBUS, &action, &old_sigbus_handler);
5472 sigaction(SIGSEGV, &action, &old_sigsegv_handler);
5473#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5474 disable_mach_bad_access_exc();
5480revert_stack_objects(VALUE stack_obj, void *ctx)
5488 invalidate_moved_page(objspace, GET_HEAP_PAGE(stack_obj));
5495 if (is_pointer_to_heap(objspace, (void *)v)) {
5500 invalidate_moved_page(objspace, GET_HEAP_PAGE(v));
5511 rb_vm_t *vm = rb_ec_vm_ptr(ec);
5512 rb_vm_each_stack_value(vm, revert_stack_objects, (void*)objspace);
5513 each_machine_stack_value(ec, revert_machine_stack_references);
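/* check_stack_for_moved() above reverts the move of any object still referenced
 * from the VM stacks or the machine stack (such references cannot be rewritten
 * safely); gc_compact_finish() below then unprotects all pages, updates the
 * remaining references and resets the per-heap compact cursors. */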
5516static void gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode);
5521 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5523 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5524 gc_unprotect_pages(objspace, heap);
5527 uninstall_handlers();
5534 check_stack_for_moved(objspace);
5536 gc_update_references(objspace);
5537 objspace->profile.compact_count++;
5539 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5541 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5542 heap->compact_cursor = NULL;
5543 heap->free_pages = NULL;
5544 heap->compact_cursor_index = 0;
5547 if (gc_prof_enabled(objspace)) {
5549 record->moved_objects = objspace->rcompactor.total_moved - record->moved_objects;
5551 objspace->flags.during_compacting = FALSE;
5564 struct heap_page * sweep_page = ctx->page;
5565 short slot_size = sweep_page->slot_size;
5566 short slot_bits = slot_size / BASE_SLOT_SIZE;
5567 GC_ASSERT(slot_bits > 0);
5571 GC_ASSERT(vp % BASE_SLOT_SIZE == 0);
5573 asan_unpoison_object(vp, false);
5577 gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
5578#if RGENGC_CHECK_MODE
5579 if (!is_full_marking(objspace)) {
5580 if (RVALUE_OLD_P(vp)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
5581 if (rgengc_remembered_sweep(objspace, vp)) rb_bug("page_sweep: %p - remembered.", (void *)p);
5584 if (obj_free(objspace, vp)) {
5587 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, BASE_SLOT_SIZE);
5588 heap_page_add_freeobj(objspace, sweep_page, vp);
5589 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
5598 if (objspace->flags.during_compacting) {
5604 rb_bug("T_MOVED shouldn't be seen until compaction is finished\n");
5606 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
5608 heap_page_add_freeobj(objspace, sweep_page, vp);
5619 bitset >>= slot_bits;
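/* gc_sweep_page(): walks a page's mark bitmap plane by plane; unmarked slots are
 * released through obj_free() and returned to the page freelist (or become
 * zombie/final slots), and the freed/empty/final counts are accumulated in the
 * sweep context. */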
5626 struct heap_page *sweep_page = ctx->page;
5627 GC_ASSERT(SIZE_POOL_EDEN_HEAP(sweep_page->size_pool) == heap);
5630 bits_t *bits, bitset;
5632 gc_report(2, objspace, "page_sweep: start.\n");
5634#if RGENGC_CHECK_MODE
5635 if (!objspace->flags.immediate_sweep) {
5636 GC_ASSERT(sweep_page->flags.before_sweep == TRUE);
5639 sweep_page->flags.before_sweep = FALSE;
5640 sweep_page->free_slots = 0;
5642 p = (uintptr_t)sweep_page->start;
5643 bits = sweep_page->mark_bits;
5645 int page_rvalue_count = sweep_page->total_slots * (sweep_page->slot_size / BASE_SLOT_SIZE);
5646 int out_of_range_bits = (NUM_IN_PAGE(p) + page_rvalue_count) % BITS_BITLENGTH;
5647 if (out_of_range_bits != 0) {
5648 bits[BITMAP_INDEX(p) + page_rvalue_count / BITS_BITLENGTH] |= ~(((bits_t)1 << out_of_range_bits) - 1);
5654 int bitmap_plane_count = CEILDIV(NUM_IN_PAGE(p) + page_rvalue_count, BITS_BITLENGTH);
5655 GC_ASSERT(bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT - 1 ||
5656 bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT);
5660 bitset >>= NUM_IN_PAGE(p);
5662 gc_sweep_plane(objspace, heap, p, bitset, ctx);
5664 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
5666 for (int i = 1; i < bitmap_plane_count; i++) {
5669 gc_sweep_plane(objspace, heap, p, bitset, ctx);
5671 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
5674 if (!heap->compact_cursor) {
5675 gc_setup_mark_bits(sweep_page);
5678#if GC_PROFILE_MORE_DETAIL
5679 if (gc_prof_enabled(objspace)) {
5681 record->removing_objects += ctx->final_slots + ctx->freed_slots;
5682 record->empty_objects += ctx->empty_slots;
5685 if (0) fprintf(stderr, "gc_sweep_page(%"PRIdSIZE"): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
5687 sweep_page->total_slots,
5688 ctx->freed_slots, ctx->empty_slots, ctx->final_slots);
5690 sweep_page->free_slots += ctx->freed_slots + ctx->empty_slots;
5691 objspace->profile.total_freed_objects += ctx->freed_slots;
5693 if (heap_pages_deferred_final && !finalizing) {
5696 gc_finalize_deferred_register(objspace);
5700#if RGENGC_CHECK_MODE
5701 short freelist_len = 0;
5702 asan_unlock_freelist(sweep_page);
5703 RVALUE *ptr = sweep_page->freelist;
5706 ptr = ptr->as.free.next;
5708 asan_lock_freelist(sweep_page);
5709 if (freelist_len != sweep_page->free_slots) {
5710 rb_bug("inconsistent freelist length: expected %d but was %d", sweep_page->free_slots, freelist_len);
5714 gc_report(2, objspace, "page_sweep: end.\n");
5722 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5723 if (!heap->free_pages && heap_increment(objspace, size_pool, heap) == FALSE) {
5725 size_pool_allocatable_pages_set(objspace, size_pool, 1);
5726 if (!heap_increment(objspace, size_pool, heap)) {
5735gc_mode_name(enum gc_mode mode)
5738 case gc_mode_none: return "none";
5739 case gc_mode_marking: return "marking";
5740 case gc_mode_sweeping: return "sweeping";
5741 case gc_mode_compacting: return "compacting";
5742 default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
5747gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
5749#if RGENGC_CHECK_MODE
5750 enum gc_mode prev_mode = gc_mode(objspace);
5751 switch (prev_mode) {
5752 case gc_mode_none: GC_ASSERT(mode == gc_mode_marking); break;
5753 case gc_mode_marking: GC_ASSERT(mode == gc_mode_sweeping); break;
5754 case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none || mode == gc_mode_compacting); break;
5755 case gc_mode_compacting: GC_ASSERT(mode == gc_mode_none); break;
5758 if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
5759 gc_mode_set(objspace, mode);
5766 asan_unlock_freelist(page);
5767 if (page->freelist) {
5768 RVALUE *p = page->freelist;
5769 asan_unpoison_object((VALUE)p, false);
5770 while (p->as.free.next) {
5772 p = p->as.free.next;
5773 asan_poison_object((VALUE)prev);
5774 asan_unpoison_object((VALUE)p, false);
5776 p->as.free.next = freelist;
5777 asan_poison_object((VALUE)p);
5780 page->freelist = freelist;
5782 asan_lock_freelist(page);
5789 heap->sweeping_page = ccan_list_top(&heap->pages, struct heap_page, page_node);
5790 heap->free_pages = NULL;
5791#if GC_ENABLE_INCREMENTAL_MARK
5792 heap->pooled_pages = NULL;
5794 if (!objspace->flags.immediate_sweep) {
5797 ccan_list_for_each(&heap->pages, page, page_node) {
5798 page->flags.before_sweep = TRUE;
5803#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
5809 gc_mode_transition(objspace, gc_mode_sweeping);
5811#if GC_ENABLE_INCREMENTAL_MARK
5812 objspace->rincgc.pooled_slots = 0;
5815 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5817 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5819 gc_sweep_start_heap(objspace, heap);
5823 if (heap->sweeping_page == NULL) {
5824 GC_ASSERT(heap->total_pages == 0);
5825 GC_ASSERT(heap->total_slots == 0);
5826 gc_sweep_finish_size_pool(objspace, size_pool);
5832 ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
5833 rb_gc_ractor_newobj_cache_clear(&r->newobj_cache);
5841 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5842 size_t total_slots = heap->total_slots + SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
5843 size_t total_pages = heap->total_pages + SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5844 size_t swept_slots = size_pool->freed_slots + size_pool->empty_slots;
5846 size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
5853 while ((swept_slots < min_free_slots || swept_slots < gc_params.heap_init_slots) &&
5854 (resurrected_page = heap_page_resurrect(objspace, size_pool))) {
5855 swept_slots += resurrected_page->free_slots;
5857 heap_add_page(objspace, size_pool, heap, resurrected_page);
5858 heap_add_freepage(heap, resurrected_page);
5863 if (min_free_slots < gc_params.heap_init_slots && swept_slots < gc_params.heap_init_slots) {
5864 int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
5865 size_t extra_slots = gc_params.heap_init_slots - swept_slots;
5866 size_t extend_page_count = CEILDIV(extra_slots * multiple, HEAP_PAGE_OBJ_LIMIT);
5867 if (extend_page_count > size_pool->allocatable_pages) {
5868 size_pool_allocatable_pages_set(objspace, size_pool, extend_page_count);
5872 if (swept_slots < min_free_slots) {
5873 bool grow_heap = is_full_marking(objspace);
5875 if (!is_full_marking(objspace)) {
5878 bool is_growth_heap = (size_pool->empty_slots == 0 ||
5879 size_pool->freed_slots > size_pool->empty_slots) &&
5880 size_pool->allocatable_pages == 0;
5882 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
5885 else if (is_growth_heap) {
5886 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
5887 size_pool->force_major_gc_count++;
5892 size_t extend_page_count = heap_extend_pages(objspace, size_pool, swept_slots, total_slots, total_pages);
5894 if (extend_page_count > size_pool->allocatable_pages) {
5895 size_pool_allocatable_pages_set(objspace, size_pool, extend_page_count);
5905 gc_report(1, objspace, "gc_sweep_finish\n");
5907 gc_prof_set_heap_info(objspace);
5908 heap_pages_free_unused_pages(objspace);
5910 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5914 size_t tomb_pages = SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5915 if (size_pool->allocatable_pages < tomb_pages) {
5916 size_pool->allocatable_pages = tomb_pages;
5920 size_pool->freed_slots = 0;
5921 size_pool->empty_slots = 0;
5923#if GC_ENABLE_INCREMENTAL_MARK
5924 if (!will_be_incremental_marking(objspace)) {
5925 rb_heap_t *eden_heap = SIZE_POOL_EDEN_HEAP(size_pool);
5926 struct heap_page *end_page = eden_heap->free_pages;
5928 while (end_page->free_next) end_page = end_page->free_next;
5929 end_page->free_next = eden_heap->pooled_pages;
5932 eden_heap->free_pages = eden_heap->pooled_pages;
5934 eden_heap->pooled_pages = NULL;
5935 objspace->rincgc.pooled_slots = 0;
5940 heap_pages_expand_sorted(objspace);
5943 gc_mode_transition(objspace, gc_mode_none);
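/* gc_sweep_step(): lazily sweeps pages starting at heap->sweeping_page.
 * Completely empty pages are unlinked and moved to the tomb heap, pages with
 * free slots go back on the free (or pooled) page list, and the loop stops once
 * enough slots have been recovered for the current allocation. */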
5949 struct heap_page *sweep_page = heap->sweeping_page;
5950 int unlink_limit = GC_SWEEP_PAGES_FREEABLE_PER_STEP;
5952#if GC_ENABLE_INCREMENTAL_MARK
5953 int swept_slots = 0;
5955 bool need_pool = TRUE;
5957 int need_pool = will_be_incremental_marking(objspace) ? TRUE : FALSE;
5960 gc_report(2, objspace, "gc_sweep_step (need_pool: %d)\n", need_pool);
5962 gc_report(2, objspace, "gc_sweep_step\n");
5965 if (sweep_page == NULL) return FALSE;
5967#if GC_ENABLE_LAZY_SWEEP
5968 gc_prof_sweep_timer_start(objspace);
5972 RUBY_DEBUG_LOG("sweep_page:%p", (void *)sweep_page);
5980 gc_sweep_page(objspace, heap, &ctx);
5981 int free_slots = ctx.freed_slots + ctx.empty_slots;
5983 heap->sweeping_page = ccan_list_next(&heap->pages, sweep_page, page_node);
5985 if (sweep_page->final_slots + free_slots == sweep_page->total_slots &&
5986 heap_pages_freeable_pages > 0 &&
5988 heap_pages_freeable_pages--;
5991 heap_unlink_page(objspace, heap, sweep_page);
5992 heap_add_page(objspace, size_pool, SIZE_POOL_TOMB_HEAP(size_pool), sweep_page);
5994 else if (free_slots > 0) {
5996 size_pool->freed_slots += ctx.freed_slots;
5997 size_pool->empty_slots += ctx.empty_slots;
6000#if GC_ENABLE_INCREMENTAL_MARK
6002 heap_add_poolpage(objspace, heap, sweep_page);
6006 heap_add_freepage(heap, sweep_page);
6007 swept_slots += free_slots;
6008 if (swept_slots > GC_INCREMENTAL_SWEEP_SLOT_COUNT) {
6013 heap_add_freepage(heap, sweep_page);
6018 sweep_page->free_next = NULL;
6020 } while ((sweep_page = heap->sweeping_page));
6022 if (!heap->sweeping_page) {
6024 gc_sweep_finish_size_pool(objspace, size_pool);
6027 if (!has_sweeping_pages(objspace)) {
6028 gc_sweep_finish(objspace);
6032#if GC_ENABLE_LAZY_SWEEP
6033 gc_prof_sweep_timer_stop(objspace);
6036 return heap->free_pages != NULL;
6042 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6045 while (SIZE_POOL_EDEN_HEAP(size_pool)->sweeping_page) {
6046 gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
6054 GC_ASSERT(dont_gc_val() == FALSE);
6055 if (!GC_ENABLE_LAZY_SWEEP) return;
6057 unsigned int lock_lev;
6058 gc_enter(objspace, gc_enter_event_sweep_continue, &lock_lev);
6060 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6062 if (!gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool))) {
6065 if (size_pool == sweep_size_pool) {
6066 if (size_pool->allocatable_pages > 0) {
6067 heap_increment(objspace, size_pool, heap);
6071 gc_sweep_rest(objspace);
6079 gc_exit(objspace, gc_enter_event_sweep_continue, &lock_lev);
6092 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object));
6093 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
6095 CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object);
6097 object = rb_gc_location(forwarding_object);
6099 shape_id_t original_shape_id = 0;
6101 original_shape_id = RMOVED(forwarding_object)->original_shape_id;
6104 gc_move(objspace, object, forwarding_object, GET_HEAP_PAGE(object)->slot_size, page->slot_size);
6108 if (original_shape_id) {
6109 ROBJECT_SET_SHAPE_ID(forwarding_object, original_shape_id);
6112 struct heap_page *orig_page = GET_HEAP_PAGE(object);
6113 orig_page->free_slots++;
6114 heap_page_add_freeobj(objspace, orig_page, object);
6116 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
6121 p += BASE_SLOT_SIZE;
6131 bits_t *mark_bits, *pin_bits;
6134 mark_bits = page->mark_bits;
6135 pin_bits = page->pinned_bits;
6137 uintptr_t p = page->start;
6140 bitset = pin_bits[0] & ~mark_bits[0];
6141 bitset >>= NUM_IN_PAGE(p);
6142 invalidate_moved_plane(objspace, page, p, bitset);
6143 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
6145 for (i=1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
6148 bitset = pin_bits[i] & ~mark_bits[i];
6150 invalidate_moved_plane(objspace, page, p, bitset);
6151 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
6159 gc_mode_transition(objspace, gc_mode_compacting);
6161 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6162 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
6163 ccan_list_for_each(&heap->pages, page, page_node) {
6164 page->flags.before_sweep = TRUE;
6167 heap->compact_cursor = ccan_list_tail(&heap->pages, struct heap_page, page_node);
6168 heap->compact_cursor_index = 0;
6171 if (gc_prof_enabled(objspace)) {
6173 record->moved_objects = objspace->rcompactor.total_moved;
6176 memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
6177 memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));
6178 memset(objspace->rcompactor.moved_up_count_table, 0, T_MASK * sizeof(size_t));
6179 memset(objspace->rcompactor.moved_down_count_table, 0, T_MASK * sizeof(size_t));
6190 const unsigned int immediate_sweep = objspace->flags.immediate_sweep;
6192 gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
6194 gc_sweep_start(objspace);
6195 if (objspace->flags.during_compacting) {
6196 gc_sweep_compact(objspace);
6199 if (immediate_sweep) {
6200#if !GC_ENABLE_LAZY_SWEEP
6201 gc_prof_sweep_timer_start(objspace);
6203 gc_sweep_rest(objspace);
6204#if !GC_ENABLE_LAZY_SWEEP
6205 gc_prof_sweep_timer_stop(objspace);
6211 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6213 gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
6219 gc_heap_prepare_minimum_pages(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
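/* Mark stack: a linked list of fixed-size chunks (stack_chunk_t). Chunks are
 * cached and reused between GC cycles; push_mark_stack()/pop_mark_stack() below
 * grow or shrink the chunk list as marking proceeds. */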
6226stack_chunk_alloc(void)
6240 return stack->chunk == NULL;
6246 size_t size = stack->index;
6247 stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;
6250 size += stack->limit;
6251 chunk = chunk->next;
6259 chunk->next = stack->cache;
6260 stack->cache = chunk;
6261 stack->cache_size++;
6269 if (stack->unused_cache_size > (stack->cache_size/2)) {
6270 chunk = stack->cache;
6271 stack->cache = stack->cache->next;
6272 stack->cache_size--;
6275 stack->unused_cache_size = stack->cache_size;
6283 GC_ASSERT(stack->index == stack->limit);
6285 if (stack->cache_size > 0) {
6286 next = stack->cache;
6287 stack->cache = stack->cache->next;
6288 stack->cache_size--;
6289 if (stack->unused_cache_size > stack->cache_size)
6290 stack->unused_cache_size = stack->cache_size;
6293 next = stack_chunk_alloc();
6295 next->next = stack->chunk;
6296 stack->chunk = next;
6305 prev = stack->chunk->next;
6306 GC_ASSERT(stack->index == 0);
6307 add_stack_chunk_cache(stack, stack->chunk);
6308 stack->chunk = prev;
6309 stack->index = stack->limit;
6317 while (chunk != NULL) {
6327 mark_stack_chunk_list_free(stack->chunk);
6333 mark_stack_chunk_list_free(stack->cache);
6334 stack->cache_size = 0;
6335 stack->unused_cache_size = 0;
6363 if (stack->index == stack->limit) {
6364 push_mark_stack_chunk(stack);
6366 stack->chunk->data[stack->index++] = data;
6376 rb_bug("push_mark_stack() called for broken object");
6380 UNEXPECTED_NODE(push_mark_stack);
6384 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
6386 is_pointer_to_heap(&rb_objspace, (void *)data) ? "corrupted object" : "non object");
6392 if (is_mark_stack_empty(stack)) {
6395 if (stack->index == 1) {
6396 *data = stack->chunk->data[--stack->index];
6397 pop_mark_stack_chunk(stack);
6400 *data = stack->chunk->data[--stack->index];
6411 stack->index = stack->limit = STACK_CHUNK_SIZE;
6413 for (i=0; i < 4; i++) {
6414 add_stack_chunk_cache(stack, stack_chunk_alloc());
6416 stack->unused_cache_size = stack->cache_size;
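/* Conservative machine stack marking: the macros below compute the current stack
 * bounds (growth-direction dependent); gc_mark_maybe() later treats every word in
 * that range that passes is_pointer_to_heap() as a possible reference and marks
 * it pinned so compaction will not move it. */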
6421#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
6423#define STACK_START (ec->machine.stack_start)
6424#define STACK_END (ec->machine.stack_end)
6425#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
6427#if STACK_GROW_DIRECTION < 0
6428# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
6429#elif STACK_GROW_DIRECTION > 0
6430# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
6432# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
6433 : (size_t)(STACK_END - STACK_START + 1))
6435#if !STACK_GROW_DIRECTION
6436int ruby_stack_grow_direction;
6438ruby_get_stack_grow_direction(volatile VALUE *addr)
6441 SET_MACHINE_STACK_END(&end);
6443 if (end > addr) return ruby_stack_grow_direction = 1;
6444 return ruby_stack_grow_direction = -1;
6453 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
6454 return STACK_LENGTH;
6457#define PREVENT_STACK_OVERFLOW 1
6458#ifndef PREVENT_STACK_OVERFLOW
6459#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
6460# define PREVENT_STACK_OVERFLOW 1
6462# define PREVENT_STACK_OVERFLOW 0
6465#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
6471 size_t length = STACK_LENGTH;
6472 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
6474 return length > maximum_length;
6477#define stack_check(ec, water_mark) FALSE
6480#define STACKFRAME_FOR_CALL_CFUNC 2048
6482MJIT_FUNC_EXPORTED int
6485 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
6491 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
6511 if (end <= start) return;
6513 each_location(objspace, start, n, cb);
6519 gc_mark_locations(&rb_objspace, start, end, gc_mark_maybe);
6527 for (i=0; i<n; i++) {
6528 gc_mark(objspace, values[i]);
6533rb_gc_mark_values(long n, const VALUE *values)
6538 for (i=0; i<n; i++) {
6539 gc_mark_and_pin(objspace, values[i]);
6548 for (i=0; i<n; i++) {
6549 if (is_markable_object(objspace, values[i])) {
6550 gc_mark_and_pin(objspace, values[i]);
6556rb_gc_mark_vm_stack_values(long n, const VALUE *values)
6559 gc_mark_stack_values(objspace, n, values);
6563mark_value(st_data_t key, st_data_t value, st_data_t data)
6566 gc_mark(objspace, (VALUE)value);
6571mark_value_pin(st_data_t key, st_data_t value, st_data_t data)
6574 gc_mark_and_pin(objspace, (VALUE)value);
6581 if (!tbl || tbl->num_entries == 0) return;
6582 st_foreach(tbl, mark_value, (st_data_t)objspace);
6588 if (!tbl || tbl->num_entries == 0) return;
6589 st_foreach(tbl, mark_value_pin, (st_data_t)objspace);
6593mark_key(st_data_t key, st_data_t value, st_data_t data)
6596 gc_mark_and_pin(objspace, (VALUE)key);
6604 st_foreach(tbl, mark_key, (st_data_t)objspace);
6608pin_value(st_data_t key, st_data_t value, st_data_t data)
6611 gc_mark_and_pin(objspace, (VALUE)value);
6619 st_foreach(tbl, pin_value, (st_data_t)objspace);
6629mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
6633 gc_mark(objspace, (VALUE)key);
6634 gc_mark(objspace, (VALUE)value);
6639pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
6643 gc_mark_and_pin(objspace, (VALUE)key);
6644 gc_mark_and_pin(objspace, (VALUE)value);
6649pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
6653 gc_mark_and_pin(objspace, (VALUE)key);
6654 gc_mark(objspace, (VALUE)value);
6661 if (rb_hash_compare_by_id_p(hash)) {
6662 rb_hash_stlike_foreach(hash, pin_key_mark_value, (st_data_t)objspace);
6665 rb_hash_stlike_foreach(hash, mark_keyvalue, (st_data_t)objspace);
6668 if (RHASH_AR_TABLE_P(hash)) {
6669 if (LIKELY(during_gc) && RHASH_TRANSIENT_P(hash)) {
6670 rb_transient_heap_mark(hash, RHASH_AR_TABLE(hash));
6674 VM_ASSERT(!RHASH_TRANSIENT_P(hash));
6676 gc_mark(objspace, RHASH(hash)->ifnone);
6683 st_foreach(tbl, pin_key_pin_value, (st_data_t)objspace);
6697 gc_mark(objspace, me->owner);
6698 gc_mark(objspace, me->defined_class);
6701 switch (def->type) {
6702 case VM_METHOD_TYPE_ISEQ:
6704 gc_mark(objspace, (VALUE)def->body.iseq.cref);
6706 if (def->iseq_overload && me->defined_class) {
6709 gc_mark_and_pin(objspace, (VALUE)me);
6712 case VM_METHOD_TYPE_ATTRSET:
6713 case VM_METHOD_TYPE_IVAR:
6714 gc_mark(objspace, def->body.attr.location);
6716 case VM_METHOD_TYPE_BMETHOD:
6717 gc_mark(objspace, def->body.bmethod.proc);
6718 if (def->body.bmethod.hooks) rb_hook_list_mark(def->body.bmethod.hooks);
6720 case VM_METHOD_TYPE_ALIAS:
6721 gc_mark(objspace, (VALUE)def->body.alias.original_me);
6723 case VM_METHOD_TYPE_REFINED:
6724 gc_mark(objspace, (VALUE)def->body.refined.orig_me);
6725 gc_mark(objspace, (VALUE)def->body.refined.owner);
6727 case VM_METHOD_TYPE_CFUNC:
6728 case VM_METHOD_TYPE_ZSUPER:
6729 case VM_METHOD_TYPE_MISSING:
6730 case VM_METHOD_TYPE_OPTIMIZED:
6731 case VM_METHOD_TYPE_UNDEF:
6732 case VM_METHOD_TYPE_NOTIMPLEMENTED:
6738static enum rb_id_table_iterator_result
6739mark_method_entry_i(VALUE me, void *data)
6743 gc_mark(objspace, me);
6744 return ID_TABLE_CONTINUE;
6751 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
6755static enum rb_id_table_iterator_result
6756mark_const_entry_i(VALUE value, void *data)
6761 gc_mark(objspace, ce->value);
6762 gc_mark(objspace, ce->file);
6763 return ID_TABLE_CONTINUE;
6770 rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
6773#if STACK_GROW_DIRECTION < 0
6774#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
6775#elif STACK_GROW_DIRECTION > 0
6776#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
6778#define GET_STACK_BOUNDS(start, end, appendix) \
6779 ((STACK_END < STACK_START) ? \
6780 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
6786#if defined(__wasm__)
6789static VALUE *rb_stack_range_tmp[2];
6792rb_mark_locations(void *begin, void *end)
6794 rb_stack_range_tmp[0] = begin;
6795 rb_stack_range_tmp[1] = end;
6798# if defined(__EMSCRIPTEN__)
6803 emscripten_scan_stack(rb_mark_locations);
6804 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
6806 emscripten_scan_registers(rb_mark_locations);
6807 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
6814 VALUE *stack_start, *stack_end;
6816 GET_STACK_BOUNDS(stack_start, stack_end, 1);
6817 each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_maybe);
6819 rb_wasm_scan_locals(rb_mark_locations);
6820 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
6832 VALUE v[sizeof(rb_jmp_buf) / (sizeof(VALUE))];
6833 } save_regs_gc_mark;
6834 VALUE *stack_start, *stack_end;
6836 FLUSH_REGISTER_WINDOWS;
6837 memset(&save_regs_gc_mark, 0, sizeof(save_regs_gc_mark));
6839 rb_setjmp(save_regs_gc_mark.j);
6845 GET_STACK_BOUNDS(stack_start, stack_end, 1);
6847 each_location(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v), gc_mark_maybe);
6849 each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_maybe);
6857 VALUE *stack_start, *stack_end;
6859 GET_STACK_BOUNDS(stack_start, stack_end, 0);
6860 each_stack_location(objspace, ec, stack_start, stack_end, cb);
6866 each_machine_stack_value(ec, gc_mark_maybe);
6874 gc_mark_locations(objspace, stack_start, stack_end, cb);
6876#if defined(__mc68000__)
6877 gc_mark_locations(objspace,
6878 (VALUE*)((char*)stack_start + 2),
6879 (VALUE*)((char*)stack_end - 2), cb);
6898 (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));
6900 if (is_pointer_to_heap(objspace, (void *)obj)) {
6901 void *ptr = asan_unpoison_object_temporary(obj);
6909 gc_mark_and_pin(objspace, obj);
6915 asan_poison_object(obj);
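/* gc_mark_set() reports whether the mark bit was newly set; gc_grey() then pushes
 * the newly marked object onto the mark stack (and onto the marking bitmap while
 * incremental marking is active), and gc_aging() advances its generational age. */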
6929 ASSERT_vm_locking();
6930 if (RVALUE_MARKED(obj)) return 0;
6931 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
6938 struct heap_page *page = GET_HEAP_PAGE(obj);
6939 bits_t *uncollectible_bits = &page->uncollectible_bits[0];
6941 if (!MARKED_IN_BITMAP(uncollectible_bits, obj)) {
6942 page->flags.has_uncollectible_shady_objects = TRUE;
6943 MARK_IN_BITMAP(uncollectible_bits, obj);
6944 objspace->rgengc.uncollectible_wb_unprotected_objects++;
6946#if RGENGC_PROFILE > 0
6947 objspace->profile.total_remembered_shady_object_count++;
6948#if RGENGC_PROFILE >= 2
6949 objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
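/* rgengc_check_relation(): record an old->young (or old->shady) reference
 * discovered during marking so the remember set stays correct for minor GC. */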
6962 const VALUE old_parent = objspace->rgengc.parent_object;
6965 if (RVALUE_WB_UNPROTECTED(obj)) {
6966 if (gc_remember_unprotected(objspace, obj)) {
6967 gc_report(2, objspace, "relation: (O->S) %s -> %s\n", obj_info(old_parent), obj_info(obj));
6971 if (!RVALUE_OLD_P(obj)) {
6972 if (RVALUE_MARKED(obj)) {
6974 gc_report(2, objspace, "relation: (O->unmarked Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
6975 RVALUE_AGE_SET_OLD(objspace, obj);
6976 if (is_incremental_marking(objspace)) {
6977 if (!RVALUE_MARKING(obj)) {
6978 gc_grey(objspace, obj);
6982 rgengc_remember(objspace, obj);
6986 gc_report(2, objspace, "relation: (O->Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
6987 RVALUE_AGE_SET_CANDIDATE(objspace, obj);
6993 GC_ASSERT(old_parent == objspace->rgengc.parent_object);
6999#if RGENGC_CHECK_MODE
7000 if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
7001 if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));
7004#if GC_ENABLE_INCREMENTAL_MARK
7005 if (is_incremental_marking(objspace)) {
7006 MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
7010 push_mark_stack(&objspace->mark_stack, obj);
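/* gc_aging(): bump the age of marked objects on WB-protected slots and, during
 * a full mark, flag already-old slots as uncollectible on their page; every
 * marked slot is counted in objspace->marked_slots. */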
7016 struct heap_page *page = GET_HEAP_PAGE(obj);
7018 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
7019 check_rvalue_consistency(obj);
7021 if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
7022 if (!RVALUE_OLD_P(obj)) {
7023 gc_report(3, objspace, "gc_aging: YOUNG: %s\n", obj_info(obj));
7024 RVALUE_AGE_INC(objspace, obj);
7026 else if (is_full_marking(objspace)) {
7027 GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
7028 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
7031 check_rvalue_consistency(obj);
7033 objspace->marked_slots++;
7037static void reachable_objects_from_callback(VALUE obj);
7042 if (LIKELY(during_gc)) {
7043 rgengc_check_relation(objspace, obj);
7044 if (!gc_mark_set(objspace, obj)) return;
7047 if (objspace->rgengc.parent_object) {
7048 RUBY_DEBUG_LOG("%p (%s) parent:%p (%s)",
7049 (void *)obj, obj_type_name(obj),
7050 (void *)objspace->rgengc.parent_object, obj_type_name(objspace->rgengc.parent_object));
7053 RUBY_DEBUG_LOG("%p (%s)", (void *)obj, obj_type_name(obj));
7057 if (UNLIKELY(RB_TYPE_P(obj, T_NONE))) {
7059 rb_bug("try to mark T_NONE object");
7061 gc_aging(objspace, obj);
7062 gc_grey(objspace, obj);
7065 reachable_objects_from_callback(obj);
7072 GC_ASSERT(is_markable_object(objspace, obj));
7073 if (UNLIKELY(objspace->flags.during_compacting)) {
7074 if (LIKELY(during_gc)) {
7075 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
7083 if (!is_markable_object(objspace, obj)) return;
7084 gc_pin(objspace, obj);
7085 gc_mark_ptr(objspace, obj);
7091 if (!is_markable_object(objspace, obj)) return;
7092 gc_mark_ptr(objspace, obj);
7112rb_objspace_marked_object_p(VALUE obj)
7114 return RVALUE_MARKED(obj) ? TRUE : FALSE;
7120 if (RVALUE_OLD_P(obj)) {
7121 objspace->rgengc.parent_object = obj;
7124 objspace->rgengc.parent_object = Qfalse;
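/* gc_mark_imemo(): imemo objects have no uniform layout, so each imemo_type
 * gets its own marking logic in the switch below. */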
7131 switch (imemo_type(obj)) {
7136 if (LIKELY(env->ep)) {
7138 GC_ASSERT(env->ep[VM_ENV_DATA_INDEX_ENV] == obj);
7139 GC_ASSERT(VM_ENV_ESCAPED_P(env->ep));
7140 gc_mark_values(objspace, (long)env->env_size, env->env);
7141 VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
7142 gc_mark(objspace, (VALUE)rb_vm_env_prev_env(env));
7143 gc_mark(objspace, (VALUE)env->iseq);
7148 gc_mark(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
7149 gc_mark(objspace, (VALUE)RANY(obj)->as.imemo.cref.next);
7150 gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);
7153 gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
7154 gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
7155 gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
7156 gc_mark(objspace, RANY(obj)->as.imemo.svar.others);
7158 case imemo_throw_data:
7159 gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
7162 gc_mark_maybe(objspace, (VALUE)RANY(obj)->as.imemo.ifunc.data);
7165 gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
7166 gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
7167 gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);
7170 mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
7179 rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
7180 } while ((m = m->next) != NULL);
7184 rb_ast_mark(&RANY(obj)->as.imemo.ast);
7186 case imemo_parser_strterm:
7187 rb_strterm_mark(obj);
7189 case imemo_callinfo:
7191 case imemo_callcache:
7195 gc_mark(objspace, (VALUE)vm_cc_cme(cc));
7198 case imemo_constcache:
7201 gc_mark(objspace, ice->value);
7204#if VM_CHECK_MODE > 0
7206 VM_UNREACHABLE(gc_mark_imemo);
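/* gc_mark_children(): the main per-type marking routine; it dispatches on
 * BUILTIN_TYPE(obj) and marks every reference the object can hold. */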
7214 register RVALUE *any = RANY(obj);
7215 gc_mark_set_parent(objspace, obj);
7218 rb_mark_generic_ivar(obj);
7231 rb_bug("rb_gc_mark() called for broken object");
7235 UNEXPECTED_NODE(rb_gc_mark);
7239 gc_mark_imemo(objspace, obj);
7246 gc_mark(objspace, any->as.basic.klass);
7254 if (!RCLASS_EXT(obj)) break;
7256 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
7257 cc_table_mark(objspace, obj);
7258 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
7259 gc_mark(objspace, RCLASS_IVPTR(obj)[i]);
7261 mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
7265 if (RICLASS_OWNS_M_TBL_P(obj)) {
7266 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
7271 if (!RCLASS_EXT(obj)) break;
7273 if (RCLASS_INCLUDER(obj)) {
7274 gc_mark(objspace, RCLASS_INCLUDER(obj));
7276 mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
7277 cc_table_mark(objspace, obj);
7281 if (ARY_SHARED_P(obj)) {
7282 VALUE root = ARY_SHARED_ROOT(obj);
7283 gc_mark(objspace, root);
7288 for (i=0; i < len; i++) {
7289 gc_mark(objspace, ptr[i]);
7292 if (LIKELY(during_gc)) {
7293 if (!ARY_EMBED_P(obj) && RARRAY_TRANSIENT_P(obj)) {
7294 rb_transient_heap_mark(obj, ptr);
7301 mark_hash(objspace, obj);
7305 if (STR_SHARED_P(obj)) {
7317 if (mark_func) (*mark_func)(ptr);
7324 rb_shape_t *shape = rb_shape_get_shape_by_id(ROBJECT_SHAPE_ID(obj));
7325 if (rb_shape_obj_too_complex(obj)) {
7326 mark_m_tbl(objspace, ROBJECT_IV_HASH(obj));
7329 const VALUE *const ptr = ROBJECT_IVPTR(obj);
7331 uint32_t i, len = ROBJECT_IV_COUNT(obj);
7332 for (i = 0; i < len; i++) {
7333 gc_mark(objspace, ptr[i]);
7336 if (LIKELY(during_gc) &&
7337 ROBJ_TRANSIENT_P(obj)) {
7338 rb_transient_heap_mark(obj, ptr);
7342 VALUE klass = RBASIC_CLASS(obj);
7345 uint32_t num_of_ivs = shape->next_iv_index;
7346 if (RCLASS_EXT(klass)->max_iv_count < num_of_ivs) {
7347 RCLASS_EXT(klass)->max_iv_count = num_of_ivs;
7354 if (any->as.file.fptr) {
7355 gc_mark(objspace, any->as.file.fptr->self);
7356 gc_mark(objspace, any->as.file.fptr->pathv);
7367 gc_mark(objspace, any->as.regexp.src);
7371 gc_mark(objspace, any->as.match.regexp);
7372 if (any->as.match.str) {
7373 gc_mark(objspace, any->as.match.str);
7378 gc_mark(objspace, any->as.rational.num);
7379 gc_mark(objspace, any->as.rational.den);
7383 gc_mark(objspace, any->as.complex.real);
7384 gc_mark(objspace, any->as.complex.imag);
7390 const long len = RSTRUCT_LEN(obj);
7391 const VALUE *const ptr = RSTRUCT_CONST_PTR(obj);
7393 for (i=0; i<len; i++) {
7394 gc_mark(objspace, ptr[i]);
7397 if (LIKELY(during_gc) &&
7398 RSTRUCT_TRANSIENT_P(obj)) {
7399 rb_transient_heap_mark(obj, ptr);
7406 rb_gcdebug_print_obj_condition((VALUE)obj);
7411 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
7413 is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
7422gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
7426#if GC_ENABLE_INCREMENTAL_MARK
7427 size_t marked_slots_at_the_beginning = objspace->marked_slots;
7428 size_t popped_count = 0;
7431 while (pop_mark_stack(mstack, &obj)) {
7432 if (UNDEF_P(obj)) continue;
7434 if (RGENGC_CHECK_MODE && !RVALUE_MARKED(obj)) {
7435 rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
7437 gc_mark_children(objspace, obj);
7439#if GC_ENABLE_INCREMENTAL_MARK
7441 if (RGENGC_CHECK_MODE && !RVALUE_MARKING(obj)) {
7442 rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
7444 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
7447 if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
7457 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
7459 if (is_mark_stack_empty(mstack)) {
7460 shrink_stack_chunk_cache(mstack);
7469gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
7471 return gc_mark_stacked_objects(objspace, TRUE, count);
7477 return gc_mark_stacked_objects(objspace, FALSE, 0);
7481#define MAX_TICKS 0x100
7482static tick_t mark_ticks[MAX_TICKS];
7483static const char *mark_ticks_categories[MAX_TICKS];
7486show_mark_ticks(void)
7489 fprintf(stderr, "mark ticks result:\n");
7490 for (i=0; i<MAX_TICKS; i++) {
7491 const char *category = mark_ticks_categories[i];
7493 fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
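/* gc_mark_roots(): mark the VM root set.  Each MARK_CHECKPOINT() below labels
 * a category (vm, finalizers, machine_context, ...) both for the optional tick
 * profiler above and for consistency-check error reports. */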
7504gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
7508 rb_vm_t *vm = rb_ec_vm_ptr(ec);
7511 tick_t start_tick = tick();
7513 const char *prev_category = 0;
7515 if (mark_ticks_categories[0] == 0) {
7516 atexit(show_mark_ticks);
7520 if (categoryp) *categoryp = "xxx";
7522 objspace->rgengc.parent_object = Qfalse;
7525#define MARK_CHECKPOINT_PRINT_TICK(category) do { \
7526 if (prev_category) { \
7527 tick_t t = tick(); \
7528 mark_ticks[tick_count] = t - start_tick; \
7529 mark_ticks_categories[tick_count] = prev_category; \
7532 prev_category = category; \
7533 start_tick = tick(); \
7536#define MARK_CHECKPOINT_PRINT_TICK(category)
7539#define MARK_CHECKPOINT(category) do { \
7540 if (categoryp) *categoryp = category; \
7541 MARK_CHECKPOINT_PRINT_TICK(category); \
7544 MARK_CHECKPOINT("vm");
7547 if (vm->self) gc_mark(objspace, vm->self);
7549 MARK_CHECKPOINT("finalizers");
7550 mark_finalizer_tbl(objspace, finalizer_table);
7552 MARK_CHECKPOINT("machine_context");
7553 mark_current_machine_context(objspace, ec);
7556 MARK_CHECKPOINT("global_list");
7557 for (list = global_list; list; list = list->next) {
7558 gc_mark_maybe(objspace, *list->varptr);
7561 MARK_CHECKPOINT("end_proc");
7564 MARK_CHECKPOINT("global_tbl");
7565 rb_gc_mark_global_tbl();
7567 MARK_CHECKPOINT("object_id");
7568 rb_gc_mark(objspace->next_object_id);
7569 mark_tbl_no_pin(objspace, objspace->obj_to_id_tbl);
7571 if (stress_to_class) rb_gc_mark(stress_to_class);
7573 MARK_CHECKPOINT("finish");
7574#undef MARK_CHECKPOINT
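/* RGENGC_CHECK_MODE >= 4: build a table mapping every reachable object to the
 * list of roots/objects that refer to it (allrefs), used after marking to
 * explain any object the collector failed to mark. */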
7577#if RGENGC_CHECK_MODE >= 4
7579#define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
7580#define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
7581#define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
7589static struct reflist *
7590reflist_create(VALUE obj)
7592 struct reflist *refs = xmalloc(sizeof(struct reflist));
7595 refs->list[0] = obj;
7601reflist_destruct(struct reflist *refs)
7608reflist_add(struct reflist *refs, VALUE obj)
7610 if (refs->pos == refs->size) {
7612 SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
7615 refs->list[refs->pos++] = obj;
7619reflist_dump(struct reflist *refs)
7622 for (i=0; i<refs->pos; i++) {
7623 VALUE obj = refs->list[i];
7624 if (IS_ROOTSIG(obj)) {
7625 fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
7628 fprintf(stderr, "<%s>", obj_info(obj));
7630 if (i+1 < refs->pos) fprintf(stderr, ", ");
7635reflist_referred_from_machine_context(struct reflist *refs)
7638 for (i=0; i<refs->pos; i++) {
7639 VALUE obj = refs->list[i];
7640 if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
7655 const char *category;
7661allrefs_add(struct allrefs *data, VALUE obj)
7663 struct reflist *refs;
7666 if (st_lookup(data->references, obj, &r)) {
7667 refs = (struct reflist *)r;
7668 reflist_add(refs, data->root_obj);
7672 refs = reflist_create(data->root_obj);
7673 st_insert(data->references, obj, (st_data_t)refs);
7679allrefs_i(VALUE obj, void *ptr)
7681 struct allrefs *data = (struct allrefs *)ptr;
7683 if (allrefs_add(data, obj)) {
7684 push_mark_stack(&data->mark_stack, obj);
7689allrefs_roots_i(VALUE obj, void *ptr)
7691 struct allrefs *data = (struct allrefs *)ptr;
7692 if (strlen(data->category) == 0) rb_bug("!!!");
7693 data->root_obj = MAKE_ROOTSIG(data->category);
7695 if (allrefs_add(data, obj)) {
7696 push_mark_stack(&data->mark_stack, obj);
7699#define PUSH_MARK_FUNC_DATA(v) do { \
7700 struct gc_mark_func_data_struct *prev_mark_func_data = GET_RACTOR()->mfd; \
7701 GET_RACTOR()->mfd = (v);
7703#define POP_MARK_FUNC_DATA() GET_RACTOR()->mfd = prev_mark_func_data;} while (0)
7708 struct allrefs data;
7709 struct gc_mark_func_data_struct mfd;
7711 int prev_dont_gc = dont_gc_val();
7714 data.objspace = objspace;
7715 data.references = st_init_numtable();
7716 init_mark_stack(&data.mark_stack);
7718 mfd.mark_func = allrefs_roots_i;
7722 PUSH_MARK_FUNC_DATA(&mfd);
7723 GET_RACTOR()->mfd = &mfd;
7724 gc_mark_roots(objspace, &data.category);
7725 POP_MARK_FUNC_DATA();
7728 while (pop_mark_stack(&data.mark_stack, &obj)) {
7729 rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
7731 free_stack_chunks(&data.mark_stack);
7733 dont_gc_set(prev_dont_gc);
7734 return data.references;
7738objspace_allrefs_destruct_i(st_data_t key, st_data_t value, st_data_t ptr)
7740 struct reflist *refs = (struct reflist *)value;
7741 reflist_destruct(refs);
7746objspace_allrefs_destruct(struct st_table *refs)
7748 st_foreach(refs, objspace_allrefs_destruct_i, 0);
7749 st_free_table(refs);
7752#if RGENGC_CHECK_MODE >= 5
7754allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
7757 struct reflist *refs = (struct reflist *)v;
7758 fprintf(stderr, "[allrefs_dump_i] %s <- ", obj_info(obj));
7760 fprintf(stderr, "\n");
7767 VALUE size = objspace->rgengc.allrefs_table->num_entries;
7768 fprintf(stderr, "[all refs] (size: %"PRIuVALUE")\n", size);
7769 st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
7774gc_check_after_marks_i(st_data_t k, st_data_t v, st_data_t ptr)
7777 struct reflist *refs = (struct reflist *)v;
7781 if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj)) {
7782 fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
7783 fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
7786 if (reflist_referred_from_machine_context(refs)) {
7787 fprintf(stderr, " (marked from machine stack).\n");
7791 objspace->rgengc.error_count++;
7792 fprintf(stderr, "\n");
7799gc_marks_check(rb_objspace_t *objspace, st_foreach_callback_func *checker_func, const char *checker_name)
7801 size_t saved_malloc_increase = objspace->malloc_params.increase;
7802#if RGENGC_ESTIMATE_OLDMALLOC
7803 size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
7805 VALUE already_disabled = rb_objspace_gc_disable(objspace);
7807 objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
7810 st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
7813 if (objspace->rgengc.error_count > 0) {
7814#if RGENGC_CHECK_MODE >= 5
7815 allrefs_dump(objspace);
7817 if (checker_name) rb_bug("%s: GC has problem.", checker_name);
7820 objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
7821 objspace->rgengc.allrefs_table = 0;
7823 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
7824 objspace->malloc_params.increase = saved_malloc_increase;
7825#if RGENGC_ESTIMATE_OLDMALLOC
7826 objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
7834 size_t live_object_count;
7835 size_t zombie_object_count;
7838 size_t old_object_count;
7839 size_t remembered_shady_count;
7843check_generation_i(const VALUE child, void *ptr)
7846 const VALUE parent = data->parent;
7848 if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(parent));
7850 if (!RVALUE_OLD_P(child)) {
7851 if (!RVALUE_REMEMBERED(parent) &&
7852 !RVALUE_REMEMBERED(child) &&
7853 !RVALUE_UNCOLLECTIBLE(child)) {
7854 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
7861check_color_i(const VALUE child, void *ptr)
7864 const VALUE parent = data->parent;
7866 if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
7867 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
7868 obj_info(parent), obj_info(child));
7874check_children_i(const VALUE child, void *ptr)
7877 if (check_rvalue_consistency_force(child, FALSE) != 0) {
7878 fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
7879 obj_info(child), obj_info(data->parent));
7880 rb_print_backtrace();
7887verify_internal_consistency_i(void *page_start, void *page_end, size_t stride,
7893 for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
7894 void *poisoned = asan_unpoison_object_temporary(obj);
7896 if (is_live_object(objspace, obj)) {
7898 data->live_object_count++;
7903 if (!gc_object_moved_p(objspace, obj)) {
7905 rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
7909 if (RVALUE_OLD_P(obj)) data->old_object_count++;
7910 if (RVALUE_WB_UNPROTECTED(obj) && RVALUE_UNCOLLECTIBLE(obj)) data->remembered_shady_count++;
7912 if (!is_marking(objspace) && RVALUE_OLD_P(obj)) {
7915 rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
7918 if (is_incremental_marking(objspace)) {
7919 if (RVALUE_BLACK_P(obj)) {
7922 rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
7929 data->zombie_object_count++;
7934 asan_poison_object(obj);
7944 unsigned int has_remembered_shady = FALSE;
7945 unsigned int has_remembered_old = FALSE;
7946 int remembered_old_objects = 0;
7947 int free_objects = 0;
7948 int zombie_objects = 0;
7950 short slot_size = page->slot_size;
7951 uintptr_t start = (uintptr_t)page->start;
7952 uintptr_t end = start + page->total_slots * slot_size;
7954 for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
7956 void *poisoned = asan_unpoison_object_temporary(val);
7961 if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
7962 has_remembered_shady = TRUE;
7964 if (RVALUE_PAGE_MARKING(page, val)) {
7965 has_remembered_old = TRUE;
7966 remembered_old_objects++;
7971 asan_poison_object(val);
7975 if (!is_incremental_marking(objspace) &&
7976 page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {
7978 for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
7980 if (RVALUE_PAGE_MARKING(page, val)) {
7981 fprintf(stderr, "marking -> %s\n", obj_info(val));
7984 rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
7985 (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");
7988 if (page->flags.has_uncollectible_shady_objects == FALSE && has_remembered_shady == TRUE) {
7989 rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
7990 (void *)page, obj ? obj_info(obj) : "");
7995 if (page->free_slots != free_objects) {
7996 rb_bug("page %p's free_slots should be %d, but %d\n", (void *)page, page->free_slots, free_objects);
7999 if (page->final_slots != zombie_objects) {
8000 rb_bug("page %p's final_slots should be %d, but %d\n", (void *)page, page->final_slots, zombie_objects);
8003 return remembered_old_objects;
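/* Walk every page's freelist and bitmaps to cross-check the per-page flags and
 * counters (free_slots, final_slots, remember-set flags) against reality. */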
8009 int remembered_old_objects = 0;
8012 ccan_list_for_each(head, page, page_node) {
8013 asan_unlock_freelist(page);
8014 RVALUE *p = page->freelist;
8018 asan_unpoison_object(vp, false);
8020 fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", obj_info(vp));
8022 p = p->as.free.next;
8023 asan_poison_object(prev);
8025 asan_lock_freelist(page);
8027 if (page->flags.has_remembered_objects == FALSE) {
8028 remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
8032 return remembered_old_objects;
8038 int remembered_old_objects = 0;
8039 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8040 remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages));
8041 remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages));
8043 return remembered_old_objects;
8057gc_verify_internal_consistency_m(VALUE dummy)
8068 data.objspace = objspace;
8069 gc_report(5, objspace, "gc_verify_internal_consistency: start\n");
8072 for (size_t i = 0; i < heap_allocated_pages; i++) {
8073 struct heap_page *page = heap_pages_sorted[i];
8074 short slot_size = page->slot_size;
8076 uintptr_t start = (uintptr_t)page->start;
8077 uintptr_t end = start + page->total_slots * slot_size;
8079 verify_internal_consistency_i((void *)start, (void *)end, slot_size, &data);
8082 if (data.err_count != 0) {
8083#if RGENGC_CHECK_MODE >= 5
8084 objspace->rgengc.error_count = data.err_count;
8085 gc_marks_check(objspace, NULL, NULL);
8086 allrefs_dump(objspace);
8088 rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
8092 gc_verify_heap_pages(objspace);
8096 if (!is_lazy_sweeping(objspace) &&
8098 ruby_single_main_ractor != NULL) {
8099 if (objspace_live_slots(objspace) != data.live_object_count) {
8100 fprintf(stderr, "heap_pages_final_slots: %"PRIdSIZE", "
8101 "objspace->profile.total_freed_objects: %"PRIdSIZE"\n",
8102 heap_pages_final_slots, objspace->profile.total_freed_objects);
8103 rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
8104 objspace_live_slots(objspace), data.live_object_count);
8108 if (!is_marking(objspace)) {
8109 if (objspace->rgengc.old_objects != data.old_object_count) {
8110 rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
8111 objspace->rgengc.old_objects, data.old_object_count);
8113 if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
8114 rb_bug("inconsistent number of wb unprotected objects: expect %"PRIuSIZE", but %"PRIuSIZE".",
8115 objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
8120 size_t list_count = 0;
8123 VALUE z = heap_pages_deferred_final;
8126 z = RZOMBIE(z)->next;
8130 if (heap_pages_final_slots != data.zombie_object_count ||
8131 heap_pages_final_slots != list_count) {
8133 rb_bug("inconsistent finalizing object count:\n"
8134 " expect %"PRIuSIZE"\n"
8135 " but %"PRIuSIZE" zombies\n"
8136 " heap_pages_deferred_final list has %"PRIuSIZE" items.",
8137 heap_pages_final_slots,
8138 data.zombie_object_count,
8143 gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");
8153 unsigned int prev_during_gc = during_gc;
8156 gc_verify_internal_consistency_(objspace);
8158 during_gc = prev_during_gc;
8164rb_gc_verify_internal_consistency(void)
8170gc_verify_transient_heap_internal_consistency(VALUE dmy)
8172 rb_transient_heap_verify();
8176#if GC_ENABLE_INCREMENTAL_MARK
8178heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
8180 if (heap->pooled_pages) {
8181 if (heap->free_pages) {
8182 struct heap_page *free_pages_tail = heap->free_pages;
8183 while (free_pages_tail->free_next) {
8184 free_pages_tail = free_pages_tail->free_next;
8186 free_pages_tail->free_next = heap->pooled_pages;
8189 heap->free_pages = heap->pooled_pages;
8192 heap->pooled_pages = NULL;
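/* gc_marks_start(): switch to gc_mode_marking and prime either a full (major)
 * or minor mark; for a full mark the mark/remember bitmaps are cleared and the
 * incremental step size is recomputed from the pooled slot count. */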
8203 gc_report(1, objspace, "gc_marks_start: (%s)\n", full_mark ? "full" : "minor");
8204 gc_mode_transition(objspace, gc_mode_marking);
8207#if GC_ENABLE_INCREMENTAL_MARK
8208 size_t incremental_marking_steps = (objspace->rincgc.pooled_slots / INCREMENTAL_MARK_STEP_ALLOCATIONS) + 1;
8209 objspace->rincgc.step_slots = (objspace->marked_slots * 2) / incremental_marking_steps;
8211 if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE", "
8212 "objspace->rincgc.pooled_page_num: %"PRIdSIZE", "
8213 "objspace->rincgc.step_slots: %"PRIdSIZE", \n",
8214 objspace->marked_slots, objspace->rincgc.pooled_slots, objspace->rincgc.step_slots);
8216 objspace->flags.during_minor_gc = FALSE;
8217 if (ruby_enable_autocompact) {
8218 objspace->flags.during_compacting |= TRUE;
8220 objspace->profile.major_gc_count++;
8221 objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
8222 objspace->rgengc.old_objects = 0;
8223 objspace->rgengc.last_major_gc = objspace->profile.count;
8224 objspace->marked_slots = 0;
8226 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8228 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8229 rgengc_mark_and_rememberset_clear(objspace, heap);
8230 heap_move_pooled_pages_to_free_pages(heap);
8234 objspace->flags.during_minor_gc = TRUE;
8235 objspace->marked_slots =
8236 objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects;
8237 objspace->profile.minor_gc_count++;
8239 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8240 rgengc_rememberset_mark(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
8244 gc_mark_roots(objspace, NULL);
8246 gc_report(1, objspace, "gc_marks_start: (%s) end, stack in %"PRIdSIZE"\n",
8247 full_mark ? "full" : "minor", mark_stack_size(&objspace->mark_stack));
8250#if GC_ENABLE_INCREMENTAL_MARK
8252gc_marks_wb_unprotected_objects_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bits)
8257 gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
8258 GC_ASSERT(RVALUE_WB_UNPROTECTED((VALUE)p));
8259 GC_ASSERT(RVALUE_MARKED((VALUE)p));
8260 gc_mark_children(objspace, (VALUE)p);
8262 p += BASE_SLOT_SIZE;
8273 ccan_list_for_each(&heap->pages, page, page_node) {
8274 bits_t *mark_bits = page->mark_bits;
8275 bits_t *wbun_bits = page->wb_unprotected_bits;
8276 uintptr_t p = page->start;
8279 bits_t bits = mark_bits[0] & wbun_bits[0];
8280 bits >>= NUM_IN_PAGE(p);
8281 gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
8282 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
8284 for (j=1; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
8285 bits_t bits = mark_bits[j] & wbun_bits[j];
8287 gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
8288 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
8292 gc_mark_stacked_objects_all(objspace);
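/* gc_marks_finish(): drain whatever remains on the mark stack, re-mark the
 * roots once more for incremental GC, then recompute heap growth targets and
 * decide whether the next GC needs to be a major one. */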
8299#if GC_ENABLE_INCREMENTAL_MARK
8301 if (is_incremental_marking(objspace)) {
8302 if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
8303 rb_bug("gc_marks_finish: mark stack is not empty (%"PRIdSIZE").",
8304 mark_stack_size(&objspace->mark_stack));
8307 gc_mark_roots(objspace, 0);
8308 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == false);
8310#if RGENGC_CHECK_MODE >= 2
8311 if (gc_verify_heap_pages(objspace) != 0) {
8312 rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
8316 objspace->flags.during_incremental_marking = FALSE;
8318 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8319 gc_marks_wb_unprotected_objects(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
8324#if RGENGC_CHECK_MODE >= 2
8325 gc_verify_internal_consistency(objspace);
8328 if (is_full_marking(objspace)) {
8330 const double r = gc_params.oldobject_limit_factor;
8331 objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
8332 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
8335#if RGENGC_CHECK_MODE >= 4
8337 gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
8343 size_t total_slots = heap_allocatable_slots(objspace) + heap_eden_total_slots(objspace);
8344 size_t sweep_slots = total_slots - objspace->marked_slots;
8345 size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
8346 size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
8347 int full_marking = is_full_marking(objspace);
8348 const int r_cnt = GET_VM()->ractor.cnt;
8349 const int r_mul = r_cnt > 8 ? 8 : r_cnt;
8351 GC_ASSERT(heap_eden_total_slots(objspace) >= objspace->marked_slots);
8354 if (max_free_slots < gc_params.heap_init_slots * r_mul) {
8355 max_free_slots = gc_params.heap_init_slots * r_mul;
8358 if (sweep_slots > max_free_slots) {
8359 heap_pages_freeable_pages = (sweep_slots - max_free_slots) / HEAP_PAGE_OBJ_LIMIT;
8362 heap_pages_freeable_pages = 0;
8366 if (min_free_slots < gc_params.heap_free_slots * r_mul) {
8367 min_free_slots = gc_params.heap_free_slots * r_mul;
8370 if (sweep_slots < min_free_slots) {
8371 if (!full_marking) {
8372 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
8373 full_marking = TRUE;
8378 gc_report(1, objspace, "gc_marks_finish: next is full GC!!)\n");
8379 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
8386 gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
8388 size_pool_allocatable_pages_set(objspace, size_pool, heap_extend_pages(objspace, size_pool, sweep_slots, total_slots, heap_allocated_pages + heap_allocatable_pages(objspace)));
8390 heap_increment(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
8397 const double r = gc_params.oldobject_limit_factor;
8398 objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
8399 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
8402 if (objspace->rgengc.uncollectible_wb_unprotected_objects > objspace->rgengc.uncollectible_wb_unprotected_objects_limit) {
8403 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_SHADY;
8405 if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
8406 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDGEN;
8408 if (RGENGC_FORCE_MAJOR_GC) {
8409 objspace->rgengc.need_major_gc = GPR_FLAG_MAJOR_BY_FORCE;
8412 gc_report(1, objspace, "gc_marks_finish (marks %"PRIdSIZE" objects, "
8413 "old %"PRIdSIZE" objects, total %"PRIdSIZE" slots, "
8414 "sweep %"PRIdSIZE" slots, increment: %"PRIdSIZE", next GC: %s)\n",
8415 objspace->marked_slots, objspace->rgengc.old_objects, heap_eden_total_slots(objspace), sweep_slots, heap_allocatable_pages(objspace),
8416 objspace->rgengc.need_major_gc ? "major" : "minor");
8419 rb_transient_heap_finish_marking();
8420 rb_ractor_finish_marking();
8425#if GC_ENABLE_INCREMENTAL_MARK
8429 GC_ASSERT(is_marking(objspace));
8431 if (gc_mark_stacked_objects_incremental(objspace, slots)) {
8432 gc_marks_finish(objspace);
8435 if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE"\n", objspace->marked_slots);
8440gc_compact_heap_cursors_met_p(rb_heap_t *heap)
8442 return heap->sweeping_page == heap->compact_cursor;
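/* Compaction: gc_compact_destination_pool() chooses the size pool for a moved
 * copy from the object's embedded size (arrays, objects, strings), keeping
 * too-complex shaped objects in the first pool; gc_compact_move() then tries
 * to move the slot and may sweep ahead in the destination heap to make room. */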
8453 obj_size = rb_ary_size_as_embedded(src);
8457 if (rb_shape_obj_too_complex(src)) {
8458 return &size_pools[0];
8461 obj_size = rb_obj_embedded_size(ROBJECT_IV_CAPACITY(src));
8466 obj_size = rb_str_size_as_embedded(src);
8473 if (rb_gc_size_allocatable_p(obj_size)){
8474 idx = size_pool_idx_for_size(obj_size);
8476 return &size_pools[idx];
8483 GC_ASSERT(gc_is_moveable_obj(objspace, src));
8485 rb_size_pool_t *dest_pool = gc_compact_destination_pool(objspace, size_pool, src);
8486 rb_heap_t *dheap = SIZE_POOL_EDEN_HEAP(dest_pool);
8490 if (gc_compact_heap_cursors_met_p(dheap)) {
8491 return dheap != heap;
8495 orig_shape = rb_shape_get_shape(src);
8496 if (dheap != heap && !rb_shape_obj_too_complex(src)) {
8497 rb_shape_t *initial_shape = rb_shape_get_shape_by_id((shape_id_t)((dest_pool - size_pools) + SIZE_POOL_COUNT));
8498 new_shape = rb_shape_traverse_from_new_root(initial_shape, orig_shape);
8501 dest_pool = size_pool;
8507 while (!try_move(objspace, dheap, dheap->free_pages, src)) {
8509 .page = dheap->sweeping_page,
8518 lock_page_body(objspace, GET_PAGE_BODY(src));
8519 gc_sweep_page(objspace, dheap, &ctx);
8520 unlock_page_body(objspace, GET_PAGE_BODY(src));
8522 if (dheap->sweeping_page->free_slots > 0) {
8523 heap_add_freepage(dheap, dheap->sweeping_page);
8526 dheap->sweeping_page = ccan_list_next(&dheap->pages, dheap->sweeping_page, page_node);
8527 if (gc_compact_heap_cursors_met_p(dheap)) {
8528 return dheap != heap;
8534 VALUE dest = rb_gc_location(src);
8535 rb_shape_set_shape(dest, new_shape);
8537 RMOVED(src)->original_shape_id = rb_shape_id(orig_shape);
8546 short slot_size = page->slot_size;
8547 short slot_bits = slot_size / BASE_SLOT_SIZE;
8548 GC_ASSERT(slot_bits > 0);
8552 GC_ASSERT(vp % sizeof(RVALUE) == 0);
8555 objspace->rcompactor.considered_count_table[BUILTIN_TYPE(vp)]++;
8557 if (gc_is_moveable_obj(objspace, vp)) {
8558 if (!gc_compact_move(objspace, heap, size_pool, vp)) {
8565 bitset >>= slot_bits;
8575 GC_ASSERT(page == heap->compact_cursor);
8577 bits_t *mark_bits, *pin_bits;
8579 uintptr_t p = page->start;
8581 mark_bits = page->mark_bits;
8582 pin_bits = page->pinned_bits;
8585 bitset = (mark_bits[0] & ~pin_bits[0]);
8586 bitset >>= NUM_IN_PAGE(p);
8588 if (!gc_compact_plane(objspace, size_pool, heap, (uintptr_t)p, bitset, page))
8591 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
8593 for (int j = 1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
8594 bitset = (mark_bits[j] & ~pin_bits[j]);
8596 if (!gc_compact_plane(objspace, size_pool, heap, (uintptr_t)p, bitset, page))
8599 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
8608 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8610 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8612 if (heap->total_pages > 0 &&
8613 !gc_compact_heap_cursors_met_p(heap)) {
8624 gc_compact_start(objspace);
8625#if RGENGC_CHECK_MODE >= 2
8626 gc_verify_internal_consistency(objspace);
8629 while (!gc_compact_all_compacted_p(objspace)) {
8630 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8632 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8634 if (gc_compact_heap_cursors_met_p(heap)) {
8638 struct heap_page *start_page = heap->compact_cursor;
8640 if (!gc_compact_page(objspace, size_pool, heap, start_page)) {
8641 lock_page_body(objspace, GET_PAGE_BODY(start_page->start));
8648 lock_page_body(objspace, GET_PAGE_BODY(start_page->start));
8649 heap->compact_cursor = ccan_list_prev(&heap->pages, heap->compact_cursor, page_node);
8653 gc_compact_finish(objspace);
8655#if RGENGC_CHECK_MODE >= 2
8656 gc_verify_internal_consistency(objspace);
8663 gc_report(1, objspace, "gc_marks_rest\n");
8665#if GC_ENABLE_INCREMENTAL_MARK
8666 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8667 SIZE_POOL_EDEN_HEAP(&size_pools[i])->pooled_pages = NULL;
8671 if (is_incremental_marking(objspace)) {
8672 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
8675 gc_mark_stacked_objects_all(objspace);
8678 gc_marks_finish(objspace);
8687 GC_ASSERT(dont_gc_val() == FALSE);
8688#if GC_ENABLE_INCREMENTAL_MARK
8690 unsigned int lock_lev;
8691 gc_enter(objspace, gc_enter_event_mark_continue, &lock_lev);
8693 if (heap->free_pages) {
8694 gc_report(2, objspace, "gc_marks_continue: has pooled pages");
8695 gc_marks_step(objspace, objspace->rincgc.step_slots);
8698 gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE").\n",
8699 mark_stack_size(&objspace->mark_stack));
8700 gc_marks_rest(objspace);
8703 gc_exit(objspace, gc_enter_event_mark_continue, &lock_lev);
8710 gc_prof_mark_timer_start(objspace);
8714 gc_marks_start(objspace, full_mark);
8715 if (!is_incremental_marking(objspace)) {
8716 gc_marks_rest(objspace);
8719#if RGENGC_PROFILE > 0
8720 if (gc_prof_record(objspace)) {
8722 record->old_objects = objspace->rgengc.old_objects;
8725 gc_prof_mark_timer_stop(objspace);
8731gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
8733 if (level <= RGENGC_DEBUG) {
8737 const char *status = " ";
8740 status = is_full_marking(objspace) ? "+" : "-";
8743 if (is_lazy_sweeping(objspace)) {
8746 if (is_incremental_marking(objspace)) {
8751 va_start(args, fmt);
8752 vsnprintf(buf, 1024, fmt, args);
8755 fprintf(out, "%s|", status);
8765 return RVALUE_REMEMBERED(obj);
8771 struct heap_page *page = GET_HEAP_PAGE(obj);
8772 bits_t *bits = &page->marking_bits[0];
8774 GC_ASSERT(!is_incremental_marking(objspace));
8776 if (MARKED_IN_BITMAP(bits, obj)) {
8780 page->flags.has_remembered_objects = TRUE;
8781 MARK_IN_BITMAP(bits, obj);
8792 gc_report(6, objspace, "rgengc_remember: %s %s\n", obj_info(obj),
8793 rgengc_remembersetbits_get(objspace, obj) ? "was already remembered" : "is remembered now");
8795 check_rvalue_consistency(obj);
8797 if (RGENGC_CHECK_MODE) {
8798 if (RVALUE_WB_UNPROTECTED(obj)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj));
8801#if RGENGC_PROFILE > 0
8802 if (!rgengc_remembered(objspace, obj)) {
8803 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
8804 objspace->profile.total_remembered_normal_object_count++;
8805#if RGENGC_PROFILE >= 2
8806 objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
8812 return rgengc_remembersetbits_set(objspace, obj);
8818 int result = rgengc_remembersetbits_get(objspace, obj);
8819 check_rvalue_consistency(obj);
8826 gc_report(6, objspace, "rgengc_remembered: %s\n", obj_info(obj));
8827 return rgengc_remembered_sweep(objspace, obj);
8830#ifndef PROFILE_REMEMBERSET_MARK
8831#define PROFILE_REMEMBERSET_MARK 0
8835rgengc_rememberset_mark_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bitset)
8841 gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", obj_info(obj));
8842 GC_ASSERT(RVALUE_UNCOLLECTIBLE(obj));
8843 GC_ASSERT(RVALUE_OLD_P(obj) || RVALUE_WB_UNPROTECTED(obj));
8845 gc_mark_children(objspace, obj);
8847 p += BASE_SLOT_SIZE;
8858#if PROFILE_REMEMBERSET_MARK
8859 int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
8861 gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
8863 ccan_list_for_each(&heap->pages, page, page_node) {
8864 if (page->flags.has_remembered_objects | page->flags.has_uncollectible_shady_objects) {
8865 uintptr_t p = page->start;
8866 bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
8867 bits_t *marking_bits = page->marking_bits;
8868 bits_t *uncollectible_bits = page->uncollectible_bits;
8869 bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
8870#if PROFILE_REMEMBERSET_MARK
8871 if (page->flags.has_remembered_objects && page->flags.has_uncollectible_shady_objects) has_both++;
8872 else if (page->flags.has_remembered_objects) has_old++;
8873 else if (page->flags.has_uncollectible_shady_objects) has_shady++;
8875 for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
8876 bits[j] = marking_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
8877 marking_bits[j] = 0;
8879 page->flags.has_remembered_objects = FALSE;
8882 bitset >>= NUM_IN_PAGE(p);
8883 rgengc_rememberset_mark_plane(objspace, p, bitset);
8884 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
8886 for (j=1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
8888 rgengc_rememberset_mark_plane(objspace, p, bitset);
8889 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
8892#if PROFILE_REMEMBERSET_MARK
8899#if PROFILE_REMEMBERSET_MARK
8900 fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
8902 gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
8910 ccan_list_for_each(&heap->pages, page, page_node) {
8911 memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8912 memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8913 memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8914 memset(&page->pinned_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8915 page->flags.has_uncollectible_shady_objects = FALSE;
8916 page->flags.has_remembered_objects = FALSE;
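/* Generational write barrier: called for an old->young store outside of
 * incremental marking; it either remembers the old object a (so the next minor
 * GC rescans it) or promotes/remembers the young object b. */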
8927 if (RGENGC_CHECK_MODE) {
8928 if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
8929 if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
8930 if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
8935 if (!rgengc_remembered(objspace, a)) {
8936 RB_VM_LOCK_ENTER_NO_BARRIER();
8938 rgengc_remember(objspace, a);
8940 RB_VM_LOCK_LEAVE_NO_BARRIER();
8941 gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
8945 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(b), b);
8946 if (RVALUE_WB_UNPROTECTED(b)) {
8947 gc_remember_unprotected(objspace, b);
8950 RVALUE_AGE_SET_OLD(objspace, b);
8951 rgengc_remember(objspace, b);
8954 gc_report(1, objspace, "gc_writebarrier_generational: %s -> %s (remembered)\n", obj_info(a), obj_info(b));
8957 check_rvalue_consistency(a);
8958 check_rvalue_consistency(b);
8961#if GC_ENABLE_INCREMENTAL_MARK
8965 gc_mark_set_parent(objspace, parent);
8966 rgengc_check_relation(objspace, obj);
8967 if (gc_mark_set(objspace, obj) == FALSE) return;
8968 gc_aging(objspace, obj);
8969 gc_grey(objspace, obj);
8977 gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));
8979 if (RVALUE_BLACK_P(a)) {
8980 if (RVALUE_WHITE_P(b)) {
8981 if (!RVALUE_WB_UNPROTECTED(a)) {
8982 gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
8983 gc_mark_from(objspace, b, a);
8986 else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
8987 if (!RVALUE_WB_UNPROTECTED(b)) {
8988 gc_report(1, objspace, "gc_writebarrier_incremental: [GN] %p -> %s\n", (void *)a, obj_info(b));
8989 RVALUE_AGE_SET_OLD(objspace, b);
8991 if (RVALUE_BLACK_P(b)) {
8992 gc_grey(objspace, b);
8996 gc_report(1, objspace, "gc_writebarrier_incremental: [LL] %p -> %s\n", (void *)a, obj_info(b));
8997 gc_remember_unprotected(objspace, b);
9001 if (UNLIKELY(objspace->flags.during_compacting)) {
9002 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(b), b);
9007#define gc_writebarrier_incremental(a, b, objspace)
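/* rb_gc_writebarrier(): the public barrier.  Outside incremental marking only
 * the generational barrier is needed for old->young stores; during incremental
 * marking the VM lock is taken and the incremental barrier runs, retrying if
 * the marking phase changed in between. */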
9015 if (RGENGC_CHECK_MODE) {
9021 if (!is_incremental_marking(objspace)) {
9022 if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
9026 gc_writebarrier_generational(a, b, objspace);
9032 RB_VM_LOCK_ENTER_NO_BARRIER();
9034 if (is_incremental_marking(objspace)) {
9035 gc_writebarrier_incremental(a, b, objspace);
9041 RB_VM_LOCK_LEAVE_NO_BARRIER();
9043 if (retry) goto retry;
9051 if (RVALUE_WB_UNPROTECTED(obj)) {
9057 gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
9058 rgengc_remembered(objspace, obj) ? " (already remembered)" : "");
9060 RB_VM_LOCK_ENTER_NO_BARRIER();
9062 if (RVALUE_OLD_P(obj)) {
9063 gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
9064 RVALUE_DEMOTE(objspace, obj);
9065 gc_mark_set(objspace, obj);
9066 gc_remember_unprotected(objspace, obj);
9069 objspace->profile.total_shade_operation_count++;
9070#if RGENGC_PROFILE >= 2
9071 objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
9076 RVALUE_AGE_RESET(obj);
9079 RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
9080 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
9082 RB_VM_LOCK_LEAVE_NO_BARRIER();
9089MJIT_FUNC_EXPORTED void
9090rb_gc_writebarrier_remember(VALUE obj)
9094 gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));
9096 if (is_incremental_marking(objspace)) {
9097 if (RVALUE_BLACK_P(obj)) {
9098 gc_grey(objspace, obj);
9102 if (RVALUE_OLD_P(obj)) {
9103 rgengc_remember(objspace, obj);
9108static st_table *rgengc_unprotect_logging_table;
9111rgengc_unprotect_logging_exit_func_i(st_data_t key, st_data_t val, st_data_t arg)
9113 fprintf(stderr, "%s\t%"PRIuVALUE"\n", (char *)key, (VALUE)val);
9118rgengc_unprotect_logging_exit_func(void)
9120 st_foreach(rgengc_unprotect_logging_table, rgengc_unprotect_logging_exit_func_i, 0);
9124rb_gc_unprotect_logging(void *objptr, const char *filename, int line)
9128 if (rgengc_unprotect_logging_table == 0) {
9129 rgengc_unprotect_logging_table = st_init_strtable();
9130 atexit(rgengc_unprotect_logging_exit_func);
9133 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
9138 snprintf(ptr, 0x100 - 1, "%s|%s:%d", obj_info(obj), filename, line);
9140 if (st_lookup(rgengc_unprotect_logging_table, (st_data_t)ptr, &cnt)) {
9145 if (!ptr) rb_memerror();
9147 st_insert(rgengc_unprotect_logging_table, (st_data_t)ptr, cnt);
9152rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
9156 if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {
9157 if (!RVALUE_OLD_P(dest)) {
9158 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
9159 RVALUE_AGE_RESET_RAW(dest);
9162 RVALUE_DEMOTE(objspace, dest);
9166 check_rvalue_consistency(dest);
9172rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
9174 return RBOOL(!RVALUE_WB_UNPROTECTED(obj));
9178rb_obj_rgengc_promoted_p(VALUE obj)
9184rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
9187 static ID ID_marked;
9188 static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;
9191#define I(s) ID_##s = rb_intern(#s);
9201 if (RVALUE_WB_UNPROTECTED(obj) == 0 && n<max) flags[n++] = ID_wb_protected;
9202 if (RVALUE_OLD_P(obj) && n<max) flags[n++] = ID_old;
9203 if (RVALUE_UNCOLLECTIBLE(obj) && n<max) flags[n++] = ID_uncollectible;
9204 if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj) && n<max) flags[n++] = ID_marking;
9205 if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) && n<max) flags[n++] = ID_marked;
9206 if (MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) && n<max) flags[n++] = ID_pinned;
9215#if GC_ENABLE_INCREMENTAL_MARK
9216 newobj_cache->incremental_mark_step_allocated_slots = 0;
9219 for (size_t size_pool_idx = 0; size_pool_idx < SIZE_POOL_COUNT; size_pool_idx++) {
9222 struct heap_page *page = cache->using_page;
9223 RVALUE *freelist = cache->freelist;
9224 RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", (void *)page, (void *)freelist);
9226 heap_page_freelist_append(page, freelist);
9228 cache->using_page = NULL;
9229 cache->freelist = NULL;
9239#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
9240#define MARK_OBJECT_ARY_BUCKET_SIZE 1024
9246 if (!is_pointer_to_heap(&rb_objspace, (void *)obj))
9251 VALUE ary_ary = GET_VM()->mark_object_ary;
9252 VALUE ary = rb_ary_last(0, 0, ary_ary);
9255 ary = rb_ary_hidden_new(MARK_OBJECT_ARY_BUCKET_SIZE);
9256 rb_ary_push(ary_ary, ary);
9259 rb_ary_push(ary, obj);
9271 tmp->next = global_list;
9280 struct gc_list *tmp = global_list;
9282 if (tmp->varptr == addr) {
9283 global_list = tmp->next;
9288 if (tmp->next->varptr == addr) {
9289 struct gc_list *t = tmp->next;
9291 tmp->next = tmp->next->next;
9302 rb_gc_register_address(var);
9309 gc_stress_no_immediate_sweep,
9310 gc_stress_full_mark_after_malloc,
9314#define gc_stress_full_mark_after_malloc_p() \
9315 (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
9320 if (!heap->free_pages) {
9321 if (!heap_increment(objspace, size_pool, heap)) {
9322 size_pool_allocatable_pages_set(objspace, size_pool, 1);
9323 heap_increment(objspace, size_pool, heap);
9331 if (dont_gc_val() || during_gc || ruby_disable_gc) {
9332 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
9334 heap_ready_to_gc(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
9344gc_reset_malloc_info(rb_objspace_t *objspace, bool full_mark)
9346 gc_prof_set_malloc_info(objspace);
9348 size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
9349 size_t old_limit = malloc_limit;
9351 if (inc > malloc_limit) {
9352 malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
9353 if (malloc_limit > gc_params.malloc_limit_max) {
9354 malloc_limit = gc_params.malloc_limit_max;
9358 malloc_limit = (size_t)(malloc_limit * 0.98);
9359 if (malloc_limit < gc_params.malloc_limit_min) {
9360 malloc_limit = gc_params.malloc_limit_min;
9365 if (old_limit != malloc_limit) {
9366 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
9367 rb_gc_count(), old_limit, malloc_limit);
9370 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
9371 rb_gc_count(), malloc_limit);
9377#if RGENGC_ESTIMATE_OLDMALLOC
9379 if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
9380 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
9381 objspace->rgengc.oldmalloc_increase_limit =
9382 (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
9384 if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
9385 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
9389 if (0) fprintf(stderr, "%"PRIdSIZE"\t%d\t%"PRIuSIZE"\t%"PRIuSIZE"\t%"PRIdSIZE"\n",
9391 objspace->rgengc.need_major_gc,
9392 objspace->rgengc.oldmalloc_increase,
9393 objspace->rgengc.oldmalloc_increase_limit,
9394 gc_params.oldmalloc_limit_max);
9398 objspace->rgengc.oldmalloc_increase = 0;
9400 if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
9401 objspace->rgengc.oldmalloc_increase_limit =
9402 (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
9403 if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
9404 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
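/* garbage_collect(): GVL-holding entry point; it records preparation time for
 * the profiler and then hands off to gc_start() with the given reason flags. */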
9412garbage_collect(rb_objspace_t *objspace, unsigned int reason)
9418#if GC_PROFILE_MORE_DETAIL
9419 objspace->profile.prepare_time = getrusage_time();
9424#if GC_PROFILE_MORE_DETAIL
9425 objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
9428 ret = gc_start(objspace, reason);
9438 unsigned int do_full_mark = !!(reason & GPR_FLAG_FULL_MARK);
9439#if GC_ENABLE_INCREMENTAL_MARK
9440 unsigned int immediate_mark = reason & GPR_FLAG_IMMEDIATE_MARK;
9444 objspace->flags.immediate_sweep = !!(reason & GPR_FLAG_IMMEDIATE_SWEEP);
9447 if (do_full_mark && ruby_enable_autocompact) {
9448 objspace->flags.during_compacting = TRUE;
9451 objspace->flags.during_compacting = !!(reason & GPR_FLAG_COMPACT);
9454 if (!heap_allocated_pages) return FALSE;
9455 if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE;
9457 GC_ASSERT(gc_mode(objspace) == gc_mode_none);
9458 GC_ASSERT(!is_lazy_sweeping(objspace));
9459 GC_ASSERT(!is_incremental_marking(objspace));
9461 unsigned int lock_lev;
9462 gc_enter(objspace, gc_enter_event_start, &lock_lev);
9464#if RGENGC_CHECK_MODE >= 2
9465 gc_verify_internal_consistency(objspace);
9468 if (ruby_gc_stressful) {
9469 int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;
9471 if ((flag & (1<<gc_stress_no_major)) == 0) {
9472 do_full_mark = TRUE;
9475 objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
9478 if (objspace->rgengc.need_major_gc) {
9479 reason |= objspace->rgengc.need_major_gc;
9480 do_full_mark = TRUE;
9482 else if (RGENGC_FORCE_MAJOR_GC) {
9483 reason = GPR_FLAG_MAJOR_BY_FORCE;
9484 do_full_mark = TRUE;
9487 objspace->rgengc.need_major_gc = GPR_FLAG_NONE;
9490 if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
9491 reason |= GPR_FLAG_MAJOR_BY_FORCE;
9494#if GC_ENABLE_INCREMENTAL_MARK
9495 if (!GC_ENABLE_INCREMENTAL_MARK || objspace->flags.dont_incremental || immediate_mark) {
9496 objspace->flags.during_incremental_marking = FALSE;
9499 objspace->flags.during_incremental_marking = do_full_mark;
9503 if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
9504 objspace->flags.immediate_sweep = TRUE;
9507 if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
9509 gc_report(1, objspace, "gc_start(reason: %x) => %u, %d, %d\n",
9511 do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
9513#if USE_DEBUG_COUNTER
9514 RB_DEBUG_COUNTER_INC(gc_count);
9516 if (reason & GPR_FLAG_MAJOR_MASK) {
9517 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
9518 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
9519 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
9520 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
9521#if RGENGC_ESTIMATE_OLDMALLOC
9522 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
9526 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
9527 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
9528 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
9529 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
9530 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
9534 objspace->profile.count++;
9535 objspace->profile.latest_gc_info = reason;
9536 objspace->profile.total_allocated_objects_at_gc_start = objspace->total_allocated_objects;
9537 objspace->profile.heap_used_at_gc_start = heap_allocated_pages;
9538 gc_prof_setup_new_record(objspace, reason);
9539 gc_reset_malloc_info(objspace, do_full_mark);
9540 rb_transient_heap_start_marking(do_full_mark);
9543 GC_ASSERT(during_gc);
9545 gc_prof_timer_start(objspace);
9547 gc_marks(objspace, do_full_mark);
9549 gc_prof_timer_stop(objspace);
9551 gc_exit(objspace, gc_enter_event_start, &lock_lev);
9558 int marking = is_incremental_marking(objspace);
9559 int sweeping = is_lazy_sweeping(objspace);
9561 if (marking || sweeping) {
9562 unsigned int lock_lev;
9563 gc_enter(objspace, gc_enter_event_rest, &lock_lev);
9565 if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);
9567 if (is_incremental_marking(objspace)) {
9568 gc_marks_rest(objspace);
9570 if (is_lazy_sweeping(objspace)) {
9571 gc_sweep_rest(objspace);
9573 gc_exit(objspace, gc_enter_event_rest, &lock_lev);
9579 unsigned int reason;
9586 if (is_marking(objspace)) {
9588 if (is_full_marking(objspace)) buff[i++] = 'F';
9589#if GC_ENABLE_INCREMENTAL_MARK
9590 if (is_incremental_marking(objspace)) buff[i++] = 'I';
9593 else if (is_sweeping(objspace)) {
9595 if (is_lazy_sweeping(objspace)) buff[i++] = 'L';
9606 static char buff[0x10];
9607 gc_current_status_fill(objspace, buff);
9611#if PRINT_ENTER_EXIT_TICK
9613static tick_t last_exit_tick;
9614static tick_t enter_tick;
9615static int enter_count = 0;
9616static char last_gc_status[0x10];
9619gc_record(rb_objspace_t *objspace, int direction, const char *event)
9621 if (direction == 0) {
9623 enter_tick = tick();
9624 gc_current_status_fill(objspace, last_gc_status);
9627 tick_t exit_tick = tick();
9628 char current_gc_status[0x10];
9629 gc_current_status_fill(objspace, current_gc_status);
9632 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
9633 enter_tick - last_exit_tick,
9634 exit_tick - enter_tick,
9636 last_gc_status, current_gc_status,
9637 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
9638 last_exit_tick = exit_tick;
9641 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
9643 exit_tick - enter_tick,
9645 last_gc_status, current_gc_status,
9646 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
9652gc_record(rb_objspace_t *objspace, int direction, const char *event)
9659gc_enter_event_cstr(enum gc_enter_event event)
9662 case gc_enter_event_start: return "start";
9663 case gc_enter_event_mark_continue: return "mark_continue";
9664 case gc_enter_event_sweep_continue: return "sweep_continue";
9665 case gc_enter_event_rest: return "rest";
9666 case gc_enter_event_finalizer: return "finalizer";
9667 case gc_enter_event_rb_memerror: return "rb_memerror";
9673gc_enter_count(enum gc_enter_event event)
9676 case gc_enter_event_start: RB_DEBUG_COUNTER_INC(gc_enter_start); break;
9677 case gc_enter_event_mark_continue: RB_DEBUG_COUNTER_INC(gc_enter_mark_continue); break;
9678 case gc_enter_event_sweep_continue: RB_DEBUG_COUNTER_INC(gc_enter_sweep_continue); break;
9679 case gc_enter_event_rest: RB_DEBUG_COUNTER_INC(gc_enter_rest); break;
9680 case gc_enter_event_finalizer: RB_DEBUG_COUNTER_INC(gc_enter_finalizer); break;
9681 case gc_enter_event_rb_memerror: break;
9686#define MEASURE_GC (objspace->flags.measure_gc)
9690gc_enter_event_measure_p(rb_objspace_t *objspace, enum gc_enter_event event)
9692 if (!MEASURE_GC) return false;
9695 case gc_enter_event_start:
9696 case gc_enter_event_mark_continue:
9697 case gc_enter_event_sweep_continue:
9698 case gc_enter_event_rest:
9708static bool current_process_time(struct timespec *ts);
9711gc_enter_clock(rb_objspace_t *objspace, enum gc_enter_event event)
9713 if (gc_enter_event_measure_p(objspace, event)) {
9714 if (!current_process_time(&objspace->profile.start_time)) {
9715 objspace->profile.start_time.tv_sec = 0;
9716 objspace->profile.start_time.tv_nsec = 0;
9722gc_exit_clock(rb_objspace_t *objspace, enum gc_enter_event event)
9724 if (gc_enter_event_measure_p(objspace, event)) {
9727 if ((objspace->profile.start_time.tv_sec > 0 ||
9728 objspace->profile.start_time.tv_nsec > 0) &&
9729 current_process_time(&end_time)) {
9731 if (end_time.tv_sec < objspace->profile.start_time.tv_sec) {
9736 (uint64_t)(end_time.tv_sec - objspace->profile.start_time.tv_sec) * (1000 * 1000 * 1000) +
9737 (end_time.tv_nsec - objspace->profile.start_time.tv_nsec);
9738 objspace->profile.total_time_ns += ns;
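/*
 * Note (added): the elapsed time above is accumulated in nanoseconds from two
 * `struct timespec` samples. A minimal sketch of the same delta computation,
 * assuming both samples come from the same process/monotonic clock:
 *
 *   uint64_t ns = (uint64_t)(end.tv_sec - start.tv_sec) * (1000 * 1000 * 1000)
 *               + (end.tv_nsec - start.tv_nsec);
 *
 * GC.stat(:time) later reports this counter truncated to milliseconds.
 */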
9745gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
9747 RB_VM_LOCK_ENTER_LEV(lock_lev);
9749 gc_enter_clock(objspace, event);
9752 case gc_enter_event_rest:
9753 if (!is_marking(objspace)) break;
9755 case gc_enter_event_start:
9756 case gc_enter_event_mark_continue:
9764 gc_enter_count(event);
9765 if (UNLIKELY(during_gc != 0)) rb_bug("during_gc != 0");
9766 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
9769 RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
9770 gc_report(1, objspace, "gc_enter: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
9771 gc_record(objspace, 0, gc_enter_event_cstr(event));
9776gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
9778 GC_ASSERT(during_gc != 0);
9781 gc_record(objspace, 1, gc_enter_event_cstr(event));
9782 RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
9783 gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
9786 gc_exit_clock(objspace, event);
9787 RB_VM_LOCK_LEAVE_LEV(lock_lev);
9789#if RGENGC_CHECK_MODE >= 2
9790 if (event == gc_enter_event_sweep_continue && gc_mode(objspace) == gc_mode_none) {
9791 GC_ASSERT(!during_gc);
9793 gc_verify_internal_consistency(objspace);
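/*
 * Note (added): gc_enter()/gc_exit() bracket every GC phase: they take and
 * release the VM lock level, start/stop the measurement clock, and emit the
 * enter/exit records above. A hedged sketch of the expected calling pattern,
 * using the names from this file:
 *
 *   unsigned int lock_lev;
 *   gc_enter(objspace, gc_enter_event_rest, &lock_lev);
 *   ... marking / sweeping work ...
 *   gc_exit(objspace, gc_enter_event_rest, &lock_lev);
 */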
9799gc_with_gvl(void *ptr)
9802 return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
9806garbage_collect_with_gvl(rb_objspace_t *objspace, unsigned int reason)
9808 if (dont_gc_val()) return TRUE;
9809 if (ruby_thread_has_gvl_p()) {
9810 return garbage_collect(objspace, reason);
9815 oar.objspace = objspace;
9816 oar.reason = reason;
9821 fprintf(stderr, "[FATAL] failed to allocate memory\n");
9831 unsigned int reason = (GPR_FLAG_FULL_MARK |
9832 GPR_FLAG_IMMEDIATE_MARK |
9833 GPR_FLAG_IMMEDIATE_SWEEP |
9837 if (RTEST(compact)) {
9838 GC_ASSERT(GC_COMPACTION_SUPPORTED);
9840 reason |= GPR_FLAG_COMPACT;
9843 if (!RTEST(full_mark)) reason &= ~GPR_FLAG_FULL_MARK;
9844 if (!RTEST(immediate_mark)) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
9845 if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
9848 garbage_collect(objspace, reason);
9849 gc_finalize_deferred(objspace);
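/*
 * Note (added): this is the backend of GC.start / GC.compact; the full
 * GPR_FLAG_* set built above is trimmed according to the keyword arguments.
 * A hedged Ruby-level usage example (not part of this file):
 *
 *   GC.start                                            # full, immediate GC
 *   GC.start(full_mark: false, immediate_sweep: false)  # minor GC, lazy sweep
 */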
9866 if (DYNAMIC_SYM_P(obj) && (RSYMBOL(obj)->id & ~ID_SCOPE_MASK)) {
9894 if (st_is_member(finalizer_table, obj)) {
9898 GC_ASSERT(RVALUE_MARKED(obj));
9899 GC_ASSERT(!RVALUE_PINNED(obj));
9913#define COULD_MALLOC_REGION_START() \
9914 GC_ASSERT(during_gc); \
9915 VALUE _already_disabled = rb_gc_disable_no_rest(); \
9918#define COULD_MALLOC_REGION_END() \
9920 if (_already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
9932 gc_report(4, objspace, "Moving object: %p -> %p\n", (void *)scan, (void *)free);
9935 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(free), free));
9938 marked = rb_objspace_marked_object_p((VALUE)src);
9939 wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
9940 uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
9941 marking = RVALUE_MARKING((VALUE)src);
9944 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)src), (VALUE)src);
9945 CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)src), (VALUE)src);
9946 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)src), (VALUE)src);
9947 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)src), (VALUE)src);
9951 COULD_MALLOC_REGION_START();
9955 COULD_MALLOC_REGION_END();
9958 st_data_t srcid = (st_data_t)src, id;
9962 if (st_lookup(objspace->obj_to_id_tbl, srcid, &id)) {
9963 gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);
9965 COULD_MALLOC_REGION_START();
9967 st_delete(objspace->obj_to_id_tbl, &srcid, 0);
9968 st_insert(objspace->obj_to_id_tbl, (st_data_t)dest, id);
9970 COULD_MALLOC_REGION_END();
9974 memcpy(dest, src, MIN(src_slot_size, slot_size));
9976 if (RVALUE_OVERHEAD > 0) {
9977 void *dest_overhead = (void *)(((uintptr_t)dest) + slot_size - RVALUE_OVERHEAD);
9978 void *src_overhead = (void *)(((uintptr_t)src) + src_slot_size - RVALUE_OVERHEAD);
9980 memcpy(dest_overhead, src_overhead, RVALUE_OVERHEAD);
9983 memset(src, 0, src_slot_size);
9987 MARK_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
9990 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
9994 MARK_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
9997 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
10000 if (wb_unprotected) {
10001 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
10004 CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
10007 if (uncollectible) {
10008 MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
10011 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
10015 src->as.moved.flags = T_MOVED;
10016 src->as.moved.dummy = Qundef;
10017 src->as.moved.destination = (VALUE)dest;
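/*
 * Note (added): after the copy, the source slot is turned into a T_MOVED
 * forwarding cell whose `destination` points at the new location; references
 * are fixed up later through rb_gc_location(). A minimal sketch of following
 * the forwarding pointer (mirrors rb_gc_location() further below):
 *
 *   if (BUILTIN_TYPE(value) == T_MOVED) {
 *       value = (VALUE)RMOVED(value)->destination;
 *   }
 */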
10023#if GC_CAN_COMPILE_COMPACTION
10025compare_free_slots(const void *left, const void *right, void *dummy)
10030 left_page = *(struct heap_page * const *)left;
10031 right_page = *(struct heap_page * const *)right;
10033 return left_page->free_slots - right_page->free_slots;
10039 for (int j = 0; j < SIZE_POOL_COUNT; j++) {
10042 size_t total_pages = SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
10044 struct heap_page *page = 0, **page_list = malloc(size);
10047 SIZE_POOL_EDEN_HEAP(size_pool)->free_pages = NULL;
10048 ccan_list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
10049 page_list[i++] = page;
10053 GC_ASSERT((size_t)i == total_pages);
10057 ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), compare_free_slots, NULL);
10060 ccan_list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
10062 for (i = 0; i < total_pages; i++) {
10063 ccan_list_add(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, &page_list[i]->page_node);
10064 if (page_list[i]->free_slots != 0) {
10065 heap_add_freepage(SIZE_POOL_EDEN_HEAP(size_pool), page_list[i]);
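/*
 * Note (added): gc_sort_heap_by_empty_slots() reorders each eden page list by
 * ascending free_slots using the comparator above and re-registers pages that
 * still have free slots as free pages. It is only invoked from
 * gc_verify_compaction_references() below, when its toward_empty argument is
 * set.
 */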
10077 if (ARY_SHARED_P(v)) {
10079 VALUE old_root = RARRAY(v)->as.heap.aux.shared_root;
10082 UPDATE_IF_MOVED(objspace, RARRAY(v)->as.heap.aux.shared_root);
10085 VALUE new_root = RARRAY(v)->as.heap.aux.shared_root;
10087 if (ARY_EMBED_P(new_root) && new_root != old_root) {
10088 size_t offset = (size_t)(RARRAY(v)->as.heap.ptr - RARRAY(old_root)->as.ary);
10089 GC_ASSERT(RARRAY(v)->as.heap.ptr >= RARRAY(old_root)->as.ary);
10090 RARRAY(v)->as.heap.ptr = RARRAY(new_root)->as.ary + offset;
10099 for (long i = 0; i < len; i++) {
10100 UPDATE_IF_MOVED(objspace, ptr[i]);
10105 if (rb_gc_obj_slot_size(v) >= rb_ary_size_as_embedded(v)) {
10106 if (rb_ary_embeddable_p(v)) {
10107 rb_ary_make_embedded(v);
10119 VALUE *ptr = ROBJECT_IVPTR(v);
10121 if (rb_shape_obj_too_complex(v)) {
10122 update_m_tbl(objspace, ROBJECT_IV_HASH(v));
10127 size_t slot_size = rb_gc_obj_slot_size(v);
10128 size_t embed_size = rb_obj_embedded_size(ROBJECT_IV_CAPACITY(v));
10129 if (slot_size >= embed_size && !RB_FL_TEST_RAW(v, ROBJECT_EMBED)) {
10131 memcpy(ROBJECT(v)->as.ary, ptr, sizeof(VALUE) * ROBJECT_IV_COUNT(v));
10132 RB_FL_SET_RAW(v, ROBJECT_EMBED);
10133 if (ROBJ_TRANSIENT_P(v)) {
10134 ROBJ_TRANSIENT_UNSET(v);
10143 for (uint32_t i = 0; i < ROBJECT_IV_COUNT(v); i++) {
10144 UPDATE_IF_MOVED(objspace, ptr[i]);
10149hash_replace_ref(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
10153 if (gc_object_moved_p(objspace, (VALUE)*key)) {
10154 *key = rb_gc_location((VALUE)*key);
10157 if (gc_object_moved_p(objspace, (VALUE)*value)) {
10158 *value = rb_gc_location((VALUE)*value);
10161 return ST_CONTINUE;
10165hash_foreach_replace(st_data_t key, st_data_t value, st_data_t argp, int error)
10171 if (gc_object_moved_p(objspace, (VALUE)key)) {
10175 if (gc_object_moved_p(objspace, (VALUE)value)) {
10178 return ST_CONTINUE;
10182hash_replace_ref_value(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
10186 if (gc_object_moved_p(objspace, (VALUE)*value)) {
10187 *value = rb_gc_location((VALUE)*value);
10190 return ST_CONTINUE;
10194hash_foreach_replace_value(st_data_t key, st_data_t value, st_data_t argp, int error)
10200 if (gc_object_moved_p(objspace, (VALUE)value)) {
10203 return ST_CONTINUE;
10209 if (!tbl || tbl->num_entries == 0) return;
10211 if (st_foreach_with_replace(tbl, hash_foreach_replace_value, hash_replace_ref_value, (st_data_t)objspace)) {
10219 if (!tbl || tbl->num_entries == 0) return;
10221 if (st_foreach_with_replace(tbl, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace)) {
10231 gc_update_table_refs(objspace, ptr);
10237 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
10245 UPDATE_IF_MOVED(objspace, me->owner);
10246 UPDATE_IF_MOVED(objspace, me->defined_class);
10249 switch (def->type) {
10250 case VM_METHOD_TYPE_ISEQ:
10251 if (def->body.iseq.iseqptr) {
10254 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, def->body.iseq.cref);
10256 case VM_METHOD_TYPE_ATTRSET:
10257 case VM_METHOD_TYPE_IVAR:
10258 UPDATE_IF_MOVED(objspace, def->body.attr.location);
10260 case VM_METHOD_TYPE_BMETHOD:
10261 UPDATE_IF_MOVED(objspace, def->body.bmethod.proc);
10263 case VM_METHOD_TYPE_ALIAS:
10266 case VM_METHOD_TYPE_REFINED:
10268 UPDATE_IF_MOVED(objspace, def->body.refined.owner);
10270 case VM_METHOD_TYPE_CFUNC:
10271 case VM_METHOD_TYPE_ZSUPER:
10272 case VM_METHOD_TYPE_MISSING:
10273 case VM_METHOD_TYPE_OPTIMIZED:
10274 case VM_METHOD_TYPE_UNDEF:
10275 case VM_METHOD_TYPE_NOTIMPLEMENTED:
10286 for (i=0; i<n; i++) {
10287 UPDATE_IF_MOVED(objspace, values[i]);
10294 switch (imemo_type(obj)) {
10298 if (LIKELY(env->ep)) {
10300 TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, env->iseq);
10301 UPDATE_IF_MOVED(objspace, env->ep[VM_ENV_DATA_INDEX_ENV]);
10302 gc_update_values(objspace, (long)env->env_size, (VALUE *)env->env);
10307 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
10308 TYPED_UPDATE_IF_MOVED(objspace, struct rb_cref_struct *, RANY(obj)->as.imemo.cref.next);
10309 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.refinements);
10312 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
10313 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.lastline);
10314 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.backref);
10315 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.others);
10317 case imemo_throw_data:
10318 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
10323 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v1);
10324 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v2);
10327 gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
10330 rb_iseq_update_references((rb_iseq_t *)obj);
10333 rb_ast_update_references((rb_ast_t *)obj);
10335 case imemo_callcache:
10339 UPDATE_IF_MOVED(objspace, cc->klass);
10340 if (!is_live_object(objspace, cc->klass)) {
10347 if (!is_live_object(objspace, (VALUE)cc->cme_)) {
10353 case imemo_constcache:
10356 UPDATE_IF_MOVED(objspace, ice->value);
10359 case imemo_parser_strterm:
10361 case imemo_callinfo:
10364 rb_bug("not reachable %d", imemo_type(obj));
10369static enum rb_id_table_iterator_result
10370check_id_table_move(VALUE value, void *data)
10374 if (gc_object_moved_p(objspace, (VALUE)value)) {
10375 return ID_TABLE_REPLACE;
10378 return ID_TABLE_CONTINUE;
10390 void *poisoned = asan_unpoison_object_temporary(value);
10393 destination = (VALUE)RMOVED(value)->destination;
10397 destination = value;
10403 asan_poison_object(value);
10407 destination = value;
10410 return destination;
10413static enum rb_id_table_iterator_result
10414update_id_table(VALUE *value, void *data, int existing)
10418 if (gc_object_moved_p(objspace, (VALUE)*value)) {
10419 *value = rb_gc_location((VALUE)*value);
10422 return ID_TABLE_CONTINUE;
10429 rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
10433static enum rb_id_table_iterator_result
10434update_cc_tbl_i(VALUE ccs_ptr, void *data)
10438 VM_ASSERT(vm_ccs_p(ccs));
10440 if (gc_object_moved_p(objspace, (VALUE)ccs->cme)) {
10444 for (int i = 0; i < ccs->len; i++) {
10445 if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].ci)) {
10446 ccs->entries[i].ci = (struct rb_callinfo *)rb_gc_location((VALUE)ccs->entries[i].ci);
10448 if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
10449 ccs->entries[i].cc = (struct rb_callcache *)rb_gc_location((VALUE)ccs->entries[i].cc);
10454 return ID_TABLE_CONTINUE;
10462 rb_id_table_foreach_values(tbl, update_cc_tbl_i, objspace);
10466static enum rb_id_table_iterator_result
10467update_cvc_tbl_i(VALUE cvc_entry, void *data)
10473 entry->class_value = rb_gc_location(entry->class_value);
10475 return ID_TABLE_CONTINUE;
10483 rb_id_table_foreach_values(tbl, update_cvc_tbl_i, objspace);
10487static enum rb_id_table_iterator_result
10488update_const_table(VALUE value, void *data)
10493 if (gc_object_moved_p(objspace, ce->value)) {
10494 ce->value = rb_gc_location(ce->value);
10497 if (gc_object_moved_p(objspace, ce->file)) {
10498 ce->file = rb_gc_location(ce->file);
10501 return ID_TABLE_CONTINUE;
10508 rb_id_table_foreach_values(tbl, update_const_table, objspace);
10515 UPDATE_IF_MOVED(objspace, entry->klass);
10516 entry = entry->next;
10523 UPDATE_IF_MOVED(objspace, ext->origin_);
10524 UPDATE_IF_MOVED(objspace, ext->includer);
10525 UPDATE_IF_MOVED(objspace, ext->refined_class);
10526 update_subclass_entries(objspace, ext->subclasses);
10532 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
10533 for (size_t i = 0; i < RCLASS_SUPERCLASS_DEPTH(obj) + 1; i++) {
10534 UPDATE_IF_MOVED(objspace, RCLASS_SUPERCLASSES(obj)[i]);
10542 RVALUE *any = RANY(obj);
10544 gc_report(4, objspace, "update-refs: %p ->\n", (void *)obj);
10550 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
10552 if (!RCLASS_EXT(obj)) break;
10553 update_m_tbl(objspace, RCLASS_M_TBL(obj));
10554 update_cc_tbl(objspace, obj);
10555 update_cvc_tbl(objspace, obj);
10556 update_superclasses(objspace, obj);
10558 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
10559 UPDATE_IF_MOVED(objspace, RCLASS_IVPTR(obj)[i]);
10562 update_class_ext(objspace, RCLASS_EXT(obj));
10563 update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
10567 if (FL_TEST(obj, RICLASS_IS_ORIGIN) &&
10568 !FL_TEST(obj, RICLASS_ORIGIN_SHARED_MTBL)) {
10569 update_m_tbl(objspace, RCLASS_M_TBL(obj));
10572 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
10574 if (!RCLASS_EXT(obj)) break;
10575 update_class_ext(objspace, RCLASS_EXT(obj));
10576 update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
10577 update_cc_tbl(objspace, obj);
10581 gc_ref_update_imemo(objspace, obj);
10593 gc_ref_update_array(objspace, obj);
10597 gc_ref_update_hash(objspace, obj);
10598 UPDATE_IF_MOVED(objspace, any->as.hash.ifnone);
10606 if (STR_SHARED_P(obj)) {
10613 rb_str_update_shared_ary(obj, old_root, new_root);
10620 if (rb_gc_obj_slot_size(obj) >= rb_str_size_as_embedded(obj)) {
10621 if (!STR_EMBED_P(obj) && rb_str_reembeddable_p(obj)) {
10622 rb_str_make_embedded(obj);
10634 if (RTYPEDDATA_P(obj)) {
10636 if (compact_func) (*compact_func)(ptr);
10643 gc_ref_update_object(objspace, obj);
10647 if (any->as.file.fptr) {
10648 UPDATE_IF_MOVED(objspace, any->as.file.fptr->self);
10649 UPDATE_IF_MOVED(objspace, any->as.file.fptr->pathv);
10658 UPDATE_IF_MOVED(objspace, any->as.regexp.src);
10663 UPDATE_IF_MOVED(objspace, RSYMBOL(any)->fstr);
10672 UPDATE_IF_MOVED(objspace, any->as.match.regexp);
10674 if (any->as.match.str) {
10675 UPDATE_IF_MOVED(objspace, any->as.match.str);
10680 UPDATE_IF_MOVED(objspace, any->as.rational.num);
10681 UPDATE_IF_MOVED(objspace, any->as.rational.den);
10685 UPDATE_IF_MOVED(objspace, any->as.complex.real);
10686 UPDATE_IF_MOVED(objspace, any->as.complex.imag);
10692 long i, len = RSTRUCT_LEN(obj);
10693 VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
10695 for (i = 0; i < len; i++) {
10696 UPDATE_IF_MOVED(objspace, ptr[i]);
10702 rb_gcdebug_print_obj_condition((VALUE)obj);
10703 rb_obj_info_dump(obj);
10710 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
10712 gc_report(4, objspace, "update-refs: %p <-\n", (void *)obj);
10719 asan_unlock_freelist(page);
10720 asan_lock_freelist(page);
10721 page->flags.has_uncollectible_shady_objects = FALSE;
10722 page->flags.has_remembered_objects = FALSE;
10725 for (; v != (VALUE)vend; v += stride) {
10726 void *poisoned = asan_unpoison_object_temporary(v);
10734 if (RVALUE_WB_UNPROTECTED(v)) {
10735 page->flags.has_uncollectible_shady_objects = TRUE;
10737 if (RVALUE_PAGE_MARKING(page, v)) {
10738 page->flags.has_remembered_objects = TRUE;
10740 if (page->flags.before_sweep) {
10741 if (RVALUE_MARKED(v)) {
10742 gc_update_object_references(objspace, v);
10746 gc_update_object_references(objspace, v);
10751 asan_poison_object(v);
10759#define global_symbols ruby_global_symbols
10765 rb_vm_t *vm = rb_ec_vm_ptr(ec);
10769 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
10770 bool should_set_mark_bits = TRUE;
10772 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
10774 ccan_list_for_each(&heap->pages, page, page_node) {
10775 uintptr_t start = (uintptr_t)page->start;
10776 uintptr_t end = start + (page->total_slots * size_pool->slot_size);
10778 gc_ref_update((void *)start, (void *)end, size_pool->slot_size, objspace, page);
10779 if (page == heap->sweeping_page) {
10780 should_set_mark_bits = FALSE;
10782 if (should_set_mark_bits) {
10783 gc_setup_mark_bits(page);
10787 rb_vm_update_references(vm);
10788 rb_transient_heap_update_references();
10789 rb_gc_update_global_tbl();
10790 global_symbols.ids = rb_gc_location(global_symbols.ids);
10791 global_symbols.dsymbol_fstr_hash = rb_gc_location(global_symbols.dsymbol_fstr_hash);
10792 gc_update_tbl_refs(objspace, objspace->obj_to_id_tbl);
10793 gc_update_table_refs(objspace, objspace->id_to_obj_tbl);
10794 gc_update_table_refs(objspace, global_symbols.str_sym);
10795 gc_update_table_refs(objspace, finalizer_table);
10798#if GC_CAN_COMPILE_COMPACTION
10812gc_compact_stats(VALUE self)
10816 VALUE h = rb_hash_new();
10817 VALUE considered = rb_hash_new();
10818 VALUE moved = rb_hash_new();
10819 VALUE moved_up = rb_hash_new();
10820 VALUE moved_down = rb_hash_new();
10822 for (i=0; i<T_MASK; i++) {
10823 if (objspace->rcompactor.considered_count_table[i]) {
10824 rb_hash_aset(considered, type_sym(i),
SIZET2NUM(objspace->rcompactor.considered_count_table[i]));
10827 if (objspace->rcompactor.moved_count_table[i]) {
10828 rb_hash_aset(moved, type_sym(i),
SIZET2NUM(objspace->rcompactor.moved_count_table[i]));
10831 if (objspace->rcompactor.moved_up_count_table[i]) {
10832 rb_hash_aset(moved_up, type_sym(i),
SIZET2NUM(objspace->rcompactor.moved_up_count_table[i]));
10835 if (objspace->rcompactor.moved_down_count_table[i]) {
10836 rb_hash_aset(moved_down, type_sym(i),
SIZET2NUM(objspace->rcompactor.moved_down_count_table[i]));
10840 rb_hash_aset(h, ID2SYM(rb_intern("considered")), considered);
10841 rb_hash_aset(h, ID2SYM(rb_intern("moved")), moved);
10842 rb_hash_aset(h, ID2SYM(rb_intern("moved_up")), moved_up);
10843 rb_hash_aset(h, ID2SYM(rb_intern("moved_down")), moved_down);
10848# define gc_compact_stats rb_f_notimplement
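/*
 * Note (added): the hash built above is what GC.compact and
 * GC.verify_compaction_references return. A hedged example of the shape
 * (type names and counts are illustrative only):
 *
 *   GC.compact
 *   #=> { :considered => { :T_OBJECT => 12345, ... },
 *   #     :moved      => { :T_OBJECT =>  6789, ... },
 *   #     :moved_up   => { ... },
 *   #     :moved_down => { ... } }
 */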
10851#if GC_CAN_COMPILE_COMPACTION
10853root_obj_check_moved_i(const char *category, VALUE obj, void *data)
10856 rb_bug("ROOT %s points to MOVED: %p -> %s\n", category, (void *)obj, obj_info(rb_gc_location(obj)));
10861reachable_object_check_moved_i(VALUE ref, void *data)
10865 rb_bug("Object %s points to MOVED: %p -> %s\n", obj_info(parent), (void *)ref, obj_info(rb_gc_location(ref)));
10870heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
10873 for (; v != (VALUE)vend; v += stride) {
10878 void *poisoned = asan_unpoison_object_temporary(v);
10885 if (!rb_objspace_garbage_object_p(v)) {
10886 rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);
10892 asan_poison_object(v);
10918gc_compact(VALUE self)
10923 return gc_compact_stats(self);
10926# define gc_compact rb_f_notimplement
10929#if GC_CAN_COMPILE_COMPACTION
10937 size_t growth_slots = gc_params.heap_init_slots;
10939 if (RTEST(double_heap)) {
10940 rb_warn("double_heap is deprecated, please use expand_heap instead");
10943 RB_VM_LOCK_ENTER();
10948 if (RTEST(double_heap) || RTEST(expand_heap)) {
10949 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
10951 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
10953 if (RTEST(expand_heap)) {
10954 size_t required_pages = growth_slots / size_pool->slot_size;
10955 heap_add_pages(objspace, size_pool, heap, MAX(required_pages, heap->total_pages));
10958 heap_add_pages(objspace, size_pool, heap, heap->total_pages);
10963 if (RTEST(toward_empty)) {
10964 gc_sort_heap_by_empty_slots(objspace);
10967 RB_VM_LOCK_LEAVE();
10971 objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
10972 objspace_each_objects(objspace, heap_check_moved_i, NULL, TRUE);
10974 return gc_compact_stats(self);
10977# define gc_verify_compaction_references (rb_builtin_arity3_function_type)rb_f_notimplement
10991 unsigned int reason = GPR_DEFAULT_REASON;
10992 garbage_collect(objspace, reason);
11002#if RGENGC_PROFILE >= 2
11004static const char *type_name(int type, VALUE obj);
11007gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
11011 for (i=0; i<T_MASK; i++) {
11012 const char *type = type_name(i, 0);
11015 rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
11032gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const unsigned int orig_flags)
11034 static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state, sym_need_major_by;
11035 static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
11036#if RGENGC_ESTIMATE_OLDMALLOC
11037 static VALUE sym_oldmalloc;
11039 static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
11040 static VALUE sym_none, sym_marking, sym_sweeping;
11042 VALUE major_by, need_major_by;
11043 unsigned int flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;
11048 else if (RB_TYPE_P(hash_or_key, T_HASH)) {
11049 hash = hash_or_key;
11055 if (NIL_P(sym_major_by)) {
11056#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
11059 S(immediate_sweep);
11069#if RGENGC_ESTIMATE_OLDMALLOC
11083#define SET(name, attr) \
11084 if (key == sym_##name) \
11086 else if (hash != Qnil) \
11087 rb_hash_aset(hash, sym_##name, (attr));
11090 (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
11091 (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
11092 (flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
11093 (flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
11094#if RGENGC_ESTIMATE_OLDMALLOC
11095 (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
11098 SET(major_by, major_by);
11100 if (orig_flags == 0) {
11101 unsigned int need_major_flags = objspace->rgengc.need_major_gc;
11103 (need_major_flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
11104 (need_major_flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
11105 (need_major_flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
11106 (need_major_flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
11107#if RGENGC_ESTIMATE_OLDMALLOC
11108 (need_major_flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
11111 SET(need_major_by, need_major_by);
11115 (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
11116 (flags & GPR_FLAG_MALLOC) ? sym_malloc :
11117 (flags & GPR_FLAG_METHOD) ? sym_method :
11118 (flags & GPR_FLAG_CAPI) ? sym_capi :
11119 (flags & GPR_FLAG_STRESS) ? sym_stress :
11123 SET(have_finalizer, RBOOL(flags & GPR_FLAG_HAVE_FINALIZE));
11124 SET(immediate_sweep, RBOOL(flags & GPR_FLAG_IMMEDIATE_SWEEP));
11126 if (orig_flags == 0) {
11127 SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
11128 gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
11143 return gc_info_decode(objspace, key, 0);
11152 arg = rb_hash_new();
11158 return gc_info_decode(objspace, arg, 0);
11164 gc_stat_sym_heap_allocated_pages,
11165 gc_stat_sym_heap_sorted_length,
11166 gc_stat_sym_heap_allocatable_pages,
11167 gc_stat_sym_heap_available_slots,
11168 gc_stat_sym_heap_live_slots,
11169 gc_stat_sym_heap_free_slots,
11170 gc_stat_sym_heap_final_slots,
11171 gc_stat_sym_heap_marked_slots,
11172 gc_stat_sym_heap_eden_pages,
11173 gc_stat_sym_heap_tomb_pages,
11174 gc_stat_sym_total_allocated_pages,
11175 gc_stat_sym_total_freed_pages,
11176 gc_stat_sym_total_allocated_objects,
11177 gc_stat_sym_total_freed_objects,
11178 gc_stat_sym_malloc_increase_bytes,
11179 gc_stat_sym_malloc_increase_bytes_limit,
11180 gc_stat_sym_minor_gc_count,
11181 gc_stat_sym_major_gc_count,
11182 gc_stat_sym_compact_count,
11183 gc_stat_sym_read_barrier_faults,
11184 gc_stat_sym_total_moved_objects,
11185 gc_stat_sym_remembered_wb_unprotected_objects,
11186 gc_stat_sym_remembered_wb_unprotected_objects_limit,
11187 gc_stat_sym_old_objects,
11188 gc_stat_sym_old_objects_limit,
11189#if RGENGC_ESTIMATE_OLDMALLOC
11190 gc_stat_sym_oldmalloc_increase_bytes,
11191 gc_stat_sym_oldmalloc_increase_bytes_limit,
11194 gc_stat_sym_total_generated_normal_object_count,
11195 gc_stat_sym_total_generated_shady_object_count,
11196 gc_stat_sym_total_shade_operation_count,
11197 gc_stat_sym_total_promoted_count,
11198 gc_stat_sym_total_remembered_normal_object_count,
11199 gc_stat_sym_total_remembered_shady_object_count,
11204static VALUE gc_stat_symbols[gc_stat_sym_last];
11207setup_gc_stat_symbols(
void)
11209 if (gc_stat_symbols[0] == 0) {
11210#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
11213 S(heap_allocated_pages);
11214 S(heap_sorted_length);
11215 S(heap_allocatable_pages);
11216 S(heap_available_slots);
11217 S(heap_live_slots);
11218 S(heap_free_slots);
11219 S(heap_final_slots);
11220 S(heap_marked_slots);
11221 S(heap_eden_pages);
11222 S(heap_tomb_pages);
11223 S(total_allocated_pages);
11224 S(total_freed_pages);
11225 S(total_allocated_objects);
11226 S(total_freed_objects);
11227 S(malloc_increase_bytes);
11228 S(malloc_increase_bytes_limit);
11232 S(read_barrier_faults);
11233 S(total_moved_objects);
11234 S(remembered_wb_unprotected_objects);
11235 S(remembered_wb_unprotected_objects_limit);
11237 S(old_objects_limit);
11238#if RGENGC_ESTIMATE_OLDMALLOC
11239 S(oldmalloc_increase_bytes);
11240 S(oldmalloc_increase_bytes_limit);
11243 S(total_generated_normal_object_count);
11244 S(total_generated_shady_object_count);
11245 S(total_shade_operation_count);
11246 S(total_promoted_count);
11247 S(total_remembered_normal_object_count);
11248 S(total_remembered_shady_object_count);
11255gc_stat_internal(VALUE hash_or_sym)
11260 setup_gc_stat_symbols();
11262 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
11263 hash = hash_or_sym;
11272#define SET(name, attr) \
11273 if (key == gc_stat_symbols[gc_stat_sym_##name]) \
11275 else if (hash != Qnil) \
11276 rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
11278 SET(count, objspace->profile.count);
11279 SET(time, (size_t)(objspace->profile.total_time_ns / (1000 * 1000)));
11282 SET(heap_allocated_pages, heap_allocated_pages);
11283 SET(heap_sorted_length, heap_pages_sorted_length);
11284 SET(heap_allocatable_pages, heap_allocatable_pages(objspace));
11285 SET(heap_available_slots, objspace_available_slots(objspace));
11286 SET(heap_live_slots, objspace_live_slots(objspace));
11287 SET(heap_free_slots, objspace_free_slots(objspace));
11288 SET(heap_final_slots, heap_pages_final_slots);
11289 SET(heap_marked_slots, objspace->marked_slots);
11290 SET(heap_eden_pages, heap_eden_total_pages(objspace));
11291 SET(heap_tomb_pages, heap_tomb_total_pages(objspace));
11292 SET(total_allocated_pages, total_allocated_pages(objspace));
11293 SET(total_freed_pages, total_freed_pages(objspace));
11294 SET(total_allocated_objects, objspace->total_allocated_objects);
11295 SET(total_freed_objects, objspace->profile.total_freed_objects);
11296 SET(malloc_increase_bytes, malloc_increase);
11297 SET(malloc_increase_bytes_limit, malloc_limit);
11298 SET(minor_gc_count, objspace->profile.minor_gc_count);
11299 SET(major_gc_count, objspace->profile.major_gc_count);
11300 SET(compact_count, objspace->profile.compact_count);
11301 SET(read_barrier_faults, objspace->profile.read_barrier_faults);
11302 SET(total_moved_objects, objspace->rcompactor.total_moved);
11303 SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
11304 SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
11305 SET(old_objects, objspace->rgengc.old_objects);
11306 SET(old_objects_limit, objspace->rgengc.old_objects_limit);
11307#if RGENGC_ESTIMATE_OLDMALLOC
11308 SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
11309 SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
11313 SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
11314 SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
11315 SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
11316 SET(total_promoted_count, objspace->profile.total_promoted_count);
11317 SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
11318 SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
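/*
 * Note (added): gc_stat_internal() backs GC.stat. With a Symbol key it returns
 * a single counter; with a Hash (or no argument) it fills in every key listed
 * above. Hedged Ruby-level examples:
 *
 *   GC.stat[:heap_live_slots]   # build the full hash, then index it
 *   GC.stat(:count)             # single value, avoids building the hash
 *   GC.stat(:time)              # accumulated GC time in milliseconds
 */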
11326#if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
11327 if (hash != Qnil) {
11328 gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
11329 gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
11330 gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
11331 gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
11332 gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
11333 gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
11344 arg = rb_hash_new();
11347 size_t value = gc_stat_internal(arg);
11350 else if (RB_TYPE_P(arg, T_HASH)) {
11357 gc_stat_internal(arg);
11365 size_t value = gc_stat_internal(key);
11369 gc_stat_internal(key);
11375enum gc_stat_heap_sym {
11376 gc_stat_heap_sym_slot_size,
11377 gc_stat_heap_sym_heap_allocatable_pages,
11378 gc_stat_heap_sym_heap_eden_pages,
11379 gc_stat_heap_sym_heap_eden_slots,
11380 gc_stat_heap_sym_heap_tomb_pages,
11381 gc_stat_heap_sym_heap_tomb_slots,
11382 gc_stat_heap_sym_total_allocated_pages,
11383 gc_stat_heap_sym_total_freed_pages,
11384 gc_stat_heap_sym_force_major_gc_count,
11385 gc_stat_heap_sym_last
11388static VALUE gc_stat_heap_symbols[gc_stat_heap_sym_last];
11391setup_gc_stat_heap_symbols(
void)
11393 if (gc_stat_heap_symbols[0] == 0) {
11394#define S(s) gc_stat_heap_symbols[gc_stat_heap_sym_##s] = ID2SYM(rb_intern_const(#s))
11396 S(heap_allocatable_pages);
11397 S(heap_eden_pages);
11398 S(heap_eden_slots);
11399 S(heap_tomb_pages);
11400 S(heap_tomb_slots);
11401 S(total_allocated_pages);
11402 S(total_freed_pages);
11403 S(force_major_gc_count);
11409gc_stat_heap_internal(int size_pool_idx, VALUE hash_or_sym)
11414 setup_gc_stat_heap_symbols();
11416 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
11417 hash = hash_or_sym;
11426 if (size_pool_idx < 0 || size_pool_idx >= SIZE_POOL_COUNT) {
11432#define SET(name, attr) \
11433 if (key == gc_stat_heap_symbols[gc_stat_heap_sym_##name]) \
11435 else if (hash != Qnil) \
11436 rb_hash_aset(hash, gc_stat_heap_symbols[gc_stat_heap_sym_##name], SIZET2NUM(attr));
11438 SET(slot_size, size_pool->slot_size);
11439 SET(heap_allocatable_pages, size_pool->allocatable_pages);
11440 SET(heap_eden_pages, SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
11441 SET(heap_eden_slots, SIZE_POOL_EDEN_HEAP(size_pool)->total_slots);
11442 SET(heap_tomb_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
11443 SET(heap_tomb_slots, SIZE_POOL_TOMB_HEAP(size_pool)->total_slots);
11444 SET(total_allocated_pages, size_pool->total_allocated_pages);
11445 SET(total_freed_pages, size_pool->total_freed_pages);
11446 SET(force_major_gc_count, size_pool->force_major_gc_count);
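/*
 * Note (added): gc_stat_heap_internal() backs GC.stat_heap, reporting the
 * per-size-pool counters set above. Hedged Ruby-level examples:
 *
 *   GC.stat_heap                      # => { 0 => {...}, 1 => {...}, ... }
 *   GC.stat_heap(0)[:slot_size]       # stats hash for size pool 0
 *   GC.stat_heap(0, :heap_eden_pages) # single counter for size pool 0
 */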
11459 if (NIL_P(heap_name)) {
11461 arg = rb_hash_new();
11463 else if (RB_TYPE_P(arg, T_HASH)) {
11470 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
11473 hash = rb_hash_new();
11474 rb_hash_aset(arg, INT2FIX(i), hash);
11476 gc_stat_heap_internal(i, hash);
11480 int size_pool_idx = FIX2INT(heap_name);
11483 arg = rb_hash_new();
11486 size_t value = gc_stat_heap_internal(size_pool_idx, arg);
11489 else if (RB_TYPE_P(arg, T_HASH)) {
11496 gc_stat_heap_internal(size_pool_idx, arg);
11509 return ruby_gc_stress_mode;
11515 objspace->flags.gc_stressful = RTEST(flag);
11516 objspace->gc_stress_mode = flag;
11523 gc_stress_set(objspace, flag);
11531 return rb_objspace_gc_enable(objspace);
11537 int old = dont_gc_val();
11546 return rb_gc_enable();
11550rb_gc_disable_no_rest(void)
11553 return gc_disable_no_rest(objspace);
11559 int old = dont_gc_val();
11568 return rb_objspace_gc_disable(objspace);
11575 return gc_disable_no_rest(objspace);
11581 return rb_gc_disable();
11584#if GC_CAN_COMPILE_COMPACTION
11598 GC_ASSERT(GC_COMPACTION_SUPPORTED);
11600 ruby_enable_autocompact = RTEST(v);
11604# define gc_set_auto_compact rb_f_notimplement
11607#if GC_CAN_COMPILE_COMPACTION
11615gc_get_auto_compact(VALUE _)
11617 return RBOOL(ruby_enable_autocompact);
11620# define gc_get_auto_compact rb_f_notimplement
11624get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
11626 const char *ptr = getenv(name);
11629 if (ptr != NULL && *ptr) {
11632#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
11633 val = strtoll(ptr, &end, 0);
11635 val = strtol(ptr, &end, 0);
11638 case 'k': case 'K':
11642 case 'm': case 'M':
11646 case 'g': case 'G':
11647 unit = 1024*1024*1024;
11651 while (*end && isspace((unsigned char)*end)) end++;
11653 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
11657 if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
11658 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
11663 if (val > 0 && (size_t)val > lower_bound) {
11665 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
11667 *default_value = (size_t)val;
11672 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
11673 name, val, *default_value, lower_bound);
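/*
 * Note (added): get_envparam_size() accepts an optional unit suffix
 * ('k'/'K', 'm'/'M', 'g'/'G', powers of 1024) and rejects values that
 * overflow or do not exceed lower_bound. Hedged shell examples:
 *
 *   RUBY_GC_HEAP_INIT_SLOTS=600000 ruby app.rb
 *   RUBY_GC_MALLOC_LIMIT=64m ruby app.rb   # 64 * 1024 * 1024 bytes
 *
 * With $VERBOSE enabled, the accepted value is echoed to stderr as
 * "NAME=value (default value: ...)".
 */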
11682get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
11684 const char *ptr = getenv(name);
11687 if (ptr != NULL && *ptr) {
11689 val = strtod(ptr, &end);
11690 if (!*ptr || *end) {
11691 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
11695 if (accept_zero && val == 0.0) {
11698 else if (val <= lower_bound) {
11700 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
11701 name, val, *default_value, lower_bound);
11704 else if (upper_bound != 0.0 &&
11705 val > upper_bound) {
11707 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
11708 name, val, *default_value, upper_bound);
11718 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
11719 *default_value = val;
11728 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
11731 if (gc_params.heap_init_slots > size_pool->eden_heap.total_slots) {
11732 size_t slots = gc_params.heap_init_slots - size_pool->eden_heap.total_slots;
11733 int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
11734 size_pool->allocatable_pages = slots * multiple / HEAP_PAGE_OBJ_LIMIT;
11739 size_pool->allocatable_pages = 0;
11742 heap_pages_expand_sorted(objspace);
11788ruby_gc_set_params(void)
11792 if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
11797 if (get_envparam_size("RUBY_GC_HEAP_INIT_SLOTS", &gc_params.heap_init_slots, 0)) {
11798 gc_set_initial_pages(objspace);
11801 get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
11802 get_envparam_size ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
11803 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
11805 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
11806 gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
11807 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
11808 gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
11809 get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);
11811 if (get_envparam_size("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0)) {
11812 malloc_limit = gc_params.malloc_limit_min;
11814 get_envparam_size ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
11815 if (!gc_params.malloc_limit_max) {
11816 gc_params.malloc_limit_max = SIZE_MAX;
11818 get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);
11820#if RGENGC_ESTIMATE_OLDMALLOC
11821 if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
11822 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
11824 get_envparam_size ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
11825 get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
11830reachable_objects_from_callback(VALUE obj)
11833 cr->mfd->mark_func(obj, cr->mfd->data);
11837rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
11841 RB_VM_LOCK_ENTER();
11843 if (during_gc) rb_bug("rb_objspace_reachable_objects_from() is not supported while during_gc == true");
11845 if (is_markable_object(objspace, obj)) {
11847 struct gc_mark_func_data_struct mfd = {
11850 }, *prev_mfd = cr->mfd;
11853 gc_mark_children(objspace, obj);
11854 cr->mfd = prev_mfd;
11857 RB_VM_LOCK_LEAVE();
11861 const char *category;
11862 void (*func)(const char *category, VALUE, void *);
11867root_objects_from(VALUE obj, void *ptr)
11870 (*data->func)(data->category, obj, data->data);
11874rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
11877 objspace_reachable_objects_from_root(objspace, func, passing_data);
11881objspace_reachable_objects_from_root(rb_objspace_t *objspace, void (func)(const char *category, VALUE, void *), void *passing_data)
11883 if (during_gc) rb_bug("objspace_reachable_objects_from_root() is not supported while during_gc == true");
11888 .data = passing_data,
11890 struct gc_mark_func_data_struct mfd = {
11891 .mark_func = root_objects_from,
11893 }, *prev_mfd = cr->mfd;
11896 gc_mark_roots(objspace, &data.category);
11897 cr->mfd = prev_mfd;
11911gc_vraise(void *ptr)
11914 rb_vraise(argv->exc, argv->fmt, *argv->ap);
11919gc_raise(VALUE exc, const char *fmt, ...)
11927 if (ruby_thread_has_gvl_p()) {
11937 fprintf(stderr, "%s", "[FATAL] ");
11938 vfprintf(stderr, fmt, ap);
11945static void objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t size);
11948negative_size_allocation_error(const char *msg)
11954ruby_memerror_body(void *dummy)
11960NORETURN(static void ruby_memerror(void));
11965 if (ruby_thread_has_gvl_p()) {
11974 fprintf(stderr, "[FATAL] failed to allocate memory\n");
11977 exit(EXIT_FAILURE);
11984 rb_objspace_t *objspace = rb_objspace_of(rb_ec_vm_ptr(ec));
11995 gc_exit(objspace, gc_enter_event_rb_memerror, NULL);
12000 rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
12001 fprintf(stderr,
"[FATAL] failed to allocate memory\n");
12002 exit(EXIT_FAILURE);
12004 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
12005 rb_ec_raised_clear(ec);
12008 rb_ec_raised_set(ec, RAISED_NOMEMORY);
12009 exc = ruby_vm_special_exception_copy(exc);
12012 EC_JUMP_TAG(ec, TAG_RAISE);
12016rb_aligned_malloc(size_t alignment, size_t size)
12019 GC_ASSERT(((alignment - 1) & alignment) == 0);
12020 GC_ASSERT(alignment % sizeof(void*) == 0);
12024#if defined __MINGW32__
12025 res = __mingw_aligned_malloc(size, alignment);
12026#elif defined _WIN32
12027 void *_aligned_malloc(size_t, size_t);
12028 res = _aligned_malloc(size, alignment);
12029#elif defined(HAVE_POSIX_MEMALIGN)
12030 if (posix_memalign(&res, alignment, size) != 0) {
12033#elif defined(HAVE_MEMALIGN)
12034 res = memalign(alignment, size);
12037 res = malloc(alignment + size + sizeof(void*));
12038 aligned = (char*)res + alignment + sizeof(void*);
12039 aligned -= ((VALUE)aligned & (alignment - 1));
12040 ((void**)aligned)[-1] = res;
12041 res = (void*)aligned;
12044 GC_ASSERT((uintptr_t)res % alignment == 0);
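/*
 * Note (added): in the final fallback branch above (no posix_memalign /
 * memalign / _aligned_malloc), alignment is achieved by over-allocating
 * alignment + size + sizeof(void*) bytes, rounding the pointer down to the
 * alignment boundary, and stashing the original malloc() result in the word
 * just below the aligned pointer; rb_aligned_free() below reads it back with
 * ((void**)ptr)[-1]. This is why the alignment must be a power of two and a
 * multiple of sizeof(void*), as asserted at the top of the function.
 */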
12050rb_aligned_free(void *ptr, size_t size)
12052#if defined __MINGW32__
12053 __mingw_aligned_free(ptr);
12054#elif defined _WIN32
12055 _aligned_free(ptr);
12056#elif defined(HAVE_POSIX_MEMALIGN) || defined(HAVE_MEMALIGN)
12059 free(((void**)ptr)[-1]);
12063static inline size_t
12064objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
12066#ifdef HAVE_MALLOC_USABLE_SIZE
12067 return malloc_usable_size(ptr);
12074 MEMOP_TYPE_MALLOC = 0,
12080atomic_sub_nounderflow(size_t *var, size_t sub)
12082 if (sub == 0) return;
12086 if (val < sub) sub = val;
12087 if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
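/*
 * Note (added): atomic_sub_nounderflow() is a saturating atomic subtraction:
 * it re-reads *var, clamps the amount to the current value, and retries the
 * compare-and-swap until it wins, so the counter can never wrap below zero.
 * A hedged equivalent of the loop body:
 *
 *   for (;;) {
 *       size_t val = *var;
 *       size_t d = (val < sub) ? val : sub;
 *       if (ATOMIC_SIZE_CAS(*var, val, val - d) == val) break;
 *   }
 */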
12095 unsigned int reason = (GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
12096 GPR_FLAG_STRESS | GPR_FLAG_MALLOC);
12098 if (gc_stress_full_mark_after_malloc_p()) {
12099 reason |= GPR_FLAG_FULL_MARK;
12101 garbage_collect_with_gvl(objspace, reason);
12106objspace_malloc_increase_report(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
12108 if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %"PRIdSIZE", old_size: %"PRIdSIZE"\n",
12110 type == MEMOP_TYPE_MALLOC ? "malloc" :
12111 type == MEMOP_TYPE_FREE ? "free " :
12112 type == MEMOP_TYPE_REALLOC ? "realloc": "error",
12113 new_size, old_size);
12118objspace_malloc_increase_body(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
12120 if (new_size > old_size) {
12121 ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
12122#if RGENGC_ESTIMATE_OLDMALLOC
12123 ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
12127 atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
12128#if RGENGC_ESTIMATE_OLDMALLOC
12129 atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
12133 if (type == MEMOP_TYPE_MALLOC) {
12136 if (ruby_thread_has_gvl_p() && is_lazy_sweeping(objspace)) {
12140 garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
12144#if MALLOC_ALLOCATED_SIZE
12145 if (new_size >= old_size) {
12146 ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
12149 size_t dec_size = old_size - new_size;
12150 size_t allocated_size = objspace->malloc_params.allocated_size;
12152#if MALLOC_ALLOCATED_SIZE_CHECK
12153 if (allocated_size < dec_size) {
12154 rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
12157 atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
12161 case MEMOP_TYPE_MALLOC:
12162 ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
12164 case MEMOP_TYPE_FREE:
12166 size_t allocations = objspace->malloc_params.allocations;
12167 if (allocations > 0) {
12168 atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
12170#if MALLOC_ALLOCATED_SIZE_CHECK
12172 GC_ASSERT(objspace->malloc_params.allocations > 0);
12177 case MEMOP_TYPE_REALLOC: break;
12183#define objspace_malloc_increase(...) \
12184 for (bool malloc_increase_done = objspace_malloc_increase_report(__VA_ARGS__); \
12185 !malloc_increase_done; \
12186 malloc_increase_done = objspace_malloc_increase_body(__VA_ARGS__))
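/*
 * Note (added): objspace_malloc_increase() is deliberately written as a
 * degenerate for-loop so it can wrap an optional block: the "report" call runs
 * once before the attached statement and the "body" call runs once after it,
 * e.g.
 *
 *   objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE) {
 *       free(ptr);
 *   }
 *
 * as used in objspace_xfree() below; with a bare semicolon it behaves like a
 * plain statement.
 */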
12190#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12197#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12198const char *ruby_malloc_info_file;
12199int ruby_malloc_info_line;
12202static inline size_t
12203objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
12205 if (size == 0) size = 1;
12207#if CALC_EXACT_MALLOC_SIZE
12221 return during_gc && !rb_multi_ractor_p() && ruby_thread_has_gvl_p();
12224static inline void *
12225objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
12227 size = objspace_malloc_size(objspace, mem, size);
12228 objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);
12230#if CALC_EXACT_MALLOC_SIZE
12234#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12235 info->gen = objspace->profile.count;
12236 info->file = ruby_malloc_info_file;
12237 info->line = info->file ? ruby_malloc_info_line : 0;
12246#if defined(__GNUC__) && RUBY_DEBUG
12247#define RB_BUG_INSTEAD_OF_RB_MEMERROR 1
12250#ifndef RB_BUG_INSTEAD_OF_RB_MEMERROR
12251# define RB_BUG_INSTEAD_OF_RB_MEMERROR 0
12254#define GC_MEMERROR(...) \
12255 ((RB_BUG_INSTEAD_OF_RB_MEMERROR+0) ? rb_bug("" __VA_ARGS__) : rb_memerror())
12257#define TRY_WITH_GC(siz, expr) do { \
12258 const gc_profile_record_flag gpr = \
12259 GPR_FLAG_FULL_MARK | \
12260 GPR_FLAG_IMMEDIATE_MARK | \
12261 GPR_FLAG_IMMEDIATE_SWEEP | \
12263 objspace_malloc_gc_stress(objspace); \
12265 if (LIKELY((expr))) { \
12268 else if (!garbage_collect_with_gvl(objspace, gpr)) { \
12270 GC_MEMERROR("TRY_WITH_GC: could not GC"); \
12272 else if ((expr)) { \
12276 GC_MEMERROR("TRY_WITH_GC: could not allocate:" \
12277 "%"PRIdSIZE" bytes for %s", \
12288 if (UNLIKELY(malloc_during_gc_p(objspace))) {
12289 rb_warn("malloc during GC detected, this could cause crashes if it triggers another GC");
12290#if RGENGC_CHECK_MODE || RUBY_DEBUG
12291 rb_bug("Cannot malloc during GC");
12297 size = objspace_malloc_prepare(objspace, size);
12298 TRY_WITH_GC(size, mem = malloc(size));
12299 RB_DEBUG_COUNTER_INC(heap_xmalloc);
12300 return objspace_malloc_fixup(objspace, mem, size);
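/*
 * Note (added): TRY_WITH_GC() implements the "allocate, and on failure run a
 * full GC and retry once" policy used by the xmalloc family: if the retry also
 * fails it reports an out-of-memory condition via GC_MEMERROR() (rb_memerror()
 * or rb_bug(), depending on RB_BUG_INSTEAD_OF_RB_MEMERROR). objspace_xmalloc0()
 * above is the typical caller:
 *
 *   size = objspace_malloc_prepare(objspace, size);
 *   TRY_WITH_GC(size, mem = malloc(size));
 *   return objspace_malloc_fixup(objspace, mem, size);
 */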
12303static inline size_t
12304xmalloc2_size(const size_t count, const size_t elsize)
12306 return size_mul_or_raise(count, elsize, rb_eArgError);
12310objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
12312 if (UNLIKELY(malloc_during_gc_p(objspace))) {
12313 rb_warn("realloc during GC detected, this could cause crashes if it triggers another GC");
12314#if RGENGC_CHECK_MODE || RUBY_DEBUG
12315 rb_bug("Cannot realloc during GC");
12321 if (!ptr) return objspace_xmalloc0(objspace, new_size);
12328 if (new_size == 0) {
12329 if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {
12352 objspace_xfree(objspace, ptr, old_size);
12366#if CALC_EXACT_MALLOC_SIZE
12371 old_size = info->size;
12375 old_size = objspace_malloc_size(objspace, ptr, old_size);
12377 new_size = objspace_malloc_size(objspace, mem, new_size);
12379#if CALC_EXACT_MALLOC_SIZE
12382 info->size = new_size;
12387 objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);
12389 RB_DEBUG_COUNTER_INC(heap_xrealloc);
12393#if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS
12395#define MALLOC_INFO_GEN_SIZE 100
12396#define MALLOC_INFO_SIZE_SIZE 10
12397static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
12398static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
12399static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
12400static st_table *malloc_info_file_table;
12403mmalloc_info_file_i(st_data_t key, st_data_t val, st_data_t dmy)
12405 const char *file = (void *)key;
12406 const size_t *data = (void *)val;
12408 fprintf(stderr, "%s\t%"PRIdSIZE"\t%"PRIdSIZE"\n", file, data[0], data[1]);
12410 return ST_CONTINUE;
12415rb_malloc_info_show_results(void)
12419 fprintf(stderr, "* malloc_info gen statistics\n");
12420 for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
12421 if (i == MALLOC_INFO_GEN_SIZE-1) {
12422 fprintf(stderr, "more\t%"PRIdSIZE"\t%"PRIdSIZE"\n", malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
12425 fprintf(stderr, "%d\t%"PRIdSIZE"\t%"PRIdSIZE"\n", i, malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
12429 fprintf(stderr, "* malloc_info size statistics\n");
12430 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
12432 fprintf(stderr, "%d\t%"PRIdSIZE"\n", s, malloc_info_size[i]);
12434 fprintf(stderr, "more\t%"PRIdSIZE"\n", malloc_info_size[i]);
12436 if (malloc_info_file_table) {
12437 fprintf(stderr, "* malloc_info file statistics\n");
12438 st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
12443rb_malloc_info_show_results(void)
12449objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
12458#if CALC_EXACT_MALLOC_SIZE
12461 old_size = info->size;
12463#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12465 int gen = (int)(objspace->profile.count - info->gen);
12466 int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
12469 malloc_info_gen_cnt[gen_index]++;
12470 malloc_info_gen_size[gen_index] += info->size;
12472 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
12473 size_t s = 16 << i;
12474 if (info->size <= s) {
12475 malloc_info_size[i]++;
12479 malloc_info_size[i]++;
12483 st_data_t key = (st_data_t)info->file, d;
12486 if (malloc_info_file_table == NULL) {
12487 malloc_info_file_table = st_init_numtable_with_size(1024);
12489 if (st_lookup(malloc_info_file_table, key, &d)) {
12491 data = (size_t *)d;
12494 data = malloc(xmalloc2_size(2, sizeof(size_t)));
12495 if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
12496 data[0] = data[1] = 0;
12497 st_insert(malloc_info_file_table, key, (st_data_t)data);
12500 data[1] += info->size;
12502 if (0 && gen >= 2) {
12504 fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d, pos: %s:%"PRIdSIZE"\n",
12505 info->size, gen, info->file, info->line);
12508 fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d\n",
12515 old_size = objspace_malloc_size(objspace, ptr, old_size);
12517 objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE) {
12520 RB_DEBUG_COUNTER_INC(heap_xfree);
12525ruby_xmalloc0(size_t size)
12531ruby_xmalloc_body(size_t size)
12533 if ((ssize_t)size < 0) {
12534 negative_size_allocation_error("too large allocation size");
12536 return ruby_xmalloc0(size);
12540ruby_malloc_size_overflow(size_t count, size_t elsize)
12543 "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
12548ruby_xmalloc2_body(size_t n, size_t size)
12550 return objspace_xmalloc0(&rb_objspace, xmalloc2_size(n, size));
12556 if (UNLIKELY(malloc_during_gc_p(objspace))) {
12557 rb_warn("calloc during GC detected, this could cause crashes if it triggers another GC");
12558#if RGENGC_CHECK_MODE || RUBY_DEBUG
12559 rb_bug("Cannot calloc during GC");
12565 size = objspace_malloc_prepare(objspace, size);
12566 TRY_WITH_GC(size, mem = calloc1(size));
12567 return objspace_malloc_fixup(objspace, mem, size);
12571ruby_xcalloc_body(size_t n, size_t size)
12573 return objspace_xcalloc(&rb_objspace, xmalloc2_size(n, size));
12576#ifdef ruby_sized_xrealloc
12577#undef ruby_sized_xrealloc
12580ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
12582 if ((ssize_t)new_size < 0) {
12583 negative_size_allocation_error("too large allocation size");
12586 return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
12590ruby_xrealloc_body(void *ptr, size_t new_size)
12592 return ruby_sized_xrealloc(ptr, new_size, 0);
12595#ifdef ruby_sized_xrealloc2
12596#undef ruby_sized_xrealloc2
12599ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
12601 size_t len = xmalloc2_size(n, size);
12602 return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
12606ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
12608 return ruby_sized_xrealloc2(ptr, n, size, 0);
12611#ifdef ruby_sized_xfree
12612#undef ruby_sized_xfree
12615ruby_sized_xfree(void *x, size_t size)
12625 ruby_sized_xfree(x, 0);
12629rb_xmalloc_mul_add(size_t x, size_t y, size_t z)
12631 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
12632 return ruby_xmalloc(w);
12636rb_xcalloc_mul_add(size_t x, size_t y, size_t z)
12638 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
12639 return ruby_xcalloc(w, 1);
12643rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z)
12645 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
12646 return ruby_xrealloc((void *)p, w);
12650rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w)
12652 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
12653 return ruby_xmalloc(u);
12657rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w)
12659 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
12660 return ruby_xcalloc(u, 1);
12667ruby_mimmalloc(size_t size)
12670#if CALC_EXACT_MALLOC_SIZE
12673 mem = malloc(size);
12674#if CALC_EXACT_MALLOC_SIZE
12683#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12695ruby_mimfree(void *ptr)
12697#if CALC_EXACT_MALLOC_SIZE
12705rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
12713 imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
12715 ptr = ruby_xmalloc0(size);
12723rb_alloc_tmp_buffer(volatile VALUE *store, long len)
12727 if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
12731 return rb_alloc_tmp_buffer_with_count(store, len, cnt);
12735rb_free_tmp_buffer(volatile VALUE *store)
12739 void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
12745#if MALLOC_ALLOCATED_SIZE
12756gc_malloc_allocated_size(VALUE self)
12771gc_malloc_allocations(VALUE self)
12778rb_gc_adjust_memory_usage(ssize_t diff)
12782 objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
12784 else if (diff < 0) {
12785 objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
12799#define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0
12801#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
12803wmap_mark_map(st_data_t key, st_data_t val, st_data_t arg)
12807 if (!is_live_object(objspace, obj)) return ST_DELETE;
12808 return ST_CONTINUE;
12813wmap_compact(void *ptr)
12816 if (w->wmap2obj) rb_gc_update_tbl_refs(w->wmap2obj);
12817 if (w->obj2wmap) rb_gc_update_tbl_refs(w->obj2wmap);
12818 w->final = rb_gc_location(w->final);
12822wmap_mark(void *ptr)
12825#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
12826 if (w->obj2wmap) st_foreach(w->obj2wmap, wmap_mark_map, (st_data_t)&rb_objspace);
12828 rb_gc_mark_movable(w->final);
12832wmap_free_map(st_data_t key, st_data_t val, st_data_t arg)
12835 ruby_sized_xfree(ptr, (ptr[0] + 1) * sizeof(VALUE));
12836 return ST_CONTINUE;
12840wmap_free(void *ptr)
12843 st_foreach(w->obj2wmap, wmap_free_map, 0);
12844 st_free_table(w->obj2wmap);
12845 st_free_table(w->wmap2obj);
12850wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
12853 *(size_t *)arg += (ptr[0] + 1) * sizeof(VALUE);
12854 return ST_CONTINUE;
12858wmap_memsize(const void *ptr)
12861 const struct weakmap *w = ptr;
12863 size += st_memsize(w->obj2wmap);
12864 size += st_memsize(w->wmap2obj);
12865 st_foreach(w->obj2wmap, wmap_memsize_map, (st_data_t)&size);
12877 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
12883wmap_allocate(VALUE klass)
12887 w->obj2wmap = rb_init_identtable();
12888 w->wmap2obj = rb_init_identtable();
12889 w->final = rb_func_lambda_new(wmap_finalize, obj, 1, 1);
12899 if (!is_pointer_to_heap(objspace, (void *)obj)) return FALSE;
12901 void *poisoned = asan_unpoison_object_temporary(obj);
12905 is_live_object(objspace, obj));
12908 asan_poison_object(obj);
12915wmap_final_func(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
12917 VALUE wmap, *ptr, size, i, j;
12918 if (!existing) return ST_STOP;
12920 for (i = j = 1, size = ptr[0]; i <= size; ++i) {
12921 if (ptr[i] != wmap) {
12926 ruby_sized_xfree(ptr, i * sizeof(VALUE));
12930 SIZED_REALLOC_N(ptr, VALUE, j + 1, i);
12932 *value = (st_data_t)ptr;
12934 return ST_CONTINUE;
12941 st_data_t orig, wmap, data;
12942 VALUE obj, *rids, i, size;
12947 if (UNDEF_P(obj = id2ref_obj_tbl(&rb_objspace, objid))) {
12948 rb_bug("wmap_finalize: objid is not found.");
12952 orig = (st_data_t)obj;
12953 if (st_delete(w->obj2wmap, &orig, &data)) {
12954 rids = (VALUE *)data;
12956 for (i = 0; i < size; ++i) {
12957 wmap = (st_data_t)rids[i];
12958 st_delete(w->wmap2obj, &wmap, NULL);
12960 ruby_sized_xfree((VALUE *)data, (size + 1) * sizeof(VALUE));
12963 wmap = (st_data_t)obj;
12964 if (st_delete(w->wmap2obj, &wmap, &orig)) {
12965 wmap = (st_data_t)obj;
12966 st_update(w->obj2wmap, orig, wmap_final_func, wmap);
12982 else if (wmap_live_p(objspace, obj)) {
12986 return rb_str_catf(str,
"#<collected:%p>", (
void*)obj);
12991wmap_inspect_i(st_data_t key, st_data_t val, st_data_t arg)
12995 VALUE str = argp->value;
12998 if (RSTRING_PTR(str)[0] == '#') {
13003 RSTRING_PTR(str)[0] = '#';
13005 wmap_inspect_append(objspace, str, k);
13007 wmap_inspect_append(objspace, str, v);
13009 return ST_CONTINUE;
13013wmap_inspect(VALUE self)
13021 str = rb_sprintf("-<%"PRIsVALUE":%p", c, (void *)self);
13025 st_foreach(w->wmap2obj, wmap_inspect_i, (st_data_t)&args);
13027 RSTRING_PTR(str)[0] = '#';
13033wmap_live_entry_p(rb_objspace_t *objspace, st_data_t key, st_data_t val)
13035 return wmap_live_p(objspace, (VALUE)key) && wmap_live_p(objspace, (VALUE)val);
13039wmap_each_i(st_data_t key, st_data_t val, st_data_t arg)
13043 if (wmap_live_entry_p(objspace, key, val)) {
13045 return ST_CONTINUE;
13054wmap_each(VALUE self)
13060 st_foreach(w->wmap2obj, wmap_each_i, (st_data_t)objspace);
13065wmap_each_key_i(st_data_t key, st_data_t val, st_data_t arg)
13069 if (wmap_live_entry_p(objspace, key, val)) {
13071 return ST_CONTINUE;
13080wmap_each_key(VALUE self)
13086 st_foreach(w->wmap2obj, wmap_each_key_i, (st_data_t)objspace);
13091wmap_each_value_i(st_data_t key, st_data_t val, st_data_t arg)
13095 if (wmap_live_entry_p(objspace, key, val)) {
13097 return ST_CONTINUE;
13106wmap_each_value(VALUE self)
13112 st_foreach(w->wmap2obj, wmap_each_value_i, (st_data_t)objspace);
13117wmap_keys_i(st_data_t key, st_data_t val, st_data_t arg)
13121 VALUE ary = argp->value;
13123 if (wmap_live_entry_p(objspace, key, val)) {
13124 rb_ary_push(ary, (VALUE)key);
13125 return ST_CONTINUE;
13134wmap_keys(VALUE self)
13141 args.value = rb_ary_new();
13142 st_foreach(w->wmap2obj, wmap_keys_i, (st_data_t)&args);
13147wmap_values_i(st_data_t key, st_data_t val, st_data_t arg)
13151 VALUE ary = argp->value;
13153 if (wmap_live_entry_p(objspace, key, val)) {
13154 rb_ary_push(ary, (VALUE)val);
13155 return ST_CONTINUE;
13164wmap_values(VALUE self)
13171 args.value = rb_ary_new();
13172 st_foreach(w->wmap2obj, wmap_values_i, (st_data_t)&args);
13177wmap_aset_update(st_data_t *key, st_data_t *val, st_data_t arg, int existing)
13179 VALUE size, *ptr, *optr;
13181 size = (ptr = optr = (VALUE *)*val)[0];
13183 SIZED_REALLOC_N(ptr, VALUE, size + 1, size);
13188 ptr = ruby_xmalloc0(2 * sizeof(VALUE));
13191 ptr[size] = (VALUE)arg;
13192 if (ptr == optr) return ST_STOP;
13193 *val = (st_data_t)ptr;
13194 return ST_CONTINUE;
13205 define_final0(value, w->final);
13208 define_final0(key, w->final);
13211 st_update(w->obj2wmap, (st_data_t)value, wmap_aset_update, key);
13212 st_insert(w->wmap2obj, (st_data_t)key, (st_data_t)value);
13213 return nonspecial_obj_id(value);
13224 GC_ASSERT(wmap_live_p(objspace, key));
13227 if (!st_lookup(w->wmap2obj, (st_data_t)key, &data)) return Qundef;
13229 if (!wmap_live_p(objspace, obj)) return Qundef;
13237 VALUE obj = wmap_lookup(self, key);
13238 return !UNDEF_P(obj) ? obj : Qnil;
13245 return RBOOL(!UNDEF_P(wmap_lookup(self, key)));
13250wmap_size(VALUE self)
13256 n = w->wmap2obj->num_entries;
13257#if SIZEOF_ST_INDEX_T <= SIZEOF_LONG
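/*
 * Illustrative sketch, not part of gc.c: the weak map above stores, per value,
 * a heap array whose element [0] holds the entry count and whose remaining
 * elements are the keys (see wmap_aset_update and wmap_final_func).  The same
 * "length-prefixed array" pattern in isolation, with the hypothetical names
 * tagged_append and tagged_remove, could look like this:
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Appends v; arr[0] holds the current count.  Returns the (re)allocated array. */
static uintptr_t *
tagged_append(uintptr_t *arr, uintptr_t v)
{
    size_t n = arr ? (size_t)arr[0] : 0;
    uintptr_t *p = realloc(arr, (n + 2) * sizeof(*p)); /* +1 slot, +1 header */
    if (!p) abort();
    p[0] = (uintptr_t)(n + 1);
    p[n + 1] = v;
    return p;
}

/* Removes every occurrence of v, compacting in place, like wmap_final_func. */
static void
tagged_remove(uintptr_t *arr, uintptr_t v)
{
    size_t i, j, n = (size_t)arr[0];
    for (i = j = 1; i <= n; ++i) {
        if (arr[i] != v) arr[j++] = arr[i];
    }
    arr[0] = (uintptr_t)(j - 1);
}

int
main(void)
{
    uintptr_t *a = NULL;
    a = tagged_append(a, 10);
    a = tagged_append(a, 20);
    a = tagged_append(a, 10);
    tagged_remove(a, 10);
    printf("%zu entries, first = %zu\n", (size_t)a[0], (size_t)a[1]); /* 1, 20 */
    free(a);
    return 0;
}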
13268#define GC_PROFILE_RECORD_DEFAULT_SIZE 100
13271current_process_time(struct timespec *ts)
13273#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
13275 static int try_clock_gettime = 1;
13276 if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ts) == 0) {
13280 try_clock_gettime = 0;
13287 struct rusage usage;
13289 if (getrusage(RUSAGE_SELF, &usage) == 0) {
13290 time = usage.ru_utime;
13291 ts->tv_sec = time.tv_sec;
13292 ts->tv_nsec = (int32_t)time.tv_usec * 1000;
13300 FILETIME creation_time, exit_time, kernel_time, user_time;
13303 if (GetProcessTimes(GetCurrentProcess(),
13304 &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
13305 memcpy(&ui, &user_time, sizeof(FILETIME));
13306#define PER100NSEC (uint64_t)(1000 * 1000 * 10)
13307 ts->tv_nsec = (long)(ui.QuadPart % PER100NSEC);
13308 ts->tv_sec = (time_t)(ui.QuadPart / PER100NSEC);
13318getrusage_time(void)
13321 if (current_process_time(&ts)) {
13322 return ts.tv_sec + ts.tv_nsec * 1e-9;
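/*
 * Illustrative sketch, not part of gc.c: getrusage_time() above prefers the
 * per-process CPU clock and falls back to getrusage(2).  A self-contained
 * POSIX version of the same idea, under the hypothetical name
 * process_cpu_seconds(), could be written like this:
 */
#include <stdio.h>
#include <sys/resource.h>
#include <time.h>

/* Returns consumed CPU time in seconds, or 0.0 if it cannot be read. */
static double
process_cpu_seconds(void)
{
#if defined(CLOCK_PROCESS_CPUTIME_ID)
    struct timespec ts;
    if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
        return ts.tv_sec + ts.tv_nsec * 1e-9;
    }
#endif
    {
        struct rusage usage;
        if (getrusage(RUSAGE_SELF, &usage) == 0) {
            /* fallback reports user time only, like the gc.c path */
            return usage.ru_utime.tv_sec + usage.ru_utime.tv_usec * 1e-6;
        }
    }
    return 0.0;
}

int
main(void)
{
    double start = process_cpu_seconds();
    for (volatile long i = 0; i < 10000000; i++) ; /* burn a little CPU */
    printf("used %.6f s of CPU\n", process_cpu_seconds() - start);
    return 0;
}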
13331gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason)
13333 if (objspace->profile.run) {
13334 size_t index = objspace->profile.next_index;
13338 objspace->profile.next_index++;
13340 if (!objspace->profile.records) {
13341 objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
13342 objspace->profile.records = malloc(xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
13344 if (index >= objspace->profile.size) {
13346 objspace->profile.size += 1000;
13347 ptr = realloc(objspace->profile.records, xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
13348 if (!ptr) rb_memerror();
13349 objspace->profile.records = ptr;
13351 if (!objspace->profile.records) {
13352 rb_bug("gc_profile malloc or realloc miss");
13354 record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
13358 record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
13359#if MALLOC_ALLOCATED_SIZE
13360 record->allocated_size = malloc_allocated_size;
13362#if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
13365 struct rusage usage;
13366 if (getrusage(RUSAGE_SELF, &usage) == 0) {
13367 record->maxrss = usage.ru_maxrss;
13368 record->minflt = usage.ru_minflt;
13369 record->majflt = usage.ru_majflt;
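/*
 * Illustrative sketch, not part of gc.c: gc_prof_setup_new_record() above
 * lazily allocates the profile record array and grows it by a fixed step with
 * realloc.  The same growth pattern in isolation, with the hypothetical names
 * prof_log and prof_push, could look like this:
 */
#include <stdio.h>
#include <stdlib.h>

struct prof_record { double gc_time; };

struct prof_log {
    struct prof_record *records;
    size_t size;   /* capacity */
    size_t next;   /* next free index */
};

/* Returns a pointer to a fresh record, growing the backing array as needed. */
static struct prof_record *
prof_push(struct prof_log *log)
{
    if (!log->records) {
        log->size = 100;       /* cf. GC_PROFILE_RECORD_DEFAULT_SIZE */
        log->records = malloc(sizeof(*log->records) * log->size);
    }
    else if (log->next >= log->size) {
        log->size += 1000;     /* fixed growth step, as in gc.c */
        void *p = realloc(log->records, sizeof(*log->records) * log->size);
        if (!p) { free(log->records); return NULL; } /* gc.c calls rb_memerror() */
        log->records = p;
    }
    if (!log->records) return NULL;
    return &log->records[log->next++];
}

int
main(void)
{
    struct prof_log log = {0};
    for (int i = 0; i < 2500; i++) {
        struct prof_record *r = prof_push(&log);
        if (!r) return 1;
        r->gc_time = i * 0.001;
    }
    printf("capacity %zu, used %zu\n", log.size, log.next); /* 3100, 2500 */
    free(log.records);
    return 0;
}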
13380 if (gc_prof_enabled(objspace)) {
13382#if GC_PROFILE_MORE_DETAIL
13383 record->prepare_time = objspace->profile.prepare_time;
13385 record->gc_time = 0;
13386 record->gc_invoke_time = getrusage_time();
13391elapsed_time_from(double time)
13393 double now = getrusage_time();
13405 if (gc_prof_enabled(objspace)) {
13407 record->gc_time = elapsed_time_from(record->gc_invoke_time);
13408 record->gc_invoke_time -= objspace->profile.invoke_time;
13412#define RUBY_DTRACE_GC_HOOK(name) \
13413 do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)
13417 RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
13418#if GC_PROFILE_MORE_DETAIL
13419 if (gc_prof_enabled(objspace)) {
13420 gc_prof_record(objspace)->gc_mark_time = getrusage_time();
13428 RUBY_DTRACE_GC_HOOK(MARK_END);
13429#if GC_PROFILE_MORE_DETAIL
13430 if (gc_prof_enabled(objspace)) {
13432 record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
13440 RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
13441 if (gc_prof_enabled(objspace)) {
13444 if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
13445 objspace->profile.gc_sweep_start_time = getrusage_time();
13453 RUBY_DTRACE_GC_HOOK(SWEEP_END);
13455 if (gc_prof_enabled(objspace)) {
13459 if (record->gc_time > 0) {
13460 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
13462 record->gc_time += sweep_time;
13464 else if (GC_PROFILE_MORE_DETAIL) {
13465 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
13468#if GC_PROFILE_MORE_DETAIL
13469 record->gc_sweep_time += sweep_time;
13470 if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
13472 if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;
13479#if GC_PROFILE_MORE_DETAIL
13480 if (gc_prof_enabled(objspace)) {
13482 record->allocate_increase = malloc_increase;
13483 record->allocate_limit = malloc_limit;
13491 if (gc_prof_enabled(objspace)) {
13493 size_t live = objspace->profile.total_allocated_objects_at_gc_start - objspace->profile.total_freed_objects;
13494 size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;
13496#if GC_PROFILE_MORE_DETAIL
13497 record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
13498 record->heap_live_objects = live;
13499 record->heap_free_objects = total - live;
13502 record->heap_total_objects = total;
13503 record->heap_use_size = live * sizeof(RVALUE);
13504 record->heap_total_size = total * sizeof(RVALUE);
13517gc_profile_clear(VALUE _)
13520 void *p = objspace->profile.records;
13521 objspace->profile.records = NULL;
13522 objspace->profile.size = 0;
13523 objspace->profile.next_index = 0;
13524 objspace->profile.current_record = 0;
13582gc_profile_record_get(VALUE _)
13585 VALUE gc_profile = rb_ary_new();
13589 if (!objspace->profile.run) {
13593 for (i =0; i < objspace->profile.next_index; i++) {
13596 prof = rb_hash_new();
13597 rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(0, rb_hash_new(), record->flags));
13598 rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
13599 rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
13600 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
13601 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
13602 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
13603 rb_hash_aset(prof, ID2SYM(rb_intern("MOVED_OBJECTS")), SIZET2NUM(record->moved_objects));
13604 rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue);
13605#if GC_PROFILE_MORE_DETAIL
13606 rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
13607 rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
13608 rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
13609 rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
13610 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_PAGES")), SIZET2NUM(record->heap_use_pages));
13611 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
13612 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));
13614 rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
13615 rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));
13617 rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), RBOOL(record->flags & GPR_FLAG_HAVE_FINALIZE));
13620#if RGENGC_PROFILE > 0
13621 rb_hash_aset(prof, ID2SYM(rb_intern("OLD_OBJECTS")), SIZET2NUM(record->old_objects));
13622 rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
13623 rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
13625 rb_ary_push(gc_profile, prof);
13631#if GC_PROFILE_MORE_DETAIL
13632#define MAJOR_REASON_MAX 0x10
13635gc_profile_dump_major_reason(unsigned int flags, char *buff)
13637 unsigned int reason = flags & GPR_FLAG_MAJOR_MASK;
13640 if (reason == GPR_FLAG_NONE) {
13646 if (reason & GPR_FLAG_MAJOR_BY_##x) { \
13647 buff[i++] = #x[0]; \
13648 if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
13654#if RGENGC_ESTIMATE_OLDMALLOC
13667 size_t count = objspace->profile.next_index;
13668#ifdef MAJOR_REASON_MAX
13669 char reason_str[MAJOR_REASON_MAX];
13672 if (objspace->profile.run && count ) {
13676 append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
13677 append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
13679 for (i = 0; i < count; i++) {
13680 record = &objspace->profile.records[i];
13681 append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
13682 i+1, record->gc_invoke_time, record->heap_use_size,
13683 record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
13686#if GC_PROFILE_MORE_DETAIL
13687 const char *str = "\n\n" \
13689 "Prepare Time = Previously GC's rest sweep time\n"
13690 "Index Flags Allocate Inc. Allocate Limit"
13691#if CALC_EXACT_MALLOC_SIZE
13694 " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
13696 " OldgenObj RemNormObj RemShadObj"
13698#if GC_PROFILE_DETAIL_MEMORY
13699 " MaxRSS(KB) MinorFLT MajorFLT"
13704 for (i = 0; i < count; i++) {
13705 record = &objspace->profile.records[i];
13706 append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
13707#if CALC_EXACT_MALLOC_SIZE
13710 " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
13712 "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
13714#if GC_PROFILE_DETAIL_MEMORY
13720 gc_profile_dump_major_reason(record->flags, reason_str),
13721 (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
13722 (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
13723 (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
13724 (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
13725 (record->flags & GPR_FLAG_CAPI) ? "CAPI__" : "??????",
13726 (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
13727 record->allocate_increase, record->allocate_limit,
13728#if CALC_EXACT_MALLOC_SIZE
13729 record->allocated_size,
13731 record->heap_use_pages,
13732 record->gc_mark_time*1000,
13733 record->gc_sweep_time*1000,
13734 record->prepare_time*1000,
13736 record->heap_live_objects,
13737 record->heap_free_objects,
13738 record->removing_objects,
13739 record->empty_objects
13742 record->old_objects,
13743 record->remembered_normal_objects,
13744 record->remembered_shady_objects
13746#if GC_PROFILE_DETAIL_MEMORY
13748 record->maxrss / 1024,
13771gc_profile_result(VALUE _)
13788gc_profile_report(int argc, VALUE *argv, VALUE self)
13792 out = (!rb_check_arity(argc, 0, 1) ? rb_stdout : argv[0]);
13806gc_profile_total_time(VALUE self)
13811 if (objspace->profile.run && objspace->profile.next_index > 0) {
13813 size_t count = objspace->profile.next_index;
13815 for (i = 0; i < count; i++) {
13816 time += objspace->profile.records[i].gc_time;
13830gc_profile_enable_get(VALUE self)
13833 return RBOOL(objspace->profile.run);
13845gc_profile_enable(VALUE _)
13848 objspace->profile.run = TRUE;
13849 objspace->profile.current_record = 0;
13862gc_profile_disable(VALUE _)
13866 objspace->profile.run = FALSE;
13867 objspace->profile.current_record = 0;
13879#define TYPE_NAME(t) case (t): return #t;
13906 if (obj && rb_objspace_data_type_name(obj)) {
13907 return rb_objspace_data_type_name(obj);
13916obj_type_name(VALUE obj)
13918 return type_name(TYPE(obj), obj);
13922rb_method_type_name(rb_method_type_t type)
13925 case VM_METHOD_TYPE_ISEQ: return "iseq";
13926 case VM_METHOD_TYPE_ATTRSET: return "attrset";
13927 case VM_METHOD_TYPE_IVAR: return "ivar";
13928 case VM_METHOD_TYPE_BMETHOD: return "bmethod";
13929 case VM_METHOD_TYPE_ALIAS: return "alias";
13930 case VM_METHOD_TYPE_REFINED: return "refined";
13931 case VM_METHOD_TYPE_CFUNC: return "cfunc";
13932 case VM_METHOD_TYPE_ZSUPER: return "zsuper";
13933 case VM_METHOD_TYPE_MISSING: return "missing";
13934 case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
13935 case VM_METHOD_TYPE_UNDEF: return "undef";
13936 case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
13938 rb_bug("rb_method_type_name: unreachable (type: %d)", type);
13942rb_raw_iseq_info(char *const buff, const size_t buff_size, const rb_iseq_t *iseq)
13944 if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label && !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj, T_MOVED)) {
13945 VALUE path = rb_iseq_path(iseq);
13946 int n = ISEQ_BODY(iseq)->location.first_lineno;
13947 snprintf(buff, buff_size, " %s@%s:%d",
13948 RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
13949 RSTRING_PTR(path), n);
13954str_len_no_raise(VALUE str)
13956 long len = RSTRING_LEN(str);
13957 if (len < 0) return 0;
13958 if (len > INT_MAX) return INT_MAX;
13962#define BUFF_ARGS buff + pos, buff_size - pos
13963#define APPEND_F(...) if ((pos += snprintf(BUFF_ARGS, "" __VA_ARGS__)) >= buff_size) goto end
13964#define APPEND_S(s) do { \
13965 if ((pos + (int)rb_strlen_lit(s)) >= buff_size) { \
13969 memcpy(buff + pos, (s), rb_strlen_lit(s) + 1); \
13972#define TF(c) ((c) != 0 ? "true" : "false")
13973#define C(c, s) ((c) != 0 ? (s) : " ")
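/*
 * Illustrative sketch, not part of gc.c: APPEND_F/APPEND_S above build an
 * object description by repeatedly snprintf-ing into one fixed buffer and
 * advancing a position, bailing out once the buffer is full.  The same
 * pattern as a plain function, under the hypothetical name buf_appendf,
 * looks like this:
 */
#include <stdarg.h>
#include <stdio.h>

/* Appends formatted text at *pos; returns 0 once the buffer is exhausted. */
static int
buf_appendf(char *buff, size_t buff_size, size_t *pos, const char *fmt, ...)
{
    va_list ap;
    int n;

    if (*pos >= buff_size) return 0;
    va_start(ap, fmt);
    n = vsnprintf(buff + *pos, buff_size - *pos, fmt, ap);
    va_end(ap);
    if (n < 0) return 0;
    *pos += (size_t)n;
    return *pos < buff_size;          /* truncated output counts as "full" */
}

int
main(void)
{
    char buff[32];
    size_t pos = 0;

    buf_appendf(buff, sizeof(buff), &pos, "%s", "T_STRING");
    buf_appendf(buff, sizeof(buff), &pos, " len: %d", 12);
    printf("%s\n", buff);             /* -> "T_STRING len: 12" */
    return 0;
}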
13976rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj)
13981 APPEND_F("%s", obj_type_name(obj));
13991 const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
13993 if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
13994 APPEND_F("%p [%d%s%s%s%s%s%s] %s ",
13996 C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
13997 C(RVALUE_MARK_BITMAP(obj), "M"),
13998 C(RVALUE_PIN_BITMAP(obj), "P"),
13999 C(RVALUE_MARKING_BITMAP(obj), "R"),
14000 C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
14001 C(rb_objspace_garbage_object_p(obj), "G"),
14002 obj_type_name(obj));
14006 APPEND_F("%p [%dXXXX] %s",
14008 obj_type_name(obj));
14011 if (internal_object_p(obj)) {
14014 else if (RBASIC(obj)->klass == 0) {
14015 APPEND_S("(temporary internal)");
14019 if (!NIL_P(class_path)) {
14020 APPEND_F("(%s)", RSTRING_PTR(class_path));
14025 APPEND_F("@%s:%d", RANY(obj)->file, RANY(obj)->line);
14034rb_raw_obj_info_buitin_type(char *const buff, const size_t buff_size, const VALUE obj, size_t pos)
14041 UNEXPECTED_NODE(rb_raw_obj_info);
14044 if (ARY_SHARED_P(obj)) {
14045 APPEND_S("shared -> ");
14046 rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
14048 else if (ARY_EMBED_P(obj)) {
14049 APPEND_F("[%s%s] len: %ld (embed)",
14050 C(ARY_EMBED_P(obj), "E"),
14051 C(ARY_SHARED_P(obj), "S"),
14055 APPEND_F("[%s%s%s] len: %ld, capa:%ld ptr:%p",
14056 C(ARY_EMBED_P(obj), "E"),
14057 C(ARY_SHARED_P(obj), "S"),
14058 C(RARRAY_TRANSIENT_P(obj), "T"),
14060 ARY_EMBED_P(obj) ? -1L : RARRAY(obj)->as.heap.aux.capa,
14065 if (STR_SHARED_P(obj)) {
14066 APPEND_F(" [shared] len: %ld", RSTRING_LEN(obj));
14069 if (STR_EMBED_P(obj)) APPEND_S(" [embed]");
14071 APPEND_F(" len: %ld, capa: %" PRIdSIZE, RSTRING_LEN(obj), rb_str_capacity(obj));
14073 APPEND_F(" \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj));
14077 VALUE fstr = RSYMBOL(obj)->fstr;
14078 ID id = RSYMBOL(obj)->id;
14080 APPEND_F(":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id);
14083 APPEND_F("(%p) id:%d", (void *)fstr, (unsigned int)id);
14088 APPEND_F("-> %p", (void*)rb_gc_location(obj));
14092 APPEND_F("[%c%c] %"PRIdSIZE,
14093 RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
14094 RHASH_TRANSIENT_P(obj) ? 'T' : ' ',
14102 if (!NIL_P(class_path)) {
14103 APPEND_F("%s", RSTRING_PTR(class_path));
14106 APPEND_S("(anon)");
14113 if (!NIL_P(class_path)) {
14114 APPEND_F("src:%s", RSTRING_PTR(class_path));
14120 uint32_t len = ROBJECT_IV_CAPACITY(obj);
14122 if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
14123 APPEND_F("(embed) len:%d", len);
14126 VALUE *ptr = ROBJECT_IVPTR(obj);
14127 APPEND_F("len:%d ptr:%p", len, (void *)ptr);
14135 (block = vm_proc_block(obj)) != NULL &&
14136 (vm_block_type(block) == block_type_iseq) &&
14137 (iseq = vm_block_iseq(block)) != NULL) {
14138 rb_raw_iseq_info(BUFF_ARGS, iseq);
14140 else if (rb_ractor_p(obj)) {
14143 APPEND_F("r:%d", r->pub.id);
14147 const char *const type_name = rb_objspace_data_type_name(obj);
14149 APPEND_F("%s", type_name);
14155 APPEND_F("<%s> ", rb_imemo_name(imemo_type(obj)));
14157 switch (imemo_type(obj)) {
14162 APPEND_F(":%s (%s%s%s%s) type:%s alias:%d owner:%p defined_class:%p",
14164 METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
14165 METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
14166 METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
14167 METHOD_ENTRY_CACHED(me) ? ",cc" : "",
14168 METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
14169 me->def ? rb_method_type_name(me->def->type) : "NULL",
14170 me->def ? me->def->alias_count : -1,
14172 (void *)me->defined_class);
14175 switch (me->def->type) {
14176 case VM_METHOD_TYPE_ISEQ:
14177 APPEND_S(" (iseq:");
14178 rb_raw_obj_info(BUFF_ARGS, (VALUE)me->def->body.iseq.iseqptr);
14190 rb_raw_iseq_info(BUFF_ARGS, iseq);
14193 case imemo_callinfo:
14196 APPEND_F("(mid:%s, flag:%x argc:%d, kwarg:%s)",
14200 vm_ci_kwarg(ci) ? "available" : "NULL");
14203 case imemo_callcache:
14209 APPEND_F("(klass:%s cme:%s%s (%p) call:%p",
14210 NIL_P(class_path) ? (cc->klass ? "??" : "<NULL>") : RSTRING_PTR(class_path),
14211 cme ? rb_id2name(cme->called_id) : "<NULL>",
14212 cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
14214 (void *)vm_cc_call(cc));
14234rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
14236 asan_unpoisoning_object(obj) {
14237 size_t pos = rb_raw_obj_info_common(buff, buff_size, obj);
14238 pos = rb_raw_obj_info_buitin_type(buff, buff_size, obj, pos);
14239 if (pos >= buff_size) {}
14250#define OBJ_INFO_BUFFERS_NUM 10
14251#define OBJ_INFO_BUFFERS_SIZE 0x100
14253static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
14261 if (UNLIKELY(oldval >= maxval - 1)) {
14272 rb_atomic_t index = atomic_inc_wraparound(&obj_info_buffers_index, OBJ_INFO_BUFFERS_NUM);
14273 char *const buff = obj_info_buffers[index];
14274 return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
14280 return obj_type_name(obj);
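/*
 * Illustrative sketch, not part of gc.c: obj_info() above hands out one of
 * OBJ_INFO_BUFFERS_NUM static buffers, chosen by atomically incrementing an
 * index with wraparound, so nested or concurrent callers do not immediately
 * clobber each other.  gc.c wraps the counter with a compare-and-swap loop;
 * the simpler fetch-and-add variant below (hypothetical name
 * next_scratch_buffer) shows the same rotation in C11:
 */
#include <stdatomic.h>
#include <stdio.h>

#define BUFFERS_NUM  10
#define BUFFERS_SIZE 0x100

static char buffers[BUFFERS_NUM][BUFFERS_SIZE];
static atomic_uint buffers_index;

/* Returns the next scratch buffer, cycling through the pool. */
static char *
next_scratch_buffer(void)
{
    unsigned int i = atomic_fetch_add(&buffers_index, 1) % BUFFERS_NUM;
    return buffers[i];
}

int
main(void)
{
    char *a = next_scratch_buffer();
    char *b = next_scratch_buffer();
    snprintf(a, BUFFERS_SIZE, "first");
    snprintf(b, BUFFERS_SIZE, "second");
    printf("%s / %s\n", a, b);   /* distinct buffers: "first / second" */
    return 0;
}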
14284MJIT_FUNC_EXPORTED const char *
14285rb_obj_info(VALUE obj)
14287 return obj_info(obj);
14291rb_obj_info_dump(VALUE obj)
14294 fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
14297MJIT_FUNC_EXPORTED void
14298rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
14301 fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
14307rb_gcdebug_print_obj_condition(VALUE obj)
14311 fprintf(stderr, "created at: %s:%d\n", RANY(obj)->file, RANY(obj)->line);
14314 fprintf(stderr, "moved?: true\n");
14317 fprintf(stderr, "moved?: false\n");
14319 if (is_pointer_to_heap(objspace, (void *)obj)) {
14320 fprintf(stderr, "pointer to heap?: true\n");
14323 fprintf(stderr, "pointer to heap?: false\n");
14327 fprintf(stderr, "marked? : %s\n", MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ? "true" : "false");
14328 fprintf(stderr, "pinned? : %s\n", MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) ? "true" : "false");
14329 fprintf(stderr, "age? : %d\n", RVALUE_AGE(obj));
14330 fprintf(stderr, "old? : %s\n", RVALUE_OLD_P(obj) ? "true" : "false");
14331 fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
14332 fprintf(stderr, "remembered? : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");
14334 if (is_lazy_sweeping(objspace)) {
14335 fprintf(stderr, "lazy sweeping?: true\n");
14336 fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");
14339 fprintf(stderr, "lazy sweeping?: false\n");
14346 fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);
14351rb_gcdebug_sentinel(VALUE obj, const char *name)
14358#if GC_DEBUG_STRESS_TO_CLASS
14367rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
14371 if (!stress_to_class) {
14372 stress_to_class = rb_ary_hidden_new(argc);
14374 rb_ary_cat(stress_to_class, argv, argc);
14387rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
14392 if (stress_to_class) {
14393 for (i = 0; i < argc; ++i) {
14394 rb_ary_delete_same(stress_to_class, argv[i]);
14397 stress_to_class = 0;
14469gc_using_rvargc_p(VALUE mod)
14482 VALUE rb_mObjSpace;
14483 VALUE rb_mProfiler;
14484 VALUE gc_constants;
14488 gc_constants = rb_hash_new();
14489 rb_hash_aset(gc_constants, ID2SYM(rb_intern("DEBUG")), RBOOL(GC_DEBUG));
14490 rb_hash_aset(gc_constants, ID2SYM(rb_intern("BASE_SLOT_SIZE")), SIZET2NUM(BASE_SLOT_SIZE - RVALUE_OVERHEAD));
14491 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), SIZET2NUM(RVALUE_OVERHEAD));
14493 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_OBJ_LIMIT")), SIZET2NUM(HEAP_PAGE_OBJ_LIMIT));
14494 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
14495 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_SIZE")), SIZET2NUM(HEAP_PAGE_SIZE));
14496 rb_hash_aset(gc_constants, ID2SYM(rb_intern("SIZE_POOL_COUNT")), LONG2FIX(SIZE_POOL_COUNT));
14497 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(size_pool_slot_size(SIZE_POOL_COUNT - 1)));
14521 rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
14551#if MALLOC_ALLOCATED_SIZE
14558 if (GC_COMPACTION_SUPPORTED) {
14573#if GC_DEBUG_STRESS_TO_CLASS
14582#define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
14586 OPT(RGENGC_CHECK_MODE);
14587 OPT(RGENGC_PROFILE);
14588 OPT(RGENGC_ESTIMATE_OLDMALLOC);
14589 OPT(GC_PROFILE_MORE_DETAIL);
14590 OPT(GC_ENABLE_LAZY_SWEEP);
14591 OPT(CALC_EXACT_MALLOC_SIZE);
14592 OPT(MALLOC_ALLOCATED_SIZE);
14593 OPT(MALLOC_ALLOCATED_SIZE_CHECK);
14594 OPT(GC_PROFILE_DETAIL_MEMORY);
14595 OPT(GC_COMPACTION_SUPPORTED);
14604#ifdef ruby_xmalloc2
14605#undef ruby_xmalloc2
14610#ifdef ruby_xrealloc
14611#undef ruby_xrealloc
14613#ifdef ruby_xrealloc2
14614#undef ruby_xrealloc2
14618ruby_xmalloc(size_t size)
14620#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14621 ruby_malloc_info_file = __FILE__;
14622 ruby_malloc_info_line = __LINE__;
14624 return ruby_xmalloc_body(size);
14628ruby_xmalloc2(size_t n, size_t size)
14630#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14631 ruby_malloc_info_file = __FILE__;
14632 ruby_malloc_info_line = __LINE__;
14634 return ruby_xmalloc2_body(n, size);
14638ruby_xcalloc(size_t n, size_t size)
14640#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14641 ruby_malloc_info_file = __FILE__;
14642 ruby_malloc_info_line = __LINE__;
14644 return ruby_xcalloc_body(n, size);
14648ruby_xrealloc(void *ptr, size_t new_size)
14650#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14651 ruby_malloc_info_file = __FILE__;
14652 ruby_malloc_info_line = __LINE__;
14654 return ruby_xrealloc_body(ptr, new_size);
14658ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
14660#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14661 ruby_malloc_info_file = __FILE__;
14662 ruby_malloc_info_line = __LINE__;
14664 return ruby_xrealloc2_body(ptr, n, new_size);
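/*
 * Illustrative sketch, not part of gc.c: when USE_GC_MALLOC_OBJ_INFO_DETAILS
 * is enabled, the ruby_x*alloc wrappers above stash a file/line pair in
 * globals before delegating to the *_body() allocator so the location can be
 * recorded with the allocation; a macro form of the same trick captures the
 * caller's location instead.  With the hypothetical names traced_malloc and
 * traced_malloc_body it looks like this:
 */
#include <stdio.h>
#include <stdlib.h>

static const char *malloc_info_file;
static int         malloc_info_line;

static void *
traced_malloc_body(size_t size)
{
    fprintf(stderr, "malloc(%zu) at %s:%d\n", size, malloc_info_file, malloc_info_line);
    return malloc(size);
}

/* The macro records the caller's location, then delegates to the body. */
#define traced_malloc(size) \
    (malloc_info_file = __FILE__, malloc_info_line = __LINE__, traced_malloc_body(size))

int
main(void)
{
    void *p = traced_malloc(64);   /* logs "malloc(64) at <this file>:<line>" */
    free(p);
    return 0;
}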