| author | Jean Boussier <jean.boussier@gmail.com> | 2025-07-30 16:51:59 +0200 |
|---|---|---|
| committer | Jean Boussier <jean.boussier@gmail.com> | 2025-08-01 10:42:04 +0200 |
| commit | 547f111b5b0d773af2a4268fe407fdacc7060109 (patch) | |
| tree | 23d9c77e2bdc54e29cf2cff352e8226c4776d20a /vm_method.c | |
| parent | f2a7e48deadb9101d49c9b613abf5a83c9e1dd49 (diff) | |
Refactor `vm_lookup_cc` to allow lock-free lookups in `RClass.cc_tbl`
In multi-ractor mode, `cc_tbl` mutations use the RCU pattern,
which allows lock-free reads.
Based on the assumption that invalidations and misses should become
increasingly rare as the process ages, locking on modification
isn't a big concern.
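As a rough orientation, here is a minimal, self-contained sketch of that read-copy-update scheme, using a toy fixed-capacity table. None of these names (`table_lookup`, `table_insert`, `current_table`) are CRuby APIs, and reclamation of the old table is deliberately elided:

```c
#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>

/* Toy table with a fixed capacity; stands in for RClass.cc_tbl. */
struct table {
    size_t len;
    int entries[16];
};

static struct table initial_table; /* zero-initialized: empty table */
static _Atomic(struct table *) current_table = &initial_table;
static pthread_mutex_t write_lock = PTHREAD_MUTEX_INITIALIZER;

/* Read side: no lock. An acquire load yields a consistent snapshot,
 * because writers only ever publish fully-built copies. */
static int
table_lookup(size_t idx)
{
    struct table *t = atomic_load_explicit(&current_table, memory_order_acquire);
    return idx < t->len ? t->entries[idx] : -1;
}

/* Write side: serialize on a lock, copy the table, mutate the copy,
 * then publish it with a release store. Readers that already loaded
 * the old pointer keep using the old snapshot undisturbed. */
static void
table_insert(int value)
{
    pthread_mutex_lock(&write_lock);
    struct table *old = atomic_load_explicit(&current_table, memory_order_relaxed);
    struct table *copy = malloc(sizeof(*copy));
    memcpy(copy, old, sizeof(*copy));
    if (copy->len < 16) copy->entries[copy->len++] = value;
    atomic_store_explicit(&current_table, copy, memory_order_release);
    pthread_mutex_unlock(&write_lock);
    /* The old table is intentionally not freed here: reclamation must
     * wait until no reader can still hold a reference. In CRuby, the GC
     * provides that grace period for the managed id table. */
}
```

If writes really are rare, the lock on the write side is cheap, while the read side pays only an atomic load, which is the trade-off the commit message relies on.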
Diffstat (limited to 'vm_method.c')
| -rw-r--r-- | vm_method.c | 62 |

1 file changed, 60 insertions, 2 deletions
```diff
diff --git a/vm_method.c b/vm_method.c
index 779e77b673..4788d54d2d 100644
--- a/vm_method.c
+++ b/vm_method.c
@@ -143,6 +143,64 @@ rb_vm_cc_table_create(size_t capa)
 }
 
 static enum rb_id_table_iterator_result
+vm_cc_table_dup_i(ID key, VALUE old_ccs_ptr, void *data)
+{
+    struct rb_class_cc_entries *old_ccs = (struct rb_class_cc_entries *)old_ccs_ptr;
+    struct rb_class_cc_entries *new_ccs = ALLOC(struct rb_class_cc_entries);
+    MEMCPY(new_ccs, old_ccs, struct rb_class_cc_entries, 1);
+#if VM_CHECK_MODE > 0
+    new_ccs->debug_sig = ~(VALUE)new_ccs;
+#endif
+    new_ccs->entries = ALLOC_N(struct rb_class_cc_entries_entry, new_ccs->capa);
+    MEMCPY(new_ccs->entries, old_ccs->entries, struct rb_class_cc_entries_entry, new_ccs->capa);
+
+    VALUE new_table = (VALUE)data;
+    rb_managed_id_table_insert(new_table, key, (VALUE)new_ccs);
+    for (int index = 0; index < new_ccs->len; index++) {
+        RB_OBJ_WRITTEN(new_table, Qundef, new_ccs->entries[index].cc);
+    }
+    return ID_TABLE_CONTINUE;
+}
+
+VALUE
+rb_vm_cc_table_dup(VALUE old_table)
+{
+    VALUE new_table = rb_vm_cc_table_create(rb_managed_id_table_size(old_table));
+    rb_managed_id_table_foreach(old_table, vm_cc_table_dup_i, (void *)new_table);
+    return new_table;
+}
+
+static void
+vm_ccs_invalidate(struct rb_class_cc_entries *ccs)
+{
+    if (ccs->entries) {
+        for (int i=0; i<ccs->len; i++) {
+            const struct rb_callcache *cc = ccs->entries[i].cc;
+            VM_ASSERT(!vm_cc_super_p(cc) && !vm_cc_refinement_p(cc));
+            vm_cc_invalidate(cc);
+        }
+    }
+}
+
+void
+rb_vm_ccs_invalidate_and_free(struct rb_class_cc_entries *ccs)
+{
+    RB_DEBUG_COUNTER_INC(ccs_free);
+    vm_ccs_invalidate(ccs);
+    vm_ccs_free(ccs);
+}
+
+void
+rb_vm_cc_table_delete(VALUE table, ID mid)
+{
+    struct rb_class_cc_entries *ccs;
+    if (rb_managed_id_table_lookup(table, mid, (VALUE *)&ccs)) {
+        rb_managed_id_table_delete(table, mid);
+        rb_vm_ccs_invalidate_and_free(ccs);
+    }
+}
+
+static enum rb_id_table_iterator_result
 vm_ccs_dump_i(ID mid, VALUE val, void *data)
 {
     const struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)val;
@@ -296,7 +354,7 @@ invalidate_method_cache_in_cc_table(VALUE tbl, ID mid)
         struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_data;
         rb_yjit_cme_invalidate((rb_callable_method_entry_t *)ccs->cme);
         if (NIL_P(ccs->cme->owner)) invalidate_negative_cache(mid);
-        rb_vm_ccs_free(ccs);
+        rb_vm_ccs_invalidate_and_free(ccs);
         rb_managed_id_table_delete(tbl, mid);
         RB_DEBUG_COUNTER_INC(cc_invalidate_leaf_ccs);
     }
@@ -1692,8 +1750,8 @@ cached_callable_method_entry(VALUE klass, ID mid)
             return ccs->cme;
         }
         else {
-            rb_vm_ccs_free(ccs);
             rb_managed_id_table_delete(cc_tbl, mid);
+            rb_vm_ccs_invalidate_and_free(ccs);
         }
     }
```
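To see how the new helpers compose on the write side, here is a hypothetical sketch, not code from this commit: `vm_lock`/`vm_unlock` and `klass_get_cc_tbl`/`klass_set_cc_tbl` are placeholder names, and only `rb_vm_cc_table_dup` and `rb_managed_id_table_insert` come from this diff.

```c
#include "ruby/ruby.h" /* VALUE, ID */

/* Placeholder declarations, for illustration only (not CRuby APIs). */
extern void  vm_lock(void);
extern void  vm_unlock(void);
extern VALUE klass_get_cc_tbl(VALUE klass);
extern void  klass_set_cc_tbl(VALUE klass, VALUE tbl);
/* Added/used in this diff: */
extern VALUE rb_vm_cc_table_dup(VALUE old_table);
extern void  rb_managed_id_table_insert(VALUE table, ID key, VALUE val);

/* Hypothetical copy-on-write insertion: duplicate the table, edit the
 * copy, publish it. Concurrent lock-free readers keep traversing the
 * old snapshot until they next load the table pointer. */
static void
cc_tbl_insert_cow(VALUE klass, ID mid, VALUE ccs)
{
    vm_lock();                                     /* writers serialize  */
    VALUE new_tbl = rb_vm_cc_table_dup(klass_get_cc_tbl(klass));
    rb_managed_id_table_insert(new_tbl, mid, ccs); /* mutate the copy    */
    klass_set_cc_tbl(klass, new_tbl);              /* publish snapshot   */
    vm_unlock();                                   /* old table reclaimed
                                                      by GC once unused  */
}
```

This also explains the `RB_OBJ_WRITTEN` loop in `vm_cc_table_dup_i`: the freshly built table must tell the GC about every call cache it now references before it is published.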