@@ -416,21 +416,24 @@ vm_cc_call(const struct rb_callcache *cc)
 }
 
 static inline void
-vm_cc_atomic_shape_and_index(const struct rb_callcache *cc, shape_id_t *shape_id, attr_index_t *index)
+vm_unpack_shape_and_index(uintptr_t cache_value, shape_id_t *shape_id, attr_index_t *index)
 {
-    uintptr_t cache_value = cc->aux_.attr.value; // Atomically read 64 bits
     *shape_id = (shape_id_t)(cache_value >> SHAPE_FLAG_SHIFT);
     *index = (attr_index_t)(cache_value & SHAPE_FLAG_MASK) - 1;
-    return;
 }
 
 static inline void
-vm_ic_atomic_shape_and_index(const struct iseq_inline_iv_cache_entry *ic, shape_id_t *shape_id, attr_index_t *index)
+vm_cc_atomic_shape_and_index(const struct rb_callcache *cc, shape_id_t *shape_id, attr_index_t *index)
 {
-    uintptr_t cache_value = ic->value; // Atomically read 64 bits
-    *shape_id = (shape_id_t)(cache_value >> SHAPE_FLAG_SHIFT);
-    *index = (attr_index_t)(cache_value & SHAPE_FLAG_MASK) - 1;
-    return;
+    // Atomically read uintptr_t
+    vm_unpack_shape_and_index(cc->aux_.attr.value, shape_id, index);
+}
+
+static inline void
+vm_ic_atomic_shape_and_index(const struct iseq_inline_iv_cache_entry *ic, shape_id_t *shape_id, attr_index_t *index)
+{
+    // Atomically read uintptr_t
+    vm_unpack_shape_and_index(ic->value, shape_id, index);
 }
 
 static inline unsigned int
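
For context, the extracted vm_unpack_shape_and_index decodes a single word that carries both a shape ID and an attribute index: the shape ID lives in the bits above SHAPE_FLAG_SHIFT, and the index is stored off by one so that a zero low half means "not set". A minimal standalone sketch of the decoding, assuming a 64-bit build with the shape ID in the upper 32 bits (the typedefs and SHAPE_FLAG_* values below are stand-ins approximating the real definitions in shape.h):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for CRuby's types and constants (see shape.h); values assume a
 * 64-bit uintptr_t with the shape ID stored in the upper 32 bits. */
typedef uint32_t shape_id_t;
typedef uint32_t attr_index_t;
#define SHAPE_FLAG_SHIFT 32
#define SHAPE_FLAG_MASK  (((uintptr_t)-1) >> SHAPE_FLAG_SHIFT)

static inline void
vm_unpack_shape_and_index(uintptr_t cache_value, shape_id_t *shape_id, attr_index_t *index)
{
    *shape_id = (shape_id_t)(cache_value >> SHAPE_FLAG_SHIFT);
    /* The stored index is offset by one, so a zero low half means "not set". */
    *index = (attr_index_t)(cache_value & SHAPE_FLAG_MASK) - 1;
}

int
main(void)
{
    /* Shape 7 with stored low half 4 decodes to attribute index 3. */
    uintptr_t packed = ((uintptr_t)7 << SHAPE_FLAG_SHIFT) | 4;
    shape_id_t shape_id;
    attr_index_t index;
    vm_unpack_shape_and_index(packed, &shape_id, &index);
    printf("shape_id=%u index=%u\n", shape_id, index); /* shape_id=7 index=3 */
    return 0;
}
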
@@ -467,17 +470,23 @@ set_vm_cc_ivar(const struct rb_callcache *cc)
     *(VALUE *)&cc->flags |= VM_CALLCACHE_IVAR;
 }
 
+static inline uintptr_t
+vm_pack_shape_and_index(shape_id_t shape_id, attr_index_t index)
+{
+    return (attr_index_t)(index + 1) | ((uintptr_t)(shape_id) << SHAPE_FLAG_SHIFT);
+}
+
 static inline void
 vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t dest_shape_id)
 {
     uintptr_t *attr_value = (uintptr_t *)&cc->aux_.attr.value;
     if (!vm_cc_markable(cc)) {
-        *attr_value = (uintptr_t)INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT;
+        *attr_value = vm_pack_shape_and_index(INVALID_SHAPE_ID, ATTR_INDEX_NOT_SET);
         return;
     }
     VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
     VM_ASSERT(cc != vm_cc_empty());
-    *attr_value = (attr_index_t)(index + 1) | ((uintptr_t)(dest_shape_id) << SHAPE_FLAG_SHIFT);
+    *attr_value = vm_pack_shape_and_index(dest_shape_id, index);
     set_vm_cc_ivar(cc);
 }
 
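
The new vm_pack_shape_and_index is the inverse of vm_unpack_shape_and_index above, and packing ATTR_INDEX_NOT_SET is deliberately equivalent to the old bare shift: assuming ATTR_INDEX_NOT_SET is (attr_index_t)-1 as in shape.h, the +1 wraps to 0, leaving the low half clear exactly as "(uintptr_t)INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT" did. A round-trip sketch under the same stand-in definitions as above:

#include <assert.h>
#include <stdint.h>

/* Same stand-ins as the sketch above; ATTR_INDEX_NOT_SET is assumed to be
 * (attr_index_t)-1, as in shape.h. */
typedef uint32_t shape_id_t;
typedef uint32_t attr_index_t;
#define SHAPE_FLAG_SHIFT   32
#define SHAPE_FLAG_MASK    (((uintptr_t)-1) >> SHAPE_FLAG_SHIFT)
#define ATTR_INDEX_NOT_SET ((attr_index_t)-1)

static inline uintptr_t
vm_pack_shape_and_index(shape_id_t shape_id, attr_index_t index)
{
    /* index + 1 wraps ATTR_INDEX_NOT_SET to 0, so "not set" packs to a
     * word whose low half is clear. */
    return (attr_index_t)(index + 1) | ((uintptr_t)(shape_id) << SHAPE_FLAG_SHIFT);
}

static inline void
vm_unpack_shape_and_index(uintptr_t cache_value, shape_id_t *shape_id, attr_index_t *index)
{
    *shape_id = (shape_id_t)(cache_value >> SHAPE_FLAG_SHIFT);
    *index = (attr_index_t)(cache_value & SHAPE_FLAG_MASK) - 1;
}

int
main(void)
{
    shape_id_t shape_id;
    attr_index_t index;

    /* A real attribute index survives the round trip unchanged. */
    vm_unpack_shape_and_index(vm_pack_shape_and_index(42, 3), &shape_id, &index);
    assert(shape_id == 42 && index == 3);

    /* ATTR_INDEX_NOT_SET packs to a clear low half and unpacks back to
     * the sentinel, matching the old bare-shift encoding. */
    uintptr_t packed = vm_pack_shape_and_index(42, ATTR_INDEX_NOT_SET);
    assert((packed & SHAPE_FLAG_MASK) == 0);
    vm_unpack_shape_and_index(packed, &shape_id, &index);
    assert(index == ATTR_INDEX_NOT_SET);
    return 0;
}
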
@@ -490,13 +499,13 @@ vm_cc_ivar_p(const struct rb_callcache *cc)
 static inline void
 vm_ic_attr_index_set(const rb_iseq_t *iseq, const struct iseq_inline_iv_cache_entry *ic, attr_index_t index, shape_id_t dest_shape_id)
 {
-    *(uintptr_t *)&ic->value = ((uintptr_t)dest_shape_id << SHAPE_FLAG_SHIFT) | (attr_index_t)(index + 1);
+    *(uintptr_t *)&ic->value = vm_pack_shape_and_index(dest_shape_id, index);
 }
 
 static inline void
 vm_ic_attr_index_initialize(const struct iseq_inline_iv_cache_entry *ic, shape_id_t shape_id)
 {
-    *(uintptr_t *)&ic->value = (uintptr_t)shape_id << SHAPE_FLAG_SHIFT;
+    *(uintptr_t *)&ic->value = vm_pack_shape_and_index(shape_id, ATTR_INDEX_NOT_SET);
 }
 
 static inline void