@@ -39,7 +39,7 @@ init_node_buffer_elem(node_buffer_elem_t *nbe, size_t allocated, void *xmalloc(s
}

static void
-init_node_buffer_list(node_buffer_list_t * nb, node_buffer_elem_t *head, void *xmalloc(size_t))
+init_node_buffer_list(node_buffer_list_t *nb, node_buffer_elem_t *head, void *xmalloc(size_t))
{
    init_node_buffer_elem(head, NODE_BUF_DEFAULT_SIZE, xmalloc);
    nb->head = nb->last = head;
@@ -59,14 +59,13 @@ rb_node_buffer_new(void)
#endif
{
    const size_t bucket_size = offsetof(node_buffer_elem_t, buf) + NODE_BUF_DEFAULT_SIZE;
-    const size_t alloc_size = sizeof(node_buffer_t) + (bucket_size * 2);
+    const size_t alloc_size = sizeof(node_buffer_t) + (bucket_size);
    STATIC_ASSERT(
        integer_overflow,
        offsetof(node_buffer_elem_t, buf) + NODE_BUF_DEFAULT_SIZE
-        > sizeof(node_buffer_t) + 2 * sizeof(node_buffer_elem_t));
+        > sizeof(node_buffer_t) + sizeof(node_buffer_elem_t));
    node_buffer_t *nb = ruby_xmalloc(alloc_size);
-    init_node_buffer_list(&nb->unmarkable, (node_buffer_elem_t*)&nb[1], ruby_xmalloc);
-    init_node_buffer_list(&nb->markable, (node_buffer_elem_t*)((size_t)nb->unmarkable.head + bucket_size), ruby_xmalloc);
+    init_node_buffer_list(&nb->buffer_list, (node_buffer_elem_t*)&nb[1], ruby_xmalloc);
    nb->local_tables = 0;
    nb->tokens = 0;
#ifdef UNIVERSAL_PARSER
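For orientation (not part of the patch itself): rb_node_buffer_new() used to reserve room for the node_buffer_t header plus two initial buckets, one per list; after this change it reserves the header plus a single bucket, with the first node_buffer_elem_t placed directly after the header at &nb[1]. A minimal sketch of that "header plus trailing bucket in one allocation" pattern, using simplified stand-in types rather than the real definitions from the parser headers:

/* Illustrative sketch only: "elem"/"owner" are stand-ins, not the real
 * node_buffer_elem_t / node_buffer_t declarations. */
#include <stddef.h>
#include <stdlib.h>

struct elem  { struct elem *next; size_t used; char buf[]; };
struct owner { struct elem *head; /* ... other bookkeeping ... */ };

static struct owner *
owner_new(size_t default_buf_size)
{
    /* one allocation covers the owner struct plus its first bucket */
    const size_t bucket_size = offsetof(struct elem, buf) + default_buf_size;
    struct owner *o = malloc(sizeof(struct owner) + bucket_size);
    if (!o) return NULL;
    /* the first bucket starts right after the owner, i.e. at &o[1] */
    struct elem *head = (struct elem *)&o[1];
    head->next = NULL;
    head->used = 0;
    o->head = head;
    return o;
}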
@@ -238,9 +237,8 @@ rb_node_buffer_free(rb_ast_t *ast, node_buffer_t *nb)
    if (ast->node_buffer && ast->node_buffer->tokens) {
        parser_tokens_free(ast, ast->node_buffer->tokens);
    }
-    iterate_node_values(ast, &nb->unmarkable, free_ast_value, NULL);
-    node_buffer_list_free(ast, &nb->unmarkable);
-    node_buffer_list_free(ast, &nb->markable);
+    iterate_node_values(ast, &nb->buffer_list, free_ast_value, NULL);
+    node_buffer_list_free(ast, &nb->buffer_list);
    struct rb_ast_local_table_link *local_table = nb->local_tables;
    while (local_table) {
        struct rb_ast_local_table_link *next_table = local_table->next;
@@ -277,23 +275,22 @@ ast_newnode_in_bucket(rb_ast_t *ast, node_buffer_list_t *nb, size_t size, size_t
    return ptr;
}

-RBIMPL_ATTR_PURE()
-static bool
-nodetype_markable_p(enum node_type type)
-{
-    return false;
-}
-
NODE *
rb_ast_newnode(rb_ast_t *ast, enum node_type type, size_t size, size_t alignment)
{
    node_buffer_t *nb = ast->node_buffer;
-    node_buffer_list_t *bucket =
-        (nodetype_markable_p(type) ? &nb->markable : &nb->unmarkable);
+    node_buffer_list_t *bucket = &nb->buffer_list;
    return ast_newnode_in_bucket(ast, bucket, size, alignment);
}

#if RUBY_DEBUG
+RBIMPL_ATTR_PURE()
+static bool
+nodetype_markable_p(enum node_type type)
+{
+    return false;
+}
+
void
rb_ast_node_type_change(NODE *n, enum node_type type)
{
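Worth noting (again outside the patch itself): nodetype_markable_p() already returned false for every node type, so the markable list could never receive nodes. The hunk above therefore drops the per-type bucket choice in rb_ast_newnode() in favor of the single buffer_list, and keeps the helper only under #if RUBY_DEBUG, where rb_ast_node_type_change() presumably still references it in debug builds.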
@@ -410,8 +407,7 @@ rb_ast_memsize(const rb_ast_t *ast)

    if (nb) {
        size += sizeof(node_buffer_t);
-        size += buffer_list_size(&nb->unmarkable);
-        size += buffer_list_size(&nb->markable);
+        size += buffer_list_size(&nb->buffer_list);
    }
    return size;
}
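Taken together, the hunks collapse node_buffer_t's two lists (markable and unmarkable) into a single buffer_list, so allocation (rb_ast_newnode), freeing (rb_node_buffer_free), and size accounting (rb_ast_memsize) all walk one list. A rough sketch of the shape implied by the diff; the real declarations live in the parser headers and carry more fields (local_tables, tokens, the UNIVERSAL_PARSER config), so this is illustrative only:

/* Illustrative stand-ins; not the real declarations. */
typedef struct node_buffer_elem node_buffer_elem_t;

typedef struct {
    node_buffer_elem_t *head;   /* first bucket in the list */
    node_buffer_elem_t *last;   /* bucket currently being filled */
} node_buffer_list_t;

typedef struct {
    /* before this change: node_buffer_list_t unmarkable, markable; */
    node_buffer_list_t buffer_list;   /* after: one list for every node */
    /* ... local_tables, tokens, ... */
} node_buffer_t;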