9
9
* Representation" In ACM SIGPLAN Workshop on Intermediate Representations
10
10
* (IR '95), pages 35-49, Jan. 1995.
11
11
*
12
- * The phisical IR representation is based on Mike Pall's LuaJIT IR.
12
+ * The physical IR representation is based on Mike Pall's LuaJIT IR.
13
13
* See: M. Pall. "LuaJIT 2.0 intellectual property disclosure and research
14
14
* opportunities" November 2009 http://lua-users.org/lists/lua-l/2009-11/msg00089.html
15
15
*/
@@ -134,10 +134,18 @@ void ir_print_const(const ir_ctx *ctx, const ir_insn *insn, FILE *f, bool quoted
134
134
fprintf (f , "%" PRIi64 , insn -> val .i64 );
135
135
break ;
136
136
case IR_DOUBLE :
137
- fprintf (f , "%g" , insn -> val .d );
137
+ if (isnan (insn -> val .d )) {
138
+ fprintf (f , "nan" );
139
+ } else {
140
+ fprintf (f , "%g" , insn -> val .d );
141
+ }
138
142
break ;
139
143
case IR_FLOAT :
140
- fprintf (f , "%f" , insn -> val .f );
144
+ if (isnan (insn -> val .f )) {
145
+ fprintf (f , "nan" );
146
+ } else {
147
+ fprintf (f , "%f" , insn -> val .f );
148
+ }
141
149
break ;
142
150
default :
143
151
IR_ASSERT (0 );
@@ -806,7 +814,7 @@ ir_ref ir_folding(ir_ctx *ctx, uint32_t opt, ir_ref op1, ir_ref op2, ir_ref op3,
806
814
}
807
815
}
808
816
if (any == 0x7f ) {
809
- /* All parrerns ar checked. Pass on to CSE. */
817
+ /* All patterns are checked. Pass on to CSE. */
810
818
goto ir_fold_cse ;
811
819
}
812
820
/* op2/op1/op op2/_/op _/op1/op _/_/op
@@ -1307,6 +1315,104 @@ void ir_hashtab_key_sort(ir_hashtab *tab)
1307
1315
} while (-- i );
1308
1316
}
1309
1317
1318
+ static void ir_addrtab_resize (ir_hashtab * tab )
1319
+ {
1320
+ uint32_t old_hash_size = (uint32_t )(- (int32_t )tab -> mask );
1321
+ char * old_data = tab -> data ;
1322
+ uint32_t size = tab -> size * 2 ;
1323
+ uint32_t hash_size = ir_hashtab_hash_size (size );
1324
+ char * data = ir_mem_malloc (hash_size * sizeof (uint32_t ) + size * sizeof (ir_addrtab_bucket ));
1325
+ ir_addrtab_bucket * p ;
1326
+ uint32_t pos , i ;
1327
+
1328
+ memset (data , -1 , hash_size * sizeof (uint32_t ));
1329
+ tab -> data = data + (hash_size * sizeof (uint32_t ));
1330
+ tab -> mask = (uint32_t )(- (int32_t )hash_size );
1331
+ tab -> size = size ;
1332
+
1333
+ memcpy (tab -> data , old_data , tab -> count * sizeof (ir_addrtab_bucket ));
1334
+ ir_mem_free (old_data - (old_hash_size * sizeof (uint32_t )));
1335
+
1336
+ i = tab -> count ;
1337
+ pos = 0 ;
1338
+ p = (ir_addrtab_bucket * )tab -> data ;
1339
+ do {
1340
+ uint32_t key = (uint32_t )p -> key | tab -> mask ;
1341
+ p -> next = ((uint32_t * )tab -> data )[(int32_t )key ];
1342
+ ((uint32_t * )tab -> data )[(int32_t )key ] = pos ;
1343
+ pos += sizeof (ir_addrtab_bucket );
1344
+ p ++ ;
1345
+ } while (-- i );
1346
+ }
1347
+
1348
+ void ir_addrtab_init (ir_hashtab * tab , uint32_t size )
1349
+ {
1350
+ IR_ASSERT (size > 0 );
1351
+ uint32_t hash_size = ir_hashtab_hash_size (size );
1352
+ char * data = ir_mem_malloc (hash_size * sizeof (uint32_t ) + size * sizeof (ir_addrtab_bucket ));
1353
+ memset (data , -1 , hash_size * sizeof (uint32_t ));
1354
+ tab -> data = (data + (hash_size * sizeof (uint32_t )));
1355
+ tab -> mask = (uint32_t )(- (int32_t )hash_size );
1356
+ tab -> size = size ;
1357
+ tab -> count = 0 ;
1358
+ tab -> pos = 0 ;
1359
+ }
1360
+
1361
+ void ir_addrtab_free (ir_hashtab * tab )
1362
+ {
1363
+ uint32_t hash_size = (uint32_t )(- (int32_t )tab -> mask );
1364
+ char * data = (char * )tab -> data - (hash_size * sizeof (uint32_t ));
1365
+ ir_mem_free (data );
1366
+ tab -> data = NULL ;
1367
+ }
1368
+
1369
+ ir_ref ir_addrtab_find (const ir_hashtab * tab , uint64_t key )
1370
+ {
1371
+ const char * data = (const char * )tab -> data ;
1372
+ uint32_t pos = ((uint32_t * )data )[(int32_t )(key | tab -> mask )];
1373
+ ir_addrtab_bucket * p ;
1374
+
1375
+ while (pos != IR_INVALID_IDX ) {
1376
+ p = (ir_addrtab_bucket * )(data + pos );
1377
+ if (p -> key == key ) {
1378
+ return p -> val ;
1379
+ }
1380
+ pos = p -> next ;
1381
+ }
1382
+ return IR_INVALID_VAL ;
1383
+ }
1384
+
1385
+ bool ir_addrtab_add (ir_hashtab * tab , uint64_t key , ir_ref val )
1386
+ {
1387
+ char * data = (char * )tab -> data ;
1388
+ uint32_t pos = ((uint32_t * )data )[(int32_t )(key | tab -> mask )];
1389
+ ir_addrtab_bucket * p ;
1390
+
1391
+ while (pos != IR_INVALID_IDX ) {
1392
+ p = (ir_addrtab_bucket * )(data + pos );
1393
+ if (p -> key == key ) {
1394
+ return p -> val == val ;
1395
+ }
1396
+ pos = p -> next ;
1397
+ }
1398
+
1399
+ if (UNEXPECTED (tab -> count >= tab -> size )) {
1400
+ ir_addrtab_resize (tab );
1401
+ data = tab -> data ;
1402
+ }
1403
+
1404
+ pos = tab -> pos ;
1405
+ tab -> pos += sizeof (ir_addrtab_bucket );
1406
+ tab -> count ++ ;
1407
+ p = (ir_addrtab_bucket * )(data + pos );
1408
+ p -> key = key ;
1409
+ p -> val = val ;
1410
+ key |= tab -> mask ;
1411
+ p -> next = ((uint32_t * )data )[(int32_t )key ];
1412
+ ((uint32_t * )data )[(int32_t )key ] = pos ;
1413
+ return 1 ;
1414
+ }
1415
+
1310
1416
/* Memory API */
1311
1417
#ifdef _WIN32
1312
1418
void * ir_mem_mmap (size_t size )
@@ -1537,24 +1643,32 @@ ir_ref _ir_PARAM(ir_ctx *ctx, ir_type type, const char* name, ir_ref num)
1537
1643
1538
1644
/* Create a VAR instruction of the given type and name, attached to the
 * start of the current basic block.
 *
 * The current control node may be an intermediate (e.g. "memory")
 * instruction rather than the block entry, so walk the op1 control chain
 * backwards until a BB-start node is found and attach the VAR there.
 */
ir_ref _ir_VAR(ir_ctx *ctx, ir_type type, const char* name)
{
	/* TODO: VAR may be inserted after some "memory" instruction */
	ir_ref ref = ctx->control;

	IR_ASSERT(ctx->control);
	while (1) {
		/* Assert the node being examined, not the loop-invariant
		 * ctx->control: a broken op1 chain that reaches IR_UNUSED (0)
		 * must trip here instead of dereferencing a bogus slot. */
		IR_ASSERT(ref);
		if (IR_IS_BB_START(ctx->ir_base[ref].op)) {
			break;
		}
		ref = ctx->ir_base[ref].op1;
	}
	return ir_var(ctx, type, ref, name);
}
1544
1660
1545
- ir_ref _ir_PHI_2 (ir_ctx * ctx , ir_ref src1 , ir_ref src2 )
1661
+ ir_ref _ir_PHI_2 (ir_ctx * ctx , ir_type type , ir_ref src1 , ir_ref src2 )
1546
1662
{
1547
- ir_type type = ctx -> ir_base [src1 ].type ;
1548
-
1549
1663
IR_ASSERT (ctx -> control );
1550
1664
IR_ASSERT (ctx -> ir_base [ctx -> control ].op == IR_MERGE || ctx -> ir_base [ctx -> control ].op == IR_LOOP_BEGIN );
1551
- if (src1 == src2 ) {
1665
+ if (src1 == src2 && src1 != IR_UNUSED ) {
1552
1666
return src1 ;
1553
1667
}
1554
1668
return ir_emit3 (ctx , IR_OPTX (IR_PHI , type , 3 ), ctx -> control , src1 , src2 );
1555
1669
}
1556
1670
1557
- ir_ref _ir_PHI_N (ir_ctx * ctx , ir_ref n , ir_ref * inputs )
1671
+ ir_ref _ir_PHI_N (ir_ctx * ctx , ir_type type , ir_ref n , ir_ref * inputs )
1558
1672
{
1559
1673
IR_ASSERT (ctx -> control );
1560
1674
IR_ASSERT (n > 0 );
@@ -1565,17 +1679,19 @@ ir_ref _ir_PHI_N(ir_ctx *ctx, ir_ref n, ir_ref *inputs)
1565
1679
ir_ref ref = inputs [0 ];
1566
1680
1567
1681
IR_ASSERT (ctx -> ir_base [ctx -> control ].op == IR_MERGE || ctx -> ir_base [ctx -> control ].op == IR_LOOP_BEGIN );
1568
- for (i = 1 ; i < n ; i ++ ) {
1569
- if (inputs [i ] != ref ) {
1570
- break ;
1682
+ if (ref != IR_UNUSED ) {
1683
+ for (i = 1 ; i < n ; i ++ ) {
1684
+ if (inputs [i ] != ref ) {
1685
+ break ;
1686
+ }
1687
+ }
1688
+ if (i == n ) {
1689
+ /* all the same */
1690
+ return ref ;
1571
1691
}
1572
- }
1573
- if (i == n ) {
1574
- /* all the same */
1575
- return ref ;
1576
1692
}
1577
1693
1578
- ref = ir_emit_N (ctx , IR_OPT (IR_PHI , ctx -> ir_base [ inputs [ 0 ]]. type ), n + 1 );
1694
+ ref = ir_emit_N (ctx , IR_OPT (IR_PHI , type ), n + 1 );
1579
1695
ir_set_op (ctx , ref , 1 , ctx -> control );
1580
1696
for (i = 0 ; i < n ; i ++ ) {
1581
1697
ir_set_op (ctx , ref , i + 2 , inputs [i ]);
@@ -1857,6 +1973,22 @@ ir_ref _ir_CALL_5(ir_ctx *ctx, ir_type type, ir_ref func, ir_ref arg1, ir_ref ar
1857
1973
return call ;
1858
1974
}
1859
1975
1976
+ ir_ref _ir_CALL_N (ir_ctx * ctx , ir_type type , ir_ref func , uint32_t count , ir_ref * args )
1977
+ {
1978
+ ir_ref call ;
1979
+ uint32_t i ;
1980
+
1981
+ IR_ASSERT (ctx -> control );
1982
+ call = ir_emit_N (ctx , IR_OPT (IR_CALL , type ), count + 2 );
1983
+ ir_set_op (ctx , call , 1 , ctx -> control );
1984
+ ir_set_op (ctx , call , 2 , func );
1985
+ for (i = 0 ; i < count ; i ++ ) {
1986
+ ir_set_op (ctx , call , i + 3 , args [i ]);
1987
+ }
1988
+ ctx -> control = call ;
1989
+ return call ;
1990
+ }
1991
+
1860
1992
void _ir_UNREACHABLE (ir_ctx * ctx )
1861
1993
{
1862
1994
IR_ASSERT (ctx -> control );
@@ -1941,6 +2073,22 @@ void _ir_TAILCALL_5(ir_ctx *ctx, ir_ref func, ir_ref arg1, ir_ref arg2, ir_ref a
1941
2073
_ir_UNREACHABLE (ctx );
1942
2074
}
1943
2075
2076
+ void _ir_TAILCALL_N (ir_ctx * ctx , ir_ref func , uint32_t count , ir_ref * args )
2077
+ {
2078
+ ir_ref call ;
2079
+ uint32_t i ;
2080
+
2081
+ IR_ASSERT (ctx -> control );
2082
+ call = ir_emit_N (ctx , IR_TAILCALL , count + 2 );
2083
+ ir_set_op (ctx , call , 1 , ctx -> control );
2084
+ ir_set_op (ctx , call , 2 , func );
2085
+ for (i = 0 ; i < count ; i ++ ) {
2086
+ ir_set_op (ctx , call , i + 3 , args [i ]);
2087
+ }
2088
+ ctx -> control = call ;
2089
+ _ir_UNREACHABLE (ctx );
2090
+ }
2091
+
1944
2092
ir_ref _ir_SWITCH (ir_ctx * ctx , ir_ref val )
1945
2093
{
1946
2094
ir_ref ref ;
@@ -2197,20 +2345,23 @@ ir_type ir_get_return_type(ir_ctx *ctx)
2197
2345
ir_ref ref ;
2198
2346
ir_insn * insn ;
2199
2347
uint8_t ret_type = 255 ;
2348
+ ir_type type ;
2200
2349
2201
2350
/* Check all RETURN nodes */
2202
2351
ref = ctx -> ir_base [1 ].op1 ;
2203
2352
while (ref ) {
2204
2353
insn = & ctx -> ir_base [ref ];
2205
2354
if (insn -> op == IR_RETURN ) {
2355
+ type = ctx -> ir_base [insn -> op2 ].type ;
2356
+ check_type :
2206
2357
if (ret_type == 255 ) {
2207
2358
if (insn -> op2 ) {
2208
- ret_type = ctx -> ir_base [ insn -> op2 ]. type ;
2359
+ ret_type = type ;
2209
2360
} else {
2210
2361
ret_type = IR_VOID ;
2211
2362
}
2212
2363
} else if (insn -> op2 ) {
2213
- if (ret_type != ctx -> ir_base [ insn -> op2 ]. type ) {
2364
+ if (ret_type != type ) {
2214
2365
IR_ASSERT (0 && "conflicting return types" );
2215
2366
return IR_VOID ;
2216
2367
}
@@ -2220,6 +2371,12 @@ ir_type ir_get_return_type(ir_ctx *ctx)
2220
2371
return IR_VOID ;
2221
2372
}
2222
2373
}
2374
+ } else if (insn -> op == IR_UNREACHABLE ) {
2375
+ insn = & ctx -> ir_base [insn -> op1 ];
2376
+ if (insn -> op == IR_TAILCALL ) {
2377
+ type = insn -> type ;
2378
+ goto check_type ;
2379
+ }
2223
2380
}
2224
2381
ref = ctx -> ir_base [ref ].op3 ;
2225
2382
}
0 commit comments