@@ -323,13 +323,14 @@ IR_FOLD(ADD(C_I16, C_I16))
 IR_FOLD(ADD(C_I32, C_I32))
 {
 	IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type || (sizeof(void*) == 4 && IR_OPT_TYPE(opt) == IR_ADDR));
-	IR_FOLD_CONST_I(op1_insn->val.i32 + op2_insn->val.i32);
+	/* Here and below we use "unsigned math" to prevent undefined signed overflow behavior */
+	IR_FOLD_CONST_I((int32_t)(op1_insn->val.u32 + op2_insn->val.u32));
 }
 
 IR_FOLD(ADD(C_I64, C_I64))
 {
 	IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type || (sizeof(void*) == 8 && IR_OPT_TYPE(opt) == IR_ADDR));
-	IR_FOLD_CONST_I(op1_insn->val.i64 + op2_insn->val.i64);
+	IR_FOLD_CONST_I(op1_insn->val.u64 + op2_insn->val.u64);
 }
 
 IR_FOLD(ADD(C_DOUBLE, C_DOUBLE))
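
Note on the idiom: ISO C leaves signed integer overflow undefined, while unsigned arithmetic is defined to wrap modulo 2^N, so the fold computes on the unsigned union members and converts back. Converting an out-of-range unsigned value to a signed type is implementation-defined in C, but the mainstream compilers (GCC, Clang, MSVC) all define it as two's-complement wraparound, and reading back through the `val` union is permitted in C. A minimal standalone sketch of the pattern; the wrap_add32 name is ours, not part of IR:

    #include <stdint.h>

    /* Wrapping 32-bit signed addition without signed-overflow UB:
     * add the unsigned representations (wraps mod 2^32 by definition),
     * then convert the result back to int32_t. */
    static int32_t wrap_add32(int32_t a, int32_t b)
    {
        return (int32_t)((uint32_t)a + (uint32_t)b);
    }
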
@@ -393,13 +394,13 @@ IR_FOLD(SUB(C_I16, C_I16))
 IR_FOLD(SUB(C_I32, C_I32))
 {
 	IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type);
-	IR_FOLD_CONST_I(op1_insn->val.i32 - op2_insn->val.i32);
+	IR_FOLD_CONST_I((int32_t)(op1_insn->val.u32 - op2_insn->val.u32));
 }
 
 IR_FOLD(SUB(C_I64, C_I64))
 {
 	IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type);
-	IR_FOLD_CONST_I(op1_insn->val.i64 - op2_insn->val.i64);
+	IR_FOLD_CONST_I(op1_insn->val.u64 - op2_insn->val.u64);
 }
 
 IR_FOLD(SUB(C_DOUBLE, C_DOUBLE))
@@ -463,13 +464,13 @@ IR_FOLD(MUL(C_I16, C_I16))
 IR_FOLD(MUL(C_I32, C_I32))
 {
 	IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type);
-	IR_FOLD_CONST_I(op1_insn->val.i32 * op2_insn->val.i32);
+	IR_FOLD_CONST_I((int32_t)(op1_insn->val.u32 * op2_insn->val.u32));
 }
 
 IR_FOLD(MUL(C_I64, C_I64))
 {
 	IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type);
-	IR_FOLD_CONST_I(op1_insn->val.i64 * op2_insn->val.i64);
+	IR_FOLD_CONST_I(op1_insn->val.u64 * op2_insn->val.u64);
 }
 
 IR_FOLD(MUL(C_DOUBLE, C_DOUBLE))
@@ -556,7 +557,7 @@ IR_FOLD(NEG(C_I32))
 IR_FOLD(NEG(C_I64))
 {
 	IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type);
-	IR_FOLD_CONST_I(-op1_insn->val.i64);
+	IR_FOLD_CONST_I(-op1_insn->val.u64);
 }
 
 IR_FOLD(NEG(C_DOUBLE))
@@ -580,7 +581,7 @@ IR_FOLD(ABS(C_I64))
 	if (op1_insn->val.i64 >= 0) {
 		IR_FOLD_COPY(op1);
 	} else {
-		IR_FOLD_CONST_I(-op1_insn->val.i64);
+		IR_FOLD_CONST_I(-op1_insn->val.u64);
 	}
 }
 
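
NEG and ABS switch to the unsigned member for the same reason: -op1_insn->val.i64 is undefined when the value is INT64_MIN, while unsigned negation is defined for every input. A sketch under the same assumptions; the helper name is hypothetical:

    #include <stdint.h>

    /* Wrapping 64-bit negation: -(uint64_t)v is defined as 2^64 - v,
     * so INT64_MIN maps back to INT64_MIN instead of invoking UB. */
    static int64_t wrap_neg64(int64_t v)
    {
        return (int64_t)(-(uint64_t)v);
    }
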
@@ -680,7 +681,7 @@ IR_FOLD(MUL_OV(C_I64, C_I64))
 	int64_t min = -max - 1;
 	int64_t res;
 	IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type);
-	res = op1_insn->val.i64 * op2_insn->val.i64;
+	res = op1_insn->val.u64 * op2_insn->val.u64;
 	if (op1_insn->val.i64 != 0 && res / op1_insn->val.i64 != op2_insn->val.i64 && res >= min && res <= max) {
 		IR_FOLD_NEXT;
 	}
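
MUL_OV now wraps the multiplication but still detects overflow with the divide-back test: for a != 0, the truncated product res satisfies res / a == b exactly when a * b fit in the type. A self-contained sketch of that test; the function name is ours, and it adds the a == -1 special case that a general version needs because INT64_MIN / -1 is itself UB:

    #include <stdbool.h>
    #include <stdint.h>

    /* Wrapping 64-bit multiply plus divide-back overflow detection. */
    static bool mul64_overflows(int64_t a, int64_t b, int64_t *res)
    {
        *res = (int64_t)((uint64_t)a * (uint64_t)b);
        if (a == 0) {
            return false;          /* 0 * b never overflows */
        }
        if (a == -1) {
            return b == INT64_MIN; /* avoid INT64_MIN / -1, itself UB */
        }
        return *res / a != b;      /* quotient differs iff the product wrapped */
    }
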
@@ -2518,7 +2519,7 @@ IR_FOLD(ADD(ADD, C_I64))
 {
 	if (IR_IS_CONST_REF(op1_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op2].op)) {
 		/* (x + c1) + c2 => x + (c1 + c2) */
-		val.i64 = ctx->ir_base[op1_insn->op2].val.i64 + op2_insn->val.i64;
+		val.i64 = ctx->ir_base[op1_insn->op2].val.u64 + op2_insn->val.u64;
 		op1 = op1_insn->op1;
 		op2 = ir_const(ctx, val, IR_OPT_TYPE(opt));
 		IR_FOLD_RESTART;
@@ -2556,8 +2557,8 @@ IR_FOLD(ADD(SUB, C_I64))
 {
 	if (IR_IS_CONST_REF(op1_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op2].op)) {
 		/* (x - c1) + c2 => x + (c2 - c1) */
-		val.i64 = op2_insn->val.i64 - ctx->ir_base[op1_insn->op2].val.i64;
-		if (val.i64 < 0 && val.i64 - 1 < 0) {
+		val.i64 = op2_insn->val.u64 - ctx->ir_base[op1_insn->op2].val.u64;
+		if (val.i64 < 0 && val.i64 != INT64_MIN) {
 			val.i64 = -val.i64;
 			opt++; /* ADD -> SUB */
 		}
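
The guard rewritten here and in the SUB folds below is behavior-preserving but UB-free: the old val.i64 < 0 && val.i64 - 1 < 0 excluded INT64_MIN only by relying on val.i64 - 1 wrapping to a positive value, which is itself signed-overflow UB, whereas val.i64 != INT64_MIN states the intent directly. The check matters because the fold negates the constant when flipping ADD to SUB, and -INT64_MIN is not representable. A minimal sketch of the decision; the helper name is hypothetical:

    #include <stdbool.h>
    #include <stdint.h>

    /* ADD(x, c) with c < 0 may be rewritten as SUB(x, -c) only when
     * -c is representable, i.e. c != INT64_MIN. */
    static bool can_flip_add_to_sub(int64_t c)
    {
        return c < 0 && c != INT64_MIN;
    }
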
@@ -2566,7 +2567,7 @@ IR_FOLD(ADD(SUB, C_I64))
 		IR_FOLD_RESTART;
 	} else if (IR_IS_CONST_REF(op1_insn->op1) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op1].op)) {
 		/* (c1 - x) + c2 => (c1 + c2) - x */
-		val.i64 = ctx->ir_base[op1_insn->op1].val.i64 + op2_insn->val.i64;
+		val.i64 = ctx->ir_base[op1_insn->op1].val.u64 + op2_insn->val.u64;
 		opt++; /* ADD -> SUB */
 		op2 = op1_insn->op2;
 		op1 = ir_const(ctx, val, IR_OPT_TYPE(opt));
@@ -2599,8 +2600,8 @@ IR_FOLD(SUB(ADD, C_I64))
 {
 	if (IR_IS_CONST_REF(op1_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op2].op)) {
 		/* (x + c1) - c2 => x + (c1 - c2) */
-		val.i64 = ctx->ir_base[op1_insn->op2].val.i64 - op2_insn->val.i64;
-		if (val.i64 < 0 && val.i64 - 1 < 0) {
+		val.i64 = ctx->ir_base[op1_insn->op2].val.u64 - op2_insn->val.u64;
+		if (val.i64 < 0 && val.i64 != INT64_MIN) {
 			val.i64 = -val.i64;
 		} else {
 			opt--; /* SUB -> ADD */
@@ -2635,7 +2636,7 @@ IR_FOLD(SUB(C_I64, ADD))
 {
 	if (IR_IS_CONST_REF(op2_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op2_insn->op2].op)) {
 		/* c1 - (x + c2) => (c1 - c2) - x */
-		val.i64 = op1_insn->val.i64 - ctx->ir_base[op2_insn->op2].val.i64;
+		val.i64 = op1_insn->val.u64 - ctx->ir_base[op2_insn->op2].val.u64;
 		op2 = op2_insn->op1;
 		op1 = ir_const(ctx, val, IR_OPT_TYPE(opt));
 		IR_FOLD_RESTART;
@@ -2652,7 +2653,7 @@ IR_FOLD(SUB(SUB, C_ADDR))
 	if (IR_IS_CONST_REF(op1_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op2].op)) {
 		/* (x - c1) - c2 => x - (c1 + c2) */
 		val.u64 = ctx->ir_base[op1_insn->op2].val.u64 + op2_insn->val.u64;
-		if (val.i64 < 0 && val.i64 - 1 < 0) {
+		if (val.i64 < 0 && val.i64 != INT64_MIN) {
 			val.i64 = -val.i64;
 			opt--; /* SUB -> ADD */
 		}
@@ -2676,8 +2677,8 @@ IR_FOLD(SUB(SUB, C_I64))
 {
 	if (IR_IS_CONST_REF(op1_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op2].op)) {
 		/* (x - c1) - c2 => x - (c1 + c2) */
-		val.i64 = ctx->ir_base[op1_insn->op2].val.i64 + op2_insn->val.i64;
-		if (val.i64 < 0 && val.i64 - 1 < 0) {
+		val.i64 = ctx->ir_base[op1_insn->op2].val.u64 + op2_insn->val.u64;
+		if (val.i64 < 0 && val.i64 != INT64_MIN) {
 			val.i64 = -val.i64;
 			opt--; /* SUB -> ADD */
 		}
@@ -2686,7 +2687,7 @@ IR_FOLD(SUB(SUB, C_I64))
 		IR_FOLD_RESTART;
 	} else if (IR_IS_CONST_REF(op1_insn->op1) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op1].op)) {
 		/* (c1 - x) - c2 => (c1 - c2) - x */
-		val.i64 = ctx->ir_base[op1_insn->op1].val.i64 - op2_insn->val.i64;
+		val.i64 = ctx->ir_base[op1_insn->op1].val.u64 - op2_insn->val.u64;
 		op2 = op1_insn->op2;
 		op1 = ir_const(ctx, val, IR_OPT_TYPE(opt));
 		IR_FOLD_RESTART;
@@ -2709,7 +2710,7 @@ IR_FOLD(SUB(C_ADDR, SUB))
 	} else if (IR_IS_CONST_REF(op2_insn->op1) && !IR_IS_SYM_CONST(ctx->ir_base[op2_insn->op1].op)) {
 		/* c1 - (c2 - x) => x + (c1 - c2) */
 		val.u64 = op1_insn->val.u64 - ctx->ir_base[op2_insn->op1].val.u64;
-		if (val.i64 < 0 && val.i64 - 1 < 0) {
+		if (val.i64 < 0 && val.i64 != INT64_MIN) {
 			val.i64 = -val.i64;
 			opt++; /* ADD -> SUB */
 		}
@@ -2727,14 +2728,14 @@ IR_FOLD(SUB(C_I64, SUB))
 {
 	if (IR_IS_CONST_REF(op2_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op2_insn->op2].op)) {
 		/* c1 - (x - c2) => (c1 + c2) - x */
-		val.i64 = op1_insn->val.i64 + ctx->ir_base[op2_insn->op2].val.i64;
+		val.i64 = op1_insn->val.u64 + ctx->ir_base[op2_insn->op2].val.u64;
 		op2 = op2_insn->op1;
 		op1 = ir_const(ctx, val, IR_OPT_TYPE(opt));
 		IR_FOLD_RESTART;
 	} else if (IR_IS_CONST_REF(op2_insn->op1) && !IR_IS_SYM_CONST(ctx->ir_base[op2_insn->op1].op)) {
 		/* c1 - (c2 - x) => x + (c1 - c2) */
-		val.i64 = op1_insn->val.i64 - ctx->ir_base[op2_insn->op1].val.i64;
-		if (val.i64 < 0 && val.i64 - 1 < 0) {
+		val.i64 = op1_insn->val.u64 - ctx->ir_base[op2_insn->op1].val.u64;
+		if (val.i64 < 0 && val.i64 != INT64_MIN) {
 			val.i64 = -val.i64;
 			opt++; /* ADD -> SUB */
 		}
@@ -2768,7 +2769,7 @@ IR_FOLD(MUL(MUL, C_I64))
 {
 	if (IR_IS_CONST_REF(op1_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op2].op)) {
 		/* (x * c1) * c2 => x * (c1 * c2) */
-		val.i64 = ctx->ir_base[op1_insn->op2].val.i64 * op2_insn->val.i64;
+		val.i64 = ctx->ir_base[op1_insn->op2].val.u64 * op2_insn->val.u64;
 		op1 = op1_insn->op1;
 		op2 = ir_const(ctx, val, IR_OPT_TYPE(opt));
 		IR_FOLD_RESTART;