Skip to content

Commit 0f8b4f6

Browse files
committed
Update now that #129508 is landed
* Remove temp stepvector operand from VPWidenIntOrFpInductionRecipe
* Use VPInstruction::Broadcast
1 parent 8257a57 commit 0f8b4f6

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

44 files changed

+327
-340
lines changed

llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7782,7 +7782,6 @@ DenseMap<const SCEV *, Value *> LoopVectorizationPlanner::executePlan(
77827782
"Trying to execute plan with unsupported VF");
77837783
assert(BestVPlan.hasUF(BestUF) &&
77847784
"Trying to execute plan with unsupported UF");
7785-
VPlanTransforms::materializeStepVectors(BestVPlan);
77867785
// TODO: Move to VPlan transform stage once the transition to the VPlan-based
77877786
// cost model is complete for better cost estimates.
77887787
VPlanTransforms::runPass(VPlanTransforms::unrollByUF, BestVPlan, BestUF,

llvm/lib/Transforms/Vectorize/VPlan.cpp

Lines changed: 9 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -317,17 +317,15 @@ Value *VPTransformState::get(const VPValue *Def, bool NeedsScalar) {
317317
LastLane = 0;
318318
}
319319

320+
auto *LastInst = cast<Instruction>(get(Def, LastLane));
321+
// Set the insert point after the last scalarized instruction or after the
322+
// last PHI, if LastInst is a PHI. This ensures the insertelement sequence
323+
// will directly follow the scalar definitions.
320324
auto OldIP = Builder.saveIP();
321-
auto *LastVal = get(Def, LastLane);
322-
if (auto *LastInst = dyn_cast<Instruction>(LastVal)) {
323-
// Set the insert point after the last scalarized instruction or after the
324-
// last PHI, if LastInst is a PHI. This ensures the insertelement sequence
325-
// will directly follow the scalar definitions.
326-
auto NewIP = isa<PHINode>(LastInst)
327-
? LastInst->getParent()->getFirstNonPHIIt()
328-
: std::next(BasicBlock::iterator(LastInst));
329-
Builder.SetInsertPoint(&*NewIP);
330-
}
325+
auto NewIP = isa<PHINode>(LastInst)
326+
? LastInst->getParent()->getFirstNonPHIIt()
327+
: std::next(BasicBlock::iterator(LastInst));
328+
Builder.SetInsertPoint(&*NewIP);
331329

332330
// However, if we are vectorizing, we need to construct the vector values.
333331
// If the value is known to be uniform after vectorization, we can just
@@ -342,7 +340,7 @@ Value *VPTransformState::get(const VPValue *Def, bool NeedsScalar) {
342340
} else {
343341
// Initialize packing with insertelements to start from undef.
344342
assert(!VF.isScalable() && "VF is assumed to be non scalable.");
345-
Value *Undef = PoisonValue::get(toVectorizedTy(LastVal->getType(), VF));
343+
Value *Undef = PoisonValue::get(toVectorizedTy(LastInst->getType(), VF));
346344
set(Def, Undef);
347345
for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
348346
packScalarIntoVectorizedValue(Def, Lane);

llvm/lib/Transforms/Vectorize/VPlan.h

Lines changed: 1 addition & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1880,7 +1880,7 @@ class VPWidenIntOrFpInductionRecipe : public VPWidenInductionRecipe {
18801880
TruncInst *Trunc;
18811881

18821882
// If this recipe is unrolled it will have 2 additional operands.
1883-
bool isUnrolled() const { return getNumOperands() == 6; }
1883+
bool isUnrolled() const { return getNumOperands() == 5; }
18841884

18851885
public:
18861886
VPWidenIntOrFpInductionRecipe(PHINode *IV, VPValue *Start, VPValue *Step,
@@ -1930,16 +1930,6 @@ class VPWidenIntOrFpInductionRecipe : public VPWidenInductionRecipe {
19301930
VPValue *getVFValue() { return getOperand(2); }
19311931
const VPValue *getVFValue() const { return getOperand(2); }
19321932

1933-
// TODO: Remove once VPWidenIntOrFpInduction is fully expanded in
1934-
// convertToConcreteRecipes.
1935-
VPInstructionWithType *getStepVector() {
1936-
auto *StepVector =
1937-
cast<VPInstructionWithType>(getOperand(3)->getDefiningRecipe());
1938-
assert(StepVector->getOpcode() == VPInstruction::StepVector &&
1939-
"step vector operand must be a VPInstruction::StepVector");
1940-
return StepVector;
1941-
}
1942-
19431933
VPValue *getSplatVFValue() {
19441934
// If the recipe has been unrolled return the VPValue for the induction
19451935
// increment.

llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -776,7 +776,6 @@ Value *VPInstruction::generate(VPTransformState &State) {
776776
return Builder.CreateCountTrailingZeroElems(Builder.getInt64Ty(), Mask,
777777
true, Name);
778778
}
779-
780779
default:
781780
llvm_unreachable("Unsupported opcode for instruction");
782781
}
@@ -967,6 +966,7 @@ bool VPInstruction::onlyFirstLaneUsed(const VPValue *Op) const {
967966
case VPInstruction::BranchOnCount:
968967
case VPInstruction::BranchOnCond:
969968
case VPInstruction::ResumePhi:
969+
case VPInstruction::Broadcast:
970970
return true;
971971
case VPInstruction::PtrAdd:
972972
return Op == getOperand(0) || vputils::onlyFirstLaneUsed(this);
@@ -1087,15 +1087,14 @@ void VPInstruction::print(raw_ostream &O, const Twine &Indent,
10871087

10881088
void VPInstructionWithType::execute(VPTransformState &State) {
10891089
State.setDebugLocFrom(getDebugLoc());
1090-
switch (getOpcode()) {
1091-
case Instruction::ZExt:
1092-
case Instruction::Trunc: {
1090+
if (isScalarCast()) {
10931091
Value *Op = State.get(getOperand(0), VPLane(0));
10941092
Value *Cast = State.Builder.CreateCast(Instruction::CastOps(getOpcode()),
10951093
Op, ResultTy);
10961094
State.set(this, Cast, VPLane(0));
1097-
break;
1095+
return;
10981096
}
1097+
switch (getOpcode()) {
10991098
case VPInstruction::StepVector: {
11001099
Value *StepVector =
11011100
State.Builder.CreateStepVector(VectorType::get(ResultTy, State.VF));

llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp

Lines changed: 19 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -1223,16 +1223,6 @@ static bool optimizeVectorInductionWidthForTCAndVFUF(VPlan &Plan,
12231223
WideIV->setStartValue(NewStart);
12241224
auto *NewStep = Plan.getOrAddLiveIn(ConstantInt::get(NewIVTy, 1));
12251225
WideIV->setStepValue(NewStep);
1226-
// TODO: Remove once VPWidenIntOrFpInductionRecipe is fully expanded.
1227-
VPInstructionWithType *OldStepVector = WideIV->getStepVector();
1228-
assert(OldStepVector->getNumUsers() == 1 &&
1229-
"step vector should only be used by single "
1230-
"VPWidenIntOrFpInductionRecipe");
1231-
auto *NewStepVector = new VPInstructionWithType(
1232-
VPInstruction::StepVector, {}, NewIVTy, OldStepVector->getDebugLoc());
1233-
NewStepVector->insertAfter(OldStepVector->getDefiningRecipe());
1234-
OldStepVector->replaceAllUsesWith(NewStepVector);
1235-
OldStepVector->eraseFromParent();
12361226

12371227
auto *NewBTC = new VPWidenCastRecipe(
12381228
Instruction::Trunc, Plan.getOrCreateBackedgeTakenCount(), NewIVTy);
@@ -2459,15 +2449,20 @@ expandVPWidenIntOrFpInduction(VPWidenIntOrFpInductionRecipe *WidenIVR,
24592449
}
24602450

24612451
// Construct the initial value of the vector IV in the vector loop preheader.
2462-
Type *IVIntTy = IntegerType::get(IV->getContext(), Ty->getScalarSizeInBits());
2452+
Type *StepTy = TypeInfo.inferScalarType(Step);
2453+
Type *IVIntTy =
2454+
IntegerType::get(IV->getContext(), StepTy->getScalarSizeInBits());
24632455
VPValue *Init = Builder.createNaryOp(VPInstruction::StepVector, {}, IVIntTy);
2464-
if (Ty->isFloatingPointTy())
2465-
Init = Builder.createWidenCast(Instruction::UIToFP, Init, Ty);
2456+
if (StepTy->isFloatingPointTy())
2457+
Init = Builder.createWidenCast(Instruction::UIToFP, Init, StepTy);
2458+
2459+
VPValue *SplatStart = Builder.createNaryOp(VPInstruction::Broadcast, Start);
2460+
VPValue *SplatStep = Builder.createNaryOp(VPInstruction::Broadcast, Step);
24662461

24672462
// FIXME: The newly created binary instructions should contain nsw/nuw
24682463
// flags, which can be found from the original scalar operations.
2469-
Init = Builder.createNaryOp(MulOp, {Init, Step}, FMFs);
2470-
Init = Builder.createNaryOp(AddOp, {Start, Init}, FMFs, {}, "induction");
2464+
Init = Builder.createNaryOp(MulOp, {Init, SplatStep}, FMFs);
2465+
Init = Builder.createNaryOp(AddOp, {SplatStart, Init}, FMFs, {}, "induction");
24712466

24722467
// Create the widened phi of the vector IV.
24732468
auto *WidePHI =
@@ -2479,18 +2474,21 @@ expandVPWidenIntOrFpInduction(VPWidenIntOrFpInductionRecipe *WidenIVR,
24792474
VPValue *Inc;
24802475
VPValue *Prev;
24812476
// If unrolled, use the increment and prev value from the operands.
2482-
if (WidenIVR->getNumOperands() == 5) {
2483-
Inc = WidenIVR->getSplatVFValue();
2477+
if (auto *SplatVF = WidenIVR->getSplatVFValue()) {
2478+
Inc = SplatVF;
24842479
Prev = WidenIVR->getLastUnrolledPartOperand();
24852480
} else {
24862481
// Multiply the vectorization factor by the step using integer or
24872482
// floating-point arithmetic as appropriate.
2488-
if (Ty->isFloatingPointTy())
2489-
VF = Builder.createScalarCast(Instruction::CastOps::UIToFP, VF, Ty, DL);
2490-
else if (Ty != TypeInfo.inferScalarType(VF))
2491-
VF = Builder.createScalarCast(Instruction::CastOps::Trunc, VF, Ty, DL);
2483+
if (StepTy->isFloatingPointTy())
2484+
VF = Builder.createScalarCast(Instruction::CastOps::UIToFP, VF, StepTy,
2485+
DL);
2486+
else
2487+
VF =
2488+
Builder.createScalarCast(Instruction::CastOps::Trunc, VF, StepTy, DL);
24922489

24932490
Inc = Builder.createNaryOp(MulOp, {Step, VF}, FMFs);
2491+
Inc = Builder.createNaryOp(VPInstruction::Broadcast, Inc);
24942492
Prev = WidePHI;
24952493
}
24962494

@@ -2691,27 +2689,6 @@ void VPlanTransforms::handleUncountableEarlyExit(
26912689
LatchExitingBranch->eraseFromParent();
26922690
}
26932691

2694-
void VPlanTransforms::materializeStepVectors(VPlan &Plan) {
2695-
for (auto &Phi : Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
2696-
auto *IVR = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
2697-
if (!IVR)
2698-
continue;
2699-
2700-
Type *Ty = IVR->getPHINode()->getType();
2701-
if (TruncInst *Trunc = IVR->getTruncInst())
2702-
Ty = Trunc->getType();
2703-
if (Ty->isFloatingPointTy())
2704-
Ty = IntegerType::get(Ty->getContext(), Ty->getScalarSizeInBits());
2705-
2706-
VPBuilder Builder(Plan.getVectorPreheader());
2707-
VPInstruction *StepVector = Builder.createNaryOp(
2708-
VPInstruction::StepVector, {}, Ty, {}, IVR->getDebugLoc());
2709-
assert(IVR->getNumOperands() == 3 &&
2710-
"can only add step vector before unrolling");
2711-
IVR->addOperand(StepVector);
2712-
}
2713-
}
2714-
27152692
void VPlanTransforms::materializeBroadcasts(VPlan &Plan) {
27162693
if (Plan.hasScalarVFOnly())
27172694
return;

llvm/lib/Transforms/Vectorize/VPlanTransforms.h

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -199,11 +199,6 @@ struct VPlanTransforms {
199199
optimizeInductionExitUsers(VPlan &Plan,
200200
DenseMap<VPValue *, VPValue *> &EndValues);
201201

202-
/// Materialize VPInstruction::StepVectors for VPWidenIntOrFpInductionRecipes.
203-
/// TODO: Remove once all of VPWidenIntOrFpInductionRecipe is expanded in
204-
/// convertToConcreteRecipes.
205-
static void materializeStepVectors(VPlan &Plan);
206-
207202
/// Add explicit broadcasts for live-ins and VPValues defined in \p Plan's entry block if they are used as vectors.
208203
static void materializeBroadcasts(VPlan &Plan);
209204

llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -16,14 +16,14 @@ define void @clamped_tc_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range(1,1
1616
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
1717
; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 8
1818
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 0, i64 8)
19-
; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
2019
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[VAL]], i64 0
2120
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
21+
; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
2222
; CHECK-NEXT: [[TMP7:%.*]] = mul <vscale x 8 x i64> [[TMP8]], splat (i64 1)
2323
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP7]]
24-
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP6]], i64 0
24+
; CHECK-NEXT: [[TMP12:%.*]] = mul i64 1, [[TMP6]]
25+
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP12]], i64 0
2526
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
26-
; CHECK-NEXT: [[TMP9:%.*]] = mul <vscale x 8 x i64> splat (i64 1), [[DOTSPLAT]]
2727
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
2828
; CHECK: vector.body:
2929
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -37,7 +37,7 @@ define void @clamped_tc_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range(1,1
3737
; CHECK-NEXT: call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP14]], ptr [[TMP17]], i32 1, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]])
3838
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
3939
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX_NEXT]], i64 8)
40-
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[TMP9]]
40+
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
4141
; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
4242
; CHECK: middle.block:
4343
; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
@@ -100,14 +100,14 @@ define void @clamped_tc_max_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range
100100
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
101101
; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 8
102102
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 0, i64 [[WIDE_TRIP_COUNT]])
103-
; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
104103
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[VAL]], i64 0
105104
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
105+
; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
106106
; CHECK-NEXT: [[TMP7:%.*]] = mul <vscale x 8 x i64> [[TMP8]], splat (i64 1)
107107
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP7]]
108-
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP6]], i64 0
108+
; CHECK-NEXT: [[TMP12:%.*]] = mul i64 1, [[TMP6]]
109+
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP12]], i64 0
109110
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
110-
; CHECK-NEXT: [[TMP9:%.*]] = mul <vscale x 8 x i64> splat (i64 1), [[DOTSPLAT]]
111111
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
112112
; CHECK: vector.body:
113113
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -121,7 +121,7 @@ define void @clamped_tc_max_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range
121121
; CHECK-NEXT: call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP14]], ptr [[TMP17]], i32 1, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]])
122122
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
123123
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX_NEXT]], i64 [[WIDE_TRIP_COUNT]])
124-
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[TMP9]]
124+
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
125125
; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
126126
; CHECK: middle.block:
127127
; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]]

llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -862,8 +862,8 @@ define void @low_trip_count_fold_tail_scalarized_store(ptr %dst) {
862862
; DEFAULT-NEXT: store i8 [[TMP33]], ptr [[TMP32]], align 1
863863
; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE14]]
864864
; DEFAULT: [[PRED_STORE_CONTINUE14]]:
865-
; DEFAULT-NEXT: [[VEC_IND_NEXT]] = add <8 x i8> [[VEC_IND]], splat (i8 8)
866865
; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
866+
; DEFAULT-NEXT: [[VEC_IND_NEXT]] = add <8 x i8> [[VEC_IND]], splat (i8 8)
867867
; DEFAULT-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
868868
; DEFAULT: [[MIDDLE_BLOCK]]:
869869
; DEFAULT-NEXT: br label %[[EXIT:.*]]
@@ -964,8 +964,8 @@ define void @low_trip_count_fold_tail_scalarized_store(ptr %dst) {
964964
; PRED-NEXT: store i8 [[TMP33]], ptr [[TMP32]], align 1
965965
; PRED-NEXT: br label %[[PRED_STORE_CONTINUE14]]
966966
; PRED: [[PRED_STORE_CONTINUE14]]:
967-
; PRED-NEXT: [[VEC_IND_NEXT]] = add <8 x i8> [[VEC_IND]], splat (i8 8)
968967
; PRED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
968+
; PRED-NEXT: [[VEC_IND_NEXT]] = add <8 x i8> [[VEC_IND]], splat (i8 8)
969969
; PRED-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
970970
; PRED: [[MIDDLE_BLOCK]]:
971971
; PRED-NEXT: br label %[[EXIT:.*]]

0 commit comments

Comments (0)