 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt -p loop-vectorize -S %s | FileCheck %s
+; RUN: opt -p loop-vectorize -scalable-vectorization=on -force-vector-width=1 -S %s | FileCheck %s
 
 target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
 target triple = "riscv64-unknown-linux-gnu"
 
-; Make sure we do not pick <vscale x 1 x i64> as VF for a loop with a
-; first-order recurrence.
 define i64 @pr97452_scalable_vf1_for(ptr %src) #0 {
 ; CHECK-LABEL: define i64 @pr97452_scalable_vf1_for(
 ; CHECK-SAME: ptr [[SRC:%.*]]) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 23, [[TMP0]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
 ; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 23, [[TMP1]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 23, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT: [[TMP4:%.*]] = sub i32 [[TMP3]], 1
+; CHECK-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 0, i32 [[TMP4]]
 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
 ; CHECK: [[VECTOR_BODY]]:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i64> [ <i64 poison, i64 poison, i64 poison, i64 0>, %[[VECTOR_PH]] ], [ [[WIDE_LOAD1:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 1 x i64> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], %[[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 4
-; CHECK-NEXT: [[WIDE_LOAD1]] = load <4 x i64>, ptr [[TMP5]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
-; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
-; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD]] = load <vscale x 1 x i64>, ptr [[TMP6]], align 8
+; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 1 x i64> @llvm.vector.splice.nxv1i64(<vscale x 1 x i64> [[VECTOR_RECUR]], <vscale x 1 x i64> [[WIDE_LOAD]], i32 -1)
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i64> [[WIDE_LOAD1]], i32 2
-; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i64> [[WIDE_LOAD1]], i32 3
-; CHECK-NEXT: br i1 false, label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT: [[TMP10:%.*]] = sub i32 [[TMP9]], 1
+; CHECK-NEXT: [[TMP11:%.*]] = extractelement <vscale x 1 x i64> [[TMP7]], i32 [[TMP10]]
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT: [[TMP13:%.*]] = sub i32 [[TMP12]], 1
+; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <vscale x 1 x i64> [[WIDE_LOAD]], i32 [[TMP13]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 23, [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
 ; CHECK: [[SCALAR_PH]]:
 ; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
 ; CHECK-NEXT: br label %[[LOOP:.*]]
 ; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[L:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[FOR:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[L:%.*]], %[[LOOP]] ]
 ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
 ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
 ; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[IV]]
 ; CHECK-NEXT: [[L]] = load i64, ptr [[GEP]], align 8
 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 22
 ; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
 ; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[SCALAR_RECUR]], %[[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[FOR]], %[[LOOP]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
 ; CHECK-NEXT: ret i64 [[RES]]
 ;
 entry:
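
The diff is truncated at `entry:`, but the scalar CHECK lines above pin down the shape of the loop under test: a 23-iteration loop whose phi carries the value loaded on the previous iteration, i.e. a first-order recurrence. Below is a minimal sketch of such a loop, reconstructed from those CHECK lines; the concrete value names and the truncated body are assumptions, not the file's actual text.

; Sketch only: names reconstructed from the scalar CHECK lines above.
define i64 @pr97452_scalable_vf1_for_sketch(ptr %src) {
entry:
  br label %loop

loop:
  %for = phi i64 [ 0, %entry ], [ %l, %loop ]      ; first-order recurrence: holds last iteration's load
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %iv.next = add i64 %iv, 1
  %gep = getelementptr inbounds i64, ptr %src, i64 %iv
  %l = load i64, ptr %gep, align 8
  %ec = icmp eq i64 %iv, 22                        ; 23 iterations in total
  br i1 %ec, label %exit, label %loop

exit:
  %res = phi i64 [ %for, %loop ]                   ; returns the load from the second-to-last iteration
  ret i64 %res
}

With the old RUN line the vectorizer picked a fixed VF of 4, interleaved by 2, as the removed CHECK lines show; the new -scalable-vectorization=on -force-vector-width=1 flags force it to vectorize at <vscale x 1 x i64> instead.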
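The heart of the updated expectations is the @llvm.vector.splice.nxv1i64 call with offset -1: the intrinsic conceptually concatenates its two operands and returns a window starting one element before the seam, so lane 0 of the result is the last lane of the previous iteration's recurrence vector and every later lane i is lane i-1 of the current load. A fixed-width illustration of the same intrinsic follows (VF 4 and the function name are assumptions, chosen only for readability):

; llvm.vector.splice(<1,2,3,4>, <5,6,7,8>, -1) yields <4,5,6,7>: each result
; lane is the value "one iteration earlier" relative to the second operand.
declare <4 x i64> @llvm.vector.splice.v4i64(<4 x i64>, <4 x i64>, i32)

define <4 x i64> @splice_demo(<4 x i64> %prev, <4 x i64> %cur) {
  %shifted = call <4 x i64> @llvm.vector.splice.v4i64(<4 x i64> %prev, <4 x i64> %cur, i32 -1)
  ret <4 x i64> %shifted
}

In the test the same shift runs at <vscale x 1 x i64>, and the middle block then extracts lane vscale-1 of the spliced vector ([[TMP11]]) as the recurrence value live out into the exit block.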