; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S < %s -passes=gvn | FileCheck %s

; Check GVN's analysis of loads clobbered by llvm.experimental.memset.pattern.
define i32 @memset_pattern_clobber_load(ptr %p) {
; CHECK-LABEL: @memset_pattern_clobber_load(
; CHECK-NEXT: call void @llvm.experimental.memset.pattern.p0.i8.i64(ptr [[P:%.*]], i8 1, i64 8, i1 false)
; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NEXT: ret i32 [[LOAD]]
;
  call void @llvm.experimental.memset.pattern(ptr %p, i8 1, i64 8, i1 false)
  %load = load i32, ptr %p
  ret i32 %load
}

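; As above, but with an i16 pattern. The unmangled intrinsic call below is
; expected to be remangled by the IR parser to the .p0.i16.i64 form seen in
; the CHECK line.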
define i32 @memset_pattern_clobber_load2(ptr %p) {
; CHECK-LABEL: @memset_pattern_clobber_load2(
; CHECK-NEXT: call void @llvm.experimental.memset.pattern.p0.i16.i64(ptr [[P:%.*]], i16 1, i64 8, i1 false)
; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NEXT: ret i32 [[LOAD]]
;
  call void @llvm.experimental.memset.pattern(ptr %p, i16 1, i64 8, i1 false)
  %load = load i32, ptr %p
  ret i32 %load
}

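; As above, but with a pointer (%p itself) as the pattern value.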
define ptr @memset_pattern_clobber_load3(ptr %p) {
; CHECK-LABEL: @memset_pattern_clobber_load3(
; CHECK-NEXT: call void @llvm.experimental.memset.pattern.p0.p0.i64(ptr [[P:%.*]], ptr [[P]], i64 8, i1 false)
; CHECK-NEXT: [[LOAD:%.*]] = load ptr, ptr [[P]], align 8
; CHECK-NEXT: ret ptr [[LOAD]]
;
  call void @llvm.experimental.memset.pattern(ptr %p, ptr %p, i64 8, i1 false)
  %load = load ptr, ptr %p
  ret ptr %load
}

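; %P and %Q are noalias, so the value loaded from %Q should be forwarded
; across the memset.pattern on %P, folding %sub to zero.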
define i32 @load_forward_over_memset_pattern(ptr %P, ptr noalias %Q) {
; CHECK-LABEL: @load_forward_over_memset_pattern(
; CHECK-NEXT: tail call void @llvm.experimental.memset.pattern.p0.i8.i64(ptr [[P:%.*]], i8 27, i64 8, i1 false)
; CHECK-NEXT: ret i32 0
;
  %v1 = load i32, ptr %Q
  tail call void @llvm.experimental.memset.pattern(ptr %P, i8 27, i64 8, i1 false)
  %v2 = load i32, ptr %Q
  %sub = sub i32 %v1, %v2
  ret i32 %sub
}

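; As above, but %Q is also passed as the pattern operand. The CHECK lines
; reflect that the second load is currently not eliminated in this case.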
define i32 @load_forward_over_memset_pattern2(ptr %P, ptr noalias %Q) nounwind ssp {
; CHECK-LABEL: @load_forward_over_memset_pattern2(
; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[Q:%.*]], align 4
; CHECK-NEXT: tail call void @llvm.experimental.memset.pattern.p0.p0.i64(ptr [[P:%.*]], ptr [[Q]], i64 8, i1 false)
; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[Q]], align 4
; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
; CHECK-NEXT: ret i32 [[SUB]]
;
  %v1 = load i32, ptr %Q
  tail call void @llvm.experimental.memset.pattern(ptr %P, ptr %Q, i64 8, i1 false)
  %v2 = load i32, ptr %Q
  %sub = sub i32 %v1, %v2
  ret i32 %sub
}