AMDGPU: Add test showing bit operations that should be reducible #141837

162 changes: 162 additions & 0 deletions llvm/test/CodeGen/AMDGPU/bit-op-reduce-width-known-bits.ll
@@ -0,0 +1,162 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s

; Check for situations where we could reduce the width of 64-bit bitwise
; operations based on known bits.
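;
; In each case the range information proves the high 32 bits of one
; operand, so the high half of the 64-bit result follows from a bitwise
; identity (x ^ 0 = x, x | 0 = x, x & -1 = x) and only the low halves
; need a real 32-bit instruction.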


; Should be able to reduce this to a 32-bit xor plus a copy (sketch below)
; https://alive2.llvm.org/ce/z/9LddFX
define i64 @v_xor_i64_known_hi_i32_from_arg_range(i64 range(i64 0, 4294967296) %arg0, i64 %arg1) {
; CHECK-LABEL: v_xor_i64_known_hi_i32_from_arg_range:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_xor_b32_e32 v1, v1, v3
; CHECK-NEXT: v_xor_b32_e32 v0, v0, v2
; CHECK-NEXT: s_setpc_b64 s[30:31]
%xor = xor i64 %arg0, %arg1
ret i64 %xor
}
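; A sketch of the hoped-for selection for the xor above, given that the
; high half of %arg0 (v1) is known zero (x ^ 0 = x):
;   v_xor_b32_e32 v0, v0, v2   ; 32-bit xor of the low halves
;   v_mov_b32_e32 v1, v3       ; high half is just a copy of %arg1's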

; Should be able to reduce this to a 32-bit or plus a copy (sketch below)
; https://alive2.llvm.org/ce/z/HaXnBJ
define i64 @v_or_i64_known_hi_i32_from_arg_range(i64 range(i64 0, 4294967296) %arg0, i64 %arg1) {
; CHECK-LABEL: v_or_i64_known_hi_i32_from_arg_range:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_or_b32_e32 v1, v1, v3
; CHECK-NEXT: v_or_b32_e32 v0, v0, v2
; CHECK-NEXT: s_setpc_b64 s[30:31]
%or = or i64 %arg0, %arg1
ret i64 %or
}
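; Likewise for the or above (x | 0 = x), a sketch of the hoped-for form:
;   v_or_b32_e32 v0, v0, v2    ; 32-bit or of the low halves
;   v_mov_b32_e32 v1, v3       ; high half is a plain copy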

; Should be able to reduce this to a 32-bit and plus a copy (sketch below)
; https://alive2.llvm.org/ce/z/M96Ror
define i64 @v_and_i64_known_i32_from_arg_range(i64 range(i64 -4294967296, 0) %arg0, i64 %arg1) {
; CHECK-LABEL: v_and_i64_known_i32_from_arg_range:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_and_b32_e32 v1, v1, v3
; CHECK-NEXT: v_and_b32_e32 v0, v0, v2
; CHECK-NEXT: s_setpc_b64 s[30:31]
%and = and i64 %arg0, %arg1
ret i64 %and
}
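; For the and above, the range [-4294967296, -1] makes the high half of
; %arg0 all ones (x & -1 = x), so a sketch of the hoped-for form:
;   v_and_b32_e32 v0, v0, v2   ; 32-bit and of the low halves
;   v_mov_b32_e32 v1, v3       ; high half is a plain copy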

define i64 @s_xor_i64_known_i32_from_arg_range(i64 range(i64 0, 65) inreg %arg) {
; CHECK-LABEL: s_xor_i64_known_i32_from_arg_range:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_not_b64 s[4:5], s[16:17]
; CHECK-NEXT: v_mov_b32_e32 v0, s4
; CHECK-NEXT: v_mov_b32_e32 v1, s5
; CHECK-NEXT: s_setpc_b64 s[30:31]
%xor = xor i64 %arg, -1
ret i64 %xor
}
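; For s_xor_i64_known_i32_from_arg_range above, the range proves the high
; 32 bits of %arg are zero, so the xor with -1 could presumably become a
; 32-bit not plus materializing the known all-ones high half, e.g.:
;   s_not_b32 s4, s16
;   s_mov_b32 s5, -1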

define i64 @v_xor_i64_known_i32_from_call_range() {
; CHECK-LABEL: v_xor_i64_known_i32_from_call_range:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; def v[0:1]
; CHECK-NEXT: ;;#ASMEND
; CHECK-NEXT: v_not_b32_e32 v1, v1
; CHECK-NEXT: v_not_b32_e32 v0, v0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%call = call range(i64 0, 65) i64 asm "; def $0", "=v"()
%xor = xor i64 %call, -1
ret i64 %xor
}
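; Same expectation for v_xor_i64_known_i32_from_call_range above, where
; the range comes from call-site metadata rather than an argument
; attribute; a sketch of the narrower form:
;   v_not_b32_e32 v0, v0
;   v_mov_b32_e32 v1, -1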

define i64 @s_xor_i64_known_i32_from_call_range() {
; CHECK-LABEL: s_xor_i64_known_i32_from_call_range:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; def s[4:5]
; CHECK-NEXT: ;;#ASMEND
; CHECK-NEXT: s_not_b64 s[4:5], s[4:5]
; CHECK-NEXT: v_mov_b32_e32 v0, s4
; CHECK-NEXT: v_mov_b32_e32 v1, s5
; CHECK-NEXT: s_setpc_b64 s[30:31]
%call = call range(i64 0, 65) i64 asm "; def $0", "=s"()
%xor = xor i64 %call, -1
ret i64 %xor
}
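; And likewise for the SGPR variant above:
;   s_not_b32 s4, s4
;   s_mov_b32 s5, -1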

; Reduced from -amdgpu-codegenprepare-expand-div64 output; produces a not
; of constant 0 which ideally would fold away (see the note below).
; FIXME: Produces not of constant 0
define i64 @v_xor_i64_known_i32_from_range_use_out_of_block(i64 %x) {
; CHECK-LABEL: v_xor_i64_known_i32_from_range_use_out_of_block:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_ffbh_u32_e32 v2, v0
; CHECK-NEXT: v_add_u32_e32 v2, 32, v2
; CHECK-NEXT: v_ffbh_u32_e32 v3, v1
; CHECK-NEXT: v_min_u32_e32 v4, v2, v3
; CHECK-NEXT: v_mov_b32_e32 v5, 0
; CHECK-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[0:1]
; CHECK-NEXT: v_mov_b32_e32 v2, 0
; CHECK-NEXT: v_mov_b32_e32 v3, 0
; CHECK-NEXT: s_and_saveexec_b64 s[4:5], vcc
; CHECK-NEXT: ; %bb.1: ; %inc
; CHECK-NEXT: v_not_b32_e32 v2, v4
; CHECK-NEXT: v_not_b32_e32 v3, 0
; CHECK-NEXT: v_add_co_u32_e32 v2, vcc, v0, v2
; CHECK-NEXT: v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
; CHECK-NEXT: ; %bb.2: ; %UnifiedReturnBlock
; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
; CHECK-NEXT: v_mov_b32_e32 v0, v2
; CHECK-NEXT: v_mov_b32_e32 v1, v3
; CHECK-NEXT: s_setpc_b64 s[30:31]
entry:
%ctlz = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 %x, i1 true)
%cmp.entry.not = icmp eq i64 %ctlz, %x
br i1 %cmp.entry.not, label %inc, label %ret

inc: ; preds = %entry
%i1 = xor i64 %ctlz, -1
%i2 = add i64 %x, %i1
ret i64 %i2

ret: ; preds = %entry
ret i64 0
}
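; A sketch of the fold the FIXME above asks for: not(0) is -1, so
;   v_not_b32_e32 v3, 0
; could simply be
;   v_mov_b32_e32 v3, -1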

define i64 @s_xor_i64_known_i32_from_range_use_out_of_block(i64 inreg %x) {
; CHECK-LABEL: s_xor_i64_known_i32_from_range_use_out_of_block:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_flbit_i32_b64 s4, s[16:17]
; CHECK-NEXT: s_mov_b32 s5, 0
; CHECK-NEXT: s_cmp_lg_u64 s[4:5], s[16:17]
; CHECK-NEXT: s_cbranch_scc1 .LBB7_2
; CHECK-NEXT: ; %bb.1: ; %inc
; CHECK-NEXT: s_not_b64 s[4:5], s[4:5]
; CHECK-NEXT: s_add_u32 s4, s16, s4
; CHECK-NEXT: s_addc_u32 s5, s17, s5
; CHECK-NEXT: v_mov_b32_e32 v0, s4
; CHECK-NEXT: v_mov_b32_e32 v1, s5
; CHECK-NEXT: s_setpc_b64 s[30:31]
; CHECK-NEXT: .LBB7_2: ; %ret
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
entry:
%ctlz = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 %x, i1 true)
%cmp.entry.not = icmp eq i64 %ctlz, %x
br i1 %cmp.entry.not, label %inc, label %ret

inc: ; preds = %entry
%i1 = xor i64 %ctlz, -1
%i2 = add i64 %x, %i1
ret i64 %i2

ret: ; preds = %entry
ret i64 0
}
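; Similarly, s5 is known zero going into the s_not_b64 above, so it could
; presumably shrink to a 32-bit s_not_b32 of s4 plus s_mov_b32 s5, -1.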
