Commit 91a2b92
[llvm][aarch64] Add support for the MS qualifiers __ptr32, __ptr64, __sptr, __uptr
1 parent f1be516 commit 91a2b92
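
For reference, the MS pointer-size qualifiers this change supports look like the following in C++ source under -fms-extensions; the address-space mapping in the comments is taken from the ARM64AS enum introduced below and from the clang invocation recorded in the new test. A minimal illustration, not code from the patch:

// Illustrative only: MS pointer-size qualifiers, compiled with clang
// -fms-extensions (or MSVC). Per the ARM64AS enum added in this commit,
// clang lowers them to LLVM address spaces:
//   int * __ptr32 __sptr  -> ptr addrspace(270)  (32-bit, sign-extended when widened)
//   int * __ptr32 __uptr  -> ptr addrspace(271)  (32-bit, zero-extended when widened)
//   int * __ptr64         -> ptr addrspace(272)  (64-bit)
struct Foo {
  int * __ptr32 p32; // 32-bit pointer field
  int * __ptr64 p64; // explicit 64-bit pointer field
};

void assign(Foo *f, int * __ptr32 __sptr i) {
  f->p64 = i; // widening the __sptr pointer sign-extends its 32-bit value
}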

File tree: 5 files changed, +274 −11 lines

llvm/lib/Target/AArch64/AArch64.h
Lines changed: 5 additions & 0 deletions

@@ -114,6 +114,11 @@ void initializeSMEABIPass(PassRegistry &);
 void initializeSMEPeepholeOptPass(PassRegistry &);
 void initializeSVEIntrinsicOptsPass(PassRegistry &);
 void initializeAArch64Arm64ECCallLoweringPass(PassRegistry &);
+
+namespace ARM64AS {
+enum : unsigned { PTR32_SPTR = 270, PTR32_UPTR = 271, PTR64 = 272 };
+}
+
 } // end namespace llvm

 #endif
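
These enum values correspond to the pointer widths declared by the data layout in the new test (p270:32:32, p271:32:32, p272:64:64, default space 64-bit). A small compile-time sketch of that correspondence; pointerSizeInBits is a hypothetical helper that only mirrors what getPointerSize reports for these spaces:

// Hypothetical helper, not part of the patch; widths assumed from the test's
// data layout string (p270:32:32-p271:32:32-p272:64:64, default space 64-bit).
namespace ARM64AS {
enum : unsigned { PTR32_SPTR = 270, PTR32_UPTR = 271, PTR64 = 272 };
}

constexpr unsigned pointerSizeInBits(unsigned AS) {
  return (AS == ARM64AS::PTR32_SPTR || AS == ARM64AS::PTR32_UPTR) ? 32 : 64;
}

static_assert(pointerSizeInBits(ARM64AS::PTR32_SPTR) == 32, "__ptr32 __sptr is 32-bit");
static_assert(pointerSizeInBits(ARM64AS::PTR64) == 64, "__ptr64 is 64-bit");
static_assert(pointerSizeInBits(0) == 64, "default address space is 64-bit");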

llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
Lines changed: 79 additions & 4 deletions

@@ -532,6 +532,9 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
   setOperationAction(ISD::XOR, MVT::i32, Custom);
   setOperationAction(ISD::XOR, MVT::i64, Custom);

+  setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
+  setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
+
   // Virtually no operation on f128 is legal, but LLVM can't expand them when
   // there's a valid register class, so we need custom operations in most cases.
   setOperationAction(ISD::FABS, MVT::f128, Expand);
@@ -6713,6 +6716,37 @@ static SDValue LowerTruncateVectorStore(SDLoc DL, StoreSDNode *ST,
                                   ST->getBasePtr(), ST->getMemOperand());
 }

+static SDValue LowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) {
+  SDLoc dl(Op);
+  SDValue Src = Op.getOperand(0);
+  MVT DestVT = Op.getSimpleValueType();
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  AddrSpaceCastSDNode *N = cast<AddrSpaceCastSDNode>(Op.getNode());
+
+  unsigned SrcAS = N->getSrcAddressSpace();
+  unsigned DestAS = N->getDestAddressSpace();
+  assert(SrcAS != DestAS &&
+         "addrspacecast must be between different address spaces");
+  assert(TLI.getTargetMachine().getPointerSize(SrcAS) !=
+             TLI.getTargetMachine().getPointerSize(DestAS) &&
+         "addrspacecast must be between different ptr sizes");
+
+  if (SrcAS == ARM64AS::PTR32_SPTR) {
+    return DAG.getNode(ISD::SIGN_EXTEND, dl, DestVT, Src,
+                       DAG.getTargetConstant(0, dl, DestVT));
+  } else if (SrcAS == ARM64AS::PTR32_UPTR) {
+    return DAG.getNode(ISD::ZERO_EXTEND, dl, DestVT, Src,
+                       DAG.getTargetConstant(0, dl, DestVT));
+  } else if ((DestAS == ARM64AS::PTR32_SPTR) ||
+             (DestAS == ARM64AS::PTR32_UPTR)) {
+    SDValue Ext = DAG.getAnyExtOrTrunc(Src, dl, DestVT);
+    SDValue Trunc = DAG.getZeroExtendInReg(Ext, dl, DestVT);
+    return Trunc;
+  } else {
+    return Src;
+  }
+}
+
 // Custom lowering for any store, vector or scalar and/or default or with
 // a truncate operations. Currently only custom lower truncate operation
 // from vector v4i16 to v4i8 or volatile stores of i128.
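
In plain terms, LowerADDRSPACECAST sign-extends when widening from the __sptr space, zero-extends when widening from the __uptr space, and keeps only the low 32 bits when narrowing into either 32-bit space; everything else passes through. A standalone model of those rules on integer values, assuming 64-bit transport as in the DAG; the names here are illustrative, not SelectionDAG API:

#include <cstdint>

// Illustrative model only: a pointer value travels as a 64-bit integer, and
// the address spaces decide how its 32-bit representation is widened/narrowed.
enum class PtrAS { Default64, Ptr32SPtr, Ptr32UPtr, Ptr64 };

uint64_t castPointerValue(uint64_t Src, PtrAS SrcAS, PtrAS DestAS) {
  if (SrcAS == PtrAS::Ptr32SPtr)                 // __ptr32 __sptr -> 64-bit
    return (uint64_t)(int64_t)(int32_t)(uint32_t)Src; // sign-extend low 32 bits
  if (SrcAS == PtrAS::Ptr32UPtr)                 // __ptr32 __uptr -> 64-bit
    return (uint64_t)(uint32_t)Src;              // zero-extend low 32 bits
  if (DestAS == PtrAS::Ptr32SPtr || DestAS == PtrAS::Ptr32UPtr)
    return Src & 0xffffffffu;                    // narrow: keep only 32 bits
  return Src;                                    // same width: pass through
}

The CHECK lines in the new test show the same pattern: sxtw for the __sptr argument and a w-register mov (which zero-extends into the x register) for the __uptr argument.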
@@ -7366,6 +7400,8 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
   case ISD::SIGN_EXTEND:
   case ISD::ZERO_EXTEND:
     return LowerFixedLengthVectorIntExtendToSVE(Op, DAG);
+  case ISD::ADDRSPACECAST:
+    return LowerADDRSPACECAST(Op, DAG);
   case ISD::SIGN_EXTEND_INREG: {
     // Only custom lower when ExtraVT has a legal byte based element type.
     EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

@@ -23327,6 +23363,26 @@ static SDValue performLOADCombine(SDNode *N,
     performTBISimplification(N->getOperand(1), DCI, DAG);

   LoadSDNode *LD = cast<LoadSDNode>(N);
+  EVT RegVT = LD->getValueType(0);
+  EVT MemVT = LD->getMemoryVT();
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  SDLoc DL(LD);
+
+  // Cast ptr32 and ptr64 pointers to the default address space before a load.
+  unsigned AddrSpace = LD->getAddressSpace();
+  if (AddrSpace == ARM64AS::PTR64 || AddrSpace == ARM64AS::PTR32_SPTR ||
+      AddrSpace == ARM64AS::PTR32_UPTR) {
+    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
+    if (PtrVT != LD->getBasePtr().getSimpleValueType()) {
+      SDValue Cast =
+          DAG.getAddrSpaceCast(DL, PtrVT, LD->getBasePtr(), AddrSpace, 0);
+      return DAG.getExtLoad(LD->getExtensionType(), DL, RegVT, LD->getChain(),
+                            Cast, LD->getPointerInfo(), MemVT,
+                            LD->getOriginalAlign(),
+                            LD->getMemOperand()->getFlags());
+    }
+  }
+
   if (LD->isVolatile() || !Subtarget->isLittleEndian())
     return SDValue(N, 0);

@@ -23336,13 +23392,11 @@
   if (!LD->isNonTemporal())
     return SDValue(N, 0);

-  EVT MemVT = LD->getMemoryVT();
   if (MemVT.isScalableVector() || MemVT.getSizeInBits() <= 256 ||
       MemVT.getSizeInBits() % 256 == 0 ||
       256 % MemVT.getScalarSizeInBits() != 0)
     return SDValue(N, 0);

-  SDLoc DL(LD);
   SDValue Chain = LD->getChain();
   SDValue BasePtr = LD->getBasePtr();
   SDNodeFlags Flags = LD->getFlags();

@@ -23602,12 +23656,28 @@ static SDValue performSTORECombine(SDNode *N,
   SDValue Value = ST->getValue();
   SDValue Ptr = ST->getBasePtr();
   EVT ValueVT = Value.getValueType();
+  EVT MemVT = ST->getMemoryVT();
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  SDLoc DL(ST);

   auto hasValidElementTypeForFPTruncStore = [](EVT VT) {
     EVT EltVT = VT.getVectorElementType();
     return EltVT == MVT::f32 || EltVT == MVT::f64;
   };

+  // Cast ptr32 and ptr64 pointers to the default address space before a store.
+  unsigned AddrSpace = ST->getAddressSpace();
+  if (AddrSpace == ARM64AS::PTR64 || AddrSpace == ARM64AS::PTR32_SPTR ||
+      AddrSpace == ARM64AS::PTR32_UPTR) {
+    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
+    if (PtrVT != Ptr.getSimpleValueType()) {
+      SDValue Cast = DAG.getAddrSpaceCast(DL, PtrVT, Ptr, AddrSpace, 0);
+      return DAG.getStore(Chain, DL, Value, Cast, ST->getPointerInfo(),
+                          ST->getOriginalAlign(),
+                          ST->getMemOperand()->getFlags(), ST->getAAInfo());
+    }
+  }
+
   if (SDValue Res = combineI8TruncStore(ST, DAG, Subtarget))
     return Res;

@@ -23621,8 +23691,8 @@
       ValueVT.isFixedLengthVector() &&
       ValueVT.getFixedSizeInBits() >= Subtarget->getMinSVEVectorSizeInBits() &&
       hasValidElementTypeForFPTruncStore(Value.getOperand(0).getValueType()))
-    return DAG.getTruncStore(Chain, SDLoc(N), Value.getOperand(0), Ptr,
-                             ST->getMemoryVT(), ST->getMemOperand());
+    return DAG.getTruncStore(Chain, DL, Value.getOperand(0), Ptr, MemVT,
+                             ST->getMemOperand());

   if (SDValue Split = splitStores(N, DCI, DAG, Subtarget))
     return Split;

@@ -26949,6 +27019,11 @@ void AArch64TargetLowering::ReplaceNodeResults(
     ReplaceATOMIC_LOAD_128Results(N, Results, DAG, Subtarget);
     return;
   }
+  case ISD::ADDRSPACECAST: {
+    SDValue V = LowerADDRSPACECAST(SDValue(N, 0), DAG);
+    Results.push_back(V);
+    return;
+  }
   case ISD::ATOMIC_LOAD:
   case ISD::LOAD: {
     MemSDNode *LoadNode = cast<MemSDNode>(N);

llvm/lib/Target/AArch64/AArch64ISelLowering.h
Lines changed: 9 additions & 5 deletions

@@ -587,11 +587,15 @@ class AArch64TargetLowering : public TargetLowering {
                             unsigned Depth) const override;

   MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const override {
-    // Returning i64 unconditionally here (i.e. even for ILP32) means that the
-    // *DAG* representation of pointers will always be 64-bits. They will be
-    // truncated and extended when transferred to memory, but the 64-bit DAG
-    // allows us to use AArch64's addressing modes much more easily.
-    return MVT::getIntegerVT(64);
+    if ((AS == ARM64AS::PTR32_SPTR) || (AS == ARM64AS::PTR32_UPTR)) {
+      return MVT::i32;
+    } else {
+      // Returning i64 unconditionally here (i.e. even for ILP32) means that the
+      // *DAG* representation of pointers will always be 64-bits. They will be
+      // truncated and extended when transferred to memory, but the 64-bit DAG
+      // allows us to use AArch64's addressing modes much more easily.
+      return MVT::i64;
+    }
   }

   bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,

llvm/lib/Target/AArch64/AArch64TargetMachine.h
Lines changed: 1 addition & 2 deletions

@@ -65,8 +65,7 @@ class AArch64TargetMachine : public LLVMTargetMachine {

   /// Returns true if a cast between SrcAS and DestAS is a noop.
   bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
-    // Addrspacecasts are always noops.
-    return true;
+    return (getPointerSize(SrcAS) == getPointerSize(DestAS));
   }

 private:
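
With the widths above, an address space cast is now reported as a no-op only when source and destination pointers have the same size: between the two 32-bit spaces, or between __ptr64 and the default space, but not between a 32-bit space and a 64-bit one. A small sketch of that predicate, assuming the same widths as earlier; these free functions are illustrative stand-ins for the TargetMachine methods, not the real API:

// Illustrative stand-ins for getPointerSize / isNoopAddrSpaceCast, assuming
// 270 and 271 are 32-bit while 272 and the default space 0 are 64-bit.
constexpr unsigned pointerSizeInBits(unsigned AS) {
  return (AS == 270 || AS == 271) ? 32 : 64;
}

constexpr bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) {
  return pointerSizeInBits(SrcAS) == pointerSizeInBits(DestAS);
}

static_assert(isNoopAddrSpaceCast(270, 271), "__sptr <-> __uptr: both 32-bit, free");
static_assert(isNoopAddrSpaceCast(272, 0), "__ptr64 <-> default: both 64-bit, free");
static_assert(!isNoopAddrSpaceCast(270, 0), "__ptr32 -> default needs an extend");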
New test file
Lines changed: 180 additions & 0 deletions

@@ -0,0 +1,180 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s | FileCheck %s

; Source to regenerate:
; struct Foo {
;   int * __ptr32 p32;
;   int * __ptr64 p64;
;   __attribute__((address_space(9))) int *p_other;
; };
; extern "C" void use_foo(Foo *f);
; extern "C" int use_int(int i);
; extern "C" void test_sign_ext(Foo *f, int * __ptr32 __sptr i) {
;   f->p64 = i;
;   use_foo(f);
; }
; extern "C" void test_sign_ext_store_load(int * __ptr32 __sptr i) {
;   *i = use_int(*i);
; }
; extern "C" void test_zero_ext(Foo *f, int * __ptr32 __uptr i) {
;   f->p64 = i;
;   use_foo(f);
; }
; extern "C" void test_zero_ext_store_load(int * __ptr32 __uptr i) {
;   *i = use_int(*i);
; }
; extern "C" void test_trunc(Foo *f, int * __ptr64 i) {
;   f->p32 = i;
;   use_foo(f);
; }
; extern "C" void test_noop1(Foo *f, int * __ptr32 i) {
;   f->p32 = i;
;   use_foo(f);
; }
; extern "C" void test_noop2(Foo *f, int * __ptr64 i) {
;   f->p64 = i;
;   use_foo(f);
; }
; extern "C" void test_null_arg(Foo *f, int * __ptr32 i) {
;   test_noop1(f, 0);
; }
; extern "C" void test_unrecognized(Foo *f, __attribute__((address_space(14))) int *i) {
;   f->p32 = (int * __ptr32)i;
;   use_foo(f);
; }
;
; $ clang --target=aarch64-windows-msvc -fms-extensions -O2 -S -emit-llvm t.cpp

target datalayout = "e-m:w-p:64:64-i32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-n32:64-S128-Fn32"
target triple = "aarch64-unknown-windows-msvc"

; Function Attrs: mustprogress uwtable
define dso_local void @test_sign_ext(ptr noundef %f, ptr addrspace(270) noundef %i) local_unnamed_addr #0 {
; CHECK-LABEL: test_sign_ext:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    sxtw x8, w1
; CHECK-NEXT:    str x8, [x0, #8]
; CHECK-NEXT:    b use_foo
entry:
  %0 = addrspacecast ptr addrspace(270) %i to ptr
  %p64 = getelementptr inbounds nuw i8, ptr %f, i64 8
  store ptr %0, ptr %p64, align 8
  tail call void @use_foo(ptr noundef %f)
  ret void
}

declare dso_local void @use_foo(ptr noundef) local_unnamed_addr #1

; Function Attrs: mustprogress uwtable
define dso_local void @test_sign_ext_store_load(ptr addrspace(270) nocapture noundef %i) local_unnamed_addr #0 {
; CHECK-LABEL: test_sign_ext_store_load:
; CHECK:       // %bb.0: // %entry
; CHECK:         sxtw x19, w0
; CHECK-NEXT:    ldr w0, [x19]
; CHECK-NEXT:    bl use_int
; CHECK-NEXT:    str w0, [x19]
entry:
  %0 = load i32, ptr addrspace(270) %i, align 4
  %call = tail call i32 @use_int(i32 noundef %0)
  store i32 %call, ptr addrspace(270) %i, align 4
  ret void
}

declare dso_local i32 @use_int(i32 noundef) local_unnamed_addr #1

; Function Attrs: mustprogress uwtable
define dso_local void @test_zero_ext(ptr noundef %f, ptr addrspace(271) noundef %i) local_unnamed_addr #0 {
; CHECK-LABEL: test_zero_ext:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, w1
; CHECK-NEXT:    str x8, [x0, #8]
; CHECK-NEXT:    b use_foo
entry:
  %0 = addrspacecast ptr addrspace(271) %i to ptr
  %p64 = getelementptr inbounds nuw i8, ptr %f, i64 8
  store ptr %0, ptr %p64, align 8
  tail call void @use_foo(ptr noundef %f)
  ret void
}

; Function Attrs: mustprogress uwtable
define dso_local void @test_zero_ext_store_load(ptr addrspace(271) nocapture noundef %i) local_unnamed_addr #0 {
; CHECK-LABEL: test_zero_ext_store_load:
; CHECK:       // %bb.0: // %entry
; CHECK:         mov w19, w0
; CHECK-NEXT:    ldr w0, [x19]
; CHECK-NEXT:    bl use_int
; CHECK-NEXT:    str w0, [x19]
entry:
  %0 = load i32, ptr addrspace(271) %i, align 4
  %call = tail call i32 @use_int(i32 noundef %0)
  store i32 %call, ptr addrspace(271) %i, align 4
  ret void
}

; Function Attrs: mustprogress uwtable
define dso_local void @test_trunc(ptr noundef %f, ptr noundef %i) local_unnamed_addr #0 {
; CHECK-LABEL: test_trunc:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str w1, [x0]
; CHECK-NEXT:    b use_foo
entry:
  %0 = addrspacecast ptr %i to ptr addrspace(270)
  store ptr addrspace(270) %0, ptr %f, align 8
  tail call void @use_foo(ptr noundef nonnull %f)
  ret void
}

; Function Attrs: mustprogress uwtable
define dso_local void @test_noop1(ptr noundef %f, ptr addrspace(270) noundef %i) local_unnamed_addr #0 {
; CHECK-LABEL: test_noop1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str w1, [x0]
; CHECK-NEXT:    b use_foo
entry:
  store ptr addrspace(270) %i, ptr %f, align 8
  tail call void @use_foo(ptr noundef nonnull %f)
  ret void
}

; Function Attrs: mustprogress uwtable
define dso_local void @test_noop2(ptr noundef %f, ptr noundef %i) local_unnamed_addr #0 {
; CHECK-LABEL: test_noop2:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x1, [x0, #8]
; CHECK-NEXT:    b use_foo
entry:
  %p64 = getelementptr inbounds nuw i8, ptr %f, i64 8
  store ptr %i, ptr %p64, align 8
  tail call void @use_foo(ptr noundef %f)
  ret void
}

; Function Attrs: mustprogress uwtable
define dso_local void @test_null_arg(ptr noundef %f, ptr addrspace(270) nocapture noundef readnone %i) local_unnamed_addr #0 {
; CHECK-LABEL: test_null_arg:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str wzr, [x0]
; CHECK-NEXT:    b use_foo
entry:
  store ptr addrspace(270) null, ptr %f, align 8
  tail call void @use_foo(ptr noundef nonnull %f)
  ret void
}

; Function Attrs: mustprogress uwtable
define dso_local void @test_unrecognized(ptr noundef %f, ptr addrspace(14) noundef %i) local_unnamed_addr #0 {
; CHECK-LABEL: test_unrecognized:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str w1, [x0]
; CHECK-NEXT:    b use_foo
entry:
  %0 = addrspacecast ptr addrspace(14) %i to ptr addrspace(270)
  store ptr addrspace(270) %0, ptr %f, align 8
  tail call void @use_foo(ptr noundef nonnull %f)
  ret void
}

attributes #0 = { mustprogress uwtable "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+fp-armv8,+neon,+v8a,-fmv" }
attributes #1 = { "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+fp-armv8,+neon,+v8a,-fmv" }
