Commit 9e9d1a1

[llvm][aarch64] Add support for the MS qualifiers __ptr32, __ptr64, __sptr, __uptr
1 parent c54616e commit 9e9d1a1
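For context, a minimal sketch of what these Microsoft pointer-size qualifiers look like in source code (illustrative only, not part of this commit; assumes clang or MSVC with -fms-extensions, and the names Env, low, slow, full, and load_low are hypothetical):

// Illustrative example of the MS qualifiers handled by this commit.
struct Env {
  int * __ptr32 __uptr low;  // 32-bit pointer, zero-extended when widened to 64 bits
  int * __ptr32 __sptr slow; // 32-bit pointer, sign-extended when widened to 64 bits
  int * __ptr64 full;        // explicit 64-bit pointer
};

int load_low(Env *e) {
  // Dereferencing e->low goes through an addrspacecast to the default
  // 64-bit address space; per this patch that cast lowers to a zero
  // extension before the load on AArch64.
  return *e->low;
}

In the patch below, __sptr, __uptr, and __ptr64 pointers are modeled as address spaces 270, 271, and 272 respectively (the ARM64AS enum), matching the p270/p271/p272 entries in the Windows AArch64 datalayout string used by the test.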

4 files changed: 277 additions, 11 deletions

llvm/lib/Target/AArch64/AArch64ISelLowering.cpp

Lines changed: 79 additions & 4 deletions
@@ -530,6 +530,9 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
   setOperationAction(ISD::XOR, MVT::i32, Custom);
   setOperationAction(ISD::XOR, MVT::i64, Custom);
 
+  setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
+  setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
+
   // Virtually no operation on f128 is legal, but LLVM can't expand them when
   // there's a valid register class, so we need custom operations in most cases.
   setOperationAction(ISD::FABS, MVT::f128, Expand);
@@ -6746,6 +6749,37 @@ static SDValue LowerTruncateVectorStore(SDLoc DL, StoreSDNode *ST,
                      ST->getBasePtr(), ST->getMemOperand());
 }
 
+static SDValue LowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) {
+  SDLoc dl(Op);
+  SDValue Src = Op.getOperand(0);
+  MVT DestVT = Op.getSimpleValueType();
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  AddrSpaceCastSDNode *N = cast<AddrSpaceCastSDNode>(Op.getNode());
+
+  unsigned SrcAS = N->getSrcAddressSpace();
+  unsigned DestAS = N->getDestAddressSpace();
+  assert(SrcAS != DestAS &&
+         "addrspacecast must be between different address spaces");
+  assert(TLI.getTargetMachine().getPointerSize(SrcAS) !=
+             TLI.getTargetMachine().getPointerSize(DestAS) &&
+         "addrspacecast must be between different ptr sizes");
+
+  if (SrcAS == ARM64AS::PTR32_SPTR) {
+    return DAG.getNode(ISD::SIGN_EXTEND, dl, DestVT, Src,
+                       DAG.getTargetConstant(0, dl, DestVT));
+  } else if (SrcAS == ARM64AS::PTR32_UPTR) {
+    return DAG.getNode(ISD::ZERO_EXTEND, dl, DestVT, Src,
+                       DAG.getTargetConstant(0, dl, DestVT));
+  } else if ((DestAS == ARM64AS::PTR32_SPTR) ||
+             (DestAS == ARM64AS::PTR32_UPTR)) {
+    SDValue Ext = DAG.getAnyExtOrTrunc(Src, dl, DestVT);
+    SDValue Trunc = DAG.getZeroExtendInReg(Ext, dl, DestVT);
+    return Trunc;
+  } else {
+    return Src;
+  }
+}
+
 // Custom lowering for any store, vector or scalar and/or default or with
 // a truncate operations. Currently only custom lower truncate operation
 // from vector v4i16 to v4i8 or volatile stores of i128.
@@ -7399,6 +7433,8 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
   case ISD::SIGN_EXTEND:
   case ISD::ZERO_EXTEND:
     return LowerFixedLengthVectorIntExtendToSVE(Op, DAG);
+  case ISD::ADDRSPACECAST:
+    return LowerADDRSPACECAST(Op, DAG);
   case ISD::SIGN_EXTEND_INREG: {
     // Only custom lower when ExtraVT has a legal byte based element type.
     EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
@@ -23448,6 +23484,26 @@ static SDValue performLOADCombine(SDNode *N,
     performTBISimplification(N->getOperand(1), DCI, DAG);
 
   LoadSDNode *LD = cast<LoadSDNode>(N);
+  EVT RegVT = LD->getValueType(0);
+  EVT MemVT = LD->getMemoryVT();
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  SDLoc DL(LD);
+
+  // Cast ptr32 and ptr64 pointers to the default address space before a load.
+  unsigned AddrSpace = LD->getAddressSpace();
+  if (AddrSpace == ARM64AS::PTR64 || AddrSpace == ARM64AS::PTR32_SPTR ||
+      AddrSpace == ARM64AS::PTR32_UPTR) {
+    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
+    if (PtrVT != LD->getBasePtr().getSimpleValueType()) {
+      SDValue Cast =
+          DAG.getAddrSpaceCast(DL, PtrVT, LD->getBasePtr(), AddrSpace, 0);
+      return DAG.getExtLoad(LD->getExtensionType(), DL, RegVT, LD->getChain(),
+                            Cast, LD->getPointerInfo(), MemVT,
+                            LD->getOriginalAlign(),
+                            LD->getMemOperand()->getFlags());
+    }
+  }
+
   if (LD->isVolatile() || !Subtarget->isLittleEndian())
     return SDValue(N, 0);
 
@@ -23457,13 +23513,11 @@ static SDValue performLOADCombine(SDNode *N,
   if (!LD->isNonTemporal())
     return SDValue(N, 0);
 
-  EVT MemVT = LD->getMemoryVT();
   if (MemVT.isScalableVector() || MemVT.getSizeInBits() <= 256 ||
       MemVT.getSizeInBits() % 256 == 0 ||
       256 % MemVT.getScalarSizeInBits() != 0)
     return SDValue(N, 0);
 
-  SDLoc DL(LD);
   SDValue Chain = LD->getChain();
   SDValue BasePtr = LD->getBasePtr();
   SDNodeFlags Flags = LD->getFlags();
@@ -23723,12 +23777,28 @@ static SDValue performSTORECombine(SDNode *N,
   SDValue Value = ST->getValue();
   SDValue Ptr = ST->getBasePtr();
   EVT ValueVT = Value.getValueType();
+  EVT MemVT = ST->getMemoryVT();
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  SDLoc DL(ST);
 
   auto hasValidElementTypeForFPTruncStore = [](EVT VT) {
     EVT EltVT = VT.getVectorElementType();
     return EltVT == MVT::f32 || EltVT == MVT::f64;
   };
 
+  // Cast ptr32 and ptr64 pointers to the default address space before a store.
+  unsigned AddrSpace = ST->getAddressSpace();
+  if (AddrSpace == ARM64AS::PTR64 || AddrSpace == ARM64AS::PTR32_SPTR ||
+      AddrSpace == ARM64AS::PTR32_UPTR) {
+    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
+    if (PtrVT != Ptr.getSimpleValueType()) {
+      SDValue Cast = DAG.getAddrSpaceCast(DL, PtrVT, Ptr, AddrSpace, 0);
+      return DAG.getStore(Chain, DL, Value, Cast, ST->getPointerInfo(),
+                          ST->getOriginalAlign(),
+                          ST->getMemOperand()->getFlags(), ST->getAAInfo());
+    }
+  }
+
   if (SDValue Res = combineI8TruncStore(ST, DAG, Subtarget))
     return Res;
 
@@ -23742,8 +23812,8 @@ static SDValue performSTORECombine(SDNode *N,
       ValueVT.isFixedLengthVector() &&
       ValueVT.getFixedSizeInBits() >= Subtarget->getMinSVEVectorSizeInBits() &&
       hasValidElementTypeForFPTruncStore(Value.getOperand(0).getValueType()))
-    return DAG.getTruncStore(Chain, SDLoc(N), Value.getOperand(0), Ptr,
-                             ST->getMemoryVT(), ST->getMemOperand());
+    return DAG.getTruncStore(Chain, DL, Value.getOperand(0), Ptr, MemVT,
+                             ST->getMemOperand());
 
   if (SDValue Split = splitStores(N, DCI, DAG, Subtarget))
     return Split;
@@ -27070,6 +27140,11 @@ void AArch64TargetLowering::ReplaceNodeResults(
     ReplaceATOMIC_LOAD_128Results(N, Results, DAG, Subtarget);
     return;
   }
+  case ISD::ADDRSPACECAST: {
+    SDValue V = LowerADDRSPACECAST(SDValue(N, 0), DAG);
+    Results.push_back(V);
+    return;
+  }
   case ISD::ATOMIC_LOAD:
   case ISD::LOAD: {
     MemSDNode *LoadNode = cast<MemSDNode>(N);

llvm/lib/Target/AArch64/AArch64ISelLowering.h

Lines changed: 17 additions & 5 deletions
@@ -554,6 +554,10 @@ const unsigned StackProbeMaxLoopUnroll = 4;
 
 } // namespace AArch64
 
+namespace ARM64AS {
+enum : unsigned { PTR32_SPTR = 270, PTR32_UPTR = 271, PTR64 = 272 };
+}
+
 class AArch64Subtarget;
 
 class AArch64TargetLowering : public TargetLowering {
@@ -585,11 +589,19 @@ class AArch64TargetLowering : public TargetLowering {
                      unsigned Depth) const override;
 
   MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const override {
-    // Returning i64 unconditionally here (i.e. even for ILP32) means that the
-    // *DAG* representation of pointers will always be 64-bits. They will be
-    // truncated and extended when transferred to memory, but the 64-bit DAG
-    // allows us to use AArch64's addressing modes much more easily.
-    return MVT::getIntegerVT(64);
+    if ((AS == ARM64AS::PTR32_SPTR) || (AS == ARM64AS::PTR32_UPTR)) {
+      // These are 32-bit pointers created using the `__ptr32` extension or
+      // similar. They are handled by marking them as being in a different
+      // address space, and will be extended to 64-bits when used as the target
+      // of a load or store operation, or cast to a 64-bit pointer type.
+      return MVT::i32;
+    } else {
+      // Returning i64 unconditionally here (i.e. even for ILP32) means that the
+      // *DAG* representation of pointers will always be 64-bits. They will be
+      // truncated and extended when transferred to memory, but the 64-bit DAG
+      // allows us to use AArch64's addressing modes much more easily.
+      return MVT::i64;
+    }
   }
 
   bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,

llvm/lib/Target/AArch64/AArch64TargetMachine.h

Lines changed: 1 addition & 2 deletions
@@ -68,8 +68,7 @@ class AArch64TargetMachine : public CodeGenTargetMachineImpl {
 
   /// Returns true if a cast between SrcAS and DestAS is a noop.
   bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
-    // Addrspacecasts are always noops.
-    return true;
+    return (getPointerSize(SrcAS) == getPointerSize(DestAS));
   }
 
 private:
Lines changed: 180 additions & 0 deletions
@@ -0,0 +1,180 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s | FileCheck %s
+
+; Source to regenerate:
+; struct Foo {
+;   int * __ptr32 p32;
+;   int * __ptr64 p64;
+;   __attribute__((address_space(9))) int *p_other;
+; };
+; extern "C" void use_foo(Foo *f);
+; extern "C" int use_int(int i);
+; extern "C" void test_sign_ext(Foo *f, int * __ptr32 __sptr i) {
+;   f->p64 = i;
+;   use_foo(f);
+; }
+; extern "C" void test_sign_ext_store_load(int * __ptr32 __sptr i) {
+;   *i = use_int(*i);
+; }
+; extern "C" void test_zero_ext(Foo *f, int * __ptr32 __uptr i) {
+;   f->p64 = i;
+;   use_foo(f);
+; }
+; extern "C" void test_zero_ext_store_load(int * __ptr32 __uptr i) {
+;   *i = use_int(*i);
+; }
+; extern "C" void test_trunc(Foo *f, int * __ptr64 i) {
+;   f->p32 = i;
+;   use_foo(f);
+; }
+; extern "C" void test_noop1(Foo *f, int * __ptr32 i) {
+;   f->p32 = i;
+;   use_foo(f);
+; }
+; extern "C" void test_noop2(Foo *f, int * __ptr64 i) {
+;   f->p64 = i;
+;   use_foo(f);
+; }
+; extern "C" void test_null_arg(Foo *f, int * __ptr32 i) {
+;   test_noop1(f, 0);
+; }
+; extern "C" void test_unrecognized(Foo *f, __attribute__((address_space(14))) int *i) {
+;   f->p32 = (int * __ptr32)i;
+;   use_foo(f);
+; }
+;
+; $ clang --target=aarch64-windows-msvc -fms-extensions -O2 -S -emit-llvm t.cpp
+
+target datalayout = "e-m:w-p:64:64-i32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-n32:64-S128-Fn32"
+target triple = "aarch64-unknown-windows-msvc"
+
+; Function Attrs: mustprogress uwtable
+define dso_local void @test_sign_ext(ptr noundef %f, ptr addrspace(270) noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_sign_ext:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sxtw x8, w1
+; CHECK-NEXT:    str x8, [x0, #8]
+; CHECK-NEXT:    b use_foo
+entry:
+  %0 = addrspacecast ptr addrspace(270) %i to ptr
+  %p64 = getelementptr inbounds nuw i8, ptr %f, i64 8
+  store ptr %0, ptr %p64, align 8
+  tail call void @use_foo(ptr noundef %f)
+  ret void
+}
+
+declare dso_local void @use_foo(ptr noundef) local_unnamed_addr #1
+
+; Function Attrs: mustprogress uwtable
+define dso_local void @test_sign_ext_store_load(ptr addrspace(270) nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_sign_ext_store_load:
+; CHECK:       // %bb.0: // %entry
+; CHECK:         sxtw x19, w0
+; CHECK-NEXT:    ldr w0, [x19]
+; CHECK-NEXT:    bl use_int
+; CHECK-NEXT:    str w0, [x19]
+entry:
+  %0 = load i32, ptr addrspace(270) %i, align 4
+  %call = tail call i32 @use_int(i32 noundef %0)
+  store i32 %call, ptr addrspace(270) %i, align 4
+  ret void
+}
+
+declare dso_local i32 @use_int(i32 noundef) local_unnamed_addr #1
+
+; Function Attrs: mustprogress uwtable
+define dso_local void @test_zero_ext(ptr noundef %f, ptr addrspace(271) noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_zero_ext:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov w8, w1
+; CHECK-NEXT:    str x8, [x0, #8]
+; CHECK-NEXT:    b use_foo
+entry:
+  %0 = addrspacecast ptr addrspace(271) %i to ptr
+  %p64 = getelementptr inbounds nuw i8, ptr %f, i64 8
+  store ptr %0, ptr %p64, align 8
+  tail call void @use_foo(ptr noundef %f)
+  ret void
+}
+
+; Function Attrs: mustprogress uwtable
+define dso_local void @test_zero_ext_store_load(ptr addrspace(271) nocapture noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_zero_ext_store_load:
+; CHECK:       // %bb.0: // %entry
+; CHECK:         mov w19, w0
+; CHECK-NEXT:    ldr w0, [x19]
+; CHECK-NEXT:    bl use_int
+; CHECK-NEXT:    str w0, [x19]
+entry:
+  %0 = load i32, ptr addrspace(271) %i, align 4
+  %call = tail call i32 @use_int(i32 noundef %0)
+  store i32 %call, ptr addrspace(271) %i, align 4
+  ret void
+}
+
+; Function Attrs: mustprogress uwtable
+define dso_local void @test_trunc(ptr noundef %f, ptr noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_trunc:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str w1, [x0]
+; CHECK-NEXT:    b use_foo
+entry:
+  %0 = addrspacecast ptr %i to ptr addrspace(270)
+  store ptr addrspace(270) %0, ptr %f, align 8
+  tail call void @use_foo(ptr noundef nonnull %f)
+  ret void
+}
+
+; Function Attrs: mustprogress uwtable
+define dso_local void @test_noop1(ptr noundef %f, ptr addrspace(270) noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_noop1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str w1, [x0]
+; CHECK-NEXT:    b use_foo
+entry:
+  store ptr addrspace(270) %i, ptr %f, align 8
+  tail call void @use_foo(ptr noundef nonnull %f)
+  ret void
+}
+
+; Function Attrs: mustprogress uwtable
+define dso_local void @test_noop2(ptr noundef %f, ptr noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_noop2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x1, [x0, #8]
+; CHECK-NEXT:    b use_foo
+entry:
+  %p64 = getelementptr inbounds nuw i8, ptr %f, i64 8
+  store ptr %i, ptr %p64, align 8
+  tail call void @use_foo(ptr noundef %f)
+  ret void
+}
+
+; Function Attrs: mustprogress uwtable
+define dso_local void @test_null_arg(ptr noundef %f, ptr addrspace(270) nocapture noundef readnone %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_null_arg:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str wzr, [x0]
+; CHECK-NEXT:    b use_foo
+entry:
+  store ptr addrspace(270) null, ptr %f, align 8
+  tail call void @use_foo(ptr noundef nonnull %f)
+  ret void
+}
+
+; Function Attrs: mustprogress uwtable
+define dso_local void @test_unrecognized(ptr noundef %f, ptr addrspace(14) noundef %i) local_unnamed_addr #0 {
+; CHECK-LABEL: test_unrecognized:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str w1, [x0]
+; CHECK-NEXT:    b use_foo
+entry:
+  %0 = addrspacecast ptr addrspace(14) %i to ptr addrspace(270)
+  store ptr addrspace(270) %0, ptr %f, align 8
+  tail call void @use_foo(ptr noundef nonnull %f)
+  ret void
+}
+
+attributes #0 = { mustprogress uwtable "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+fp-armv8,+neon,+v8a,-fmv" }
+attributes #1 = { "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+fp-armv8,+neon,+v8a,-fmv" }
