//===-- X86SelectionDAGInfo.cpp - X86 SelectionDAG Info -------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the X86SelectionDAGInfo class.
//
//===----------------------------------------------------------------------===//

#include "X86SelectionDAGInfo.h"
#include "X86ISelLowering.h"
#include "X86InstrInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DerivedTypes.h"

using namespace llvm;

#define DEBUG_TYPE "x86-selectiondag-info"
bool X86SelectionDAGInfo::isBaseRegConflictPossible(
    SelectionDAG &DAG, ArrayRef<MCPhysReg> ClobberSet) const {
  // We cannot use TRI->hasBasePointer() until *after* we select all basic
  // blocks. Legalization may introduce new stack temporaries with large
  // alignment requirements. Fall back to generic code if there are any
  // dynamic stack adjustments (hopefully rare) and the base pointer would
  // conflict if we had to use it.
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  if (!MFI.hasVarSizedObjects() && !MFI.hasOpaqueSPAdjustment())
    return false;

  const X86RegisterInfo *TRI = static_cast<const X86RegisterInfo *>(
      DAG.getSubtarget().getRegisterInfo());
  unsigned BaseReg = TRI->getBaseRegister();
  for (unsigned R : ClobberSet)
    if (BaseReg == R)
      return true;
  return false;
}

namespace {

// Represents a cover of a buffer of Size bytes with Count() blocks of type AVT
// (of size UBytes() bytes), as well as how many bytes remain (BytesLeft() is
// always smaller than the block size).
struct RepMovsRepeats {
  RepMovsRepeats(uint64_t Size) : Size(Size) {}

  uint64_t Count() const { return Size / UBytes(); }
  uint64_t BytesLeft() const { return Size % UBytes(); }
  uint64_t UBytes() const { return AVT.getSizeInBits() / 8; }

  const uint64_t Size;
  MVT AVT = MVT::i8;
};

} // namespace

SDValue X86SelectionDAGInfo::EmitTargetCodeForMemset(
    SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Val,
    SDValue Size, unsigned Align, bool isVolatile,
    MachinePointerInfo DstPtrInfo) const {
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  const X86Subtarget &Subtarget =
      DAG.getMachineFunction().getSubtarget<X86Subtarget>();

#ifndef NDEBUG
  // If the base register might conflict with our physical registers, bail out.
  const MCPhysReg ClobberSet[] = {X86::RCX, X86::RAX, X86::RDI,
                                  X86::ECX, X86::EAX, X86::EDI};
  assert(!isBaseRegConflictPossible(DAG, ClobberSet));
#endif

  // If the destination is in a segment-relative address space (on x86,
  // address spaces 256, 257 and 258 denote GS-, FS- and SS-relative
  // addressing), use the default lowering.
  if (DstPtrInfo.getAddrSpace() >= 256)
    return SDValue();

  // If not DWORD aligned or if the size exceeds the threshold, call the
  // library. The libc version is likely to be faster for these cases: it can
  // use the address value and run-time information about the CPU.
  if ((Align & 3) != 0 || !ConstantSize ||
      ConstantSize->getZExtValue() > Subtarget.getMaxInlineSizeThreshold()) {
    // Check to see if there is a specialized entry-point for memory zeroing.
    ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Val);

    if (const char *bzeroName = (ValC && ValC->isNullValue())
            ? DAG.getTargetLoweringInfo().getLibcallName(RTLIB::BZERO)
            : nullptr) {
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout());
      Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
      TargetLowering::ArgListTy Args;
      TargetLowering::ArgListEntry Entry;
      Entry.Node = Dst;
      Entry.Ty = IntPtrTy;
      Args.push_back(Entry);
      Entry.Node = Size;
      Args.push_back(Entry);

      TargetLowering::CallLoweringInfo CLI(DAG);
      CLI.setDebugLoc(dl)
          .setChain(Chain)
          .setLibCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
                        DAG.getExternalSymbol(bzeroName, IntPtr),
                        std::move(Args))
          .setDiscardResult();

      std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
      return CallResult.second;
    }

    // Otherwise have the target-independent code call memset.
    return SDValue();
  }

  uint64_t SizeVal = ConstantSize->getZExtValue();
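  // REP STOS takes its operands in fixed registers: the value in
  // AL/AX/EAX/RAX, the element count in ECX/RCX, and the destination in
  // EDI/RDI. The glue value (InFlag) chains the CopyToReg nodes together so
  // the scheduler cannot insert anything in between that might clobber them.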
  SDValue InFlag;
  EVT AVT;
  SDValue Count;
  ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Val);
  unsigned BytesLeft = 0;
  if (ValC) {
    unsigned ValReg;
    uint64_t Val = ValC->getZExtValue() & 255;

    // If the value is a constant, then we can potentially use larger sets.
    switch (Align & 3) {
    case 2: // WORD aligned
      AVT = MVT::i16;
      ValReg = X86::AX;
      Val = (Val << 8) | Val;
      break;
    case 0: // DWORD aligned
      AVT = MVT::i32;
      ValReg = X86::EAX;
      Val = (Val << 8) | Val;
      Val = (Val << 16) | Val;
      if (Subtarget.is64Bit() && ((Align & 0x7) == 0)) { // QWORD aligned
        AVT = MVT::i64;
        ValReg = X86::RAX;
        Val = (Val << 32) | Val;
      }
      break;
    default: // Byte aligned
      AVT = MVT::i8;
      ValReg = X86::AL;
      Count = DAG.getIntPtrConstant(SizeVal, dl);
      break;
    }

    if (AVT.bitsGT(MVT::i8)) {
      unsigned UBytes = AVT.getSizeInBits() / 8;
      Count = DAG.getIntPtrConstant(SizeVal / UBytes, dl);
      BytesLeft = SizeVal % UBytes;
    }

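    // Materialize the replicated constant into the width-appropriate
    // accumulator register (AL, AX, EAX or RAX) chosen above.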
    Chain = DAG.getCopyToReg(Chain, dl, ValReg, DAG.getConstant(Val, dl, AVT),
                             InFlag);
    InFlag = Chain.getValue(1);
  } else {
    AVT = MVT::i8;
    Count = DAG.getIntPtrConstant(SizeVal, dl);
    Chain = DAG.getCopyToReg(Chain, dl, X86::AL, Val, InFlag);
    InFlag = Chain.getValue(1);
  }

  Chain = DAG.getCopyToReg(Chain, dl, Subtarget.is64Bit() ? X86::RCX : X86::ECX,
                           Count, InFlag);
  InFlag = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, dl, Subtarget.is64Bit() ? X86::RDI : X86::EDI,
                           Dst, InFlag);
  InFlag = Chain.getValue(1);

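  // Emit the REP_STOS pseudo node; it is later expanded to a
  // 'rep stos{b,w,d,q}' instruction selected by the operand value type AVT.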
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue Ops[] = { Chain, DAG.getValueType(AVT), InFlag };
  Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, Ops);

  if (BytesLeft) {
    // Handle the last 1 - 7 bytes.
    unsigned Offset = SizeVal - BytesLeft;
    EVT AddrVT = Dst.getValueType();
    EVT SizeVT = Size.getValueType();

    Chain = DAG.getMemset(Chain, dl,
                          DAG.getNode(ISD::ADD, dl, AddrVT, Dst,
                                      DAG.getConstant(Offset, dl, AddrVT)),
                          Val,
                          DAG.getConstant(BytesLeft, dl, SizeVT),
                          Align, isVolatile, false,
                          DstPtrInfo.getWithOffset(Offset));
  }

  // TODO: Use a TokenFactor, as in memcpy, instead of a single chain.
  return Chain;
}

SDValue X86SelectionDAGInfo::EmitTargetCodeForMemcpy(
    SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
    SDValue Size, unsigned Align, bool isVolatile, bool AlwaysInline,
    MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
  // This requires the copy size to be a constant, preferably
  // within a subtarget-specific limit.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  const X86Subtarget &Subtarget =
      DAG.getMachineFunction().getSubtarget<X86Subtarget>();
  if (!ConstantSize)
    return SDValue();
  RepMovsRepeats Repeats(ConstantSize->getZExtValue());
  if (!AlwaysInline && Repeats.Size > Subtarget.getMaxInlineSizeThreshold())
    return SDValue();

  // If not DWORD aligned, it is more efficient to call the library. However,
  // if calling the library is not allowed (AlwaysInline), then soldier on as
  // the code generated here is better than the long load-store sequence we
  // would otherwise get.
  if (!AlwaysInline && (Align & 3) != 0)
    return SDValue();

  // If the destination or source is in a segment-relative address space, use
  // the default lowering.
  if (DstPtrInfo.getAddrSpace() >= 256 || SrcPtrInfo.getAddrSpace() >= 256)
    return SDValue();

  // If the base register might conflict with our physical registers, bail out.
  const MCPhysReg ClobberSet[] = {X86::RCX, X86::RSI, X86::RDI,
                                  X86::ECX, X86::ESI, X86::EDI};
  if (isBaseRegConflictPossible(DAG, ClobberSet))
    return SDValue();

  // If the target has enhanced REPMOVSB, then it's at least as fast to use
  // REP MOVSB instead of REP MOVS{W,D,Q}, and it avoids having to handle
  // BytesLeft.
  if (!Subtarget.hasERMSB() && !(Align & 1)) {
    if (Align & 2)
      // WORD aligned
      Repeats.AVT = MVT::i16;
    else if (Align & 4)
      // DWORD aligned
      Repeats.AVT = MVT::i32;
    else
      // QWORD aligned (use DWORD on 32-bit targets, which lack REP MOVSQ)
      Repeats.AVT = Subtarget.is64Bit() ? MVT::i64 : MVT::i32;

    if (Repeats.BytesLeft() > 0 &&
        DAG.getMachineFunction().getFunction().optForMinSize()) {
      // When aggressively optimizing for size, avoid generating the code to
      // handle BytesLeft.
      Repeats.AVT = MVT::i8;
    }
  }

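  // As with memset, REP MOVS takes its operands in fixed registers: the
  // element count in ECX/RCX, the destination in EDI/RDI, and the source in
  // ESI/RSI, glued together so nothing can clobber them in between.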
  SDValue InFlag;
  Chain = DAG.getCopyToReg(Chain, dl, Subtarget.is64Bit() ? X86::RCX : X86::ECX,
                           DAG.getIntPtrConstant(Repeats.Count(), dl), InFlag);
  InFlag = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, dl, Subtarget.is64Bit() ? X86::RDI : X86::EDI,
                           Dst, InFlag);
  InFlag = Chain.getValue(1);
  Chain = DAG.getCopyToReg(Chain, dl, Subtarget.is64Bit() ? X86::RSI : X86::ESI,
                           Src, InFlag);
  InFlag = Chain.getValue(1);

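  // Emit the REP_MOVS pseudo node; it is later expanded to a
  // 'rep movs{b,w,d,q}' instruction selected by the operand value type
  // Repeats.AVT.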
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue Ops[] = { Chain, DAG.getValueType(Repeats.AVT), InFlag };
  SDValue RepMovs = DAG.getNode(X86ISD::REP_MOVS, dl, Tys, Ops);

  SmallVector<SDValue, 4> Results;
  Results.push_back(RepMovs);
  if (Repeats.BytesLeft()) {
    // Handle the last 1 - 7 bytes.
    unsigned Offset = Repeats.Size - Repeats.BytesLeft();
    EVT DstVT = Dst.getValueType();
    EVT SrcVT = Src.getValueType();
    EVT SizeVT = Size.getValueType();
    Results.push_back(DAG.getMemcpy(Chain, dl,
                                    DAG.getNode(ISD::ADD, dl, DstVT, Dst,
                                                DAG.getConstant(Offset, dl,
                                                                DstVT)),
                                    DAG.getNode(ISD::ADD, dl, SrcVT, Src,
                                                DAG.getConstant(Offset, dl,
                                                                SrcVT)),
                                    DAG.getConstant(Repeats.BytesLeft(), dl,
                                                    SizeVT),
                                    Align, isVolatile, AlwaysInline, false,
                                    DstPtrInfo.getWithOffset(Offset),
                                    SrcPtrInfo.getWithOffset(Offset)));
  }

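  // Join the REP MOVS chain with the trailing memcpy chain (if any) so that
  // consumers of the returned chain depend on both copies completing.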
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Results);
}