1 //===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines an instruction selector for the AArch64 target.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "AArch64MachineFunctionInfo.h"
14 #include "AArch64TargetMachine.h"
15 #include "MCTargetDesc/AArch64AddressingModes.h"
16 #include "llvm/ADT/APSInt.h"
17 #include "llvm/CodeGen/SelectionDAGISel.h"
18 #include "llvm/IR/Function.h" // To access function attributes.
19 #include "llvm/IR/GlobalValue.h"
20 #include "llvm/IR/Intrinsics.h"
21 #include "llvm/IR/IntrinsicsAArch64.h"
22 #include "llvm/Support/Debug.h"
23 #include "llvm/Support/ErrorHandling.h"
24 #include "llvm/Support/KnownBits.h"
25 #include "llvm/Support/MathExtras.h"
26 #include "llvm/Support/raw_ostream.h"
27
28 using namespace llvm;
29
30 #define DEBUG_TYPE "aarch64-isel"
31
32 //===--------------------------------------------------------------------===//
33 /// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
34 /// instructions for SelectionDAG operations.
35 ///
36 namespace {
37
38 class AArch64DAGToDAGISel : public SelectionDAGISel {
39
40 /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
41 /// make the right decision when generating code for different targets.
42 const AArch64Subtarget *Subtarget;
43
44 public:
AArch64DAGToDAGISel(AArch64TargetMachine & tm,CodeGenOpt::Level OptLevel)45 explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
46 CodeGenOpt::Level OptLevel)
47 : SelectionDAGISel(tm, OptLevel), Subtarget(nullptr) {}
48
getPassName() const49 StringRef getPassName() const override {
50 return "AArch64 Instruction Selection";
51 }
52
runOnMachineFunction(MachineFunction & MF)53 bool runOnMachineFunction(MachineFunction &MF) override {
54 Subtarget = &MF.getSubtarget<AArch64Subtarget>();
55 return SelectionDAGISel::runOnMachineFunction(MF);
56 }
57
58 void Select(SDNode *Node) override;
59
60 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
61 /// inline asm expressions.
62 bool SelectInlineAsmMemoryOperand(const SDValue &Op,
63 unsigned ConstraintID,
64 std::vector<SDValue> &OutOps) override;
65
66 template <signed Low, signed High, signed Scale>
67 bool SelectRDVLImm(SDValue N, SDValue &Imm);
68
69 bool tryMLAV64LaneV128(SDNode *N);
70 bool tryMULLV64LaneV128(unsigned IntNo, SDNode *N);
71 bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
72 bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
73 bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
SelectArithShiftedRegister(SDValue N,SDValue & Reg,SDValue & Shift)74 bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
75 return SelectShiftedRegister(N, false, Reg, Shift);
76 }
SelectLogicalShiftedRegister(SDValue N,SDValue & Reg,SDValue & Shift)77 bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
78 return SelectShiftedRegister(N, true, Reg, Shift);
79 }
SelectAddrModeIndexed7S8(SDValue N,SDValue & Base,SDValue & OffImm)80 bool SelectAddrModeIndexed7S8(SDValue N, SDValue &Base, SDValue &OffImm) {
81 return SelectAddrModeIndexed7S(N, 1, Base, OffImm);
82 }
SelectAddrModeIndexed7S16(SDValue N,SDValue & Base,SDValue & OffImm)83 bool SelectAddrModeIndexed7S16(SDValue N, SDValue &Base, SDValue &OffImm) {
84 return SelectAddrModeIndexed7S(N, 2, Base, OffImm);
85 }
SelectAddrModeIndexed7S32(SDValue N,SDValue & Base,SDValue & OffImm)86 bool SelectAddrModeIndexed7S32(SDValue N, SDValue &Base, SDValue &OffImm) {
87 return SelectAddrModeIndexed7S(N, 4, Base, OffImm);
88 }
SelectAddrModeIndexed7S64(SDValue N,SDValue & Base,SDValue & OffImm)89 bool SelectAddrModeIndexed7S64(SDValue N, SDValue &Base, SDValue &OffImm) {
90 return SelectAddrModeIndexed7S(N, 8, Base, OffImm);
91 }
SelectAddrModeIndexed7S128(SDValue N,SDValue & Base,SDValue & OffImm)92 bool SelectAddrModeIndexed7S128(SDValue N, SDValue &Base, SDValue &OffImm) {
93 return SelectAddrModeIndexed7S(N, 16, Base, OffImm);
94 }
SelectAddrModeIndexedS9S128(SDValue N,SDValue & Base,SDValue & OffImm)95 bool SelectAddrModeIndexedS9S128(SDValue N, SDValue &Base, SDValue &OffImm) {
96 return SelectAddrModeIndexedBitWidth(N, true, 9, 16, Base, OffImm);
97 }
SelectAddrModeIndexedU6S128(SDValue N,SDValue & Base,SDValue & OffImm)98 bool SelectAddrModeIndexedU6S128(SDValue N, SDValue &Base, SDValue &OffImm) {
99 return SelectAddrModeIndexedBitWidth(N, false, 6, 16, Base, OffImm);
100 }
SelectAddrModeIndexed8(SDValue N,SDValue & Base,SDValue & OffImm)101 bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
102 return SelectAddrModeIndexed(N, 1, Base, OffImm);
103 }
SelectAddrModeIndexed16(SDValue N,SDValue & Base,SDValue & OffImm)104 bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
105 return SelectAddrModeIndexed(N, 2, Base, OffImm);
106 }
SelectAddrModeIndexed32(SDValue N,SDValue & Base,SDValue & OffImm)107 bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
108 return SelectAddrModeIndexed(N, 4, Base, OffImm);
109 }
SelectAddrModeIndexed64(SDValue N,SDValue & Base,SDValue & OffImm)110 bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
111 return SelectAddrModeIndexed(N, 8, Base, OffImm);
112 }
SelectAddrModeIndexed128(SDValue N,SDValue & Base,SDValue & OffImm)113 bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
114 return SelectAddrModeIndexed(N, 16, Base, OffImm);
115 }
SelectAddrModeUnscaled8(SDValue N,SDValue & Base,SDValue & OffImm)116 bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
117 return SelectAddrModeUnscaled(N, 1, Base, OffImm);
118 }
SelectAddrModeUnscaled16(SDValue N,SDValue & Base,SDValue & OffImm)119 bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
120 return SelectAddrModeUnscaled(N, 2, Base, OffImm);
121 }
SelectAddrModeUnscaled32(SDValue N,SDValue & Base,SDValue & OffImm)122 bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
123 return SelectAddrModeUnscaled(N, 4, Base, OffImm);
124 }
SelectAddrModeUnscaled64(SDValue N,SDValue & Base,SDValue & OffImm)125 bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
126 return SelectAddrModeUnscaled(N, 8, Base, OffImm);
127 }
SelectAddrModeUnscaled128(SDValue N,SDValue & Base,SDValue & OffImm)128 bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
129 return SelectAddrModeUnscaled(N, 16, Base, OffImm);
130 }
131
132 template<int Width>
SelectAddrModeWRO(SDValue N,SDValue & Base,SDValue & Offset,SDValue & SignExtend,SDValue & DoShift)133 bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
134 SDValue &SignExtend, SDValue &DoShift) {
135 return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
136 }
137
138 template<int Width>
SelectAddrModeXRO(SDValue N,SDValue & Base,SDValue & Offset,SDValue & SignExtend,SDValue & DoShift)139 bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
140 SDValue &SignExtend, SDValue &DoShift) {
141 return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
142 }
143
SelectDupZeroOrUndef(SDValue N)144 bool SelectDupZeroOrUndef(SDValue N) {
145 switch(N->getOpcode()) {
146 case ISD::UNDEF:
147 return true;
148 case AArch64ISD::DUP:
149 case ISD::SPLAT_VECTOR: {
150 auto Opnd0 = N->getOperand(0);
151 if (auto CN = dyn_cast<ConstantSDNode>(Opnd0))
152 if (CN->isNullValue())
153 return true;
154 if (auto CN = dyn_cast<ConstantFPSDNode>(Opnd0))
155 if (CN->isZero())
156 return true;
157 break;
158 }
159 default:
160 break;
161 }
162
163 return false;
164 }
165
SelectDupZero(SDValue N)166 bool SelectDupZero(SDValue N) {
167 switch(N->getOpcode()) {
168 case AArch64ISD::DUP:
169 case ISD::SPLAT_VECTOR: {
170 auto Opnd0 = N->getOperand(0);
171 if (auto CN = dyn_cast<ConstantSDNode>(Opnd0))
172 if (CN->isNullValue())
173 return true;
174 if (auto CN = dyn_cast<ConstantFPSDNode>(Opnd0))
175 if (CN->isZero())
176 return true;
177 break;
178 }
179 }
180
181 return false;
182 }
183
184 template<MVT::SimpleValueType VT>
SelectSVEAddSubImm(SDValue N,SDValue & Imm,SDValue & Shift)185 bool SelectSVEAddSubImm(SDValue N, SDValue &Imm, SDValue &Shift) {
186 return SelectSVEAddSubImm(N, VT, Imm, Shift);
187 }
188
189 template<MVT::SimpleValueType VT>
SelectSVELogicalImm(SDValue N,SDValue & Imm)190 bool SelectSVELogicalImm(SDValue N, SDValue &Imm) {
191 return SelectSVELogicalImm(N, VT, Imm);
192 }
193
194 template <MVT::SimpleValueType VT>
SelectSVEArithImm(SDValue N,SDValue & Imm)195 bool SelectSVEArithImm(SDValue N, SDValue &Imm) {
196 return SelectSVEArithImm(N, VT, Imm);
197 }
198
199 template <unsigned Low, unsigned High, bool AllowSaturation = false>
SelectSVEShiftImm(SDValue N,SDValue & Imm)200 bool SelectSVEShiftImm(SDValue N, SDValue &Imm) {
201 return SelectSVEShiftImm(N, Low, High, AllowSaturation, Imm);
202 }
203
204 // Returns a suitable CNT/INC/DEC/RDVL multiplier to calculate VSCALE*N.
205 template<signed Min, signed Max, signed Scale, bool Shift>
SelectCntImm(SDValue N,SDValue & Imm)206 bool SelectCntImm(SDValue N, SDValue &Imm) {
207 if (!isa<ConstantSDNode>(N))
208 return false;
209
210 int64_t MulImm = cast<ConstantSDNode>(N)->getSExtValue();
211 if (Shift)
212 MulImm = 1LL << MulImm;
213
214 if ((MulImm % std::abs(Scale)) != 0)
215 return false;
216
217 MulImm /= Scale;
218 if ((MulImm >= Min) && (MulImm <= Max)) {
219 Imm = CurDAG->getTargetConstant(MulImm, SDLoc(N), MVT::i32);
220 return true;
221 }
222
223 return false;
224 }
225
226 /// Form sequences of consecutive 64/128-bit registers for use in NEON
227 /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
228 /// between 1 and 4 elements. If it contains a single element that is returned
229 /// unchanged; otherwise a REG_SEQUENCE value is returned.
230 SDValue createDTuple(ArrayRef<SDValue> Vecs);
231 SDValue createQTuple(ArrayRef<SDValue> Vecs);
232 // Form a sequence of SVE registers for instructions using list of vectors,
233 // e.g. structured loads and stores (ldN, stN).
234 SDValue createZTuple(ArrayRef<SDValue> Vecs);
235
236 /// Generic helper for the createDTuple/createQTuple
237 /// functions. Those should almost always be called instead.
238 SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
239 const unsigned SubRegs[]);
240
241 void SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);
242
243 bool tryIndexedLoad(SDNode *N);
244
245 bool trySelectStackSlotTagP(SDNode *N);
246 void SelectTagP(SDNode *N);
247
248 void SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
249 unsigned SubRegIdx);
250 void SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
251 unsigned SubRegIdx);
252 void SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
253 void SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
254 void SelectPredicatedLoad(SDNode *N, unsigned NumVecs, unsigned Scale,
255 unsigned Opc_rr, unsigned Opc_ri);
256
257 bool SelectAddrModeFrameIndexSVE(SDValue N, SDValue &Base, SDValue &OffImm);
258 /// SVE Reg+Imm addressing mode.
259 template <int64_t Min, int64_t Max>
260 bool SelectAddrModeIndexedSVE(SDNode *Root, SDValue N, SDValue &Base,
261 SDValue &OffImm);
262 /// SVE Reg+Reg address mode.
263 template <unsigned Scale>
SelectSVERegRegAddrMode(SDValue N,SDValue & Base,SDValue & Offset)264 bool SelectSVERegRegAddrMode(SDValue N, SDValue &Base, SDValue &Offset) {
265 return SelectSVERegRegAddrMode(N, Scale, Base, Offset);
266 }
267
268 void SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
269 void SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
270 void SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
271 void SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
272 void SelectPredicatedStore(SDNode *N, unsigned NumVecs, unsigned Scale,
273 unsigned Opc_rr, unsigned Opc_ri);
274 std::tuple<unsigned, SDValue, SDValue>
275 findAddrModeSVELoadStore(SDNode *N, unsigned Opc_rr, unsigned Opc_ri,
276 const SDValue &OldBase, const SDValue &OldOffset,
277 unsigned Scale);
278
279 bool tryBitfieldExtractOp(SDNode *N);
280 bool tryBitfieldExtractOpFromSExt(SDNode *N);
281 bool tryBitfieldInsertOp(SDNode *N);
282 bool tryBitfieldInsertInZeroOp(SDNode *N);
283 bool tryShiftAmountMod(SDNode *N);
284 bool tryHighFPExt(SDNode *N);
285
286 bool tryReadRegister(SDNode *N);
287 bool tryWriteRegister(SDNode *N);
288
289 // Include the pieces autogenerated from the target description.
290 #include "AArch64GenDAGISel.inc"
291
292 private:
293 bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
294 SDValue &Shift);
SelectAddrModeIndexed7S(SDValue N,unsigned Size,SDValue & Base,SDValue & OffImm)295 bool SelectAddrModeIndexed7S(SDValue N, unsigned Size, SDValue &Base,
296 SDValue &OffImm) {
297 return SelectAddrModeIndexedBitWidth(N, true, 7, Size, Base, OffImm);
298 }
299 bool SelectAddrModeIndexedBitWidth(SDValue N, bool IsSignedImm, unsigned BW,
300 unsigned Size, SDValue &Base,
301 SDValue &OffImm);
302 bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
303 SDValue &OffImm);
304 bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
305 SDValue &OffImm);
306 bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
307 SDValue &Offset, SDValue &SignExtend,
308 SDValue &DoShift);
309 bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
310 SDValue &Offset, SDValue &SignExtend,
311 SDValue &DoShift);
312 bool isWorthFolding(SDValue V) const;
313 bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
314 SDValue &Offset, SDValue &SignExtend);
315
316 template<unsigned RegWidth>
SelectCVTFixedPosOperand(SDValue N,SDValue & FixedPos)317 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
318 return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
319 }
320
321 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);
322
323 bool SelectCMP_SWAP(SDNode *N);
324
325 bool SelectSVE8BitLslImm(SDValue N, SDValue &Imm, SDValue &Shift);
326
327 bool SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift);
328
329 bool SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm);
330
331 bool SelectSVESignedArithImm(SDValue N, SDValue &Imm);
332 bool SelectSVEShiftImm(SDValue N, uint64_t Low, uint64_t High,
333 bool AllowSaturation, SDValue &Imm);
334
335 bool SelectSVEArithImm(SDValue N, MVT VT, SDValue &Imm);
336 bool SelectSVERegRegAddrMode(SDValue N, unsigned Scale, SDValue &Base,
337 SDValue &Offset);
338 };
339 } // end anonymous namespace
340
341 /// isIntImmediate - This method tests to see if the node is a constant
342 /// operand. If so Imm will receive the 32-bit value.
isIntImmediate(const SDNode * N,uint64_t & Imm)343 static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
344 if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
345 Imm = C->getZExtValue();
346 return true;
347 }
348 return false;
349 }
350
351 // isIntImmediate - This method tests to see if a constant operand.
352 // If so Imm will receive the value.
isIntImmediate(SDValue N,uint64_t & Imm)353 static bool isIntImmediate(SDValue N, uint64_t &Imm) {
354 return isIntImmediate(N.getNode(), Imm);
355 }
356
357 // isOpcWithIntImmediate - This method tests to see if the node is a specific
358 // opcode and that it has a immediate integer right operand.
359 // If so Imm will receive the 32 bit value.
isOpcWithIntImmediate(const SDNode * N,unsigned Opc,uint64_t & Imm)360 static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
361 uint64_t &Imm) {
362 return N->getOpcode() == Opc &&
363 isIntImmediate(N->getOperand(1).getNode(), Imm);
364 }
365
SelectInlineAsmMemoryOperand(const SDValue & Op,unsigned ConstraintID,std::vector<SDValue> & OutOps)366 bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
367 const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
368 switch(ConstraintID) {
369 default:
370 llvm_unreachable("Unexpected asm memory constraint");
371 case InlineAsm::Constraint_m:
372 case InlineAsm::Constraint_Q:
373 // We need to make sure that this one operand does not end up in XZR, thus
374 // require the address to be in a PointerRegClass register.
375 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
376 const TargetRegisterClass *TRC = TRI->getPointerRegClass(*MF);
377 SDLoc dl(Op);
378 SDValue RC = CurDAG->getTargetConstant(TRC->getID(), dl, MVT::i64);
379 SDValue NewOp =
380 SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
381 dl, Op.getValueType(),
382 Op, RC), 0);
383 OutOps.push_back(NewOp);
384 return false;
385 }
386 return true;
387 }
388
389 /// SelectArithImmed - Select an immediate value that can be represented as
390 /// a 12-bit value shifted left by either 0 or 12. If so, return true with
391 /// Val set to the 12-bit value and Shift set to the shifter operand.
SelectArithImmed(SDValue N,SDValue & Val,SDValue & Shift)392 bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
393 SDValue &Shift) {
394 // This function is called from the addsub_shifted_imm ComplexPattern,
395 // which lists [imm] as the list of opcode it's interested in, however
396 // we still need to check whether the operand is actually an immediate
397 // here because the ComplexPattern opcode list is only used in
398 // root-level opcode matching.
399 if (!isa<ConstantSDNode>(N.getNode()))
400 return false;
401
402 uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
403 unsigned ShiftAmt;
404
405 if (Immed >> 12 == 0) {
406 ShiftAmt = 0;
407 } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
408 ShiftAmt = 12;
409 Immed = Immed >> 12;
410 } else
411 return false;
412
413 unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
414 SDLoc dl(N);
415 Val = CurDAG->getTargetConstant(Immed, dl, MVT::i32);
416 Shift = CurDAG->getTargetConstant(ShVal, dl, MVT::i32);
417 return true;
418 }
419
420 /// SelectNegArithImmed - As above, but negates the value before trying to
421 /// select it.
SelectNegArithImmed(SDValue N,SDValue & Val,SDValue & Shift)422 bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
423 SDValue &Shift) {
424 // This function is called from the addsub_shifted_imm ComplexPattern,
425 // which lists [imm] as the list of opcode it's interested in, however
426 // we still need to check whether the operand is actually an immediate
427 // here because the ComplexPattern opcode list is only used in
428 // root-level opcode matching.
429 if (!isa<ConstantSDNode>(N.getNode()))
430 return false;
431
432 // The immediate operand must be a 24-bit zero-extended immediate.
433 uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
434
435 // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
436 // have the opposite effect on the C flag, so this pattern mustn't match under
437 // those circumstances.
438 if (Immed == 0)
439 return false;
440
441 if (N.getValueType() == MVT::i32)
442 Immed = ~((uint32_t)Immed) + 1;
443 else
444 Immed = ~Immed + 1ULL;
445 if (Immed & 0xFFFFFFFFFF000000ULL)
446 return false;
447
448 Immed &= 0xFFFFFFULL;
449 return SelectArithImmed(CurDAG->getConstant(Immed, SDLoc(N), MVT::i32), Val,
450 Shift);
451 }
452
453 /// getShiftTypeForNode - Translate a shift node to the corresponding
454 /// ShiftType value.
getShiftTypeForNode(SDValue N)455 static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
456 switch (N.getOpcode()) {
457 default:
458 return AArch64_AM::InvalidShiftExtend;
459 case ISD::SHL:
460 return AArch64_AM::LSL;
461 case ISD::SRL:
462 return AArch64_AM::LSR;
463 case ISD::SRA:
464 return AArch64_AM::ASR;
465 case ISD::ROTR:
466 return AArch64_AM::ROR;
467 }
468 }
469
470 /// Determine whether it is worth it to fold SHL into the addressing
471 /// mode.
isWorthFoldingSHL(SDValue V)472 static bool isWorthFoldingSHL(SDValue V) {
473 assert(V.getOpcode() == ISD::SHL && "invalid opcode");
474 // It is worth folding logical shift of up to three places.
475 auto *CSD = dyn_cast<ConstantSDNode>(V.getOperand(1));
476 if (!CSD)
477 return false;
478 unsigned ShiftVal = CSD->getZExtValue();
479 if (ShiftVal > 3)
480 return false;
481
482 // Check if this particular node is reused in any non-memory related
483 // operation. If yes, do not try to fold this node into the address
484 // computation, since the computation will be kept.
485 const SDNode *Node = V.getNode();
486 for (SDNode *UI : Node->uses())
487 if (!isa<MemSDNode>(*UI))
488 for (SDNode *UII : UI->uses())
489 if (!isa<MemSDNode>(*UII))
490 return false;
491 return true;
492 }
493
494 /// Determine whether it is worth to fold V into an extended register.
isWorthFolding(SDValue V) const495 bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
496 // Trivial if we are optimizing for code size or if there is only
497 // one use of the value.
498 if (CurDAG->shouldOptForSize() || V.hasOneUse())
499 return true;
500 // If a subtarget has a fastpath LSL we can fold a logical shift into
501 // the addressing mode and save a cycle.
502 if (Subtarget->hasLSLFast() && V.getOpcode() == ISD::SHL &&
503 isWorthFoldingSHL(V))
504 return true;
505 if (Subtarget->hasLSLFast() && V.getOpcode() == ISD::ADD) {
506 const SDValue LHS = V.getOperand(0);
507 const SDValue RHS = V.getOperand(1);
508 if (LHS.getOpcode() == ISD::SHL && isWorthFoldingSHL(LHS))
509 return true;
510 if (RHS.getOpcode() == ISD::SHL && isWorthFoldingSHL(RHS))
511 return true;
512 }
513
514 // It hurts otherwise, since the value will be reused.
515 return false;
516 }
517
518 /// SelectShiftedRegister - Select a "shifted register" operand. If the value
519 /// is not shifted, set the Shift operand to default of "LSL 0". The logical
520 /// instructions allow the shifted register to be rotated, but the arithmetic
521 /// instructions do not. The AllowROR parameter specifies whether ROR is
522 /// supported.
SelectShiftedRegister(SDValue N,bool AllowROR,SDValue & Reg,SDValue & Shift)523 bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
524 SDValue &Reg, SDValue &Shift) {
525 AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
526 if (ShType == AArch64_AM::InvalidShiftExtend)
527 return false;
528 if (!AllowROR && ShType == AArch64_AM::ROR)
529 return false;
530
531 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
532 unsigned BitSize = N.getValueSizeInBits();
533 unsigned Val = RHS->getZExtValue() & (BitSize - 1);
534 unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);
535
536 Reg = N.getOperand(0);
537 Shift = CurDAG->getTargetConstant(ShVal, SDLoc(N), MVT::i32);
538 return isWorthFolding(N);
539 }
540
541 return false;
542 }
543
544 /// getExtendTypeForNode - Translate an extend node to the corresponding
545 /// ExtendType value.
546 static AArch64_AM::ShiftExtendType
getExtendTypeForNode(SDValue N,bool IsLoadStore=false)547 getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
548 if (N.getOpcode() == ISD::SIGN_EXTEND ||
549 N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
550 EVT SrcVT;
551 if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
552 SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
553 else
554 SrcVT = N.getOperand(0).getValueType();
555
556 if (!IsLoadStore && SrcVT == MVT::i8)
557 return AArch64_AM::SXTB;
558 else if (!IsLoadStore && SrcVT == MVT::i16)
559 return AArch64_AM::SXTH;
560 else if (SrcVT == MVT::i32)
561 return AArch64_AM::SXTW;
562 assert(SrcVT != MVT::i64 && "extend from 64-bits?");
563
564 return AArch64_AM::InvalidShiftExtend;
565 } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
566 N.getOpcode() == ISD::ANY_EXTEND) {
567 EVT SrcVT = N.getOperand(0).getValueType();
568 if (!IsLoadStore && SrcVT == MVT::i8)
569 return AArch64_AM::UXTB;
570 else if (!IsLoadStore && SrcVT == MVT::i16)
571 return AArch64_AM::UXTH;
572 else if (SrcVT == MVT::i32)
573 return AArch64_AM::UXTW;
574 assert(SrcVT != MVT::i64 && "extend from 64-bits?");
575
576 return AArch64_AM::InvalidShiftExtend;
577 } else if (N.getOpcode() == ISD::AND) {
578 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
579 if (!CSD)
580 return AArch64_AM::InvalidShiftExtend;
581 uint64_t AndMask = CSD->getZExtValue();
582
583 switch (AndMask) {
584 default:
585 return AArch64_AM::InvalidShiftExtend;
586 case 0xFF:
587 return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
588 case 0xFFFF:
589 return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
590 case 0xFFFFFFFF:
591 return AArch64_AM::UXTW;
592 }
593 }
594
595 return AArch64_AM::InvalidShiftExtend;
596 }
597
598 // Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
checkHighLaneIndex(SDNode * DL,SDValue & LaneOp,int & LaneIdx)599 static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
600 if (DL->getOpcode() != AArch64ISD::DUPLANE16 &&
601 DL->getOpcode() != AArch64ISD::DUPLANE32)
602 return false;
603
604 SDValue SV = DL->getOperand(0);
605 if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
606 return false;
607
608 SDValue EV = SV.getOperand(1);
609 if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
610 return false;
611
612 ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
613 ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
614 LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
615 LaneOp = EV.getOperand(0);
616
617 return true;
618 }
619
620 // Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is a
621 // high lane extract.
checkV64LaneV128(SDValue Op0,SDValue Op1,SDValue & StdOp,SDValue & LaneOp,int & LaneIdx)622 static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
623 SDValue &LaneOp, int &LaneIdx) {
624
625 if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
626 std::swap(Op0, Op1);
627 if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
628 return false;
629 }
630 StdOp = Op1;
631 return true;
632 }
633
634 /// SelectMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand
635 /// is a lane in the upper half of a 128-bit vector. Recognize and select this
636 /// so that we don't emit unnecessary lane extracts.
tryMLAV64LaneV128(SDNode * N)637 bool AArch64DAGToDAGISel::tryMLAV64LaneV128(SDNode *N) {
638 SDLoc dl(N);
639 SDValue Op0 = N->getOperand(0);
640 SDValue Op1 = N->getOperand(1);
641 SDValue MLAOp1; // Will hold ordinary multiplicand for MLA.
642 SDValue MLAOp2; // Will hold lane-accessed multiplicand for MLA.
643 int LaneIdx = -1; // Will hold the lane index.
644
645 if (Op1.getOpcode() != ISD::MUL ||
646 !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
647 LaneIdx)) {
648 std::swap(Op0, Op1);
649 if (Op1.getOpcode() != ISD::MUL ||
650 !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
651 LaneIdx))
652 return false;
653 }
654
655 SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);
656
657 SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };
658
659 unsigned MLAOpc = ~0U;
660
661 switch (N->getSimpleValueType(0).SimpleTy) {
662 default:
663 llvm_unreachable("Unrecognized MLA.");
664 case MVT::v4i16:
665 MLAOpc = AArch64::MLAv4i16_indexed;
666 break;
667 case MVT::v8i16:
668 MLAOpc = AArch64::MLAv8i16_indexed;
669 break;
670 case MVT::v2i32:
671 MLAOpc = AArch64::MLAv2i32_indexed;
672 break;
673 case MVT::v4i32:
674 MLAOpc = AArch64::MLAv4i32_indexed;
675 break;
676 }
677
678 ReplaceNode(N, CurDAG->getMachineNode(MLAOpc, dl, N->getValueType(0), Ops));
679 return true;
680 }
681
tryMULLV64LaneV128(unsigned IntNo,SDNode * N)682 bool AArch64DAGToDAGISel::tryMULLV64LaneV128(unsigned IntNo, SDNode *N) {
683 SDLoc dl(N);
684 SDValue SMULLOp0;
685 SDValue SMULLOp1;
686 int LaneIdx;
687
688 if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
689 LaneIdx))
690 return false;
691
692 SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);
693
694 SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };
695
696 unsigned SMULLOpc = ~0U;
697
698 if (IntNo == Intrinsic::aarch64_neon_smull) {
699 switch (N->getSimpleValueType(0).SimpleTy) {
700 default:
701 llvm_unreachable("Unrecognized SMULL.");
702 case MVT::v4i32:
703 SMULLOpc = AArch64::SMULLv4i16_indexed;
704 break;
705 case MVT::v2i64:
706 SMULLOpc = AArch64::SMULLv2i32_indexed;
707 break;
708 }
709 } else if (IntNo == Intrinsic::aarch64_neon_umull) {
710 switch (N->getSimpleValueType(0).SimpleTy) {
711 default:
712 llvm_unreachable("Unrecognized SMULL.");
713 case MVT::v4i32:
714 SMULLOpc = AArch64::UMULLv4i16_indexed;
715 break;
716 case MVT::v2i64:
717 SMULLOpc = AArch64::UMULLv2i32_indexed;
718 break;
719 }
720 } else
721 llvm_unreachable("Unrecognized intrinsic.");
722
723 ReplaceNode(N, CurDAG->getMachineNode(SMULLOpc, dl, N->getValueType(0), Ops));
724 return true;
725 }
726
727 /// Instructions that accept extend modifiers like UXTW expect the register
728 /// being extended to be a GPR32, but the incoming DAG might be acting on a
729 /// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
730 /// this is the case.
narrowIfNeeded(SelectionDAG * CurDAG,SDValue N)731 static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
732 if (N.getValueType() == MVT::i32)
733 return N;
734
735 SDLoc dl(N);
736 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
737 MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
738 dl, MVT::i32, N, SubReg);
739 return SDValue(Node, 0);
740 }
741
742 // Returns a suitable CNT/INC/DEC/RDVL multiplier to calculate VSCALE*N.
743 template<signed Low, signed High, signed Scale>
SelectRDVLImm(SDValue N,SDValue & Imm)744 bool AArch64DAGToDAGISel::SelectRDVLImm(SDValue N, SDValue &Imm) {
745 if (!isa<ConstantSDNode>(N))
746 return false;
747
748 int64_t MulImm = cast<ConstantSDNode>(N)->getSExtValue();
749 if ((MulImm % std::abs(Scale)) == 0) {
750 int64_t RDVLImm = MulImm / Scale;
751 if ((RDVLImm >= Low) && (RDVLImm <= High)) {
752 Imm = CurDAG->getTargetConstant(RDVLImm, SDLoc(N), MVT::i32);
753 return true;
754 }
755 }
756
757 return false;
758 }
759
760 /// SelectArithExtendedRegister - Select a "extended register" operand. This
761 /// operand folds in an extend followed by an optional left shift.
SelectArithExtendedRegister(SDValue N,SDValue & Reg,SDValue & Shift)762 bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
763 SDValue &Shift) {
764 unsigned ShiftVal = 0;
765 AArch64_AM::ShiftExtendType Ext;
766
767 if (N.getOpcode() == ISD::SHL) {
768 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
769 if (!CSD)
770 return false;
771 ShiftVal = CSD->getZExtValue();
772 if (ShiftVal > 4)
773 return false;
774
775 Ext = getExtendTypeForNode(N.getOperand(0));
776 if (Ext == AArch64_AM::InvalidShiftExtend)
777 return false;
778
779 Reg = N.getOperand(0).getOperand(0);
780 } else {
781 Ext = getExtendTypeForNode(N);
782 if (Ext == AArch64_AM::InvalidShiftExtend)
783 return false;
784
785 Reg = N.getOperand(0);
786
787 // Don't match if free 32-bit -> 64-bit zext can be used instead.
788 if (Ext == AArch64_AM::UXTW &&
789 Reg->getValueType(0).getSizeInBits() == 32 && isDef32(*Reg.getNode()))
790 return false;
791 }
792
793 // AArch64 mandates that the RHS of the operation must use the smallest
794 // register class that could contain the size being extended from. Thus,
795 // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
796 // there might not be an actual 32-bit value in the program. We can
797 // (harmlessly) synthesize one by injected an EXTRACT_SUBREG here.
798 assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
799 Reg = narrowIfNeeded(CurDAG, Reg);
800 Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
801 MVT::i32);
802 return isWorthFolding(N);
803 }
804
805 /// If there's a use of this ADDlow that's not itself a load/store then we'll
806 /// need to create a real ADD instruction from it anyway and there's no point in
807 /// folding it into the mem op. Theoretically, it shouldn't matter, but there's
808 /// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding
809 /// leads to duplicated ADRP instructions.
isWorthFoldingADDlow(SDValue N)810 static bool isWorthFoldingADDlow(SDValue N) {
811 for (auto Use : N->uses()) {
812 if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
813 Use->getOpcode() != ISD::ATOMIC_LOAD &&
814 Use->getOpcode() != ISD::ATOMIC_STORE)
815 return false;
816
817 // ldar and stlr have much more restrictive addressing modes (just a
818 // register).
819 if (isStrongerThanMonotonic(cast<MemSDNode>(Use)->getOrdering()))
820 return false;
821 }
822
823 return true;
824 }
825
826 /// SelectAddrModeIndexedBitWidth - Select a "register plus scaled (un)signed BW-bit
827 /// immediate" address. The "Size" argument is the size in bytes of the memory
828 /// reference, which determines the scale.
SelectAddrModeIndexedBitWidth(SDValue N,bool IsSignedImm,unsigned BW,unsigned Size,SDValue & Base,SDValue & OffImm)829 bool AArch64DAGToDAGISel::SelectAddrModeIndexedBitWidth(SDValue N, bool IsSignedImm,
830 unsigned BW, unsigned Size,
831 SDValue &Base,
832 SDValue &OffImm) {
833 SDLoc dl(N);
834 const DataLayout &DL = CurDAG->getDataLayout();
835 const TargetLowering *TLI = getTargetLowering();
836 if (N.getOpcode() == ISD::FrameIndex) {
837 int FI = cast<FrameIndexSDNode>(N)->getIndex();
838 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
839 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
840 return true;
841 }
842
843 // As opposed to the (12-bit) Indexed addressing mode below, the 7/9-bit signed
844 // selected here doesn't support labels/immediates, only base+offset.
845 if (CurDAG->isBaseWithConstantOffset(N)) {
846 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
847 if (IsSignedImm) {
848 int64_t RHSC = RHS->getSExtValue();
849 unsigned Scale = Log2_32(Size);
850 int64_t Range = 0x1LL << (BW - 1);
851
852 if ((RHSC & (Size - 1)) == 0 && RHSC >= -(Range << Scale) &&
853 RHSC < (Range << Scale)) {
854 Base = N.getOperand(0);
855 if (Base.getOpcode() == ISD::FrameIndex) {
856 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
857 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
858 }
859 OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
860 return true;
861 }
862 } else {
863 // unsigned Immediate
864 uint64_t RHSC = RHS->getZExtValue();
865 unsigned Scale = Log2_32(Size);
866 uint64_t Range = 0x1ULL << BW;
867
868 if ((RHSC & (Size - 1)) == 0 && RHSC < (Range << Scale)) {
869 Base = N.getOperand(0);
870 if (Base.getOpcode() == ISD::FrameIndex) {
871 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
872 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
873 }
874 OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
875 return true;
876 }
877 }
878 }
879 }
880 // Base only. The address will be materialized into a register before
881 // the memory is accessed.
882 // add x0, Xbase, #offset
883 // stp x1, x2, [x0]
884 Base = N;
885 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
886 return true;
887 }
888
889 /// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
890 /// immediate" address. The "Size" argument is the size in bytes of the memory
891 /// reference, which determines the scale.
SelectAddrModeIndexed(SDValue N,unsigned Size,SDValue & Base,SDValue & OffImm)892 bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
893 SDValue &Base, SDValue &OffImm) {
894 SDLoc dl(N);
895 const DataLayout &DL = CurDAG->getDataLayout();
896 const TargetLowering *TLI = getTargetLowering();
897 if (N.getOpcode() == ISD::FrameIndex) {
898 int FI = cast<FrameIndexSDNode>(N)->getIndex();
899 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
900 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
901 return true;
902 }
903
904 if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
905 GlobalAddressSDNode *GAN =
906 dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
907 Base = N.getOperand(0);
908 OffImm = N.getOperand(1);
909 if (!GAN)
910 return true;
911
912 if (GAN->getOffset() % Size == 0 &&
913 GAN->getGlobal()->getPointerAlignment(DL) >= Size)
914 return true;
915 }
916
917 if (CurDAG->isBaseWithConstantOffset(N)) {
918 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
919 int64_t RHSC = (int64_t)RHS->getZExtValue();
920 unsigned Scale = Log2_32(Size);
921 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
922 Base = N.getOperand(0);
923 if (Base.getOpcode() == ISD::FrameIndex) {
924 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
925 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
926 }
927 OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
928 return true;
929 }
930 }
931 }
932
933 // Before falling back to our general case, check if the unscaled
934 // instructions can handle this. If so, that's preferable.
935 if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
936 return false;
937
938 // Base only. The address will be materialized into a register before
939 // the memory is accessed.
940 // add x0, Xbase, #offset
941 // ldr x0, [x0]
942 Base = N;
943 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
944 return true;
945 }
946
947 /// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
948 /// immediate" address. This should only match when there is an offset that
949 /// is not valid for a scaled immediate addressing mode. The "Size" argument
950 /// is the size in bytes of the memory reference, which is needed here to know
951 /// what is valid for a scaled immediate.
SelectAddrModeUnscaled(SDValue N,unsigned Size,SDValue & Base,SDValue & OffImm)952 bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
953 SDValue &Base,
954 SDValue &OffImm) {
955 if (!CurDAG->isBaseWithConstantOffset(N))
956 return false;
957 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
958 int64_t RHSC = RHS->getSExtValue();
959 // If the offset is valid as a scaled immediate, don't match here.
960 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
961 RHSC < (0x1000 << Log2_32(Size)))
962 return false;
963 if (RHSC >= -256 && RHSC < 256) {
964 Base = N.getOperand(0);
965 if (Base.getOpcode() == ISD::FrameIndex) {
966 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
967 const TargetLowering *TLI = getTargetLowering();
968 Base = CurDAG->getTargetFrameIndex(
969 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
970 }
971 OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i64);
972 return true;
973 }
974 }
975 return false;
976 }
977
Widen(SelectionDAG * CurDAG,SDValue N)978 static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
979 SDLoc dl(N);
980 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
981 SDValue ImpDef = SDValue(
982 CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i64), 0);
983 MachineSDNode *Node = CurDAG->getMachineNode(
984 TargetOpcode::INSERT_SUBREG, dl, MVT::i64, ImpDef, N, SubReg);
985 return SDValue(Node, 0);
986 }
987
988 /// Check if the given SHL node (\p N), can be used to form an
989 /// extended register for an addressing mode.
SelectExtendedSHL(SDValue N,unsigned Size,bool WantExtend,SDValue & Offset,SDValue & SignExtend)990 bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
991 bool WantExtend, SDValue &Offset,
992 SDValue &SignExtend) {
993 assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
994 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
995 if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
996 return false;
997
998 SDLoc dl(N);
999 if (WantExtend) {
1000 AArch64_AM::ShiftExtendType Ext =
1001 getExtendTypeForNode(N.getOperand(0), true);
1002 if (Ext == AArch64_AM::InvalidShiftExtend)
1003 return false;
1004
1005 Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
1006 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
1007 MVT::i32);
1008 } else {
1009 Offset = N.getOperand(0);
1010 SignExtend = CurDAG->getTargetConstant(0, dl, MVT::i32);
1011 }
1012
1013 unsigned LegalShiftVal = Log2_32(Size);
1014 unsigned ShiftVal = CSD->getZExtValue();
1015
1016 if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
1017 return false;
1018
1019 return isWorthFolding(N);
1020 }
1021
SelectAddrModeWRO(SDValue N,unsigned Size,SDValue & Base,SDValue & Offset,SDValue & SignExtend,SDValue & DoShift)1022 bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
1023 SDValue &Base, SDValue &Offset,
1024 SDValue &SignExtend,
1025 SDValue &DoShift) {
1026 if (N.getOpcode() != ISD::ADD)
1027 return false;
1028 SDValue LHS = N.getOperand(0);
1029 SDValue RHS = N.getOperand(1);
1030 SDLoc dl(N);
1031
1032 // We don't want to match immediate adds here, because they are better lowered
1033 // to the register-immediate addressing modes.
1034 if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
1035 return false;
1036
1037 // Check if this particular node is reused in any non-memory related
1038 // operation. If yes, do not try to fold this node into the address
1039 // computation, since the computation will be kept.
1040 const SDNode *Node = N.getNode();
1041 for (SDNode *UI : Node->uses()) {
1042 if (!isa<MemSDNode>(*UI))
1043 return false;
1044 }
1045
1046 // Remember if it is worth folding N when it produces extended register.
1047 bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
1048
1049 // Try to match a shifted extend on the RHS.
1050 if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
1051 SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
1052 Base = LHS;
1053 DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
1054 return true;
1055 }
1056
1057 // Try to match a shifted extend on the LHS.
1058 if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
1059 SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
1060 Base = RHS;
1061 DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
1062 return true;
1063 }
1064
1065 // There was no shift, whatever else we find.
1066 DoShift = CurDAG->getTargetConstant(false, dl, MVT::i32);
1067
1068 AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
1069 // Try to match an unshifted extend on the LHS.
1070 if (IsExtendedRegisterWorthFolding &&
1071 (Ext = getExtendTypeForNode(LHS, true)) !=
1072 AArch64_AM::InvalidShiftExtend) {
1073 Base = RHS;
1074 Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
1075 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
1076 MVT::i32);
1077 if (isWorthFolding(LHS))
1078 return true;
1079 }
1080
1081 // Try to match an unshifted extend on the RHS.
1082 if (IsExtendedRegisterWorthFolding &&
1083 (Ext = getExtendTypeForNode(RHS, true)) !=
1084 AArch64_AM::InvalidShiftExtend) {
1085 Base = LHS;
1086 Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
1087 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
1088 MVT::i32);
1089 if (isWorthFolding(RHS))
1090 return true;
1091 }
1092
1093 return false;
1094 }
1095
1096 // Check if the given immediate is preferred by ADD. If an immediate can be
1097 // encoded in an ADD, or it can be encoded in an "ADD LSL #12" and can not be
1098 // encoded by one MOVZ, return true.
isPreferredADD(int64_t ImmOff)1099 static bool isPreferredADD(int64_t ImmOff) {
1100 // Constant in [0x0, 0xfff] can be encoded in ADD.
1101 if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
1102 return true;
1103 // Check if it can be encoded in an "ADD LSL #12".
1104 if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL)
1105 // As a single MOVZ is faster than a "ADD of LSL #12", ignore such constant.
1106 return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
1107 (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
1108 return false;
1109 }
1110
SelectAddrModeXRO(SDValue N,unsigned Size,SDValue & Base,SDValue & Offset,SDValue & SignExtend,SDValue & DoShift)1111 bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
1112 SDValue &Base, SDValue &Offset,
1113 SDValue &SignExtend,
1114 SDValue &DoShift) {
1115 if (N.getOpcode() != ISD::ADD)
1116 return false;
1117 SDValue LHS = N.getOperand(0);
1118 SDValue RHS = N.getOperand(1);
1119 SDLoc DL(N);
1120
1121 // Check if this particular node is reused in any non-memory related
1122 // operation. If yes, do not try to fold this node into the address
1123 // computation, since the computation will be kept.
1124 const SDNode *Node = N.getNode();
1125 for (SDNode *UI : Node->uses()) {
1126 if (!isa<MemSDNode>(*UI))
1127 return false;
1128 }
1129
1130 // Watch out if RHS is a wide immediate, it can not be selected into
1131 // [BaseReg+Imm] addressing mode. Also it may not be able to be encoded into
1132 // ADD/SUB. Instead it will use [BaseReg + 0] address mode and generate
1133 // instructions like:
1134 // MOV X0, WideImmediate
1135 // ADD X1, BaseReg, X0
1136 // LDR X2, [X1, 0]
1137 // For such situation, using [BaseReg, XReg] addressing mode can save one
1138 // ADD/SUB:
1139 // MOV X0, WideImmediate
1140 // LDR X2, [BaseReg, X0]
1141 if (isa<ConstantSDNode>(RHS)) {
1142 int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
1143 unsigned Scale = Log2_32(Size);
1144 // Skip the immediate can be selected by load/store addressing mode.
1145 // Also skip the immediate can be encoded by a single ADD (SUB is also
1146 // checked by using -ImmOff).
1147 if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
1148 isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
1149 return false;
1150
1151 SDValue Ops[] = { RHS };
1152 SDNode *MOVI =
1153 CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
1154 SDValue MOVIV = SDValue(MOVI, 0);
1155 // This ADD of two X register will be selected into [Reg+Reg] mode.
1156 N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV);
1157 }
1158
1159 // Remember if it is worth folding N when it produces extended register.
1160 bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
1161
1162 // Try to match a shifted extend on the RHS.
1163 if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
1164 SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
1165 Base = LHS;
1166 DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
1167 return true;
1168 }
1169
1170 // Try to match a shifted extend on the LHS.
1171 if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
1172 SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
1173 Base = RHS;
1174 DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
1175 return true;
1176 }
1177
1178 // Match any non-shifted, non-extend, non-immediate add expression.
1179 Base = LHS;
1180 Offset = RHS;
1181 SignExtend = CurDAG->getTargetConstant(false, DL, MVT::i32);
1182 DoShift = CurDAG->getTargetConstant(false, DL, MVT::i32);
1183 // Reg1 + Reg2 is free: no check needed.
1184 return true;
1185 }
1186
createDTuple(ArrayRef<SDValue> Regs)1187 SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
1188 static const unsigned RegClassIDs[] = {
1189 AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
1190 static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
1191 AArch64::dsub2, AArch64::dsub3};
1192
1193 return createTuple(Regs, RegClassIDs, SubRegs);
1194 }
1195
createQTuple(ArrayRef<SDValue> Regs)1196 SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
1197 static const unsigned RegClassIDs[] = {
1198 AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
1199 static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
1200 AArch64::qsub2, AArch64::qsub3};
1201
1202 return createTuple(Regs, RegClassIDs, SubRegs);
1203 }
1204
createZTuple(ArrayRef<SDValue> Regs)1205 SDValue AArch64DAGToDAGISel::createZTuple(ArrayRef<SDValue> Regs) {
1206 static const unsigned RegClassIDs[] = {AArch64::ZPR2RegClassID,
1207 AArch64::ZPR3RegClassID,
1208 AArch64::ZPR4RegClassID};
1209 static const unsigned SubRegs[] = {AArch64::zsub0, AArch64::zsub1,
1210 AArch64::zsub2, AArch64::zsub3};
1211
1212 return createTuple(Regs, RegClassIDs, SubRegs);
1213 }
1214
createTuple(ArrayRef<SDValue> Regs,const unsigned RegClassIDs[],const unsigned SubRegs[])1215 SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
1216 const unsigned RegClassIDs[],
1217 const unsigned SubRegs[]) {
1218 // There's no special register-class for a vector-list of 1 element: it's just
1219 // a vector.
1220 if (Regs.size() == 1)
1221 return Regs[0];
1222
1223 assert(Regs.size() >= 2 && Regs.size() <= 4);
1224
1225 SDLoc DL(Regs[0]);
1226
1227 SmallVector<SDValue, 4> Ops;
1228
1229 // First operand of REG_SEQUENCE is the desired RegClass.
1230 Ops.push_back(
1231 CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], DL, MVT::i32));
1232
1233 // Then we get pairs of source & subregister-position for the components.
1234 for (unsigned i = 0; i < Regs.size(); ++i) {
1235 Ops.push_back(Regs[i]);
1236 Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], DL, MVT::i32));
1237 }
1238
1239 SDNode *N =
1240 CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
1241 return SDValue(N, 0);
1242 }
1243
SelectTable(SDNode * N,unsigned NumVecs,unsigned Opc,bool isExt)1244 void AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc,
1245 bool isExt) {
1246 SDLoc dl(N);
1247 EVT VT = N->getValueType(0);
1248
1249 unsigned ExtOff = isExt;
1250
1251 // Form a REG_SEQUENCE to force register allocation.
1252 unsigned Vec0Off = ExtOff + 1;
1253 SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
1254 N->op_begin() + Vec0Off + NumVecs);
1255 SDValue RegSeq = createQTuple(Regs);
1256
1257 SmallVector<SDValue, 6> Ops;
1258 if (isExt)
1259 Ops.push_back(N->getOperand(1));
1260 Ops.push_back(RegSeq);
1261 Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
1262 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, Ops));
1263 }
1264
tryIndexedLoad(SDNode * N)1265 bool AArch64DAGToDAGISel::tryIndexedLoad(SDNode *N) {
1266 LoadSDNode *LD = cast<LoadSDNode>(N);
1267 if (LD->isUnindexed())
1268 return false;
1269 EVT VT = LD->getMemoryVT();
1270 EVT DstVT = N->getValueType(0);
1271 ISD::MemIndexedMode AM = LD->getAddressingMode();
1272 bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
1273
1274 // We're not doing validity checking here. That was done when checking
1275 // if we should mark the load as indexed or not. We're just selecting
1276 // the right instruction.
1277 unsigned Opcode = 0;
1278
1279 ISD::LoadExtType ExtType = LD->getExtensionType();
1280 bool InsertTo64 = false;
1281 if (VT == MVT::i64)
1282 Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
1283 else if (VT == MVT::i32) {
1284 if (ExtType == ISD::NON_EXTLOAD)
1285 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
1286 else if (ExtType == ISD::SEXTLOAD)
1287 Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
1288 else {
1289 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
1290 InsertTo64 = true;
1291 // The result of the load is only i32. It's the subreg_to_reg that makes
1292 // it into an i64.
1293 DstVT = MVT::i32;
1294 }
1295 } else if (VT == MVT::i16) {
1296 if (ExtType == ISD::SEXTLOAD) {
1297 if (DstVT == MVT::i64)
1298 Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
1299 else
1300 Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
1301 } else {
1302 Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
1303 InsertTo64 = DstVT == MVT::i64;
1304 // The result of the load is only i32. It's the subreg_to_reg that makes
1305 // it into an i64.
1306 DstVT = MVT::i32;
1307 }
1308 } else if (VT == MVT::i8) {
1309 if (ExtType == ISD::SEXTLOAD) {
1310 if (DstVT == MVT::i64)
1311 Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
1312 else
1313 Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
1314 } else {
1315 Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
1316 InsertTo64 = DstVT == MVT::i64;
1317 // The result of the load is only i32. It's the subreg_to_reg that makes
1318 // it into an i64.
1319 DstVT = MVT::i32;
1320 }
1321 } else if (VT == MVT::f16) {
1322 Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
1323 } else if (VT == MVT::bf16) {
1324 Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
1325 } else if (VT == MVT::f32) {
1326 Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
1327 } else if (VT == MVT::f64 || VT.is64BitVector()) {
1328 Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
1329 } else if (VT.is128BitVector()) {
1330 Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
1331 } else
1332 return false;
1333 SDValue Chain = LD->getChain();
1334 SDValue Base = LD->getBasePtr();
1335 ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
1336 int OffsetVal = (int)OffsetOp->getZExtValue();
1337 SDLoc dl(N);
1338 SDValue Offset = CurDAG->getTargetConstant(OffsetVal, dl, MVT::i64);
1339 SDValue Ops[] = { Base, Offset, Chain };
1340 SDNode *Res = CurDAG->getMachineNode(Opcode, dl, MVT::i64, DstVT,
1341 MVT::Other, Ops);
1342 // Either way, we're replacing the node, so tell the caller that.
1343 SDValue LoadedVal = SDValue(Res, 1);
1344 if (InsertTo64) {
1345 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
1346 LoadedVal =
1347 SDValue(CurDAG->getMachineNode(
1348 AArch64::SUBREG_TO_REG, dl, MVT::i64,
1349 CurDAG->getTargetConstant(0, dl, MVT::i64), LoadedVal,
1350 SubReg),
1351 0);
1352 }
1353
1354 ReplaceUses(SDValue(N, 0), LoadedVal);
1355 ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
1356 ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
1357 CurDAG->RemoveDeadNode(N);
1358 return true;
1359 }
1360
SelectLoad(SDNode * N,unsigned NumVecs,unsigned Opc,unsigned SubRegIdx)1361 void AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
1362 unsigned SubRegIdx) {
1363 SDLoc dl(N);
1364 EVT VT = N->getValueType(0);
1365 SDValue Chain = N->getOperand(0);
1366
1367 SDValue Ops[] = {N->getOperand(2), // Mem operand;
1368 Chain};
1369
1370 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
1371
1372 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1373 SDValue SuperReg = SDValue(Ld, 0);
1374 for (unsigned i = 0; i < NumVecs; ++i)
1375 ReplaceUses(SDValue(N, i),
1376 CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
1377
1378 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
1379
1380 // Transfer memoperands.
1381 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1382 CurDAG->setNodeMemRefs(cast<MachineSDNode>(Ld), {MemOp});
1383
1384 CurDAG->RemoveDeadNode(N);
1385 }
1386
SelectPostLoad(SDNode * N,unsigned NumVecs,unsigned Opc,unsigned SubRegIdx)1387 void AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
1388 unsigned Opc, unsigned SubRegIdx) {
1389 SDLoc dl(N);
1390 EVT VT = N->getValueType(0);
1391 SDValue Chain = N->getOperand(0);
1392
1393 SDValue Ops[] = {N->getOperand(1), // Mem operand
1394 N->getOperand(2), // Incremental
1395 Chain};
1396
1397 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1398 MVT::Untyped, MVT::Other};
1399
1400 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1401
1402 // Update uses of write back register
1403 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
1404
1405 // Update uses of vector list
1406 SDValue SuperReg = SDValue(Ld, 1);
1407 if (NumVecs == 1)
1408 ReplaceUses(SDValue(N, 0), SuperReg);
1409 else
1410 for (unsigned i = 0; i < NumVecs; ++i)
1411 ReplaceUses(SDValue(N, i),
1412 CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
1413
1414 // Update the chain
1415 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
1416 CurDAG->RemoveDeadNode(N);
1417 }
1418
1419 /// Optimize \param OldBase and \param OldOffset selecting the best addressing
1420 /// mode. Returns a tuple consisting of an Opcode, an SDValue representing the
1421 /// new Base and an SDValue representing the new offset.
1422 std::tuple<unsigned, SDValue, SDValue>
findAddrModeSVELoadStore(SDNode * N,unsigned Opc_rr,unsigned Opc_ri,const SDValue & OldBase,const SDValue & OldOffset,unsigned Scale)1423 AArch64DAGToDAGISel::findAddrModeSVELoadStore(SDNode *N, unsigned Opc_rr,
1424 unsigned Opc_ri,
1425 const SDValue &OldBase,
1426 const SDValue &OldOffset,
1427 unsigned Scale) {
1428 SDValue NewBase = OldBase;
1429 SDValue NewOffset = OldOffset;
1430 // Detect a possible Reg+Imm addressing mode.
1431 const bool IsRegImm = SelectAddrModeIndexedSVE</*Min=*/-8, /*Max=*/7>(
1432 N, OldBase, NewBase, NewOffset);
1433
1434 // Detect a possible reg+reg addressing mode, but only if we haven't already
1435 // detected a Reg+Imm one.
1436 const bool IsRegReg =
1437 !IsRegImm && SelectSVERegRegAddrMode(OldBase, Scale, NewBase, NewOffset);
1438
1439 // Select the instruction.
1440 return std::make_tuple(IsRegReg ? Opc_rr : Opc_ri, NewBase, NewOffset);
1441 }
1442
SelectPredicatedLoad(SDNode * N,unsigned NumVecs,unsigned Scale,unsigned Opc_ri,unsigned Opc_rr)1443 void AArch64DAGToDAGISel::SelectPredicatedLoad(SDNode *N, unsigned NumVecs,
1444 unsigned Scale, unsigned Opc_ri,
1445 unsigned Opc_rr) {
1446 assert(Scale < 4 && "Invalid scaling value.");
1447 SDLoc DL(N);
1448 EVT VT = N->getValueType(0);
1449 SDValue Chain = N->getOperand(0);
1450
1451 // Optimize addressing mode.
1452 SDValue Base, Offset;
1453 unsigned Opc;
1454 std::tie(Opc, Base, Offset) = findAddrModeSVELoadStore(
1455 N, Opc_rr, Opc_ri, N->getOperand(2),
1456 CurDAG->getTargetConstant(0, DL, MVT::i64), Scale);
1457
1458 SDValue Ops[] = {N->getOperand(1), // Predicate
1459 Base, // Memory operand
1460 Offset, Chain};
1461
1462 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
1463
1464 SDNode *Load = CurDAG->getMachineNode(Opc, DL, ResTys, Ops);
1465 SDValue SuperReg = SDValue(Load, 0);
1466 for (unsigned i = 0; i < NumVecs; ++i)
1467 ReplaceUses(SDValue(N, i), CurDAG->getTargetExtractSubreg(
1468 AArch64::zsub0 + i, DL, VT, SuperReg));
1469
1470 // Copy chain
1471 unsigned ChainIdx = NumVecs;
1472 ReplaceUses(SDValue(N, ChainIdx), SDValue(Load, 1));
1473 CurDAG->RemoveDeadNode(N);
1474 }
1475
SelectStore(SDNode * N,unsigned NumVecs,unsigned Opc)1476 void AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
1477 unsigned Opc) {
1478 SDLoc dl(N);
1479 EVT VT = N->getOperand(2)->getValueType(0);
1480
1481 // Form a REG_SEQUENCE to force register allocation.
1482 bool Is128Bit = VT.getSizeInBits() == 128;
1483 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1484 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
1485
1486 SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
1487 SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);
1488
1489 // Transfer memoperands.
1490 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1491 CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});
1492
1493 ReplaceNode(N, St);
1494 }
1495
SelectPredicatedStore(SDNode * N,unsigned NumVecs,unsigned Scale,unsigned Opc_rr,unsigned Opc_ri)1496 void AArch64DAGToDAGISel::SelectPredicatedStore(SDNode *N, unsigned NumVecs,
1497 unsigned Scale, unsigned Opc_rr,
1498 unsigned Opc_ri) {
1499 SDLoc dl(N);
1500
1501 // Form a REG_SEQUENCE to force register allocation.
1502 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1503 SDValue RegSeq = createZTuple(Regs);
1504
1505 // Optimize addressing mode.
1506 unsigned Opc;
1507 SDValue Offset, Base;
1508 std::tie(Opc, Base, Offset) = findAddrModeSVELoadStore(
1509 N, Opc_rr, Opc_ri, N->getOperand(NumVecs + 3),
1510 CurDAG->getTargetConstant(0, dl, MVT::i64), Scale);
1511
1512 SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), // predicate
1513 Base, // address
1514 Offset, // offset
1515 N->getOperand(0)}; // chain
1516 SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);
1517
1518 ReplaceNode(N, St);
1519 }
1520
SelectAddrModeFrameIndexSVE(SDValue N,SDValue & Base,SDValue & OffImm)1521 bool AArch64DAGToDAGISel::SelectAddrModeFrameIndexSVE(SDValue N, SDValue &Base,
1522 SDValue &OffImm) {
1523 SDLoc dl(N);
1524 const DataLayout &DL = CurDAG->getDataLayout();
1525 const TargetLowering *TLI = getTargetLowering();
1526
1527 // Try to match it for the frame address
1528 if (auto FINode = dyn_cast<FrameIndexSDNode>(N)) {
1529 int FI = FINode->getIndex();
1530 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
1531 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
1532 return true;
1533 }
1534
1535 return false;
1536 }
1537
SelectPostStore(SDNode * N,unsigned NumVecs,unsigned Opc)1538 void AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
1539 unsigned Opc) {
1540 SDLoc dl(N);
1541 EVT VT = N->getOperand(2)->getValueType(0);
1542 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1543 MVT::Other}; // Type for the Chain
1544
1545 // Form a REG_SEQUENCE to force register allocation.
1546 bool Is128Bit = VT.getSizeInBits() == 128;
1547 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1548 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
1549
1550 SDValue Ops[] = {RegSeq,
1551 N->getOperand(NumVecs + 1), // base register
1552 N->getOperand(NumVecs + 2), // Incremental
1553 N->getOperand(0)}; // Chain
1554 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1555
1556 ReplaceNode(N, St);
1557 }
1558
1559 namespace {
1560 /// WidenVector - Given a value in the V64 register class, produce the
1561 /// equivalent value in the V128 register class.
1562 class WidenVector {
1563 SelectionDAG &DAG;
1564
1565 public:
WidenVector(SelectionDAG & DAG)1566 WidenVector(SelectionDAG &DAG) : DAG(DAG) {}
1567
operator ()(SDValue V64Reg)1568 SDValue operator()(SDValue V64Reg) {
1569 EVT VT = V64Reg.getValueType();
1570 unsigned NarrowSize = VT.getVectorNumElements();
1571 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1572 MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
1573 SDLoc DL(V64Reg);
1574
1575 SDValue Undef =
1576 SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
1577 return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
1578 }
1579 };
1580 } // namespace
1581
1582 /// NarrowVector - Given a value in the V128 register class, produce the
1583 /// equivalent value in the V64 register class.
NarrowVector(SDValue V128Reg,SelectionDAG & DAG)1584 static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
1585 EVT VT = V128Reg.getValueType();
1586 unsigned WideSize = VT.getVectorNumElements();
1587 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1588 MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);
1589
1590 return DAG.getTargetExtractSubreg(AArch64::dsub, SDLoc(V128Reg), NarrowTy,
1591 V128Reg);
1592 }
1593
SelectLoadLane(SDNode * N,unsigned NumVecs,unsigned Opc)1594 void AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
1595 unsigned Opc) {
1596 SDLoc dl(N);
1597 EVT VT = N->getValueType(0);
1598 bool Narrow = VT.getSizeInBits() == 64;
1599
1600 // Form a REG_SEQUENCE to force register allocation.
1601 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1602
1603 if (Narrow)
1604 transform(Regs, Regs.begin(),
1605 WidenVector(*CurDAG));
1606
1607 SDValue RegSeq = createQTuple(Regs);
1608
1609 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
1610
1611 unsigned LaneNo =
1612 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1613
1614 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
1615 N->getOperand(NumVecs + 3), N->getOperand(0)};
1616 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1617 SDValue SuperReg = SDValue(Ld, 0);
1618
1619 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
1620 static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
1621 AArch64::qsub2, AArch64::qsub3 };
1622 for (unsigned i = 0; i < NumVecs; ++i) {
1623 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
1624 if (Narrow)
1625 NV = NarrowVector(NV, *CurDAG);
1626 ReplaceUses(SDValue(N, i), NV);
1627 }
1628
1629 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
1630 CurDAG->RemoveDeadNode(N);
1631 }
1632
SelectPostLoadLane(SDNode * N,unsigned NumVecs,unsigned Opc)1633 void AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
1634 unsigned Opc) {
1635 SDLoc dl(N);
1636 EVT VT = N->getValueType(0);
1637 bool Narrow = VT.getSizeInBits() == 64;
1638
1639 // Form a REG_SEQUENCE to force register allocation.
1640 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1641
1642 if (Narrow)
1643 transform(Regs, Regs.begin(),
1644 WidenVector(*CurDAG));
1645
1646 SDValue RegSeq = createQTuple(Regs);
1647
1648 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1649 RegSeq->getValueType(0), MVT::Other};
1650
1651 unsigned LaneNo =
1652 cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
1653
1654 SDValue Ops[] = {RegSeq,
1655 CurDAG->getTargetConstant(LaneNo, dl,
1656 MVT::i64), // Lane Number
1657 N->getOperand(NumVecs + 2), // Base register
1658 N->getOperand(NumVecs + 3), // Incremental
1659 N->getOperand(0)};
1660 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1661
1662 // Update uses of the write back register
1663 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
1664
1665 // Update uses of the vector list
1666 SDValue SuperReg = SDValue(Ld, 1);
1667 if (NumVecs == 1) {
1668 ReplaceUses(SDValue(N, 0),
1669 Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
1670 } else {
1671 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
1672 static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
1673 AArch64::qsub2, AArch64::qsub3 };
1674 for (unsigned i = 0; i < NumVecs; ++i) {
1675 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
1676 SuperReg);
1677 if (Narrow)
1678 NV = NarrowVector(NV, *CurDAG);
1679 ReplaceUses(SDValue(N, i), NV);
1680 }
1681 }
1682
1683 // Update the Chain
1684 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
1685 CurDAG->RemoveDeadNode(N);
1686 }
1687
SelectStoreLane(SDNode * N,unsigned NumVecs,unsigned Opc)1688 void AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
1689 unsigned Opc) {
1690 SDLoc dl(N);
1691 EVT VT = N->getOperand(2)->getValueType(0);
1692 bool Narrow = VT.getSizeInBits() == 64;
1693
1694 // Form a REG_SEQUENCE to force register allocation.
1695 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1696
1697 if (Narrow)
1698 transform(Regs, Regs.begin(),
1699 WidenVector(*CurDAG));
1700
1701 SDValue RegSeq = createQTuple(Regs);
1702
1703 unsigned LaneNo =
1704 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1705
1706 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
1707 N->getOperand(NumVecs + 3), N->getOperand(0)};
1708 SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
1709
1710 // Transfer memoperands.
1711 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1712 CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});
1713
1714 ReplaceNode(N, St);
1715 }
1716
SelectPostStoreLane(SDNode * N,unsigned NumVecs,unsigned Opc)1717 void AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
1718 unsigned Opc) {
1719 SDLoc dl(N);
1720 EVT VT = N->getOperand(2)->getValueType(0);
1721 bool Narrow = VT.getSizeInBits() == 64;
1722
1723 // Form a REG_SEQUENCE to force register allocation.
1724 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1725
1726 if (Narrow)
1727 transform(Regs, Regs.begin(),
1728 WidenVector(*CurDAG));
1729
1730 SDValue RegSeq = createQTuple(Regs);
1731
1732 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1733 MVT::Other};
1734
1735 unsigned LaneNo =
1736 cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
1737
1738 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
1739 N->getOperand(NumVecs + 2), // Base Register
1740 N->getOperand(NumVecs + 3), // Incremental
1741 N->getOperand(0)};
1742 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1743
1744 // Transfer memoperands.
1745 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1746 CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});
1747
1748 ReplaceNode(N, St);
1749 }
1750
isBitfieldExtractOpFromAnd(SelectionDAG * CurDAG,SDNode * N,unsigned & Opc,SDValue & Opd0,unsigned & LSB,unsigned & MSB,unsigned NumberOfIgnoredLowBits,bool BiggerPattern)1751 static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
1752 unsigned &Opc, SDValue &Opd0,
1753 unsigned &LSB, unsigned &MSB,
1754 unsigned NumberOfIgnoredLowBits,
1755 bool BiggerPattern) {
1756 assert(N->getOpcode() == ISD::AND &&
1757 "N must be a AND operation to call this function");
1758
1759 EVT VT = N->getValueType(0);
1760
1761 // Here we can test the type of VT and return false when the type does not
1762 // match, but since it is done prior to that call in the current context
1763 // we turned that into an assert to avoid redundant code.
1764 assert((VT == MVT::i32 || VT == MVT::i64) &&
1765 "Type checking must have been done before calling this function");
1766
1767 // FIXME: simplify-demanded-bits in DAGCombine will probably have
1768 // changed the AND node to a 32-bit mask operation. We'll have to
1769 // undo that as part of the transform here if we want to catch all
1770 // the opportunities.
1771 // Currently the NumberOfIgnoredLowBits argument helps to recover
1772 // form these situations when matching bigger pattern (bitfield insert).
1773
1774 // For unsigned extracts, check for a shift right and mask
1775 uint64_t AndImm = 0;
1776 if (!isOpcWithIntImmediate(N, ISD::AND, AndImm))
1777 return false;
1778
1779 const SDNode *Op0 = N->getOperand(0).getNode();
1780
1781 // Because of simplify-demanded-bits in DAGCombine, the mask may have been
1782 // simplified. Try to undo that
1783 AndImm |= maskTrailingOnes<uint64_t>(NumberOfIgnoredLowBits);
1784
1785 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
1786 if (AndImm & (AndImm + 1))
1787 return false;
1788
1789 bool ClampMSB = false;
1790 uint64_t SrlImm = 0;
1791 // Handle the SRL + ANY_EXTEND case.
1792 if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
1793 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, SrlImm)) {
1794 // Extend the incoming operand of the SRL to 64-bit.
1795 Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
1796 // Make sure to clamp the MSB so that we preserve the semantics of the
1797 // original operations.
1798 ClampMSB = true;
1799 } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
1800 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
1801 SrlImm)) {
1802 // If the shift result was truncated, we can still combine them.
1803 Opd0 = Op0->getOperand(0).getOperand(0);
1804
1805 // Use the type of SRL node.
1806 VT = Opd0->getValueType(0);
1807 } else if (isOpcWithIntImmediate(Op0, ISD::SRL, SrlImm)) {
1808 Opd0 = Op0->getOperand(0);
1809 } else if (BiggerPattern) {
1810 // Let's pretend a 0 shift right has been performed.
1811 // The resulting code will be at least as good as the original one
1812 // plus it may expose more opportunities for bitfield insert pattern.
1813 // FIXME: Currently we limit this to the bigger pattern, because
1814 // some optimizations expect AND and not UBFM.
1815 Opd0 = N->getOperand(0);
1816 } else
1817 return false;
1818
1819 // Bail out on large immediates. This happens when no proper
1820 // combining/constant folding was performed.
1821 if (!BiggerPattern && (SrlImm <= 0 || SrlImm >= VT.getSizeInBits())) {
1822 LLVM_DEBUG(
1823 (dbgs() << N
1824 << ": Found large shift immediate, this should not happen\n"));
1825 return false;
1826 }
1827
1828 LSB = SrlImm;
1829 MSB = SrlImm + (VT == MVT::i32 ? countTrailingOnes<uint32_t>(AndImm)
1830 : countTrailingOnes<uint64_t>(AndImm)) -
1831 1;
1832 if (ClampMSB)
1833 // Since we're moving the extend before the right shift operation, we need
1834 // to clamp the MSB to make sure we don't shift in undefined bits instead of
1835 // the zeros which would get shifted in with the original right shift
1836 // operation.
1837 MSB = MSB > 31 ? 31 : MSB;
1838
1839 Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
1840 return true;
1841 }
1842
isBitfieldExtractOpFromSExtInReg(SDNode * N,unsigned & Opc,SDValue & Opd0,unsigned & Immr,unsigned & Imms)1843 static bool isBitfieldExtractOpFromSExtInReg(SDNode *N, unsigned &Opc,
1844 SDValue &Opd0, unsigned &Immr,
1845 unsigned &Imms) {
1846 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
1847
1848 EVT VT = N->getValueType(0);
1849 unsigned BitWidth = VT.getSizeInBits();
1850 assert((VT == MVT::i32 || VT == MVT::i64) &&
1851 "Type checking must have been done before calling this function");
1852
1853 SDValue Op = N->getOperand(0);
1854 if (Op->getOpcode() == ISD::TRUNCATE) {
1855 Op = Op->getOperand(0);
1856 VT = Op->getValueType(0);
1857 BitWidth = VT.getSizeInBits();
1858 }
1859
1860 uint64_t ShiftImm;
1861 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SRL, ShiftImm) &&
1862 !isOpcWithIntImmediate(Op.getNode(), ISD::SRA, ShiftImm))
1863 return false;
1864
1865 unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
1866 if (ShiftImm + Width > BitWidth)
1867 return false;
1868
1869 Opc = (VT == MVT::i32) ? AArch64::SBFMWri : AArch64::SBFMXri;
1870 Opd0 = Op.getOperand(0);
1871 Immr = ShiftImm;
1872 Imms = ShiftImm + Width - 1;
1873 return true;
1874 }
1875
isSeveralBitsExtractOpFromShr(SDNode * N,unsigned & Opc,SDValue & Opd0,unsigned & LSB,unsigned & MSB)1876 static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
1877 SDValue &Opd0, unsigned &LSB,
1878 unsigned &MSB) {
1879 // We are looking for the following pattern which basically extracts several
1880 // continuous bits from the source value and places it from the LSB of the
1881 // destination value, all other bits of the destination value or set to zero:
1882 //
1883 // Value2 = AND Value, MaskImm
1884 // SRL Value2, ShiftImm
1885 //
1886 // with MaskImm >> ShiftImm to search for the bit width.
1887 //
1888 // This gets selected into a single UBFM:
1889 //
1890 // UBFM Value, ShiftImm, BitWide + SrlImm -1
1891 //
1892
1893 if (N->getOpcode() != ISD::SRL)
1894 return false;
1895
1896 uint64_t AndMask = 0;
1897 if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, AndMask))
1898 return false;
1899
1900 Opd0 = N->getOperand(0).getOperand(0);
1901
1902 uint64_t SrlImm = 0;
1903 if (!isIntImmediate(N->getOperand(1), SrlImm))
1904 return false;
1905
1906 // Check whether we really have several bits extract here.
1907 unsigned BitWide = 64 - countLeadingOnes(~(AndMask >> SrlImm));
1908 if (BitWide && isMask_64(AndMask >> SrlImm)) {
1909 if (N->getValueType(0) == MVT::i32)
1910 Opc = AArch64::UBFMWri;
1911 else
1912 Opc = AArch64::UBFMXri;
1913
1914 LSB = SrlImm;
1915 MSB = BitWide + SrlImm - 1;
1916 return true;
1917 }
1918
1919 return false;
1920 }
1921
isBitfieldExtractOpFromShr(SDNode * N,unsigned & Opc,SDValue & Opd0,unsigned & Immr,unsigned & Imms,bool BiggerPattern)1922 static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
1923 unsigned &Immr, unsigned &Imms,
1924 bool BiggerPattern) {
1925 assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
1926 "N must be a SHR/SRA operation to call this function");
1927
1928 EVT VT = N->getValueType(0);
1929
1930 // Here we can test the type of VT and return false when the type does not
1931 // match, but since it is done prior to that call in the current context
1932 // we turned that into an assert to avoid redundant code.
1933 assert((VT == MVT::i32 || VT == MVT::i64) &&
1934 "Type checking must have been done before calling this function");
1935
1936 // Check for AND + SRL doing several bits extract.
1937 if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, Immr, Imms))
1938 return true;
1939
1940 // We're looking for a shift of a shift.
1941 uint64_t ShlImm = 0;
1942 uint64_t TruncBits = 0;
1943 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, ShlImm)) {
1944 Opd0 = N->getOperand(0).getOperand(0);
1945 } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
1946 N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
1947 // We are looking for a shift of truncate. Truncate from i64 to i32 could
1948 // be considered as setting high 32 bits as zero. Our strategy here is to
1949 // always generate 64bit UBFM. This consistency will help the CSE pass
1950 // later find more redundancy.
1951 Opd0 = N->getOperand(0).getOperand(0);
1952 TruncBits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
1953 VT = Opd0.getValueType();
1954 assert(VT == MVT::i64 && "the promoted type should be i64");
1955 } else if (BiggerPattern) {
1956 // Let's pretend a 0 shift left has been performed.
1957 // FIXME: Currently we limit this to the bigger pattern case,
1958 // because some optimizations expect AND and not UBFM
1959 Opd0 = N->getOperand(0);
1960 } else
1961 return false;
1962
1963 // Missing combines/constant folding may have left us with strange
1964 // constants.
1965 if (ShlImm >= VT.getSizeInBits()) {
1966 LLVM_DEBUG(
1967 (dbgs() << N
1968 << ": Found large shift immediate, this should not happen\n"));
1969 return false;
1970 }
1971
1972 uint64_t SrlImm = 0;
1973 if (!isIntImmediate(N->getOperand(1), SrlImm))
1974 return false;
1975
1976 assert(SrlImm > 0 && SrlImm < VT.getSizeInBits() &&
1977 "bad amount in shift node!");
1978 int immr = SrlImm - ShlImm;
1979 Immr = immr < 0 ? immr + VT.getSizeInBits() : immr;
1980 Imms = VT.getSizeInBits() - ShlImm - TruncBits - 1;
1981 // SRA requires a signed extraction
1982 if (VT == MVT::i32)
1983 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
1984 else
1985 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
1986 return true;
1987 }
1988
tryBitfieldExtractOpFromSExt(SDNode * N)1989 bool AArch64DAGToDAGISel::tryBitfieldExtractOpFromSExt(SDNode *N) {
1990 assert(N->getOpcode() == ISD::SIGN_EXTEND);
1991
1992 EVT VT = N->getValueType(0);
1993 EVT NarrowVT = N->getOperand(0)->getValueType(0);
1994 if (VT != MVT::i64 || NarrowVT != MVT::i32)
1995 return false;
1996
1997 uint64_t ShiftImm;
1998 SDValue Op = N->getOperand(0);
1999 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SRA, ShiftImm))
2000 return false;
2001
2002 SDLoc dl(N);
2003 // Extend the incoming operand of the shift to 64-bits.
2004 SDValue Opd0 = Widen(CurDAG, Op.getOperand(0));
2005 unsigned Immr = ShiftImm;
2006 unsigned Imms = NarrowVT.getSizeInBits() - 1;
2007 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
2008 CurDAG->getTargetConstant(Imms, dl, VT)};
2009 CurDAG->SelectNodeTo(N, AArch64::SBFMXri, VT, Ops);
2010 return true;
2011 }
2012
2013 /// Try to form fcvtl2 instructions from a floating-point extend of a high-half
2014 /// extract of a subvector.
tryHighFPExt(SDNode * N)2015 bool AArch64DAGToDAGISel::tryHighFPExt(SDNode *N) {
2016 assert(N->getOpcode() == ISD::FP_EXTEND);
2017
2018 // There are 2 forms of fcvtl2 - extend to double or extend to float.
2019 SDValue Extract = N->getOperand(0);
2020 EVT VT = N->getValueType(0);
2021 EVT NarrowVT = Extract.getValueType();
2022 if ((VT != MVT::v2f64 || NarrowVT != MVT::v2f32) &&
2023 (VT != MVT::v4f32 || NarrowVT != MVT::v4f16))
2024 return false;
2025
2026 // Optionally look past a bitcast.
2027 Extract = peekThroughBitcasts(Extract);
2028 if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
2029 return false;
2030
2031 // Match extract from start of high half index.
2032 // Example: v8i16 -> v4i16 means the extract must begin at index 4.
2033 unsigned ExtractIndex = Extract.getConstantOperandVal(1);
2034 if (ExtractIndex != Extract.getValueType().getVectorNumElements())
2035 return false;
2036
2037 auto Opcode = VT == MVT::v2f64 ? AArch64::FCVTLv4i32 : AArch64::FCVTLv8i16;
2038 CurDAG->SelectNodeTo(N, Opcode, VT, Extract.getOperand(0));
2039 return true;
2040 }
2041
isBitfieldExtractOp(SelectionDAG * CurDAG,SDNode * N,unsigned & Opc,SDValue & Opd0,unsigned & Immr,unsigned & Imms,unsigned NumberOfIgnoredLowBits=0,bool BiggerPattern=false)2042 static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
2043 SDValue &Opd0, unsigned &Immr, unsigned &Imms,
2044 unsigned NumberOfIgnoredLowBits = 0,
2045 bool BiggerPattern = false) {
2046 if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
2047 return false;
2048
2049 switch (N->getOpcode()) {
2050 default:
2051 if (!N->isMachineOpcode())
2052 return false;
2053 break;
2054 case ISD::AND:
2055 return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, Immr, Imms,
2056 NumberOfIgnoredLowBits, BiggerPattern);
2057 case ISD::SRL:
2058 case ISD::SRA:
2059 return isBitfieldExtractOpFromShr(N, Opc, Opd0, Immr, Imms, BiggerPattern);
2060
2061 case ISD::SIGN_EXTEND_INREG:
2062 return isBitfieldExtractOpFromSExtInReg(N, Opc, Opd0, Immr, Imms);
2063 }
2064
2065 unsigned NOpc = N->getMachineOpcode();
2066 switch (NOpc) {
2067 default:
2068 return false;
2069 case AArch64::SBFMWri:
2070 case AArch64::UBFMWri:
2071 case AArch64::SBFMXri:
2072 case AArch64::UBFMXri:
2073 Opc = NOpc;
2074 Opd0 = N->getOperand(0);
2075 Immr = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
2076 Imms = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
2077 return true;
2078 }
2079 // Unreachable
2080 return false;
2081 }
2082
tryBitfieldExtractOp(SDNode * N)2083 bool AArch64DAGToDAGISel::tryBitfieldExtractOp(SDNode *N) {
2084 unsigned Opc, Immr, Imms;
2085 SDValue Opd0;
2086 if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, Immr, Imms))
2087 return false;
2088
2089 EVT VT = N->getValueType(0);
2090 SDLoc dl(N);
2091
2092 // If the bit extract operation is 64bit but the original type is 32bit, we
2093 // need to add one EXTRACT_SUBREG.
2094 if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
2095 SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, MVT::i64),
2096 CurDAG->getTargetConstant(Imms, dl, MVT::i64)};
2097
2098 SDNode *BFM = CurDAG->getMachineNode(Opc, dl, MVT::i64, Ops64);
2099 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
2100 ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
2101 MVT::i32, SDValue(BFM, 0), SubReg));
2102 return true;
2103 }
2104
2105 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
2106 CurDAG->getTargetConstant(Imms, dl, VT)};
2107 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2108 return true;
2109 }
2110
2111 /// Does DstMask form a complementary pair with the mask provided by
2112 /// BitsToBeInserted, suitable for use in a BFI instruction. Roughly speaking,
2113 /// this asks whether DstMask zeroes precisely those bits that will be set by
2114 /// the other half.
isBitfieldDstMask(uint64_t DstMask,const APInt & BitsToBeInserted,unsigned NumberOfIgnoredHighBits,EVT VT)2115 static bool isBitfieldDstMask(uint64_t DstMask, const APInt &BitsToBeInserted,
2116 unsigned NumberOfIgnoredHighBits, EVT VT) {
2117 assert((VT == MVT::i32 || VT == MVT::i64) &&
2118 "i32 or i64 mask type expected!");
2119 unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;
2120
2121 APInt SignificantDstMask = APInt(BitWidth, DstMask);
2122 APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);
2123
2124 return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
2125 (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
2126 }
2127
2128 // Look for bits that will be useful for later uses.
2129 // A bit is consider useless as soon as it is dropped and never used
2130 // before it as been dropped.
2131 // E.g., looking for useful bit of x
2132 // 1. y = x & 0x7
2133 // 2. z = y >> 2
2134 // After #1, x useful bits are 0x7, then the useful bits of x, live through
2135 // y.
2136 // After #2, the useful bits of x are 0x4.
2137 // However, if x is used on an unpredicatable instruction, then all its bits
2138 // are useful.
2139 // E.g.
2140 // 1. y = x & 0x7
2141 // 2. z = y >> 2
2142 // 3. str x, [@x]
2143 static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);
2144
getUsefulBitsFromAndWithImmediate(SDValue Op,APInt & UsefulBits,unsigned Depth)2145 static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
2146 unsigned Depth) {
2147 uint64_t Imm =
2148 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
2149 Imm = AArch64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
2150 UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
2151 getUsefulBits(Op, UsefulBits, Depth + 1);
2152 }
2153
getUsefulBitsFromBitfieldMoveOpd(SDValue Op,APInt & UsefulBits,uint64_t Imm,uint64_t MSB,unsigned Depth)2154 static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
2155 uint64_t Imm, uint64_t MSB,
2156 unsigned Depth) {
2157 // inherit the bitwidth value
2158 APInt OpUsefulBits(UsefulBits);
2159 OpUsefulBits = 1;
2160
2161 if (MSB >= Imm) {
2162 OpUsefulBits <<= MSB - Imm + 1;
2163 --OpUsefulBits;
2164 // The interesting part will be in the lower part of the result
2165 getUsefulBits(Op, OpUsefulBits, Depth + 1);
2166 // The interesting part was starting at Imm in the argument
2167 OpUsefulBits <<= Imm;
2168 } else {
2169 OpUsefulBits <<= MSB + 1;
2170 --OpUsefulBits;
2171 // The interesting part will be shifted in the result
2172 OpUsefulBits <<= OpUsefulBits.getBitWidth() - Imm;
2173 getUsefulBits(Op, OpUsefulBits, Depth + 1);
2174 // The interesting part was at zero in the argument
2175 OpUsefulBits.lshrInPlace(OpUsefulBits.getBitWidth() - Imm);
2176 }
2177
2178 UsefulBits &= OpUsefulBits;
2179 }
2180
getUsefulBitsFromUBFM(SDValue Op,APInt & UsefulBits,unsigned Depth)2181 static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
2182 unsigned Depth) {
2183 uint64_t Imm =
2184 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
2185 uint64_t MSB =
2186 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
2187
2188 getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
2189 }
2190
getUsefulBitsFromOrWithShiftedReg(SDValue Op,APInt & UsefulBits,unsigned Depth)2191 static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
2192 unsigned Depth) {
2193 uint64_t ShiftTypeAndValue =
2194 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
2195 APInt Mask(UsefulBits);
2196 Mask.clearAllBits();
2197 Mask.flipAllBits();
2198
2199 if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
2200 // Shift Left
2201 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
2202 Mask <<= ShiftAmt;
2203 getUsefulBits(Op, Mask, Depth + 1);
2204 Mask.lshrInPlace(ShiftAmt);
2205 } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
2206 // Shift Right
2207 // We do not handle AArch64_AM::ASR, because the sign will change the
2208 // number of useful bits
2209 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
2210 Mask.lshrInPlace(ShiftAmt);
2211 getUsefulBits(Op, Mask, Depth + 1);
2212 Mask <<= ShiftAmt;
2213 } else
2214 return;
2215
2216 UsefulBits &= Mask;
2217 }
2218
getUsefulBitsFromBFM(SDValue Op,SDValue Orig,APInt & UsefulBits,unsigned Depth)2219 static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
2220 unsigned Depth) {
2221 uint64_t Imm =
2222 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
2223 uint64_t MSB =
2224 cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();
2225
2226 APInt OpUsefulBits(UsefulBits);
2227 OpUsefulBits = 1;
2228
2229 APInt ResultUsefulBits(UsefulBits.getBitWidth(), 0);
2230 ResultUsefulBits.flipAllBits();
2231 APInt Mask(UsefulBits.getBitWidth(), 0);
2232
2233 getUsefulBits(Op, ResultUsefulBits, Depth + 1);
2234
2235 if (MSB >= Imm) {
2236 // The instruction is a BFXIL.
2237 uint64_t Width = MSB - Imm + 1;
2238 uint64_t LSB = Imm;
2239
2240 OpUsefulBits <<= Width;
2241 --OpUsefulBits;
2242
2243 if (Op.getOperand(1) == Orig) {
2244 // Copy the low bits from the result to bits starting from LSB.
2245 Mask = ResultUsefulBits & OpUsefulBits;
2246 Mask <<= LSB;
2247 }
2248
2249 if (Op.getOperand(0) == Orig)
2250 // Bits starting from LSB in the input contribute to the result.
2251 Mask |= (ResultUsefulBits & ~OpUsefulBits);
2252 } else {
2253 // The instruction is a BFI.
2254 uint64_t Width = MSB + 1;
2255 uint64_t LSB = UsefulBits.getBitWidth() - Imm;
2256
2257 OpUsefulBits <<= Width;
2258 --OpUsefulBits;
2259 OpUsefulBits <<= LSB;
2260
2261 if (Op.getOperand(1) == Orig) {
2262 // Copy the bits from the result to the zero bits.
2263 Mask = ResultUsefulBits & OpUsefulBits;
2264 Mask.lshrInPlace(LSB);
2265 }
2266
2267 if (Op.getOperand(0) == Orig)
2268 Mask |= (ResultUsefulBits & ~OpUsefulBits);
2269 }
2270
2271 UsefulBits &= Mask;
2272 }
2273
getUsefulBitsForUse(SDNode * UserNode,APInt & UsefulBits,SDValue Orig,unsigned Depth)2274 static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
2275 SDValue Orig, unsigned Depth) {
2276
2277 // Users of this node should have already been instruction selected
2278 // FIXME: Can we turn that into an assert?
2279 if (!UserNode->isMachineOpcode())
2280 return;
2281
2282 switch (UserNode->getMachineOpcode()) {
2283 default:
2284 return;
2285 case AArch64::ANDSWri:
2286 case AArch64::ANDSXri:
2287 case AArch64::ANDWri:
2288 case AArch64::ANDXri:
2289 // We increment Depth only when we call the getUsefulBits
2290 return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
2291 Depth);
2292 case AArch64::UBFMWri:
2293 case AArch64::UBFMXri:
2294 return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);
2295
2296 case AArch64::ORRWrs:
2297 case AArch64::ORRXrs:
2298 if (UserNode->getOperand(1) != Orig)
2299 return;
2300 return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
2301 Depth);
2302 case AArch64::BFMWri:
2303 case AArch64::BFMXri:
2304 return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
2305
2306 case AArch64::STRBBui:
2307 case AArch64::STURBBi:
2308 if (UserNode->getOperand(0) != Orig)
2309 return;
2310 UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xff);
2311 return;
2312
2313 case AArch64::STRHHui:
2314 case AArch64::STURHHi:
2315 if (UserNode->getOperand(0) != Orig)
2316 return;
2317 UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xffff);
2318 return;
2319 }
2320 }
2321
getUsefulBits(SDValue Op,APInt & UsefulBits,unsigned Depth)2322 static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
2323 if (Depth >= SelectionDAG::MaxRecursionDepth)
2324 return;
2325 // Initialize UsefulBits
2326 if (!Depth) {
2327 unsigned Bitwidth = Op.getScalarValueSizeInBits();
2328 // At the beginning, assume every produced bits is useful
2329 UsefulBits = APInt(Bitwidth, 0);
2330 UsefulBits.flipAllBits();
2331 }
2332 APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);
2333
2334 for (SDNode *Node : Op.getNode()->uses()) {
2335 // A use cannot produce useful bits
2336 APInt UsefulBitsForUse = APInt(UsefulBits);
2337 getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
2338 UsersUsefulBits |= UsefulBitsForUse;
2339 }
2340 // UsefulBits contains the produced bits that are meaningful for the
2341 // current definition, thus a user cannot make a bit meaningful at
2342 // this point
2343 UsefulBits &= UsersUsefulBits;
2344 }
2345
2346 /// Create a machine node performing a notional SHL of Op by ShlAmount. If
2347 /// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
2348 /// 0, return Op unchanged.
getLeftShift(SelectionDAG * CurDAG,SDValue Op,int ShlAmount)2349 static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
2350 if (ShlAmount == 0)
2351 return Op;
2352
2353 EVT VT = Op.getValueType();
2354 SDLoc dl(Op);
2355 unsigned BitWidth = VT.getSizeInBits();
2356 unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;
2357
2358 SDNode *ShiftNode;
2359 if (ShlAmount > 0) {
2360 // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
2361 ShiftNode = CurDAG->getMachineNode(
2362 UBFMOpc, dl, VT, Op,
2363 CurDAG->getTargetConstant(BitWidth - ShlAmount, dl, VT),
2364 CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, dl, VT));
2365 } else {
2366 // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
2367 assert(ShlAmount < 0 && "expected right shift");
2368 int ShrAmount = -ShlAmount;
2369 ShiftNode = CurDAG->getMachineNode(
2370 UBFMOpc, dl, VT, Op, CurDAG->getTargetConstant(ShrAmount, dl, VT),
2371 CurDAG->getTargetConstant(BitWidth - 1, dl, VT));
2372 }
2373
2374 return SDValue(ShiftNode, 0);
2375 }
2376
2377 /// Does this tree qualify as an attempt to move a bitfield into position,
2378 /// essentially "(and (shl VAL, N), Mask)".
isBitfieldPositioningOp(SelectionDAG * CurDAG,SDValue Op,bool BiggerPattern,SDValue & Src,int & ShiftAmount,int & MaskWidth)2379 static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
2380 bool BiggerPattern,
2381 SDValue &Src, int &ShiftAmount,
2382 int &MaskWidth) {
2383 EVT VT = Op.getValueType();
2384 unsigned BitWidth = VT.getSizeInBits();
2385 (void)BitWidth;
2386 assert(BitWidth == 32 || BitWidth == 64);
2387
2388 KnownBits Known = CurDAG->computeKnownBits(Op);
2389
2390 // Non-zero in the sense that they're not provably zero, which is the key
2391 // point if we want to use this value
2392 uint64_t NonZeroBits = (~Known.Zero).getZExtValue();
2393
2394 // Discard a constant AND mask if present. It's safe because the node will
2395 // already have been factored into the computeKnownBits calculation above.
2396 uint64_t AndImm;
2397 if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
2398 assert((~APInt(BitWidth, AndImm) & ~Known.Zero) == 0);
2399 Op = Op.getOperand(0);
2400 }
2401
2402 // Don't match if the SHL has more than one use, since then we'll end up
2403 // generating SHL+UBFIZ instead of just keeping SHL+AND.
2404 if (!BiggerPattern && !Op.hasOneUse())
2405 return false;
2406
2407 uint64_t ShlImm;
2408 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
2409 return false;
2410 Op = Op.getOperand(0);
2411
2412 if (!isShiftedMask_64(NonZeroBits))
2413 return false;
2414
2415 ShiftAmount = countTrailingZeros(NonZeroBits);
2416 MaskWidth = countTrailingOnes(NonZeroBits >> ShiftAmount);
2417
2418 // BFI encompasses sufficiently many nodes that it's worth inserting an extra
2419 // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
2420 // amount. BiggerPattern is true when this pattern is being matched for BFI,
2421 // BiggerPattern is false when this pattern is being matched for UBFIZ, in
2422 // which case it is not profitable to insert an extra shift.
2423 if (ShlImm - ShiftAmount != 0 && !BiggerPattern)
2424 return false;
2425 Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);
2426
2427 return true;
2428 }
2429
isShiftedMask(uint64_t Mask,EVT VT)2430 static bool isShiftedMask(uint64_t Mask, EVT VT) {
2431 assert(VT == MVT::i32 || VT == MVT::i64);
2432 if (VT == MVT::i32)
2433 return isShiftedMask_32(Mask);
2434 return isShiftedMask_64(Mask);
2435 }
2436
2437 // Generate a BFI/BFXIL from 'or (and X, MaskImm), OrImm' iff the value being
2438 // inserted only sets known zero bits.
tryBitfieldInsertOpFromOrAndImm(SDNode * N,SelectionDAG * CurDAG)2439 static bool tryBitfieldInsertOpFromOrAndImm(SDNode *N, SelectionDAG *CurDAG) {
2440 assert(N->getOpcode() == ISD::OR && "Expect a OR operation");
2441
2442 EVT VT = N->getValueType(0);
2443 if (VT != MVT::i32 && VT != MVT::i64)
2444 return false;
2445
2446 unsigned BitWidth = VT.getSizeInBits();
2447
2448 uint64_t OrImm;
2449 if (!isOpcWithIntImmediate(N, ISD::OR, OrImm))
2450 return false;
2451
2452 // Skip this transformation if the ORR immediate can be encoded in the ORR.
2453 // Otherwise, we'll trade an AND+ORR for ORR+BFI/BFXIL, which is most likely
2454 // performance neutral.
2455 if (AArch64_AM::isLogicalImmediate(OrImm, BitWidth))
2456 return false;
2457
2458 uint64_t MaskImm;
2459 SDValue And = N->getOperand(0);
2460 // Must be a single use AND with an immediate operand.
2461 if (!And.hasOneUse() ||
2462 !isOpcWithIntImmediate(And.getNode(), ISD::AND, MaskImm))
2463 return false;
2464
2465 // Compute the Known Zero for the AND as this allows us to catch more general
2466 // cases than just looking for AND with imm.
2467 KnownBits Known = CurDAG->computeKnownBits(And);
2468
2469 // Non-zero in the sense that they're not provably zero, which is the key
2470 // point if we want to use this value.
2471 uint64_t NotKnownZero = (~Known.Zero).getZExtValue();
2472
2473 // The KnownZero mask must be a shifted mask (e.g., 1110..011, 11100..00).
2474 if (!isShiftedMask(Known.Zero.getZExtValue(), VT))
2475 return false;
2476
2477 // The bits being inserted must only set those bits that are known to be zero.
2478 if ((OrImm & NotKnownZero) != 0) {
2479 // FIXME: It's okay if the OrImm sets NotKnownZero bits to 1, but we don't
2480 // currently handle this case.
2481 return false;
2482 }
2483
2484 // BFI/BFXIL dst, src, #lsb, #width.
2485 int LSB = countTrailingOnes(NotKnownZero);
2486 int Width = BitWidth - APInt(BitWidth, NotKnownZero).countPopulation();
2487
2488 // BFI/BFXIL is an alias of BFM, so translate to BFM operands.
2489 unsigned ImmR = (BitWidth - LSB) % BitWidth;
2490 unsigned ImmS = Width - 1;
2491
2492 // If we're creating a BFI instruction avoid cases where we need more
2493 // instructions to materialize the BFI constant as compared to the original
2494 // ORR. A BFXIL will use the same constant as the original ORR, so the code
2495 // should be no worse in this case.
2496 bool IsBFI = LSB != 0;
2497 uint64_t BFIImm = OrImm >> LSB;
2498 if (IsBFI && !AArch64_AM::isLogicalImmediate(BFIImm, BitWidth)) {
2499 // We have a BFI instruction and we know the constant can't be materialized
2500 // with a ORR-immediate with the zero register.
2501 unsigned OrChunks = 0, BFIChunks = 0;
2502 for (unsigned Shift = 0; Shift < BitWidth; Shift += 16) {
2503 if (((OrImm >> Shift) & 0xFFFF) != 0)
2504 ++OrChunks;
2505 if (((BFIImm >> Shift) & 0xFFFF) != 0)
2506 ++BFIChunks;
2507 }
2508 if (BFIChunks > OrChunks)
2509 return false;
2510 }
2511
2512 // Materialize the constant to be inserted.
2513 SDLoc DL(N);
2514 unsigned MOVIOpc = VT == MVT::i32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;
2515 SDNode *MOVI = CurDAG->getMachineNode(
2516 MOVIOpc, DL, VT, CurDAG->getTargetConstant(BFIImm, DL, VT));
2517
2518 // Create the BFI/BFXIL instruction.
2519 SDValue Ops[] = {And.getOperand(0), SDValue(MOVI, 0),
2520 CurDAG->getTargetConstant(ImmR, DL, VT),
2521 CurDAG->getTargetConstant(ImmS, DL, VT)};
2522 unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
2523 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2524 return true;
2525 }
2526
tryBitfieldInsertOpFromOr(SDNode * N,const APInt & UsefulBits,SelectionDAG * CurDAG)2527 static bool tryBitfieldInsertOpFromOr(SDNode *N, const APInt &UsefulBits,
2528 SelectionDAG *CurDAG) {
2529 assert(N->getOpcode() == ISD::OR && "Expect a OR operation");
2530
2531 EVT VT = N->getValueType(0);
2532 if (VT != MVT::i32 && VT != MVT::i64)
2533 return false;
2534
2535 unsigned BitWidth = VT.getSizeInBits();
2536
2537 // Because of simplify-demanded-bits in DAGCombine, involved masks may not
2538 // have the expected shape. Try to undo that.
2539
2540 unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
2541 unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();
2542
2543 // Given a OR operation, check if we have the following pattern
2544 // ubfm c, b, imm, imm2 (or something that does the same jobs, see
2545 // isBitfieldExtractOp)
2546 // d = e & mask2 ; where mask is a binary sequence of 1..10..0 and
2547 // countTrailingZeros(mask2) == imm2 - imm + 1
2548 // f = d | c
2549 // if yes, replace the OR instruction with:
2550 // f = BFM Opd0, Opd1, LSB, MSB ; where LSB = imm, and MSB = imm2
2551
2552 // OR is commutative, check all combinations of operand order and values of
2553 // BiggerPattern, i.e.
2554 // Opd0, Opd1, BiggerPattern=false
2555 // Opd1, Opd0, BiggerPattern=false
2556 // Opd0, Opd1, BiggerPattern=true
2557 // Opd1, Opd0, BiggerPattern=true
2558 // Several of these combinations may match, so check with BiggerPattern=false
2559 // first since that will produce better results by matching more instructions
2560 // and/or inserting fewer extra instructions.
2561 for (int I = 0; I < 4; ++I) {
2562
2563 SDValue Dst, Src;
2564 unsigned ImmR, ImmS;
2565 bool BiggerPattern = I / 2;
2566 SDValue OrOpd0Val = N->getOperand(I % 2);
2567 SDNode *OrOpd0 = OrOpd0Val.getNode();
2568 SDValue OrOpd1Val = N->getOperand((I + 1) % 2);
2569 SDNode *OrOpd1 = OrOpd1Val.getNode();
2570
2571 unsigned BFXOpc;
2572 int DstLSB, Width;
2573 if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
2574 NumberOfIgnoredLowBits, BiggerPattern)) {
2575 // Check that the returned opcode is compatible with the pattern,
2576 // i.e., same type and zero extended (U and not S)
2577 if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
2578 (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))
2579 continue;
2580
2581 // Compute the width of the bitfield insertion
2582 DstLSB = 0;
2583 Width = ImmS - ImmR + 1;
2584 // FIXME: This constraint is to catch bitfield insertion we may
2585 // want to widen the pattern if we want to grab general bitfied
2586 // move case
2587 if (Width <= 0)
2588 continue;
2589
2590 // If the mask on the insertee is correct, we have a BFXIL operation. We
2591 // can share the ImmR and ImmS values from the already-computed UBFM.
2592 } else if (isBitfieldPositioningOp(CurDAG, OrOpd0Val,
2593 BiggerPattern,
2594 Src, DstLSB, Width)) {
2595 ImmR = (BitWidth - DstLSB) % BitWidth;
2596 ImmS = Width - 1;
2597 } else
2598 continue;
2599
2600 // Check the second part of the pattern
2601 EVT VT = OrOpd1Val.getValueType();
2602 assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");
2603
2604 // Compute the Known Zero for the candidate of the first operand.
2605 // This allows to catch more general case than just looking for
2606 // AND with imm. Indeed, simplify-demanded-bits may have removed
2607 // the AND instruction because it proves it was useless.
2608 KnownBits Known = CurDAG->computeKnownBits(OrOpd1Val);
2609
2610 // Check if there is enough room for the second operand to appear
2611 // in the first one
2612 APInt BitsToBeInserted =
2613 APInt::getBitsSet(Known.getBitWidth(), DstLSB, DstLSB + Width);
2614
2615 if ((BitsToBeInserted & ~Known.Zero) != 0)
2616 continue;
2617
2618 // Set the first operand
2619 uint64_t Imm;
2620 if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
2621 isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
2622 // In that case, we can eliminate the AND
2623 Dst = OrOpd1->getOperand(0);
2624 else
2625 // Maybe the AND has been removed by simplify-demanded-bits
2626 // or is useful because it discards more bits
2627 Dst = OrOpd1Val;
2628
2629 // both parts match
2630 SDLoc DL(N);
2631 SDValue Ops[] = {Dst, Src, CurDAG->getTargetConstant(ImmR, DL, VT),
2632 CurDAG->getTargetConstant(ImmS, DL, VT)};
2633 unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
2634 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2635 return true;
2636 }
2637
2638 // Generate a BFXIL from 'or (and X, Mask0Imm), (and Y, Mask1Imm)' iff
2639 // Mask0Imm and ~Mask1Imm are equivalent and one of the MaskImms is a shifted
2640 // mask (e.g., 0x000ffff0).
2641 uint64_t Mask0Imm, Mask1Imm;
2642 SDValue And0 = N->getOperand(0);
2643 SDValue And1 = N->getOperand(1);
2644 if (And0.hasOneUse() && And1.hasOneUse() &&
2645 isOpcWithIntImmediate(And0.getNode(), ISD::AND, Mask0Imm) &&
2646 isOpcWithIntImmediate(And1.getNode(), ISD::AND, Mask1Imm) &&
2647 APInt(BitWidth, Mask0Imm) == ~APInt(BitWidth, Mask1Imm) &&
2648 (isShiftedMask(Mask0Imm, VT) || isShiftedMask(Mask1Imm, VT))) {
2649
2650 // ORR is commutative, so canonicalize to the form 'or (and X, Mask0Imm),
2651 // (and Y, Mask1Imm)' where Mask1Imm is the shifted mask masking off the
2652 // bits to be inserted.
2653 if (isShiftedMask(Mask0Imm, VT)) {
2654 std::swap(And0, And1);
2655 std::swap(Mask0Imm, Mask1Imm);
2656 }
2657
2658 SDValue Src = And1->getOperand(0);
2659 SDValue Dst = And0->getOperand(0);
2660 unsigned LSB = countTrailingZeros(Mask1Imm);
2661 int Width = BitWidth - APInt(BitWidth, Mask0Imm).countPopulation();
2662
2663 // The BFXIL inserts the low-order bits from a source register, so right
2664 // shift the needed bits into place.
2665 SDLoc DL(N);
2666 unsigned ShiftOpc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
2667 SDNode *LSR = CurDAG->getMachineNode(
2668 ShiftOpc, DL, VT, Src, CurDAG->getTargetConstant(LSB, DL, VT),
2669 CurDAG->getTargetConstant(BitWidth - 1, DL, VT));
2670
2671 // BFXIL is an alias of BFM, so translate to BFM operands.
2672 unsigned ImmR = (BitWidth - LSB) % BitWidth;
2673 unsigned ImmS = Width - 1;
2674
2675 // Create the BFXIL instruction.
2676 SDValue Ops[] = {Dst, SDValue(LSR, 0),
2677 CurDAG->getTargetConstant(ImmR, DL, VT),
2678 CurDAG->getTargetConstant(ImmS, DL, VT)};
2679 unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
2680 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2681 return true;
2682 }
2683
2684 return false;
2685 }
2686
tryBitfieldInsertOp(SDNode * N)2687 bool AArch64DAGToDAGISel::tryBitfieldInsertOp(SDNode *N) {
2688 if (N->getOpcode() != ISD::OR)
2689 return false;
2690
2691 APInt NUsefulBits;
2692 getUsefulBits(SDValue(N, 0), NUsefulBits);
2693
2694 // If all bits are not useful, just return UNDEF.
2695 if (!NUsefulBits) {
2696 CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0));
2697 return true;
2698 }
2699
2700 if (tryBitfieldInsertOpFromOr(N, NUsefulBits, CurDAG))
2701 return true;
2702
2703 return tryBitfieldInsertOpFromOrAndImm(N, CurDAG);
2704 }
2705
2706 /// SelectBitfieldInsertInZeroOp - Match a UBFIZ instruction that is the
2707 /// equivalent of a left shift by a constant amount followed by an and masking
2708 /// out a contiguous set of bits.
tryBitfieldInsertInZeroOp(SDNode * N)2709 bool AArch64DAGToDAGISel::tryBitfieldInsertInZeroOp(SDNode *N) {
2710 if (N->getOpcode() != ISD::AND)
2711 return false;
2712
2713 EVT VT = N->getValueType(0);
2714 if (VT != MVT::i32 && VT != MVT::i64)
2715 return false;
2716
2717 SDValue Op0;
2718 int DstLSB, Width;
2719 if (!isBitfieldPositioningOp(CurDAG, SDValue(N, 0), /*BiggerPattern=*/false,
2720 Op0, DstLSB, Width))
2721 return false;
2722
2723 // ImmR is the rotate right amount.
2724 unsigned ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
2725 // ImmS is the most significant bit of the source to be moved.
2726 unsigned ImmS = Width - 1;
2727
2728 SDLoc DL(N);
2729 SDValue Ops[] = {Op0, CurDAG->getTargetConstant(ImmR, DL, VT),
2730 CurDAG->getTargetConstant(ImmS, DL, VT)};
2731 unsigned Opc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
2732 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2733 return true;
2734 }
2735
2736 /// tryShiftAmountMod - Take advantage of built-in mod of shift amount in
2737 /// variable shift/rotate instructions.
tryShiftAmountMod(SDNode * N)2738 bool AArch64DAGToDAGISel::tryShiftAmountMod(SDNode *N) {
2739 EVT VT = N->getValueType(0);
2740
2741 unsigned Opc;
2742 switch (N->getOpcode()) {
2743 case ISD::ROTR:
2744 Opc = (VT == MVT::i32) ? AArch64::RORVWr : AArch64::RORVXr;
2745 break;
2746 case ISD::SHL:
2747 Opc = (VT == MVT::i32) ? AArch64::LSLVWr : AArch64::LSLVXr;
2748 break;
2749 case ISD::SRL:
2750 Opc = (VT == MVT::i32) ? AArch64::LSRVWr : AArch64::LSRVXr;
2751 break;
2752 case ISD::SRA:
2753 Opc = (VT == MVT::i32) ? AArch64::ASRVWr : AArch64::ASRVXr;
2754 break;
2755 default:
2756 return false;
2757 }
2758
2759 uint64_t Size;
2760 uint64_t Bits;
2761 if (VT == MVT::i32) {
2762 Bits = 5;
2763 Size = 32;
2764 } else if (VT == MVT::i64) {
2765 Bits = 6;
2766 Size = 64;
2767 } else
2768 return false;
2769
2770 SDValue ShiftAmt = N->getOperand(1);
2771 SDLoc DL(N);
2772 SDValue NewShiftAmt;
2773
2774 // Skip over an extend of the shift amount.
2775 if (ShiftAmt->getOpcode() == ISD::ZERO_EXTEND ||
2776 ShiftAmt->getOpcode() == ISD::ANY_EXTEND)
2777 ShiftAmt = ShiftAmt->getOperand(0);
2778
2779 if (ShiftAmt->getOpcode() == ISD::ADD || ShiftAmt->getOpcode() == ISD::SUB) {
2780 SDValue Add0 = ShiftAmt->getOperand(0);
2781 SDValue Add1 = ShiftAmt->getOperand(1);
2782 uint64_t Add0Imm;
2783 uint64_t Add1Imm;
2784 // If we are shifting by X+/-N where N == 0 mod Size, then just shift by X
2785 // to avoid the ADD/SUB.
2786 if (isIntImmediate(Add1, Add1Imm) && (Add1Imm % Size == 0))
2787 NewShiftAmt = Add0;
2788 // If we are shifting by N-X where N == 0 mod Size, then just shift by -X to
2789 // generate a NEG instead of a SUB of a constant.
2790 else if (ShiftAmt->getOpcode() == ISD::SUB &&
2791 isIntImmediate(Add0, Add0Imm) && Add0Imm != 0 &&
2792 (Add0Imm % Size == 0)) {
2793 unsigned NegOpc;
2794 unsigned ZeroReg;
2795 EVT SubVT = ShiftAmt->getValueType(0);
2796 if (SubVT == MVT::i32) {
2797 NegOpc = AArch64::SUBWrr;
2798 ZeroReg = AArch64::WZR;
2799 } else {
2800 assert(SubVT == MVT::i64);
2801 NegOpc = AArch64::SUBXrr;
2802 ZeroReg = AArch64::XZR;
2803 }
2804 SDValue Zero =
2805 CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, ZeroReg, SubVT);
2806 MachineSDNode *Neg =
2807 CurDAG->getMachineNode(NegOpc, DL, SubVT, Zero, Add1);
2808 NewShiftAmt = SDValue(Neg, 0);
2809 } else
2810 return false;
2811 } else {
2812 // If the shift amount is masked with an AND, check that the mask covers the
2813 // bits that are implicitly ANDed off by the above opcodes and if so, skip
2814 // the AND.
2815 uint64_t MaskImm;
2816 if (!isOpcWithIntImmediate(ShiftAmt.getNode(), ISD::AND, MaskImm) &&
2817 !isOpcWithIntImmediate(ShiftAmt.getNode(), AArch64ISD::ANDS, MaskImm))
2818 return false;
2819
2820 if (countTrailingOnes(MaskImm) < Bits)
2821 return false;
2822
2823 NewShiftAmt = ShiftAmt->getOperand(0);
2824 }
2825
2826 // Narrow/widen the shift amount to match the size of the shift operation.
2827 if (VT == MVT::i32)
2828 NewShiftAmt = narrowIfNeeded(CurDAG, NewShiftAmt);
2829 else if (VT == MVT::i64 && NewShiftAmt->getValueType(0) == MVT::i32) {
2830 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, DL, MVT::i32);
2831 MachineSDNode *Ext = CurDAG->getMachineNode(
2832 AArch64::SUBREG_TO_REG, DL, VT,
2833 CurDAG->getTargetConstant(0, DL, MVT::i64), NewShiftAmt, SubReg);
2834 NewShiftAmt = SDValue(Ext, 0);
2835 }
2836
2837 SDValue Ops[] = {N->getOperand(0), NewShiftAmt};
2838 CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2839 return true;
2840 }
2841
2842 bool
SelectCVTFixedPosOperand(SDValue N,SDValue & FixedPos,unsigned RegWidth)2843 AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
2844 unsigned RegWidth) {
2845 APFloat FVal(0.0);
2846 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
2847 FVal = CN->getValueAPF();
2848 else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
2849 // Some otherwise illegal constants are allowed in this case.
2850 if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow ||
2851 !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
2852 return false;
2853
2854 ConstantPoolSDNode *CN =
2855 dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
2856 FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
2857 } else
2858 return false;
2859
2860 // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
2861 // is between 1 and 32 for a destination w-register, or 1 and 64 for an
2862 // x-register.
2863 //
2864 // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
2865 // want THIS_NODE to be 2^fbits. This is much easier to deal with using
2866 // integers.
2867 bool IsExact;
2868
2869 // fbits is between 1 and 64 in the worst-case, which means the fmul
2870 // could have 2^64 as an actual operand. Need 65 bits of precision.
2871 APSInt IntVal(65, true);
2872 FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);
2873
2874 // N.b. isPowerOf2 also checks for > 0.
2875 if (!IsExact || !IntVal.isPowerOf2()) return false;
2876 unsigned FBits = IntVal.logBase2();
2877
2878 // Checks above should have guaranteed that we haven't lost information in
2879 // finding FBits, but it must still be in range.
2880 if (FBits == 0 || FBits > RegWidth) return false;
2881
2882 FixedPos = CurDAG->getTargetConstant(FBits, SDLoc(N), MVT::i32);
2883 return true;
2884 }
2885
2886 // Inspects a register string of the form o0:op1:CRn:CRm:op2 gets the fields
2887 // of the string and obtains the integer values from them and combines these
2888 // into a single value to be used in the MRS/MSR instruction.
getIntOperandFromRegisterString(StringRef RegString)2889 static int getIntOperandFromRegisterString(StringRef RegString) {
2890 SmallVector<StringRef, 5> Fields;
2891 RegString.split(Fields, ':');
2892
2893 if (Fields.size() == 1)
2894 return -1;
2895
2896 assert(Fields.size() == 5
2897 && "Invalid number of fields in read register string");
2898
2899 SmallVector<int, 5> Ops;
2900 bool AllIntFields = true;
2901
2902 for (StringRef Field : Fields) {
2903 unsigned IntField;
2904 AllIntFields &= !Field.getAsInteger(10, IntField);
2905 Ops.push_back(IntField);
2906 }
2907
2908 assert(AllIntFields &&
2909 "Unexpected non-integer value in special register string.");
2910
2911 // Need to combine the integer fields of the string into a single value
2912 // based on the bit encoding of MRS/MSR instruction.
2913 return (Ops[0] << 14) | (Ops[1] << 11) | (Ops[2] << 7) |
2914 (Ops[3] << 3) | (Ops[4]);
2915 }
2916
2917 // Lower the read_register intrinsic to an MRS instruction node if the special
2918 // register string argument is either of the form detailed in the ALCE (the
2919 // form described in getIntOperandsFromRegsterString) or is a named register
2920 // known by the MRS SysReg mapper.
tryReadRegister(SDNode * N)2921 bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {
2922 const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
2923 const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
2924 SDLoc DL(N);
2925
2926 int Reg = getIntOperandFromRegisterString(RegString->getString());
2927 if (Reg != -1) {
2928 ReplaceNode(N, CurDAG->getMachineNode(
2929 AArch64::MRS, DL, N->getSimpleValueType(0), MVT::Other,
2930 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2931 N->getOperand(0)));
2932 return true;
2933 }
2934
2935 // Use the sysreg mapper to map the remaining possible strings to the
2936 // value for the register to be used for the instruction operand.
2937 auto TheReg = AArch64SysReg::lookupSysRegByName(RegString->getString());
2938 if (TheReg && TheReg->Readable &&
2939 TheReg->haveFeatures(Subtarget->getFeatureBits()))
2940 Reg = TheReg->Encoding;
2941 else
2942 Reg = AArch64SysReg::parseGenericRegister(RegString->getString());
2943
2944 if (Reg != -1) {
2945 ReplaceNode(N, CurDAG->getMachineNode(
2946 AArch64::MRS, DL, N->getSimpleValueType(0), MVT::Other,
2947 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2948 N->getOperand(0)));
2949 return true;
2950 }
2951
2952 if (RegString->getString() == "pc") {
2953 ReplaceNode(N, CurDAG->getMachineNode(
2954 AArch64::ADR, DL, N->getSimpleValueType(0), MVT::Other,
2955 CurDAG->getTargetConstant(0, DL, MVT::i32),
2956 N->getOperand(0)));
2957 return true;
2958 }
2959
2960 return false;
2961 }
2962
2963 // Lower the write_register intrinsic to an MSR instruction node if the special
2964 // register string argument is either of the form detailed in the ALCE (the
2965 // form described in getIntOperandsFromRegsterString) or is a named register
2966 // known by the MSR SysReg mapper.
tryWriteRegister(SDNode * N)2967 bool AArch64DAGToDAGISel::tryWriteRegister(SDNode *N) {
2968 const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
2969 const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
2970 SDLoc DL(N);
2971
2972 int Reg = getIntOperandFromRegisterString(RegString->getString());
2973 if (Reg != -1) {
2974 ReplaceNode(
2975 N, CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
2976 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2977 N->getOperand(2), N->getOperand(0)));
2978 return true;
2979 }
2980
2981 // Check if the register was one of those allowed as the pstatefield value in
2982 // the MSR (immediate) instruction. To accept the values allowed in the
2983 // pstatefield for the MSR (immediate) instruction, we also require that an
2984 // immediate value has been provided as an argument, we know that this is
2985 // the case as it has been ensured by semantic checking.
2986 auto PMapper = AArch64PState::lookupPStateByName(RegString->getString());
2987 if (PMapper) {
2988 assert (isa<ConstantSDNode>(N->getOperand(2))
2989 && "Expected a constant integer expression.");
2990 unsigned Reg = PMapper->Encoding;
2991 uint64_t Immed = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
2992 unsigned State;
2993 if (Reg == AArch64PState::PAN || Reg == AArch64PState::UAO || Reg == AArch64PState::SSBS) {
2994 assert(Immed < 2 && "Bad imm");
2995 State = AArch64::MSRpstateImm1;
2996 } else {
2997 assert(Immed < 16 && "Bad imm");
2998 State = AArch64::MSRpstateImm4;
2999 }
3000 ReplaceNode(N, CurDAG->getMachineNode(
3001 State, DL, MVT::Other,
3002 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
3003 CurDAG->getTargetConstant(Immed, DL, MVT::i16),
3004 N->getOperand(0)));
3005 return true;
3006 }
3007
3008 // Use the sysreg mapper to attempt to map the remaining possible strings
3009 // to the value for the register to be used for the MSR (register)
3010 // instruction operand.
3011 auto TheReg = AArch64SysReg::lookupSysRegByName(RegString->getString());
3012 if (TheReg && TheReg->Writeable &&
3013 TheReg->haveFeatures(Subtarget->getFeatureBits()))
3014 Reg = TheReg->Encoding;
3015 else
3016 Reg = AArch64SysReg::parseGenericRegister(RegString->getString());
3017 if (Reg != -1) {
3018 ReplaceNode(N, CurDAG->getMachineNode(
3019 AArch64::MSR, DL, MVT::Other,
3020 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
3021 N->getOperand(2), N->getOperand(0)));
3022 return true;
3023 }
3024
3025 return false;
3026 }
3027
3028 /// We've got special pseudo-instructions for these
SelectCMP_SWAP(SDNode * N)3029 bool AArch64DAGToDAGISel::SelectCMP_SWAP(SDNode *N) {
3030 unsigned Opcode;
3031 EVT MemTy = cast<MemSDNode>(N)->getMemoryVT();
3032
3033 // Leave IR for LSE if subtarget supports it.
3034 if (Subtarget->hasLSE()) return false;
3035
3036 if (MemTy == MVT::i8)
3037 Opcode = AArch64::CMP_SWAP_8;
3038 else if (MemTy == MVT::i16)
3039 Opcode = AArch64::CMP_SWAP_16;
3040 else if (MemTy == MVT::i32)
3041 Opcode = AArch64::CMP_SWAP_32;
3042 else if (MemTy == MVT::i64)
3043 Opcode = AArch64::CMP_SWAP_64;
3044 else
3045 llvm_unreachable("Unknown AtomicCmpSwap type");
3046
3047 MVT RegTy = MemTy == MVT::i64 ? MVT::i64 : MVT::i32;
3048 SDValue Ops[] = {N->getOperand(1), N->getOperand(2), N->getOperand(3),
3049 N->getOperand(0)};
3050 SDNode *CmpSwap = CurDAG->getMachineNode(
3051 Opcode, SDLoc(N),
3052 CurDAG->getVTList(RegTy, MVT::i32, MVT::Other), Ops);
3053
3054 MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
3055 CurDAG->setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});
3056
3057 ReplaceUses(SDValue(N, 0), SDValue(CmpSwap, 0));
3058 ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 2));
3059 CurDAG->RemoveDeadNode(N);
3060
3061 return true;
3062 }
3063
SelectSVE8BitLslImm(SDValue N,SDValue & Base,SDValue & Offset)3064 bool AArch64DAGToDAGISel::SelectSVE8BitLslImm(SDValue N, SDValue &Base,
3065 SDValue &Offset) {
3066 auto C = dyn_cast<ConstantSDNode>(N);
3067 if (!C)
3068 return false;
3069
3070 auto Ty = N->getValueType(0);
3071
3072 int64_t Imm = C->getSExtValue();
3073 SDLoc DL(N);
3074
3075 if ((Imm >= -128) && (Imm <= 127)) {
3076 Base = CurDAG->getTargetConstant(Imm, DL, Ty);
3077 Offset = CurDAG->getTargetConstant(0, DL, Ty);
3078 return true;
3079 }
3080
3081 if (((Imm % 256) == 0) && (Imm >= -32768) && (Imm <= 32512)) {
3082 Base = CurDAG->getTargetConstant(Imm/256, DL, Ty);
3083 Offset = CurDAG->getTargetConstant(8, DL, Ty);
3084 return true;
3085 }
3086
3087 return false;
3088 }
3089
SelectSVEAddSubImm(SDValue N,MVT VT,SDValue & Imm,SDValue & Shift)3090 bool AArch64DAGToDAGISel::SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift) {
3091 if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
3092 const int64_t ImmVal = CNode->getZExtValue();
3093 SDLoc DL(N);
3094
3095 switch (VT.SimpleTy) {
3096 case MVT::i8:
3097 if ((ImmVal & 0xFF) == ImmVal) {
3098 Shift = CurDAG->getTargetConstant(0, DL, MVT::i32);
3099 Imm = CurDAG->getTargetConstant(ImmVal, DL, MVT::i32);
3100 return true;
3101 }
3102 break;
3103 case MVT::i16:
3104 case MVT::i32:
3105 case MVT::i64:
3106 if ((ImmVal & 0xFF) == ImmVal) {
3107 Shift = CurDAG->getTargetConstant(0, DL, MVT::i32);
3108 Imm = CurDAG->getTargetConstant(ImmVal, DL, MVT::i32);
3109 return true;
3110 } else if ((ImmVal & 0xFF00) == ImmVal) {
3111 Shift = CurDAG->getTargetConstant(8, DL, MVT::i32);
3112 Imm = CurDAG->getTargetConstant(ImmVal >> 8, DL, MVT::i32);
3113 return true;
3114 }
3115 break;
3116 default:
3117 break;
3118 }
3119 }
3120
3121 return false;
3122 }
3123
SelectSVESignedArithImm(SDValue N,SDValue & Imm)3124 bool AArch64DAGToDAGISel::SelectSVESignedArithImm(SDValue N, SDValue &Imm) {
3125 if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
3126 int64_t ImmVal = CNode->getSExtValue();
3127 SDLoc DL(N);
3128 if (ImmVal >= -128 && ImmVal < 128) {
3129 Imm = CurDAG->getTargetConstant(ImmVal, DL, MVT::i32);
3130 return true;
3131 }
3132 }
3133 return false;
3134 }
3135
SelectSVEArithImm(SDValue N,MVT VT,SDValue & Imm)3136 bool AArch64DAGToDAGISel::SelectSVEArithImm(SDValue N, MVT VT, SDValue &Imm) {
3137 if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
3138 uint64_t ImmVal = CNode->getZExtValue();
3139
3140 switch (VT.SimpleTy) {
3141 case MVT::i8:
3142 ImmVal &= 0xFF;
3143 break;
3144 case MVT::i16:
3145 ImmVal &= 0xFFFF;
3146 break;
3147 case MVT::i32:
3148 ImmVal &= 0xFFFFFFFF;
3149 break;
3150 case MVT::i64:
3151 break;
3152 default:
3153 llvm_unreachable("Unexpected type");
3154 }
3155
3156 if (ImmVal < 256) {
3157 Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), MVT::i32);
3158 return true;
3159 }
3160 }
3161 return false;
3162 }
3163
SelectSVELogicalImm(SDValue N,MVT VT,SDValue & Imm)3164 bool AArch64DAGToDAGISel::SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm) {
3165 if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
3166 uint64_t ImmVal = CNode->getZExtValue();
3167 SDLoc DL(N);
3168
3169 // Shift mask depending on type size.
3170 switch (VT.SimpleTy) {
3171 case MVT::i8:
3172 ImmVal &= 0xFF;
3173 ImmVal |= ImmVal << 8;
3174 ImmVal |= ImmVal << 16;
3175 ImmVal |= ImmVal << 32;
3176 break;
3177 case MVT::i16:
3178 ImmVal &= 0xFFFF;
3179 ImmVal |= ImmVal << 16;
3180 ImmVal |= ImmVal << 32;
3181 break;
3182 case MVT::i32:
3183 ImmVal &= 0xFFFFFFFF;
3184 ImmVal |= ImmVal << 32;
3185 break;
3186 case MVT::i64:
3187 break;
3188 default:
3189 llvm_unreachable("Unexpected type");
3190 }
3191
3192 uint64_t encoding;
3193 if (AArch64_AM::processLogicalImmediate(ImmVal, 64, encoding)) {
3194 Imm = CurDAG->getTargetConstant(encoding, DL, MVT::i64);
3195 return true;
3196 }
3197 }
3198 return false;
3199 }
3200
3201 // SVE shift intrinsics allow shift amounts larger than the element's bitwidth.
3202 // Rather than attempt to normalise everything we can sometimes saturate the
3203 // shift amount during selection. This function also allows for consistent
3204 // isel patterns by ensuring the resulting "Imm" node is of the i32 type
3205 // required by the instructions.
SelectSVEShiftImm(SDValue N,uint64_t Low,uint64_t High,bool AllowSaturation,SDValue & Imm)3206 bool AArch64DAGToDAGISel::SelectSVEShiftImm(SDValue N, uint64_t Low,
3207 uint64_t High, bool AllowSaturation,
3208 SDValue &Imm) {
3209 if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
3210 uint64_t ImmVal = CN->getZExtValue();
3211
3212 // Reject shift amounts that are too small.
3213 if (ImmVal < Low)
3214 return false;
3215
3216 // Reject or saturate shift amounts that are too big.
3217 if (ImmVal > High) {
3218 if (!AllowSaturation)
3219 return false;
3220 ImmVal = High;
3221 }
3222
3223 Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), MVT::i32);
3224 return true;
3225 }
3226
3227 return false;
3228 }
3229
trySelectStackSlotTagP(SDNode * N)3230 bool AArch64DAGToDAGISel::trySelectStackSlotTagP(SDNode *N) {
3231 // tagp(FrameIndex, IRGstack, tag_offset):
3232 // since the offset between FrameIndex and IRGstack is a compile-time
3233 // constant, this can be lowered to a single ADDG instruction.
3234 if (!(isa<FrameIndexSDNode>(N->getOperand(1)))) {
3235 return false;
3236 }
3237
3238 SDValue IRG_SP = N->getOperand(2);
3239 if (IRG_SP->getOpcode() != ISD::INTRINSIC_W_CHAIN ||
3240 cast<ConstantSDNode>(IRG_SP->getOperand(1))->getZExtValue() !=
3241 Intrinsic::aarch64_irg_sp) {
3242 return false;
3243 }
3244
3245 const TargetLowering *TLI = getTargetLowering();
3246 SDLoc DL(N);
3247 int FI = cast<FrameIndexSDNode>(N->getOperand(1))->getIndex();
3248 SDValue FiOp = CurDAG->getTargetFrameIndex(
3249 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
3250 int TagOffset = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
3251
3252 SDNode *Out = CurDAG->getMachineNode(
3253 AArch64::TAGPstack, DL, MVT::i64,
3254 {FiOp, CurDAG->getTargetConstant(0, DL, MVT::i64), N->getOperand(2),
3255 CurDAG->getTargetConstant(TagOffset, DL, MVT::i64)});
3256 ReplaceNode(N, Out);
3257 return true;
3258 }
3259
SelectTagP(SDNode * N)3260 void AArch64DAGToDAGISel::SelectTagP(SDNode *N) {
3261 assert(isa<ConstantSDNode>(N->getOperand(3)) &&
3262 "llvm.aarch64.tagp third argument must be an immediate");
3263 if (trySelectStackSlotTagP(N))
3264 return;
3265 // FIXME: above applies in any case when offset between Op1 and Op2 is a
3266 // compile-time constant, not just for stack allocations.
3267
3268 // General case for unrelated pointers in Op1 and Op2.
3269 SDLoc DL(N);
3270 int TagOffset = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
3271 SDNode *N1 = CurDAG->getMachineNode(AArch64::SUBP, DL, MVT::i64,
3272 {N->getOperand(1), N->getOperand(2)});
3273 SDNode *N2 = CurDAG->getMachineNode(AArch64::ADDXrr, DL, MVT::i64,
3274 {SDValue(N1, 0), N->getOperand(2)});
3275 SDNode *N3 = CurDAG->getMachineNode(
3276 AArch64::ADDG, DL, MVT::i64,
3277 {SDValue(N2, 0), CurDAG->getTargetConstant(0, DL, MVT::i64),
3278 CurDAG->getTargetConstant(TagOffset, DL, MVT::i64)});
3279 ReplaceNode(N, N3);
3280 }
3281
3282 // NOTE: We cannot use EXTRACT_SUBREG in all cases because the fixed length
3283 // vector types larger than NEON don't have a matching SubRegIndex.
extractSubReg(SelectionDAG * DAG,EVT VT,SDValue V)3284 static SDNode *extractSubReg(SelectionDAG *DAG, EVT VT, SDValue V) {
3285 assert(V.getValueType().isScalableVector() &&
3286 V.getValueType().getSizeInBits().getKnownMinSize() ==
3287 AArch64::SVEBitsPerBlock &&
3288 "Expected to extract from a packed scalable vector!");
3289 assert(VT.isFixedLengthVector() &&
3290 "Expected to extract a fixed length vector!");
3291
3292 SDLoc DL(V);
3293 switch (VT.getSizeInBits()) {
3294 case 64: {
3295 auto SubReg = DAG->getTargetConstant(AArch64::dsub, DL, MVT::i32);
3296 return DAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, VT, V, SubReg);
3297 }
3298 case 128: {
3299 auto SubReg = DAG->getTargetConstant(AArch64::zsub, DL, MVT::i32);
3300 return DAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, VT, V, SubReg);
3301 }
3302 default: {
3303 auto RC = DAG->getTargetConstant(AArch64::ZPRRegClassID, DL, MVT::i64);
3304 return DAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
3305 }
3306 }
3307 }
3308
3309 // NOTE: We cannot use INSERT_SUBREG in all cases because the fixed length
3310 // vector types larger than NEON don't have a matching SubRegIndex.
insertSubReg(SelectionDAG * DAG,EVT VT,SDValue V)3311 static SDNode *insertSubReg(SelectionDAG *DAG, EVT VT, SDValue V) {
3312 assert(VT.isScalableVector() &&
3313 VT.getSizeInBits().getKnownMinSize() == AArch64::SVEBitsPerBlock &&
3314 "Expected to insert into a packed scalable vector!");
3315 assert(V.getValueType().isFixedLengthVector() &&
3316 "Expected to insert a fixed length vector!");
3317
3318 SDLoc DL(V);
3319 switch (V.getValueType().getSizeInBits()) {
3320 case 64: {
3321 auto SubReg = DAG->getTargetConstant(AArch64::dsub, DL, MVT::i32);
3322 auto Container = DAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, VT);
3323 return DAG->getMachineNode(TargetOpcode::INSERT_SUBREG, DL, VT,
3324 SDValue(Container, 0), V, SubReg);
3325 }
3326 case 128: {
3327 auto SubReg = DAG->getTargetConstant(AArch64::zsub, DL, MVT::i32);
3328 auto Container = DAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, VT);
3329 return DAG->getMachineNode(TargetOpcode::INSERT_SUBREG, DL, VT,
3330 SDValue(Container, 0), V, SubReg);
3331 }
3332 default: {
3333 auto RC = DAG->getTargetConstant(AArch64::ZPRRegClassID, DL, MVT::i64);
3334 return DAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
3335 }
3336 }
3337 }
3338
Select(SDNode * Node)3339 void AArch64DAGToDAGISel::Select(SDNode *Node) {
3340 // If we have a custom node, we already have selected!
3341 if (Node->isMachineOpcode()) {
3342 LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
3343 Node->setNodeId(-1);
3344 return;
3345 }
3346
3347 // Few custom selection stuff.
3348 EVT VT = Node->getValueType(0);
3349
3350 switch (Node->getOpcode()) {
3351 default:
3352 break;
3353
3354 case ISD::ATOMIC_CMP_SWAP:
3355 if (SelectCMP_SWAP(Node))
3356 return;
3357 break;
3358
3359 case ISD::READ_REGISTER:
3360 if (tryReadRegister(Node))
3361 return;
3362 break;
3363
3364 case ISD::WRITE_REGISTER:
3365 if (tryWriteRegister(Node))
3366 return;
3367 break;
3368
3369 case ISD::ADD:
3370 if (tryMLAV64LaneV128(Node))
3371 return;
3372 break;
3373
3374 case ISD::LOAD: {
3375 // Try to select as an indexed load. Fall through to normal processing
3376 // if we can't.
3377 if (tryIndexedLoad(Node))
3378 return;
3379 break;
3380 }
3381
3382 case ISD::SRL:
3383 case ISD::AND:
3384 case ISD::SRA:
3385 case ISD::SIGN_EXTEND_INREG:
3386 if (tryBitfieldExtractOp(Node))
3387 return;
3388 if (tryBitfieldInsertInZeroOp(Node))
3389 return;
3390 LLVM_FALLTHROUGH;
3391 case ISD::ROTR:
3392 case ISD::SHL:
3393 if (tryShiftAmountMod(Node))
3394 return;
3395 break;
3396
3397 case ISD::SIGN_EXTEND:
3398 if (tryBitfieldExtractOpFromSExt(Node))
3399 return;
3400 break;
3401
3402 case ISD::FP_EXTEND:
3403 if (tryHighFPExt(Node))
3404 return;
3405 break;
3406
3407 case ISD::OR:
3408 if (tryBitfieldInsertOp(Node))
3409 return;
3410 break;
3411
3412 case ISD::EXTRACT_SUBVECTOR: {
3413 // Bail when not a "cast" like extract_subvector.
3414 if (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue() != 0)
3415 break;
3416
3417 // Bail when normal isel can do the job.
3418 EVT InVT = Node->getOperand(0).getValueType();
3419 if (VT.isScalableVector() || InVT.isFixedLengthVector())
3420 break;
3421
3422 // NOTE: We can only get here when doing fixed length SVE code generation.
3423 // We do manual selection because the types involved are not linked to real
3424 // registers (despite being legal) and must be coerced into SVE registers.
3425 //
3426 // NOTE: If the above changes, be aware that selection will still not work
3427 // because the td definition of extract_vector does not support extracting
3428 // a fixed length vector from a scalable vector.
3429
3430 ReplaceNode(Node, extractSubReg(CurDAG, VT, Node->getOperand(0)));
3431 return;
3432 }
3433
3434 case ISD::INSERT_SUBVECTOR: {
3435 // Bail when not a "cast" like insert_subvector.
3436 if (cast<ConstantSDNode>(Node->getOperand(2))->getZExtValue() != 0)
3437 break;
3438 if (!Node->getOperand(0).isUndef())
3439 break;
3440
3441 // Bail when normal isel should do the job.
3442 EVT InVT = Node->getOperand(1).getValueType();
3443 if (VT.isFixedLengthVector() || InVT.isScalableVector())
3444 break;
3445
3446 // NOTE: We can only get here when doing fixed length SVE code generation.
3447 // We do manual selection because the types involved are not linked to real
3448 // registers (despite being legal) and must be coerced into SVE registers.
3449 //
3450 // NOTE: If the above changes, be aware that selection will still not work
3451 // because the td definition of insert_vector does not support inserting a
3452 // fixed length vector into a scalable vector.
3453
3454 ReplaceNode(Node, insertSubReg(CurDAG, VT, Node->getOperand(1)));
3455 return;
3456 }
3457
3458 case ISD::Constant: {
3459 // Materialize zero constants as copies from WZR/XZR. This allows
3460 // the coalescer to propagate these into other instructions.
3461 ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
3462 if (ConstNode->isNullValue()) {
3463 if (VT == MVT::i32) {
3464 SDValue New = CurDAG->getCopyFromReg(
3465 CurDAG->getEntryNode(), SDLoc(Node), AArch64::WZR, MVT::i32);
3466 ReplaceNode(Node, New.getNode());
3467 return;
3468 } else if (VT == MVT::i64) {
3469 SDValue New = CurDAG->getCopyFromReg(
3470 CurDAG->getEntryNode(), SDLoc(Node), AArch64::XZR, MVT::i64);
3471 ReplaceNode(Node, New.getNode());
3472 return;
3473 }
3474 }
3475 break;
3476 }
3477
3478 case ISD::FrameIndex: {
3479 // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
3480 int FI = cast<FrameIndexSDNode>(Node)->getIndex();
3481 unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
3482 const TargetLowering *TLI = getTargetLowering();
3483 SDValue TFI = CurDAG->getTargetFrameIndex(
3484 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
3485 SDLoc DL(Node);
3486 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, DL, MVT::i32),
3487 CurDAG->getTargetConstant(Shifter, DL, MVT::i32) };
3488 CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
3489 return;
3490 }
3491 case ISD::INTRINSIC_W_CHAIN: {
3492 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
3493 switch (IntNo) {
3494 default:
3495 break;
3496 case Intrinsic::aarch64_ldaxp:
3497 case Intrinsic::aarch64_ldxp: {
3498 unsigned Op =
3499 IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
3500 SDValue MemAddr = Node->getOperand(2);
3501 SDLoc DL(Node);
3502 SDValue Chain = Node->getOperand(0);
3503
3504 SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
3505 MVT::Other, MemAddr, Chain);
3506
3507 // Transfer memoperands.
3508 MachineMemOperand *MemOp =
3509 cast<MemIntrinsicSDNode>(Node)->getMemOperand();
3510 CurDAG->setNodeMemRefs(cast<MachineSDNode>(Ld), {MemOp});
3511 ReplaceNode(Node, Ld);
3512 return;
3513 }
3514 case Intrinsic::aarch64_stlxp:
3515 case Intrinsic::aarch64_stxp: {
3516 unsigned Op =
3517 IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
3518 SDLoc DL(Node);
3519 SDValue Chain = Node->getOperand(0);
3520 SDValue ValLo = Node->getOperand(2);
3521 SDValue ValHi = Node->getOperand(3);
3522 SDValue MemAddr = Node->getOperand(4);
3523
3524 // Place arguments in the right order.
3525 SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};
3526
3527 SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
3528 // Transfer memoperands.
3529 MachineMemOperand *MemOp =
3530 cast<MemIntrinsicSDNode>(Node)->getMemOperand();
3531 CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});
3532
3533 ReplaceNode(Node, St);
3534 return;
3535 }
3536 case Intrinsic::aarch64_neon_ld1x2:
3537 if (VT == MVT::v8i8) {
3538 SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
3539 return;
3540 } else if (VT == MVT::v16i8) {
3541 SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
3542 return;
3543 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3544 SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
3545 return;
3546 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3547 SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
3548 return;
3549 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3550 SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
3551 return;
3552 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3553 SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
3554 return;
3555 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3556 SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
3557 return;
3558 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3559 SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
3560 return;
3561 }
3562 break;
3563 case Intrinsic::aarch64_neon_ld1x3:
3564 if (VT == MVT::v8i8) {
3565 SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
3566 return;
3567 } else if (VT == MVT::v16i8) {
3568 SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
3569 return;
3570 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3571 SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
3572 return;
3573 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3574 SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
3575 return;
3576 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3577 SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
3578 return;
3579 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3580 SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
3581 return;
3582 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3583 SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
3584 return;
3585 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3586 SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
3587 return;
3588 }
3589 break;
3590 case Intrinsic::aarch64_neon_ld1x4:
3591 if (VT == MVT::v8i8) {
3592 SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
3593 return;
3594 } else if (VT == MVT::v16i8) {
3595 SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
3596 return;
3597 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3598 SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
3599 return;
3600 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3601 SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
3602 return;
3603 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3604 SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
3605 return;
3606 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3607 SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
3608 return;
3609 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3610 SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
3611 return;
3612 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3613 SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
3614 return;
3615 }
3616 break;
3617 case Intrinsic::aarch64_neon_ld2:
3618 if (VT == MVT::v8i8) {
3619 SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
3620 return;
3621 } else if (VT == MVT::v16i8) {
3622 SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
3623 return;
3624 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3625 SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
3626 return;
3627 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3628 SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
3629 return;
3630 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3631 SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
3632 return;
3633 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3634 SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
3635 return;
3636 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3637 SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
3638 return;
3639 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3640 SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
3641 return;
3642 }
3643 break;
3644 case Intrinsic::aarch64_neon_ld3:
3645 if (VT == MVT::v8i8) {
3646 SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
3647 return;
3648 } else if (VT == MVT::v16i8) {
3649 SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
3650 return;
3651 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3652 SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
3653 return;
3654 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3655 SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
3656 return;
3657 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3658 SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
3659 return;
3660 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3661 SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
3662 return;
3663 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3664 SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
3665 return;
3666 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3667 SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
3668 return;
3669 }
3670 break;
3671 case Intrinsic::aarch64_neon_ld4:
3672 if (VT == MVT::v8i8) {
3673 SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
3674 return;
3675 } else if (VT == MVT::v16i8) {
3676 SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
3677 return;
3678 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3679 SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
3680 return;
3681 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3682 SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
3683 return;
3684 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3685 SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
3686 return;
3687 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3688 SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
3689 return;
3690 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3691 SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
3692 return;
3693 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3694 SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
3695 return;
3696 }
3697 break;
3698 case Intrinsic::aarch64_neon_ld2r:
3699 if (VT == MVT::v8i8) {
3700 SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
3701 return;
3702 } else if (VT == MVT::v16i8) {
3703 SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
3704 return;
3705 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3706 SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
3707 return;
3708 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3709 SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
3710 return;
3711 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3712 SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
3713 return;
3714 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3715 SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
3716 return;
3717 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3718 SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
3719 return;
3720 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3721 SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
3722 return;
3723 }
3724 break;
3725 case Intrinsic::aarch64_neon_ld3r:
3726 if (VT == MVT::v8i8) {
3727 SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
3728 return;
3729 } else if (VT == MVT::v16i8) {
3730 SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
3731 return;
3732 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3733 SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
3734 return;
3735 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3736 SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
3737 return;
3738 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3739 SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
3740 return;
3741 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3742 SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
3743 return;
3744 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3745 SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
3746 return;
3747 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3748 SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
3749 return;
3750 }
3751 break;
3752 case Intrinsic::aarch64_neon_ld4r:
3753 if (VT == MVT::v8i8) {
3754 SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
3755 return;
3756 } else if (VT == MVT::v16i8) {
3757 SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
3758 return;
3759 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
3760 SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
3761 return;
3762 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
3763 SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
3764 return;
3765 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3766 SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
3767 return;
3768 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3769 SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
3770 return;
3771 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3772 SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
3773 return;
3774 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3775 SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
3776 return;
3777 }
3778 break;
3779 case Intrinsic::aarch64_neon_ld2lane:
3780 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
3781 SelectLoadLane(Node, 2, AArch64::LD2i8);
3782 return;
3783 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3784 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
3785 SelectLoadLane(Node, 2, AArch64::LD2i16);
3786 return;
3787 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3788 VT == MVT::v2f32) {
3789 SelectLoadLane(Node, 2, AArch64::LD2i32);
3790 return;
3791 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3792 VT == MVT::v1f64) {
3793 SelectLoadLane(Node, 2, AArch64::LD2i64);
3794 return;
3795 }
3796 break;
3797 case Intrinsic::aarch64_neon_ld3lane:
3798 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
3799 SelectLoadLane(Node, 3, AArch64::LD3i8);
3800 return;
3801 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3802 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
3803 SelectLoadLane(Node, 3, AArch64::LD3i16);
3804 return;
3805 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3806 VT == MVT::v2f32) {
3807 SelectLoadLane(Node, 3, AArch64::LD3i32);
3808 return;
3809 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3810 VT == MVT::v1f64) {
3811 SelectLoadLane(Node, 3, AArch64::LD3i64);
3812 return;
3813 }
3814 break;
3815 case Intrinsic::aarch64_neon_ld4lane:
3816 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
3817 SelectLoadLane(Node, 4, AArch64::LD4i8);
3818 return;
3819 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3820 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
3821 SelectLoadLane(Node, 4, AArch64::LD4i16);
3822 return;
3823 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3824 VT == MVT::v2f32) {
3825 SelectLoadLane(Node, 4, AArch64::LD4i32);
3826 return;
3827 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3828 VT == MVT::v1f64) {
3829 SelectLoadLane(Node, 4, AArch64::LD4i64);
3830 return;
3831 }
3832 break;
3833 }
3834 } break;
3835 case ISD::INTRINSIC_WO_CHAIN: {
3836 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
3837 switch (IntNo) {
3838 default:
3839 break;
3840 case Intrinsic::aarch64_tagp:
3841 SelectTagP(Node);
3842 return;
3843 case Intrinsic::aarch64_neon_tbl2:
3844 SelectTable(Node, 2,
3845 VT == MVT::v8i8 ? AArch64::TBLv8i8Two : AArch64::TBLv16i8Two,
3846 false);
3847 return;
3848 case Intrinsic::aarch64_neon_tbl3:
3849 SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
3850 : AArch64::TBLv16i8Three,
3851 false);
3852 return;
3853 case Intrinsic::aarch64_neon_tbl4:
3854 SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
3855 : AArch64::TBLv16i8Four,
3856 false);
3857 return;
3858 case Intrinsic::aarch64_neon_tbx2:
3859 SelectTable(Node, 2,
3860 VT == MVT::v8i8 ? AArch64::TBXv8i8Two : AArch64::TBXv16i8Two,
3861 true);
3862 return;
3863 case Intrinsic::aarch64_neon_tbx3:
3864 SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
3865 : AArch64::TBXv16i8Three,
3866 true);
3867 return;
3868 case Intrinsic::aarch64_neon_tbx4:
3869 SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
3870 : AArch64::TBXv16i8Four,
3871 true);
3872 return;
3873 case Intrinsic::aarch64_neon_smull:
3874 case Intrinsic::aarch64_neon_umull:
3875 if (tryMULLV64LaneV128(IntNo, Node))
3876 return;
3877 break;
3878 }
3879 break;
3880 }
3881 case ISD::INTRINSIC_VOID: {
3882 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
3883 if (Node->getNumOperands() >= 3)
3884 VT = Node->getOperand(2)->getValueType(0);
3885 switch (IntNo) {
3886 default:
3887 break;
3888 case Intrinsic::aarch64_neon_st1x2: {
3889 if (VT == MVT::v8i8) {
3890 SelectStore(Node, 2, AArch64::ST1Twov8b);
3891 return;
3892 } else if (VT == MVT::v16i8) {
3893 SelectStore(Node, 2, AArch64::ST1Twov16b);
3894 return;
3895 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
3896 VT == MVT::v4bf16) {
3897 SelectStore(Node, 2, AArch64::ST1Twov4h);
3898 return;
3899 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
3900 VT == MVT::v8bf16) {
3901 SelectStore(Node, 2, AArch64::ST1Twov8h);
3902 return;
3903 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3904 SelectStore(Node, 2, AArch64::ST1Twov2s);
3905 return;
3906 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3907 SelectStore(Node, 2, AArch64::ST1Twov4s);
3908 return;
3909 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3910 SelectStore(Node, 2, AArch64::ST1Twov2d);
3911 return;
3912 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3913 SelectStore(Node, 2, AArch64::ST1Twov1d);
3914 return;
3915 }
3916 break;
3917 }
3918 case Intrinsic::aarch64_neon_st1x3: {
3919 if (VT == MVT::v8i8) {
3920 SelectStore(Node, 3, AArch64::ST1Threev8b);
3921 return;
3922 } else if (VT == MVT::v16i8) {
3923 SelectStore(Node, 3, AArch64::ST1Threev16b);
3924 return;
3925 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
3926 VT == MVT::v4bf16) {
3927 SelectStore(Node, 3, AArch64::ST1Threev4h);
3928 return;
3929 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
3930 VT == MVT::v8bf16) {
3931 SelectStore(Node, 3, AArch64::ST1Threev8h);
3932 return;
3933 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3934 SelectStore(Node, 3, AArch64::ST1Threev2s);
3935 return;
3936 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3937 SelectStore(Node, 3, AArch64::ST1Threev4s);
3938 return;
3939 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3940 SelectStore(Node, 3, AArch64::ST1Threev2d);
3941 return;
3942 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3943 SelectStore(Node, 3, AArch64::ST1Threev1d);
3944 return;
3945 }
3946 break;
3947 }
3948 case Intrinsic::aarch64_neon_st1x4: {
3949 if (VT == MVT::v8i8) {
3950 SelectStore(Node, 4, AArch64::ST1Fourv8b);
3951 return;
3952 } else if (VT == MVT::v16i8) {
3953 SelectStore(Node, 4, AArch64::ST1Fourv16b);
3954 return;
3955 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
3956 VT == MVT::v4bf16) {
3957 SelectStore(Node, 4, AArch64::ST1Fourv4h);
3958 return;
3959 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
3960 VT == MVT::v8bf16) {
3961 SelectStore(Node, 4, AArch64::ST1Fourv8h);
3962 return;
3963 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3964 SelectStore(Node, 4, AArch64::ST1Fourv2s);
3965 return;
3966 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3967 SelectStore(Node, 4, AArch64::ST1Fourv4s);
3968 return;
3969 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
3970 SelectStore(Node, 4, AArch64::ST1Fourv2d);
3971 return;
3972 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
3973 SelectStore(Node, 4, AArch64::ST1Fourv1d);
3974 return;
3975 }
3976 break;
3977 }
3978 case Intrinsic::aarch64_neon_st2: {
3979 if (VT == MVT::v8i8) {
3980 SelectStore(Node, 2, AArch64::ST2Twov8b);
3981 return;
3982 } else if (VT == MVT::v16i8) {
3983 SelectStore(Node, 2, AArch64::ST2Twov16b);
3984 return;
3985 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
3986 VT == MVT::v4bf16) {
3987 SelectStore(Node, 2, AArch64::ST2Twov4h);
3988 return;
3989 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
3990 VT == MVT::v8bf16) {
3991 SelectStore(Node, 2, AArch64::ST2Twov8h);
3992 return;
3993 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
3994 SelectStore(Node, 2, AArch64::ST2Twov2s);
3995 return;
3996 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
3997 SelectStore(Node, 2, AArch64::ST2Twov4s);
3998 return;
3999 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4000 SelectStore(Node, 2, AArch64::ST2Twov2d);
4001 return;
4002 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4003 SelectStore(Node, 2, AArch64::ST1Twov1d);
4004 return;
4005 }
4006 break;
4007 }
4008 case Intrinsic::aarch64_neon_st3: {
4009 if (VT == MVT::v8i8) {
4010 SelectStore(Node, 3, AArch64::ST3Threev8b);
4011 return;
4012 } else if (VT == MVT::v16i8) {
4013 SelectStore(Node, 3, AArch64::ST3Threev16b);
4014 return;
4015 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
4016 VT == MVT::v4bf16) {
4017 SelectStore(Node, 3, AArch64::ST3Threev4h);
4018 return;
4019 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
4020 VT == MVT::v8bf16) {
4021 SelectStore(Node, 3, AArch64::ST3Threev8h);
4022 return;
4023 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4024 SelectStore(Node, 3, AArch64::ST3Threev2s);
4025 return;
4026 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4027 SelectStore(Node, 3, AArch64::ST3Threev4s);
4028 return;
4029 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4030 SelectStore(Node, 3, AArch64::ST3Threev2d);
4031 return;
4032 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4033 SelectStore(Node, 3, AArch64::ST1Threev1d);
4034 return;
4035 }
4036 break;
4037 }
4038 case Intrinsic::aarch64_neon_st4: {
4039 if (VT == MVT::v8i8) {
4040 SelectStore(Node, 4, AArch64::ST4Fourv8b);
4041 return;
4042 } else if (VT == MVT::v16i8) {
4043 SelectStore(Node, 4, AArch64::ST4Fourv16b);
4044 return;
4045 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 ||
4046 VT == MVT::v4bf16) {
4047 SelectStore(Node, 4, AArch64::ST4Fourv4h);
4048 return;
4049 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 ||
4050 VT == MVT::v8bf16) {
4051 SelectStore(Node, 4, AArch64::ST4Fourv8h);
4052 return;
4053 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4054 SelectStore(Node, 4, AArch64::ST4Fourv2s);
4055 return;
4056 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4057 SelectStore(Node, 4, AArch64::ST4Fourv4s);
4058 return;
4059 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4060 SelectStore(Node, 4, AArch64::ST4Fourv2d);
4061 return;
4062 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4063 SelectStore(Node, 4, AArch64::ST1Fourv1d);
4064 return;
4065 }
4066 break;
4067 }
4068 case Intrinsic::aarch64_neon_st2lane: {
4069 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4070 SelectStoreLane(Node, 2, AArch64::ST2i8);
4071 return;
4072 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4073 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4074 SelectStoreLane(Node, 2, AArch64::ST2i16);
4075 return;
4076 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4077 VT == MVT::v2f32) {
4078 SelectStoreLane(Node, 2, AArch64::ST2i32);
4079 return;
4080 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4081 VT == MVT::v1f64) {
4082 SelectStoreLane(Node, 2, AArch64::ST2i64);
4083 return;
4084 }
4085 break;
4086 }
4087 case Intrinsic::aarch64_neon_st3lane: {
4088 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4089 SelectStoreLane(Node, 3, AArch64::ST3i8);
4090 return;
4091 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4092 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4093 SelectStoreLane(Node, 3, AArch64::ST3i16);
4094 return;
4095 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4096 VT == MVT::v2f32) {
4097 SelectStoreLane(Node, 3, AArch64::ST3i32);
4098 return;
4099 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4100 VT == MVT::v1f64) {
4101 SelectStoreLane(Node, 3, AArch64::ST3i64);
4102 return;
4103 }
4104 break;
4105 }
4106 case Intrinsic::aarch64_neon_st4lane: {
4107 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4108 SelectStoreLane(Node, 4, AArch64::ST4i8);
4109 return;
4110 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4111 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4112 SelectStoreLane(Node, 4, AArch64::ST4i16);
4113 return;
4114 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4115 VT == MVT::v2f32) {
4116 SelectStoreLane(Node, 4, AArch64::ST4i32);
4117 return;
4118 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4119 VT == MVT::v1f64) {
4120 SelectStoreLane(Node, 4, AArch64::ST4i64);
4121 return;
4122 }
4123 break;
4124 }
4125 case Intrinsic::aarch64_sve_st2: {
4126 if (VT == MVT::nxv16i8) {
4127 SelectPredicatedStore(Node, 2, 0, AArch64::ST2B, AArch64::ST2B_IMM);
4128 return;
4129 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4130 (VT == MVT::nxv8bf16 && Subtarget->hasBF16())) {
4131 SelectPredicatedStore(Node, 2, 1, AArch64::ST2H, AArch64::ST2H_IMM);
4132 return;
4133 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4134 SelectPredicatedStore(Node, 2, 2, AArch64::ST2W, AArch64::ST2W_IMM);
4135 return;
4136 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4137 SelectPredicatedStore(Node, 2, 3, AArch64::ST2D, AArch64::ST2D_IMM);
4138 return;
4139 }
4140 break;
4141 }
4142 case Intrinsic::aarch64_sve_st3: {
4143 if (VT == MVT::nxv16i8) {
4144 SelectPredicatedStore(Node, 3, 0, AArch64::ST3B, AArch64::ST3B_IMM);
4145 return;
4146 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4147 (VT == MVT::nxv8bf16 && Subtarget->hasBF16())) {
4148 SelectPredicatedStore(Node, 3, 1, AArch64::ST3H, AArch64::ST3H_IMM);
4149 return;
4150 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4151 SelectPredicatedStore(Node, 3, 2, AArch64::ST3W, AArch64::ST3W_IMM);
4152 return;
4153 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4154 SelectPredicatedStore(Node, 3, 3, AArch64::ST3D, AArch64::ST3D_IMM);
4155 return;
4156 }
4157 break;
4158 }
4159 case Intrinsic::aarch64_sve_st4: {
4160 if (VT == MVT::nxv16i8) {
4161 SelectPredicatedStore(Node, 4, 0, AArch64::ST4B, AArch64::ST4B_IMM);
4162 return;
4163 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4164 (VT == MVT::nxv8bf16 && Subtarget->hasBF16())) {
4165 SelectPredicatedStore(Node, 4, 1, AArch64::ST4H, AArch64::ST4H_IMM);
4166 return;
4167 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4168 SelectPredicatedStore(Node, 4, 2, AArch64::ST4W, AArch64::ST4W_IMM);
4169 return;
4170 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4171 SelectPredicatedStore(Node, 4, 3, AArch64::ST4D, AArch64::ST4D_IMM);
4172 return;
4173 }
4174 break;
4175 }
4176 }
4177 break;
4178 }
4179 case AArch64ISD::LD2post: {
4180 if (VT == MVT::v8i8) {
4181 SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
4182 return;
4183 } else if (VT == MVT::v16i8) {
4184 SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
4185 return;
4186 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4187 SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
4188 return;
4189 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4190 SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
4191 return;
4192 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4193 SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
4194 return;
4195 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4196 SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
4197 return;
4198 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4199 SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
4200 return;
4201 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4202 SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
4203 return;
4204 }
4205 break;
4206 }
4207 case AArch64ISD::LD3post: {
4208 if (VT == MVT::v8i8) {
4209 SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
4210 return;
4211 } else if (VT == MVT::v16i8) {
4212 SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
4213 return;
4214 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4215 SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
4216 return;
4217 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4218 SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
4219 return;
4220 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4221 SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
4222 return;
4223 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4224 SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
4225 return;
4226 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4227 SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
4228 return;
4229 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4230 SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
4231 return;
4232 }
4233 break;
4234 }
4235 case AArch64ISD::LD4post: {
4236 if (VT == MVT::v8i8) {
4237 SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
4238 return;
4239 } else if (VT == MVT::v16i8) {
4240 SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
4241 return;
4242 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4243 SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
4244 return;
4245 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4246 SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
4247 return;
4248 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4249 SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
4250 return;
4251 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4252 SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
4253 return;
4254 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4255 SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
4256 return;
4257 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4258 SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
4259 return;
4260 }
4261 break;
4262 }
4263 case AArch64ISD::LD1x2post: {
4264 if (VT == MVT::v8i8) {
4265 SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
4266 return;
4267 } else if (VT == MVT::v16i8) {
4268 SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
4269 return;
4270 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4271 SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
4272 return;
4273 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4274 SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
4275 return;
4276 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4277 SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
4278 return;
4279 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4280 SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
4281 return;
4282 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4283 SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
4284 return;
4285 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4286 SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
4287 return;
4288 }
4289 break;
4290 }
4291 case AArch64ISD::LD1x3post: {
4292 if (VT == MVT::v8i8) {
4293 SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
4294 return;
4295 } else if (VT == MVT::v16i8) {
4296 SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
4297 return;
4298 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4299 SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
4300 return;
4301 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4302 SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
4303 return;
4304 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4305 SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
4306 return;
4307 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4308 SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
4309 return;
4310 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4311 SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
4312 return;
4313 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4314 SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
4315 return;
4316 }
4317 break;
4318 }
4319 case AArch64ISD::LD1x4post: {
4320 if (VT == MVT::v8i8) {
4321 SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
4322 return;
4323 } else if (VT == MVT::v16i8) {
4324 SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
4325 return;
4326 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4327 SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
4328 return;
4329 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4330 SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
4331 return;
4332 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4333 SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
4334 return;
4335 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4336 SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
4337 return;
4338 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4339 SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
4340 return;
4341 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4342 SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
4343 return;
4344 }
4345 break;
4346 }
4347 case AArch64ISD::LD1DUPpost: {
4348 if (VT == MVT::v8i8) {
4349 SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
4350 return;
4351 } else if (VT == MVT::v16i8) {
4352 SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
4353 return;
4354 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4355 SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
4356 return;
4357 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4358 SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
4359 return;
4360 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4361 SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
4362 return;
4363 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4364 SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
4365 return;
4366 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4367 SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
4368 return;
4369 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4370 SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
4371 return;
4372 }
4373 break;
4374 }
4375 case AArch64ISD::LD2DUPpost: {
4376 if (VT == MVT::v8i8) {
4377 SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
4378 return;
4379 } else if (VT == MVT::v16i8) {
4380 SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
4381 return;
4382 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4383 SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
4384 return;
4385 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4386 SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
4387 return;
4388 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4389 SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
4390 return;
4391 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4392 SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
4393 return;
4394 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4395 SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
4396 return;
4397 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4398 SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
4399 return;
4400 }
4401 break;
4402 }
4403 case AArch64ISD::LD3DUPpost: {
4404 if (VT == MVT::v8i8) {
4405 SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
4406 return;
4407 } else if (VT == MVT::v16i8) {
4408 SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
4409 return;
4410 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4411 SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
4412 return;
4413 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4414 SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
4415 return;
4416 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4417 SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
4418 return;
4419 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4420 SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
4421 return;
4422 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4423 SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
4424 return;
4425 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4426 SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
4427 return;
4428 }
4429 break;
4430 }
4431 case AArch64ISD::LD4DUPpost: {
4432 if (VT == MVT::v8i8) {
4433 SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
4434 return;
4435 } else if (VT == MVT::v16i8) {
4436 SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
4437 return;
4438 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4439 SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
4440 return;
4441 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4442 SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
4443 return;
4444 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4445 SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
4446 return;
4447 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4448 SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
4449 return;
4450 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4451 SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
4452 return;
4453 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4454 SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
4455 return;
4456 }
4457 break;
4458 }
4459 case AArch64ISD::LD1LANEpost: {
4460 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4461 SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
4462 return;
4463 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4464 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4465 SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
4466 return;
4467 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4468 VT == MVT::v2f32) {
4469 SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
4470 return;
4471 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4472 VT == MVT::v1f64) {
4473 SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
4474 return;
4475 }
4476 break;
4477 }
4478 case AArch64ISD::LD2LANEpost: {
4479 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4480 SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
4481 return;
4482 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4483 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4484 SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
4485 return;
4486 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4487 VT == MVT::v2f32) {
4488 SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
4489 return;
4490 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4491 VT == MVT::v1f64) {
4492 SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
4493 return;
4494 }
4495 break;
4496 }
4497 case AArch64ISD::LD3LANEpost: {
4498 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4499 SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
4500 return;
4501 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4502 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4503 SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
4504 return;
4505 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4506 VT == MVT::v2f32) {
4507 SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
4508 return;
4509 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4510 VT == MVT::v1f64) {
4511 SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
4512 return;
4513 }
4514 break;
4515 }
4516 case AArch64ISD::LD4LANEpost: {
4517 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4518 SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
4519 return;
4520 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4521 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4522 SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
4523 return;
4524 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4525 VT == MVT::v2f32) {
4526 SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
4527 return;
4528 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4529 VT == MVT::v1f64) {
4530 SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
4531 return;
4532 }
4533 break;
4534 }
4535 case AArch64ISD::ST2post: {
4536 VT = Node->getOperand(1).getValueType();
4537 if (VT == MVT::v8i8) {
4538 SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
4539 return;
4540 } else if (VT == MVT::v16i8) {
4541 SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
4542 return;
4543 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4544 SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
4545 return;
4546 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4547 SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
4548 return;
4549 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4550 SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
4551 return;
4552 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4553 SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
4554 return;
4555 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4556 SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
4557 return;
4558 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4559 SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
4560 return;
4561 }
4562 break;
4563 }
4564 case AArch64ISD::ST3post: {
4565 VT = Node->getOperand(1).getValueType();
4566 if (VT == MVT::v8i8) {
4567 SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
4568 return;
4569 } else if (VT == MVT::v16i8) {
4570 SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
4571 return;
4572 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4573 SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
4574 return;
4575 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4576 SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
4577 return;
4578 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4579 SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
4580 return;
4581 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4582 SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
4583 return;
4584 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4585 SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
4586 return;
4587 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4588 SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
4589 return;
4590 }
4591 break;
4592 }
4593 case AArch64ISD::ST4post: {
4594 VT = Node->getOperand(1).getValueType();
4595 if (VT == MVT::v8i8) {
4596 SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
4597 return;
4598 } else if (VT == MVT::v16i8) {
4599 SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
4600 return;
4601 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4602 SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
4603 return;
4604 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4605 SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
4606 return;
4607 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4608 SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
4609 return;
4610 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4611 SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
4612 return;
4613 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4614 SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
4615 return;
4616 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4617 SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
4618 return;
4619 }
4620 break;
4621 }
4622 case AArch64ISD::ST1x2post: {
4623 VT = Node->getOperand(1).getValueType();
4624 if (VT == MVT::v8i8) {
4625 SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
4626 return;
4627 } else if (VT == MVT::v16i8) {
4628 SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
4629 return;
4630 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4631 SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
4632 return;
4633 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4634 SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
4635 return;
4636 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4637 SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
4638 return;
4639 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4640 SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
4641 return;
4642 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4643 SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
4644 return;
4645 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4646 SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
4647 return;
4648 }
4649 break;
4650 }
4651 case AArch64ISD::ST1x3post: {
4652 VT = Node->getOperand(1).getValueType();
4653 if (VT == MVT::v8i8) {
4654 SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
4655 return;
4656 } else if (VT == MVT::v16i8) {
4657 SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
4658 return;
4659 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4660 SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
4661 return;
4662 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16 ) {
4663 SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
4664 return;
4665 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4666 SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
4667 return;
4668 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4669 SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
4670 return;
4671 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4672 SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
4673 return;
4674 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4675 SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
4676 return;
4677 }
4678 break;
4679 }
4680 case AArch64ISD::ST1x4post: {
4681 VT = Node->getOperand(1).getValueType();
4682 if (VT == MVT::v8i8) {
4683 SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
4684 return;
4685 } else if (VT == MVT::v16i8) {
4686 SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
4687 return;
4688 } else if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16) {
4689 SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
4690 return;
4691 } else if (VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8bf16) {
4692 SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
4693 return;
4694 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
4695 SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
4696 return;
4697 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
4698 SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
4699 return;
4700 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
4701 SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
4702 return;
4703 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
4704 SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
4705 return;
4706 }
4707 break;
4708 }
4709 case AArch64ISD::ST2LANEpost: {
4710 VT = Node->getOperand(1).getValueType();
4711 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4712 SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
4713 return;
4714 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4715 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4716 SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
4717 return;
4718 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4719 VT == MVT::v2f32) {
4720 SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
4721 return;
4722 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4723 VT == MVT::v1f64) {
4724 SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
4725 return;
4726 }
4727 break;
4728 }
4729 case AArch64ISD::ST3LANEpost: {
4730 VT = Node->getOperand(1).getValueType();
4731 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4732 SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
4733 return;
4734 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4735 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4736 SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
4737 return;
4738 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4739 VT == MVT::v2f32) {
4740 SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
4741 return;
4742 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4743 VT == MVT::v1f64) {
4744 SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
4745 return;
4746 }
4747 break;
4748 }
4749 case AArch64ISD::ST4LANEpost: {
4750 VT = Node->getOperand(1).getValueType();
4751 if (VT == MVT::v16i8 || VT == MVT::v8i8) {
4752 SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
4753 return;
4754 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
4755 VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
4756 SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
4757 return;
4758 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
4759 VT == MVT::v2f32) {
4760 SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
4761 return;
4762 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
4763 VT == MVT::v1f64) {
4764 SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
4765 return;
4766 }
4767 break;
4768 }
4769 case AArch64ISD::SVE_LD2_MERGE_ZERO: {
4770 if (VT == MVT::nxv16i8) {
4771 SelectPredicatedLoad(Node, 2, 0, AArch64::LD2B_IMM, AArch64::LD2B);
4772 return;
4773 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4774 (VT == MVT::nxv8bf16 && Subtarget->hasBF16())) {
4775 SelectPredicatedLoad(Node, 2, 1, AArch64::LD2H_IMM, AArch64::LD2H);
4776 return;
4777 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4778 SelectPredicatedLoad(Node, 2, 2, AArch64::LD2W_IMM, AArch64::LD2W);
4779 return;
4780 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4781 SelectPredicatedLoad(Node, 2, 3, AArch64::LD2D_IMM, AArch64::LD2D);
4782 return;
4783 }
4784 break;
4785 }
4786 case AArch64ISD::SVE_LD3_MERGE_ZERO: {
4787 if (VT == MVT::nxv16i8) {
4788 SelectPredicatedLoad(Node, 3, 0, AArch64::LD3B_IMM, AArch64::LD3B);
4789 return;
4790 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4791 (VT == MVT::nxv8bf16 && Subtarget->hasBF16())) {
4792 SelectPredicatedLoad(Node, 3, 1, AArch64::LD3H_IMM, AArch64::LD3H);
4793 return;
4794 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4795 SelectPredicatedLoad(Node, 3, 2, AArch64::LD3W_IMM, AArch64::LD3W);
4796 return;
4797 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4798 SelectPredicatedLoad(Node, 3, 3, AArch64::LD3D_IMM, AArch64::LD3D);
4799 return;
4800 }
4801 break;
4802 }
4803 case AArch64ISD::SVE_LD4_MERGE_ZERO: {
4804 if (VT == MVT::nxv16i8) {
4805 SelectPredicatedLoad(Node, 4, 0, AArch64::LD4B_IMM, AArch64::LD4B);
4806 return;
4807 } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
4808 (VT == MVT::nxv8bf16 && Subtarget->hasBF16())) {
4809 SelectPredicatedLoad(Node, 4, 1, AArch64::LD4H_IMM, AArch64::LD4H);
4810 return;
4811 } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
4812 SelectPredicatedLoad(Node, 4, 2, AArch64::LD4W_IMM, AArch64::LD4W);
4813 return;
4814 } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
4815 SelectPredicatedLoad(Node, 4, 3, AArch64::LD4D_IMM, AArch64::LD4D);
4816 return;
4817 }
4818 break;
4819 }
4820 }
4821
4822 // Select the default instruction
4823 SelectCode(Node);
4824 }
4825
4826 /// createAArch64ISelDag - This pass converts a legalized DAG into a
4827 /// AArch64-specific DAG, ready for instruction scheduling.
createAArch64ISelDag(AArch64TargetMachine & TM,CodeGenOpt::Level OptLevel)4828 FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
4829 CodeGenOpt::Level OptLevel) {
4830 return new AArch64DAGToDAGISel(TM, OptLevel);
4831 }
4832
4833 /// When \p PredVT is a scalable vector predicate in the form
4834 /// MVT::nx<M>xi1, it builds the correspondent scalable vector of
4835 /// integers MVT::nx<M>xi<bits> s.t. M x bits = 128. When targeting
4836 /// structured vectors (NumVec >1), the output data type is
4837 /// MVT::nx<M*NumVec>xi<bits> s.t. M x bits = 128. If the input
4838 /// PredVT is not in the form MVT::nx<M>xi1, it returns an invalid
4839 /// EVT.
getPackedVectorTypeFromPredicateType(LLVMContext & Ctx,EVT PredVT,unsigned NumVec)4840 static EVT getPackedVectorTypeFromPredicateType(LLVMContext &Ctx, EVT PredVT,
4841 unsigned NumVec) {
4842 assert(NumVec > 0 && NumVec < 5 && "Invalid number of vectors.");
4843 if (!PredVT.isScalableVector() || PredVT.getVectorElementType() != MVT::i1)
4844 return EVT();
4845
4846 if (PredVT != MVT::nxv16i1 && PredVT != MVT::nxv8i1 &&
4847 PredVT != MVT::nxv4i1 && PredVT != MVT::nxv2i1)
4848 return EVT();
4849
4850 ElementCount EC = PredVT.getVectorElementCount();
4851 EVT ScalarVT =
4852 EVT::getIntegerVT(Ctx, AArch64::SVEBitsPerBlock / EC.getKnownMinValue());
4853 EVT MemVT = EVT::getVectorVT(Ctx, ScalarVT, EC * NumVec);
4854
4855 return MemVT;
4856 }
4857
4858 /// Return the EVT of the data associated to a memory operation in \p
4859 /// Root. If such EVT cannot be retrived, it returns an invalid EVT.
getMemVTFromNode(LLVMContext & Ctx,SDNode * Root)4860 static EVT getMemVTFromNode(LLVMContext &Ctx, SDNode *Root) {
4861 if (isa<MemSDNode>(Root))
4862 return cast<MemSDNode>(Root)->getMemoryVT();
4863
4864 if (isa<MemIntrinsicSDNode>(Root))
4865 return cast<MemIntrinsicSDNode>(Root)->getMemoryVT();
4866
4867 const unsigned Opcode = Root->getOpcode();
4868 // For custom ISD nodes, we have to look at them individually to extract the
4869 // type of the data moved to/from memory.
4870 switch (Opcode) {
4871 case AArch64ISD::LD1_MERGE_ZERO:
4872 case AArch64ISD::LD1S_MERGE_ZERO:
4873 case AArch64ISD::LDNF1_MERGE_ZERO:
4874 case AArch64ISD::LDNF1S_MERGE_ZERO:
4875 return cast<VTSDNode>(Root->getOperand(3))->getVT();
4876 case AArch64ISD::ST1_PRED:
4877 return cast<VTSDNode>(Root->getOperand(4))->getVT();
4878 case AArch64ISD::SVE_LD2_MERGE_ZERO:
4879 return getPackedVectorTypeFromPredicateType(
4880 Ctx, Root->getOperand(1)->getValueType(0), /*NumVec=*/2);
4881 case AArch64ISD::SVE_LD3_MERGE_ZERO:
4882 return getPackedVectorTypeFromPredicateType(
4883 Ctx, Root->getOperand(1)->getValueType(0), /*NumVec=*/3);
4884 case AArch64ISD::SVE_LD4_MERGE_ZERO:
4885 return getPackedVectorTypeFromPredicateType(
4886 Ctx, Root->getOperand(1)->getValueType(0), /*NumVec=*/4);
4887 default:
4888 break;
4889 }
4890
4891 if (Opcode != ISD::INTRINSIC_VOID)
4892 return EVT();
4893
4894 const unsigned IntNo =
4895 cast<ConstantSDNode>(Root->getOperand(1))->getZExtValue();
4896 if (IntNo != Intrinsic::aarch64_sve_prf)
4897 return EVT();
4898
4899 // We are using an SVE prefetch intrinsic. Type must be inferred
4900 // from the width of the predicate.
4901 return getPackedVectorTypeFromPredicateType(
4902 Ctx, Root->getOperand(2)->getValueType(0), /*NumVec=*/1);
4903 }
4904
4905 /// SelectAddrModeIndexedSVE - Attempt selection of the addressing mode:
4906 /// Base + OffImm * sizeof(MemVT) for Min >= OffImm <= Max
4907 /// where Root is the memory access using N for its address.
4908 template <int64_t Min, int64_t Max>
SelectAddrModeIndexedSVE(SDNode * Root,SDValue N,SDValue & Base,SDValue & OffImm)4909 bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N,
4910 SDValue &Base,
4911 SDValue &OffImm) {
4912 const EVT MemVT = getMemVTFromNode(*(CurDAG->getContext()), Root);
4913
4914 if (MemVT == EVT())
4915 return false;
4916
4917 if (N.getOpcode() != ISD::ADD)
4918 return false;
4919
4920 SDValue VScale = N.getOperand(1);
4921 if (VScale.getOpcode() != ISD::VSCALE)
4922 return false;
4923
4924 TypeSize TS = MemVT.getSizeInBits();
4925 int64_t MemWidthBytes = static_cast<int64_t>(TS.getKnownMinSize()) / 8;
4926 int64_t MulImm = cast<ConstantSDNode>(VScale.getOperand(0))->getSExtValue();
4927
4928 if ((MulImm % MemWidthBytes) != 0)
4929 return false;
4930
4931 int64_t Offset = MulImm / MemWidthBytes;
4932 if (Offset < Min || Offset > Max)
4933 return false;
4934
4935 Base = N.getOperand(0);
4936 OffImm = CurDAG->getTargetConstant(Offset, SDLoc(N), MVT::i64);
4937 return true;
4938 }
4939
4940 /// Select register plus register addressing mode for SVE, with scaled
4941 /// offset.
SelectSVERegRegAddrMode(SDValue N,unsigned Scale,SDValue & Base,SDValue & Offset)4942 bool AArch64DAGToDAGISel::SelectSVERegRegAddrMode(SDValue N, unsigned Scale,
4943 SDValue &Base,
4944 SDValue &Offset) {
4945 if (N.getOpcode() != ISD::ADD)
4946 return false;
4947
4948 // Process an ADD node.
4949 const SDValue LHS = N.getOperand(0);
4950 const SDValue RHS = N.getOperand(1);
4951
4952 // 8 bit data does not come with the SHL node, so it is treated
4953 // separately.
4954 if (Scale == 0) {
4955 Base = LHS;
4956 Offset = RHS;
4957 return true;
4958 }
4959
4960 // Check if the RHS is a shift node with a constant.
4961 if (RHS.getOpcode() != ISD::SHL)
4962 return false;
4963
4964 const SDValue ShiftRHS = RHS.getOperand(1);
4965 if (auto *C = dyn_cast<ConstantSDNode>(ShiftRHS))
4966 if (C->getZExtValue() == Scale) {
4967 Base = LHS;
4968 Offset = RHS.getOperand(0);
4969 return true;
4970 }
4971
4972 return false;
4973 }
4974