1 //===-- HexagonInstrInfo.cpp - Hexagon Instruction Information ------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the Hexagon implementation of the TargetInstrInfo class.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "HexagonInstrInfo.h"
15 #include "Hexagon.h"
16 #include "HexagonRegisterInfo.h"
17 #include "HexagonSubtarget.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/CodeGen/DFAPacketizer.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineMemOperand.h"
24 #include "llvm/CodeGen/MachineRegisterInfo.h"
25 #include "llvm/CodeGen/PseudoSourceValue.h"
26 #include "llvm/MC/MCAsmInfo.h"
27 #include "llvm/Support/Debug.h"
28 #include "llvm/Support/MathExtras.h"
29 #include "llvm/Support/raw_ostream.h"
30 #include <cctype>
31
32 using namespace llvm;
33
34 #define DEBUG_TYPE "hexagon-instrinfo"
35
36 #define GET_INSTRINFO_CTOR_DTOR
37 #define GET_INSTRMAP_INFO
38 #include "HexagonGenInstrInfo.inc"
39 #include "HexagonGenDFAPacketizer.inc"
40
41 using namespace llvm;
42
43 cl::opt<bool> ScheduleInlineAsm("hexagon-sched-inline-asm", cl::Hidden,
44 cl::init(false), cl::desc("Do not consider inline-asm a scheduling/"
45 "packetization boundary."));
46
47 static cl::opt<bool> EnableBranchPrediction("hexagon-enable-branch-prediction",
48 cl::Hidden, cl::init(true), cl::desc("Enable branch prediction"));
49
50 static cl::opt<bool> DisableNVSchedule("disable-hexagon-nv-schedule",
51 cl::Hidden, cl::ZeroOrMore, cl::init(false),
52 cl::desc("Disable schedule adjustment for new value stores."));
53
54 static cl::opt<bool> EnableTimingClassLatency(
55 "enable-timing-class-latency", cl::Hidden, cl::init(false),
56 cl::desc("Enable timing class latency"));
57
58 static cl::opt<bool> EnableALUForwarding(
59 "enable-alu-forwarding", cl::Hidden, cl::init(true),
60 cl::desc("Enable vec alu forwarding"));
61
62 static cl::opt<bool> EnableACCForwarding(
63 "enable-acc-forwarding", cl::Hidden, cl::init(true),
64 cl::desc("Enable vec acc forwarding"));
65
66 static cl::opt<bool> BranchRelaxAsmLarge("branch-relax-asm-large",
67 cl::init(true), cl::Hidden, cl::ZeroOrMore, cl::desc("branch relax asm"));
68
69 ///
70 /// Constants for Hexagon instructions.
71 ///
72 const int Hexagon_MEMV_OFFSET_MAX_128B = 2047; // #s7
73 const int Hexagon_MEMV_OFFSET_MIN_128B = -2048; // #s7
74 const int Hexagon_MEMV_OFFSET_MAX = 1023; // #s6
75 const int Hexagon_MEMV_OFFSET_MIN = -1024; // #s6
76 const int Hexagon_MEMW_OFFSET_MAX = 4095;
77 const int Hexagon_MEMW_OFFSET_MIN = -4096;
78 const int Hexagon_MEMD_OFFSET_MAX = 8191;
79 const int Hexagon_MEMD_OFFSET_MIN = -8192;
80 const int Hexagon_MEMH_OFFSET_MAX = 2047;
81 const int Hexagon_MEMH_OFFSET_MIN = -2048;
82 const int Hexagon_MEMB_OFFSET_MAX = 1023;
83 const int Hexagon_MEMB_OFFSET_MIN = -1024;
84 const int Hexagon_ADDI_OFFSET_MAX = 32767;
85 const int Hexagon_ADDI_OFFSET_MIN = -32768;
86 const int Hexagon_MEMD_AUTOINC_MAX = 56;
87 const int Hexagon_MEMD_AUTOINC_MIN = -64;
88 const int Hexagon_MEMW_AUTOINC_MAX = 28;
89 const int Hexagon_MEMW_AUTOINC_MIN = -32;
90 const int Hexagon_MEMH_AUTOINC_MAX = 14;
91 const int Hexagon_MEMH_AUTOINC_MIN = -16;
92 const int Hexagon_MEMB_AUTOINC_MAX = 7;
93 const int Hexagon_MEMB_AUTOINC_MIN = -8;
94 const int Hexagon_MEMV_AUTOINC_MAX = 192;
95 const int Hexagon_MEMV_AUTOINC_MIN = -256;
96 const int Hexagon_MEMV_AUTOINC_MAX_128B = 384;
97 const int Hexagon_MEMV_AUTOINC_MIN_128B = -512;
98
99 // Pin the vtable to this file.
100 void HexagonInstrInfo::anchor() {}
101
102 HexagonInstrInfo::HexagonInstrInfo(HexagonSubtarget &ST)
103 : HexagonGenInstrInfo(Hexagon::ADJCALLSTACKDOWN, Hexagon::ADJCALLSTACKUP),
104 RI() {}
105
106
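// Returns true if Reg is a general-purpose register usable in a
// sub-instruction: r0-r7 or r16-r23.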
107 static bool isIntRegForSubInst(unsigned Reg) {
108 return (Reg >= Hexagon::R0 && Reg <= Hexagon::R7) ||
109 (Reg >= Hexagon::R16 && Reg <= Hexagon::R23);
110 }
111
112
113 static bool isDblRegForSubInst(unsigned Reg, const HexagonRegisterInfo &HRI) {
114 return isIntRegForSubInst(HRI.getSubReg(Reg, Hexagon::subreg_loreg)) &&
115 isIntRegForSubInst(HRI.getSubReg(Reg, Hexagon::subreg_hireg));
116 }
117
118
119 /// Calculate number of instructions excluding the debug instructions.
120 static unsigned nonDbgMICount(MachineBasicBlock::const_instr_iterator MIB,
121 MachineBasicBlock::const_instr_iterator MIE) {
122 unsigned Count = 0;
123 for (; MIB != MIE; ++MIB) {
124 if (!MIB->isDebugValue())
125 ++Count;
126 }
127 return Count;
128 }
129
130
131 /// Find the hardware loop instruction used to set-up the specified loop.
132 /// On Hexagon, we have two instructions used to set-up the hardware loop
133 /// (LOOP0, LOOP1) with corresponding endloop (ENDLOOP0, ENDLOOP1) instructions
134 /// to indicate the end of a loop.
135 static MachineInstr *findLoopInstr(MachineBasicBlock *BB, int EndLoopOp,
136 SmallPtrSet<MachineBasicBlock *, 8> &Visited) {
137 int LOOPi;
138 int LOOPr;
139 if (EndLoopOp == Hexagon::ENDLOOP0) {
140 LOOPi = Hexagon::J2_loop0i;
141 LOOPr = Hexagon::J2_loop0r;
142 } else { // EndLoopOp == Hexagon::ENDLOOP1
143 LOOPi = Hexagon::J2_loop1i;
144 LOOPr = Hexagon::J2_loop1r;
145 }
146
147 // The loop set-up instruction will be in a predecessor block
148 for (MachineBasicBlock::pred_iterator PB = BB->pred_begin(),
149 PE = BB->pred_end(); PB != PE; ++PB) {
150 // If this block has already been visited, skip it.
151 if (!Visited.insert(*PB).second)
152 continue;
153 if (*PB == BB)
154 continue;
155 for (MachineBasicBlock::reverse_instr_iterator I = (*PB)->instr_rbegin(),
156 E = (*PB)->instr_rend(); I != E; ++I) {
157 int Opc = I->getOpcode();
158 if (Opc == LOOPi || Opc == LOOPr)
159 return &*I;
160 // We've reached a different loop, which means the loop0 has been removed.
161 if (Opc == EndLoopOp)
162 return 0;
163 }
164 // Check the predecessors for the LOOP instruction.
165 MachineInstr *loop = findLoopInstr(*PB, EndLoopOp, Visited);
166 if (loop)
167 return loop;
168 }
169 return 0;
170 }
171
172
173 /// Gather register def/uses from MI.
174 /// This treats possible (predicated) defs as actually happening ones
175 /// (conservatively).
176 static inline void parseOperands(const MachineInstr *MI,
177 SmallVector<unsigned, 4> &Defs, SmallVector<unsigned, 8> &Uses) {
178 Defs.clear();
179 Uses.clear();
180
181 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
182 const MachineOperand &MO = MI->getOperand(i);
183
184 if (!MO.isReg())
185 continue;
186
187 unsigned Reg = MO.getReg();
188 if (!Reg)
189 continue;
190
191 if (MO.isUse())
192 Uses.push_back(MO.getReg());
193
194 if (MO.isDef())
195 Defs.push_back(MO.getReg());
196 }
197 }
198
199
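// Returns true if the sub-instruction group Ga can be paired with group Gb
// to form a duplex.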
200 // Position dependent, so check twice for swap.
201 static bool isDuplexPairMatch(unsigned Ga, unsigned Gb) {
202 switch (Ga) {
203 case HexagonII::HSIG_None:
204 default:
205 return false;
206 case HexagonII::HSIG_L1:
207 return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_A);
208 case HexagonII::HSIG_L2:
209 return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
210 Gb == HexagonII::HSIG_A);
211 case HexagonII::HSIG_S1:
212 return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
213 Gb == HexagonII::HSIG_S1 || Gb == HexagonII::HSIG_A);
214 case HexagonII::HSIG_S2:
215 return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
216 Gb == HexagonII::HSIG_S1 || Gb == HexagonII::HSIG_S2 ||
217 Gb == HexagonII::HSIG_A);
218 case HexagonII::HSIG_A:
219 return (Gb == HexagonII::HSIG_A);
220 case HexagonII::HSIG_Compound:
221 return (Gb == HexagonII::HSIG_Compound);
222 }
223 return false;
224 }
225
226
227
228 /// isLoadFromStackSlot - If the specified machine instruction is a direct
229 /// load from a stack slot, return the virtual or physical register number of
230 /// the destination along with the FrameIndex of the loaded stack slot. If
231 /// not, return 0. This predicate must return 0 if the instruction has
232 /// any side effects other than loading from the stack slot.
233 unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
234 int &FrameIndex) const {
235 switch (MI->getOpcode()) {
236 default: break;
237 case Hexagon::L2_loadri_io:
238 case Hexagon::L2_loadrd_io:
239 case Hexagon::L2_loadrh_io:
240 case Hexagon::L2_loadrb_io:
241 case Hexagon::L2_loadrub_io:
242 if (MI->getOperand(2).isFI() &&
243 MI->getOperand(1).isImm() && (MI->getOperand(1).getImm() == 0)) {
244 FrameIndex = MI->getOperand(2).getIndex();
245 return MI->getOperand(0).getReg();
246 }
247 break;
248 }
249 return 0;
250 }
251
252
253 /// isStoreToStackSlot - If the specified machine instruction is a direct
254 /// store to a stack slot, return the virtual or physical register number of
255 /// the source reg along with the FrameIndex of the stored stack slot. If
256 /// not, return 0. This predicate must return 0 if the instruction has
257 /// any side effects other than storing to the stack slot.
258 unsigned HexagonInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
259 int &FrameIndex) const {
260 switch (MI->getOpcode()) {
261 default: break;
262 case Hexagon::S2_storeri_io:
263 case Hexagon::S2_storerd_io:
264 case Hexagon::S2_storerh_io:
265 case Hexagon::S2_storerb_io:
266 if (MI->getOperand(2).isFI() &&
267 MI->getOperand(1).isImm() && (MI->getOperand(1).getImm() == 0)) {
268 FrameIndex = MI->getOperand(0).getIndex();
269 return MI->getOperand(2).getReg();
270 }
271 break;
272 }
273 return 0;
274 }
275
276
277 /// This function can analyze one/two way branching only and should (mostly) be
278 /// called by target independent side.
279 /// First entry is always the opcode of the branching instruction, except when
280 /// the Cond vector is supposed to be empty, e.g., when AnalyzeBranch fails or the
281 /// BB ends with only an unconditional jump. Subsequent entries depend upon the opcode,
282 /// e.g. Jump_c p will have
283 /// Cond[0] = Jump_c
284 /// Cond[1] = p
285 /// HW-loop ENDLOOP:
286 /// Cond[0] = ENDLOOP
287 /// Cond[1] = MBB
288 /// New value jump:
289 /// Cond[0] = Hexagon::CMPEQri_f_Jumpnv_t_V4 -- specific opcode
290 /// Cond[1] = R
291 /// Cond[2] = Imm
292 ///
293 bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
294 MachineBasicBlock *&TBB,
295 MachineBasicBlock *&FBB,
296 SmallVectorImpl<MachineOperand> &Cond,
297 bool AllowModify) const {
298 TBB = nullptr;
299 FBB = nullptr;
300 Cond.clear();
301
302 // If the block has no terminators, it just falls into the block after it.
303 MachineBasicBlock::instr_iterator I = MBB.instr_end();
304 if (I == MBB.instr_begin())
305 return false;
306
307 // A basic block may look like this:
308 //
309 // [ insn
310 // EH_LABEL
311 // insn
312 // insn
313 // insn
314 // EH_LABEL
315 // insn ]
316 //
317 // It has two successors but does not have a terminator.
318 // We don't know how to handle such a block.
319 do {
320 --I;
321 if (I->isEHLabel())
322 // Don't analyze EH branches.
323 return true;
324 } while (I != MBB.instr_begin());
325
326 I = MBB.instr_end();
327 --I;
328
329 while (I->isDebugValue()) {
330 if (I == MBB.instr_begin())
331 return false;
332 --I;
333 }
334
335 bool JumpToBlock = I->getOpcode() == Hexagon::J2_jump &&
336 I->getOperand(0).isMBB();
337 // Delete the J2_jump if it's equivalent to a fall-through.
338 if (AllowModify && JumpToBlock &&
339 MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
340 DEBUG(dbgs()<< "\nErasing the jump to successor block\n";);
341 I->eraseFromParent();
342 I = MBB.instr_end();
343 if (I == MBB.instr_begin())
344 return false;
345 --I;
346 }
347 if (!isUnpredicatedTerminator(&*I))
348 return false;
349
350 // Get the last instruction in the block.
351 MachineInstr *LastInst = &*I;
352 MachineInstr *SecondLastInst = nullptr;
353 // Find one more terminator if present.
354 for (;;) {
355 if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(&*I)) {
356 if (!SecondLastInst)
357 SecondLastInst = &*I;
358 else
359 // This is a third branch.
360 return true;
361 }
362 if (I == MBB.instr_begin())
363 break;
364 --I;
365 }
366
367 int LastOpcode = LastInst->getOpcode();
368 int SecLastOpcode = SecondLastInst ? SecondLastInst->getOpcode() : 0;
369 // If the branch target is not a basic block, it could be a tail call.
370 // (It is, if the target is a function.)
371 if (LastOpcode == Hexagon::J2_jump && !LastInst->getOperand(0).isMBB())
372 return true;
373 if (SecLastOpcode == Hexagon::J2_jump &&
374 !SecondLastInst->getOperand(0).isMBB())
375 return true;
376
377 bool LastOpcodeHasJMP_c = PredOpcodeHasJMP_c(LastOpcode);
378 bool LastOpcodeHasNVJump = isNewValueJump(LastInst);
379
380 // If there is only one terminator instruction, process it.
381 if (LastInst && !SecondLastInst) {
382 if (LastOpcode == Hexagon::J2_jump) {
383 TBB = LastInst->getOperand(0).getMBB();
384 return false;
385 }
386 if (isEndLoopN(LastOpcode)) {
387 TBB = LastInst->getOperand(0).getMBB();
388 Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
389 Cond.push_back(LastInst->getOperand(0));
390 return false;
391 }
392 if (LastOpcodeHasJMP_c) {
393 TBB = LastInst->getOperand(1).getMBB();
394 Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
395 Cond.push_back(LastInst->getOperand(0));
396 return false;
397 }
398 // Only supporting rr/ri versions of new-value jumps.
399 if (LastOpcodeHasNVJump && (LastInst->getNumExplicitOperands() == 3)) {
400 TBB = LastInst->getOperand(2).getMBB();
401 Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
402 Cond.push_back(LastInst->getOperand(0));
403 Cond.push_back(LastInst->getOperand(1));
404 return false;
405 }
406 DEBUG(dbgs() << "\nCan't analyze BB#" << MBB.getNumber()
407 << " with one jump\n";);
408 // Otherwise, don't know what this is.
409 return true;
410 }
411
412 bool SecLastOpcodeHasJMP_c = PredOpcodeHasJMP_c(SecLastOpcode);
413 bool SecLastOpcodeHasNVJump = isNewValueJump(SecondLastInst);
414 if (SecLastOpcodeHasJMP_c && (LastOpcode == Hexagon::J2_jump)) {
415 TBB = SecondLastInst->getOperand(1).getMBB();
416 Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
417 Cond.push_back(SecondLastInst->getOperand(0));
418 FBB = LastInst->getOperand(0).getMBB();
419 return false;
420 }
421
422 // Only supporting rr/ri versions of new-value jumps.
423 if (SecLastOpcodeHasNVJump &&
424 (SecondLastInst->getNumExplicitOperands() == 3) &&
425 (LastOpcode == Hexagon::J2_jump)) {
426 TBB = SecondLastInst->getOperand(2).getMBB();
427 Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
428 Cond.push_back(SecondLastInst->getOperand(0));
429 Cond.push_back(SecondLastInst->getOperand(1));
430 FBB = LastInst->getOperand(0).getMBB();
431 return false;
432 }
433
434 // If the block ends with two Hexagon::J2_jump instructions, handle it. The second is not
435 // executed, so remove it.
436 if (SecLastOpcode == Hexagon::J2_jump && LastOpcode == Hexagon::J2_jump) {
437 TBB = SecondLastInst->getOperand(0).getMBB();
438 I = LastInst->getIterator();
439 if (AllowModify)
440 I->eraseFromParent();
441 return false;
442 }
443
444 // If the block ends with an ENDLOOP, and J2_jump, handle it.
445 if (isEndLoopN(SecLastOpcode) && LastOpcode == Hexagon::J2_jump) {
446 TBB = SecondLastInst->getOperand(0).getMBB();
447 Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
448 Cond.push_back(SecondLastInst->getOperand(0));
449 FBB = LastInst->getOperand(0).getMBB();
450 return false;
451 }
452 DEBUG(dbgs() << "\nCan't analyze BB#" << MBB.getNumber()
453 << " with two jumps";);
454 // Otherwise, can't handle this.
455 return true;
456 }
457
458
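// Remove the branch instructions at the end of MBB, stopping at the first
// non-branch instruction, and return the number of instructions removed.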
459 unsigned HexagonInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
460 DEBUG(dbgs() << "\nRemoving branches out of BB#" << MBB.getNumber());
461 MachineBasicBlock::iterator I = MBB.end();
462 unsigned Count = 0;
463 while (I != MBB.begin()) {
464 --I;
465 if (I->isDebugValue())
466 continue;
467 // Only removing branches from end of MBB.
468 if (!I->isBranch())
469 return Count;
470 if (Count && (I->getOpcode() == Hexagon::J2_jump))
471 llvm_unreachable("Malformed basic block: unconditional branch not last");
472 MBB.erase(&MBB.back());
473 I = MBB.end();
474 ++Count;
475 }
476 return Count;
477 }
478
479
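// Insert branch code at the end of MBB. Cond encodes the condition in the
// form produced by AnalyzeBranch; an empty Cond means an unconditional jump
// to TBB. Returns the number of branch instructions inserted.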
480 unsigned HexagonInstrInfo::InsertBranch(MachineBasicBlock &MBB,
481 MachineBasicBlock *TBB, MachineBasicBlock *FBB,
482 ArrayRef<MachineOperand> Cond, DebugLoc DL) const {
483 unsigned BOpc = Hexagon::J2_jump;
484 unsigned BccOpc = Hexagon::J2_jumpt;
485 assert(validateBranchCond(Cond) && "Invalid branching condition");
486 assert(TBB && "InsertBranch must not be told to insert a fallthrough");
487
488 // Check if ReverseBranchCondition has asked to reverse this branch
489 // If we want to reverse the branch an odd number of times, we want
490 // J2_jumpf.
491 if (!Cond.empty() && Cond[0].isImm())
492 BccOpc = Cond[0].getImm();
493
494 if (!FBB) {
495 if (Cond.empty()) {
496 // Due to a bug in TailMerging/CFG Optimization, we need to add a
497 // special case handling of a predicated jump followed by an
498 // unconditional jump. If not, Tail Merging and CFG Optimization go
499 // into an infinite loop.
500 MachineBasicBlock *NewTBB, *NewFBB;
501 SmallVector<MachineOperand, 4> Cond;
502 MachineInstr *Term = MBB.getFirstTerminator();
503 if (Term != MBB.end() && isPredicated(Term) &&
504 !AnalyzeBranch(MBB, NewTBB, NewFBB, Cond, false)) {
505 MachineBasicBlock *NextBB = &*++MBB.getIterator();
506 if (NewTBB == NextBB) {
507 ReverseBranchCondition(Cond);
508 RemoveBranch(MBB);
509 return InsertBranch(MBB, TBB, nullptr, Cond, DL);
510 }
511 }
512 BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
513 } else if (isEndLoopN(Cond[0].getImm())) {
514 int EndLoopOp = Cond[0].getImm();
515 assert(Cond[1].isMBB());
516 // Since we're adding an ENDLOOP, there better be a LOOP instruction.
517 // Check for it, and change the BB target if needed.
518 SmallPtrSet<MachineBasicBlock *, 8> VisitedBBs;
519 MachineInstr *Loop = findLoopInstr(TBB, EndLoopOp, VisitedBBs);
520 assert(Loop != 0 && "Inserting an ENDLOOP without a LOOP");
521 Loop->getOperand(0).setMBB(TBB);
522 // Add the ENDLOOP after finding the LOOP0.
523 BuildMI(&MBB, DL, get(EndLoopOp)).addMBB(TBB);
524 } else if (isNewValueJump(Cond[0].getImm())) {
525 assert((Cond.size() == 3) && "Only supporting rr/ri version of nvjump");
526 // New value jump
527 // (ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset)
528 // (ins IntRegs:$src1, u5Imm:$src2, brtarget:$offset)
529 unsigned Flags1 = getUndefRegState(Cond[1].isUndef());
530 DEBUG(dbgs() << "\nInserting NVJump for BB#" << MBB.getNumber(););
531 if (Cond[2].isReg()) {
532 unsigned Flags2 = getUndefRegState(Cond[2].isUndef());
533 BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1).
534 addReg(Cond[2].getReg(), Flags2).addMBB(TBB);
535 } else if(Cond[2].isImm()) {
536 BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1).
537 addImm(Cond[2].getImm()).addMBB(TBB);
538 } else
539 llvm_unreachable("Invalid condition for branching");
540 } else {
541 assert((Cond.size() == 2) && "Malformed cond vector");
542 const MachineOperand &RO = Cond[1];
543 unsigned Flags = getUndefRegState(RO.isUndef());
544 BuildMI(&MBB, DL, get(BccOpc)).addReg(RO.getReg(), Flags).addMBB(TBB);
545 }
546 return 1;
547 }
548 assert((!Cond.empty()) &&
549 "Cond. cannot be empty when multiple branchings are required");
550 assert((!isNewValueJump(Cond[0].getImm())) &&
551 "NV-jump cannot be inserted with another branch");
552 // Special case for hardware loops. The condition is a basic block.
553 if (isEndLoopN(Cond[0].getImm())) {
554 int EndLoopOp = Cond[0].getImm();
555 assert(Cond[1].isMBB());
556 // Since we're adding an ENDLOOP, there better be a LOOP instruction.
557 // Check for it, and change the BB target if needed.
558 SmallPtrSet<MachineBasicBlock *, 8> VisitedBBs;
559 MachineInstr *Loop = findLoopInstr(TBB, EndLoopOp, VisitedBBs);
560 assert(Loop != 0 && "Inserting an ENDLOOP without a LOOP");
561 Loop->getOperand(0).setMBB(TBB);
562 // Add the ENDLOOP after finding the LOOP0.
563 BuildMI(&MBB, DL, get(EndLoopOp)).addMBB(TBB);
564 } else {
565 const MachineOperand &RO = Cond[1];
566 unsigned Flags = getUndefRegState(RO.isUndef());
567 BuildMI(&MBB, DL, get(BccOpc)).addReg(RO.getReg(), Flags).addMBB(TBB);
568 }
569 BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
570
571 return 2;
572 }
573
574
575 bool HexagonInstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
576 unsigned NumCycles, unsigned ExtraPredCycles,
577 BranchProbability Probability) const {
578 return nonDbgBBSize(&MBB) <= 3;
579 }
580
581
582 bool HexagonInstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
583 unsigned NumTCycles, unsigned ExtraTCycles, MachineBasicBlock &FMBB,
584 unsigned NumFCycles, unsigned ExtraFCycles, BranchProbability Probability)
585 const {
586 return nonDbgBBSize(&TMBB) <= 3 && nonDbgBBSize(&FMBB) <= 3;
587 }
588
589
590 bool HexagonInstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
591 unsigned NumInstrs, BranchProbability Probability) const {
592 return NumInstrs <= 4;
593 }
594
595
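// Copy SrcReg to DestReg, selecting the transfer instruction based on the
// register classes involved (scalar, register pair, predicate, control, and
// HVX vector/predicate registers).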
596 void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
597 MachineBasicBlock::iterator I, DebugLoc DL, unsigned DestReg,
598 unsigned SrcReg, bool KillSrc) const {
599 auto &HRI = getRegisterInfo();
600 if (Hexagon::IntRegsRegClass.contains(SrcReg, DestReg)) {
601 BuildMI(MBB, I, DL, get(Hexagon::A2_tfr), DestReg).addReg(SrcReg);
602 return;
603 }
604 if (Hexagon::DoubleRegsRegClass.contains(SrcReg, DestReg)) {
605 BuildMI(MBB, I, DL, get(Hexagon::A2_tfrp), DestReg).addReg(SrcReg);
606 return;
607 }
608 if (Hexagon::PredRegsRegClass.contains(SrcReg, DestReg)) {
609 // Map Pd = Ps to Pd = or(Ps, Ps).
610 BuildMI(MBB, I, DL, get(Hexagon::C2_or),
611 DestReg).addReg(SrcReg).addReg(SrcReg);
612 return;
613 }
614 if (Hexagon::DoubleRegsRegClass.contains(DestReg) &&
615 Hexagon::IntRegsRegClass.contains(SrcReg)) {
616 // We can have an overlap between single and double reg: r1:0 = r0.
617 if(SrcReg == RI.getSubReg(DestReg, Hexagon::subreg_loreg)) {
618 // r1:0 = r0
619 BuildMI(MBB, I, DL, get(Hexagon::A2_tfrsi), (RI.getSubReg(DestReg,
620 Hexagon::subreg_hireg))).addImm(0);
621 } else {
622 // r1:0 = r1 or no overlap.
623 BuildMI(MBB, I, DL, get(Hexagon::A2_tfr), (RI.getSubReg(DestReg,
624 Hexagon::subreg_loreg))).addReg(SrcReg);
625 BuildMI(MBB, I, DL, get(Hexagon::A2_tfrsi), (RI.getSubReg(DestReg,
626 Hexagon::subreg_hireg))).addImm(0);
627 }
628 return;
629 }
630 if (Hexagon::CtrRegsRegClass.contains(DestReg) &&
631 Hexagon::IntRegsRegClass.contains(SrcReg)) {
632 BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg).addReg(SrcReg);
633 return;
634 }
635 if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
636 Hexagon::IntRegsRegClass.contains(DestReg)) {
637 BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg).
638 addReg(SrcReg, getKillRegState(KillSrc));
639 return;
640 }
641 if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
642 Hexagon::PredRegsRegClass.contains(DestReg)) {
643 BuildMI(MBB, I, DL, get(Hexagon::C2_tfrrp), DestReg).
644 addReg(SrcReg, getKillRegState(KillSrc));
645 return;
646 }
647 if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
648 Hexagon::IntRegsRegClass.contains(DestReg)) {
649 BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg).
650 addReg(SrcReg, getKillRegState(KillSrc));
651 return;
652 }
653 if (Hexagon::VectorRegsRegClass.contains(SrcReg, DestReg)) {
654 BuildMI(MBB, I, DL, get(Hexagon::V6_vassign), DestReg).
655 addReg(SrcReg, getKillRegState(KillSrc));
656 return;
657 }
658 if (Hexagon::VecDblRegsRegClass.contains(SrcReg, DestReg)) {
659 BuildMI(MBB, I, DL, get(Hexagon::V6_vcombine), DestReg).
660 addReg(HRI.getSubReg(SrcReg, Hexagon::subreg_hireg),
661 getKillRegState(KillSrc)).
662 addReg(HRI.getSubReg(SrcReg, Hexagon::subreg_loreg),
663 getKillRegState(KillSrc));
664 return;
665 }
666 if (Hexagon::VecPredRegsRegClass.contains(SrcReg, DestReg)) {
667 BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), DestReg).
668 addReg(SrcReg).
669 addReg(SrcReg, getKillRegState(KillSrc));
670 return;
671 }
672 if (Hexagon::VecPredRegsRegClass.contains(SrcReg) &&
673 Hexagon::VectorRegsRegClass.contains(DestReg)) {
674 llvm_unreachable("Unimplemented pred to vec");
675 return;
676 }
677 if (Hexagon::VecPredRegsRegClass.contains(DestReg) &&
678 Hexagon::VectorRegsRegClass.contains(SrcReg)) {
679 llvm_unreachable("Unimplemented vec to pred");
680 return;
681 }
682 if (Hexagon::VecPredRegs128BRegClass.contains(SrcReg, DestReg)) {
683 BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and),
684 HRI.getSubReg(DestReg, Hexagon::subreg_hireg)).
685 addReg(HRI.getSubReg(SrcReg, Hexagon::subreg_hireg),
686 getKillRegState(KillSrc));
687 BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and),
688 HRI.getSubReg(DestReg, Hexagon::subreg_loreg)).
689 addReg(HRI.getSubReg(SrcReg, Hexagon::subreg_loreg),
690 getKillRegState(KillSrc));
691 return;
692 }
693
694 #ifndef NDEBUG
695 // Show the invalid registers to ease debugging.
696 dbgs() << "Invalid registers for copy in BB#" << MBB.getNumber()
697 << ": " << PrintReg(DestReg, &HRI)
698 << " = " << PrintReg(SrcReg, &HRI) << '\n';
699 #endif
700 llvm_unreachable("Unimplemented");
701 }
702
703
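// Store SrcReg to stack slot FI, selecting the store opcode by register class.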
704 void HexagonInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
705 MachineBasicBlock::iterator I, unsigned SrcReg, bool isKill, int FI,
706 const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const {
707 DebugLoc DL = MBB.findDebugLoc(I);
708 MachineFunction &MF = *MBB.getParent();
709 MachineFrameInfo &MFI = *MF.getFrameInfo();
710 unsigned Align = MFI.getObjectAlignment(FI);
711
712 MachineMemOperand *MMO = MF.getMachineMemOperand(
713 MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
714 MFI.getObjectSize(FI), Align);
715
716 if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
717 BuildMI(MBB, I, DL, get(Hexagon::S2_storeri_io))
718 .addFrameIndex(FI).addImm(0)
719 .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
720 } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
721 BuildMI(MBB, I, DL, get(Hexagon::S2_storerd_io))
722 .addFrameIndex(FI).addImm(0)
723 .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
724 } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
725 BuildMI(MBB, I, DL, get(Hexagon::STriw_pred))
726 .addFrameIndex(FI).addImm(0)
727 .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
728 } else {
729 llvm_unreachable("Unimplemented");
730 }
731 }
732
733
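// Load DestReg from stack slot FI, selecting the load opcode by register class.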
734 void HexagonInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
735 MachineBasicBlock::iterator I, unsigned DestReg, int FI,
736 const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const {
737 DebugLoc DL = MBB.findDebugLoc(I);
738 MachineFunction &MF = *MBB.getParent();
739 MachineFrameInfo &MFI = *MF.getFrameInfo();
740 unsigned Align = MFI.getObjectAlignment(FI);
741
742 MachineMemOperand *MMO = MF.getMachineMemOperand(
743 MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
744 MFI.getObjectSize(FI), Align);
745 if (RC == &Hexagon::IntRegsRegClass) {
746 BuildMI(MBB, I, DL, get(Hexagon::L2_loadri_io), DestReg)
747 .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
748 } else if (RC == &Hexagon::DoubleRegsRegClass) {
749 BuildMI(MBB, I, DL, get(Hexagon::L2_loadrd_io), DestReg)
750 .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
751 } else if (RC == &Hexagon::PredRegsRegClass) {
752 BuildMI(MBB, I, DL, get(Hexagon::LDriw_pred), DestReg)
753 .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
754 } else {
755 llvm_unreachable("Can't store this register to stack slot");
756 }
757 }
758
759
760 /// expandPostRAPseudo - This function is called for all pseudo instructions
761 /// that remain after register allocation. Many pseudo instructions are
762 /// created to help register allocation. This is the place to convert them
763 /// into real instructions. The target can edit MI in place, or it can insert
764 /// new instructions and erase MI. The function should return true if
765 /// anything was changed.
766 bool HexagonInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI)
767 const {
768 const HexagonRegisterInfo &HRI = getRegisterInfo();
769 MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
770 MachineBasicBlock &MBB = *MI->getParent();
771 DebugLoc DL = MI->getDebugLoc();
772 unsigned Opc = MI->getOpcode();
773 const unsigned VecOffset = 1;
774 bool Is128B = false;
775
776 switch (Opc) {
777 case Hexagon::ALIGNA:
778 BuildMI(MBB, MI, DL, get(Hexagon::A2_andir), MI->getOperand(0).getReg())
779 .addReg(HRI.getFrameRegister())
780 .addImm(-MI->getOperand(1).getImm());
781 MBB.erase(MI);
782 return true;
783 case Hexagon::HEXAGON_V6_vassignp_128B:
784 case Hexagon::HEXAGON_V6_vassignp: {
785 unsigned SrcReg = MI->getOperand(1).getReg();
786 unsigned DstReg = MI->getOperand(0).getReg();
787 if (SrcReg != DstReg)
788 copyPhysReg(MBB, MI, DL, DstReg, SrcReg, MI->getOperand(1).isKill());
789 MBB.erase(MI);
790 return true;
791 }
792 case Hexagon::HEXAGON_V6_lo_128B:
793 case Hexagon::HEXAGON_V6_lo: {
794 unsigned SrcReg = MI->getOperand(1).getReg();
795 unsigned DstReg = MI->getOperand(0).getReg();
796 unsigned SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::subreg_loreg);
797 copyPhysReg(MBB, MI, DL, DstReg, SrcSubLo, MI->getOperand(1).isKill());
798 MBB.erase(MI);
799 MRI.clearKillFlags(SrcSubLo);
800 return true;
801 }
802 case Hexagon::HEXAGON_V6_hi_128B:
803 case Hexagon::HEXAGON_V6_hi: {
804 unsigned SrcReg = MI->getOperand(1).getReg();
805 unsigned DstReg = MI->getOperand(0).getReg();
806 unsigned SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::subreg_hireg);
807 copyPhysReg(MBB, MI, DL, DstReg, SrcSubHi, MI->getOperand(1).isKill());
808 MBB.erase(MI);
809 MRI.clearKillFlags(SrcSubHi);
810 return true;
811 }
812 case Hexagon::STrivv_indexed_128B:
813 Is128B = true;
814 case Hexagon::STrivv_indexed: {
815 unsigned SrcReg = MI->getOperand(2).getReg();
816 unsigned SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::subreg_hireg);
817 unsigned SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::subreg_loreg);
818 unsigned NewOpcd = Is128B ? Hexagon::V6_vS32b_ai_128B
819 : Hexagon::V6_vS32b_ai;
820 unsigned Offset = Is128B ? VecOffset << 7 : VecOffset << 6;
821 MachineInstr *MI1New = BuildMI(MBB, MI, DL, get(NewOpcd))
822 .addOperand(MI->getOperand(0))
823 .addImm(MI->getOperand(1).getImm())
824 .addReg(SrcSubLo)
825 .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
826 MI1New->getOperand(0).setIsKill(false);
827 BuildMI(MBB, MI, DL, get(NewOpcd))
828 .addOperand(MI->getOperand(0))
829 // The Vectors are indexed in multiples of vector size.
830 .addImm(MI->getOperand(1).getImm()+Offset)
831 .addReg(SrcSubHi)
832 .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
833 MBB.erase(MI);
834 return true;
835 }
836 case Hexagon::LDrivv_pseudo_V6_128B:
837 case Hexagon::LDrivv_indexed_128B:
838 Is128B = true;
839 case Hexagon::LDrivv_pseudo_V6:
840 case Hexagon::LDrivv_indexed: {
841 unsigned NewOpcd = Is128B ? Hexagon::V6_vL32b_ai_128B
842 : Hexagon::V6_vL32b_ai;
843 unsigned DstReg = MI->getOperand(0).getReg();
844 unsigned Offset = Is128B ? VecOffset << 7 : VecOffset << 6;
845 MachineInstr *MI1New =
846 BuildMI(MBB, MI, DL, get(NewOpcd),
847 HRI.getSubReg(DstReg, Hexagon::subreg_loreg))
848 .addOperand(MI->getOperand(1))
849 .addImm(MI->getOperand(2).getImm());
850 MI1New->getOperand(1).setIsKill(false);
851 BuildMI(MBB, MI, DL, get(NewOpcd),
852 HRI.getSubReg(DstReg, Hexagon::subreg_hireg))
853 .addOperand(MI->getOperand(1))
854 // The Vectors are indexed in multiples of vector size.
855 .addImm(MI->getOperand(2).getImm() + Offset)
856 .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
857 MBB.erase(MI);
858 return true;
859 }
860 case Hexagon::LDriv_pseudo_V6_128B:
861 Is128B = true;
862 case Hexagon::LDriv_pseudo_V6: {
863 unsigned DstReg = MI->getOperand(0).getReg();
864 unsigned NewOpc = Is128B ? Hexagon::V6_vL32b_ai_128B
865 : Hexagon::V6_vL32b_ai;
866 int32_t Off = MI->getOperand(2).getImm();
867 int32_t Idx = Off;
868 BuildMI(MBB, MI, DL, get(NewOpc), DstReg)
869 .addOperand(MI->getOperand(1))
870 .addImm(Idx)
871 .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
872 MBB.erase(MI);
873 return true;
874 }
875 case Hexagon::STriv_pseudo_V6_128B:
876 Is128B = true;
877 case Hexagon::STriv_pseudo_V6: {
878 unsigned NewOpc = Is128B ? Hexagon::V6_vS32b_ai_128B
879 : Hexagon::V6_vS32b_ai;
880 int32_t Off = MI->getOperand(1).getImm();
881 int32_t Idx = Is128B ? (Off >> 7) : (Off >> 6);
882 BuildMI(MBB, MI, DL, get(NewOpc))
883 .addOperand(MI->getOperand(0))
884 .addImm(Idx)
885 .addOperand(MI->getOperand(2))
886 .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
887 MBB.erase(MI);
888 return true;
889 }
890 case Hexagon::TFR_PdTrue: {
891 unsigned Reg = MI->getOperand(0).getReg();
892 BuildMI(MBB, MI, DL, get(Hexagon::C2_orn), Reg)
893 .addReg(Reg, RegState::Undef)
894 .addReg(Reg, RegState::Undef);
895 MBB.erase(MI);
896 return true;
897 }
898 case Hexagon::TFR_PdFalse: {
899 unsigned Reg = MI->getOperand(0).getReg();
900 BuildMI(MBB, MI, DL, get(Hexagon::C2_andn), Reg)
901 .addReg(Reg, RegState::Undef)
902 .addReg(Reg, RegState::Undef);
903 MBB.erase(MI);
904 return true;
905 }
906 case Hexagon::VMULW: {
907 // Expand a 64-bit vector multiply into 2 32-bit scalar multiplies.
908 unsigned DstReg = MI->getOperand(0).getReg();
909 unsigned Src1Reg = MI->getOperand(1).getReg();
910 unsigned Src2Reg = MI->getOperand(2).getReg();
911 unsigned Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::subreg_hireg);
912 unsigned Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::subreg_loreg);
913 unsigned Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::subreg_hireg);
914 unsigned Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::subreg_loreg);
915 BuildMI(MBB, MI, MI->getDebugLoc(), get(Hexagon::M2_mpyi),
916 HRI.getSubReg(DstReg, Hexagon::subreg_hireg)).addReg(Src1SubHi)
917 .addReg(Src2SubHi);
918 BuildMI(MBB, MI, MI->getDebugLoc(), get(Hexagon::M2_mpyi),
919 HRI.getSubReg(DstReg, Hexagon::subreg_loreg)).addReg(Src1SubLo)
920 .addReg(Src2SubLo);
921 MBB.erase(MI);
922 MRI.clearKillFlags(Src1SubHi);
923 MRI.clearKillFlags(Src1SubLo);
924 MRI.clearKillFlags(Src2SubHi);
925 MRI.clearKillFlags(Src2SubLo);
926 return true;
927 }
928 case Hexagon::VMULW_ACC: {
929 // Expand 64-bit vector multiply with addition into 2 scalar multiplies.
930 unsigned DstReg = MI->getOperand(0).getReg();
931 unsigned Src1Reg = MI->getOperand(1).getReg();
932 unsigned Src2Reg = MI->getOperand(2).getReg();
933 unsigned Src3Reg = MI->getOperand(3).getReg();
934 unsigned Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::subreg_hireg);
935 unsigned Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::subreg_loreg);
936 unsigned Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::subreg_hireg);
937 unsigned Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::subreg_loreg);
938 unsigned Src3SubHi = HRI.getSubReg(Src3Reg, Hexagon::subreg_hireg);
939 unsigned Src3SubLo = HRI.getSubReg(Src3Reg, Hexagon::subreg_loreg);
940 BuildMI(MBB, MI, MI->getDebugLoc(), get(Hexagon::M2_maci),
941 HRI.getSubReg(DstReg, Hexagon::subreg_hireg)).addReg(Src1SubHi)
942 .addReg(Src2SubHi).addReg(Src3SubHi);
943 BuildMI(MBB, MI, MI->getDebugLoc(), get(Hexagon::M2_maci),
944 HRI.getSubReg(DstReg, Hexagon::subreg_loreg)).addReg(Src1SubLo)
945 .addReg(Src2SubLo).addReg(Src3SubLo);
946 MBB.erase(MI);
947 MRI.clearKillFlags(Src1SubHi);
948 MRI.clearKillFlags(Src1SubLo);
949 MRI.clearKillFlags(Src2SubHi);
950 MRI.clearKillFlags(Src2SubLo);
951 MRI.clearKillFlags(Src3SubHi);
952 MRI.clearKillFlags(Src3SubLo);
953 return true;
954 }
955 case Hexagon::MUX64_rr: {
956 const MachineOperand &Op0 = MI->getOperand(0);
957 const MachineOperand &Op1 = MI->getOperand(1);
958 const MachineOperand &Op2 = MI->getOperand(2);
959 const MachineOperand &Op3 = MI->getOperand(3);
960 unsigned Rd = Op0.getReg();
961 unsigned Pu = Op1.getReg();
962 unsigned Rs = Op2.getReg();
963 unsigned Rt = Op3.getReg();
964 DebugLoc DL = MI->getDebugLoc();
965 unsigned K1 = getKillRegState(Op1.isKill());
966 unsigned K2 = getKillRegState(Op2.isKill());
967 unsigned K3 = getKillRegState(Op3.isKill());
968 if (Rd != Rs)
969 BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrpt), Rd)
970 .addReg(Pu, (Rd == Rt) ? K1 : 0)
971 .addReg(Rs, K2);
972 if (Rd != Rt)
973 BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrpf), Rd)
974 .addReg(Pu, K1)
975 .addReg(Rt, K3);
976 MBB.erase(MI);
977 return true;
978 }
979 case Hexagon::TCRETURNi:
980 MI->setDesc(get(Hexagon::J2_jump));
981 return true;
982 case Hexagon::TCRETURNr:
983 MI->setDesc(get(Hexagon::J2_jumpr));
984 return true;
985 case Hexagon::TFRI_f:
986 case Hexagon::TFRI_cPt_f:
987 case Hexagon::TFRI_cNotPt_f: {
988 unsigned Opx = (Opc == Hexagon::TFRI_f) ? 1 : 2;
989 APFloat FVal = MI->getOperand(Opx).getFPImm()->getValueAPF();
990 APInt IVal = FVal.bitcastToAPInt();
991 MI->RemoveOperand(Opx);
992 unsigned NewOpc = (Opc == Hexagon::TFRI_f) ? Hexagon::A2_tfrsi :
993 (Opc == Hexagon::TFRI_cPt_f) ? Hexagon::C2_cmoveit :
994 Hexagon::C2_cmoveif;
995 MI->setDesc(get(NewOpc));
996 MI->addOperand(MachineOperand::CreateImm(IVal.getZExtValue()));
997 return true;
998 }
999 }
1000
1001 return false;
1002 }
1003
1004
1005 // We indicate that we want to reverse the branch by
1006 // inserting the reversed branching opcode.
1007 bool HexagonInstrInfo::ReverseBranchCondition(
1008 SmallVectorImpl<MachineOperand> &Cond) const {
1009 if (Cond.empty())
1010 return true;
1011 assert(Cond[0].isImm() && "First entry in the cond vector not imm-val");
1012 unsigned opcode = Cond[0].getImm();
1014 assert(get(opcode).isBranch() && "Should be a branching condition.");
1015 if (isEndLoopN(opcode))
1016 return true;
1017 unsigned NewOpcode = getInvertedPredicatedOpcode(opcode);
1018 Cond[0].setImm(NewOpcode);
1019 return false;
1020 }
1021
1022
1023 void HexagonInstrInfo::insertNoop(MachineBasicBlock &MBB,
1024 MachineBasicBlock::iterator MI) const {
1025 DebugLoc DL;
1026 BuildMI(MBB, MI, DL, get(Hexagon::A2_nop));
1027 }
1028
1029
1030 // Returns true if an instruction is predicated irrespective of the predicate
1031 // sense. For example, all of the following will return true.
1032 // if (p0) R1 = add(R2, R3)
1033 // if (!p0) R1 = add(R2, R3)
1034 // if (p0.new) R1 = add(R2, R3)
1035 // if (!p0.new) R1 = add(R2, R3)
1036 // Note: New-value stores are not included here as in the current
1037 // implementation, we don't need to check their predicate sense.
1038 bool HexagonInstrInfo::isPredicated(const MachineInstr *MI) const {
1039 const uint64_t F = MI->getDesc().TSFlags;
1040 return (F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask;
1041 }
1042
1043
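// Predicate MI in place using the condition in Cond, rewriting it into the
// corresponding conditional opcode. Returns true on success.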
1044 bool HexagonInstrInfo::PredicateInstruction(MachineInstr *MI,
1045 ArrayRef<MachineOperand> Cond) const {
1046 if (Cond.empty() || isNewValueJump(Cond[0].getImm()) ||
1047 isEndLoopN(Cond[0].getImm())) {
1048 DEBUG(dbgs() << "\nCannot predicate:"; MI->dump(););
1049 return false;
1050 }
1051 int Opc = MI->getOpcode();
1052 assert (isPredicable(MI) && "Expected predicable instruction");
1053 bool invertJump = predOpcodeHasNot(Cond);
1054
1055 // We have to predicate MI "in place", i.e. after this function returns,
1056 // MI will need to be transformed into a predicated form. To avoid
1057 // complicated manipulations with the operands (handling tied operands,
1058 // etc.), build a new temporary instruction, then overwrite MI with it.
1059
1060 MachineBasicBlock &B = *MI->getParent();
1061 DebugLoc DL = MI->getDebugLoc();
1062 unsigned PredOpc = getCondOpcode(Opc, invertJump);
1063 MachineInstrBuilder T = BuildMI(B, MI, DL, get(PredOpc));
1064 unsigned NOp = 0, NumOps = MI->getNumOperands();
1065 while (NOp < NumOps) {
1066 MachineOperand &Op = MI->getOperand(NOp);
1067 if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
1068 break;
1069 T.addOperand(Op);
1070 NOp++;
1071 }
1072
1073 unsigned PredReg, PredRegPos, PredRegFlags;
1074 bool GotPredReg = getPredReg(Cond, PredReg, PredRegPos, PredRegFlags);
1075 (void)GotPredReg;
1076 assert(GotPredReg);
1077 T.addReg(PredReg, PredRegFlags);
1078 while (NOp < NumOps)
1079 T.addOperand(MI->getOperand(NOp++));
1080
1081 MI->setDesc(get(PredOpc));
1082 while (unsigned n = MI->getNumOperands())
1083 MI->RemoveOperand(n-1);
1084 for (unsigned i = 0, n = T->getNumOperands(); i < n; ++i)
1085 MI->addOperand(T->getOperand(i));
1086
1087 MachineBasicBlock::instr_iterator TI = T->getIterator();
1088 B.erase(TI);
1089
1090 MachineRegisterInfo &MRI = B.getParent()->getRegInfo();
1091 MRI.clearKillFlags(PredReg);
1092 return true;
1093 }
1094
1095
1096 bool HexagonInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
1097 ArrayRef<MachineOperand> Pred2) const {
1098 // TODO: Fix this
1099 return false;
1100 }
1101
1102
1103 bool HexagonInstrInfo::DefinesPredicate(MachineInstr *MI,
1104 std::vector<MachineOperand> &Pred) const {
1105 auto &HRI = getRegisterInfo();
1106 for (unsigned oper = 0; oper < MI->getNumOperands(); ++oper) {
1107 MachineOperand MO = MI->getOperand(oper);
1108 if (MO.isReg() && MO.isDef()) {
1109 const TargetRegisterClass* RC = HRI.getMinimalPhysRegClass(MO.getReg());
1110 if (RC == &Hexagon::PredRegsRegClass) {
1111 Pred.push_back(MO);
1112 return true;
1113 }
1114 }
1115 }
1116 return false;
1117 }
1118
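// Returns true if MI can be predicated. For loads and stores this also checks
// that the offset fits the immediate range of the predicated form.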
1119 bool HexagonInstrInfo::isPredicable(MachineInstr *MI) const {
1120 bool isPred = MI->getDesc().isPredicable();
1121
1122 if (!isPred)
1123 return false;
1124
1125 const int Opc = MI->getOpcode();
1126 int NumOperands = MI->getNumOperands();
1127
1128 // Keep a flag for up to 4 operands in the instruction, to indicate if
1129 // that operand has been constant extended.
1130 bool OpCExtended[4];
1131 if (NumOperands > 4)
1132 NumOperands = 4;
1133
1134 for (int i = 0; i < NumOperands; i++)
1135 OpCExtended[i] = (isOperandExtended(MI, i) && isConstExtended(MI));
1136
1137 switch(Opc) {
1138 case Hexagon::A2_tfrsi:
1139 return (isOperandExtended(MI, 1) && isConstExtended(MI)) ||
1140 isInt<12>(MI->getOperand(1).getImm());
1141
1142 case Hexagon::S2_storerd_io:
1143 return isShiftedUInt<6,3>(MI->getOperand(1).getImm());
1144
1145 case Hexagon::S2_storeri_io:
1146 case Hexagon::S2_storerinew_io:
1147 return isShiftedUInt<6,2>(MI->getOperand(1).getImm());
1148
1149 case Hexagon::S2_storerh_io:
1150 case Hexagon::S2_storerhnew_io:
1151 return isShiftedUInt<6,1>(MI->getOperand(1).getImm());
1152
1153 case Hexagon::S2_storerb_io:
1154 case Hexagon::S2_storerbnew_io:
1155 return isUInt<6>(MI->getOperand(1).getImm());
1156
1157 case Hexagon::L2_loadrd_io:
1158 return isShiftedUInt<6,3>(MI->getOperand(2).getImm());
1159
1160 case Hexagon::L2_loadri_io:
1161 return isShiftedUInt<6,2>(MI->getOperand(2).getImm());
1162
1163 case Hexagon::L2_loadrh_io:
1164 case Hexagon::L2_loadruh_io:
1165 return isShiftedUInt<6,1>(MI->getOperand(2).getImm());
1166
1167 case Hexagon::L2_loadrb_io:
1168 case Hexagon::L2_loadrub_io:
1169 return isUInt<6>(MI->getOperand(2).getImm());
1170
1171 case Hexagon::L2_loadrd_pi:
1172 return isShiftedInt<4,3>(MI->getOperand(3).getImm());
1173
1174 case Hexagon::L2_loadri_pi:
1175 return isShiftedInt<4,2>(MI->getOperand(3).getImm());
1176
1177 case Hexagon::L2_loadrh_pi:
1178 case Hexagon::L2_loadruh_pi:
1179 return isShiftedInt<4,1>(MI->getOperand(3).getImm());
1180
1181 case Hexagon::L2_loadrb_pi:
1182 case Hexagon::L2_loadrub_pi:
1183 return isInt<4>(MI->getOperand(3).getImm());
1184
1185 case Hexagon::S4_storeirb_io:
1186 case Hexagon::S4_storeirh_io:
1187 case Hexagon::S4_storeiri_io:
1188 return (OpCExtended[1] || isUInt<6>(MI->getOperand(1).getImm())) &&
1189 (OpCExtended[2] || isInt<6>(MI->getOperand(2).getImm()));
1190
1191 case Hexagon::A2_addi:
1192 return isInt<8>(MI->getOperand(2).getImm());
1193
1194 case Hexagon::A2_aslh:
1195 case Hexagon::A2_asrh:
1196 case Hexagon::A2_sxtb:
1197 case Hexagon::A2_sxth:
1198 case Hexagon::A2_zxtb:
1199 case Hexagon::A2_zxth:
1200 return true;
1201 }
1202
1203 return true;
1204 }
1205
1206
1207 bool HexagonInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
1208 const MachineBasicBlock *MBB, const MachineFunction &MF) const {
1209 // Debug info is never a scheduling boundary. It's necessary to be explicit
1210 // due to the special treatment of IT instructions below, otherwise a
1211 // dbg_value followed by an IT will result in the IT instruction being
1212 // considered a scheduling hazard, which is wrong. It should be the actual
1213 // instruction preceding the dbg_value instruction(s), just like it is
1214 // when debug info is not present.
1215 if (MI->isDebugValue())
1216 return false;
1217
1218 // Throwing call is a boundary.
1219 if (MI->isCall()) {
1220 // If any of the block's successors is a landing pad, this could be a
1221 // throwing call.
1222 for (auto I : MBB->successors())
1223 if (I->isEHPad())
1224 return true;
1225 }
1226
1227 // Don't mess around with no return calls.
1228 if (MI->getOpcode() == Hexagon::CALLv3nr)
1229 return true;
1230
1231 // Terminators and labels can't be scheduled around.
1232 if (MI->getDesc().isTerminator() || MI->isPosition())
1233 return true;
1234
1235 if (MI->isInlineAsm() && !ScheduleInlineAsm)
1236 return true;
1237
1238 return false;
1239 }
1240
1241
1242 /// Measure the specified inline asm to determine an approximation of its
1243 /// length.
1244 /// Comments (which run till the next SeparatorString or newline) do not
1245 /// count as an instruction.
1246 /// Any other non-whitespace text is considered an instruction, with
1247 /// multiple instructions separated by SeparatorString or newlines.
1248 /// Variable-length instructions are not handled here; this function
1249 /// may be overloaded in the target code to do that.
1250 /// Hexagon counts the number of ##'s and adjusts for that many
1251 /// constant extenders.
1252 unsigned HexagonInstrInfo::getInlineAsmLength(const char *Str,
1253 const MCAsmInfo &MAI) const {
1254 StringRef AStr(Str);
1255 // Count the number of instructions in the asm.
1256 bool atInsnStart = true;
1257 unsigned Length = 0;
1258 for (; *Str; ++Str) {
1259 if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
1260 strlen(MAI.getSeparatorString())) == 0)
1261 atInsnStart = true;
1262 if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
1263 Length += MAI.getMaxInstLength();
1264 atInsnStart = false;
1265 }
1266 if (atInsnStart && strncmp(Str, MAI.getCommentString(),
1267 strlen(MAI.getCommentString())) == 0)
1268 atInsnStart = false;
1269 }
1270
1271 // Add to size number of constant extenders seen * 4.
1272 StringRef Occ("##");
1273 Length += AStr.count(Occ)*4;
1274 return Length;
1275 }
1276
1277
1278 ScheduleHazardRecognizer*
1279 HexagonInstrInfo::CreateTargetPostRAHazardRecognizer(
1280 const InstrItineraryData *II, const ScheduleDAG *DAG) const {
1281 return TargetInstrInfo::CreateTargetPostRAHazardRecognizer(II, DAG);
1282 }
1283
1284
1285 /// \brief For a comparison instruction, return the source registers in
1286 /// \p SrcReg and \p SrcReg2 if having two register operands, and the value it
1287 /// compares against in CmpValue. Return true if the comparison instruction
1288 /// can be analyzed.
1289 bool HexagonInstrInfo::analyzeCompare(const MachineInstr *MI,
1290 unsigned &SrcReg, unsigned &SrcReg2, int &Mask, int &Value) const {
1291 unsigned Opc = MI->getOpcode();
1292
1293 // Set mask and the first source register.
1294 switch (Opc) {
1295 case Hexagon::C2_cmpeq:
1296 case Hexagon::C2_cmpeqp:
1297 case Hexagon::C2_cmpgt:
1298 case Hexagon::C2_cmpgtp:
1299 case Hexagon::C2_cmpgtu:
1300 case Hexagon::C2_cmpgtup:
1301 case Hexagon::C4_cmpneq:
1302 case Hexagon::C4_cmplte:
1303 case Hexagon::C4_cmplteu:
1304 case Hexagon::C2_cmpeqi:
1305 case Hexagon::C2_cmpgti:
1306 case Hexagon::C2_cmpgtui:
1307 case Hexagon::C4_cmpneqi:
1308 case Hexagon::C4_cmplteui:
1309 case Hexagon::C4_cmpltei:
1310 SrcReg = MI->getOperand(1).getReg();
1311 Mask = ~0;
1312 break;
1313 case Hexagon::A4_cmpbeq:
1314 case Hexagon::A4_cmpbgt:
1315 case Hexagon::A4_cmpbgtu:
1316 case Hexagon::A4_cmpbeqi:
1317 case Hexagon::A4_cmpbgti:
1318 case Hexagon::A4_cmpbgtui:
1319 SrcReg = MI->getOperand(1).getReg();
1320 Mask = 0xFF;
1321 break;
1322 case Hexagon::A4_cmpheq:
1323 case Hexagon::A4_cmphgt:
1324 case Hexagon::A4_cmphgtu:
1325 case Hexagon::A4_cmpheqi:
1326 case Hexagon::A4_cmphgti:
1327 case Hexagon::A4_cmphgtui:
1328 SrcReg = MI->getOperand(1).getReg();
1329 Mask = 0xFFFF;
1330 break;
1331 }
1332
1333 // Set the value/second source register.
1334 switch (Opc) {
1335 case Hexagon::C2_cmpeq:
1336 case Hexagon::C2_cmpeqp:
1337 case Hexagon::C2_cmpgt:
1338 case Hexagon::C2_cmpgtp:
1339 case Hexagon::C2_cmpgtu:
1340 case Hexagon::C2_cmpgtup:
1341 case Hexagon::A4_cmpbeq:
1342 case Hexagon::A4_cmpbgt:
1343 case Hexagon::A4_cmpbgtu:
1344 case Hexagon::A4_cmpheq:
1345 case Hexagon::A4_cmphgt:
1346 case Hexagon::A4_cmphgtu:
1347 case Hexagon::C4_cmpneq:
1348 case Hexagon::C4_cmplte:
1349 case Hexagon::C4_cmplteu:
1350 SrcReg2 = MI->getOperand(2).getReg();
1351 return true;
1352
1353 case Hexagon::C2_cmpeqi:
1354 case Hexagon::C2_cmpgtui:
1355 case Hexagon::C2_cmpgti:
1356 case Hexagon::C4_cmpneqi:
1357 case Hexagon::C4_cmplteui:
1358 case Hexagon::C4_cmpltei:
1359 case Hexagon::A4_cmpbeqi:
1360 case Hexagon::A4_cmpbgti:
1361 case Hexagon::A4_cmpbgtui:
1362 case Hexagon::A4_cmpheqi:
1363 case Hexagon::A4_cmphgti:
1364 case Hexagon::A4_cmphgtui:
1365 SrcReg2 = 0;
1366 Value = MI->getOperand(2).getImm();
1367 return true;
1368 }
1369
1370 return false;
1371 }
1372
1373
1374 unsigned HexagonInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1375 const MachineInstr *MI, unsigned *PredCost) const {
1376 return getInstrTimingClassLatency(ItinData, MI);
1377 }
1378
1379
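// Create a DFA packetizer for this subtarget using its instruction itinerary.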
1380 DFAPacketizer *HexagonInstrInfo::CreateTargetScheduleState(
1381 const TargetSubtargetInfo &STI) const {
1382 const InstrItineraryData *II = STI.getInstrItineraryData();
1383 return static_cast<const HexagonSubtarget&>(STI).createDFAPacketizer(II);
1384 }
1385
1386
1387 // Inspired by this pair:
1388 // %R13<def> = L2_loadri_io %R29, 136; mem:LD4[FixedStack0]
1389 // S2_storeri_io %R29, 132, %R1<kill>; flags: mem:ST4[FixedStack1]
1390 // Currently AA considers the addresses in these instructions to be aliasing.
1391 bool HexagonInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
1392 MachineInstr *MIb, AliasAnalysis *AA) const {
1393 int OffsetA = 0, OffsetB = 0;
1394 unsigned SizeA = 0, SizeB = 0;
1395
1396 if (MIa->hasUnmodeledSideEffects() || MIb->hasUnmodeledSideEffects() ||
1397 MIa->hasOrderedMemoryRef() || MIb->hasOrderedMemoryRef())
1398 return false;
1399
1400 // Instructions that are pure loads (not load-and-store operations such as
1401 // memops) do not depend on each other.
1402 if (MIa->mayLoad() && !isMemOp(MIa) && MIb->mayLoad() && !isMemOp(MIb))
1403 return true;
1404
1405 // Get base, offset, and access size in MIa.
1406 unsigned BaseRegA = getBaseAndOffset(MIa, OffsetA, SizeA);
1407 if (!BaseRegA || !SizeA)
1408 return false;
1409
1410 // Get base, offset, and access size in MIb.
1411 unsigned BaseRegB = getBaseAndOffset(MIb, OffsetB, SizeB);
1412 if (!BaseRegB || !SizeB)
1413 return false;
1414
1415 if (BaseRegA != BaseRegB)
1416 return false;
1417
1418 // This is a mem access with the same base register and known offsets from it.
1419 // Reason about it.
1420 if (OffsetA > OffsetB) {
1421 uint64_t offDiff = (uint64_t)((int64_t)OffsetA - (int64_t)OffsetB);
1422 return (SizeB <= offDiff);
1423 } else if (OffsetA < OffsetB) {
1424 uint64_t offDiff = (uint64_t)((int64_t)OffsetB - (int64_t)OffsetA);
1425 return (SizeA <= offDiff);
1426 }
1427
1428 return false;
1429 }
1430
1431
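// Create a new virtual register of the class matching VT: predicate registers
// for i1, 32-bit integer registers for i32/f32, and register pairs for
// i64/f64.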
1432 unsigned HexagonInstrInfo::createVR(MachineFunction* MF, MVT VT) const {
1433 MachineRegisterInfo &MRI = MF->getRegInfo();
1434 const TargetRegisterClass *TRC;
1435 if (VT == MVT::i1) {
1436 TRC = &Hexagon::PredRegsRegClass;
1437 } else if (VT == MVT::i32 || VT == MVT::f32) {
1438 TRC = &Hexagon::IntRegsRegClass;
1439 } else if (VT == MVT::i64 || VT == MVT::f64) {
1440 TRC = &Hexagon::DoubleRegsRegClass;
1441 } else {
1442 llvm_unreachable("Cannot handle this register class");
1443 }
1444
1445 unsigned NewReg = MRI.createVirtualRegister(TRC);
1446 return NewReg;
1447 }
1448
1449
1450 bool HexagonInstrInfo::isAbsoluteSet(const MachineInstr* MI) const {
1451 return (getAddrMode(MI) == HexagonII::AbsoluteSet);
1452 }
1453
1454
1455 bool HexagonInstrInfo::isAccumulator(const MachineInstr *MI) const {
1456 const uint64_t F = MI->getDesc().TSFlags;
1457 return((F >> HexagonII::AccumulatorPos) & HexagonII::AccumulatorMask);
1458 }
1459
1460
1461 bool HexagonInstrInfo::isComplex(const MachineInstr *MI) const {
1462 const MachineFunction *MF = MI->getParent()->getParent();
1463 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
1464 const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
1465
1466 if (!(isTC1(MI))
1467 && !(QII->isTC2Early(MI))
1468 && !(MI->getDesc().mayLoad())
1469 && !(MI->getDesc().mayStore())
1470 && (MI->getDesc().getOpcode() != Hexagon::S2_allocframe)
1471 && (MI->getDesc().getOpcode() != Hexagon::L2_deallocframe)
1472 && !(QII->isMemOp(MI))
1473 && !(MI->isBranch())
1474 && !(MI->isReturn())
1475 && !MI->isCall())
1476 return true;
1477
1478 return false;
1479 }
1480
1481
1482 // Return true if the instruction is a compound branch instruction.
1483 bool HexagonInstrInfo::isCompoundBranchInstr(const MachineInstr *MI) const {
1484 return (getType(MI) == HexagonII::TypeCOMPOUND && MI->isBranch());
1485 }
1486
1487
1488 bool HexagonInstrInfo::isCondInst(const MachineInstr *MI) const {
1489 return (MI->isBranch() && isPredicated(MI)) ||
1490 isConditionalTransfer(MI) ||
1491 isConditionalALU32(MI) ||
1492 isConditionalLoad(MI) ||
1493 // Predicated stores which don't have a .new on any operands.
1494 (MI->mayStore() && isPredicated(MI) && !isNewValueStore(MI) &&
1495 !isPredicatedNew(MI));
1496 }
1497
1498
1499 bool HexagonInstrInfo::isConditionalALU32(const MachineInstr* MI) const {
1500 switch (MI->getOpcode()) {
1501 case Hexagon::A2_paddf:
1502 case Hexagon::A2_paddfnew:
1503 case Hexagon::A2_paddif:
1504 case Hexagon::A2_paddifnew:
1505 case Hexagon::A2_paddit:
1506 case Hexagon::A2_padditnew:
1507 case Hexagon::A2_paddt:
1508 case Hexagon::A2_paddtnew:
1509 case Hexagon::A2_pandf:
1510 case Hexagon::A2_pandfnew:
1511 case Hexagon::A2_pandt:
1512 case Hexagon::A2_pandtnew:
1513 case Hexagon::A2_porf:
1514 case Hexagon::A2_porfnew:
1515 case Hexagon::A2_port:
1516 case Hexagon::A2_portnew:
1517 case Hexagon::A2_psubf:
1518 case Hexagon::A2_psubfnew:
1519 case Hexagon::A2_psubt:
1520 case Hexagon::A2_psubtnew:
1521 case Hexagon::A2_pxorf:
1522 case Hexagon::A2_pxorfnew:
1523 case Hexagon::A2_pxort:
1524 case Hexagon::A2_pxortnew:
1525 case Hexagon::A4_paslhf:
1526 case Hexagon::A4_paslhfnew:
1527 case Hexagon::A4_paslht:
1528 case Hexagon::A4_paslhtnew:
1529 case Hexagon::A4_pasrhf:
1530 case Hexagon::A4_pasrhfnew:
1531 case Hexagon::A4_pasrht:
1532 case Hexagon::A4_pasrhtnew:
1533 case Hexagon::A4_psxtbf:
1534 case Hexagon::A4_psxtbfnew:
1535 case Hexagon::A4_psxtbt:
1536 case Hexagon::A4_psxtbtnew:
1537 case Hexagon::A4_psxthf:
1538 case Hexagon::A4_psxthfnew:
1539 case Hexagon::A4_psxtht:
1540 case Hexagon::A4_psxthtnew:
1541 case Hexagon::A4_pzxtbf:
1542 case Hexagon::A4_pzxtbfnew:
1543 case Hexagon::A4_pzxtbt:
1544 case Hexagon::A4_pzxtbtnew:
1545 case Hexagon::A4_pzxthf:
1546 case Hexagon::A4_pzxthfnew:
1547 case Hexagon::A4_pzxtht:
1548 case Hexagon::A4_pzxthtnew:
1549 case Hexagon::C2_ccombinewf:
1550 case Hexagon::C2_ccombinewt:
1551 return true;
1552 }
1553 return false;
1554 }
1555
1556
1557 // FIXME - Function name and its functionality don't match.
1558 // It should be renamed to hasPredNewOpcode().
1559 bool HexagonInstrInfo::isConditionalLoad(const MachineInstr* MI) const {
1560 if (!MI->getDesc().mayLoad() || !isPredicated(MI))
1561 return false;
1562
1563 int PNewOpcode = Hexagon::getPredNewOpcode(MI->getOpcode());
1564 // Instruction with valid predicated-new opcode can be promoted to .new.
1565 return PNewOpcode >= 0;
1566 }
1567
1568
1569 // Returns true if an instruction is a conditional store.
1570 //
1571 // Note: It doesn't include conditional new-value stores as they can't be
1572 // converted to .new predicate.
1573 bool HexagonInstrInfo::isConditionalStore(const MachineInstr* MI) const {
1574 switch (MI->getOpcode()) {
1575 default: return false;
1576 case Hexagon::S4_storeirbt_io:
1577 case Hexagon::S4_storeirbf_io:
1578 case Hexagon::S4_pstorerbt_rr:
1579 case Hexagon::S4_pstorerbf_rr:
1580 case Hexagon::S2_pstorerbt_io:
1581 case Hexagon::S2_pstorerbf_io:
1582 case Hexagon::S2_pstorerbt_pi:
1583 case Hexagon::S2_pstorerbf_pi:
1584 case Hexagon::S2_pstorerdt_io:
1585 case Hexagon::S2_pstorerdf_io:
1586 case Hexagon::S4_pstorerdt_rr:
1587 case Hexagon::S4_pstorerdf_rr:
1588 case Hexagon::S2_pstorerdt_pi:
1589 case Hexagon::S2_pstorerdf_pi:
1590 case Hexagon::S2_pstorerht_io:
1591 case Hexagon::S2_pstorerhf_io:
1592 case Hexagon::S4_storeirht_io:
1593 case Hexagon::S4_storeirhf_io:
1594 case Hexagon::S4_pstorerht_rr:
1595 case Hexagon::S4_pstorerhf_rr:
1596 case Hexagon::S2_pstorerht_pi:
1597 case Hexagon::S2_pstorerhf_pi:
1598 case Hexagon::S2_pstorerit_io:
1599 case Hexagon::S2_pstorerif_io:
1600 case Hexagon::S4_storeirit_io:
1601 case Hexagon::S4_storeirif_io:
1602 case Hexagon::S4_pstorerit_rr:
1603 case Hexagon::S4_pstorerif_rr:
1604 case Hexagon::S2_pstorerit_pi:
1605 case Hexagon::S2_pstorerif_pi:
1606
1607 // V4 global address store before promoting to dot new.
1608 case Hexagon::S4_pstorerdt_abs:
1609 case Hexagon::S4_pstorerdf_abs:
1610 case Hexagon::S4_pstorerbt_abs:
1611 case Hexagon::S4_pstorerbf_abs:
1612 case Hexagon::S4_pstorerht_abs:
1613 case Hexagon::S4_pstorerhf_abs:
1614 case Hexagon::S4_pstorerit_abs:
1615 case Hexagon::S4_pstorerif_abs:
1616 return true;
1617
1618 // Predicated new value stores (i.e. if (p0) memw(..)=r0.new) are excluded
1619 // from the "Conditional Store" list. Because a predicated new value store
1620 // would NOT be promoted to a double dot new store.
1621 // This function returns yes for those stores that are predicated but not
1622 // yet promoted to predicate dot new instructions.
1623 }
1624 }
1625
1626
1627 bool HexagonInstrInfo::isConditionalTransfer(const MachineInstr *MI) const {
1628 switch (MI->getOpcode()) {
1629 case Hexagon::A2_tfrt:
1630 case Hexagon::A2_tfrf:
1631 case Hexagon::C2_cmoveit:
1632 case Hexagon::C2_cmoveif:
1633 case Hexagon::A2_tfrtnew:
1634 case Hexagon::A2_tfrfnew:
1635 case Hexagon::C2_cmovenewit:
1636 case Hexagon::C2_cmovenewif:
1637 case Hexagon::A2_tfrpt:
1638 case Hexagon::A2_tfrpf:
1639 return true;
1640
1641 default:
1642 return false;
1643 }
1644 return false;
1645 }
1646
1647
1648 // TODO: In order to have isExtendable for fpimm/f32Ext, we need to handle
1649 // isFPImm and later getFPImm as well.
1650 bool HexagonInstrInfo::isConstExtended(const MachineInstr *MI) const {
1651 const uint64_t F = MI->getDesc().TSFlags;
1652 unsigned isExtended = (F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask;
1653 if (isExtended) // Instruction must be extended.
1654 return true;
1655
1656 unsigned isExtendable =
1657 (F >> HexagonII::ExtendablePos) & HexagonII::ExtendableMask;
1658 if (!isExtendable)
1659 return false;
1660
1661 if (MI->isCall())
1662 return false;
1663
1664 short ExtOpNum = getCExtOpNum(MI);
1665 const MachineOperand &MO = MI->getOperand(ExtOpNum);
1666 // Use MO operand flags to determine if MO
1667 // has the HMOTF_ConstExtended flag set.
1668 if (MO.getTargetFlags() & HexagonII::HMOTF_ConstExtended)
1669 return true;
1670 // If this is a Machine BB address we are talking about, and it is
1671 // not marked as extended, say so.
1672 if (MO.isMBB())
1673 return false;
1674
1675 // We could be using an instruction with an extendable immediate and shoehorn
1676 // a global address into it. If it is a global address it will be constant
1677 // extended. We do this for COMBINE.
1678 // Any of the operand kinds checked below (globals, symbols, block
1679 // addresses, jump-table and constant-pool indices) is always constant
1680 // extended.
1681 if (MO.isGlobal() || MO.isSymbol() || MO.isBlockAddress() ||
1682 MO.isJTI() || MO.isCPI())
1683 return true;
1684
1685 // If the extendable operand is not 'Immediate' type, the instruction should
1686 // have 'isExtended' flag set.
1687 assert(MO.isImm() && "Extendable operand must be Immediate type");
1688
1689 int MinValue = getMinValue(MI);
1690 int MaxValue = getMaxValue(MI);
1691 int ImmValue = MO.getImm();
1692
1693 return (ImmValue < MinValue || ImmValue > MaxValue);
1694 }
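// Hedged example of the final range check above: assuming an instruction
// whose extendable immediate field is signed 8-bit, getMinValue() and
// getMaxValue() would report [-128, 127]. An operand value of 100 fits and
// needs no extender, while 300 falls outside the range and the instruction
// must be constant-extended. The concrete field widths come from the
// instruction descriptions, not from this function.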
1695
1696
1697 bool HexagonInstrInfo::isDeallocRet(const MachineInstr *MI) const {
1698 switch (MI->getOpcode()) {
1699 case Hexagon::L4_return :
1700 case Hexagon::L4_return_t :
1701 case Hexagon::L4_return_f :
1702 case Hexagon::L4_return_tnew_pnt :
1703 case Hexagon::L4_return_fnew_pnt :
1704 case Hexagon::L4_return_tnew_pt :
1705 case Hexagon::L4_return_fnew_pt :
1706 return true;
1707 }
1708 return false;
1709 }
1710
1711
1712 // Return true when ConsMI uses a register defined by ProdMI.
1713 bool HexagonInstrInfo::isDependent(const MachineInstr *ProdMI,
1714 const MachineInstr *ConsMI) const {
1715 const MCInstrDesc &ProdMCID = ProdMI->getDesc();
1716 if (!ProdMCID.getNumDefs())
1717 return false;
1718
1719 auto &HRI = getRegisterInfo();
1720
1721 SmallVector<unsigned, 4> DefsA;
1722 SmallVector<unsigned, 4> DefsB;
1723 SmallVector<unsigned, 8> UsesA;
1724 SmallVector<unsigned, 8> UsesB;
1725
1726 parseOperands(ProdMI, DefsA, UsesA);
1727 parseOperands(ConsMI, DefsB, UsesB);
1728
1729 for (auto &RegA : DefsA)
1730 for (auto &RegB : UsesB) {
1731 // True data dependency.
1732 if (RegA == RegB)
1733 return true;
1734
1735 if (Hexagon::DoubleRegsRegClass.contains(RegA))
1736 for (MCSubRegIterator SubRegs(RegA, &HRI); SubRegs.isValid(); ++SubRegs)
1737 if (RegB == *SubRegs)
1738 return true;
1739
1740 if (Hexagon::DoubleRegsRegClass.contains(RegB))
1741 for (MCSubRegIterator SubRegs(RegB, &HRI); SubRegs.isValid(); ++SubRegs)
1742 if (RegA == *SubRegs)
1743 return true;
1744 }
1745
1746 return false;
1747 }
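// Sketch of the sub-register case handled above (hypothetical registers):
// if ProdMI defines the double register D1 (the R3:R2 pair) and ConsMI uses
// R2, the direct comparison RegA == RegB fails, but iterating over the
// sub-registers of D1 finds R2 and the function still reports a dependence.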
1748
1749
1750 // Returns true if the instruction is already a .cur.
1751 bool HexagonInstrInfo::isDotCurInst(const MachineInstr* MI) const {
1752 switch (MI->getOpcode()) {
1753 case Hexagon::V6_vL32b_cur_pi:
1754 case Hexagon::V6_vL32b_cur_ai:
1755 case Hexagon::V6_vL32b_cur_pi_128B:
1756 case Hexagon::V6_vL32b_cur_ai_128B:
1757 return true;
1758 }
1759 return false;
1760 }
1761
1762
1763 // Returns true if the instruction is a dot-new instruction, either a
1764 // predicated dot-new or a register (new-value) dot-new.
1765 bool HexagonInstrInfo::isDotNewInst(const MachineInstr* MI) const {
1766 if (isNewValueInst(MI) ||
1767 (isPredicated(MI) && isPredicatedNew(MI)))
1768 return true;
1769
1770 return false;
1771 }
1772
1773
1774 /// Symmetrical. See if these two instructions are fit for duplex pair.
1775 bool HexagonInstrInfo::isDuplexPair(const MachineInstr *MIa,
1776 const MachineInstr *MIb) const {
1777 HexagonII::SubInstructionGroup MIaG = getDuplexCandidateGroup(MIa);
1778 HexagonII::SubInstructionGroup MIbG = getDuplexCandidateGroup(MIb);
1779 return (isDuplexPairMatch(MIaG, MIbG) || isDuplexPairMatch(MIbG, MIaG));
1780 }
1781
1782
1783 bool HexagonInstrInfo::isEarlySourceInstr(const MachineInstr *MI) const {
1784 if (!MI)
1785 return false;
1786
1787 if (MI->mayLoad() || MI->mayStore() || MI->isCompare())
1788 return true;
1789
1790 // Multiply
1791 unsigned SchedClass = MI->getDesc().getSchedClass();
1792 if (SchedClass == Hexagon::Sched::M_tc_3or4x_SLOT23)
1793 return true;
1794 return false;
1795 }
1796
1797
1798 bool HexagonInstrInfo::isEndLoopN(unsigned Opcode) const {
1799 return (Opcode == Hexagon::ENDLOOP0 ||
1800 Opcode == Hexagon::ENDLOOP1);
1801 }
1802
1803
1804 bool HexagonInstrInfo::isExpr(unsigned OpType) const {
1805 switch(OpType) {
1806 case MachineOperand::MO_MachineBasicBlock:
1807 case MachineOperand::MO_GlobalAddress:
1808 case MachineOperand::MO_ExternalSymbol:
1809 case MachineOperand::MO_JumpTableIndex:
1810 case MachineOperand::MO_ConstantPoolIndex:
1811 case MachineOperand::MO_BlockAddress:
1812 return true;
1813 default:
1814 return false;
1815 }
1816 }
1817
1818
1819 bool HexagonInstrInfo::isExtendable(const MachineInstr *MI) const {
1820 const MCInstrDesc &MID = MI->getDesc();
1821 const uint64_t F = MID.TSFlags;
1822 if ((F >> HexagonII::ExtendablePos) & HexagonII::ExtendableMask)
1823 return true;
1824
1825 // TODO: This is largely obsolete now. Will need to be removed
1826 // in consecutive patches.
1827 switch(MI->getOpcode()) {
1828 // TFR_FI Remains a special case.
1829 case Hexagon::TFR_FI:
1830 return true;
1831 default:
1832 return false;
1833 }
1834 return false;
1835 }
1836
1837
1838 // This returns true in two cases:
1839 // - The OP code itself indicates that this is an extended instruction.
1840 // - One of MOs has been marked with HMOTF_ConstExtended flag.
1841 bool HexagonInstrInfo::isExtended(const MachineInstr *MI) const {
1842 // First check if this is permanently extended op code.
1843 const uint64_t F = MI->getDesc().TSFlags;
1844 if ((F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask)
1845 return true;
1846 // Use MO operand flags to determine if one of MI's operands
1847 // has HMOTF_ConstExtended flag set.
1848 for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
1849 E = MI->operands_end(); I != E; ++I) {
1850 if (I->getTargetFlags() & HexagonII::HMOTF_ConstExtended)
1851 return true;
1852 }
1853 return false;
1854 }
1855
1856
1857 bool HexagonInstrInfo::isFloat(const MachineInstr *MI) const {
1858 unsigned Opcode = MI->getOpcode();
1859 const uint64_t F = get(Opcode).TSFlags;
1860 return (F >> HexagonII::FPPos) & HexagonII::FPMask;
1861 }
1862
1863
1864 // No V60 HVX VMEM with A_INDIRECT.
1865 bool HexagonInstrInfo::isHVXMemWithAIndirect(const MachineInstr *I,
1866 const MachineInstr *J) const {
1867 if (!isV60VectorInstruction(I))
1868 return false;
1869 if (!I->mayLoad() && !I->mayStore())
1870 return false;
1871 return J->isIndirectBranch() || isIndirectCall(J) || isIndirectL4Return(J);
1872 }
1873
1874
1875 bool HexagonInstrInfo::isIndirectCall(const MachineInstr *MI) const {
1876 switch (MI->getOpcode()) {
1877 case Hexagon::J2_callr :
1878 case Hexagon::J2_callrf :
1879 case Hexagon::J2_callrt :
1880 return true;
1881 }
1882 return false;
1883 }
1884
1885
1886 bool HexagonInstrInfo::isIndirectL4Return(const MachineInstr *MI) const {
1887 switch (MI->getOpcode()) {
1888 case Hexagon::L4_return :
1889 case Hexagon::L4_return_t :
1890 case Hexagon::L4_return_f :
1891 case Hexagon::L4_return_fnew_pnt :
1892 case Hexagon::L4_return_fnew_pt :
1893 case Hexagon::L4_return_tnew_pnt :
1894 case Hexagon::L4_return_tnew_pt :
1895 return true;
1896 }
1897 return false;
1898 }
1899
1900
1901 bool HexagonInstrInfo::isJumpR(const MachineInstr *MI) const {
1902 switch (MI->getOpcode()) {
1903 case Hexagon::J2_jumpr :
1904 case Hexagon::J2_jumprt :
1905 case Hexagon::J2_jumprf :
1906 case Hexagon::J2_jumprtnewpt :
1907 case Hexagon::J2_jumprfnewpt :
1908 case Hexagon::J2_jumprtnew :
1909 case Hexagon::J2_jumprfnew :
1910 return true;
1911 }
1912 return false;
1913 }
1914
1915
1916 // Return true if a given MI can accommodate the given offset.
1917 // Use a conservative estimate as opposed to the exact number.
1918 // TODO: This will need to be changed to use MC level
1919 // definition of instruction extendable field size.
1920 bool HexagonInstrInfo::isJumpWithinBranchRange(const MachineInstr *MI,
1921 unsigned offset) const {
1922 // This selection of jump instructions matches what AnalyzeBranch can
1923 // parse, plus NVJ (new-value jumps).
1924 if (isNewValueJump(MI)) // r9:2
1925 return isInt<11>(offset);
1926
1927 switch (MI->getOpcode()) {
1928 // Still missing Jump to address condition on register value.
1929 default:
1930 return false;
1931 case Hexagon::J2_jump: // bits<24> dst; // r22:2
1932 case Hexagon::J2_call:
1933 case Hexagon::CALLv3nr:
1934 return isInt<24>(offset);
1935 case Hexagon::J2_jumpt: //bits<17> dst; // r15:2
1936 case Hexagon::J2_jumpf:
1937 case Hexagon::J2_jumptnew:
1938 case Hexagon::J2_jumptnewpt:
1939 case Hexagon::J2_jumpfnew:
1940 case Hexagon::J2_jumpfnewpt:
1941 case Hexagon::J2_callt:
1942 case Hexagon::J2_callf:
1943 return isInt<17>(offset);
1944 case Hexagon::J2_loop0i:
1945 case Hexagon::J2_loop0iext:
1946 case Hexagon::J2_loop0r:
1947 case Hexagon::J2_loop0rext:
1948 case Hexagon::J2_loop1i:
1949 case Hexagon::J2_loop1iext:
1950 case Hexagon::J2_loop1r:
1951 case Hexagon::J2_loop1rext:
1952 return isInt<9>(offset);
1953 // TODO: Add all the compound branches here. Can we do this in Relation model?
1954 case Hexagon::J4_cmpeqi_tp0_jump_nt:
1955 case Hexagon::J4_cmpeqi_tp1_jump_nt:
1956 return isInt<11>(offset);
1957 }
1958 }
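// Worked example, assuming a conditional jump such as J2_jumpt (r15:2
// encoding): isInt<17>(offset) accepts values in [-65536, 65535], so an
// offset of 40000 is considered reachable while 70000 is not and would
// need relaxation or a constant extender.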
1959
1960
1961 bool HexagonInstrInfo::isLateInstrFeedsEarlyInstr(const MachineInstr *LRMI,
1962 const MachineInstr *ESMI) const {
1963 if (!LRMI || !ESMI)
1964 return false;
1965
1966 bool isLate = isLateResultInstr(LRMI);
1967 bool isEarly = isEarlySourceInstr(ESMI);
1968
1969 DEBUG(dbgs() << "V60" << (isLate ? "-LR " : " -- "));
1970 DEBUG(LRMI->dump());
1971 DEBUG(dbgs() << "V60" << (isEarly ? "-ES " : " -- "));
1972 DEBUG(ESMI->dump());
1973
1974 if (isLate && isEarly) {
1975 DEBUG(dbgs() << "++Is Late Result feeding Early Source\n");
1976 return true;
1977 }
1978
1979 return false;
1980 }
1981
1982
1983 bool HexagonInstrInfo::isLateResultInstr(const MachineInstr *MI) const {
1984 if (!MI)
1985 return false;
1986
1987 switch (MI->getOpcode()) {
1988 case TargetOpcode::EXTRACT_SUBREG:
1989 case TargetOpcode::INSERT_SUBREG:
1990 case TargetOpcode::SUBREG_TO_REG:
1991 case TargetOpcode::REG_SEQUENCE:
1992 case TargetOpcode::IMPLICIT_DEF:
1993 case TargetOpcode::COPY:
1994 case TargetOpcode::INLINEASM:
1995 case TargetOpcode::PHI:
1996 return false;
1997 default:
1998 break;
1999 }
2000
2001 unsigned SchedClass = MI->getDesc().getSchedClass();
2002
2003 switch (SchedClass) {
2004 case Hexagon::Sched::ALU32_2op_tc_1_SLOT0123:
2005 case Hexagon::Sched::ALU32_3op_tc_1_SLOT0123:
2006 case Hexagon::Sched::ALU32_ADDI_tc_1_SLOT0123:
2007 case Hexagon::Sched::ALU64_tc_1_SLOT23:
2008 case Hexagon::Sched::EXTENDER_tc_1_SLOT0123:
2009 case Hexagon::Sched::S_2op_tc_1_SLOT23:
2010 case Hexagon::Sched::S_3op_tc_1_SLOT23:
2011 case Hexagon::Sched::V2LDST_tc_ld_SLOT01:
2012 case Hexagon::Sched::V2LDST_tc_st_SLOT0:
2013 case Hexagon::Sched::V2LDST_tc_st_SLOT01:
2014 case Hexagon::Sched::V4LDST_tc_ld_SLOT01:
2015 case Hexagon::Sched::V4LDST_tc_st_SLOT0:
2016 case Hexagon::Sched::V4LDST_tc_st_SLOT01:
2017 return false;
2018 }
2019 return true;
2020 }
2021
2022
2023 bool HexagonInstrInfo::isLateSourceInstr(const MachineInstr *MI) const {
2024 if (!MI)
2025 return false;
2026
2027 // Instructions with iclass A_CVI_VX and attribute A_CVI_LATE use a multiply
2028 // resource, but all their operands can be received late, like an ALU instruction.
2029 return MI->getDesc().getSchedClass() == Hexagon::Sched::CVI_VX_LATE;
2030 }
2031
2032
2033 bool HexagonInstrInfo::isLoopN(const MachineInstr *MI) const {
2034 unsigned Opcode = MI->getOpcode();
2035 return Opcode == Hexagon::J2_loop0i ||
2036 Opcode == Hexagon::J2_loop0r ||
2037 Opcode == Hexagon::J2_loop0iext ||
2038 Opcode == Hexagon::J2_loop0rext ||
2039 Opcode == Hexagon::J2_loop1i ||
2040 Opcode == Hexagon::J2_loop1r ||
2041 Opcode == Hexagon::J2_loop1iext ||
2042 Opcode == Hexagon::J2_loop1rext;
2043 }
2044
2045
2046 bool HexagonInstrInfo::isMemOp(const MachineInstr *MI) const {
2047 switch (MI->getOpcode()) {
2048 default: return false;
2049 case Hexagon::L4_iadd_memopw_io :
2050 case Hexagon::L4_isub_memopw_io :
2051 case Hexagon::L4_add_memopw_io :
2052 case Hexagon::L4_sub_memopw_io :
2053 case Hexagon::L4_and_memopw_io :
2054 case Hexagon::L4_or_memopw_io :
2055 case Hexagon::L4_iadd_memoph_io :
2056 case Hexagon::L4_isub_memoph_io :
2057 case Hexagon::L4_add_memoph_io :
2058 case Hexagon::L4_sub_memoph_io :
2059 case Hexagon::L4_and_memoph_io :
2060 case Hexagon::L4_or_memoph_io :
2061 case Hexagon::L4_iadd_memopb_io :
2062 case Hexagon::L4_isub_memopb_io :
2063 case Hexagon::L4_add_memopb_io :
2064 case Hexagon::L4_sub_memopb_io :
2065 case Hexagon::L4_and_memopb_io :
2066 case Hexagon::L4_or_memopb_io :
2067 case Hexagon::L4_ior_memopb_io:
2068 case Hexagon::L4_ior_memoph_io:
2069 case Hexagon::L4_ior_memopw_io:
2070 case Hexagon::L4_iand_memopb_io:
2071 case Hexagon::L4_iand_memoph_io:
2072 case Hexagon::L4_iand_memopw_io:
2073 return true;
2074 }
2075 return false;
2076 }
2077
2078
2079 bool HexagonInstrInfo::isNewValue(const MachineInstr* MI) const {
2080 const uint64_t F = MI->getDesc().TSFlags;
2081 return (F >> HexagonII::NewValuePos) & HexagonII::NewValueMask;
2082 }
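// The TSFlags queries in this file all follow the same pattern: shift the
// per-instruction flag word right by the field's bit position and mask off
// the field width. A hedged example with made-up values: if NewValuePos were
// 5 and NewValueMask were 0x1, then for F = 0b100000 the expression
// (F >> 5) & 0x1 yields 1, i.e. the instruction would be a new-value
// instruction. The real positions and masks are defined with the HexagonII
// flag enums, not here.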
2083
2084
2085 bool HexagonInstrInfo::isNewValue(unsigned Opcode) const {
2086 const uint64_t F = get(Opcode).TSFlags;
2087 return (F >> HexagonII::NewValuePos) & HexagonII::NewValueMask;
2088 }
2089
2090
2091 bool HexagonInstrInfo::isNewValueInst(const MachineInstr *MI) const {
2092 return isNewValueJump(MI) || isNewValueStore(MI);
2093 }
2094
2095
2096 bool HexagonInstrInfo::isNewValueJump(const MachineInstr *MI) const {
2097 return isNewValue(MI) && MI->isBranch();
2098 }
2099
2100
2101 bool HexagonInstrInfo::isNewValueJump(unsigned Opcode) const {
2102 return isNewValue(Opcode) && get(Opcode).isBranch() && isPredicated(Opcode);
2103 }
2104
2105
2106 bool HexagonInstrInfo::isNewValueStore(const MachineInstr *MI) const {
2107 const uint64_t F = MI->getDesc().TSFlags;
2108 return (F >> HexagonII::NVStorePos) & HexagonII::NVStoreMask;
2109 }
2110
2111
2112 bool HexagonInstrInfo::isNewValueStore(unsigned Opcode) const {
2113 const uint64_t F = get(Opcode).TSFlags;
2114 return (F >> HexagonII::NVStorePos) & HexagonII::NVStoreMask;
2115 }
2116
2117
2118 // Returns true if a particular operand is extendable for an instruction.
2119 bool HexagonInstrInfo::isOperandExtended(const MachineInstr *MI,
2120 unsigned OperandNum) const {
2121 const uint64_t F = MI->getDesc().TSFlags;
2122 return ((F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask)
2123 == OperandNum;
2124 }
2125
2126
2127 bool HexagonInstrInfo::isPostIncrement(const MachineInstr* MI) const {
2128 return getAddrMode(MI) == HexagonII::PostInc;
2129 }
2130
2131
2132 bool HexagonInstrInfo::isPredicatedNew(const MachineInstr *MI) const {
2133 const uint64_t F = MI->getDesc().TSFlags;
2134 assert(isPredicated(MI));
2135 return (F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask;
2136 }
2137
2138
2139 bool HexagonInstrInfo::isPredicatedNew(unsigned Opcode) const {
2140 const uint64_t F = get(Opcode).TSFlags;
2141 assert(isPredicated(Opcode));
2142 return (F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask;
2143 }
2144
2145
2146 bool HexagonInstrInfo::isPredicatedTrue(const MachineInstr *MI) const {
2147 const uint64_t F = MI->getDesc().TSFlags;
2148 return !((F >> HexagonII::PredicatedFalsePos) &
2149 HexagonII::PredicatedFalseMask);
2150 }
2151
2152
2153 bool HexagonInstrInfo::isPredicatedTrue(unsigned Opcode) const {
2154 const uint64_t F = get(Opcode).TSFlags;
2155 // Make sure that the instruction is predicated.
2156 assert((F>> HexagonII::PredicatedPos) & HexagonII::PredicatedMask);
2157 return !((F >> HexagonII::PredicatedFalsePos) &
2158 HexagonII::PredicatedFalseMask);
2159 }
2160
2161
2162 bool HexagonInstrInfo::isPredicated(unsigned Opcode) const {
2163 const uint64_t F = get(Opcode).TSFlags;
2164 return (F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask;
2165 }
2166
2167
2168 bool HexagonInstrInfo::isPredicateLate(unsigned Opcode) const {
2169 const uint64_t F = get(Opcode).TSFlags;
2170 return (F >> HexagonII::PredicateLatePos) & HexagonII::PredicateLateMask;
2171 }
2172
2173
2174 bool HexagonInstrInfo::isPredictedTaken(unsigned Opcode) const {
2175 const uint64_t F = get(Opcode).TSFlags;
2176 assert(get(Opcode).isBranch() &&
2177 (isPredicatedNew(Opcode) || isNewValue(Opcode)));
2178 return (F >> HexagonII::TakenPos) & HexagonII::TakenMask;
2179 }
2180
2181
2182 bool HexagonInstrInfo::isSaveCalleeSavedRegsCall(const MachineInstr *MI) const {
2183 return MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4 ||
2184 MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT;
2185 }
2186
2187
2188 bool HexagonInstrInfo::isSolo(const MachineInstr* MI) const {
2189 const uint64_t F = MI->getDesc().TSFlags;
2190 return (F >> HexagonII::SoloPos) & HexagonII::SoloMask;
2191 }
2192
2193
2194 bool HexagonInstrInfo::isSpillPredRegOp(const MachineInstr *MI) const {
2195 switch (MI->getOpcode()) {
2196 case Hexagon::STriw_pred :
2197 case Hexagon::LDriw_pred :
2198 return true;
2199 default:
2200 return false;
2201 }
2202 }
2203
2204
2205 // Returns true when MI has the timing class TC1.
2206 bool HexagonInstrInfo::isTC1(const MachineInstr *MI) const {
2207 unsigned SchedClass = MI->getDesc().getSchedClass();
2208 switch (SchedClass) {
2209 case Hexagon::Sched::ALU32_2op_tc_1_SLOT0123:
2210 case Hexagon::Sched::ALU32_3op_tc_1_SLOT0123:
2211 case Hexagon::Sched::ALU32_ADDI_tc_1_SLOT0123:
2212 case Hexagon::Sched::ALU64_tc_1_SLOT23:
2213 case Hexagon::Sched::EXTENDER_tc_1_SLOT0123:
2214 //case Hexagon::Sched::M_tc_1_SLOT23:
2215 case Hexagon::Sched::S_2op_tc_1_SLOT23:
2216 case Hexagon::Sched::S_3op_tc_1_SLOT23:
2217 return true;
2218
2219 default:
2220 return false;
2221 }
2222 }
2223
2224
2225 bool HexagonInstrInfo::isTC2(const MachineInstr *MI) const {
2226 unsigned SchedClass = MI->getDesc().getSchedClass();
2227 switch (SchedClass) {
2228 case Hexagon::Sched::ALU32_3op_tc_2_SLOT0123:
2229 case Hexagon::Sched::ALU64_tc_2_SLOT23:
2230 case Hexagon::Sched::CR_tc_2_SLOT3:
2231 case Hexagon::Sched::M_tc_2_SLOT23:
2232 case Hexagon::Sched::S_2op_tc_2_SLOT23:
2233 case Hexagon::Sched::S_3op_tc_2_SLOT23:
2234 return true;
2235
2236 default:
2237 return false;
2238 }
2239 }
2240
2241
2242 bool HexagonInstrInfo::isTC2Early(const MachineInstr *MI) const {
2243 unsigned SchedClass = MI->getDesc().getSchedClass();
2244 switch (SchedClass) {
2245 case Hexagon::Sched::ALU32_2op_tc_2early_SLOT0123:
2246 case Hexagon::Sched::ALU32_3op_tc_2early_SLOT0123:
2247 case Hexagon::Sched::ALU64_tc_2early_SLOT23:
2248 case Hexagon::Sched::CR_tc_2early_SLOT23:
2249 case Hexagon::Sched::CR_tc_2early_SLOT3:
2250 case Hexagon::Sched::J_tc_2early_SLOT0123:
2251 case Hexagon::Sched::J_tc_2early_SLOT2:
2252 case Hexagon::Sched::J_tc_2early_SLOT23:
2253 case Hexagon::Sched::S_2op_tc_2early_SLOT23:
2254 case Hexagon::Sched::S_3op_tc_2early_SLOT23:
2255 return true;
2256
2257 default:
2258 return false;
2259 }
2260 }
2261
2262
2263 bool HexagonInstrInfo::isTC4x(const MachineInstr *MI) const {
2264 if (!MI)
2265 return false;
2266
2267 unsigned SchedClass = MI->getDesc().getSchedClass();
2268 return SchedClass == Hexagon::Sched::M_tc_3or4x_SLOT23;
2269 }
2270
2271
2272 bool HexagonInstrInfo::isV60VectorInstruction(const MachineInstr *MI) const {
2273 if (!MI)
2274 return false;
2275
2276 const uint64_t V = getType(MI);
2277 return HexagonII::TypeCVI_FIRST <= V && V <= HexagonII::TypeCVI_LAST;
2278 }
2279
2280
2281 // Check if the Offset is a valid auto-inc imm by Load/Store Type.
2282 //
2283 bool HexagonInstrInfo::isValidAutoIncImm(const EVT VT, const int Offset) const {
2284 if (VT == MVT::v16i32 || VT == MVT::v8i64 ||
2285 VT == MVT::v32i16 || VT == MVT::v64i8) {
2286 return (Offset >= Hexagon_MEMV_AUTOINC_MIN &&
2287 Offset <= Hexagon_MEMV_AUTOINC_MAX &&
2288 (Offset & 0x3f) == 0);
2289 }
2290 // 128B
2291 if (VT == MVT::v32i32 || VT == MVT::v16i64 ||
2292 VT == MVT::v64i16 || VT == MVT::v128i8) {
2293 return (Offset >= Hexagon_MEMV_AUTOINC_MIN_128B &&
2294 Offset <= Hexagon_MEMV_AUTOINC_MAX_128B &&
2295 (Offset & 0x7f) == 0);
2296 }
2297 if (VT == MVT::i64) {
2298 return (Offset >= Hexagon_MEMD_AUTOINC_MIN &&
2299 Offset <= Hexagon_MEMD_AUTOINC_MAX &&
2300 (Offset & 0x7) == 0);
2301 }
2302 if (VT == MVT::i32) {
2303 return (Offset >= Hexagon_MEMW_AUTOINC_MIN &&
2304 Offset <= Hexagon_MEMW_AUTOINC_MAX &&
2305 (Offset & 0x3) == 0);
2306 }
2307 if (VT == MVT::i16) {
2308 return (Offset >= Hexagon_MEMH_AUTOINC_MIN &&
2309 Offset <= Hexagon_MEMH_AUTOINC_MAX &&
2310 (Offset & 0x1) == 0);
2311 }
2312 if (VT == MVT::i8) {
2313 return (Offset >= Hexagon_MEMB_AUTOINC_MIN &&
2314 Offset <= Hexagon_MEMB_AUTOINC_MAX);
2315 }
2316 llvm_unreachable("Not an auto-inc opc!");
2317 }
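// Illustrative check for the MVT::i32 case above (hypothetical offsets):
// the offset must be word aligned, i.e. (Offset & 0x3) == 0, and lie within
// the MEMW auto-increment range, so +12 is accepted while +14 is rejected
// for being misaligned even though it is within range.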
2318
2319
2320 bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
2321 bool Extend) const {
2322 // This function is to check whether the "Offset" is in the correct range of
2323 // the given "Opcode". If "Offset" is not in the correct range, "A2_addi" is
2324 // inserted to calculate the final address. Due to this reason, the function
2325 // assumes that the "Offset" has correct alignment.
2326 // We used to assert if the offset was not properly aligned, however,
2327 // there are cases where a misaligned pointer recast can cause this
2328 // problem, and we need to allow for it. The front end warns of such
2329 // misaligns with respect to load size.
2330
2331 switch (Opcode) {
2332 case Hexagon::STriq_pred_V6:
2333 case Hexagon::STriq_pred_vec_V6:
2334 case Hexagon::STriv_pseudo_V6:
2335 case Hexagon::STrivv_pseudo_V6:
2336 case Hexagon::LDriq_pred_V6:
2337 case Hexagon::LDriq_pred_vec_V6:
2338 case Hexagon::LDriv_pseudo_V6:
2339 case Hexagon::LDrivv_pseudo_V6:
2340 case Hexagon::LDrivv_indexed:
2341 case Hexagon::STrivv_indexed:
2342 case Hexagon::V6_vL32b_ai:
2343 case Hexagon::V6_vS32b_ai:
2344 case Hexagon::V6_vL32Ub_ai:
2345 case Hexagon::V6_vS32Ub_ai:
2346 return (Offset >= Hexagon_MEMV_OFFSET_MIN) &&
2347 (Offset <= Hexagon_MEMV_OFFSET_MAX);
2348
2349 case Hexagon::STriq_pred_V6_128B:
2350 case Hexagon::STriq_pred_vec_V6_128B:
2351 case Hexagon::STriv_pseudo_V6_128B:
2352 case Hexagon::STrivv_pseudo_V6_128B:
2353 case Hexagon::LDriq_pred_V6_128B:
2354 case Hexagon::LDriq_pred_vec_V6_128B:
2355 case Hexagon::LDriv_pseudo_V6_128B:
2356 case Hexagon::LDrivv_pseudo_V6_128B:
2357 case Hexagon::LDrivv_indexed_128B:
2358 case Hexagon::STrivv_indexed_128B:
2359 case Hexagon::V6_vL32b_ai_128B:
2360 case Hexagon::V6_vS32b_ai_128B:
2361 case Hexagon::V6_vL32Ub_ai_128B:
2362 case Hexagon::V6_vS32Ub_ai_128B:
2363 return (Offset >= Hexagon_MEMV_OFFSET_MIN_128B) &&
2364 (Offset <= Hexagon_MEMV_OFFSET_MAX_128B);
2365
2366 case Hexagon::J2_loop0i:
2367 case Hexagon::J2_loop1i:
2368 return isUInt<10>(Offset);
2369 }
2370
2371 if (Extend)
2372 return true;
2373
2374 switch (Opcode) {
2375 case Hexagon::L2_loadri_io:
2376 case Hexagon::S2_storeri_io:
2377 return (Offset >= Hexagon_MEMW_OFFSET_MIN) &&
2378 (Offset <= Hexagon_MEMW_OFFSET_MAX);
2379
2380 case Hexagon::L2_loadrd_io:
2381 case Hexagon::S2_storerd_io:
2382 return (Offset >= Hexagon_MEMD_OFFSET_MIN) &&
2383 (Offset <= Hexagon_MEMD_OFFSET_MAX);
2384
2385 case Hexagon::L2_loadrh_io:
2386 case Hexagon::L2_loadruh_io:
2387 case Hexagon::S2_storerh_io:
2388 return (Offset >= Hexagon_MEMH_OFFSET_MIN) &&
2389 (Offset <= Hexagon_MEMH_OFFSET_MAX);
2390
2391 case Hexagon::L2_loadrb_io:
2392 case Hexagon::L2_loadrub_io:
2393 case Hexagon::S2_storerb_io:
2394 return (Offset >= Hexagon_MEMB_OFFSET_MIN) &&
2395 (Offset <= Hexagon_MEMB_OFFSET_MAX);
2396
2397 case Hexagon::A2_addi:
2398 return (Offset >= Hexagon_ADDI_OFFSET_MIN) &&
2399 (Offset <= Hexagon_ADDI_OFFSET_MAX);
2400
2401 case Hexagon::L4_iadd_memopw_io :
2402 case Hexagon::L4_isub_memopw_io :
2403 case Hexagon::L4_add_memopw_io :
2404 case Hexagon::L4_sub_memopw_io :
2405 case Hexagon::L4_and_memopw_io :
2406 case Hexagon::L4_or_memopw_io :
2407 return (0 <= Offset && Offset <= 255);
2408
2409 case Hexagon::L4_iadd_memoph_io :
2410 case Hexagon::L4_isub_memoph_io :
2411 case Hexagon::L4_add_memoph_io :
2412 case Hexagon::L4_sub_memoph_io :
2413 case Hexagon::L4_and_memoph_io :
2414 case Hexagon::L4_or_memoph_io :
2415 return (0 <= Offset && Offset <= 127);
2416
2417 case Hexagon::L4_iadd_memopb_io :
2418 case Hexagon::L4_isub_memopb_io :
2419 case Hexagon::L4_add_memopb_io :
2420 case Hexagon::L4_sub_memopb_io :
2421 case Hexagon::L4_and_memopb_io :
2422 case Hexagon::L4_or_memopb_io :
2423 return (0 <= Offset && Offset <= 63);
2424
2425 // LDriw_pred and STriw_pred are pseudo operations, so they have to accept
2426 // offsets of any size. A later pass knows how to handle them.
2427 case Hexagon::STriw_pred:
2428 case Hexagon::LDriw_pred:
2429 return true;
2430
2431 case Hexagon::TFR_FI:
2432 case Hexagon::TFR_FIA:
2433 case Hexagon::INLINEASM:
2434 return true;
2435
2436 case Hexagon::L2_ploadrbt_io:
2437 case Hexagon::L2_ploadrbf_io:
2438 case Hexagon::L2_ploadrubt_io:
2439 case Hexagon::L2_ploadrubf_io:
2440 case Hexagon::S2_pstorerbt_io:
2441 case Hexagon::S2_pstorerbf_io:
2442 case Hexagon::S4_storeirb_io:
2443 case Hexagon::S4_storeirbt_io:
2444 case Hexagon::S4_storeirbf_io:
2445 return isUInt<6>(Offset);
2446
2447 case Hexagon::L2_ploadrht_io:
2448 case Hexagon::L2_ploadrhf_io:
2449 case Hexagon::L2_ploadruht_io:
2450 case Hexagon::L2_ploadruhf_io:
2451 case Hexagon::S2_pstorerht_io:
2452 case Hexagon::S2_pstorerhf_io:
2453 case Hexagon::S4_storeirh_io:
2454 case Hexagon::S4_storeirht_io:
2455 case Hexagon::S4_storeirhf_io:
2456 return isShiftedUInt<6,1>(Offset);
2457
2458 case Hexagon::L2_ploadrit_io:
2459 case Hexagon::L2_ploadrif_io:
2460 case Hexagon::S2_pstorerit_io:
2461 case Hexagon::S2_pstorerif_io:
2462 case Hexagon::S4_storeiri_io:
2463 case Hexagon::S4_storeirit_io:
2464 case Hexagon::S4_storeirif_io:
2465 return isShiftedUInt<6,2>(Offset);
2466
2467 case Hexagon::L2_ploadrdt_io:
2468 case Hexagon::L2_ploadrdf_io:
2469 case Hexagon::S2_pstorerdt_io:
2470 case Hexagon::S2_pstorerdf_io:
2471 return isShiftedUInt<6,3>(Offset);
2472 } // switch
2473
2474 llvm_unreachable("No offset range is defined for this opcode. "
2475 "Please define it in the above switch statement!");
2476 }
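// Worked example for the predicated half-word stores above: the offset must
// satisfy isShiftedUInt<6,1>(Offset), i.e. it is even and fits in 6 bits
// after the implied shift, giving the range [0, 126] in steps of 2. An
// offset of 64 is valid, 63 is rejected (odd), and 128 is rejected (out of
// range).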
2477
2478
2479 bool HexagonInstrInfo::isVecAcc(const MachineInstr *MI) const {
2480 return MI && isV60VectorInstruction(MI) && isAccumulator(MI);
2481 }
2482
2483
2484 bool HexagonInstrInfo::isVecALU(const MachineInstr *MI) const {
2485 if (!MI)
2486 return false;
2487 const uint64_t F = get(MI->getOpcode()).TSFlags;
2488 const uint64_t V = ((F >> HexagonII::TypePos) & HexagonII::TypeMask);
2489 return
2490 V == HexagonII::TypeCVI_VA ||
2491 V == HexagonII::TypeCVI_VA_DV;
2492 }
2493
2494
2495 bool HexagonInstrInfo::isVecUsableNextPacket(const MachineInstr *ProdMI,
2496 const MachineInstr *ConsMI) const {
2497 if (EnableACCForwarding && isVecAcc(ProdMI) && isVecAcc(ConsMI))
2498 return true;
2499
2500 if (EnableALUForwarding && (isVecALU(ConsMI) || isLateSourceInstr(ConsMI)))
2501 return true;
2502
2503 if (mayBeNewStore(ConsMI))
2504 return true;
2505
2506 return false;
2507 }
2508
2509
2510 /// \brief Can these instructions execute at the same time in a bundle.
2511 bool HexagonInstrInfo::canExecuteInBundle(const MachineInstr *First,
2512 const MachineInstr *Second) const {
2513 if (DisableNVSchedule)
2514 return false;
2515 if (mayBeNewStore(Second)) {
2516 // Make sure the definition of the first instruction is the value being
2517 // stored.
2518 const MachineOperand &Stored =
2519 Second->getOperand(Second->getNumOperands() - 1);
2520 if (!Stored.isReg())
2521 return false;
2522 for (unsigned i = 0, e = First->getNumOperands(); i < e; ++i) {
2523 const MachineOperand &Op = First->getOperand(i);
2524 if (Op.isReg() && Op.isDef() && Op.getReg() == Stored.getReg())
2525 return true;
2526 }
2527 }
2528 return false;
2529 }
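// Hedged example of the pairing this allows (hypothetical instructions):
//   First:  R2 = add(R3, R4)
//   Second: memw(R0+#0) = R2    ; promotable to memw(R0+#0) = R2.new
// The last operand of Second is R2 and First defines R2, so the two can be
// bundled and the store can later use the .new value.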
2530
2531
2532 bool HexagonInstrInfo::hasEHLabel(const MachineBasicBlock *B) const {
2533 for (auto &I : *B)
2534 if (I.isEHLabel())
2535 return true;
2536 return false;
2537 }
2538
2539
2540 // Returns true if an instruction can be converted into a non-extended
2541 // equivalent instruction.
2542 bool HexagonInstrInfo::hasNonExtEquivalent(const MachineInstr *MI) const {
2543 short NonExtOpcode;
2544 // Check if the instruction has a register form that uses a register in place
2545 // of the extended operand; if so, that register form is the non-extended equivalent.
2546 if (Hexagon::getRegForm(MI->getOpcode()) >= 0)
2547 return true;
2548
2549 if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) {
2550 // Check addressing mode and retrieve non-ext equivalent instruction.
2551
2552 switch (getAddrMode(MI)) {
2553 case HexagonII::Absolute :
2554 // Load/store with absolute addressing mode can be converted into
2555 // base+offset mode.
2556 NonExtOpcode = Hexagon::getBaseWithImmOffset(MI->getOpcode());
2557 break;
2558 case HexagonII::BaseImmOffset :
2559 // Load/store with base+offset addressing mode can be converted into
2560 // base+register-offset addressing mode. However, the left-shift operand
2561 // should be set to 0.
2562 NonExtOpcode = Hexagon::getBaseWithRegOffset(MI->getOpcode());
2563 break;
2564 case HexagonII::BaseLongOffset:
2565 NonExtOpcode = Hexagon::getRegShlForm(MI->getOpcode());
2566 break;
2567 default:
2568 return false;
2569 }
2570 if (NonExtOpcode < 0)
2571 return false;
2572 return true;
2573 }
2574 return false;
2575 }
2576
2577
2578 bool HexagonInstrInfo::hasPseudoInstrPair(const MachineInstr *MI) const {
2579 return Hexagon::getRealHWInstr(MI->getOpcode(),
2580 Hexagon::InstrType_Pseudo) >= 0;
2581 }
2582
2583
2584 bool HexagonInstrInfo::hasUncondBranch(const MachineBasicBlock *B)
2585 const {
2586 MachineBasicBlock::const_iterator I = B->getFirstTerminator(), E = B->end();
2587 while (I != E) {
2588 if (I->isBarrier())
2589 return true;
2590 ++I;
2591 }
2592 return false;
2593 }
2594
2595
2596 // Returns true if a load instruction can be promoted to a .cur load.
2597 bool HexagonInstrInfo::mayBeCurLoad(const MachineInstr *MI) const {
2598 auto &HST = MI->getParent()->getParent()->getSubtarget<HexagonSubtarget>();
2599 const uint64_t F = MI->getDesc().TSFlags;
2600 return ((F >> HexagonII::mayCVLoadPos) & HexagonII::mayCVLoadMask) &&
2601 HST.hasV60TOps();
2602 }
2603
2604
2605 // Returns true if a store instruction can be promoted to a new-value store.
2606 bool HexagonInstrInfo::mayBeNewStore(const MachineInstr *MI) const {
2607 const uint64_t F = MI->getDesc().TSFlags;
2608 return (F >> HexagonII::mayNVStorePos) & HexagonII::mayNVStoreMask;
2609 }
2610
2611
2612 bool HexagonInstrInfo::producesStall(const MachineInstr *ProdMI,
2613 const MachineInstr *ConsMI) const {
2614 // There is no stall when ProdMI is not a V60 vector.
2615 if (!isV60VectorInstruction(ProdMI))
2616 return false;
2617
2618 // There is no stall when ProdMI and ConsMI are not dependent.
2619 if (!isDependent(ProdMI, ConsMI))
2620 return false;
2621
2622 // When Forward Scheduling is enabled, there is no stall if ProdMI and ConsMI
2623 // are scheduled in consecutive packets.
2624 if (isVecUsableNextPacket(ProdMI, ConsMI))
2625 return false;
2626
2627 return true;
2628 }
2629
2630
2631 bool HexagonInstrInfo::producesStall(const MachineInstr *MI,
2632 MachineBasicBlock::const_instr_iterator BII) const {
2633 // There is no stall when I is not a V60 vector.
2634 if (!isV60VectorInstruction(MI))
2635 return false;
2636
2637 MachineBasicBlock::const_instr_iterator MII = BII;
2638 MachineBasicBlock::const_instr_iterator MIE = MII->getParent()->instr_end();
2639
2640 if (!(*MII).isBundle()) {
2641 const MachineInstr *J = &*MII;
2642 if (!isV60VectorInstruction(J))
2643 return false;
2644 else if (isVecUsableNextPacket(J, MI))
2645 return false;
2646 return true;
2647 }
2648
2649 for (++MII; MII != MIE && MII->isInsideBundle(); ++MII) {
2650 const MachineInstr *J = &*MII;
2651 if (producesStall(J, MI))
2652 return true;
2653 }
2654 return false;
2655 }
2656
2657
2658 bool HexagonInstrInfo::predCanBeUsedAsDotNew(const MachineInstr *MI,
2659 unsigned PredReg) const {
2660 for (unsigned opNum = 0; opNum < MI->getNumOperands(); opNum++) {
2661 const MachineOperand &MO = MI->getOperand(opNum);
2662 if (MO.isReg() && MO.isDef() && MO.isImplicit() && (MO.getReg() == PredReg))
2663 return false; // Predicate register must be explicitly defined.
2664 }
2665
2666 // Hexagon Programmer's Reference says that decbin, memw_locked, and
2667 // memd_locked cannot be used as .new as well,
2668 // but we don't seem to have these instructions defined.
2669 return MI->getOpcode() != Hexagon::A4_tlbmatch;
2670 }
2671
2672
2673 bool HexagonInstrInfo::PredOpcodeHasJMP_c(unsigned Opcode) const {
2674 return (Opcode == Hexagon::J2_jumpt) ||
2675 (Opcode == Hexagon::J2_jumpf) ||
2676 (Opcode == Hexagon::J2_jumptnew) ||
2677 (Opcode == Hexagon::J2_jumpfnew) ||
2678 (Opcode == Hexagon::J2_jumptnewpt) ||
2679 (Opcode == Hexagon::J2_jumpfnewpt);
2680 }
2681
2682
2683 bool HexagonInstrInfo::predOpcodeHasNot(ArrayRef<MachineOperand> Cond) const {
2684 if (Cond.empty() || !isPredicated(Cond[0].getImm()))
2685 return false;
2686 return !isPredicatedTrue(Cond[0].getImm());
2687 }
2688
2689
2690 unsigned HexagonInstrInfo::getAddrMode(const MachineInstr* MI) const {
2691 const uint64_t F = MI->getDesc().TSFlags;
2692 return (F >> HexagonII::AddrModePos) & HexagonII::AddrModeMask;
2693 }
2694
2695
2696 // Returns the base register in a memory access (load/store). The offset is
2697 // returned in Offset and the access size is returned in AccessSize.
2698 unsigned HexagonInstrInfo::getBaseAndOffset(const MachineInstr *MI,
2699 int &Offset, unsigned &AccessSize) const {
2700 // Return 0 if it is not a base+offset type instruction or a MemOp.
2701 if (getAddrMode(MI) != HexagonII::BaseImmOffset &&
2702 getAddrMode(MI) != HexagonII::BaseLongOffset &&
2703 !isMemOp(MI) && !isPostIncrement(MI))
2704 return 0;
2705
2706 // Since it is a memory access instruction, getMemAccessSize() should never
2707 // return 0.
2708 assert (getMemAccessSize(MI) &&
2709 "BaseImmOffset or BaseLongOffset or MemOp without accessSize");
2710
2711 // Return Values of getMemAccessSize() are
2712 // 0 - Checked in the assert above.
2713 // 1, 2, 3, 4 & 7, 8 - The statement below is correct for all these.
2714 // MemAccessSize is represented as 1+log2(N) where N is the size in bytes.
2715 AccessSize = (1U << (getMemAccessSize(MI) - 1));
2716
2717 unsigned basePos = 0, offsetPos = 0;
2718 if (!getBaseAndOffsetPosition(MI, basePos, offsetPos))
2719 return 0;
2720
2721 // Post increment updates its EA after the mem access,
2722 // so we need to treat its offset as zero.
2723 if (isPostIncrement(MI))
2724 Offset = 0;
2725 else {
2726 Offset = MI->getOperand(offsetPos).getImm();
2727 }
2728
2729 return MI->getOperand(basePos).getReg();
2730 }
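// Sketch of the AccessSize decoding above, using the encoding described in
// the comment (1 + log2 of the size in bytes): an encoded value of 3
// corresponds to a word access, since 1u << (3 - 1) == 4 bytes; an encoded
// value of 4 gives 8 bytes for a double-word access.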
2731
2732
2733 /// Return the position of the base and offset operands for this instruction.
2734 bool HexagonInstrInfo::getBaseAndOffsetPosition(const MachineInstr *MI,
2735 unsigned &BasePos, unsigned &OffsetPos) const {
2736 // Deal with memops first.
2737 if (isMemOp(MI)) {
2738 assert (MI->getOperand(0).isReg() && MI->getOperand(1).isImm() &&
2739 "Bad Memop.");
2740 BasePos = 0;
2741 OffsetPos = 1;
2742 } else if (MI->mayStore()) {
2743 BasePos = 0;
2744 OffsetPos = 1;
2745 } else if (MI->mayLoad()) {
2746 BasePos = 1;
2747 OffsetPos = 2;
2748 } else
2749 return false;
2750
2751 if (isPredicated(MI)) {
2752 BasePos++;
2753 OffsetPos++;
2754 }
2755 if (isPostIncrement(MI)) {
2756 BasePos++;
2757 OffsetPos++;
2758 }
2759
2760 if (!MI->getOperand(BasePos).isReg() || !MI->getOperand(OffsetPos).isImm())
2761 return false;
2762
2763 return true;
2764 }
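// Illustrative result of the adjustments above (hypothetical predicated
// post-increment store): starting from BasePos = 0 / OffsetPos = 1 for a
// plain store, the predicate bumps both positions by one and the
// post-increment form bumps them again, ending at BasePos = 2 and
// OffsetPos = 3.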
2765
2766
2767 // Inserts branching instructions in reverse order of their occurrence.
2768 // e.g. jump_t t1 (i1)
2769 // jump t2 (i2)
2770 // Jumpers = {i2, i1}
2771 SmallVector<MachineInstr*, 2> HexagonInstrInfo::getBranchingInstrs(
2772 MachineBasicBlock& MBB) const {
2773 SmallVector<MachineInstr*, 2> Jumpers;
2774 // If the block has no terminators, it just falls into the block after it.
2775 MachineBasicBlock::instr_iterator I = MBB.instr_end();
2776 if (I == MBB.instr_begin())
2777 return Jumpers;
2778
2779 // A basic block may look like this:
2780 //
2781 // [ insn
2782 // EH_LABEL
2783 // insn
2784 // insn
2785 // insn
2786 // EH_LABEL
2787 // insn ]
2788 //
2789 // It has two successors but does not have a terminator.
2790 // We don't know how to handle such a block, so give up.
2791 do {
2792 --I;
2793 if (I->isEHLabel())
2794 return Jumpers;
2795 } while (I != MBB.instr_begin());
2796
2797 I = MBB.instr_end();
2798 --I;
2799
2800 while (I->isDebugValue()) {
2801 if (I == MBB.instr_begin())
2802 return Jumpers;
2803 --I;
2804 }
2805 if (!isUnpredicatedTerminator(&*I))
2806 return Jumpers;
2807
2808 // Get the last instruction in the block.
2809 MachineInstr *LastInst = &*I;
2810 Jumpers.push_back(LastInst);
2811 MachineInstr *SecondLastInst = nullptr;
2812 // Find one more terminator if present.
2813 do {
2814 if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(&*I)) {
2815 if (!SecondLastInst) {
2816 SecondLastInst = &*I;
2817 Jumpers.push_back(SecondLastInst);
2818 } else // This is a third branch.
2819 return Jumpers;
2820 }
2821 if (I == MBB.instr_begin())
2822 break;
2823 --I;
2824 } while (true);
2825 return Jumpers;
2826 }
2827
2828
2829 // Returns Operand Index for the constant extended instruction.
2830 unsigned HexagonInstrInfo::getCExtOpNum(const MachineInstr *MI) const {
2831 const uint64_t F = MI->getDesc().TSFlags;
2832 return (F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask;
2833 }
2834
2835 // See if instruction could potentially be a compound candidate.
2836 // If so, return its group. Returns HCG_None otherwise.
2837 HexagonII::CompoundGroup HexagonInstrInfo::getCompoundCandidateGroup(
2838 const MachineInstr *MI) const {
2839 unsigned DstReg, SrcReg, Src1Reg, Src2Reg;
2840
2841 switch (MI->getOpcode()) {
2842 default:
2843 return HexagonII::HCG_None;
2844 //
2845 // Compound pairs.
2846 // "p0=cmp.eq(Rs16,Rt16); if (p0.new) jump:nt #r9:2"
2847 // "Rd16=#U6 ; jump #r9:2"
2848 // "Rd16=Rs16 ; jump #r9:2"
2849 //
2850 case Hexagon::C2_cmpeq:
2851 case Hexagon::C2_cmpgt:
2852 case Hexagon::C2_cmpgtu:
2853 DstReg = MI->getOperand(0).getReg();
2854 Src1Reg = MI->getOperand(1).getReg();
2855 Src2Reg = MI->getOperand(2).getReg();
2856 if (Hexagon::PredRegsRegClass.contains(DstReg) &&
2857 (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
2858 isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg))
2859 return HexagonII::HCG_A;
2860 break;
2861 case Hexagon::C2_cmpeqi:
2862 case Hexagon::C2_cmpgti:
2863 case Hexagon::C2_cmpgtui:
2864 // P0 = cmp.eq(Rs,#u2)
2865 DstReg = MI->getOperand(0).getReg();
2866 SrcReg = MI->getOperand(1).getReg();
2867 if (Hexagon::PredRegsRegClass.contains(DstReg) &&
2868 (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
2869 isIntRegForSubInst(SrcReg) && MI->getOperand(2).isImm() &&
2870 ((isUInt<5>(MI->getOperand(2).getImm())) ||
2871 (MI->getOperand(2).getImm() == -1)))
2872 return HexagonII::HCG_A;
2873 break;
2874 case Hexagon::A2_tfr:
2875 // Rd = Rs
2876 DstReg = MI->getOperand(0).getReg();
2877 SrcReg = MI->getOperand(1).getReg();
2878 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
2879 return HexagonII::HCG_A;
2880 break;
2881 case Hexagon::A2_tfrsi:
2882 // Rd = #u6
2883 // Do not test for #u6 size since the const is getting extended
2884 // regardless and compound could be formed.
2885 DstReg = MI->getOperand(0).getReg();
2886 if (isIntRegForSubInst(DstReg))
2887 return HexagonII::HCG_A;
2888 break;
2889 case Hexagon::S2_tstbit_i:
2890 DstReg = MI->getOperand(0).getReg();
2891 Src1Reg = MI->getOperand(1).getReg();
2892 if (Hexagon::PredRegsRegClass.contains(DstReg) &&
2893 (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
2894 MI->getOperand(2).isImm() &&
2895 isIntRegForSubInst(Src1Reg) && (MI->getOperand(2).getImm() == 0))
2896 return HexagonII::HCG_A;
2897 break;
2898 // The fact that .new form is used pretty much guarantees
2899 // that predicate register will match. Nevertheless,
2900 // there could be some false positives without additional
2901 // checking.
2902 case Hexagon::J2_jumptnew:
2903 case Hexagon::J2_jumpfnew:
2904 case Hexagon::J2_jumptnewpt:
2905 case Hexagon::J2_jumpfnewpt:
2906 Src1Reg = MI->getOperand(0).getReg();
2907 if (Hexagon::PredRegsRegClass.contains(Src1Reg) &&
2908 (Hexagon::P0 == Src1Reg || Hexagon::P1 == Src1Reg))
2909 return HexagonII::HCG_B;
2910 break;
2911 // Transfer and jump:
2912 // Rd=#U6 ; jump #r9:2
2913 // Rd=Rs ; jump #r9:2
2914 // Do not test for jump range here.
2915 case Hexagon::J2_jump:
2916 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
2917 return HexagonII::HCG_C;
2918 break;
2919 }
2920
2921 return HexagonII::HCG_None;
2922 }
2923
2924
2925 // Returns -1 when there is no opcode found.
2926 unsigned HexagonInstrInfo::getCompoundOpcode(const MachineInstr *GA,
2927 const MachineInstr *GB) const {
2928 assert(getCompoundCandidateGroup(GA) == HexagonII::HCG_A);
2929 assert(getCompoundCandidateGroup(GB) == HexagonII::HCG_B);
2930 if ((GA->getOpcode() != Hexagon::C2_cmpeqi) ||
2931 (GB->getOpcode() != Hexagon::J2_jumptnew))
2932 return -1;
2933 unsigned DestReg = GA->getOperand(0).getReg();
2934 if (!GB->readsRegister(DestReg))
2935 return -1;
2936 if (DestReg == Hexagon::P0)
2937 return Hexagon::J4_cmpeqi_tp0_jump_nt;
2938 if (DestReg == Hexagon::P1)
2939 return Hexagon::J4_cmpeqi_tp1_jump_nt;
2940 return -1;
2941 }
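// Example of the only pairing currently accepted above: a compare-immediate
// that defines p0, roughly "p0 = cmp.eq(Rs,#u5)", followed by
// "if (p0.new) jump:nt #target" compounds into J4_cmpeqi_tp0_jump_nt (or the
// _tp1_ form when the predicate is p1).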
2942
2943
2944 int HexagonInstrInfo::getCondOpcode(int Opc, bool invertPredicate) const {
2945 enum Hexagon::PredSense inPredSense;
2946 inPredSense = invertPredicate ? Hexagon::PredSense_false :
2947 Hexagon::PredSense_true;
2948 int CondOpcode = Hexagon::getPredOpcode(Opc, inPredSense);
2949 if (CondOpcode >= 0) // Valid Conditional opcode/instruction
2950 return CondOpcode;
2951
2952 // This switch case will be removed once all the instructions have been
2953 // modified to use relation maps.
2954 switch(Opc) {
2955 case Hexagon::TFRI_f:
2956 return !invertPredicate ? Hexagon::TFRI_cPt_f :
2957 Hexagon::TFRI_cNotPt_f;
2958 }
2959
2960 llvm_unreachable("Unexpected predicable instruction");
2961 }
2962
2963
2964 // Return the .cur form of a given vector load.
2965 int HexagonInstrInfo::getDotCurOp(const MachineInstr* MI) const {
2966 switch (MI->getOpcode()) {
2967 default: llvm_unreachable("Unknown .cur type");
2968 case Hexagon::V6_vL32b_pi:
2969 return Hexagon::V6_vL32b_cur_pi;
2970 case Hexagon::V6_vL32b_ai:
2971 return Hexagon::V6_vL32b_cur_ai;
2972 //128B
2973 case Hexagon::V6_vL32b_pi_128B:
2974 return Hexagon::V6_vL32b_cur_pi_128B;
2975 case Hexagon::V6_vL32b_ai_128B:
2976 return Hexagon::V6_vL32b_cur_ai_128B;
2977 }
2978 return 0;
2979 }
2980
2981
2982
2983 // The diagram below shows the steps involved in the conversion of a predicated
2984 // store instruction to its .new predicated new-value form.
2985 //
2986 // p.new NV store [ if(p0.new)memw(R0+#0)=R2.new ]
2987 // ^ ^
2988 // / \ (not OK. it will cause new-value store to be
2989 // / X conditional on p0.new while R2 producer is
2990 // / \ on p0)
2991 // / \.
2992 // p.new store p.old NV store
2993 // [if(p0.new)memw(R0+#0)=R2] [if(p0)memw(R0+#0)=R2.new]
2994 // ^ ^
2995 // \ /
2996 // \ /
2997 // \ /
2998 // p.old store
2999 // [if (p0)memw(R0+#0)=R2]
3000 //
3001 //
3002 // The following set of instructions further explains the scenario where
3003 // conditional new-value store becomes invalid when promoted to .new predicate
3004 // form.
3005 //
3006 // { 1) if (p0) r0 = add(r1, r2)
3007 // 2) p0 = cmp.eq(r3, #0) }
3008 //
3009 // 3) if (p0) memb(r1+#0) = r0 --> this instruction can't be grouped with
3010 // the first two instructions because in instr 1, r0 is conditional on old value
3011 // of p0 but its use in instr 3 is conditional on p0 modified by instr 2 which
3012 // is not valid for new-value stores.
3013 // Predicated new value stores (i.e. if (p0) memw(..)=r0.new) are excluded
3014 // from the "Conditional Store" list. Because a predicated new value store
3015 // would NOT be promoted to a double dot new store. See diagram below:
3016 // This function returns yes for those stores that are predicated but not
3017 // yet promoted to predicate dot new instructions.
3018 //
3019 // +---------------------+
3020 // /-----| if (p0) memw(..)=r0 |---------\~
3021 // || +---------------------+ ||
3022 // promote || /\ /\ || promote
3023 // || /||\ /||\ ||
3024 // \||/ demote || \||/
3025 // \/ || || \/
3026 // +-------------------------+ || +-------------------------+
3027 // | if (p0.new) memw(..)=r0 | || | if (p0) memw(..)=r0.new |
3028 // +-------------------------+ || +-------------------------+
3029 // || || ||
3030 // || demote \||/
3031 // promote || \/ NOT possible
3032 // || || /\~
3033 // \||/ || /||\~
3034 // \/ || ||
3035 // +-----------------------------+
3036 // | if (p0.new) memw(..)=r0.new |
3037 // +-----------------------------+
3038 // Double Dot New Store
3039 //
3040 // Returns the most basic instruction for the .new predicated instructions and
3041 // new-value stores.
3042 // For example, all of the following instructions will be converted back to the
3043 // same instruction:
3044 // 1) if (p0.new) memw(R0+#0) = R1.new --->
3045 // 2) if (p0) memw(R0+#0)= R1.new -------> if (p0) memw(R0+#0) = R1
3046 // 3) if (p0.new) memw(R0+#0) = R1 --->
3047 //
3048 // To understand the translation of instruction 1 to its original form, consider
3049 // a packet with 3 instructions.
3050 // { p0 = cmp.eq(R0,R1)
3051 // if (p0.new) R2 = add(R3, R4)
3052 // R5 = add (R3, R1)
3053 // }
3054 // if (p0) memw(R5+#0) = R2 <--- trying to include it in the previous packet
3055 //
3056 // This instruction can be part of the previous packet only if both p0 and R2
3057 // are promoted to .new values. This promotion happens in steps, first
3058 // predicate register is promoted to .new and in the next iteration R2 is
3059 // promoted. Therefore, in case of dependence check failure (due to R5) during
3060 // next iteration, it should be converted back to its most basic form.
3061
3062
3063 // Return the new value instruction for a given store.
3064 int HexagonInstrInfo::getDotNewOp(const MachineInstr* MI) const {
3065 int NVOpcode = Hexagon::getNewValueOpcode(MI->getOpcode());
3066 if (NVOpcode >= 0) // Valid new-value store instruction.
3067 return NVOpcode;
3068
3069 switch (MI->getOpcode()) {
3070 default: llvm_unreachable("Unknown .new type");
3071 case Hexagon::S4_storerb_ur:
3072 return Hexagon::S4_storerbnew_ur;
3073
3074 case Hexagon::S2_storerb_pci:
3075 return Hexagon::S2_storerb_pci;
3076
3077 case Hexagon::S2_storeri_pci:
3078 return Hexagon::S2_storeri_pci;
3079
3080 case Hexagon::S2_storerh_pci:
3081 return Hexagon::S2_storerh_pci;
3082
3083 case Hexagon::S2_storerd_pci:
3084 return Hexagon::S2_storerd_pci;
3085
3086 case Hexagon::S2_storerf_pci:
3087 return Hexagon::S2_storerf_pci;
3088
3089 case Hexagon::V6_vS32b_ai:
3090 return Hexagon::V6_vS32b_new_ai;
3091
3092 case Hexagon::V6_vS32b_pi:
3093 return Hexagon::V6_vS32b_new_pi;
3094
3095 // 128B
3096 case Hexagon::V6_vS32b_ai_128B:
3097 return Hexagon::V6_vS32b_new_ai_128B;
3098
3099 case Hexagon::V6_vS32b_pi_128B:
3100 return Hexagon::V6_vS32b_new_pi_128B;
3101 }
3102 return 0;
3103 }
3104
3105 // Returns the opcode to use when converting MI, which is a conditional jump,
3106 // into a conditional instruction which uses the .new value of the predicate.
3107 // We also use branch probabilities to add a hint to the jump.
3108 int HexagonInstrInfo::getDotNewPredJumpOp(const MachineInstr *MI,
3109 const MachineBranchProbabilityInfo *MBPI) const {
3110 // We assume that the block can have at most two successors.
3111 bool taken = false;
3112 const MachineBasicBlock *Src = MI->getParent();
3113 const MachineOperand *BrTarget = &MI->getOperand(1);
3114 const MachineBasicBlock *Dst = BrTarget->getMBB();
3115
3116 const BranchProbability Prediction = MBPI->getEdgeProbability(Src, Dst);
3117 if (Prediction >= BranchProbability(1,2))
3118 taken = true;
3119
3120 switch (MI->getOpcode()) {
3121 case Hexagon::J2_jumpt:
3122 return taken ? Hexagon::J2_jumptnewpt : Hexagon::J2_jumptnew;
3123 case Hexagon::J2_jumpf:
3124 return taken ? Hexagon::J2_jumpfnewpt : Hexagon::J2_jumpfnew;
3125
3126 default:
3127 llvm_unreachable("Unexpected jump instruction.");
3128 }
3129 }
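// Worked example (illustrative only): for a J2_jumpt whose edge to its target
// block has probability >= 1/2 according to MBPI, the function above returns
// Hexagon::J2_jumptnewpt (predicted taken); below 1/2 it returns
// Hexagon::J2_jumptnew. J2_jumpf behaves the same way with its f-variants.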
3130
3131
3132 // Return .new predicate version for an instruction.
3133 int HexagonInstrInfo::getDotNewPredOp(const MachineInstr *MI,
3134 const MachineBranchProbabilityInfo *MBPI) const {
3135 int NewOpcode = Hexagon::getPredNewOpcode(MI->getOpcode());
3136 if (NewOpcode >= 0) // Valid predicate new instruction
3137 return NewOpcode;
3138
3139 switch (MI->getOpcode()) {
3140 // Conditional Jumps
3141 case Hexagon::J2_jumpt:
3142 case Hexagon::J2_jumpf:
3143 return getDotNewPredJumpOp(MI, MBPI);
3144
3145 default:
3146 assert(0 && "Unknown .new type");
3147 }
3148 return 0;
3149 }
3150
3151
3152 int HexagonInstrInfo::getDotOldOp(const int opc) const {
3153 int NewOp = opc;
3154 if (isPredicated(NewOp) && isPredicatedNew(NewOp)) { // Get predicate old form
3155 NewOp = Hexagon::getPredOldOpcode(NewOp);
3156 assert(NewOp >= 0 &&
3157 "Couldn't change predicate new instruction to its old form.");
3158 }
3159
3160 if (isNewValueStore(NewOp)) { // Convert into non-new-value format
3161 NewOp = Hexagon::getNonNVStore(NewOp);
3162 assert(NewOp >= 0 && "Couldn't change new-value store to its old form.");
3163 }
3164 return NewOp;
3165 }
3166
3167
3168 // See if the instruction could potentially be a duplex candidate.
3169 // If so, return its group; return HSIG_None otherwise.
3170 HexagonII::SubInstructionGroup HexagonInstrInfo::getDuplexCandidateGroup(
3171 const MachineInstr *MI) const {
3172 unsigned DstReg, SrcReg, Src1Reg, Src2Reg;
3173 auto &HRI = getRegisterInfo();
3174
3175 switch (MI->getOpcode()) {
3176 default:
3177 return HexagonII::HSIG_None;
3178 //
3179 // Group L1:
3180 //
3181 // Rd = memw(Rs+#u4:2)
3182 // Rd = memub(Rs+#u4:0)
3183 case Hexagon::L2_loadri_io:
3184 DstReg = MI->getOperand(0).getReg();
3185 SrcReg = MI->getOperand(1).getReg();
3186 // Special case this one from Group L2.
3187 // Rd = memw(r29+#u5:2)
3188 if (isIntRegForSubInst(DstReg)) {
3189 if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
3190 HRI.getStackRegister() == SrcReg &&
3191 MI->getOperand(2).isImm() &&
3192 isShiftedUInt<5,2>(MI->getOperand(2).getImm()))
3193 return HexagonII::HSIG_L2;
3194 // Rd = memw(Rs+#u4:2)
3195 if (isIntRegForSubInst(SrcReg) &&
3196 (MI->getOperand(2).isImm() &&
3197 isShiftedUInt<4,2>(MI->getOperand(2).getImm())))
3198 return HexagonII::HSIG_L1;
3199 }
3200 break;
3201 case Hexagon::L2_loadrub_io:
3202 // Rd = memub(Rs+#u4:0)
3203 DstReg = MI->getOperand(0).getReg();
3204 SrcReg = MI->getOperand(1).getReg();
3205 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3206 MI->getOperand(2).isImm() && isUInt<4>(MI->getOperand(2).getImm()))
3207 return HexagonII::HSIG_L1;
3208 break;
3209 //
3210 // Group L2:
3211 //
3212 // Rd = memh/memuh(Rs+#u3:1)
3213 // Rd = memb(Rs+#u3:0)
3214 // Rd = memw(r29+#u5:2) - Handled above.
3215 // Rdd = memd(r29+#u5:3)
3216 // deallocframe
3217 // [if ([!]p0[.new])] dealloc_return
3218 // [if ([!]p0[.new])] jumpr r31
3219 case Hexagon::L2_loadrh_io:
3220 case Hexagon::L2_loadruh_io:
3221 // Rd = memh/memuh(Rs+#u3:1)
3222 DstReg = MI->getOperand(0).getReg();
3223 SrcReg = MI->getOperand(1).getReg();
3224 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3225 MI->getOperand(2).isImm() &&
3226 isShiftedUInt<3,1>(MI->getOperand(2).getImm()))
3227 return HexagonII::HSIG_L2;
3228 break;
3229 case Hexagon::L2_loadrb_io:
3230 // Rd = memb(Rs+#u3:0)
3231 DstReg = MI->getOperand(0).getReg();
3232 SrcReg = MI->getOperand(1).getReg();
3233 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3234 MI->getOperand(2).isImm() &&
3235 isUInt<3>(MI->getOperand(2).getImm()))
3236 return HexagonII::HSIG_L2;
3237 break;
3238 case Hexagon::L2_loadrd_io:
3239 // Rdd = memd(r29+#u5:3)
3240 DstReg = MI->getOperand(0).getReg();
3241 SrcReg = MI->getOperand(1).getReg();
3242 if (isDblRegForSubInst(DstReg, HRI) &&
3243 Hexagon::IntRegsRegClass.contains(SrcReg) &&
3244 HRI.getStackRegister() == SrcReg &&
3245 MI->getOperand(2).isImm() &&
3246 isShiftedUInt<5,3>(MI->getOperand(2).getImm()))
3247 return HexagonII::HSIG_L2;
3248 break;
3249 // dealloc_return is not documented in the Hexagon Manual, but it is marked
3250 // with the A_SUBINSN attribute in iset_v4classic.py.
3251 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
3252 case Hexagon::L4_return:
3253 case Hexagon::L2_deallocframe:
3254 return HexagonII::HSIG_L2;
3255 case Hexagon::EH_RETURN_JMPR:
3256 case Hexagon::JMPret :
3257 // jumpr r31
3258 // Actual form JMPR %PC<imp-def>, %R31<imp-use>, %R0<imp-use,internal>.
3259 DstReg = MI->getOperand(0).getReg();
3260 if (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg))
3261 return HexagonII::HSIG_L2;
3262 break;
3263 case Hexagon::JMPrett:
3264 case Hexagon::JMPretf:
3265 case Hexagon::JMPrettnewpt:
3266 case Hexagon::JMPretfnewpt :
3267 case Hexagon::JMPrettnew :
3268 case Hexagon::JMPretfnew :
3269 DstReg = MI->getOperand(1).getReg();
3270 SrcReg = MI->getOperand(0).getReg();
3271 // [if ([!]p0[.new])] jumpr r31
3272 if ((Hexagon::PredRegsRegClass.contains(SrcReg) &&
3273 (Hexagon::P0 == SrcReg)) &&
3274 (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg)))
3275 return HexagonII::HSIG_L2;
3276 break;
3277 case Hexagon::L4_return_t :
3278 case Hexagon::L4_return_f :
3279 case Hexagon::L4_return_tnew_pnt :
3280 case Hexagon::L4_return_fnew_pnt :
3281 case Hexagon::L4_return_tnew_pt :
3282 case Hexagon::L4_return_fnew_pt :
3283 // [if ([!]p0[.new])] dealloc_return
3284 SrcReg = MI->getOperand(0).getReg();
3285 if (Hexagon::PredRegsRegClass.contains(SrcReg) && (Hexagon::P0 == SrcReg))
3286 return HexagonII::HSIG_L2;
3287 break;
3288 //
3289 // Group S1:
3290 //
3291 // memw(Rs+#u4:2) = Rt
3292 // memb(Rs+#u4:0) = Rt
3293 case Hexagon::S2_storeri_io:
3294 // Special case this one from Group S2.
3295 // memw(r29+#u5:2) = Rt
3296 Src1Reg = MI->getOperand(0).getReg();
3297 Src2Reg = MI->getOperand(2).getReg();
3298 if (Hexagon::IntRegsRegClass.contains(Src1Reg) &&
3299 isIntRegForSubInst(Src2Reg) &&
3300 HRI.getStackRegister() == Src1Reg && MI->getOperand(1).isImm() &&
3301 isShiftedUInt<5,2>(MI->getOperand(1).getImm()))
3302 return HexagonII::HSIG_S2;
3303 // memw(Rs+#u4:2) = Rt
3304 if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
3305 MI->getOperand(1).isImm() &&
3306 isShiftedUInt<4,2>(MI->getOperand(1).getImm()))
3307 return HexagonII::HSIG_S1;
3308 break;
3309 case Hexagon::S2_storerb_io:
3310 // memb(Rs+#u4:0) = Rt
3311 Src1Reg = MI->getOperand(0).getReg();
3312 Src2Reg = MI->getOperand(2).getReg();
3313 if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
3314 MI->getOperand(1).isImm() && isUInt<4>(MI->getOperand(1).getImm()))
3315 return HexagonII::HSIG_S1;
3316 break;
3317 //
3318 // Group S2:
3319 //
3320 // memh(Rs+#u3:1) = Rt
3321 // memw(r29+#u5:2) = Rt
3322 // memd(r29+#s6:3) = Rtt
3323 // memw(Rs+#u4:2) = #U1
3324 // memb(Rs+#u4) = #U1
3325 // allocframe(#u5:3)
3326 case Hexagon::S2_storerh_io:
3327 // memh(Rs+#u3:1) = Rt
3328 Src1Reg = MI->getOperand(0).getReg();
3329 Src2Reg = MI->getOperand(2).getReg();
3330 if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
3331 MI->getOperand(1).isImm() &&
3332 isShiftedUInt<3,1>(MI->getOperand(1).getImm()))
3333 return HexagonII::HSIG_S1;
3334 break;
3335 case Hexagon::S2_storerd_io:
3336 // memd(r29+#s6:3) = Rtt
3337 Src1Reg = MI->getOperand(0).getReg();
3338 Src2Reg = MI->getOperand(2).getReg();
3339 if (isDblRegForSubInst(Src2Reg, HRI) &&
3340 Hexagon::IntRegsRegClass.contains(Src1Reg) &&
3341 HRI.getStackRegister() == Src1Reg && MI->getOperand(1).isImm() &&
3342 isShiftedInt<6,3>(MI->getOperand(1).getImm()))
3343 return HexagonII::HSIG_S2;
3344 break;
3345 case Hexagon::S4_storeiri_io:
3346 // memw(Rs+#u4:2) = #U1
3347 Src1Reg = MI->getOperand(0).getReg();
3348 if (isIntRegForSubInst(Src1Reg) && MI->getOperand(1).isImm() &&
3349 isShiftedUInt<4,2>(MI->getOperand(1).getImm()) &&
3350 MI->getOperand(2).isImm() && isUInt<1>(MI->getOperand(2).getImm()))
3351 return HexagonII::HSIG_S2;
3352 break;
3353 case Hexagon::S4_storeirb_io:
3354 // memb(Rs+#u4) = #U1
3355 Src1Reg = MI->getOperand(0).getReg();
3356 if (isIntRegForSubInst(Src1Reg) && MI->getOperand(1).isImm() &&
3357 isUInt<4>(MI->getOperand(1).getImm()) &&
3358 MI->getOperand(2).isImm() && isUInt<1>(MI->getOperand(2).getImm()))
3359 return HexagonII::HSIG_S2;
3360 break;
3361 case Hexagon::S2_allocframe:
3362 if (MI->getOperand(0).isImm() &&
3363 isShiftedUInt<5,3>(MI->getOperand(0).getImm()))
3364 return HexagonII::HSIG_S1;
3365 break;
3366 //
3367 // Group A:
3368 //
3369 // Rx = add(Rx,#s7)
3370 // Rd = Rs
3371 // Rd = #u6
3372 // Rd = #-1
3373 // if ([!]P0[.new]) Rd = #0
3374 // Rd = add(r29,#u6:2)
3375 // Rx = add(Rx,Rs)
3376 // P0 = cmp.eq(Rs,#u2)
3377 // Rdd = combine(#0,Rs)
3378 // Rdd = combine(Rs,#0)
3379 // Rdd = combine(#u2,#U2)
3380 // Rd = add(Rs,#1)
3381 // Rd = add(Rs,#-1)
3382 // Rd = sxth/sxtb/zxtb/zxth(Rs)
3383 // Rd = and(Rs,#1)
3384 case Hexagon::A2_addi:
3385 DstReg = MI->getOperand(0).getReg();
3386 SrcReg = MI->getOperand(1).getReg();
3387 if (isIntRegForSubInst(DstReg)) {
3388 // Rd = add(r29,#u6:2)
3389 if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
3390 HRI.getStackRegister() == SrcReg && MI->getOperand(2).isImm() &&
3391 isShiftedUInt<6,2>(MI->getOperand(2).getImm()))
3392 return HexagonII::HSIG_A;
3393 // Rx = add(Rx,#s7)
3394 if ((DstReg == SrcReg) && MI->getOperand(2).isImm() &&
3395 isInt<7>(MI->getOperand(2).getImm()))
3396 return HexagonII::HSIG_A;
3397 // Rd = add(Rs,#1)
3398 // Rd = add(Rs,#-1)
3399 if (isIntRegForSubInst(SrcReg) && MI->getOperand(2).isImm() &&
3400 ((MI->getOperand(2).getImm() == 1) ||
3401 (MI->getOperand(2).getImm() == -1)))
3402 return HexagonII::HSIG_A;
3403 }
3404 break;
3405 case Hexagon::A2_add:
3406 // Rx = add(Rx,Rs)
3407 DstReg = MI->getOperand(0).getReg();
3408 Src1Reg = MI->getOperand(1).getReg();
3409 Src2Reg = MI->getOperand(2).getReg();
3410 if (isIntRegForSubInst(DstReg) && (DstReg == Src1Reg) &&
3411 isIntRegForSubInst(Src2Reg))
3412 return HexagonII::HSIG_A;
3413 break;
3414 case Hexagon::A2_andir:
3415 // Same as zxtb.
3416 // Rd16=and(Rs16,#255)
3417 // Rd16=and(Rs16,#1)
3418 DstReg = MI->getOperand(0).getReg();
3419 SrcReg = MI->getOperand(1).getReg();
3420 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3421 MI->getOperand(2).isImm() &&
3422 ((MI->getOperand(2).getImm() == 1) ||
3423 (MI->getOperand(2).getImm() == 255)))
3424 return HexagonII::HSIG_A;
3425 break;
3426 case Hexagon::A2_tfr:
3427 // Rd = Rs
3428 DstReg = MI->getOperand(0).getReg();
3429 SrcReg = MI->getOperand(1).getReg();
3430 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
3431 return HexagonII::HSIG_A;
3432 break;
3433 case Hexagon::A2_tfrsi:
3434 // Rd = #u6
3435 // Do not test for the #u6 size since the constant is getting extended
3436 // regardless and a compound instruction could still be formed.
3437 // Rd = #-1
3438 DstReg = MI->getOperand(0).getReg();
3439 if (isIntRegForSubInst(DstReg))
3440 return HexagonII::HSIG_A;
3441 break;
3442 case Hexagon::C2_cmoveit:
3443 case Hexagon::C2_cmovenewit:
3444 case Hexagon::C2_cmoveif:
3445 case Hexagon::C2_cmovenewif:
3446 // if ([!]P0[.new]) Rd = #0
3447 // Actual form:
3448 // %R16<def> = C2_cmovenewit %P0<internal>, 0, %R16<imp-use,undef>;
3449 DstReg = MI->getOperand(0).getReg();
3450 SrcReg = MI->getOperand(1).getReg();
3451 if (isIntRegForSubInst(DstReg) &&
3452 Hexagon::PredRegsRegClass.contains(SrcReg) && Hexagon::P0 == SrcReg &&
3453 MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0)
3454 return HexagonII::HSIG_A;
3455 break;
3456 case Hexagon::C2_cmpeqi:
3457 // P0 = cmp.eq(Rs,#u2)
3458 DstReg = MI->getOperand(0).getReg();
3459 SrcReg = MI->getOperand(1).getReg();
3460 if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3461 Hexagon::P0 == DstReg && isIntRegForSubInst(SrcReg) &&
3462 MI->getOperand(2).isImm() && isUInt<2>(MI->getOperand(2).getImm()))
3463 return HexagonII::HSIG_A;
3464 break;
3465 case Hexagon::A2_combineii:
3466 case Hexagon::A4_combineii:
3467 // Rdd = combine(#u2,#U2)
3468 DstReg = MI->getOperand(0).getReg();
3469 if (isDblRegForSubInst(DstReg, HRI) &&
3470 ((MI->getOperand(1).isImm() && isUInt<2>(MI->getOperand(1).getImm())) ||
3471 (MI->getOperand(1).isGlobal() &&
3472 isUInt<2>(MI->getOperand(1).getOffset()))) &&
3473 ((MI->getOperand(2).isImm() && isUInt<2>(MI->getOperand(2).getImm())) ||
3474 (MI->getOperand(2).isGlobal() &&
3475 isUInt<2>(MI->getOperand(2).getOffset()))))
3476 return HexagonII::HSIG_A;
3477 break;
3478 case Hexagon::A4_combineri:
3479 // Rdd = combine(Rs,#0)
3480 DstReg = MI->getOperand(0).getReg();
3481 SrcReg = MI->getOperand(1).getReg();
3482 if (isDblRegForSubInst(DstReg, HRI) && isIntRegForSubInst(SrcReg) &&
3483 ((MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) ||
3484 (MI->getOperand(2).isGlobal() && MI->getOperand(2).getOffset() == 0)))
3485 return HexagonII::HSIG_A;
3486 break;
3487 case Hexagon::A4_combineir:
3488 // Rdd = combine(#0,Rs)
3489 DstReg = MI->getOperand(0).getReg();
3490 SrcReg = MI->getOperand(2).getReg();
3491 if (isDblRegForSubInst(DstReg, HRI) && isIntRegForSubInst(SrcReg) &&
3492 ((MI->getOperand(1).isImm() && MI->getOperand(1).getImm() == 0) ||
3493 (MI->getOperand(1).isGlobal() && MI->getOperand(1).getOffset() == 0)))
3494 return HexagonII::HSIG_A;
3495 break;
3496 case Hexagon::A2_sxtb:
3497 case Hexagon::A2_sxth:
3498 case Hexagon::A2_zxtb:
3499 case Hexagon::A2_zxth:
3500 // Rd = sxth/sxtb/zxtb/zxth(Rs)
3501 DstReg = MI->getOperand(0).getReg();
3502 SrcReg = MI->getOperand(1).getReg();
3503 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
3504 return HexagonII::HSIG_A;
3505 break;
3506 }
3507
3508 return HexagonII::HSIG_None;
3509 }
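// Illustrative example (not from the original source): two instructions
// classified above can later be paired into a single 32-bit duplex, e.g. an
// HSIG_A candidate with an HSIG_L1 candidate:
//
//   //   r1 = add(r1,#1)      // A2_addi,       Rx = add(Rx,#s7)    -> HSIG_A
//   //   r0 = memw(r2+#8)     // L2_loadri_io,  Rd = memw(Rs+#u4:2) -> HSIG_L1
//
// Whether such a pair is actually emitted as a duplex is decided later by the
// packetizer/MC layer; this routine only reports the candidate group.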
3510
3511
3512 short HexagonInstrInfo::getEquivalentHWInstr(const MachineInstr *MI) const {
3513 return Hexagon::getRealHWInstr(MI->getOpcode(), Hexagon::InstrType_Real);
3514 }
3515
3516
3517 // Return first non-debug instruction in the basic block.
3518 MachineInstr *HexagonInstrInfo::getFirstNonDbgInst(MachineBasicBlock *BB)
3519 const {
3520 for (auto MII = BB->instr_begin(), End = BB->instr_end(); MII != End; MII++) {
3521 MachineInstr *MI = &*MII;
3522 if (MI->isDebugValue())
3523 continue;
3524 return MI;
3525 }
3526 return nullptr;
3527 }
3528
3529
3530 unsigned HexagonInstrInfo::getInstrTimingClassLatency(
3531 const InstrItineraryData *ItinData, const MachineInstr *MI) const {
3532 // Default to one cycle for no itinerary. However, an "empty" itinerary may
3533 // still have a MinLatency property, which getStageLatency checks.
3534 if (!ItinData)
3535 return getInstrLatency(ItinData, MI);
3536
3537 // Get the latency embedded in the itinerary. If we're not using timing class
3538 // latencies or if we're using BSB scheduling, then restrict the maximum latency
3539 // to 1 (that is, either 0 or 1).
3540 if (MI->isTransient())
3541 return 0;
3542 unsigned Latency = ItinData->getStageLatency(MI->getDesc().getSchedClass());
3543 if (!EnableTimingClassLatency ||
3544 MI->getParent()->getParent()->getSubtarget<HexagonSubtarget>().
3545 useBSBScheduling())
3546 if (Latency > 1)
3547 Latency = 1;
3548 return Latency;
3549 }
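// Worked example for the latency computed above (illustrative): given a valid
// itinerary, a transient instruction such as a COPY reports 0. Any other
// instruction whose stage latency is, say, 4 is clamped to 1 unless
// -enable-timing-class-latency is set and BSB scheduling is off, in which case
// the full 4 cycles are returned.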
3550
3551
3552 // Inverts the predication logic.
3553 // p -> NotP
3554 // NotP -> P
3555 bool HexagonInstrInfo::getInvertedPredSense(
3556 SmallVectorImpl<MachineOperand> &Cond) const {
3557 if (Cond.empty())
3558 return false;
3559 unsigned Opc = getInvertedPredicatedOpcode(Cond[0].getImm());
3560 Cond[0].setImm(Opc);
3561 return true;
3562 }
3563
3564
3565 unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
3566 int InvPredOpcode;
3567 InvPredOpcode = isPredicatedTrue(Opc) ? Hexagon::getFalsePredOpcode(Opc)
3568 : Hexagon::getTruePredOpcode(Opc);
3569 if (InvPredOpcode >= 0) // Valid instruction with the inverted predicate.
3570 return InvPredOpcode;
3571
3572 llvm_unreachable("Unexpected predicated instruction");
3573 }
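// Example for the inversion above (illustrative, assuming the usual true/false
// pairing in the generated opcode tables):
//
//   //   getInvertedPredicatedOpcode(Hexagon::J2_jumpt) == Hexagon::J2_jumpf
//   //   getInvertedPredicatedOpcode(Hexagon::J2_jumpf) == Hexagon::J2_jumpt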
3574
3575
3576 // Returns the max value that doesn't need to be extended.
3577 int HexagonInstrInfo::getMaxValue(const MachineInstr *MI) const {
3578 const uint64_t F = MI->getDesc().TSFlags;
3579 unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
3580 & HexagonII::ExtentSignedMask;
3581 unsigned bits = (F >> HexagonII::ExtentBitsPos)
3582 & HexagonII::ExtentBitsMask;
3583
3584 if (isSigned) // if value is signed
3585 return ~(-1U << (bits - 1));
3586 else
3587 return ~(-1U << bits);
3588 }
3589
3590
3591 unsigned HexagonInstrInfo::getMemAccessSize(const MachineInstr* MI) const {
3592 const uint64_t F = MI->getDesc().TSFlags;
3593 return (F >> HexagonII::MemAccessSizePos) & HexagonII::MemAccesSizeMask;
3594 }
3595
3596
3597 // Returns the min value that doesn't need to be extended.
3598 int HexagonInstrInfo::getMinValue(const MachineInstr *MI) const {
3599 const uint64_t F = MI->getDesc().TSFlags;
3600 unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
3601 & HexagonII::ExtentSignedMask;
3602 unsigned bits = (F >> HexagonII::ExtentBitsPos)
3603 & HexagonII::ExtentBitsMask;
3604
3605 if (isSigned) // if value is signed
3606 return -1U << (bits - 1);
3607 else
3608 return 0;
3609 }
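// Worked example for the two extent helpers above (illustrative): for an
// operand with an 8-bit extent field,
//
//   //   signed:    getMaxValue == 127,  getMinValue == -128
//   //   unsigned:  getMaxValue == 255,  getMinValue == 0
//
// Values outside that range require a constant extender.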
3610
3611
3612 // Returns opcode of the non-extended equivalent instruction.
3613 short HexagonInstrInfo::getNonExtOpcode(const MachineInstr *MI) const {
3614 // Check if the instruction has a register form that uses a register in place
3615 // of the extended operand; if so, return that as the non-extended form.
3616 short NonExtOpcode = Hexagon::getRegForm(MI->getOpcode());
3617 if (NonExtOpcode >= 0)
3618 return NonExtOpcode;
3619
3620 if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) {
3621 // Check addressing mode and retrieve non-ext equivalent instruction.
3622 switch (getAddrMode(MI)) {
3623 case HexagonII::Absolute :
3624 return Hexagon::getBaseWithImmOffset(MI->getOpcode());
3625 case HexagonII::BaseImmOffset :
3626 return Hexagon::getBaseWithRegOffset(MI->getOpcode());
3627 case HexagonII::BaseLongOffset:
3628 return Hexagon::getRegShlForm(MI->getOpcode());
3629
3630 default:
3631 return -1;
3632 }
3633 }
3634 return -1;
3635 }
3636
3637
3638 bool HexagonInstrInfo::getPredReg(ArrayRef<MachineOperand> Cond,
3639 unsigned &PredReg, unsigned &PredRegPos, unsigned &PredRegFlags) const {
3640 if (Cond.empty())
3641 return false;
3642 assert(Cond.size() == 2);
3643 if (isNewValueJump(Cond[0].getImm()) || Cond[1].isMBB()) {
3644 DEBUG(dbgs() << "No predregs for new-value jumps/endloop");
3645 return false;
3646 }
3647 PredReg = Cond[1].getReg();
3648 PredRegPos = 1;
3649 // See IfConversion.cpp for why we add RegState::Implicit | RegState::Undef.
3650 PredRegFlags = 0;
3651 if (Cond[1].isImplicit())
3652 PredRegFlags = RegState::Implicit;
3653 if (Cond[1].isUndef())
3654 PredRegFlags |= RegState::Undef;
3655 return true;
3656 }
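// Illustrative example for getPredReg above (mirroring how the branch
// condition vector is built in this backend): for
// Cond = { <opcode imm>, <p0 register operand> }, e.g. a conditional jump on
// p0, this returns PredReg = Hexagon::P0, PredRegPos = 1, and flags recording
// whether the p0 operand was implicit and/or undef.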
3657
3658
3659 short HexagonInstrInfo::getPseudoInstrPair(const MachineInstr *MI) const {
3660 return Hexagon::getRealHWInstr(MI->getOpcode(), Hexagon::InstrType_Pseudo);
3661 }
3662
3663
3664 short HexagonInstrInfo::getRegForm(const MachineInstr *MI) const {
3665 return Hexagon::getRegForm(MI->getOpcode());
3666 }
3667
3668
3669 // Return the number of bytes required to encode the instruction.
3670 // Hexagon instructions are fixed length, 4 bytes, unless they
3671 // use a constant extender, which requires another 4 bytes.
3672 // For debug instructions and prolog labels, return 0.
3673 unsigned HexagonInstrInfo::getSize(const MachineInstr *MI) const {
3674 if (MI->isDebugValue() || MI->isPosition())
3675 return 0;
3676
3677 unsigned Size = MI->getDesc().getSize();
3678 if (!Size)
3679 // Assume the default insn size in case it cannot be determined
3680 // for whatever reason.
3681 Size = HEXAGON_INSTR_SIZE;
3682
3683 if (isConstExtended(MI) || isExtended(MI))
3684 Size += HEXAGON_INSTR_SIZE;
3685
3686 // Try to compute the number of instructions in the asm string.
3687 if (BranchRelaxAsmLarge && MI->getOpcode() == Hexagon::INLINEASM) {
3688 const MachineBasicBlock &MBB = *MI->getParent();
3689 const MachineFunction *MF = MBB.getParent();
3690 const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
3691
3692 // Count the number of register definitions to find the asm string.
3693 unsigned NumDefs = 0;
3694 for (; MI->getOperand(NumDefs).isReg() && MI->getOperand(NumDefs).isDef();
3695 ++NumDefs)
3696 assert(NumDefs != MI->getNumOperands()-2 && "No asm string?");
3697
3698 assert(MI->getOperand(NumDefs).isSymbol() && "No asm string?");
3699 // Disassemble the AsmStr and approximate number of instructions.
3700 const char *AsmStr = MI->getOperand(NumDefs).getSymbolName();
3701 Size = getInlineAsmLength(AsmStr, *MAI);
3702 }
3703
3704 return Size;
3705 }
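// Worked example for getSize above (illustrative): a regular instruction
// reports 4 bytes (HEXAGON_INSTR_SIZE), a constant-extended one reports 8
// (one extra extender word), and a DBG_VALUE or label reports 0.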
3706
3707
3708 uint64_t HexagonInstrInfo::getType(const MachineInstr* MI) const {
3709 const uint64_t F = MI->getDesc().TSFlags;
3710 return (F >> HexagonII::TypePos) & HexagonII::TypeMask;
3711 }
3712
3713
3714 unsigned HexagonInstrInfo::getUnits(const MachineInstr* MI) const {
3715 const TargetSubtargetInfo &ST = MI->getParent()->getParent()->getSubtarget();
3716 const InstrItineraryData &II = *ST.getInstrItineraryData();
3717 const InstrStage &IS = *II.beginStage(MI->getDesc().getSchedClass());
3718
3719 return IS.getUnits();
3720 }
3721
3722
3723 unsigned HexagonInstrInfo::getValidSubTargets(const unsigned Opcode) const {
3724 const uint64_t F = get(Opcode).TSFlags;
3725 return (F >> HexagonII::validSubTargetPos) & HexagonII::validSubTargetMask;
3726 }
3727
3728
3729 // Calculate size of the basic block without debug instructions.
3730 unsigned HexagonInstrInfo::nonDbgBBSize(const MachineBasicBlock *BB) const {
3731 return nonDbgMICount(BB->instr_begin(), BB->instr_end());
3732 }
3733
3734
3735 unsigned HexagonInstrInfo::nonDbgBundleSize(
3736 MachineBasicBlock::const_iterator BundleHead) const {
3737 assert(BundleHead->isBundle() && "Not a bundle header");
3738 auto MII = BundleHead.getInstrIterator();
3739 // Skip the bundle header.
3740 return nonDbgMICount(++MII, getBundleEnd(BundleHead));
3741 }
3742
3743
3744 /// immediateExtend - Changes the instruction in place to one using an immediate
3745 /// extender.
3746 void HexagonInstrInfo::immediateExtend(MachineInstr *MI) const {
3747 assert((isExtendable(MI)||isConstExtended(MI)) &&
3748 "Instruction must be extendable");
3749 // Find which operand is extendable.
3750 short ExtOpNum = getCExtOpNum(MI);
3751 MachineOperand &MO = MI->getOperand(ExtOpNum);
3752 // This needs to be something we understand.
3753 assert((MO.isMBB() || MO.isImm()) &&
3754 "Branch with unknown extendable field type");
3755 // Mark given operand as extended.
3756 MO.addTargetFlag(HexagonII::HMOTF_ConstExtended);
3757 }
3758
3759
3760 bool HexagonInstrInfo::invertAndChangeJumpTarget(
3761 MachineInstr* MI, MachineBasicBlock* NewTarget) const {
3762 DEBUG(dbgs() << "\n[invertAndChangeJumpTarget] to BB#"
3763 << NewTarget->getNumber(); MI->dump(););
3764 assert(MI->isBranch());
3765 unsigned NewOpcode = getInvertedPredicatedOpcode(MI->getOpcode());
3766 int TargetPos = MI->getNumOperands() - 1;
3767 // In general the branch target is the last operand, but
3768 // implicit defs added at the end might change its position.
3769 while ((TargetPos > -1) && !MI->getOperand(TargetPos).isMBB())
3770 --TargetPos;
3771 assert((TargetPos >= 0) && MI->getOperand(TargetPos).isMBB());
3772 MI->getOperand(TargetPos).setMBB(NewTarget);
3773 if (EnableBranchPrediction && isPredicatedNew(MI)) {
3774 NewOpcode = reversePrediction(NewOpcode);
3775 }
3776 MI->setDesc(get(NewOpcode));
3777 return true;
3778 }
3779
3780
3781 void HexagonInstrInfo::genAllInsnTimingClasses(MachineFunction &MF) const {
3782 /* +++ The code below is used to generate complete set of Hexagon Insn +++ */
3783 MachineFunction::iterator A = MF.begin();
3784 MachineBasicBlock &B = *A;
3785 MachineBasicBlock::iterator I = B.begin();
3786 MachineInstr *MI = &*I;
3787 DebugLoc DL = MI->getDebugLoc();
3788 MachineInstr *NewMI;
3789
3790 for (unsigned insn = TargetOpcode::GENERIC_OP_END+1;
3791 insn < Hexagon::INSTRUCTION_LIST_END; ++insn) {
3792 NewMI = BuildMI(B, MI, DL, get(insn));
3793 DEBUG(dbgs() << "\n" << getName(NewMI->getOpcode()) <<
3794 " Class: " << NewMI->getDesc().getSchedClass());
3795 NewMI->eraseFromParent();
3796 }
3797 /* --- The code above is used to generate complete set of Hexagon Insn --- */
3798 }
3799
3800
3801 // Inverts the predication logic.
3802 // p -> NotP
3803 // NotP -> P
3804 bool HexagonInstrInfo::reversePredSense(MachineInstr* MI) const {
3805 DEBUG(dbgs() << "\nTrying to reverse pred. sense of:"; MI->dump());
3806 MI->setDesc(get(getInvertedPredicatedOpcode(MI->getOpcode())));
3807 return true;
3808 }
3809
3810
3811 // Reverse the branch prediction.
3812 unsigned HexagonInstrInfo::reversePrediction(unsigned Opcode) const {
3813 int PredRevOpcode = -1;
3814 if (isPredictedTaken(Opcode))
3815 PredRevOpcode = Hexagon::notTakenBranchPrediction(Opcode);
3816 else
3817 PredRevOpcode = Hexagon::takenBranchPrediction(Opcode);
3818 assert(PredRevOpcode > 0);
3819 return PredRevOpcode;
3820 }
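// Example for reversePrediction above (illustrative, assuming the usual
// taken/not-taken pairing in the generated prediction tables):
//
//   //   reversePrediction(Hexagon::J2_jumptnewpt) == Hexagon::J2_jumptnew
//   //   reversePrediction(Hexagon::J2_jumptnew)   == Hexagon::J2_jumptnewpt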
3821
3822
3823 // TODO: Add more rigorous validation.
3824 bool HexagonInstrInfo::validateBranchCond(const ArrayRef<MachineOperand> &Cond)
3825 const {
3826 return Cond.empty() || (Cond[0].isImm() && (Cond.size() != 1));
3827 }
3828
3829