1 //===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9
10 #include "ARMFeatures.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "MCTargetDesc/ARMBaseInfo.h"
13 #include "MCTargetDesc/ARMMCExpr.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SmallVector.h"
16 #include "llvm/ADT/StringExtras.h"
17 #include "llvm/ADT/StringSwitch.h"
18 #include "llvm/ADT/Triple.h"
19 #include "llvm/ADT/Twine.h"
20 #include "llvm/MC/MCAsmInfo.h"
21 #include "llvm/MC/MCAssembler.h"
22 #include "llvm/MC/MCContext.h"
23 #include "llvm/MC/MCDisassembler.h"
24 #include "llvm/MC/MCELFStreamer.h"
25 #include "llvm/MC/MCExpr.h"
26 #include "llvm/MC/MCInst.h"
27 #include "llvm/MC/MCInstrDesc.h"
28 #include "llvm/MC/MCInstrInfo.h"
29 #include "llvm/MC/MCObjectFileInfo.h"
30 #include "llvm/MC/MCParser/MCAsmLexer.h"
31 #include "llvm/MC/MCParser/MCAsmParser.h"
32 #include "llvm/MC/MCParser/MCAsmParserUtils.h"
33 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
34 #include "llvm/MC/MCRegisterInfo.h"
35 #include "llvm/MC/MCSection.h"
36 #include "llvm/MC/MCStreamer.h"
37 #include "llvm/MC/MCSubtargetInfo.h"
38 #include "llvm/MC/MCSymbol.h"
39 #include "llvm/MC/MCTargetAsmParser.h"
40 #include "llvm/Support/ARMBuildAttributes.h"
41 #include "llvm/Support/ARMEHABI.h"
42 #include "llvm/Support/TargetParser.h"
43 #include "llvm/Support/COFF.h"
44 #include "llvm/Support/Debug.h"
45 #include "llvm/Support/ELF.h"
46 #include "llvm/Support/MathExtras.h"
47 #include "llvm/Support/SourceMgr.h"
48 #include "llvm/Support/TargetRegistry.h"
49 #include "llvm/Support/raw_ostream.h"
50
51 using namespace llvm;
52
53 namespace {
54
55 class ARMOperand;
56
57 enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
58
59 class UnwindContext {
60 MCAsmParser &Parser;
61
62 typedef SmallVector<SMLoc, 4> Locs;
63
64 Locs FnStartLocs;
65 Locs CantUnwindLocs;
66 Locs PersonalityLocs;
67 Locs PersonalityIndexLocs;
68 Locs HandlerDataLocs;
69 int FPReg;
70
71 public:
UnwindContext(MCAsmParser & P)72 UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
73
hasFnStart() const74 bool hasFnStart() const { return !FnStartLocs.empty(); }
cantUnwind() const75 bool cantUnwind() const { return !CantUnwindLocs.empty(); }
hasHandlerData() const76 bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
hasPersonality() const77 bool hasPersonality() const {
78 return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
79 }
80
recordFnStart(SMLoc L)81 void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
recordCantUnwind(SMLoc L)82 void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
recordPersonality(SMLoc L)83 void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
recordHandlerData(SMLoc L)84 void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
recordPersonalityIndex(SMLoc L)85 void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
86
saveFPReg(int Reg)87 void saveFPReg(int Reg) { FPReg = Reg; }
getFPReg() const88 int getFPReg() const { return FPReg; }
89
emitFnStartLocNotes() const90 void emitFnStartLocNotes() const {
91 for (Locs::const_iterator FI = FnStartLocs.begin(), FE = FnStartLocs.end();
92 FI != FE; ++FI)
93 Parser.Note(*FI, ".fnstart was specified here");
94 }
emitCantUnwindLocNotes() const95 void emitCantUnwindLocNotes() const {
96 for (Locs::const_iterator UI = CantUnwindLocs.begin(),
97 UE = CantUnwindLocs.end(); UI != UE; ++UI)
98 Parser.Note(*UI, ".cantunwind was specified here");
99 }
emitHandlerDataLocNotes() const100 void emitHandlerDataLocNotes() const {
101 for (Locs::const_iterator HI = HandlerDataLocs.begin(),
102 HE = HandlerDataLocs.end(); HI != HE; ++HI)
103 Parser.Note(*HI, ".handlerdata was specified here");
104 }
emitPersonalityLocNotes() const105 void emitPersonalityLocNotes() const {
106 for (Locs::const_iterator PI = PersonalityLocs.begin(),
107 PE = PersonalityLocs.end(),
108 PII = PersonalityIndexLocs.begin(),
109 PIE = PersonalityIndexLocs.end();
110 PI != PE || PII != PIE;) {
111 if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
112 Parser.Note(*PI++, ".personality was specified here");
113 else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
114 Parser.Note(*PII++, ".personalityindex was specified here");
115 else
116 llvm_unreachable(".personality and .personalityindex cannot be "
117 "at the same location");
118 }
119 }
120
reset()121 void reset() {
122 FnStartLocs = Locs();
123 CantUnwindLocs = Locs();
124 PersonalityLocs = Locs();
125 HandlerDataLocs = Locs();
126 PersonalityIndexLocs = Locs();
127 FPReg = ARM::SP;
128 }
129 };
130
131 class ARMAsmParser : public MCTargetAsmParser {
132 const MCInstrInfo &MII;
133 const MCRegisterInfo *MRI;
134 UnwindContext UC;
135
getTargetStreamer()136 ARMTargetStreamer &getTargetStreamer() {
137 assert(getParser().getStreamer().getTargetStreamer() &&
138 "do not have a target streamer");
139 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
140 return static_cast<ARMTargetStreamer &>(TS);
141 }
142
143 // Map of register aliases registers via the .req directive.
144 StringMap<unsigned> RegisterReqs;
145
146 bool NextSymbolIsThumb;
147
148 struct {
149 ARMCC::CondCodes Cond; // Condition for IT block.
150 unsigned Mask:4; // Condition mask for instructions.
151 // Starting at first 1 (from lsb).
152 // '1' condition as indicated in IT.
153 // '0' inverse of condition (else).
154 // Count of instructions in IT block is
155 // 4 - trailingzeroes(mask)
156
157 bool FirstCond; // Explicit flag for when we're parsing the
158 // First instruction in the IT block. It's
159 // implied in the mask, so needs special
160 // handling.
161
162 unsigned CurPosition; // Current position in parsing of IT
163 // block. In range [0,3]. Initialized
164 // according to count of instructions in block.
165 // ~0U if no active IT block.
166 } ITState;
inITBlock()167 bool inITBlock() { return ITState.CurPosition != ~0U; }
lastInITBlock()168 bool lastInITBlock() {
169 return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask);
170 }
forwardITPosition()171 void forwardITPosition() {
172 if (!inITBlock()) return;
173 // Move to the next instruction in the IT block, if there is one. If not,
174 // mark the block as done.
175 unsigned TZ = countTrailingZeros(ITState.Mask);
176 if (++ITState.CurPosition == 5 - TZ)
177 ITState.CurPosition = ~0U; // Done with the IT block after this.
178 }
179
Note(SMLoc L,const Twine & Msg,ArrayRef<SMRange> Ranges=None)180 void Note(SMLoc L, const Twine &Msg, ArrayRef<SMRange> Ranges = None) {
181 return getParser().Note(L, Msg, Ranges);
182 }
Warning(SMLoc L,const Twine & Msg,ArrayRef<SMRange> Ranges=None)183 bool Warning(SMLoc L, const Twine &Msg,
184 ArrayRef<SMRange> Ranges = None) {
185 return getParser().Warning(L, Msg, Ranges);
186 }
Error(SMLoc L,const Twine & Msg,ArrayRef<SMRange> Ranges=None)187 bool Error(SMLoc L, const Twine &Msg,
188 ArrayRef<SMRange> Ranges = None) {
189 return getParser().Error(L, Msg, Ranges);
190 }
191
192 bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
193 unsigned ListNo, bool IsARPop = false);
194 bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
195 unsigned ListNo);
196
197 int tryParseRegister();
198 bool tryParseRegisterWithWriteBack(OperandVector &);
199 int tryParseShiftRegister(OperandVector &);
200 bool parseRegisterList(OperandVector &);
201 bool parseMemory(OperandVector &);
202 bool parseOperand(OperandVector &, StringRef Mnemonic);
203 bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
204 bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
205 unsigned &ShiftAmount);
206 bool parseLiteralValues(unsigned Size, SMLoc L);
207 bool parseDirectiveThumb(SMLoc L);
208 bool parseDirectiveARM(SMLoc L);
209 bool parseDirectiveThumbFunc(SMLoc L);
210 bool parseDirectiveCode(SMLoc L);
211 bool parseDirectiveSyntax(SMLoc L);
212 bool parseDirectiveReq(StringRef Name, SMLoc L);
213 bool parseDirectiveUnreq(SMLoc L);
214 bool parseDirectiveArch(SMLoc L);
215 bool parseDirectiveEabiAttr(SMLoc L);
216 bool parseDirectiveCPU(SMLoc L);
217 bool parseDirectiveFPU(SMLoc L);
218 bool parseDirectiveFnStart(SMLoc L);
219 bool parseDirectiveFnEnd(SMLoc L);
220 bool parseDirectiveCantUnwind(SMLoc L);
221 bool parseDirectivePersonality(SMLoc L);
222 bool parseDirectiveHandlerData(SMLoc L);
223 bool parseDirectiveSetFP(SMLoc L);
224 bool parseDirectivePad(SMLoc L);
225 bool parseDirectiveRegSave(SMLoc L, bool IsVector);
226 bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
227 bool parseDirectiveLtorg(SMLoc L);
228 bool parseDirectiveEven(SMLoc L);
229 bool parseDirectivePersonalityIndex(SMLoc L);
230 bool parseDirectiveUnwindRaw(SMLoc L);
231 bool parseDirectiveTLSDescSeq(SMLoc L);
232 bool parseDirectiveMovSP(SMLoc L);
233 bool parseDirectiveObjectArch(SMLoc L);
234 bool parseDirectiveArchExtension(SMLoc L);
235 bool parseDirectiveAlign(SMLoc L);
236 bool parseDirectiveThumbSet(SMLoc L);
237
238 StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
239 bool &CarrySetting, unsigned &ProcessorIMod,
240 StringRef &ITMask);
241 void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
242 bool &CanAcceptCarrySet,
243 bool &CanAcceptPredicationCode);
244
245 void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
246 OperandVector &Operands);
isThumb() const247 bool isThumb() const {
248 // FIXME: Can tablegen auto-generate this?
249 return getSTI().getFeatureBits()[ARM::ModeThumb];
250 }
isThumbOne() const251 bool isThumbOne() const {
252 return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2];
253 }
isThumbTwo() const254 bool isThumbTwo() const {
255 return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2];
256 }
hasThumb() const257 bool hasThumb() const {
258 return getSTI().getFeatureBits()[ARM::HasV4TOps];
259 }
hasV6Ops() const260 bool hasV6Ops() const {
261 return getSTI().getFeatureBits()[ARM::HasV6Ops];
262 }
hasV6MOps() const263 bool hasV6MOps() const {
264 return getSTI().getFeatureBits()[ARM::HasV6MOps];
265 }
hasV7Ops() const266 bool hasV7Ops() const {
267 return getSTI().getFeatureBits()[ARM::HasV7Ops];
268 }
hasV8Ops() const269 bool hasV8Ops() const {
270 return getSTI().getFeatureBits()[ARM::HasV8Ops];
271 }
hasARM() const272 bool hasARM() const {
273 return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
274 }
hasDSP() const275 bool hasDSP() const {
276 return getSTI().getFeatureBits()[ARM::FeatureDSP];
277 }
hasD16() const278 bool hasD16() const {
279 return getSTI().getFeatureBits()[ARM::FeatureD16];
280 }
hasV8_1aOps() const281 bool hasV8_1aOps() const {
282 return getSTI().getFeatureBits()[ARM::HasV8_1aOps];
283 }
284
SwitchMode()285 void SwitchMode() {
286 MCSubtargetInfo &STI = copySTI();
287 uint64_t FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
288 setAvailableFeatures(FB);
289 }
isMClass() const290 bool isMClass() const {
291 return getSTI().getFeatureBits()[ARM::FeatureMClass];
292 }
293
294 /// @name Auto-generated Match Functions
295 /// {
296
297 #define GET_ASSEMBLER_HEADER
298 #include "ARMGenAsmMatcher.inc"
299
300 /// }
301
302 OperandMatchResultTy parseITCondCode(OperandVector &);
303 OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
304 OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
305 OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
306 OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
307 OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
308 OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
309 OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
310 OperandMatchResultTy parseBankedRegOperand(OperandVector &);
311 OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
312 int High);
parsePKHLSLImm(OperandVector & O)313 OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
314 return parsePKHImm(O, "lsl", 0, 31);
315 }
parsePKHASRImm(OperandVector & O)316 OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
317 return parsePKHImm(O, "asr", 1, 32);
318 }
319 OperandMatchResultTy parseSetEndImm(OperandVector &);
320 OperandMatchResultTy parseShifterImm(OperandVector &);
321 OperandMatchResultTy parseRotImm(OperandVector &);
322 OperandMatchResultTy parseModImm(OperandVector &);
323 OperandMatchResultTy parseBitfield(OperandVector &);
324 OperandMatchResultTy parsePostIdxReg(OperandVector &);
325 OperandMatchResultTy parseAM3Offset(OperandVector &);
326 OperandMatchResultTy parseFPImm(OperandVector &);
327 OperandMatchResultTy parseVectorList(OperandVector &);
328 OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
329 SMLoc &EndLoc);
330
331 // Asm Match Converter Methods
332 void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
333 void cvtThumbBranches(MCInst &Inst, const OperandVector &);
334
335 bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
336 bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
337 bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
338 bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
339
340 public:
341 enum ARMMatchResultTy {
342 Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
343 Match_RequiresNotITBlock,
344 Match_RequiresV6,
345 Match_RequiresThumb2,
346 Match_RequiresV8,
347 #define GET_OPERAND_DIAGNOSTIC_TYPES
348 #include "ARMGenAsmMatcher.inc"
349
350 };
351
ARMAsmParser(const MCSubtargetInfo & STI,MCAsmParser & Parser,const MCInstrInfo & MII,const MCTargetOptions & Options)352 ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
353 const MCInstrInfo &MII, const MCTargetOptions &Options)
354 : MCTargetAsmParser(Options, STI), MII(MII), UC(Parser) {
355 MCAsmParserExtension::Initialize(Parser);
356
357 // Cache the MCRegisterInfo.
358 MRI = getContext().getRegisterInfo();
359
360 // Initialize the set of available features.
361 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
362
363 // Not in an ITBlock to start with.
364 ITState.CurPosition = ~0U;
365
366 NextSymbolIsThumb = false;
367 }
368
369 // Implementation of the MCTargetAsmParser interface:
370 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
371 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
372 SMLoc NameLoc, OperandVector &Operands) override;
373 bool ParseDirective(AsmToken DirectiveID) override;
374
375 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
376 unsigned Kind) override;
377 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
378
379 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
380 OperandVector &Operands, MCStreamer &Out,
381 uint64_t &ErrorInfo,
382 bool MatchingInlineAsm) override;
383 void onLabelParsed(MCSymbol *Symbol) override;
384 };
385 } // end anonymous namespace
386
387 namespace {
388
389 /// ARMOperand - Instances of this class represent a parsed ARM machine
390 /// operand.
391 class ARMOperand : public MCParsedAsmOperand {
392 enum KindTy {
393 k_CondCode,
394 k_CCOut,
395 k_ITCondMask,
396 k_CoprocNum,
397 k_CoprocReg,
398 k_CoprocOption,
399 k_Immediate,
400 k_MemBarrierOpt,
401 k_InstSyncBarrierOpt,
402 k_Memory,
403 k_PostIndexRegister,
404 k_MSRMask,
405 k_BankedReg,
406 k_ProcIFlags,
407 k_VectorIndex,
408 k_Register,
409 k_RegisterList,
410 k_DPRRegisterList,
411 k_SPRRegisterList,
412 k_VectorList,
413 k_VectorListAllLanes,
414 k_VectorListIndexed,
415 k_ShiftedRegister,
416 k_ShiftedImmediate,
417 k_ShifterImmediate,
418 k_RotateImmediate,
419 k_ModifiedImmediate,
420 k_BitfieldDescriptor,
421 k_Token
422 } Kind;
423
424 SMLoc StartLoc, EndLoc, AlignmentLoc;
425 SmallVector<unsigned, 8> Registers;
426
427 struct CCOp {
428 ARMCC::CondCodes Val;
429 };
430
431 struct CopOp {
432 unsigned Val;
433 };
434
435 struct CoprocOptionOp {
436 unsigned Val;
437 };
438
439 struct ITMaskOp {
440 unsigned Mask:4;
441 };
442
443 struct MBOptOp {
444 ARM_MB::MemBOpt Val;
445 };
446
447 struct ISBOptOp {
448 ARM_ISB::InstSyncBOpt Val;
449 };
450
451 struct IFlagsOp {
452 ARM_PROC::IFlags Val;
453 };
454
455 struct MMaskOp {
456 unsigned Val;
457 };
458
459 struct BankedRegOp {
460 unsigned Val;
461 };
462
463 struct TokOp {
464 const char *Data;
465 unsigned Length;
466 };
467
468 struct RegOp {
469 unsigned RegNum;
470 };
471
472 // A vector register list is a sequential list of 1 to 4 registers.
473 struct VectorListOp {
474 unsigned RegNum;
475 unsigned Count;
476 unsigned LaneIndex;
477 bool isDoubleSpaced;
478 };
479
480 struct VectorIndexOp {
481 unsigned Val;
482 };
483
484 struct ImmOp {
485 const MCExpr *Val;
486 };
487
488 /// Combined record for all forms of ARM address expressions.
489 struct MemoryOp {
490 unsigned BaseRegNum;
491 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
492 // was specified.
493 const MCConstantExpr *OffsetImm; // Offset immediate value
494 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL
495 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
496 unsigned ShiftImm; // shift for OffsetReg.
497 unsigned Alignment; // 0 = no alignment specified
498 // n = alignment in bytes (2, 4, 8, 16, or 32)
499 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit)
500 };
501
502 struct PostIdxRegOp {
503 unsigned RegNum;
504 bool isAdd;
505 ARM_AM::ShiftOpc ShiftTy;
506 unsigned ShiftImm;
507 };
508
509 struct ShifterImmOp {
510 bool isASR;
511 unsigned Imm;
512 };
513
514 struct RegShiftedRegOp {
515 ARM_AM::ShiftOpc ShiftTy;
516 unsigned SrcReg;
517 unsigned ShiftReg;
518 unsigned ShiftImm;
519 };
520
521 struct RegShiftedImmOp {
522 ARM_AM::ShiftOpc ShiftTy;
523 unsigned SrcReg;
524 unsigned ShiftImm;
525 };
526
527 struct RotImmOp {
528 unsigned Imm;
529 };
530
531 struct ModImmOp {
532 unsigned Bits;
533 unsigned Rot;
534 };
535
536 struct BitfieldOp {
537 unsigned LSB;
538 unsigned Width;
539 };
540
541 union {
542 struct CCOp CC;
543 struct CopOp Cop;
544 struct CoprocOptionOp CoprocOption;
545 struct MBOptOp MBOpt;
546 struct ISBOptOp ISBOpt;
547 struct ITMaskOp ITMask;
548 struct IFlagsOp IFlags;
549 struct MMaskOp MMask;
550 struct BankedRegOp BankedReg;
551 struct TokOp Tok;
552 struct RegOp Reg;
553 struct VectorListOp VectorList;
554 struct VectorIndexOp VectorIndex;
555 struct ImmOp Imm;
556 struct MemoryOp Memory;
557 struct PostIdxRegOp PostIdxReg;
558 struct ShifterImmOp ShifterImm;
559 struct RegShiftedRegOp RegShiftedReg;
560 struct RegShiftedImmOp RegShiftedImm;
561 struct RotImmOp RotImm;
562 struct ModImmOp ModImm;
563 struct BitfieldOp Bitfield;
564 };
565
566 public:
ARMOperand(KindTy K)567 ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
568
569 /// getStartLoc - Get the location of the first token of this operand.
getStartLoc() const570 SMLoc getStartLoc() const override { return StartLoc; }
571 /// getEndLoc - Get the location of the last token of this operand.
getEndLoc() const572 SMLoc getEndLoc() const override { return EndLoc; }
573 /// getLocRange - Get the range between the first and last token of this
574 /// operand.
getLocRange() const575 SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
576
577 /// getAlignmentLoc - Get the location of the Alignment token of this operand.
getAlignmentLoc() const578 SMLoc getAlignmentLoc() const {
579 assert(Kind == k_Memory && "Invalid access!");
580 return AlignmentLoc;
581 }
582
getCondCode() const583 ARMCC::CondCodes getCondCode() const {
584 assert(Kind == k_CondCode && "Invalid access!");
585 return CC.Val;
586 }
587
getCoproc() const588 unsigned getCoproc() const {
589 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
590 return Cop.Val;
591 }
592
getToken() const593 StringRef getToken() const {
594 assert(Kind == k_Token && "Invalid access!");
595 return StringRef(Tok.Data, Tok.Length);
596 }
597
getReg() const598 unsigned getReg() const override {
599 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
600 return Reg.RegNum;
601 }
602
getRegList() const603 const SmallVectorImpl<unsigned> &getRegList() const {
604 assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
605 Kind == k_SPRRegisterList) && "Invalid access!");
606 return Registers;
607 }
608
getImm() const609 const MCExpr *getImm() const {
610 assert(isImm() && "Invalid access!");
611 return Imm.Val;
612 }
613
getVectorIndex() const614 unsigned getVectorIndex() const {
615 assert(Kind == k_VectorIndex && "Invalid access!");
616 return VectorIndex.Val;
617 }
618
getMemBarrierOpt() const619 ARM_MB::MemBOpt getMemBarrierOpt() const {
620 assert(Kind == k_MemBarrierOpt && "Invalid access!");
621 return MBOpt.Val;
622 }
623
getInstSyncBarrierOpt() const624 ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
625 assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
626 return ISBOpt.Val;
627 }
628
getProcIFlags() const629 ARM_PROC::IFlags getProcIFlags() const {
630 assert(Kind == k_ProcIFlags && "Invalid access!");
631 return IFlags.Val;
632 }
633
getMSRMask() const634 unsigned getMSRMask() const {
635 assert(Kind == k_MSRMask && "Invalid access!");
636 return MMask.Val;
637 }
638
getBankedReg() const639 unsigned getBankedReg() const {
640 assert(Kind == k_BankedReg && "Invalid access!");
641 return BankedReg.Val;
642 }
643
isCoprocNum() const644 bool isCoprocNum() const { return Kind == k_CoprocNum; }
isCoprocReg() const645 bool isCoprocReg() const { return Kind == k_CoprocReg; }
isCoprocOption() const646 bool isCoprocOption() const { return Kind == k_CoprocOption; }
isCondCode() const647 bool isCondCode() const { return Kind == k_CondCode; }
isCCOut() const648 bool isCCOut() const { return Kind == k_CCOut; }
isITMask() const649 bool isITMask() const { return Kind == k_ITCondMask; }
isITCondCode() const650 bool isITCondCode() const { return Kind == k_CondCode; }
isImm() const651 bool isImm() const override { return Kind == k_Immediate; }
652 // checks whether this operand is an unsigned offset which fits is a field
653 // of specified width and scaled by a specific number of bits
654 template<unsigned width, unsigned scale>
isUnsignedOffset() const655 bool isUnsignedOffset() const {
656 if (!isImm()) return false;
657 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
658 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
659 int64_t Val = CE->getValue();
660 int64_t Align = 1LL << scale;
661 int64_t Max = Align * ((1LL << width) - 1);
662 return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
663 }
664 return false;
665 }
666 // checks whether this operand is an signed offset which fits is a field
667 // of specified width and scaled by a specific number of bits
668 template<unsigned width, unsigned scale>
isSignedOffset() const669 bool isSignedOffset() const {
670 if (!isImm()) return false;
671 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
672 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
673 int64_t Val = CE->getValue();
674 int64_t Align = 1LL << scale;
675 int64_t Max = Align * ((1LL << (width-1)) - 1);
676 int64_t Min = -Align * (1LL << (width-1));
677 return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
678 }
679 return false;
680 }
681
682 // checks whether this operand is a memory operand computed as an offset
683 // applied to PC. the offset may have 8 bits of magnitude and is represented
684 // with two bits of shift. textually it may be either [pc, #imm], #imm or
685 // relocable expression...
isThumbMemPC() const686 bool isThumbMemPC() const {
687 int64_t Val = 0;
688 if (isImm()) {
689 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
690 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
691 if (!CE) return false;
692 Val = CE->getValue();
693 }
694 else if (isMem()) {
695 if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
696 if(Memory.BaseRegNum != ARM::PC) return false;
697 Val = Memory.OffsetImm->getValue();
698 }
699 else return false;
700 return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
701 }
isFPImm() const702 bool isFPImm() const {
703 if (!isImm()) return false;
704 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
705 if (!CE) return false;
706 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
707 return Val != -1;
708 }
isFBits16() const709 bool isFBits16() const {
710 if (!isImm()) return false;
711 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
712 if (!CE) return false;
713 int64_t Value = CE->getValue();
714 return Value >= 0 && Value <= 16;
715 }
isFBits32() const716 bool isFBits32() const {
717 if (!isImm()) return false;
718 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
719 if (!CE) return false;
720 int64_t Value = CE->getValue();
721 return Value >= 1 && Value <= 32;
722 }
isImm8s4() const723 bool isImm8s4() const {
724 if (!isImm()) return false;
725 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
726 if (!CE) return false;
727 int64_t Value = CE->getValue();
728 return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
729 }
isImm0_1020s4() const730 bool isImm0_1020s4() const {
731 if (!isImm()) return false;
732 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
733 if (!CE) return false;
734 int64_t Value = CE->getValue();
735 return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
736 }
isImm0_508s4() const737 bool isImm0_508s4() const {
738 if (!isImm()) return false;
739 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
740 if (!CE) return false;
741 int64_t Value = CE->getValue();
742 return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
743 }
isImm0_508s4Neg() const744 bool isImm0_508s4Neg() const {
745 if (!isImm()) return false;
746 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
747 if (!CE) return false;
748 int64_t Value = -CE->getValue();
749 // explicitly exclude zero. we want that to use the normal 0_508 version.
750 return ((Value & 3) == 0) && Value > 0 && Value <= 508;
751 }
isImm0_239() const752 bool isImm0_239() const {
753 if (!isImm()) return false;
754 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
755 if (!CE) return false;
756 int64_t Value = CE->getValue();
757 return Value >= 0 && Value < 240;
758 }
isImm0_255() const759 bool isImm0_255() const {
760 if (!isImm()) return false;
761 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
762 if (!CE) return false;
763 int64_t Value = CE->getValue();
764 return Value >= 0 && Value < 256;
765 }
isImm0_4095() const766 bool isImm0_4095() const {
767 if (!isImm()) return false;
768 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
769 if (!CE) return false;
770 int64_t Value = CE->getValue();
771 return Value >= 0 && Value < 4096;
772 }
isImm0_4095Neg() const773 bool isImm0_4095Neg() const {
774 if (!isImm()) return false;
775 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
776 if (!CE) return false;
777 int64_t Value = -CE->getValue();
778 return Value > 0 && Value < 4096;
779 }
isImm0_1() const780 bool isImm0_1() const {
781 if (!isImm()) return false;
782 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
783 if (!CE) return false;
784 int64_t Value = CE->getValue();
785 return Value >= 0 && Value < 2;
786 }
isImm0_3() const787 bool isImm0_3() const {
788 if (!isImm()) return false;
789 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
790 if (!CE) return false;
791 int64_t Value = CE->getValue();
792 return Value >= 0 && Value < 4;
793 }
isImm0_7() const794 bool isImm0_7() const {
795 if (!isImm()) return false;
796 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
797 if (!CE) return false;
798 int64_t Value = CE->getValue();
799 return Value >= 0 && Value < 8;
800 }
isImm0_15() const801 bool isImm0_15() const {
802 if (!isImm()) return false;
803 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
804 if (!CE) return false;
805 int64_t Value = CE->getValue();
806 return Value >= 0 && Value < 16;
807 }
isImm0_31() const808 bool isImm0_31() const {
809 if (!isImm()) return false;
810 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
811 if (!CE) return false;
812 int64_t Value = CE->getValue();
813 return Value >= 0 && Value < 32;
814 }
isImm0_63() const815 bool isImm0_63() const {
816 if (!isImm()) return false;
817 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
818 if (!CE) return false;
819 int64_t Value = CE->getValue();
820 return Value >= 0 && Value < 64;
821 }
isImm8() const822 bool isImm8() const {
823 if (!isImm()) return false;
824 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
825 if (!CE) return false;
826 int64_t Value = CE->getValue();
827 return Value == 8;
828 }
isImm16() const829 bool isImm16() const {
830 if (!isImm()) return false;
831 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
832 if (!CE) return false;
833 int64_t Value = CE->getValue();
834 return Value == 16;
835 }
isImm32() const836 bool isImm32() const {
837 if (!isImm()) return false;
838 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
839 if (!CE) return false;
840 int64_t Value = CE->getValue();
841 return Value == 32;
842 }
isShrImm8() const843 bool isShrImm8() const {
844 if (!isImm()) return false;
845 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
846 if (!CE) return false;
847 int64_t Value = CE->getValue();
848 return Value > 0 && Value <= 8;
849 }
isShrImm16() const850 bool isShrImm16() const {
851 if (!isImm()) return false;
852 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
853 if (!CE) return false;
854 int64_t Value = CE->getValue();
855 return Value > 0 && Value <= 16;
856 }
isShrImm32() const857 bool isShrImm32() const {
858 if (!isImm()) return false;
859 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
860 if (!CE) return false;
861 int64_t Value = CE->getValue();
862 return Value > 0 && Value <= 32;
863 }
isShrImm64() const864 bool isShrImm64() const {
865 if (!isImm()) return false;
866 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
867 if (!CE) return false;
868 int64_t Value = CE->getValue();
869 return Value > 0 && Value <= 64;
870 }
isImm1_7() const871 bool isImm1_7() const {
872 if (!isImm()) return false;
873 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
874 if (!CE) return false;
875 int64_t Value = CE->getValue();
876 return Value > 0 && Value < 8;
877 }
isImm1_15() const878 bool isImm1_15() const {
879 if (!isImm()) return false;
880 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
881 if (!CE) return false;
882 int64_t Value = CE->getValue();
883 return Value > 0 && Value < 16;
884 }
isImm1_31() const885 bool isImm1_31() const {
886 if (!isImm()) return false;
887 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
888 if (!CE) return false;
889 int64_t Value = CE->getValue();
890 return Value > 0 && Value < 32;
891 }
isImm1_16() const892 bool isImm1_16() const {
893 if (!isImm()) return false;
894 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
895 if (!CE) return false;
896 int64_t Value = CE->getValue();
897 return Value > 0 && Value < 17;
898 }
isImm1_32() const899 bool isImm1_32() const {
900 if (!isImm()) return false;
901 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
902 if (!CE) return false;
903 int64_t Value = CE->getValue();
904 return Value > 0 && Value < 33;
905 }
isImm0_32() const906 bool isImm0_32() const {
907 if (!isImm()) return false;
908 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
909 if (!CE) return false;
910 int64_t Value = CE->getValue();
911 return Value >= 0 && Value < 33;
912 }
isImm0_65535() const913 bool isImm0_65535() const {
914 if (!isImm()) return false;
915 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
916 if (!CE) return false;
917 int64_t Value = CE->getValue();
918 return Value >= 0 && Value < 65536;
919 }
isImm256_65535Expr() const920 bool isImm256_65535Expr() const {
921 if (!isImm()) return false;
922 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
923 // If it's not a constant expression, it'll generate a fixup and be
924 // handled later.
925 if (!CE) return true;
926 int64_t Value = CE->getValue();
927 return Value >= 256 && Value < 65536;
928 }
isImm0_65535Expr() const929 bool isImm0_65535Expr() const {
930 if (!isImm()) return false;
931 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
932 // If it's not a constant expression, it'll generate a fixup and be
933 // handled later.
934 if (!CE) return true;
935 int64_t Value = CE->getValue();
936 return Value >= 0 && Value < 65536;
937 }
isImm24bit() const938 bool isImm24bit() const {
939 if (!isImm()) return false;
940 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
941 if (!CE) return false;
942 int64_t Value = CE->getValue();
943 return Value >= 0 && Value <= 0xffffff;
944 }
isImmThumbSR() const945 bool isImmThumbSR() const {
946 if (!isImm()) return false;
947 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
948 if (!CE) return false;
949 int64_t Value = CE->getValue();
950 return Value > 0 && Value < 33;
951 }
isPKHLSLImm() const952 bool isPKHLSLImm() const {
953 if (!isImm()) return false;
954 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
955 if (!CE) return false;
956 int64_t Value = CE->getValue();
957 return Value >= 0 && Value < 32;
958 }
isPKHASRImm() const959 bool isPKHASRImm() const {
960 if (!isImm()) return false;
961 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
962 if (!CE) return false;
963 int64_t Value = CE->getValue();
964 return Value > 0 && Value <= 32;
965 }
isAdrLabel() const966 bool isAdrLabel() const {
967 // If we have an immediate that's not a constant, treat it as a label
968 // reference needing a fixup.
969 if (isImm() && !isa<MCConstantExpr>(getImm()))
970 return true;
971
972 // If it is a constant, it must fit into a modified immediate encoding.
973 if (!isImm()) return false;
974 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
975 if (!CE) return false;
976 int64_t Value = CE->getValue();
977 return (ARM_AM::getSOImmVal(Value) != -1 ||
978 ARM_AM::getSOImmVal(-Value) != -1);
979 }
isT2SOImm() const980 bool isT2SOImm() const {
981 if (!isImm()) return false;
982 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
983 if (!CE) return false;
984 int64_t Value = CE->getValue();
985 return ARM_AM::getT2SOImmVal(Value) != -1;
986 }
isT2SOImmNot() const987 bool isT2SOImmNot() const {
988 if (!isImm()) return false;
989 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
990 if (!CE) return false;
991 int64_t Value = CE->getValue();
992 return ARM_AM::getT2SOImmVal(Value) == -1 &&
993 ARM_AM::getT2SOImmVal(~Value) != -1;
994 }
isT2SOImmNeg() const995 bool isT2SOImmNeg() const {
996 if (!isImm()) return false;
997 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
998 if (!CE) return false;
999 int64_t Value = CE->getValue();
1000 // Only use this when not representable as a plain so_imm.
1001 return ARM_AM::getT2SOImmVal(Value) == -1 &&
1002 ARM_AM::getT2SOImmVal(-Value) != -1;
1003 }
isSetEndImm() const1004 bool isSetEndImm() const {
1005 if (!isImm()) return false;
1006 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1007 if (!CE) return false;
1008 int64_t Value = CE->getValue();
1009 return Value == 1 || Value == 0;
1010 }
isReg() const1011 bool isReg() const override { return Kind == k_Register; }
isRegList() const1012 bool isRegList() const { return Kind == k_RegisterList; }
isDPRRegList() const1013 bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
isSPRRegList() const1014 bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
isToken() const1015 bool isToken() const override { return Kind == k_Token; }
isMemBarrierOpt() const1016 bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
isInstSyncBarrierOpt() const1017 bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
isMem() const1018 bool isMem() const override { return Kind == k_Memory; }
isShifterImm() const1019 bool isShifterImm() const { return Kind == k_ShifterImmediate; }
isRegShiftedReg() const1020 bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
isRegShiftedImm() const1021 bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
isRotImm() const1022 bool isRotImm() const { return Kind == k_RotateImmediate; }
isModImm() const1023 bool isModImm() const { return Kind == k_ModifiedImmediate; }
isModImmNot() const1024 bool isModImmNot() const {
1025 if (!isImm()) return false;
1026 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1027 if (!CE) return false;
1028 int64_t Value = CE->getValue();
1029 return ARM_AM::getSOImmVal(~Value) != -1;
1030 }
isModImmNeg() const1031 bool isModImmNeg() const {
1032 if (!isImm()) return false;
1033 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1034 if (!CE) return false;
1035 int64_t Value = CE->getValue();
1036 return ARM_AM::getSOImmVal(Value) == -1 &&
1037 ARM_AM::getSOImmVal(-Value) != -1;
1038 }
isBitfield() const1039 bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
isPostIdxRegShifted() const1040 bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
isPostIdxReg() const1041 bool isPostIdxReg() const {
1042 return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
1043 }
isMemNoOffset(bool alignOK=false,unsigned Alignment=0) const1044 bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
1045 if (!isMem())
1046 return false;
1047 // No offset of any kind.
1048 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1049 (alignOK || Memory.Alignment == Alignment);
1050 }
isMemPCRelImm12() const1051 bool isMemPCRelImm12() const {
1052 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1053 return false;
1054 // Base register must be PC.
1055 if (Memory.BaseRegNum != ARM::PC)
1056 return false;
1057 // Immediate offset in range [-4095, 4095].
1058 if (!Memory.OffsetImm) return true;
1059 int64_t Val = Memory.OffsetImm->getValue();
1060 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1061 }
isAlignedMemory() const1062 bool isAlignedMemory() const {
1063 return isMemNoOffset(true);
1064 }
isAlignedMemoryNone() const1065 bool isAlignedMemoryNone() const {
1066 return isMemNoOffset(false, 0);
1067 }
isDupAlignedMemoryNone() const1068 bool isDupAlignedMemoryNone() const {
1069 return isMemNoOffset(false, 0);
1070 }
isAlignedMemory16() const1071 bool isAlignedMemory16() const {
1072 if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1073 return true;
1074 return isMemNoOffset(false, 0);
1075 }
isDupAlignedMemory16() const1076 bool isDupAlignedMemory16() const {
1077 if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1078 return true;
1079 return isMemNoOffset(false, 0);
1080 }
isAlignedMemory32() const1081 bool isAlignedMemory32() const {
1082 if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1083 return true;
1084 return isMemNoOffset(false, 0);
1085 }
isDupAlignedMemory32() const1086 bool isDupAlignedMemory32() const {
1087 if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1088 return true;
1089 return isMemNoOffset(false, 0);
1090 }
isAlignedMemory64() const1091 bool isAlignedMemory64() const {
1092 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1093 return true;
1094 return isMemNoOffset(false, 0);
1095 }
isDupAlignedMemory64() const1096 bool isDupAlignedMemory64() const {
1097 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1098 return true;
1099 return isMemNoOffset(false, 0);
1100 }
isAlignedMemory64or128() const1101 bool isAlignedMemory64or128() const {
1102 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1103 return true;
1104 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1105 return true;
1106 return isMemNoOffset(false, 0);
1107 }
isDupAlignedMemory64or128() const1108 bool isDupAlignedMemory64or128() const {
1109 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1110 return true;
1111 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1112 return true;
1113 return isMemNoOffset(false, 0);
1114 }
isAlignedMemory64or128or256() const1115 bool isAlignedMemory64or128or256() const {
1116 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1117 return true;
1118 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1119 return true;
1120 if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
1121 return true;
1122 return isMemNoOffset(false, 0);
1123 }
isAddrMode2() const1124 bool isAddrMode2() const {
1125 if (!isMem() || Memory.Alignment != 0) return false;
1126 // Check for register offset.
1127 if (Memory.OffsetRegNum) return true;
1128 // Immediate offset in range [-4095, 4095].
1129 if (!Memory.OffsetImm) return true;
1130 int64_t Val = Memory.OffsetImm->getValue();
1131 return Val > -4096 && Val < 4096;
1132 }
isAM2OffsetImm() const1133 bool isAM2OffsetImm() const {
1134 if (!isImm()) return false;
1135 // Immediate offset in range [-4095, 4095].
1136 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1137 if (!CE) return false;
1138 int64_t Val = CE->getValue();
1139 return (Val == INT32_MIN) || (Val > -4096 && Val < 4096);
1140 }
isAddrMode3() const1141 bool isAddrMode3() const {
1142 // If we have an immediate that's not a constant, treat it as a label
1143 // reference needing a fixup. If it is a constant, it's something else
1144 // and we reject it.
1145 if (isImm() && !isa<MCConstantExpr>(getImm()))
1146 return true;
1147 if (!isMem() || Memory.Alignment != 0) return false;
1148 // No shifts are legal for AM3.
1149 if (Memory.ShiftType != ARM_AM::no_shift) return false;
1150 // Check for register offset.
1151 if (Memory.OffsetRegNum) return true;
1152 // Immediate offset in range [-255, 255].
1153 if (!Memory.OffsetImm) return true;
1154 int64_t Val = Memory.OffsetImm->getValue();
1155 // The #-0 offset is encoded as INT32_MIN, and we have to check
1156 // for this too.
1157 return (Val > -256 && Val < 256) || Val == INT32_MIN;
1158 }
isAM3Offset() const1159 bool isAM3Offset() const {
1160 if (Kind != k_Immediate && Kind != k_PostIndexRegister)
1161 return false;
1162 if (Kind == k_PostIndexRegister)
1163 return PostIdxReg.ShiftTy == ARM_AM::no_shift;
1164 // Immediate offset in range [-255, 255].
1165 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1166 if (!CE) return false;
1167 int64_t Val = CE->getValue();
1168 // Special case, #-0 is INT32_MIN.
1169 return (Val > -256 && Val < 256) || Val == INT32_MIN;
1170 }
isAddrMode5() const1171 bool isAddrMode5() const {
1172 // If we have an immediate that's not a constant, treat it as a label
1173 // reference needing a fixup. If it is a constant, it's something else
1174 // and we reject it.
1175 if (isImm() && !isa<MCConstantExpr>(getImm()))
1176 return true;
1177 if (!isMem() || Memory.Alignment != 0) return false;
1178 // Check for register offset.
1179 if (Memory.OffsetRegNum) return false;
1180 // Immediate offset in range [-1020, 1020] and a multiple of 4.
1181 if (!Memory.OffsetImm) return true;
1182 int64_t Val = Memory.OffsetImm->getValue();
1183 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1184 Val == INT32_MIN;
1185 }
isMemTBB() const1186 bool isMemTBB() const {
1187 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1188 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1189 return false;
1190 return true;
1191 }
isMemTBH() const1192 bool isMemTBH() const {
1193 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1194 Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1195 Memory.Alignment != 0 )
1196 return false;
1197 return true;
1198 }
isMemRegOffset() const1199 bool isMemRegOffset() const {
1200 if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1201 return false;
1202 return true;
1203 }
isT2MemRegOffset() const1204 bool isT2MemRegOffset() const {
1205 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1206 Memory.Alignment != 0)
1207 return false;
1208 // Only lsl #{0, 1, 2, 3} allowed.
1209 if (Memory.ShiftType == ARM_AM::no_shift)
1210 return true;
1211 if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1212 return false;
1213 return true;
1214 }
isMemThumbRR() const1215 bool isMemThumbRR() const {
1216 // Thumb reg+reg addressing is simple. Just two registers, a base and
1217 // an offset. No shifts, negations or any other complicating factors.
1218 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1219 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1220 return false;
1221 return isARMLowRegister(Memory.BaseRegNum) &&
1222 (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
1223 }
isMemThumbRIs4() const1224 bool isMemThumbRIs4() const {
1225 if (!isMem() || Memory.OffsetRegNum != 0 ||
1226 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1227 return false;
1228 // Immediate offset, multiple of 4 in range [0, 124].
1229 if (!Memory.OffsetImm) return true;
1230 int64_t Val = Memory.OffsetImm->getValue();
1231 return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1232 }
isMemThumbRIs2() const1233 bool isMemThumbRIs2() const {
1234 if (!isMem() || Memory.OffsetRegNum != 0 ||
1235 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1236 return false;
1237 // Immediate offset, multiple of 4 in range [0, 62].
1238 if (!Memory.OffsetImm) return true;
1239 int64_t Val = Memory.OffsetImm->getValue();
1240 return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1241 }
isMemThumbRIs1() const1242 bool isMemThumbRIs1() const {
1243 if (!isMem() || Memory.OffsetRegNum != 0 ||
1244 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1245 return false;
1246 // Immediate offset in range [0, 31].
1247 if (!Memory.OffsetImm) return true;
1248 int64_t Val = Memory.OffsetImm->getValue();
1249 return Val >= 0 && Val <= 31;
1250 }
isMemThumbSPI() const1251 bool isMemThumbSPI() const {
1252 if (!isMem() || Memory.OffsetRegNum != 0 ||
1253 Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1254 return false;
1255 // Immediate offset, multiple of 4 in range [0, 1020].
1256 if (!Memory.OffsetImm) return true;
1257 int64_t Val = Memory.OffsetImm->getValue();
1258 return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1259 }
isMemImm8s4Offset() const1260 bool isMemImm8s4Offset() const {
1261 // If we have an immediate that's not a constant, treat it as a label
1262 // reference needing a fixup. If it is a constant, it's something else
1263 // and we reject it.
1264 if (isImm() && !isa<MCConstantExpr>(getImm()))
1265 return true;
1266 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1267 return false;
1268 // Immediate offset a multiple of 4 in range [-1020, 1020].
1269 if (!Memory.OffsetImm) return true;
1270 int64_t Val = Memory.OffsetImm->getValue();
1271 // Special case, #-0 is INT32_MIN.
1272 return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) || Val == INT32_MIN;
1273 }
isMemImm0_1020s4Offset() const1274 bool isMemImm0_1020s4Offset() const {
1275 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1276 return false;
1277 // Immediate offset a multiple of 4 in range [0, 1020].
1278 if (!Memory.OffsetImm) return true;
1279 int64_t Val = Memory.OffsetImm->getValue();
1280 return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1281 }
isMemImm8Offset() const1282 bool isMemImm8Offset() const {
1283 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1284 return false;
1285 // Base reg of PC isn't allowed for these encodings.
1286 if (Memory.BaseRegNum == ARM::PC) return false;
1287 // Immediate offset in range [-255, 255].
1288 if (!Memory.OffsetImm) return true;
1289 int64_t Val = Memory.OffsetImm->getValue();
1290 return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1291 }
isMemPosImm8Offset() const1292 bool isMemPosImm8Offset() const {
1293 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1294 return false;
1295 // Immediate offset in range [0, 255].
1296 if (!Memory.OffsetImm) return true;
1297 int64_t Val = Memory.OffsetImm->getValue();
1298 return Val >= 0 && Val < 256;
1299 }
isMemNegImm8Offset() const1300 bool isMemNegImm8Offset() const {
1301 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1302 return false;
1303 // Base reg of PC isn't allowed for these encodings.
1304 if (Memory.BaseRegNum == ARM::PC) return false;
1305 // Immediate offset in range [-255, -1].
1306 if (!Memory.OffsetImm) return false;
1307 int64_t Val = Memory.OffsetImm->getValue();
1308 return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1309 }
isMemUImm12Offset() const1310 bool isMemUImm12Offset() const {
1311 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1312 return false;
1313 // Immediate offset in range [0, 4095].
1314 if (!Memory.OffsetImm) return true;
1315 int64_t Val = Memory.OffsetImm->getValue();
1316 return (Val >= 0 && Val < 4096);
1317 }
isMemImm12Offset() const1318 bool isMemImm12Offset() const {
1319 // If we have an immediate that's not a constant, treat it as a label
1320 // reference needing a fixup. If it is a constant, it's something else
1321 // and we reject it.
1322 if (isImm() && !isa<MCConstantExpr>(getImm()))
1323 return true;
1324
1325 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1326 return false;
1327 // Immediate offset in range [-4095, 4095].
1328 if (!Memory.OffsetImm) return true;
1329 int64_t Val = Memory.OffsetImm->getValue();
1330 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1331 }
isPostIdxImm8() const1332 bool isPostIdxImm8() const {
1333 if (!isImm()) return false;
1334 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1335 if (!CE) return false;
1336 int64_t Val = CE->getValue();
1337 return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1338 }
isPostIdxImm8s4() const1339 bool isPostIdxImm8s4() const {
1340 if (!isImm()) return false;
1341 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1342 if (!CE) return false;
1343 int64_t Val = CE->getValue();
1344 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1345 (Val == INT32_MIN);
1346 }
1347
isMSRMask() const1348 bool isMSRMask() const { return Kind == k_MSRMask; }
isBankedReg() const1349 bool isBankedReg() const { return Kind == k_BankedReg; }
isProcIFlags() const1350 bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1351
1352 // NEON operands.
isSingleSpacedVectorList() const1353 bool isSingleSpacedVectorList() const {
1354 return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1355 }
isDoubleSpacedVectorList() const1356 bool isDoubleSpacedVectorList() const {
1357 return Kind == k_VectorList && VectorList.isDoubleSpaced;
1358 }
isVecListOneD() const1359 bool isVecListOneD() const {
1360 if (!isSingleSpacedVectorList()) return false;
1361 return VectorList.Count == 1;
1362 }
1363
isVecListDPair() const1364 bool isVecListDPair() const {
1365 if (!isSingleSpacedVectorList()) return false;
1366 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1367 .contains(VectorList.RegNum));
1368 }
1369
isVecListThreeD() const1370 bool isVecListThreeD() const {
1371 if (!isSingleSpacedVectorList()) return false;
1372 return VectorList.Count == 3;
1373 }
1374
isVecListFourD() const1375 bool isVecListFourD() const {
1376 if (!isSingleSpacedVectorList()) return false;
1377 return VectorList.Count == 4;
1378 }
1379
isVecListDPairSpaced() const1380 bool isVecListDPairSpaced() const {
1381 if (Kind != k_VectorList) return false;
1382 if (isSingleSpacedVectorList()) return false;
1383 return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1384 .contains(VectorList.RegNum));
1385 }
1386
isVecListThreeQ() const1387 bool isVecListThreeQ() const {
1388 if (!isDoubleSpacedVectorList()) return false;
1389 return VectorList.Count == 3;
1390 }
1391
isVecListFourQ() const1392 bool isVecListFourQ() const {
1393 if (!isDoubleSpacedVectorList()) return false;
1394 return VectorList.Count == 4;
1395 }
1396
isSingleSpacedVectorAllLanes() const1397 bool isSingleSpacedVectorAllLanes() const {
1398 return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1399 }
isDoubleSpacedVectorAllLanes() const1400 bool isDoubleSpacedVectorAllLanes() const {
1401 return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1402 }
isVecListOneDAllLanes() const1403 bool isVecListOneDAllLanes() const {
1404 if (!isSingleSpacedVectorAllLanes()) return false;
1405 return VectorList.Count == 1;
1406 }
1407
isVecListDPairAllLanes() const1408 bool isVecListDPairAllLanes() const {
1409 if (!isSingleSpacedVectorAllLanes()) return false;
1410 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1411 .contains(VectorList.RegNum));
1412 }
1413
isVecListDPairSpacedAllLanes() const1414 bool isVecListDPairSpacedAllLanes() const {
1415 if (!isDoubleSpacedVectorAllLanes()) return false;
1416 return VectorList.Count == 2;
1417 }
1418
isVecListThreeDAllLanes() const1419 bool isVecListThreeDAllLanes() const {
1420 if (!isSingleSpacedVectorAllLanes()) return false;
1421 return VectorList.Count == 3;
1422 }
1423
isVecListThreeQAllLanes() const1424 bool isVecListThreeQAllLanes() const {
1425 if (!isDoubleSpacedVectorAllLanes()) return false;
1426 return VectorList.Count == 3;
1427 }
1428
isVecListFourDAllLanes() const1429 bool isVecListFourDAllLanes() const {
1430 if (!isSingleSpacedVectorAllLanes()) return false;
1431 return VectorList.Count == 4;
1432 }
1433
isVecListFourQAllLanes() const1434 bool isVecListFourQAllLanes() const {
1435 if (!isDoubleSpacedVectorAllLanes()) return false;
1436 return VectorList.Count == 4;
1437 }
1438
isSingleSpacedVectorIndexed() const1439 bool isSingleSpacedVectorIndexed() const {
1440 return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1441 }
isDoubleSpacedVectorIndexed() const1442 bool isDoubleSpacedVectorIndexed() const {
1443 return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1444 }
isVecListOneDByteIndexed() const1445 bool isVecListOneDByteIndexed() const {
1446 if (!isSingleSpacedVectorIndexed()) return false;
1447 return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1448 }
1449
isVecListOneDHWordIndexed() const1450 bool isVecListOneDHWordIndexed() const {
1451 if (!isSingleSpacedVectorIndexed()) return false;
1452 return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1453 }
1454
isVecListOneDWordIndexed() const1455 bool isVecListOneDWordIndexed() const {
1456 if (!isSingleSpacedVectorIndexed()) return false;
1457 return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1458 }
1459
isVecListTwoDByteIndexed() const1460 bool isVecListTwoDByteIndexed() const {
1461 if (!isSingleSpacedVectorIndexed()) return false;
1462 return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1463 }
1464
isVecListTwoDHWordIndexed() const1465 bool isVecListTwoDHWordIndexed() const {
1466 if (!isSingleSpacedVectorIndexed()) return false;
1467 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1468 }
1469
isVecListTwoQWordIndexed() const1470 bool isVecListTwoQWordIndexed() const {
1471 if (!isDoubleSpacedVectorIndexed()) return false;
1472 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1473 }
1474
isVecListTwoQHWordIndexed() const1475 bool isVecListTwoQHWordIndexed() const {
1476 if (!isDoubleSpacedVectorIndexed()) return false;
1477 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1478 }
1479
isVecListTwoDWordIndexed() const1480 bool isVecListTwoDWordIndexed() const {
1481 if (!isSingleSpacedVectorIndexed()) return false;
1482 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1483 }
1484
isVecListThreeDByteIndexed() const1485 bool isVecListThreeDByteIndexed() const {
1486 if (!isSingleSpacedVectorIndexed()) return false;
1487 return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1488 }
1489
isVecListThreeDHWordIndexed() const1490 bool isVecListThreeDHWordIndexed() const {
1491 if (!isSingleSpacedVectorIndexed()) return false;
1492 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1493 }
1494
isVecListThreeQWordIndexed() const1495 bool isVecListThreeQWordIndexed() const {
1496 if (!isDoubleSpacedVectorIndexed()) return false;
1497 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1498 }
1499
isVecListThreeQHWordIndexed() const1500 bool isVecListThreeQHWordIndexed() const {
1501 if (!isDoubleSpacedVectorIndexed()) return false;
1502 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1503 }
1504
isVecListThreeDWordIndexed() const1505 bool isVecListThreeDWordIndexed() const {
1506 if (!isSingleSpacedVectorIndexed()) return false;
1507 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1508 }
1509
isVecListFourDByteIndexed() const1510 bool isVecListFourDByteIndexed() const {
1511 if (!isSingleSpacedVectorIndexed()) return false;
1512 return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1513 }
1514
isVecListFourDHWordIndexed() const1515 bool isVecListFourDHWordIndexed() const {
1516 if (!isSingleSpacedVectorIndexed()) return false;
1517 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1518 }
1519
isVecListFourQWordIndexed() const1520 bool isVecListFourQWordIndexed() const {
1521 if (!isDoubleSpacedVectorIndexed()) return false;
1522 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1523 }
1524
isVecListFourQHWordIndexed() const1525 bool isVecListFourQHWordIndexed() const {
1526 if (!isDoubleSpacedVectorIndexed()) return false;
1527 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1528 }
1529
isVecListFourDWordIndexed() const1530 bool isVecListFourDWordIndexed() const {
1531 if (!isSingleSpacedVectorIndexed()) return false;
1532 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1533 }
1534
isVectorIndex8() const1535 bool isVectorIndex8() const {
1536 if (Kind != k_VectorIndex) return false;
1537 return VectorIndex.Val < 8;
1538 }
isVectorIndex16() const1539 bool isVectorIndex16() const {
1540 if (Kind != k_VectorIndex) return false;
1541 return VectorIndex.Val < 4;
1542 }
isVectorIndex32() const1543 bool isVectorIndex32() const {
1544 if (Kind != k_VectorIndex) return false;
1545 return VectorIndex.Val < 2;
1546 }
1547
isNEONi8splat() const1548 bool isNEONi8splat() const {
1549 if (!isImm()) return false;
1550 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1551 // Must be a constant.
1552 if (!CE) return false;
1553 int64_t Value = CE->getValue();
1554 // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1555 // value.
1556 return Value >= 0 && Value < 256;
1557 }
1558
isNEONi16splat() const1559 bool isNEONi16splat() const {
1560 if (isNEONByteReplicate(2))
1561 return false; // Leave that to the byte-replication case; reject it here.
1562 if (!isImm())
1563 return false;
1564 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1565 // Must be a constant.
1566 if (!CE) return false;
1567 unsigned Value = CE->getValue();
1568 return ARM_AM::isNEONi16splat(Value);
1569 }
1570
isNEONi16splatNot() const1571 bool isNEONi16splatNot() const {
1572 if (!isImm())
1573 return false;
1574 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1575 // Must be a constant.
1576 if (!CE) return false;
1577 unsigned Value = CE->getValue();
1578 return ARM_AM::isNEONi16splat(~Value & 0xffff);
1579 }
1580
isNEONi32splat() const1581 bool isNEONi32splat() const {
1582 if (isNEONByteReplicate(4))
1583 return false; // Leave that to the byte-replication case; reject it here.
1584 if (!isImm())
1585 return false;
1586 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1587 // Must be a constant.
1588 if (!CE) return false;
1589 unsigned Value = CE->getValue();
1590 return ARM_AM::isNEONi32splat(Value);
1591 }
1592
isNEONi32splatNot() const1593 bool isNEONi32splatNot() const {
1594 if (!isImm())
1595 return false;
1596 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1597 // Must be a constant.
1598 if (!CE) return false;
1599 unsigned Value = CE->getValue();
1600 return ARM_AM::isNEONi32splat(~Value);
1601 }
1602
isNEONByteReplicate(unsigned NumBytes) const1603 bool isNEONByteReplicate(unsigned NumBytes) const {
1604 if (!isImm())
1605 return false;
1606 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1607 // Must be a constant.
1608 if (!CE)
1609 return false;
1610 int64_t Value = CE->getValue();
1611 if (!Value)
1612 return false; // Don't bother with zero.
1613
1614 unsigned char B = Value & 0xff;
1615 for (unsigned i = 1; i < NumBytes; ++i) {
1616 Value >>= 8;
1617 if ((Value & 0xff) != B)
1618 return false;
1619 }
1620 return true;
1621 }
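// Worked example (illustrative): with NumBytes == 4, a value such as
// 0x4d4d4d4d (every byte identical and non-zero) satisfies this check,
// while 0 and 0x4d004d00 do not.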
isNEONi16ByteReplicate() const1622 bool isNEONi16ByteReplicate() const { return isNEONByteReplicate(2); }
isNEONi32ByteReplicate() const1623 bool isNEONi32ByteReplicate() const { return isNEONByteReplicate(4); }
isNEONi32vmov() const1624 bool isNEONi32vmov() const {
1625 if (isNEONByteReplicate(4))
1626 return false; // Let it be classified as the byte-replicate case.
1627 if (!isImm())
1628 return false;
1629 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1630 // Must be a constant.
1631 if (!CE)
1632 return false;
1633 int64_t Value = CE->getValue();
1634 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1635 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1636 // FIXME: This is probably wrong; it looks copied and pasted from the previous case.
1637 return (Value >= 0 && Value < 256) ||
1638 (Value >= 0x0100 && Value <= 0xff00) ||
1639 (Value >= 0x010000 && Value <= 0xff0000) ||
1640 (Value >= 0x01000000 && Value <= 0xff000000) ||
1641 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1642 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1643 }
isNEONi32vmovNeg() const1644 bool isNEONi32vmovNeg() const {
1645 if (!isImm()) return false;
1646 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1647 // Must be a constant.
1648 if (!CE) return false;
1649 int64_t Value = ~CE->getValue();
1650 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1651 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1652 // FIXME: This is probably wrong; it looks copied and pasted from the previous case.
1653 return (Value >= 0 && Value < 256) ||
1654 (Value >= 0x0100 && Value <= 0xff00) ||
1655 (Value >= 0x010000 && Value <= 0xff0000) ||
1656 (Value >= 0x01000000 && Value <= 0xff000000) ||
1657 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1658 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1659 }
1660
isNEONi64splat() const1661 bool isNEONi64splat() const {
1662 if (!isImm()) return false;
1663 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1664 // Must be a constant.
1665 if (!CE) return false;
1666 uint64_t Value = CE->getValue();
1667 // i64 value with each byte being either 0 or 0xff.
1668 for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1669 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1670 return true;
1671 }
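// Worked example (illustrative): 0x00ff00ff00ff00ff qualifies since every
// byte is 0x00 or 0xff; 0x00ff00ff00ff00f7 does not because of the 0xf7 byte.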
1672
addExpr(MCInst & Inst,const MCExpr * Expr) const1673 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1674 // Add as immediates when possible. Null MCExpr = 0.
1675 if (!Expr)
1676 Inst.addOperand(MCOperand::createImm(0));
1677 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1678 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1679 else
1680 Inst.addOperand(MCOperand::createExpr(Expr));
1681 }
1682
addCondCodeOperands(MCInst & Inst,unsigned N) const1683 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1684 assert(N == 2 && "Invalid number of operands!");
1685 Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
1686 unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1687 Inst.addOperand(MCOperand::createReg(RegNum));
1688 }
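// For illustration: a predicated instruction such as "addeq" yields the
// ARMCC::EQ immediate followed by CPSR as the predicate register, whereas
// the always condition (AL) takes register 0 since no flags are read.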
1689
addCoprocNumOperands(MCInst & Inst,unsigned N) const1690 void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1691 assert(N == 1 && "Invalid number of operands!");
1692 Inst.addOperand(MCOperand::createImm(getCoproc()));
1693 }
1694
addCoprocRegOperands(MCInst & Inst,unsigned N) const1695 void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1696 assert(N == 1 && "Invalid number of operands!");
1697 Inst.addOperand(MCOperand::createImm(getCoproc()));
1698 }
1699
addCoprocOptionOperands(MCInst & Inst,unsigned N) const1700 void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1701 assert(N == 1 && "Invalid number of operands!");
1702 Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
1703 }
1704
addITMaskOperands(MCInst & Inst,unsigned N) const1705 void addITMaskOperands(MCInst &Inst, unsigned N) const {
1706 assert(N == 1 && "Invalid number of operands!");
1707 Inst.addOperand(MCOperand::createImm(ITMask.Mask));
1708 }
1709
addITCondCodeOperands(MCInst & Inst,unsigned N) const1710 void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1711 assert(N == 1 && "Invalid number of operands!");
1712 Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
1713 }
1714
addCCOutOperands(MCInst & Inst,unsigned N) const1715 void addCCOutOperands(MCInst &Inst, unsigned N) const {
1716 assert(N == 1 && "Invalid number of operands!");
1717 Inst.addOperand(MCOperand::createReg(getReg()));
1718 }
1719
addRegOperands(MCInst & Inst,unsigned N) const1720 void addRegOperands(MCInst &Inst, unsigned N) const {
1721 assert(N == 1 && "Invalid number of operands!");
1722 Inst.addOperand(MCOperand::createReg(getReg()));
1723 }
1724
addRegShiftedRegOperands(MCInst & Inst,unsigned N) const1725 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1726 assert(N == 3 && "Invalid number of operands!");
1727 assert(isRegShiftedReg() &&
1728 "addRegShiftedRegOperands() on non-RegShiftedReg!");
1729 Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
1730 Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
1731 Inst.addOperand(MCOperand::createImm(
1732 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1733 }
1734
addRegShiftedImmOperands(MCInst & Inst,unsigned N) const1735 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1736 assert(N == 2 && "Invalid number of operands!");
1737 assert(isRegShiftedImm() &&
1738 "addRegShiftedImmOperands() on non-RegShiftedImm!");
1739 Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
1740 // Shift of #32 is encoded as 0 where permitted
1741 unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
1742 Inst.addOperand(MCOperand::createImm(
1743 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
1744 }
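// For illustration: "lsr #32" is stored with ShiftImm == 32 but emitted with
// a zero shift amount, following the usual A32 convention that LSR/ASR #32
// is encoded as 0.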
1745
addShifterImmOperands(MCInst & Inst,unsigned N) const1746 void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1747 assert(N == 1 && "Invalid number of operands!");
1748 Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
1749 ShifterImm.Imm));
1750 }
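// Worked example (illustrative): "asr #3" becomes (1 << 5) | 3 == 0x23,
// while "lsl #5" is simply 5; bit 5 distinguishes ASR from LSL.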
1751
addRegListOperands(MCInst & Inst,unsigned N) const1752 void addRegListOperands(MCInst &Inst, unsigned N) const {
1753 assert(N == 1 && "Invalid number of operands!");
1754 const SmallVectorImpl<unsigned> &RegList = getRegList();
1755 for (SmallVectorImpl<unsigned>::const_iterator
1756 I = RegList.begin(), E = RegList.end(); I != E; ++I)
1757 Inst.addOperand(MCOperand::createReg(*I));
1758 }
1759
addDPRRegListOperands(MCInst & Inst,unsigned N) const1760 void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1761 addRegListOperands(Inst, N);
1762 }
1763
addSPRRegListOperands(MCInst & Inst,unsigned N) const1764 void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1765 addRegListOperands(Inst, N);
1766 }
1767
addRotImmOperands(MCInst & Inst,unsigned N) const1768 void addRotImmOperands(MCInst &Inst, unsigned N) const {
1769 assert(N == 1 && "Invalid number of operands!");
1770 // Encoded as val>>3. The printer handles display as 8, 16, 24.
1771 Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
1772 }
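// For illustration: "ror #16" is stored as RotImm.Imm == 16 and emitted here
// as 2.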
1773
addModImmOperands(MCInst & Inst,unsigned N) const1774 void addModImmOperands(MCInst &Inst, unsigned N) const {
1775 assert(N == 1 && "Invalid number of operands!");
1776
1777 // Support for fixups (MCFixup)
1778 if (isImm())
1779 return addImmOperands(Inst, N);
1780
1781 Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
1782 }
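// Worked example (illustrative): assuming ModImm.Rot holds the literal
// (even) rotate-right amount, the shift by 7 places rot/2 into bits [11:8];
// e.g. Bits == 0xff with Rot == 16 gives 0x8ff, i.e. 0xff rotated right by
// 16 (0x00ff0000).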
1783
addModImmNotOperands(MCInst & Inst,unsigned N) const1784 void addModImmNotOperands(MCInst &Inst, unsigned N) const {
1785 assert(N == 1 && "Invalid number of operands!");
1786 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1787 uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
1788 Inst.addOperand(MCOperand::createImm(Enc));
1789 }
1790
addModImmNegOperands(MCInst & Inst,unsigned N) const1791 void addModImmNegOperands(MCInst &Inst, unsigned N) const {
1792 assert(N == 1 && "Invalid number of operands!");
1793 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1794 uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
1795 Inst.addOperand(MCOperand::createImm(Enc));
1796 }
1797
addBitfieldOperands(MCInst & Inst,unsigned N) const1798 void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1799 assert(N == 1 && "Invalid number of operands!");
1800 // Munge the lsb/width into a bitfield mask.
1801 unsigned lsb = Bitfield.LSB;
1802 unsigned width = Bitfield.Width;
1803 // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1804 uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1805 (32 - (lsb + width)));
1806 Inst.addOperand(MCOperand::createImm(Mask));
1807 }
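// Worked example (illustrative): lsb == 8, width == 8 (as in
// "bfc r0, #8, #8") yields Mask == 0xffff00ff, i.e. bits [15:8] clear and
// everything else set.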
1808
addImmOperands(MCInst & Inst,unsigned N) const1809 void addImmOperands(MCInst &Inst, unsigned N) const {
1810 assert(N == 1 && "Invalid number of operands!");
1811 addExpr(Inst, getImm());
1812 }
1813
addFBits16Operands(MCInst & Inst,unsigned N) const1814 void addFBits16Operands(MCInst &Inst, unsigned N) const {
1815 assert(N == 1 && "Invalid number of operands!");
1816 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1817 Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
1818 }
1819
addFBits32Operands(MCInst & Inst,unsigned N) const1820 void addFBits32Operands(MCInst &Inst, unsigned N) const {
1821 assert(N == 1 && "Invalid number of operands!");
1822 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1823 Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
1824 }
1825
addFPImmOperands(MCInst & Inst,unsigned N) const1826 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1827 assert(N == 1 && "Invalid number of operands!");
1828 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1829 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1830 Inst.addOperand(MCOperand::createImm(Val));
1831 }
1832
addImm8s4Operands(MCInst & Inst,unsigned N) const1833 void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1834 assert(N == 1 && "Invalid number of operands!");
1835 // FIXME: We really want to scale the value here, but the LDRD/STRD
1836 // instructions don't encode their operands that way yet.
1837 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1838 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1839 }
1840
addImm0_1020s4Operands(MCInst & Inst,unsigned N) const1841 void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1842 assert(N == 1 && "Invalid number of operands!");
1843 // The immediate is scaled by four in the encoding and is stored
1844 // in the MCInst as such. Lop off the low two bits here.
1845 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1846 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
1847 }
1848
addImm0_508s4NegOperands(MCInst & Inst,unsigned N) const1849 void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
1850 assert(N == 1 && "Invalid number of operands!");
1851 // The immediate is scaled by four in the encoding and is stored
1852 // in the MCInst as such. Lop off the low two bits here.
1853 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1854 Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
1855 }
1856
addImm0_508s4Operands(MCInst & Inst,unsigned N) const1857 void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1858 assert(N == 1 && "Invalid number of operands!");
1859 // The immediate is scaled by four in the encoding and is stored
1860 // in the MCInst as such. Lop off the low two bits here.
1861 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1862 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
1863 }
1864
addImm1_16Operands(MCInst & Inst,unsigned N) const1865 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1866 assert(N == 1 && "Invalid number of operands!");
1867 // The constant encodes as the immediate-1, and we store in the instruction
1868 // the bits as encoded, so subtract off one here.
1869 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1870 Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
1871 }
1872
addImm1_32Operands(MCInst & Inst,unsigned N) const1873 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1874 assert(N == 1 && "Invalid number of operands!");
1875 // The constant encodes as the immediate-1, and we store in the instruction
1876 // the bits as encoded, so subtract off one here.
1877 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1878 Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
1879 }
1880
addImmThumbSROperands(MCInst & Inst,unsigned N) const1881 void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1882 assert(N == 1 && "Invalid number of operands!");
1883 // The constant encodes as the immediate, except for 32, which encodes as
1884 // zero.
1885 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1886 unsigned Imm = CE->getValue();
1887 Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
1888 }
1889
addPKHASRImmOperands(MCInst & Inst,unsigned N) const1890 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1891 assert(N == 1 && "Invalid number of operands!");
1892 // An ASR value of 32 encodes as 0, so that's how we want to add it to
1893 // the instruction as well.
1894 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1895 int Val = CE->getValue();
1896 Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
1897 }
1898
addT2SOImmNotOperands(MCInst & Inst,unsigned N) const1899 void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1900 assert(N == 1 && "Invalid number of operands!");
1901 // The operand is actually a t2_so_imm, but we have its bitwise
1902 // negation in the assembly source, so twiddle it here.
1903 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1904 Inst.addOperand(MCOperand::createImm(~CE->getValue()));
1905 }
1906
addT2SOImmNegOperands(MCInst & Inst,unsigned N) const1907 void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1908 assert(N == 1 && "Invalid number of operands!");
1909 // The operand is actually a t2_so_imm, but we have its
1910 // negation in the assembly source, so twiddle it here.
1911 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1912 Inst.addOperand(MCOperand::createImm(-CE->getValue()));
1913 }
1914
addImm0_4095NegOperands(MCInst & Inst,unsigned N) const1915 void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
1916 assert(N == 1 && "Invalid number of operands!");
1917 // The operand is actually an imm0_4095, but we have its
1918 // negation in the assembly source, so twiddle it here.
1919 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1920 Inst.addOperand(MCOperand::createImm(-CE->getValue()));
1921 }
1922
addUnsignedOffset_b8s2Operands(MCInst & Inst,unsigned N) const1923 void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
1924 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
1925 Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
1926 return;
1927 }
1928
1929 const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
1930 assert(SR && "Unknown value type!");
1931 Inst.addOperand(MCOperand::createExpr(SR));
1932 }
1933
addThumbMemPCOperands(MCInst & Inst,unsigned N) const1934 void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
1935 assert(N == 1 && "Invalid number of operands!");
1936 if (isImm()) {
1937 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1938 if (CE) {
1939 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1940 return;
1941 }
1942
1943 const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
1944 assert(SR && "Unknown value type!");
1945 Inst.addOperand(MCOperand::createExpr(SR));
1946 return;
1947 }
1948
1949 assert(isMem() && "Unknown value type!");
1950 assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
1951 Inst.addOperand(MCOperand::createImm(Memory.OffsetImm->getValue()));
1952 }
1953
addMemBarrierOptOperands(MCInst & Inst,unsigned N) const1954 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1955 assert(N == 1 && "Invalid number of operands!");
1956 Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
1957 }
1958
addInstSyncBarrierOptOperands(MCInst & Inst,unsigned N) const1959 void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
1960 assert(N == 1 && "Invalid number of operands!");
1961 Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
1962 }
1963
addMemNoOffsetOperands(MCInst & Inst,unsigned N) const1964 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1965 assert(N == 1 && "Invalid number of operands!");
1966 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
1967 }
1968
addMemPCRelImm12Operands(MCInst & Inst,unsigned N) const1969 void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1970 assert(N == 1 && "Invalid number of operands!");
1971 int32_t Imm = Memory.OffsetImm->getValue();
1972 Inst.addOperand(MCOperand::createImm(Imm));
1973 }
1974
addAdrLabelOperands(MCInst & Inst,unsigned N) const1975 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1976 assert(N == 1 && "Invalid number of operands!");
1977 assert(isImm() && "Not an immediate!");
1978
1979 // If we have an immediate that's not a constant, treat it as a label
1980 // reference needing a fixup.
1981 if (!isa<MCConstantExpr>(getImm())) {
1982 Inst.addOperand(MCOperand::createExpr(getImm()));
1983 return;
1984 }
1985
1986 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1987 int Val = CE->getValue();
1988 Inst.addOperand(MCOperand::createImm(Val));
1989 }
1990
addAlignedMemoryOperands(MCInst & Inst,unsigned N) const1991 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1992 assert(N == 2 && "Invalid number of operands!");
1993 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
1994 Inst.addOperand(MCOperand::createImm(Memory.Alignment));
1995 }
1996
addDupAlignedMemoryNoneOperands(MCInst & Inst,unsigned N) const1997 void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
1998 addAlignedMemoryOperands(Inst, N);
1999 }
2000
addAlignedMemoryNoneOperands(MCInst & Inst,unsigned N) const2001 void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2002 addAlignedMemoryOperands(Inst, N);
2003 }
2004
addAlignedMemory16Operands(MCInst & Inst,unsigned N) const2005 void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2006 addAlignedMemoryOperands(Inst, N);
2007 }
2008
addDupAlignedMemory16Operands(MCInst & Inst,unsigned N) const2009 void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2010 addAlignedMemoryOperands(Inst, N);
2011 }
2012
addAlignedMemory32Operands(MCInst & Inst,unsigned N) const2013 void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2014 addAlignedMemoryOperands(Inst, N);
2015 }
2016
addDupAlignedMemory32Operands(MCInst & Inst,unsigned N) const2017 void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2018 addAlignedMemoryOperands(Inst, N);
2019 }
2020
addAlignedMemory64Operands(MCInst & Inst,unsigned N) const2021 void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2022 addAlignedMemoryOperands(Inst, N);
2023 }
2024
addDupAlignedMemory64Operands(MCInst & Inst,unsigned N) const2025 void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2026 addAlignedMemoryOperands(Inst, N);
2027 }
2028
addAlignedMemory64or128Operands(MCInst & Inst,unsigned N) const2029 void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2030 addAlignedMemoryOperands(Inst, N);
2031 }
2032
addDupAlignedMemory64or128Operands(MCInst & Inst,unsigned N) const2033 void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2034 addAlignedMemoryOperands(Inst, N);
2035 }
2036
addAlignedMemory64or128or256Operands(MCInst & Inst,unsigned N) const2037 void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
2038 addAlignedMemoryOperands(Inst, N);
2039 }
2040
addAddrMode2Operands(MCInst & Inst,unsigned N) const2041 void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
2042 assert(N == 3 && "Invalid number of operands!");
2043 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2044 if (!Memory.OffsetRegNum) {
2045 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2046 // Special case for #-0
2047 if (Val == INT32_MIN) Val = 0;
2048 if (Val < 0) Val = -Val;
2049 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2050 } else {
2051 // For register offset, we encode the shift type and negation flag
2052 // here.
2053 Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2054 Memory.ShiftImm, Memory.ShiftType);
2055 }
2056 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2057 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2058 Inst.addOperand(MCOperand::createImm(Val));
2059 }
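// For illustration: "[r1, #-4]" has no offset register, so the offset is
// folded into getAM2Opc(sub, 4, no_shift) with offset register 0, while
// "[r1, -r2, lsl #2]" keeps r2 as the offset register and packs the
// subtraction and lsl #2 into the immediate operand.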
2060
addAM2OffsetImmOperands(MCInst & Inst,unsigned N) const2061 void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
2062 assert(N == 2 && "Invalid number of operands!");
2063 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2064 assert(CE && "non-constant AM2OffsetImm operand!");
2065 int32_t Val = CE->getValue();
2066 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2067 // Special case for #-0
2068 if (Val == INT32_MIN) Val = 0;
2069 if (Val < 0) Val = -Val;
2070 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2071 Inst.addOperand(MCOperand::createReg(0));
2072 Inst.addOperand(MCOperand::createImm(Val));
2073 }
2074
addAddrMode3Operands(MCInst & Inst,unsigned N) const2075 void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
2076 assert(N == 3 && "Invalid number of operands!");
2077 // If we have an immediate that's not a constant, treat it as a label
2078 // reference needing a fixup. If it is a constant, it's something else
2079 // and we reject it.
2080 if (isImm()) {
2081 Inst.addOperand(MCOperand::createExpr(getImm()));
2082 Inst.addOperand(MCOperand::createReg(0));
2083 Inst.addOperand(MCOperand::createImm(0));
2084 return;
2085 }
2086
2087 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2088 if (!Memory.OffsetRegNum) {
2089 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2090 // Special case for #-0
2091 if (Val == INT32_MIN) Val = 0;
2092 if (Val < 0) Val = -Val;
2093 Val = ARM_AM::getAM3Opc(AddSub, Val);
2094 } else {
2095 // For register offset, we encode the shift type and negation flag
2096 // here.
2097 Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
2098 }
2099 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2100 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2101 Inst.addOperand(MCOperand::createImm(Val));
2102 }
2103
addAM3OffsetOperands(MCInst & Inst,unsigned N) const2104 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
2105 assert(N == 2 && "Invalid number of operands!");
2106 if (Kind == k_PostIndexRegister) {
2107 int32_t Val =
2108 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
2109 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2110 Inst.addOperand(MCOperand::createImm(Val));
2111 return;
2112 }
2113
2114 // Constant offset.
2115 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
2116 int32_t Val = CE->getValue();
2117 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2118 // Special case for #-0
2119 if (Val == INT32_MIN) Val = 0;
2120 if (Val < 0) Val = -Val;
2121 Val = ARM_AM::getAM3Opc(AddSub, Val);
2122 Inst.addOperand(MCOperand::createReg(0));
2123 Inst.addOperand(MCOperand::createImm(Val));
2124 }
2125
addAddrMode5Operands(MCInst & Inst,unsigned N) const2126 void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
2127 assert(N == 2 && "Invalid number of operands!");
2128 // If we have an immediate that's not a constant, treat it as a label
2129 // reference needing a fixup. If it is a constant, it's something else
2130 // and we reject it.
2131 if (isImm()) {
2132 Inst.addOperand(MCOperand::createExpr(getImm()));
2133 Inst.addOperand(MCOperand::createImm(0));
2134 return;
2135 }
2136
2137 // The lower two bits are always zero and as such are not encoded.
2138 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2139 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2140 // Special case for #-0
2141 if (Val == INT32_MIN) Val = 0;
2142 if (Val < 0) Val = -Val;
2143 Val = ARM_AM::getAM5Opc(AddSub, Val);
2144 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2145 Inst.addOperand(MCOperand::createImm(Val));
2146 }
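// For illustration: "[r2, #-8]" emits base r2 plus getAM5Opc(sub, 2), the
// offset being expressed in words (8 / 4).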
2147
addMemImm8s4OffsetOperands(MCInst & Inst,unsigned N) const2148 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
2149 assert(N == 2 && "Invalid number of operands!");
2150 // If we have an immediate that's not a constant, treat it as a label
2151 // reference needing a fixup. If it is a constant, it's something else
2152 // and we reject it.
2153 if (isImm()) {
2154 Inst.addOperand(MCOperand::createExpr(getImm()));
2155 Inst.addOperand(MCOperand::createImm(0));
2156 return;
2157 }
2158
2159 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2160 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2161 Inst.addOperand(MCOperand::createImm(Val));
2162 }
2163
addMemImm0_1020s4OffsetOperands(MCInst & Inst,unsigned N) const2164 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
2165 assert(N == 2 && "Invalid number of operands!");
2166 // The lower two bits are always zero and as such are not encoded.
2167 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2168 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2169 Inst.addOperand(MCOperand::createImm(Val));
2170 }
2171
addMemImm8OffsetOperands(MCInst & Inst,unsigned N) const2172 void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2173 assert(N == 2 && "Invalid number of operands!");
2174 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2175 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2176 Inst.addOperand(MCOperand::createImm(Val));
2177 }
2178
addMemPosImm8OffsetOperands(MCInst & Inst,unsigned N) const2179 void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2180 addMemImm8OffsetOperands(Inst, N);
2181 }
2182
addMemNegImm8OffsetOperands(MCInst & Inst,unsigned N) const2183 void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2184 addMemImm8OffsetOperands(Inst, N);
2185 }
2186
addMemUImm12OffsetOperands(MCInst & Inst,unsigned N) const2187 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2188 assert(N == 2 && "Invalid number of operands!");
2189 // If this is an immediate, it's a label reference.
2190 if (isImm()) {
2191 addExpr(Inst, getImm());
2192 Inst.addOperand(MCOperand::createImm(0));
2193 return;
2194 }
2195
2196 // Otherwise, it's a normal memory reg+offset.
2197 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2198 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2199 Inst.addOperand(MCOperand::createImm(Val));
2200 }
2201
addMemImm12OffsetOperands(MCInst & Inst,unsigned N) const2202 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2203 assert(N == 2 && "Invalid number of operands!");
2204 // If this is an immediate, it's a label reference.
2205 if (isImm()) {
2206 addExpr(Inst, getImm());
2207 Inst.addOperand(MCOperand::createImm(0));
2208 return;
2209 }
2210
2211 // Otherwise, it's a normal memory reg+offset.
2212 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2213 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2214 Inst.addOperand(MCOperand::createImm(Val));
2215 }
2216
addMemTBBOperands(MCInst & Inst,unsigned N) const2217 void addMemTBBOperands(MCInst &Inst, unsigned N) const {
2218 assert(N == 2 && "Invalid number of operands!");
2219 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2220 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2221 }
2222
addMemTBHOperands(MCInst & Inst,unsigned N) const2223 void addMemTBHOperands(MCInst &Inst, unsigned N) const {
2224 assert(N == 2 && "Invalid number of operands!");
2225 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2226 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2227 }
2228
addMemRegOffsetOperands(MCInst & Inst,unsigned N) const2229 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2230 assert(N == 3 && "Invalid number of operands!");
2231 unsigned Val =
2232 ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2233 Memory.ShiftImm, Memory.ShiftType);
2234 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2235 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2236 Inst.addOperand(MCOperand::createImm(Val));
2237 }
2238
addT2MemRegOffsetOperands(MCInst & Inst,unsigned N) const2239 void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2240 assert(N == 3 && "Invalid number of operands!");
2241 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2242 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2243 Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
2244 }
2245
addMemThumbRROperands(MCInst & Inst,unsigned N) const2246 void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
2247 assert(N == 2 && "Invalid number of operands!");
2248 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2249 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2250 }
2251
addMemThumbRIs4Operands(MCInst & Inst,unsigned N) const2252 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
2253 assert(N == 2 && "Invalid number of operands!");
2254 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2255 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2256 Inst.addOperand(MCOperand::createImm(Val));
2257 }
2258
addMemThumbRIs2Operands(MCInst & Inst,unsigned N) const2259 void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
2260 assert(N == 2 && "Invalid number of operands!");
2261 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
2262 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2263 Inst.addOperand(MCOperand::createImm(Val));
2264 }
2265
addMemThumbRIs1Operands(MCInst & Inst,unsigned N) const2266 void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
2267 assert(N == 2 && "Invalid number of operands!");
2268 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
2269 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2270 Inst.addOperand(MCOperand::createImm(Val));
2271 }
2272
addMemThumbSPIOperands(MCInst & Inst,unsigned N) const2273 void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
2274 assert(N == 2 && "Invalid number of operands!");
2275 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2276 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2277 Inst.addOperand(MCOperand::createImm(Val));
2278 }
2279
addPostIdxImm8Operands(MCInst & Inst,unsigned N) const2280 void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
2281 assert(N == 1 && "Invalid number of operands!");
2282 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2283 assert(CE && "non-constant post-idx-imm8 operand!");
2284 int Imm = CE->getValue();
2285 bool isAdd = Imm >= 0;
2286 if (Imm == INT32_MIN) Imm = 0;
2287 Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
2288 Inst.addOperand(MCOperand::createImm(Imm));
2289 }
2290
addPostIdxImm8s4Operands(MCInst & Inst,unsigned N) const2291 void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
2292 assert(N == 1 && "Invalid number of operands!");
2293 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2294 assert(CE && "non-constant post-idx-imm8s4 operand!");
2295 int Imm = CE->getValue();
2296 bool isAdd = Imm >= 0;
2297 if (Imm == INT32_MIN) Imm = 0;
2298 // Immediate is scaled by 4.
2299 Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
2300 Inst.addOperand(MCOperand::createImm(Imm));
2301 }
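// Worked example (illustrative): a post-index of #-32 becomes (32 / 4) == 8,
// while #+32 becomes 8 | 0x100; bit 8 carries the add/sub direction.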
2302
addPostIdxRegOperands(MCInst & Inst,unsigned N) const2303 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
2304 assert(N == 2 && "Invalid number of operands!");
2305 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2306 Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
2307 }
2308
addPostIdxRegShiftedOperands(MCInst & Inst,unsigned N) const2309 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
2310 assert(N == 2 && "Invalid number of operands!");
2311 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2312 // The sign, shift type, and shift amount are encoded in a single operand
2313 // using the AM2 encoding helpers.
2314 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
2315 unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
2316 PostIdxReg.ShiftTy);
2317 Inst.addOperand(MCOperand::createImm(Imm));
2318 }
2319
addMSRMaskOperands(MCInst & Inst,unsigned N) const2320 void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
2321 assert(N == 1 && "Invalid number of operands!");
2322 Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
2323 }
2324
addBankedRegOperands(MCInst & Inst,unsigned N) const2325 void addBankedRegOperands(MCInst &Inst, unsigned N) const {
2326 assert(N == 1 && "Invalid number of operands!");
2327 Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
2328 }
2329
addProcIFlagsOperands(MCInst & Inst,unsigned N) const2330 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
2331 assert(N == 1 && "Invalid number of operands!");
2332 Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
2333 }
2334
addVecListOperands(MCInst & Inst,unsigned N) const2335 void addVecListOperands(MCInst &Inst, unsigned N) const {
2336 assert(N == 1 && "Invalid number of operands!");
2337 Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
2338 }
2339
addVecListIndexedOperands(MCInst & Inst,unsigned N) const2340 void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
2341 assert(N == 2 && "Invalid number of operands!");
2342 Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
2343 Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
2344 }
2345
addVectorIndex8Operands(MCInst & Inst,unsigned N) const2346 void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
2347 assert(N == 1 && "Invalid number of operands!");
2348 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2349 }
2350
addVectorIndex16Operands(MCInst & Inst,unsigned N) const2351 void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
2352 assert(N == 1 && "Invalid number of operands!");
2353 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2354 }
2355
addVectorIndex32Operands(MCInst & Inst,unsigned N) const2356 void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
2357 assert(N == 1 && "Invalid number of operands!");
2358 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2359 }
2360
addNEONi8splatOperands(MCInst & Inst,unsigned N) const2361 void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
2362 assert(N == 1 && "Invalid number of operands!");
2363 // The immediate encodes the type of constant as well as the value.
2364 // Mask in that this is an i8 splat.
2365 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2366 Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
2367 }
2368
addNEONi16splatOperands(MCInst & Inst,unsigned N) const2369 void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
2370 assert(N == 1 && "Invalid number of operands!");
2371 // The immediate encodes the type of constant as well as the value.
2372 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2373 unsigned Value = CE->getValue();
2374 Value = ARM_AM::encodeNEONi16splat(Value);
2375 Inst.addOperand(MCOperand::createImm(Value));
2376 }
2377
addNEONi16splatNotOperands(MCInst & Inst,unsigned N) const2378 void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
2379 assert(N == 1 && "Invalid number of operands!");
2380 // The immediate encodes the type of constant as well as the value.
2381 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2382 unsigned Value = CE->getValue();
2383 Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
2384 Inst.addOperand(MCOperand::createImm(Value));
2385 }
2386
addNEONi32splatOperands(MCInst & Inst,unsigned N) const2387 void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
2388 assert(N == 1 && "Invalid number of operands!");
2389 // The immediate encodes the type of constant as well as the value.
2390 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2391 unsigned Value = CE->getValue();
2392 Value = ARM_AM::encodeNEONi32splat(Value);
2393 Inst.addOperand(MCOperand::createImm(Value));
2394 }
2395
addNEONi32splatNotOperands(MCInst & Inst,unsigned N) const2396 void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
2397 assert(N == 1 && "Invalid number of operands!");
2398 // The immediate encodes the type of constant as well as the value.
2399 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2400 unsigned Value = CE->getValue();
2401 Value = ARM_AM::encodeNEONi32splat(~Value);
2402 Inst.addOperand(MCOperand::createImm(Value));
2403 }
2404
addNEONinvByteReplicateOperands(MCInst & Inst,unsigned N) const2405 void addNEONinvByteReplicateOperands(MCInst &Inst, unsigned N) const {
2406 assert(N == 1 && "Invalid number of operands!");
2407 // The immediate encodes the type of constant as well as the value.
2408 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2409 unsigned Value = CE->getValue();
2410 assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
2411 Inst.getOpcode() == ARM::VMOVv16i8) &&
2412 "All vmvn instructions that wants to replicate non-zero byte "
2413 "always must be replaced with VMOVv8i8 or VMOVv16i8.");
2414 unsigned B = ((~Value) & 0xff);
2415 B |= 0xe00; // cmode = 0b1110
2416 Inst.addOperand(MCOperand::createImm(B));
2417 }
addNEONi32vmovOperands(MCInst & Inst,unsigned N) const2418 void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
2419 assert(N == 1 && "Invalid number of operands!");
2420 // The immediate encodes the type of constant as well as the value.
2421 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2422 unsigned Value = CE->getValue();
2423 if (Value >= 256 && Value <= 0xffff)
2424 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2425 else if (Value > 0xffff && Value <= 0xffffff)
2426 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2427 else if (Value > 0xffffff)
2428 Value = (Value >> 24) | 0x600;
2429 Inst.addOperand(MCOperand::createImm(Value));
2430 }
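// Worked example (illustrative): 0x1200 (only byte 1 set) becomes
// (0x1200 >> 8) | 0x200 == 0x212, while 0x12ff takes the 0xc00 path because
// its low byte is 0xff, giving 0xc12; the high bits added here select the
// NEON cmode.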
2431
addNEONvmovByteReplicateOperands(MCInst & Inst,unsigned N) const2432 void addNEONvmovByteReplicateOperands(MCInst &Inst, unsigned N) const {
2433 assert(N == 1 && "Invalid number of operands!");
2434 // The immediate encodes the type of constant as well as the value.
2435 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2436 unsigned Value = CE->getValue();
2437 assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
2438 Inst.getOpcode() == ARM::VMOVv16i8) &&
2439 "All instructions that wants to replicate non-zero byte "
2440 "always must be replaced with VMOVv8i8 or VMOVv16i8.");
2441 unsigned B = Value & 0xff;
2442 B |= 0xe00; // cmode = 0b1110
2443 Inst.addOperand(MCOperand::createImm(B));
2444 }
addNEONi32vmovNegOperands(MCInst & Inst,unsigned N) const2445 void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
2446 assert(N == 1 && "Invalid number of operands!");
2447 // The immediate encodes the type of constant as well as the value.
2448 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2449 unsigned Value = ~CE->getValue();
2450 if (Value >= 256 && Value <= 0xffff)
2451 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2452 else if (Value > 0xffff && Value <= 0xffffff)
2453 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2454 else if (Value > 0xffffff)
2455 Value = (Value >> 24) | 0x600;
2456 Inst.addOperand(MCOperand::createImm(Value));
2457 }
2458
addNEONi64splatOperands(MCInst & Inst,unsigned N) const2459 void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
2460 assert(N == 1 && "Invalid number of operands!");
2461 // The immediate encodes the type of constant as well as the value.
2462 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2463 uint64_t Value = CE->getValue();
2464 unsigned Imm = 0;
2465 for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
2466 Imm |= (Value & 1) << i;
2467 }
2468 Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
2469 }
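// Worked example (illustrative): 0x00ff00ff00ff00ff collapses to the
// per-byte bit pattern 0b01010101 == 0x55 and is emitted as 0x1e55.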
2470
2471 void print(raw_ostream &OS) const override;
2472
CreateITMask(unsigned Mask,SMLoc S)2473 static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
2474 auto Op = make_unique<ARMOperand>(k_ITCondMask);
2475 Op->ITMask.Mask = Mask;
2476 Op->StartLoc = S;
2477 Op->EndLoc = S;
2478 return Op;
2479 }
2480
CreateCondCode(ARMCC::CondCodes CC,SMLoc S)2481 static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
2482 SMLoc S) {
2483 auto Op = make_unique<ARMOperand>(k_CondCode);
2484 Op->CC.Val = CC;
2485 Op->StartLoc = S;
2486 Op->EndLoc = S;
2487 return Op;
2488 }
2489
CreateCoprocNum(unsigned CopVal,SMLoc S)2490 static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
2491 auto Op = make_unique<ARMOperand>(k_CoprocNum);
2492 Op->Cop.Val = CopVal;
2493 Op->StartLoc = S;
2494 Op->EndLoc = S;
2495 return Op;
2496 }
2497
CreateCoprocReg(unsigned CopVal,SMLoc S)2498 static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
2499 auto Op = make_unique<ARMOperand>(k_CoprocReg);
2500 Op->Cop.Val = CopVal;
2501 Op->StartLoc = S;
2502 Op->EndLoc = S;
2503 return Op;
2504 }
2505
CreateCoprocOption(unsigned Val,SMLoc S,SMLoc E)2506 static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
2507 SMLoc E) {
2508 auto Op = make_unique<ARMOperand>(k_CoprocOption);
2509 Op->Cop.Val = Val;
2510 Op->StartLoc = S;
2511 Op->EndLoc = E;
2512 return Op;
2513 }
2514
CreateCCOut(unsigned RegNum,SMLoc S)2515 static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
2516 auto Op = make_unique<ARMOperand>(k_CCOut);
2517 Op->Reg.RegNum = RegNum;
2518 Op->StartLoc = S;
2519 Op->EndLoc = S;
2520 return Op;
2521 }
2522
CreateToken(StringRef Str,SMLoc S)2523 static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
2524 auto Op = make_unique<ARMOperand>(k_Token);
2525 Op->Tok.Data = Str.data();
2526 Op->Tok.Length = Str.size();
2527 Op->StartLoc = S;
2528 Op->EndLoc = S;
2529 return Op;
2530 }
2531
CreateReg(unsigned RegNum,SMLoc S,SMLoc E)2532 static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
2533 SMLoc E) {
2534 auto Op = make_unique<ARMOperand>(k_Register);
2535 Op->Reg.RegNum = RegNum;
2536 Op->StartLoc = S;
2537 Op->EndLoc = E;
2538 return Op;
2539 }
2540
2541 static std::unique_ptr<ARMOperand>
CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,unsigned SrcReg,unsigned ShiftReg,unsigned ShiftImm,SMLoc S,SMLoc E)2542 CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
2543 unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
2544 SMLoc E) {
2545 auto Op = make_unique<ARMOperand>(k_ShiftedRegister);
2546 Op->RegShiftedReg.ShiftTy = ShTy;
2547 Op->RegShiftedReg.SrcReg = SrcReg;
2548 Op->RegShiftedReg.ShiftReg = ShiftReg;
2549 Op->RegShiftedReg.ShiftImm = ShiftImm;
2550 Op->StartLoc = S;
2551 Op->EndLoc = E;
2552 return Op;
2553 }
2554
2555 static std::unique_ptr<ARMOperand>
CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,unsigned SrcReg,unsigned ShiftImm,SMLoc S,SMLoc E)2556 CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
2557 unsigned ShiftImm, SMLoc S, SMLoc E) {
2558 auto Op = make_unique<ARMOperand>(k_ShiftedImmediate);
2559 Op->RegShiftedImm.ShiftTy = ShTy;
2560 Op->RegShiftedImm.SrcReg = SrcReg;
2561 Op->RegShiftedImm.ShiftImm = ShiftImm;
2562 Op->StartLoc = S;
2563 Op->EndLoc = E;
2564 return Op;
2565 }
2566
CreateShifterImm(bool isASR,unsigned Imm,SMLoc S,SMLoc E)2567 static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
2568 SMLoc S, SMLoc E) {
2569 auto Op = make_unique<ARMOperand>(k_ShifterImmediate);
2570 Op->ShifterImm.isASR = isASR;
2571 Op->ShifterImm.Imm = Imm;
2572 Op->StartLoc = S;
2573 Op->EndLoc = E;
2574 return Op;
2575 }
2576
CreateRotImm(unsigned Imm,SMLoc S,SMLoc E)2577 static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
2578 SMLoc E) {
2579 auto Op = make_unique<ARMOperand>(k_RotateImmediate);
2580 Op->RotImm.Imm = Imm;
2581 Op->StartLoc = S;
2582 Op->EndLoc = E;
2583 return Op;
2584 }
2585
CreateModImm(unsigned Bits,unsigned Rot,SMLoc S,SMLoc E)2586 static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
2587 SMLoc S, SMLoc E) {
2588 auto Op = make_unique<ARMOperand>(k_ModifiedImmediate);
2589 Op->ModImm.Bits = Bits;
2590 Op->ModImm.Rot = Rot;
2591 Op->StartLoc = S;
2592 Op->EndLoc = E;
2593 return Op;
2594 }
2595
2596 static std::unique_ptr<ARMOperand>
CreateBitfield(unsigned LSB,unsigned Width,SMLoc S,SMLoc E)2597 CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
2598 auto Op = make_unique<ARMOperand>(k_BitfieldDescriptor);
2599 Op->Bitfield.LSB = LSB;
2600 Op->Bitfield.Width = Width;
2601 Op->StartLoc = S;
2602 Op->EndLoc = E;
2603 return Op;
2604 }
2605
2606 static std::unique_ptr<ARMOperand>
CreateRegList(SmallVectorImpl<std::pair<unsigned,unsigned>> & Regs,SMLoc StartLoc,SMLoc EndLoc)2607 CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
2608 SMLoc StartLoc, SMLoc EndLoc) {
2609 assert(Regs.size() > 0 && "RegList contains no registers?");
2610 KindTy Kind = k_RegisterList;
2611
2612 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().second))
2613 Kind = k_DPRRegisterList;
2614 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2615 contains(Regs.front().second))
2616 Kind = k_SPRRegisterList;
2617
2618 // Sort based on the register encoding values.
2619 array_pod_sort(Regs.begin(), Regs.end());
2620
2621 auto Op = make_unique<ARMOperand>(Kind);
2622 for (SmallVectorImpl<std::pair<unsigned, unsigned> >::const_iterator
2623 I = Regs.begin(), E = Regs.end(); I != E; ++I)
2624 Op->Registers.push_back(I->second);
2625 Op->StartLoc = StartLoc;
2626 Op->EndLoc = EndLoc;
2627 return Op;
2628 }
2629
CreateVectorList(unsigned RegNum,unsigned Count,bool isDoubleSpaced,SMLoc S,SMLoc E)2630 static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
2631 unsigned Count,
2632 bool isDoubleSpaced,
2633 SMLoc S, SMLoc E) {
2634 auto Op = make_unique<ARMOperand>(k_VectorList);
2635 Op->VectorList.RegNum = RegNum;
2636 Op->VectorList.Count = Count;
2637 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2638 Op->StartLoc = S;
2639 Op->EndLoc = E;
2640 return Op;
2641 }
2642
2643 static std::unique_ptr<ARMOperand>
CreateVectorListAllLanes(unsigned RegNum,unsigned Count,bool isDoubleSpaced,SMLoc S,SMLoc E)2644 CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
2645 SMLoc S, SMLoc E) {
2646 auto Op = make_unique<ARMOperand>(k_VectorListAllLanes);
2647 Op->VectorList.RegNum = RegNum;
2648 Op->VectorList.Count = Count;
2649 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2650 Op->StartLoc = S;
2651 Op->EndLoc = E;
2652 return Op;
2653 }
2654
2655 static std::unique_ptr<ARMOperand>
CreateVectorListIndexed(unsigned RegNum,unsigned Count,unsigned Index,bool isDoubleSpaced,SMLoc S,SMLoc E)2656 CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
2657 bool isDoubleSpaced, SMLoc S, SMLoc E) {
2658 auto Op = make_unique<ARMOperand>(k_VectorListIndexed);
2659 Op->VectorList.RegNum = RegNum;
2660 Op->VectorList.Count = Count;
2661 Op->VectorList.LaneIndex = Index;
2662 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2663 Op->StartLoc = S;
2664 Op->EndLoc = E;
2665 return Op;
2666 }
2667
2668 static std::unique_ptr<ARMOperand>
CreateVectorIndex(unsigned Idx,SMLoc S,SMLoc E,MCContext & Ctx)2669 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2670 auto Op = make_unique<ARMOperand>(k_VectorIndex);
2671 Op->VectorIndex.Val = Idx;
2672 Op->StartLoc = S;
2673 Op->EndLoc = E;
2674 return Op;
2675 }
2676
CreateImm(const MCExpr * Val,SMLoc S,SMLoc E)2677 static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
2678 SMLoc E) {
2679 auto Op = make_unique<ARMOperand>(k_Immediate);
2680 Op->Imm.Val = Val;
2681 Op->StartLoc = S;
2682 Op->EndLoc = E;
2683 return Op;
2684 }
2685
2686 static std::unique_ptr<ARMOperand>
CreateMem(unsigned BaseRegNum,const MCConstantExpr * OffsetImm,unsigned OffsetRegNum,ARM_AM::ShiftOpc ShiftType,unsigned ShiftImm,unsigned Alignment,bool isNegative,SMLoc S,SMLoc E,SMLoc AlignmentLoc=SMLoc ())2687 CreateMem(unsigned BaseRegNum, const MCConstantExpr *OffsetImm,
2688 unsigned OffsetRegNum, ARM_AM::ShiftOpc ShiftType,
2689 unsigned ShiftImm, unsigned Alignment, bool isNegative, SMLoc S,
2690 SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
2691 auto Op = make_unique<ARMOperand>(k_Memory);
2692 Op->Memory.BaseRegNum = BaseRegNum;
2693 Op->Memory.OffsetImm = OffsetImm;
2694 Op->Memory.OffsetRegNum = OffsetRegNum;
2695 Op->Memory.ShiftType = ShiftType;
2696 Op->Memory.ShiftImm = ShiftImm;
2697 Op->Memory.Alignment = Alignment;
2698 Op->Memory.isNegative = isNegative;
2699 Op->StartLoc = S;
2700 Op->EndLoc = E;
2701 Op->AlignmentLoc = AlignmentLoc;
2702 return Op;
2703 }
2704
2705 static std::unique_ptr<ARMOperand>
CreatePostIdxReg(unsigned RegNum,bool isAdd,ARM_AM::ShiftOpc ShiftTy,unsigned ShiftImm,SMLoc S,SMLoc E)2706 CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
2707 unsigned ShiftImm, SMLoc S, SMLoc E) {
2708 auto Op = make_unique<ARMOperand>(k_PostIndexRegister);
2709 Op->PostIdxReg.RegNum = RegNum;
2710 Op->PostIdxReg.isAdd = isAdd;
2711 Op->PostIdxReg.ShiftTy = ShiftTy;
2712 Op->PostIdxReg.ShiftImm = ShiftImm;
2713 Op->StartLoc = S;
2714 Op->EndLoc = E;
2715 return Op;
2716 }
2717
CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,SMLoc S)2718 static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
2719 SMLoc S) {
2720 auto Op = make_unique<ARMOperand>(k_MemBarrierOpt);
2721 Op->MBOpt.Val = Opt;
2722 Op->StartLoc = S;
2723 Op->EndLoc = S;
2724 return Op;
2725 }
2726
2727 static std::unique_ptr<ARMOperand>
CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt,SMLoc S)2728 CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
2729 auto Op = make_unique<ARMOperand>(k_InstSyncBarrierOpt);
2730 Op->ISBOpt.Val = Opt;
2731 Op->StartLoc = S;
2732 Op->EndLoc = S;
2733 return Op;
2734 }
2735
CreateProcIFlags(ARM_PROC::IFlags IFlags,SMLoc S)2736 static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
2737 SMLoc S) {
2738 auto Op = make_unique<ARMOperand>(k_ProcIFlags);
2739 Op->IFlags.Val = IFlags;
2740 Op->StartLoc = S;
2741 Op->EndLoc = S;
2742 return Op;
2743 }
2744
CreateMSRMask(unsigned MMask,SMLoc S)2745 static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
2746 auto Op = make_unique<ARMOperand>(k_MSRMask);
2747 Op->MMask.Val = MMask;
2748 Op->StartLoc = S;
2749 Op->EndLoc = S;
2750 return Op;
2751 }
2752
CreateBankedReg(unsigned Reg,SMLoc S)2753 static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
2754 auto Op = make_unique<ARMOperand>(k_BankedReg);
2755 Op->BankedReg.Val = Reg;
2756 Op->StartLoc = S;
2757 Op->EndLoc = S;
2758 return Op;
2759 }
2760 };
2761
2762 } // end anonymous namespace.
2763
print(raw_ostream & OS) const2764 void ARMOperand::print(raw_ostream &OS) const {
2765 switch (Kind) {
2766 case k_CondCode:
2767 OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2768 break;
2769 case k_CCOut:
2770 OS << "<ccout " << getReg() << ">";
2771 break;
2772 case k_ITCondMask: {
2773 static const char *const MaskStr[] = {
2774 "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2775 "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2776 };
2777 assert((ITMask.Mask & 0xf) == ITMask.Mask);
2778 OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2779 break;
2780 }
2781 case k_CoprocNum:
2782 OS << "<coprocessor number: " << getCoproc() << ">";
2783 break;
2784 case k_CoprocReg:
2785 OS << "<coprocessor register: " << getCoproc() << ">";
2786 break;
2787 case k_CoprocOption:
2788 OS << "<coprocessor option: " << CoprocOption.Val << ">";
2789 break;
2790 case k_MSRMask:
2791 OS << "<mask: " << getMSRMask() << ">";
2792 break;
2793 case k_BankedReg:
2794 OS << "<banked reg: " << getBankedReg() << ">";
2795 break;
2796 case k_Immediate:
2797 OS << *getImm();
2798 break;
2799 case k_MemBarrierOpt:
2800 OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
2801 break;
2802 case k_InstSyncBarrierOpt:
2803 OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
2804 break;
2805 case k_Memory:
2806 OS << "<memory "
2807 << " base:" << Memory.BaseRegNum;
2808 OS << ">";
2809 break;
2810 case k_PostIndexRegister:
2811     OS << "<post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2812 << PostIdxReg.RegNum;
2813 if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2814 OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2815 << PostIdxReg.ShiftImm;
2816 OS << ">";
2817 break;
2818 case k_ProcIFlags: {
2819 OS << "<ARM_PROC::";
2820 unsigned IFlags = getProcIFlags();
2821 for (int i=2; i >= 0; --i)
2822 if (IFlags & (1 << i))
2823 OS << ARM_PROC::IFlagsToString(1 << i);
2824 OS << ">";
2825 break;
2826 }
2827 case k_Register:
2828 OS << "<register " << getReg() << ">";
2829 break;
2830 case k_ShifterImmediate:
2831 OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2832 << " #" << ShifterImm.Imm << ">";
2833 break;
2834 case k_ShiftedRegister:
2835 OS << "<so_reg_reg "
2836 << RegShiftedReg.SrcReg << " "
2837 << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2838 << " " << RegShiftedReg.ShiftReg << ">";
2839 break;
2840 case k_ShiftedImmediate:
2841 OS << "<so_reg_imm "
2842 << RegShiftedImm.SrcReg << " "
2843 << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2844 << " #" << RegShiftedImm.ShiftImm << ">";
2845 break;
2846 case k_RotateImmediate:
2847 OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2848 break;
2849 case k_ModifiedImmediate:
2850 OS << "<mod_imm #" << ModImm.Bits << ", #"
2851        << ModImm.Rot << ">";
2852 break;
2853 case k_BitfieldDescriptor:
2854 OS << "<bitfield " << "lsb: " << Bitfield.LSB
2855 << ", width: " << Bitfield.Width << ">";
2856 break;
2857 case k_RegisterList:
2858 case k_DPRRegisterList:
2859 case k_SPRRegisterList: {
2860 OS << "<register_list ";
2861
2862 const SmallVectorImpl<unsigned> &RegList = getRegList();
2863 for (SmallVectorImpl<unsigned>::const_iterator
2864 I = RegList.begin(), E = RegList.end(); I != E; ) {
2865 OS << *I;
2866 if (++I < E) OS << ", ";
2867 }
2868
2869 OS << ">";
2870 break;
2871 }
2872 case k_VectorList:
2873 OS << "<vector_list " << VectorList.Count << " * "
2874 << VectorList.RegNum << ">";
2875 break;
2876 case k_VectorListAllLanes:
2877 OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2878 << VectorList.RegNum << ">";
2879 break;
2880 case k_VectorListIndexed:
2881 OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2882 << VectorList.Count << " * " << VectorList.RegNum << ">";
2883 break;
2884 case k_Token:
2885 OS << "'" << getToken() << "'";
2886 break;
2887 case k_VectorIndex:
2888 OS << "<vectorindex " << getVectorIndex() << ">";
2889 break;
2890 }
2891 }
2892
2893 /// @name Auto-generated Match Functions
2894 /// {
2895
2896 static unsigned MatchRegisterName(StringRef Name);
2897
2898 /// }
2899
2900 bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2901 SMLoc &StartLoc, SMLoc &EndLoc) {
2902 const AsmToken &Tok = getParser().getTok();
2903 StartLoc = Tok.getLoc();
2904 EndLoc = Tok.getEndLoc();
2905 RegNo = tryParseRegister();
2906
2907 return (RegNo == (unsigned)-1);
2908 }
2909
2910 /// Try to parse a register name. The token must be an Identifier when called,
2911 /// and if it is a register name the token is eaten and the register number is
2912 /// returned. Otherwise return -1.
2913 ///
2914 int ARMAsmParser::tryParseRegister() {
2915 MCAsmParser &Parser = getParser();
2916 const AsmToken &Tok = Parser.getTok();
2917 if (Tok.isNot(AsmToken::Identifier)) return -1;
2918
2919 std::string lowerCase = Tok.getString().lower();
2920 unsigned RegNum = MatchRegisterName(lowerCase);
2921 if (!RegNum) {
2922 RegNum = StringSwitch<unsigned>(lowerCase)
2923 .Case("r13", ARM::SP)
2924 .Case("r14", ARM::LR)
2925 .Case("r15", ARM::PC)
2926 .Case("ip", ARM::R12)
2927 // Additional register name aliases for 'gas' compatibility.
2928 .Case("a1", ARM::R0)
2929 .Case("a2", ARM::R1)
2930 .Case("a3", ARM::R2)
2931 .Case("a4", ARM::R3)
2932 .Case("v1", ARM::R4)
2933 .Case("v2", ARM::R5)
2934 .Case("v3", ARM::R6)
2935 .Case("v4", ARM::R7)
2936 .Case("v5", ARM::R8)
2937 .Case("v6", ARM::R9)
2938 .Case("v7", ARM::R10)
2939 .Case("v8", ARM::R11)
2940 .Case("sb", ARM::R9)
2941 .Case("sl", ARM::R10)
2942 .Case("fp", ARM::R11)
2943 .Default(0);
2944 }
2945 if (!RegNum) {
2946 // Check for aliases registered via .req. Canonicalize to lower case.
2947 // That's more consistent since register names are case insensitive, and
2948 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2949 StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2950 // If no match, return failure.
2951 if (Entry == RegisterReqs.end())
2952 return -1;
2953 Parser.Lex(); // Eat identifier token.
2954 return Entry->getValue();
2955 }
2956
2957 // Some FPUs only have 16 D registers, so D16-D31 are invalid
2958 if (hasD16() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
2959 return -1;
2960
2961 Parser.Lex(); // Eat identifier token.
2962
2963 return RegNum;
2964 }
2965
2966 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2967 // If a recoverable error occurs, return 1. If an irrecoverable error
2968 // occurs, return -1. An irrecoverable error is one where tokens have been
2969 // consumed in the process of trying to parse the shifter (i.e., when it is
2970 // indeed a shifter operand, but malformed).
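// For example, the "lsl #3" in "add r0, r1, r2, lsl #3", the "lsl r3" in
// "mov r0, r1, lsl r3", or a plain "rrx".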
2971 int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
2972 MCAsmParser &Parser = getParser();
2973 SMLoc S = Parser.getTok().getLoc();
2974 const AsmToken &Tok = Parser.getTok();
2975 if (Tok.isNot(AsmToken::Identifier))
2976 return -1;
2977
2978 std::string lowerCase = Tok.getString().lower();
2979 ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2980 .Case("asl", ARM_AM::lsl)
2981 .Case("lsl", ARM_AM::lsl)
2982 .Case("lsr", ARM_AM::lsr)
2983 .Case("asr", ARM_AM::asr)
2984 .Case("ror", ARM_AM::ror)
2985 .Case("rrx", ARM_AM::rrx)
2986 .Default(ARM_AM::no_shift);
2987
2988 if (ShiftTy == ARM_AM::no_shift)
2989 return 1;
2990
2991 Parser.Lex(); // Eat the operator.
2992
2993 // The source register for the shift has already been added to the
2994 // operand list, so we need to pop it off and combine it into the shifted
2995 // register operand instead.
2996 std::unique_ptr<ARMOperand> PrevOp(
2997 (ARMOperand *)Operands.pop_back_val().release());
2998 if (!PrevOp->isReg())
2999 return Error(PrevOp->getStartLoc(), "shift must be of a register");
3000 int SrcReg = PrevOp->getReg();
3001
3002 SMLoc EndLoc;
3003 int64_t Imm = 0;
3004 int ShiftReg = 0;
3005 if (ShiftTy == ARM_AM::rrx) {
3006 // RRX Doesn't have an explicit shift amount. The encoder expects
3007 // the shift register to be the same as the source register. Seems odd,
3008 // but OK.
3009 ShiftReg = SrcReg;
3010 } else {
3011 // Figure out if this is shifted by a constant or a register (for non-RRX).
3012 if (Parser.getTok().is(AsmToken::Hash) ||
3013 Parser.getTok().is(AsmToken::Dollar)) {
3014 Parser.Lex(); // Eat hash.
3015 SMLoc ImmLoc = Parser.getTok().getLoc();
3016 const MCExpr *ShiftExpr = nullptr;
3017 if (getParser().parseExpression(ShiftExpr, EndLoc)) {
3018 Error(ImmLoc, "invalid immediate shift value");
3019 return -1;
3020 }
3021 // The expression must be evaluatable as an immediate.
3022 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
3023 if (!CE) {
3024 Error(ImmLoc, "invalid immediate shift value");
3025 return -1;
3026 }
3027 // Range check the immediate.
3028 // lsl, ror: 0 <= imm <= 31
3029 // lsr, asr: 0 <= imm <= 32
3030 Imm = CE->getValue();
3031 if (Imm < 0 ||
3032 ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
3033 ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
3034 Error(ImmLoc, "immediate shift value out of range");
3035 return -1;
3036 }
3037 // shift by zero is a nop. Always send it through as lsl.
3038 // ('as' compatibility)
3039 if (Imm == 0)
3040 ShiftTy = ARM_AM::lsl;
3041 } else if (Parser.getTok().is(AsmToken::Identifier)) {
3042 SMLoc L = Parser.getTok().getLoc();
3043 EndLoc = Parser.getTok().getEndLoc();
3044 ShiftReg = tryParseRegister();
3045 if (ShiftReg == -1) {
3046 Error(L, "expected immediate or register in shift operand");
3047 return -1;
3048 }
3049 } else {
3050 Error(Parser.getTok().getLoc(),
3051 "expected immediate or register in shift operand");
3052 return -1;
3053 }
3054 }
3055
3056 if (ShiftReg && ShiftTy != ARM_AM::rrx)
3057 Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
3058 ShiftReg, Imm,
3059 S, EndLoc));
3060 else
3061 Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
3062 S, EndLoc));
3063
3064 return 0;
3065 }
3066
3067
3068 /// Try to parse a register name. The token must be an Identifier when called.
3069 /// If it's a register, an AsmOperand is created. Another AsmOperand is created
3070 /// if there is a "writeback". Returns 'true' if it's not a register.
3071 ///
3072 /// TODO this is likely to change to allow different register types and or to
3073 /// parse for a specific register type.
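/// For example, "r3", "r3!" (with writeback), or "d2[1]" (with a vector index).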
3074 bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
3075 MCAsmParser &Parser = getParser();
3076 const AsmToken &RegTok = Parser.getTok();
3077 int RegNo = tryParseRegister();
3078 if (RegNo == -1)
3079 return true;
3080
3081 Operands.push_back(ARMOperand::CreateReg(RegNo, RegTok.getLoc(),
3082 RegTok.getEndLoc()));
3083
3084 const AsmToken &ExclaimTok = Parser.getTok();
3085 if (ExclaimTok.is(AsmToken::Exclaim)) {
3086 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
3087 ExclaimTok.getLoc()));
3088 Parser.Lex(); // Eat exclaim token
3089 return false;
3090 }
3091
3092 // Also check for an index operand. This is only legal for vector registers,
3093 // but that'll get caught OK in operand matching, so we don't need to
3094 // explicitly filter everything else out here.
3095 if (Parser.getTok().is(AsmToken::LBrac)) {
3096 SMLoc SIdx = Parser.getTok().getLoc();
3097 Parser.Lex(); // Eat left bracket token.
3098
3099 const MCExpr *ImmVal;
3100 if (getParser().parseExpression(ImmVal))
3101 return true;
3102 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3103 if (!MCE)
3104 return TokError("immediate value expected for vector index");
3105
3106 if (Parser.getTok().isNot(AsmToken::RBrac))
3107 return Error(Parser.getTok().getLoc(), "']' expected");
3108
3109 SMLoc E = Parser.getTok().getEndLoc();
3110 Parser.Lex(); // Eat right bracket token.
3111
3112 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
3113 SIdx, E,
3114 getContext()));
3115 }
3116
3117 return false;
3118 }
3119
3120 /// MatchCoprocessorOperandName - Try to parse a coprocessor-related
3121 /// instruction with a symbolic operand name.
3122 /// We accept "crN" syntax for GAS compatibility.
3123 /// <operand-name> ::= <prefix><number>
3124 /// If CoprocOp is 'c', then:
3125 /// <prefix> ::= c | cr
3126 /// If CoprocOp is 'p', then:
3127 /// <prefix> ::= p
3128 /// <number> ::= integer in range [0, 15]
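/// For example, "p15" yields 15 when CoprocOp is 'p'; "c1" or "cr1" yield 1
/// when CoprocOp is 'c'.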
3129 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
3130 // Use the same layout as the tablegen'erated register name matcher. Ugly,
3131 // but efficient.
3132 if (Name.size() < 2 || Name[0] != CoprocOp)
3133 return -1;
3134 Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
3135
3136 switch (Name.size()) {
3137 default: return -1;
3138 case 1:
3139 switch (Name[0]) {
3140 default: return -1;
3141 case '0': return 0;
3142 case '1': return 1;
3143 case '2': return 2;
3144 case '3': return 3;
3145 case '4': return 4;
3146 case '5': return 5;
3147 case '6': return 6;
3148 case '7': return 7;
3149 case '8': return 8;
3150 case '9': return 9;
3151 }
3152 case 2:
3153 if (Name[0] != '1')
3154 return -1;
3155 switch (Name[1]) {
3156 default: return -1;
3157 // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
3158 // However, old cores (v5/v6) did use them in that way.
3159 case '0': return 10;
3160 case '1': return 11;
3161 case '2': return 12;
3162 case '3': return 13;
3163 case '4': return 14;
3164 case '5': return 15;
3165 }
3166 }
3167 }
3168
3169 /// parseITCondCode - Try to parse a condition code for an IT instruction.
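/// For example, the "ne" in "it ne" or the "gt" in "itte gt".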
3170 ARMAsmParser::OperandMatchResultTy
3171 ARMAsmParser::parseITCondCode(OperandVector &Operands) {
3172 MCAsmParser &Parser = getParser();
3173 SMLoc S = Parser.getTok().getLoc();
3174 const AsmToken &Tok = Parser.getTok();
3175 if (!Tok.is(AsmToken::Identifier))
3176 return MatchOperand_NoMatch;
3177 unsigned CC = StringSwitch<unsigned>(Tok.getString().lower())
3178 .Case("eq", ARMCC::EQ)
3179 .Case("ne", ARMCC::NE)
3180 .Case("hs", ARMCC::HS)
3181 .Case("cs", ARMCC::HS)
3182 .Case("lo", ARMCC::LO)
3183 .Case("cc", ARMCC::LO)
3184 .Case("mi", ARMCC::MI)
3185 .Case("pl", ARMCC::PL)
3186 .Case("vs", ARMCC::VS)
3187 .Case("vc", ARMCC::VC)
3188 .Case("hi", ARMCC::HI)
3189 .Case("ls", ARMCC::LS)
3190 .Case("ge", ARMCC::GE)
3191 .Case("lt", ARMCC::LT)
3192 .Case("gt", ARMCC::GT)
3193 .Case("le", ARMCC::LE)
3194 .Case("al", ARMCC::AL)
3195 .Default(~0U);
3196 if (CC == ~0U)
3197 return MatchOperand_NoMatch;
3198 Parser.Lex(); // Eat the token.
3199
3200 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
3201
3202 return MatchOperand_Success;
3203 }
3204
3205 /// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
3206 /// token must be an Identifier when called, and if it is a coprocessor
3207 /// number, the token is eaten and the operand is added to the operand list.
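/// For example, the "p15" in "mrc p15, 0, r0, c1, c0, 0".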
3208 ARMAsmParser::OperandMatchResultTy
3209 ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
3210 MCAsmParser &Parser = getParser();
3211 SMLoc S = Parser.getTok().getLoc();
3212 const AsmToken &Tok = Parser.getTok();
3213 if (Tok.isNot(AsmToken::Identifier))
3214 return MatchOperand_NoMatch;
3215
3216 int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
3217 if (Num == -1)
3218 return MatchOperand_NoMatch;
3219 // ARMv7 and v8 don't allow cp10/cp11 due to VFP/NEON specific instructions
3220 if ((hasV7Ops() || hasV8Ops()) && (Num == 10 || Num == 11))
3221 return MatchOperand_NoMatch;
3222
3223 Parser.Lex(); // Eat identifier token.
3224 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
3225 return MatchOperand_Success;
3226 }
3227
3228 /// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
3229 /// token must be an Identifier when called, and if it is a coprocessor
3230 /// register, the token is eaten and the operand is added to the operand list.
3231 ARMAsmParser::OperandMatchResultTy
3232 ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
3233 MCAsmParser &Parser = getParser();
3234 SMLoc S = Parser.getTok().getLoc();
3235 const AsmToken &Tok = Parser.getTok();
3236 if (Tok.isNot(AsmToken::Identifier))
3237 return MatchOperand_NoMatch;
3238
3239 int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
3240 if (Reg == -1)
3241 return MatchOperand_NoMatch;
3242
3243 Parser.Lex(); // Eat identifier token.
3244 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
3245 return MatchOperand_Success;
3246 }
3247
3248 /// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
3249 /// coproc_option : '{' imm0_255 '}'
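/// For example, the "{8}" at the end of an LDC such as "ldc p5, c1, [r0], {8}".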
3250 ARMAsmParser::OperandMatchResultTy
3251 ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
3252 MCAsmParser &Parser = getParser();
3253 SMLoc S = Parser.getTok().getLoc();
3254
3255 // If this isn't a '{', this isn't a coprocessor immediate operand.
3256 if (Parser.getTok().isNot(AsmToken::LCurly))
3257 return MatchOperand_NoMatch;
3258 Parser.Lex(); // Eat the '{'
3259
3260 const MCExpr *Expr;
3261 SMLoc Loc = Parser.getTok().getLoc();
3262 if (getParser().parseExpression(Expr)) {
3263 Error(Loc, "illegal expression");
3264 return MatchOperand_ParseFail;
3265 }
3266 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3267 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
3268 Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
3269 return MatchOperand_ParseFail;
3270 }
3271 int Val = CE->getValue();
3272
3273 // Check for and consume the closing '}'
3274 if (Parser.getTok().isNot(AsmToken::RCurly))
3275 return MatchOperand_ParseFail;
3276 SMLoc E = Parser.getTok().getEndLoc();
3277 Parser.Lex(); // Eat the '}'
3278
3279 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
3280 return MatchOperand_Success;
3281 }
3282
3283 // For register list parsing, we need to map from raw GPR register numbering
3284 // to the enumeration values. The enumeration values aren't sorted by
3285 // register number due to our using "sp", "lr" and "pc" as canonical names.
3286 static unsigned getNextRegister(unsigned Reg) {
3287 // If this is a GPR, we need to do it manually, otherwise we can rely
3288 // on the sort ordering of the enumeration since the other reg-classes
3289 // are sane.
3290 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3291 return Reg + 1;
3292 switch(Reg) {
3293 default: llvm_unreachable("Invalid GPR number!");
3294 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2;
3295 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4;
3296 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6;
3297 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8;
3298 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10;
3299 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
3300 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR;
3301 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0;
3302 }
3303 }
3304
3305 // Return the low-subreg of a given Q register.
3306 static unsigned getDRegFromQReg(unsigned QReg) {
3307 switch (QReg) {
3308 default: llvm_unreachable("expected a Q register!");
3309 case ARM::Q0: return ARM::D0;
3310 case ARM::Q1: return ARM::D2;
3311 case ARM::Q2: return ARM::D4;
3312 case ARM::Q3: return ARM::D6;
3313 case ARM::Q4: return ARM::D8;
3314 case ARM::Q5: return ARM::D10;
3315 case ARM::Q6: return ARM::D12;
3316 case ARM::Q7: return ARM::D14;
3317 case ARM::Q8: return ARM::D16;
3318 case ARM::Q9: return ARM::D18;
3319 case ARM::Q10: return ARM::D20;
3320 case ARM::Q11: return ARM::D22;
3321 case ARM::Q12: return ARM::D24;
3322 case ARM::Q13: return ARM::D26;
3323 case ARM::Q14: return ARM::D28;
3324 case ARM::Q15: return ARM::D30;
3325 }
3326 }
3327
3328 /// Parse a register list.
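/// For example, "{r0, r1, r4-r6}" as used by LDM/STM and PUSH/POP.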
3329 bool ARMAsmParser::parseRegisterList(OperandVector &Operands) {
3330 MCAsmParser &Parser = getParser();
3331 assert(Parser.getTok().is(AsmToken::LCurly) &&
3332 "Token is not a Left Curly Brace");
3333 SMLoc S = Parser.getTok().getLoc();
3334 Parser.Lex(); // Eat '{' token.
3335 SMLoc RegLoc = Parser.getTok().getLoc();
3336
3337 // Check the first register in the list to see what register class
3338 // this is a list of.
3339 int Reg = tryParseRegister();
3340 if (Reg == -1)
3341 return Error(RegLoc, "register expected");
3342
3343 // The reglist instructions have at most 16 registers, so reserve
3344 // space for that many.
3345 int EReg = 0;
3346 SmallVector<std::pair<unsigned, unsigned>, 16> Registers;
3347
3348 // Allow Q regs and just interpret them as the two D sub-registers.
3349 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3350 Reg = getDRegFromQReg(Reg);
3351 EReg = MRI->getEncodingValue(Reg);
3352 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3353 ++Reg;
3354 }
3355 const MCRegisterClass *RC;
3356 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3357 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
3358 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
3359 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
3360 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
3361 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
3362 else
3363 return Error(RegLoc, "invalid register in register list");
3364
3365 // Store the register.
3366 EReg = MRI->getEncodingValue(Reg);
3367 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3368
3369 // This starts immediately after the first register token in the list,
3370 // so we can see either a comma or a minus (range separator) as a legal
3371 // next token.
3372 while (Parser.getTok().is(AsmToken::Comma) ||
3373 Parser.getTok().is(AsmToken::Minus)) {
3374 if (Parser.getTok().is(AsmToken::Minus)) {
3375 Parser.Lex(); // Eat the minus.
3376 SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3377 int EndReg = tryParseRegister();
3378 if (EndReg == -1)
3379 return Error(AfterMinusLoc, "register expected");
3380 // Allow Q regs and just interpret them as the two D sub-registers.
3381 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3382 EndReg = getDRegFromQReg(EndReg) + 1;
3383 // If the register is the same as the start reg, there's nothing
3384 // more to do.
3385 if (Reg == EndReg)
3386 continue;
3387 // The register must be in the same register class as the first.
3388 if (!RC->contains(EndReg))
3389 return Error(AfterMinusLoc, "invalid register in register list");
3390 // Ranges must go from low to high.
3391 if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
3392 return Error(AfterMinusLoc, "bad range in register list");
3393
3394 // Add all the registers in the range to the register list.
3395 while (Reg != EndReg) {
3396 Reg = getNextRegister(Reg);
3397 EReg = MRI->getEncodingValue(Reg);
3398 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3399 }
3400 continue;
3401 }
3402 Parser.Lex(); // Eat the comma.
3403 RegLoc = Parser.getTok().getLoc();
3404 int OldReg = Reg;
3405 const AsmToken RegTok = Parser.getTok();
3406 Reg = tryParseRegister();
3407 if (Reg == -1)
3408 return Error(RegLoc, "register expected");
3409 // Allow Q regs and just interpret them as the two D sub-registers.
3410 bool isQReg = false;
3411 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3412 Reg = getDRegFromQReg(Reg);
3413 isQReg = true;
3414 }
3415 // The register must be in the same register class as the first.
3416 if (!RC->contains(Reg))
3417 return Error(RegLoc, "invalid register in register list");
3418 // List must be monotonically increasing.
3419 if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
3420 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3421 Warning(RegLoc, "register list not in ascending order");
3422 else
3423 return Error(RegLoc, "register list not in ascending order");
3424 }
3425 if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
3426 Warning(RegLoc, "duplicated register (" + RegTok.getString() +
3427 ") in register list");
3428 continue;
3429 }
3430 // VFP register lists must also be contiguous.
3431 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
3432 Reg != OldReg + 1)
3433 return Error(RegLoc, "non-contiguous register range");
3434 EReg = MRI->getEncodingValue(Reg);
3435 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3436 if (isQReg) {
3437 EReg = MRI->getEncodingValue(++Reg);
3438 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3439 }
3440 }
3441
3442 if (Parser.getTok().isNot(AsmToken::RCurly))
3443 return Error(Parser.getTok().getLoc(), "'}' expected");
3444 SMLoc E = Parser.getTok().getEndLoc();
3445 Parser.Lex(); // Eat '}' token.
3446
3447 // Push the register list operand.
3448 Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
3449
3450 // The ARM system instruction variants for LDM/STM have a '^' token here.
3451 if (Parser.getTok().is(AsmToken::Caret)) {
3452 Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
3453 Parser.Lex(); // Eat '^' token.
3454 }
3455
3456 return false;
3457 }
3458
3459 // Helper function to parse the lane index for vector lists.
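// For example, the "[2]" in "d3[2]", or "[]" for the all-lanes form "d3[]".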
3460 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3461 parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
3462 MCAsmParser &Parser = getParser();
3463 Index = 0; // Always return a defined index value.
3464 if (Parser.getTok().is(AsmToken::LBrac)) {
3465 Parser.Lex(); // Eat the '['.
3466 if (Parser.getTok().is(AsmToken::RBrac)) {
3467 // "Dn[]" is the 'all lanes' syntax.
3468 LaneKind = AllLanes;
3469 EndLoc = Parser.getTok().getEndLoc();
3470 Parser.Lex(); // Eat the ']'.
3471 return MatchOperand_Success;
3472 }
3473
3474 // There's an optional '#' token here. Normally there wouldn't be, but
3475     // inline assembly puts one in, and it's friendly to accept that.
3476 if (Parser.getTok().is(AsmToken::Hash))
3477       Parser.Lex(); // Eat the '#'.
3478
3479 const MCExpr *LaneIndex;
3480 SMLoc Loc = Parser.getTok().getLoc();
3481 if (getParser().parseExpression(LaneIndex)) {
3482 Error(Loc, "illegal expression");
3483 return MatchOperand_ParseFail;
3484 }
3485 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
3486 if (!CE) {
3487 Error(Loc, "lane index must be empty or an integer");
3488 return MatchOperand_ParseFail;
3489 }
3490 if (Parser.getTok().isNot(AsmToken::RBrac)) {
3491 Error(Parser.getTok().getLoc(), "']' expected");
3492 return MatchOperand_ParseFail;
3493 }
3494 EndLoc = Parser.getTok().getEndLoc();
3495 Parser.Lex(); // Eat the ']'.
3496 int64_t Val = CE->getValue();
3497
3498 // FIXME: Make this range check context sensitive for .8, .16, .32.
3499 if (Val < 0 || Val > 7) {
3500 Error(Parser.getTok().getLoc(), "lane index out of range");
3501 return MatchOperand_ParseFail;
3502 }
3503 Index = Val;
3504 LaneKind = IndexedLane;
3505 return MatchOperand_Success;
3506 }
3507 LaneKind = NoLanes;
3508 return MatchOperand_Success;
3509 }
3510
3511 // parse a vector register list
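// For example, "{d0, d1, d2, d3}", "{q0, q1}", "{d0[], d1[]}" (all lanes), or
// "{d0[1], d2[1]}" (double-spaced, indexed).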
3512 ARMAsmParser::OperandMatchResultTy
3513 ARMAsmParser::parseVectorList(OperandVector &Operands) {
3514 MCAsmParser &Parser = getParser();
3515 VectorLaneTy LaneKind;
3516 unsigned LaneIndex;
3517 SMLoc S = Parser.getTok().getLoc();
3518 // As an extension (to match gas), support a plain D register or Q register
3519 // (without enclosing curly braces) as a single or double entry list,
3520 // respectively.
3521 if (Parser.getTok().is(AsmToken::Identifier)) {
3522 SMLoc E = Parser.getTok().getEndLoc();
3523 int Reg = tryParseRegister();
3524 if (Reg == -1)
3525 return MatchOperand_NoMatch;
3526 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
3527 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3528 if (Res != MatchOperand_Success)
3529 return Res;
3530 switch (LaneKind) {
3531 case NoLanes:
3532 Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
3533 break;
3534 case AllLanes:
3535 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
3536 S, E));
3537 break;
3538 case IndexedLane:
3539 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
3540 LaneIndex,
3541 false, S, E));
3542 break;
3543 }
3544 return MatchOperand_Success;
3545 }
3546 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3547 Reg = getDRegFromQReg(Reg);
3548 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3549 if (Res != MatchOperand_Success)
3550 return Res;
3551 switch (LaneKind) {
3552 case NoLanes:
3553 Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3554 &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3555 Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
3556 break;
3557 case AllLanes:
3558 Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3559 &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3560 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
3561 S, E));
3562 break;
3563 case IndexedLane:
3564 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3565 LaneIndex,
3566 false, S, E));
3567 break;
3568 }
3569 return MatchOperand_Success;
3570 }
3571 Error(S, "vector register expected");
3572 return MatchOperand_ParseFail;
3573 }
3574
3575 if (Parser.getTok().isNot(AsmToken::LCurly))
3576 return MatchOperand_NoMatch;
3577
3578 Parser.Lex(); // Eat '{' token.
3579 SMLoc RegLoc = Parser.getTok().getLoc();
3580
3581 int Reg = tryParseRegister();
3582 if (Reg == -1) {
3583 Error(RegLoc, "register expected");
3584 return MatchOperand_ParseFail;
3585 }
3586 unsigned Count = 1;
3587 int Spacing = 0;
3588 unsigned FirstReg = Reg;
3589 // The list is of D registers, but we also allow Q regs and just interpret
3590 // them as the two D sub-registers.
3591 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3592 FirstReg = Reg = getDRegFromQReg(Reg);
3593 Spacing = 1; // double-spacing requires explicit D registers, otherwise
3594 // it's ambiguous with four-register single spaced.
3595 ++Reg;
3596 ++Count;
3597 }
3598
3599 SMLoc E;
3600 if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
3601 return MatchOperand_ParseFail;
3602
3603 while (Parser.getTok().is(AsmToken::Comma) ||
3604 Parser.getTok().is(AsmToken::Minus)) {
3605 if (Parser.getTok().is(AsmToken::Minus)) {
3606 if (!Spacing)
3607 Spacing = 1; // Register range implies a single spaced list.
3608 else if (Spacing == 2) {
3609 Error(Parser.getTok().getLoc(),
3610 "sequential registers in double spaced list");
3611 return MatchOperand_ParseFail;
3612 }
3613 Parser.Lex(); // Eat the minus.
3614 SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3615 int EndReg = tryParseRegister();
3616 if (EndReg == -1) {
3617 Error(AfterMinusLoc, "register expected");
3618 return MatchOperand_ParseFail;
3619 }
3620 // Allow Q regs and just interpret them as the two D sub-registers.
3621 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3622 EndReg = getDRegFromQReg(EndReg) + 1;
3623 // If the register is the same as the start reg, there's nothing
3624 // more to do.
3625 if (Reg == EndReg)
3626 continue;
3627 // The register must be in the same register class as the first.
3628 if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3629 Error(AfterMinusLoc, "invalid register in register list");
3630 return MatchOperand_ParseFail;
3631 }
3632 // Ranges must go from low to high.
3633 if (Reg > EndReg) {
3634 Error(AfterMinusLoc, "bad range in register list");
3635 return MatchOperand_ParseFail;
3636 }
3637 // Parse the lane specifier if present.
3638 VectorLaneTy NextLaneKind;
3639 unsigned NextLaneIndex;
3640 if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3641 MatchOperand_Success)
3642 return MatchOperand_ParseFail;
3643 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3644 Error(AfterMinusLoc, "mismatched lane index in register list");
3645 return MatchOperand_ParseFail;
3646 }
3647
3648 // Add all the registers in the range to the register list.
3649 Count += EndReg - Reg;
3650 Reg = EndReg;
3651 continue;
3652 }
3653 Parser.Lex(); // Eat the comma.
3654 RegLoc = Parser.getTok().getLoc();
3655 int OldReg = Reg;
3656 Reg = tryParseRegister();
3657 if (Reg == -1) {
3658 Error(RegLoc, "register expected");
3659 return MatchOperand_ParseFail;
3660 }
3661 // vector register lists must be contiguous.
3662     // It's OK to use the enumeration values directly here, as the
3663 // VFP register classes have the enum sorted properly.
3664 //
3665 // The list is of D registers, but we also allow Q regs and just interpret
3666 // them as the two D sub-registers.
3667 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3668 if (!Spacing)
3669 Spacing = 1; // Register range implies a single spaced list.
3670 else if (Spacing == 2) {
3671 Error(RegLoc,
3672 "invalid register in double-spaced list (must be 'D' register')");
3673 return MatchOperand_ParseFail;
3674 }
3675 Reg = getDRegFromQReg(Reg);
3676 if (Reg != OldReg + 1) {
3677 Error(RegLoc, "non-contiguous register range");
3678 return MatchOperand_ParseFail;
3679 }
3680 ++Reg;
3681 Count += 2;
3682 // Parse the lane specifier if present.
3683 VectorLaneTy NextLaneKind;
3684 unsigned NextLaneIndex;
3685 SMLoc LaneLoc = Parser.getTok().getLoc();
3686 if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3687 MatchOperand_Success)
3688 return MatchOperand_ParseFail;
3689 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3690 Error(LaneLoc, "mismatched lane index in register list");
3691 return MatchOperand_ParseFail;
3692 }
3693 continue;
3694 }
3695 // Normal D register.
3696 // Figure out the register spacing (single or double) of the list if
3697 // we don't know it already.
3698 if (!Spacing)
3699 Spacing = 1 + (Reg == OldReg + 2);
3700
3701 // Just check that it's contiguous and keep going.
3702 if (Reg != OldReg + Spacing) {
3703 Error(RegLoc, "non-contiguous register range");
3704 return MatchOperand_ParseFail;
3705 }
3706 ++Count;
3707 // Parse the lane specifier if present.
3708 VectorLaneTy NextLaneKind;
3709 unsigned NextLaneIndex;
3710 SMLoc EndLoc = Parser.getTok().getLoc();
3711 if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
3712 return MatchOperand_ParseFail;
3713 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3714 Error(EndLoc, "mismatched lane index in register list");
3715 return MatchOperand_ParseFail;
3716 }
3717 }
3718
3719 if (Parser.getTok().isNot(AsmToken::RCurly)) {
3720 Error(Parser.getTok().getLoc(), "'}' expected");
3721 return MatchOperand_ParseFail;
3722 }
3723 E = Parser.getTok().getEndLoc();
3724 Parser.Lex(); // Eat '}' token.
3725
3726 switch (LaneKind) {
3727 case NoLanes:
3728 // Two-register operands have been converted to the
3729 // composite register classes.
3730 if (Count == 2) {
3731 const MCRegisterClass *RC = (Spacing == 1) ?
3732 &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3733 &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3734 FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3735 }
3736
3737 Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3738 (Spacing == 2), S, E));
3739 break;
3740 case AllLanes:
3741 // Two-register operands have been converted to the
3742 // composite register classes.
3743 if (Count == 2) {
3744 const MCRegisterClass *RC = (Spacing == 1) ?
3745 &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3746 &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3747 FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3748 }
3749 Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3750 (Spacing == 2),
3751 S, E));
3752 break;
3753 case IndexedLane:
3754 Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3755 LaneIndex,
3756 (Spacing == 2),
3757 S, E));
3758 break;
3759 }
3760 return MatchOperand_Success;
3761 }
3762
3763 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
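/// For example, the "ish" in "dmb ish" or a raw immediate such as "dmb #15".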
3764 ARMAsmParser::OperandMatchResultTy
3765 ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
3766 MCAsmParser &Parser = getParser();
3767 SMLoc S = Parser.getTok().getLoc();
3768 const AsmToken &Tok = Parser.getTok();
3769 unsigned Opt;
3770
3771 if (Tok.is(AsmToken::Identifier)) {
3772 StringRef OptStr = Tok.getString();
3773
3774 Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower())
3775 .Case("sy", ARM_MB::SY)
3776 .Case("st", ARM_MB::ST)
3777 .Case("ld", ARM_MB::LD)
3778 .Case("sh", ARM_MB::ISH)
3779 .Case("ish", ARM_MB::ISH)
3780 .Case("shst", ARM_MB::ISHST)
3781 .Case("ishst", ARM_MB::ISHST)
3782 .Case("ishld", ARM_MB::ISHLD)
3783 .Case("nsh", ARM_MB::NSH)
3784 .Case("un", ARM_MB::NSH)
3785 .Case("nshst", ARM_MB::NSHST)
3786 .Case("nshld", ARM_MB::NSHLD)
3787 .Case("unst", ARM_MB::NSHST)
3788 .Case("osh", ARM_MB::OSH)
3789 .Case("oshst", ARM_MB::OSHST)
3790 .Case("oshld", ARM_MB::OSHLD)
3791 .Default(~0U);
3792
3793 // ishld, oshld, nshld and ld are only available from ARMv8.
3794 if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
3795 Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
3796 Opt = ~0U;
3797
3798 if (Opt == ~0U)
3799 return MatchOperand_NoMatch;
3800
3801 Parser.Lex(); // Eat identifier token.
3802 } else if (Tok.is(AsmToken::Hash) ||
3803 Tok.is(AsmToken::Dollar) ||
3804 Tok.is(AsmToken::Integer)) {
3805 if (Parser.getTok().isNot(AsmToken::Integer))
3806 Parser.Lex(); // Eat '#' or '$'.
3807 SMLoc Loc = Parser.getTok().getLoc();
3808
3809 const MCExpr *MemBarrierID;
3810 if (getParser().parseExpression(MemBarrierID)) {
3811 Error(Loc, "illegal expression");
3812 return MatchOperand_ParseFail;
3813 }
3814
3815 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
3816 if (!CE) {
3817 Error(Loc, "constant expression expected");
3818 return MatchOperand_ParseFail;
3819 }
3820
3821 int Val = CE->getValue();
3822 if (Val & ~0xf) {
3823 Error(Loc, "immediate value out of range");
3824 return MatchOperand_ParseFail;
3825 }
3826
3827 Opt = ARM_MB::RESERVED_0 + Val;
3828 } else
3829 return MatchOperand_ParseFail;
3830
3831 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3832 return MatchOperand_Success;
3833 }
3834
3835 /// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
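/// For example, the "sy" in "isb sy" or a raw immediate such as "isb #15".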
3836 ARMAsmParser::OperandMatchResultTy
3837 ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
3838 MCAsmParser &Parser = getParser();
3839 SMLoc S = Parser.getTok().getLoc();
3840 const AsmToken &Tok = Parser.getTok();
3841 unsigned Opt;
3842
3843 if (Tok.is(AsmToken::Identifier)) {
3844 StringRef OptStr = Tok.getString();
3845
3846 if (OptStr.equals_lower("sy"))
3847 Opt = ARM_ISB::SY;
3848 else
3849 return MatchOperand_NoMatch;
3850
3851 Parser.Lex(); // Eat identifier token.
3852 } else if (Tok.is(AsmToken::Hash) ||
3853 Tok.is(AsmToken::Dollar) ||
3854 Tok.is(AsmToken::Integer)) {
3855 if (Parser.getTok().isNot(AsmToken::Integer))
3856 Parser.Lex(); // Eat '#' or '$'.
3857 SMLoc Loc = Parser.getTok().getLoc();
3858
3859 const MCExpr *ISBarrierID;
3860 if (getParser().parseExpression(ISBarrierID)) {
3861 Error(Loc, "illegal expression");
3862 return MatchOperand_ParseFail;
3863 }
3864
3865 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
3866 if (!CE) {
3867 Error(Loc, "constant expression expected");
3868 return MatchOperand_ParseFail;
3869 }
3870
3871 int Val = CE->getValue();
3872 if (Val & ~0xf) {
3873 Error(Loc, "immediate value out of range");
3874 return MatchOperand_ParseFail;
3875 }
3876
3877 Opt = ARM_ISB::RESERVED_0 + Val;
3878 } else
3879 return MatchOperand_ParseFail;
3880
3881 Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
3882 (ARM_ISB::InstSyncBOpt)Opt, S));
3883 return MatchOperand_Success;
3884 }
3885
3886
3887 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
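/// For example, the "aif" in "cpsid aif" or the "i" in "cpsie i".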
3888 ARMAsmParser::OperandMatchResultTy
3889 ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
3890 MCAsmParser &Parser = getParser();
3891 SMLoc S = Parser.getTok().getLoc();
3892 const AsmToken &Tok = Parser.getTok();
3893 if (!Tok.is(AsmToken::Identifier))
3894 return MatchOperand_NoMatch;
3895 StringRef IFlagsStr = Tok.getString();
3896
3897 // An iflags string of "none" is interpreted to mean that none of the AIF
3898 // bits are set. Not a terribly useful instruction, but a valid encoding.
3899 unsigned IFlags = 0;
3900 if (IFlagsStr != "none") {
3901 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3902 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3903 .Case("a", ARM_PROC::A)
3904 .Case("i", ARM_PROC::I)
3905 .Case("f", ARM_PROC::F)
3906 .Default(~0U);
3907
3908 // If some specific iflag is already set, it means that some letter is
3909 // present more than once, this is not acceptable.
3910 if (Flag == ~0U || (IFlags & Flag))
3911 return MatchOperand_NoMatch;
3912
3913 IFlags |= Flag;
3914 }
3915 }
3916
3917 Parser.Lex(); // Eat identifier token.
3918 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3919 return MatchOperand_Success;
3920 }
3921
3922 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
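/// For example, the "cpsr_fc" in "msr cpsr_fc, r0", the "apsr_nzcvq" in
/// "msr apsr_nzcvq, r0", or an M-class register such as "primask".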
3923 ARMAsmParser::OperandMatchResultTy
3924 ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
3925 MCAsmParser &Parser = getParser();
3926 SMLoc S = Parser.getTok().getLoc();
3927 const AsmToken &Tok = Parser.getTok();
3928 if (!Tok.is(AsmToken::Identifier))
3929 return MatchOperand_NoMatch;
3930 StringRef Mask = Tok.getString();
3931
3932 if (isMClass()) {
3933 // See ARMv6-M 10.1.1
3934 std::string Name = Mask.lower();
3935 unsigned FlagsVal = StringSwitch<unsigned>(Name)
3936 // Note: in the documentation:
3937 // ARM deprecates using MSR APSR without a _<bits> qualifier as an alias
3938 // for MSR APSR_nzcvq.
3939     // but we do make it an alias here. This is done to get the "mask encoding"
3940 // bits correct on MSR APSR writes.
3941 //
3942 // FIXME: Note the 0xc00 "mask encoding" bits version of the registers
3943 // should really only be allowed when writing a special register. Note
3944 // they get dropped in the MRS instruction reading a special register as
3945 // the SYSm field is only 8 bits.
3946 .Case("apsr", 0x800)
3947 .Case("apsr_nzcvq", 0x800)
3948 .Case("apsr_g", 0x400)
3949 .Case("apsr_nzcvqg", 0xc00)
3950 .Case("iapsr", 0x801)
3951 .Case("iapsr_nzcvq", 0x801)
3952 .Case("iapsr_g", 0x401)
3953 .Case("iapsr_nzcvqg", 0xc01)
3954 .Case("eapsr", 0x802)
3955 .Case("eapsr_nzcvq", 0x802)
3956 .Case("eapsr_g", 0x402)
3957 .Case("eapsr_nzcvqg", 0xc02)
3958 .Case("xpsr", 0x803)
3959 .Case("xpsr_nzcvq", 0x803)
3960 .Case("xpsr_g", 0x403)
3961 .Case("xpsr_nzcvqg", 0xc03)
3962 .Case("ipsr", 0x805)
3963 .Case("epsr", 0x806)
3964 .Case("iepsr", 0x807)
3965 .Case("msp", 0x808)
3966 .Case("psp", 0x809)
3967 .Case("primask", 0x810)
3968 .Case("basepri", 0x811)
3969 .Case("basepri_max", 0x812)
3970 .Case("faultmask", 0x813)
3971 .Case("control", 0x814)
3972 .Default(~0U);
3973
3974 if (FlagsVal == ~0U)
3975 return MatchOperand_NoMatch;
3976
3977 if (!hasDSP() && (FlagsVal & 0x400))
3978 // The _g and _nzcvqg versions are only valid if the DSP extension is
3979 // available.
3980 return MatchOperand_NoMatch;
3981
3982 if (!hasV7Ops() && FlagsVal >= 0x811 && FlagsVal <= 0x813)
3983 // basepri, basepri_max and faultmask only valid for V7m.
3984 return MatchOperand_NoMatch;
3985
3986 Parser.Lex(); // Eat identifier token.
3987 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3988 return MatchOperand_Success;
3989 }
3990
3991 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3992 size_t Start = 0, Next = Mask.find('_');
3993 StringRef Flags = "";
3994 std::string SpecReg = Mask.slice(Start, Next).lower();
3995 if (Next != StringRef::npos)
3996 Flags = Mask.slice(Next+1, Mask.size());
3997
3998 // FlagsVal contains the complete mask:
3999 // 3-0: Mask
4000 // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
4001 unsigned FlagsVal = 0;
4002
4003 if (SpecReg == "apsr") {
4004 FlagsVal = StringSwitch<unsigned>(Flags)
4005 .Case("nzcvq", 0x8) // same as CPSR_f
4006 .Case("g", 0x4) // same as CPSR_s
4007 .Case("nzcvqg", 0xc) // same as CPSR_fs
4008 .Default(~0U);
4009
4010 if (FlagsVal == ~0U) {
4011 if (!Flags.empty())
4012 return MatchOperand_NoMatch;
4013 else
4014 FlagsVal = 8; // No flag
4015 }
4016 } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
4017 // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
4018 if (Flags == "all" || Flags == "")
4019 Flags = "fc";
4020 for (int i = 0, e = Flags.size(); i != e; ++i) {
4021 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
4022 .Case("c", 1)
4023 .Case("x", 2)
4024 .Case("s", 4)
4025 .Case("f", 8)
4026 .Default(~0U);
4027
4028 // If some specific flag is already set, it means that some letter is
4029 // present more than once, this is not acceptable.
4030 if (FlagsVal == ~0U || (FlagsVal & Flag))
4031 return MatchOperand_NoMatch;
4032 FlagsVal |= Flag;
4033 }
4034 } else // No match for special register.
4035 return MatchOperand_NoMatch;
4036
4037 // Special register without flags is NOT equivalent to "fc" flags.
4038 // NOTE: This is a divergence from gas' behavior. Uncommenting the following
4039 // two lines would enable gas compatibility at the expense of breaking
4040 // round-tripping.
4041 //
4042 // if (!FlagsVal)
4043 // FlagsVal = 0x9;
4044
4045 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
4046 if (SpecReg == "spsr")
4047 FlagsVal |= 16;
4048
4049 Parser.Lex(); // Eat identifier token.
4050 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
4051 return MatchOperand_Success;
4052 }
4053
4054 /// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
4055 /// use in the MRS/MSR instructions added to support virtualization.
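/// For example, the "lr_irq" in "mrs r0, lr_irq" or the "sp_usr" in
/// "msr sp_usr, r1".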
4056 ARMAsmParser::OperandMatchResultTy
4057 ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
4058 MCAsmParser &Parser = getParser();
4059 SMLoc S = Parser.getTok().getLoc();
4060 const AsmToken &Tok = Parser.getTok();
4061 if (!Tok.is(AsmToken::Identifier))
4062 return MatchOperand_NoMatch;
4063 StringRef RegName = Tok.getString();
4064
4065 // The values here come from B9.2.3 of the ARM ARM, where bits 4-0 are SysM
4066 // and bit 5 is R.
4067 unsigned Encoding = StringSwitch<unsigned>(RegName.lower())
4068 .Case("r8_usr", 0x00)
4069 .Case("r9_usr", 0x01)
4070 .Case("r10_usr", 0x02)
4071 .Case("r11_usr", 0x03)
4072 .Case("r12_usr", 0x04)
4073 .Case("sp_usr", 0x05)
4074 .Case("lr_usr", 0x06)
4075 .Case("r8_fiq", 0x08)
4076 .Case("r9_fiq", 0x09)
4077 .Case("r10_fiq", 0x0a)
4078 .Case("r11_fiq", 0x0b)
4079 .Case("r12_fiq", 0x0c)
4080 .Case("sp_fiq", 0x0d)
4081 .Case("lr_fiq", 0x0e)
4082 .Case("lr_irq", 0x10)
4083 .Case("sp_irq", 0x11)
4084 .Case("lr_svc", 0x12)
4085 .Case("sp_svc", 0x13)
4086 .Case("lr_abt", 0x14)
4087 .Case("sp_abt", 0x15)
4088 .Case("lr_und", 0x16)
4089 .Case("sp_und", 0x17)
4090 .Case("lr_mon", 0x1c)
4091 .Case("sp_mon", 0x1d)
4092 .Case("elr_hyp", 0x1e)
4093 .Case("sp_hyp", 0x1f)
4094 .Case("spsr_fiq", 0x2e)
4095 .Case("spsr_irq", 0x30)
4096 .Case("spsr_svc", 0x32)
4097 .Case("spsr_abt", 0x34)
4098 .Case("spsr_und", 0x36)
4099 .Case("spsr_mon", 0x3c)
4100 .Case("spsr_hyp", 0x3e)
4101 .Default(~0U);
4102
4103 if (Encoding == ~0U)
4104 return MatchOperand_NoMatch;
4105
4106 Parser.Lex(); // Eat identifier token.
4107 Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
4108 return MatchOperand_Success;
4109 }
4110
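/// parsePKHImm - Parse the shift operand of a PKH instruction, e.g. the
/// "lsl #8" in "pkhbt r0, r1, r2, lsl #8" or the "asr #16" in
/// "pkhtb r0, r1, r2, asr #16". Low and High bound the accepted shift amount.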
4111 ARMAsmParser::OperandMatchResultTy
4112 ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op, int Low,
4113 int High) {
4114 MCAsmParser &Parser = getParser();
4115 const AsmToken &Tok = Parser.getTok();
4116 if (Tok.isNot(AsmToken::Identifier)) {
4117 Error(Parser.getTok().getLoc(), Op + " operand expected.");
4118 return MatchOperand_ParseFail;
4119 }
4120 StringRef ShiftName = Tok.getString();
4121 std::string LowerOp = Op.lower();
4122 std::string UpperOp = Op.upper();
4123 if (ShiftName != LowerOp && ShiftName != UpperOp) {
4124 Error(Parser.getTok().getLoc(), Op + " operand expected.");
4125 return MatchOperand_ParseFail;
4126 }
4127 Parser.Lex(); // Eat shift type token.
4128
4129 // There must be a '#' and a shift amount.
4130 if (Parser.getTok().isNot(AsmToken::Hash) &&
4131 Parser.getTok().isNot(AsmToken::Dollar)) {
4132 Error(Parser.getTok().getLoc(), "'#' expected");
4133 return MatchOperand_ParseFail;
4134 }
4135 Parser.Lex(); // Eat hash token.
4136
4137 const MCExpr *ShiftAmount;
4138 SMLoc Loc = Parser.getTok().getLoc();
4139 SMLoc EndLoc;
4140 if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4141 Error(Loc, "illegal expression");
4142 return MatchOperand_ParseFail;
4143 }
4144 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4145 if (!CE) {
4146 Error(Loc, "constant expression expected");
4147 return MatchOperand_ParseFail;
4148 }
4149 int Val = CE->getValue();
4150 if (Val < Low || Val > High) {
4151 Error(Loc, "immediate value out of range");
4152 return MatchOperand_ParseFail;
4153 }
4154
4155 Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
4156
4157 return MatchOperand_Success;
4158 }
4159
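/// parseSetEndImm - Parse the endianness operand of SETEND, e.g. the "be" in
/// "setend be" or the "le" in "setend le".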
4160 ARMAsmParser::OperandMatchResultTy
4161 ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
4162 MCAsmParser &Parser = getParser();
4163 const AsmToken &Tok = Parser.getTok();
4164 SMLoc S = Tok.getLoc();
4165 if (Tok.isNot(AsmToken::Identifier)) {
4166 Error(S, "'be' or 'le' operand expected");
4167 return MatchOperand_ParseFail;
4168 }
4169 int Val = StringSwitch<int>(Tok.getString().lower())
4170 .Case("be", 1)
4171 .Case("le", 0)
4172 .Default(-1);
4173 Parser.Lex(); // Eat the token.
4174
4175 if (Val == -1) {
4176 Error(S, "'be' or 'le' operand expected");
4177 return MatchOperand_ParseFail;
4178 }
4179 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val,
4180 getContext()),
4181 S, Tok.getEndLoc()));
4182 return MatchOperand_Success;
4183 }
4184
4185 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
4186 /// instructions. Legal values are:
4187 /// lsl #n 'n' in [0,31]
4188 /// asr #n 'n' in [1,32]
4189 /// n == 32 encoded as n == 0.
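/// For example, the "lsl #4" in "ssat r0, #8, r1, lsl #4".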
4190 ARMAsmParser::OperandMatchResultTy
4191 ARMAsmParser::parseShifterImm(OperandVector &Operands) {
4192 MCAsmParser &Parser = getParser();
4193 const AsmToken &Tok = Parser.getTok();
4194 SMLoc S = Tok.getLoc();
4195 if (Tok.isNot(AsmToken::Identifier)) {
4196 Error(S, "shift operator 'asr' or 'lsl' expected");
4197 return MatchOperand_ParseFail;
4198 }
4199 StringRef ShiftName = Tok.getString();
4200 bool isASR;
4201 if (ShiftName == "lsl" || ShiftName == "LSL")
4202 isASR = false;
4203 else if (ShiftName == "asr" || ShiftName == "ASR")
4204 isASR = true;
4205 else {
4206 Error(S, "shift operator 'asr' or 'lsl' expected");
4207 return MatchOperand_ParseFail;
4208 }
4209 Parser.Lex(); // Eat the operator.
4210
4211 // A '#' and a shift amount.
4212 if (Parser.getTok().isNot(AsmToken::Hash) &&
4213 Parser.getTok().isNot(AsmToken::Dollar)) {
4214 Error(Parser.getTok().getLoc(), "'#' expected");
4215 return MatchOperand_ParseFail;
4216 }
4217 Parser.Lex(); // Eat hash token.
4218 SMLoc ExLoc = Parser.getTok().getLoc();
4219
4220 const MCExpr *ShiftAmount;
4221 SMLoc EndLoc;
4222 if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4223 Error(ExLoc, "malformed shift expression");
4224 return MatchOperand_ParseFail;
4225 }
4226 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4227 if (!CE) {
4228 Error(ExLoc, "shift amount must be an immediate");
4229 return MatchOperand_ParseFail;
4230 }
4231
4232 int64_t Val = CE->getValue();
4233 if (isASR) {
4234 // Shift amount must be in [1,32]
4235 if (Val < 1 || Val > 32) {
4236 Error(ExLoc, "'asr' shift amount must be in range [1,32]");
4237 return MatchOperand_ParseFail;
4238 }
4239 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
4240 if (isThumb() && Val == 32) {
4241 Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
4242 return MatchOperand_ParseFail;
4243 }
4244 if (Val == 32) Val = 0;
4245 } else {
4246     // Shift amount must be in [0,31]
4247     if (Val < 0 || Val > 31) {
4248       Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
4249 return MatchOperand_ParseFail;
4250 }
4251 }
4252
4253 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
4254
4255 return MatchOperand_Success;
4256 }
4257
4258 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
4259 /// of instructions. Legal values are:
4260 /// ror #n 'n' in {0, 8, 16, 24}
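/// For example, the "ror #8" in "sxtb r0, r1, ror #8".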
4261 ARMAsmParser::OperandMatchResultTy
4262 ARMAsmParser::parseRotImm(OperandVector &Operands) {
4263 MCAsmParser &Parser = getParser();
4264 const AsmToken &Tok = Parser.getTok();
4265 SMLoc S = Tok.getLoc();
4266 if (Tok.isNot(AsmToken::Identifier))
4267 return MatchOperand_NoMatch;
4268 StringRef ShiftName = Tok.getString();
4269 if (ShiftName != "ror" && ShiftName != "ROR")
4270 return MatchOperand_NoMatch;
4271 Parser.Lex(); // Eat the operator.
4272
4273 // A '#' and a rotate amount.
4274 if (Parser.getTok().isNot(AsmToken::Hash) &&
4275 Parser.getTok().isNot(AsmToken::Dollar)) {
4276 Error(Parser.getTok().getLoc(), "'#' expected");
4277 return MatchOperand_ParseFail;
4278 }
4279 Parser.Lex(); // Eat hash token.
4280 SMLoc ExLoc = Parser.getTok().getLoc();
4281
4282 const MCExpr *ShiftAmount;
4283 SMLoc EndLoc;
4284 if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4285 Error(ExLoc, "malformed rotate expression");
4286 return MatchOperand_ParseFail;
4287 }
4288 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4289 if (!CE) {
4290 Error(ExLoc, "rotate amount must be an immediate");
4291 return MatchOperand_ParseFail;
4292 }
4293
4294 int64_t Val = CE->getValue();
4295   // Rotate amount must be in {0, 8, 16, 24} (0 is an undocumented extension);
4296 // normally, zero is represented in asm by omitting the rotate operand
4297 // entirely.
4298 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
4299 Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
4300 return MatchOperand_ParseFail;
4301 }
4302
4303 Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
4304
4305 return MatchOperand_Success;
4306 }
4307
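/// parseModImm - Parse a modified immediate: an 8-bit value rotated right by
/// an even amount in [0,30]. For example, "#0xff00" is encodable as
/// (bits=0xff, rot=24) and may also be written explicitly as "#0xff, #24".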
4308 ARMAsmParser::OperandMatchResultTy
4309 ARMAsmParser::parseModImm(OperandVector &Operands) {
4310 MCAsmParser &Parser = getParser();
4311 MCAsmLexer &Lexer = getLexer();
4312 int64_t Imm1, Imm2;
4313
4314 SMLoc S = Parser.getTok().getLoc();
4315
4316 // 1) A mod_imm operand can appear in the place of a register name:
4317 // add r0, #mod_imm
4318 // add r0, r0, #mod_imm
4319 // to correctly handle the latter, we bail out as soon as we see an
4320 // identifier.
4321 //
4322 // 2) Similarly, we do not want to parse into complex operands:
4323 // mov r0, #mod_imm
4324 // mov r0, :lower16:(_foo)
4325 if (Parser.getTok().is(AsmToken::Identifier) ||
4326 Parser.getTok().is(AsmToken::Colon))
4327 return MatchOperand_NoMatch;
4328
4329 // Hash (dollar) is optional as per the ARMARM
4330 if (Parser.getTok().is(AsmToken::Hash) ||
4331 Parser.getTok().is(AsmToken::Dollar)) {
4332 // Avoid parsing into complex operands (#:)
4333 if (Lexer.peekTok().is(AsmToken::Colon))
4334 return MatchOperand_NoMatch;
4335
4336 // Eat the hash (dollar)
4337 Parser.Lex();
4338 }
4339
4340 SMLoc Sx1, Ex1;
4341 Sx1 = Parser.getTok().getLoc();
4342 const MCExpr *Imm1Exp;
4343 if (getParser().parseExpression(Imm1Exp, Ex1)) {
4344 Error(Sx1, "malformed expression");
4345 return MatchOperand_ParseFail;
4346 }
4347
4348 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
4349
4350 if (CE) {
4351 // Immediate must fit within 32-bits
4352 Imm1 = CE->getValue();
4353 int Enc = ARM_AM::getSOImmVal(Imm1);
4354 if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
4355 // We have a match!
4356 Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
4357 (Enc & 0xF00) >> 7,
4358 Sx1, Ex1));
4359 return MatchOperand_Success;
4360 }
4361
4362 // We have parsed an immediate which is not for us, fallback to a plain
4363 // immediate. This can happen for instruction aliases. For an example,
4364 // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
4365 // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
4366 // instruction with a mod_imm operand. The alias is defined such that the
4367 // parser method is shared, that's why we have to do this here.
4368 if (Parser.getTok().is(AsmToken::EndOfStatement)) {
4369 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
4370 return MatchOperand_Success;
4371 }
4372 } else {
4373 // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
4374 // MCFixup). Fallback to a plain immediate.
4375 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
4376 return MatchOperand_Success;
4377 }
4378
4379 // From this point onward, we expect the input to be a (#bits, #rot) pair
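// e.g. "mov r0, #255, #8" denotes 0xFF rotated right by 8, i.e. #0xFF000000.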
4380 if (Parser.getTok().isNot(AsmToken::Comma)) {
4381 Error(Sx1, "expected modified immediate operand: #[0, 255], #even[0-30]");
4382 return MatchOperand_ParseFail;
4383 }
4384
4385 if (Imm1 & ~0xFF) {
4386 Error(Sx1, "immediate operand must be a number in the range [0, 255]");
4387 return MatchOperand_ParseFail;
4388 }
4389
4390 // Eat the comma
4391 Parser.Lex();
4392
4393 // Repeat for #rot
4394 SMLoc Sx2, Ex2;
4395 Sx2 = Parser.getTok().getLoc();
4396
4397 // Eat the optional hash (dollar)
4398 if (Parser.getTok().is(AsmToken::Hash) ||
4399 Parser.getTok().is(AsmToken::Dollar))
4400 Parser.Lex();
4401
4402 const MCExpr *Imm2Exp;
4403 if (getParser().parseExpression(Imm2Exp, Ex2)) {
4404 Error(Sx2, "malformed expression");
4405 return MatchOperand_ParseFail;
4406 }
4407
4408 CE = dyn_cast<MCConstantExpr>(Imm2Exp);
4409
4410 if (CE) {
4411 Imm2 = CE->getValue();
4412 if (!(Imm2 & ~0x1E)) {
4413 // We have a match!
4414 Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
4415 return MatchOperand_Success;
4416 }
4417 Error(Sx2, "immediate operand must be an even number in the range [0, 30]");
4418 return MatchOperand_ParseFail;
4419 } else {
4420 Error(Sx2, "constant expression expected");
4421 return MatchOperand_ParseFail;
4422 }
4423 }
4424
4425 ARMAsmParser::OperandMatchResultTy
4426 ARMAsmParser::parseBitfield(OperandVector &Operands) {
4427 MCAsmParser &Parser = getParser();
4428 SMLoc S = Parser.getTok().getLoc();
4429 // The bitfield descriptor is really two operands, the LSB and the width.
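// e.g. "bfi r0, r1, #8, #4" has lsb = 8 and width = 4.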
4430 if (Parser.getTok().isNot(AsmToken::Hash) &&
4431 Parser.getTok().isNot(AsmToken::Dollar)) {
4432 Error(Parser.getTok().getLoc(), "'#' expected");
4433 return MatchOperand_ParseFail;
4434 }
4435 Parser.Lex(); // Eat hash token.
4436
4437 const MCExpr *LSBExpr;
4438 SMLoc E = Parser.getTok().getLoc();
4439 if (getParser().parseExpression(LSBExpr)) {
4440 Error(E, "malformed immediate expression");
4441 return MatchOperand_ParseFail;
4442 }
4443 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
4444 if (!CE) {
4445 Error(E, "'lsb' operand must be an immediate");
4446 return MatchOperand_ParseFail;
4447 }
4448
4449 int64_t LSB = CE->getValue();
4450 // The LSB must be in the range [0,31]
4451 if (LSB < 0 || LSB > 31) {
4452 Error(E, "'lsb' operand must be in the range [0,31]");
4453 return MatchOperand_ParseFail;
4454 }
4455 E = Parser.getTok().getLoc();
4456
4457 // Expect another immediate operand.
4458 if (Parser.getTok().isNot(AsmToken::Comma)) {
4459 Error(Parser.getTok().getLoc(), "too few operands");
4460 return MatchOperand_ParseFail;
4461 }
4462 Parser.Lex(); // Eat comma token.
4463 if (Parser.getTok().isNot(AsmToken::Hash) &&
4464 Parser.getTok().isNot(AsmToken::Dollar)) {
4465 Error(Parser.getTok().getLoc(), "'#' expected");
4466 return MatchOperand_ParseFail;
4467 }
4468 Parser.Lex(); // Eat hash token.
4469
4470 const MCExpr *WidthExpr;
4471 SMLoc EndLoc;
4472 if (getParser().parseExpression(WidthExpr, EndLoc)) {
4473 Error(E, "malformed immediate expression");
4474 return MatchOperand_ParseFail;
4475 }
4476 CE = dyn_cast<MCConstantExpr>(WidthExpr);
4477 if (!CE) {
4478 Error(E, "'width' operand must be an immediate");
4479 return MatchOperand_ParseFail;
4480 }
4481
4482 int64_t Width = CE->getValue();
4483 // The width must be in the range [1,32-lsb]
4484 if (Width < 1 || Width > 32 - LSB) {
4485 Error(E, "'width' operand must be in the range [1,32-lsb]");
4486 return MatchOperand_ParseFail;
4487 }
4488
4489 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
4490
4491 return MatchOperand_Success;
4492 }
4493
4494 ARMAsmParser::OperandMatchResultTy
4495 ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
4496 // Check for a post-index addressing register operand. Specifically:
4497 // postidx_reg := '+' register {, shift}
4498 // | '-' register {, shift}
4499 // | register {, shift}
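// e.g. the "r2, lsl #2" in "ldr r0, [r1], r2, lsl #2" is parsed here.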
4500
4501 // This method must return MatchOperand_NoMatch without consuming any tokens
4502 // in the case where there is no match, as other alternatives take other
4503 // parse methods.
4504 MCAsmParser &Parser = getParser();
4505 AsmToken Tok = Parser.getTok();
4506 SMLoc S = Tok.getLoc();
4507 bool haveEaten = false;
4508 bool isAdd = true;
4509 if (Tok.is(AsmToken::Plus)) {
4510 Parser.Lex(); // Eat the '+' token.
4511 haveEaten = true;
4512 } else if (Tok.is(AsmToken::Minus)) {
4513 Parser.Lex(); // Eat the '-' token.
4514 isAdd = false;
4515 haveEaten = true;
4516 }
4517
4518 SMLoc E = Parser.getTok().getEndLoc();
4519 int Reg = tryParseRegister();
4520 if (Reg == -1) {
4521 if (!haveEaten)
4522 return MatchOperand_NoMatch;
4523 Error(Parser.getTok().getLoc(), "register expected");
4524 return MatchOperand_ParseFail;
4525 }
4526
4527 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
4528 unsigned ShiftImm = 0;
4529 if (Parser.getTok().is(AsmToken::Comma)) {
4530 Parser.Lex(); // Eat the ','.
4531 if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
4532 return MatchOperand_ParseFail;
4533
4534 // FIXME: Only approximates end...may include intervening whitespace.
4535 E = Parser.getTok().getLoc();
4536 }
4537
4538 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
4539 ShiftImm, S, E));
4540
4541 return MatchOperand_Success;
4542 }
4543
4544 ARMAsmParser::OperandMatchResultTy
4545 ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
4546 // Check for a post-index addressing register or immediate operand. Specifically:
4547 // am3offset := '+' register
4548 // | '-' register
4549 // | register
4550 // | # imm
4551 // | # + imm
4552 // | # - imm
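// e.g. the "#-8" in "ldrd r0, r1, [r2], #-8" or the "r3" in "strh r0, [r1], r3".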
4553
4554 // This method must return MatchOperand_NoMatch without consuming any tokens
4555 // in the case where there is no match, as other alternatives take other
4556 // parse methods.
4557 MCAsmParser &Parser = getParser();
4558 AsmToken Tok = Parser.getTok();
4559 SMLoc S = Tok.getLoc();
4560
4561 // Do immediates first, as we always parse those if we have a '#'.
4562 if (Parser.getTok().is(AsmToken::Hash) ||
4563 Parser.getTok().is(AsmToken::Dollar)) {
4564 Parser.Lex(); // Eat '#' or '$'.
4565 // Explicitly look for a '-', as we need to encode negative zero
4566 // differently.
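// ("#-0" must still subtract on writeback, so it cannot simply be folded into "#+0".)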
4567 bool isNegative = Parser.getTok().is(AsmToken::Minus);
4568 const MCExpr *Offset;
4569 SMLoc E;
4570 if (getParser().parseExpression(Offset, E))
4571 return MatchOperand_ParseFail;
4572 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4573 if (!CE) {
4574 Error(S, "constant expression expected");
4575 return MatchOperand_ParseFail;
4576 }
4577 // Negative zero is encoded as the flag value INT32_MIN.
4578 int32_t Val = CE->getValue();
4579 if (isNegative && Val == 0)
4580 Val = INT32_MIN;
4581
4582 Operands.push_back(
4583 ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E));
4584
4585 return MatchOperand_Success;
4586 }
4587
4588
4589 bool haveEaten = false;
4590 bool isAdd = true;
4591 if (Tok.is(AsmToken::Plus)) {
4592 Parser.Lex(); // Eat the '+' token.
4593 haveEaten = true;
4594 } else if (Tok.is(AsmToken::Minus)) {
4595 Parser.Lex(); // Eat the '-' token.
4596 isAdd = false;
4597 haveEaten = true;
4598 }
4599
4600 Tok = Parser.getTok();
4601 int Reg = tryParseRegister();
4602 if (Reg == -1) {
4603 if (!haveEaten)
4604 return MatchOperand_NoMatch;
4605 Error(Tok.getLoc(), "register expected");
4606 return MatchOperand_ParseFail;
4607 }
4608
4609 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
4610 0, S, Tok.getEndLoc()));
4611
4612 return MatchOperand_Success;
4613 }
4614
4615 /// Convert parsed operands to MCInst. Needed here because this instruction
4616 /// only has two register operands, but multiplication is commutative so
4617 /// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
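/// e.g. for both "muls r0, r1, r0" and "muls r0, r0, r1", the code below selects r1
/// as Rn.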
4618 void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
4619 const OperandVector &Operands) {
4620 ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
4621 ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
4622 // If we have a three-operand form, make sure to set Rn to be the operand
4623 // that isn't the same as Rd.
4624 unsigned RegOp = 4;
4625 if (Operands.size() == 6 &&
4626 ((ARMOperand &)*Operands[4]).getReg() ==
4627 ((ARMOperand &)*Operands[3]).getReg())
4628 RegOp = 5;
4629 ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
4630 Inst.addOperand(Inst.getOperand(0));
4631 ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
4632 }
4633
4634 void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
4635 const OperandVector &Operands) {
4636 int CondOp = -1, ImmOp = -1;
4637 switch(Inst.getOpcode()) {
4638 case ARM::tB:
4639 case ARM::tBcc: CondOp = 1; ImmOp = 2; break;
4640
4641 case ARM::t2B:
4642 case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;
4643
4644 default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
4645 }
4646 // first decide whether or not the branch should be conditional
4647 // by looking at its location relative to an IT block
4648 if(inITBlock()) {
4649 // inside an IT block we cannot have any conditional branches. Any
4650 // such instruction needs to be converted to unconditional form
4651 switch(Inst.getOpcode()) {
4652 case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
4653 case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
4654 }
4655 } else {
4656 // outside IT blocks we can only have unconditional branches with AL
4657 // condition code or conditional branches with non-AL condition code
4658 unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
4659 switch(Inst.getOpcode()) {
4660 case ARM::tB:
4661 case ARM::tBcc:
4662 Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
4663 break;
4664 case ARM::t2B:
4665 case ARM::t2Bcc:
4666 Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
4667 break;
4668 }
4669 }
4670
4671 // now decide on encoding size based on branch target range
4672 switch(Inst.getOpcode()) {
4673 // widen tB to t2B if the branch target is out of range for the 16-bit encoding
4674 case ARM::tB: {
4675 ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
4676 if (!op.isSignedOffset<11, 1>() && isThumbTwo())
4677 Inst.setOpcode(ARM::t2B);
4678 break;
4679 }
4680 // widen tBcc to t2Bcc if the branch target is out of range for the 16-bit encoding
4681 case ARM::tBcc: {
4682 ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
4683 if (!op.isSignedOffset<8, 1>() && isThumbTwo())
4684 Inst.setOpcode(ARM::t2Bcc);
4685 break;
4686 }
4687 }
4688 ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
4689 ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
4690 }
4691
4692 /// Parse an ARM memory expression. Return false if successful; otherwise return
4693 /// true (after emitting an error). The first token must be a '[' when called.
4694 bool ARMAsmParser::parseMemory(OperandVector &Operands) {
4695 MCAsmParser &Parser = getParser();
4696 SMLoc S, E;
4697 assert(Parser.getTok().is(AsmToken::LBrac) &&
4698 "Token is not a Left Bracket");
4699 S = Parser.getTok().getLoc();
4700 Parser.Lex(); // Eat left bracket token.
4701
4702 const AsmToken &BaseRegTok = Parser.getTok();
4703 int BaseRegNum = tryParseRegister();
4704 if (BaseRegNum == -1)
4705 return Error(BaseRegTok.getLoc(), "register expected");
4706
4707 // The next token must either be a comma, a colon or a closing bracket.
4708 const AsmToken &Tok = Parser.getTok();
4709 if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
4710 !Tok.is(AsmToken::RBrac))
4711 return Error(Tok.getLoc(), "malformed memory operand");
4712
4713 if (Tok.is(AsmToken::RBrac)) {
4714 E = Tok.getEndLoc();
4715 Parser.Lex(); // Eat right bracket token.
4716
4717 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
4718 ARM_AM::no_shift, 0, 0, false,
4719 S, E));
4720
4721 // If there's a pre-indexing writeback marker, '!', just add it as a token
4722 // operand. It's rather odd, but syntactically valid.
4723 if (Parser.getTok().is(AsmToken::Exclaim)) {
4724 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4725 Parser.Lex(); // Eat the '!'.
4726 }
4727
4728 return false;
4729 }
4730
4731 assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
4732 "Lost colon or comma in memory operand?!");
4733 if (Tok.is(AsmToken::Comma)) {
4734 Parser.Lex(); // Eat the comma.
4735 }
4736
4737 // If we have a ':', it's an alignment specifier.
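// e.g. the ":64" in "vld1.8 {d0}, [r0:64]"; the older "[r0,:64]" form is also
// accepted since the comma has already been consumed above.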
4738 if (Parser.getTok().is(AsmToken::Colon)) {
4739 Parser.Lex(); // Eat the ':'.
4740 E = Parser.getTok().getLoc();
4741 SMLoc AlignmentLoc = Tok.getLoc();
4742
4743 const MCExpr *Expr;
4744 if (getParser().parseExpression(Expr))
4745 return true;
4746
4747 // The expression has to be a constant. Memory references with relocations
4748 // don't come through here, as they use the <label> forms of the relevant
4749 // instructions.
4750 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4751 if (!CE)
4752 return Error (E, "constant expression expected");
4753
4754 unsigned Align = 0;
4755 switch (CE->getValue()) {
4756 default:
4757 return Error(E,
4758 "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4759 case 16: Align = 2; break;
4760 case 32: Align = 4; break;
4761 case 64: Align = 8; break;
4762 case 128: Align = 16; break;
4763 case 256: Align = 32; break;
4764 }
4765
4766 // Now we should have the closing ']'
4767 if (Parser.getTok().isNot(AsmToken::RBrac))
4768 return Error(Parser.getTok().getLoc(), "']' expected");
4769 E = Parser.getTok().getEndLoc();
4770 Parser.Lex(); // Eat right bracket token.
4771
4772 // Don't worry about range checking the value here. That's handled by
4773 // the is*() predicates.
4774 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
4775 ARM_AM::no_shift, 0, Align,
4776 false, S, E, AlignmentLoc));
4777
4778 // If there's a pre-indexing writeback marker, '!', just add it as a token
4779 // operand.
4780 if (Parser.getTok().is(AsmToken::Exclaim)) {
4781 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4782 Parser.Lex(); // Eat the '!'.
4783 }
4784
4785 return false;
4786 }
4787
4788 // If we have a '#', it's an immediate offset, else assume it's a register
4789 // offset. Be friendly and also accept a plain integer (without a leading
4790 // hash) for gas compatibility.
4791 if (Parser.getTok().is(AsmToken::Hash) ||
4792 Parser.getTok().is(AsmToken::Dollar) ||
4793 Parser.getTok().is(AsmToken::Integer)) {
4794 if (Parser.getTok().isNot(AsmToken::Integer))
4795 Parser.Lex(); // Eat '#' or '$'.
4796 E = Parser.getTok().getLoc();
4797
4798 bool isNegative = getParser().getTok().is(AsmToken::Minus);
4799 const MCExpr *Offset;
4800 if (getParser().parseExpression(Offset))
4801 return true;
4802
4803 // The expression has to be a constant. Memory references with relocations
4804 // don't come through here, as they use the <label> forms of the relevant
4805 // instructions.
4806 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4807 if (!CE)
4808 return Error (E, "constant expression expected");
4809
4810 // If the constant was #-0, represent it as INT32_MIN.
4811 int32_t Val = CE->getValue();
4812 if (isNegative && Val == 0)
4813 CE = MCConstantExpr::create(INT32_MIN, getContext());
4814
4815 // Now we should have the closing ']'
4816 if (Parser.getTok().isNot(AsmToken::RBrac))
4817 return Error(Parser.getTok().getLoc(), "']' expected");
4818 E = Parser.getTok().getEndLoc();
4819 Parser.Lex(); // Eat right bracket token.
4820
4821 // Don't worry about range checking the value here. That's handled by
4822 // the is*() predicates.
4823 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4824 ARM_AM::no_shift, 0, 0,
4825 false, S, E));
4826
4827 // If there's a pre-indexing writeback marker, '!', just add it as a token
4828 // operand.
4829 if (Parser.getTok().is(AsmToken::Exclaim)) {
4830 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4831 Parser.Lex(); // Eat the '!'.
4832 }
4833
4834 return false;
4835 }
4836
4837 // The register offset is optionally preceded by a '+' or '-'
4838 bool isNegative = false;
4839 if (Parser.getTok().is(AsmToken::Minus)) {
4840 isNegative = true;
4841 Parser.Lex(); // Eat the '-'.
4842 } else if (Parser.getTok().is(AsmToken::Plus)) {
4843 // Nothing to do.
4844 Parser.Lex(); // Eat the '+'.
4845 }
4846
4847 E = Parser.getTok().getLoc();
4848 int OffsetRegNum = tryParseRegister();
4849 if (OffsetRegNum == -1)
4850 return Error(E, "register expected");
4851
4852 // If there's a shift operator, handle it.
4853 ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4854 unsigned ShiftImm = 0;
4855 if (Parser.getTok().is(AsmToken::Comma)) {
4856 Parser.Lex(); // Eat the ','.
4857 if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4858 return true;
4859 }
4860
4861 // Now we should have the closing ']'
4862 if (Parser.getTok().isNot(AsmToken::RBrac))
4863 return Error(Parser.getTok().getLoc(), "']' expected");
4864 E = Parser.getTok().getEndLoc();
4865 Parser.Lex(); // Eat right bracket token.
4866
4867 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
4868 ShiftType, ShiftImm, 0, isNegative,
4869 S, E));
4870
4871 // If there's a pre-indexing writeback marker, '!', just add it as a token
4872 // operand.
4873 if (Parser.getTok().is(AsmToken::Exclaim)) {
4874 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4875 Parser.Lex(); // Eat the '!'.
4876 }
4877
4878 return false;
4879 }
4880
4881 /// parseMemRegOffsetShift - one of these two:
4882 /// ( lsl | lsr | asr | ror ) , # shift_amount
4883 /// rrx
4884 /// Returns false if a shift is successfully parsed; otherwise returns true.
4885 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4886 unsigned &Amount) {
4887 MCAsmParser &Parser = getParser();
4888 SMLoc Loc = Parser.getTok().getLoc();
4889 const AsmToken &Tok = Parser.getTok();
4890 if (Tok.isNot(AsmToken::Identifier))
4891 return true;
4892 StringRef ShiftName = Tok.getString();
4893 if (ShiftName == "lsl" || ShiftName == "LSL" ||
4894 ShiftName == "asl" || ShiftName == "ASL")
4895 St = ARM_AM::lsl;
4896 else if (ShiftName == "lsr" || ShiftName == "LSR")
4897 St = ARM_AM::lsr;
4898 else if (ShiftName == "asr" || ShiftName == "ASR")
4899 St = ARM_AM::asr;
4900 else if (ShiftName == "ror" || ShiftName == "ROR")
4901 St = ARM_AM::ror;
4902 else if (ShiftName == "rrx" || ShiftName == "RRX")
4903 St = ARM_AM::rrx;
4904 else
4905 return Error(Loc, "illegal shift operator");
4906 Parser.Lex(); // Eat shift type token.
4907
4908 // rrx stands alone.
4909 Amount = 0;
4910 if (St != ARM_AM::rrx) {
4911 Loc = Parser.getTok().getLoc();
4912 // A '#' and a shift amount.
4913 const AsmToken &HashTok = Parser.getTok();
4914 if (HashTok.isNot(AsmToken::Hash) &&
4915 HashTok.isNot(AsmToken::Dollar))
4916 return Error(HashTok.getLoc(), "'#' expected");
4917 Parser.Lex(); // Eat hash token.
4918
4919 const MCExpr *Expr;
4920 if (getParser().parseExpression(Expr))
4921 return true;
4922 // Range check the immediate.
4923 // lsl, ror: 0 <= imm <= 31
4924 // lsr, asr: 0 <= imm <= 32
4925 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4926 if (!CE)
4927 return Error(Loc, "shift amount must be an immediate");
4928 int64_t Imm = CE->getValue();
4929 if (Imm < 0 ||
4930 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4931 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4932 return Error(Loc, "immediate shift value out of range");
4933 // If <ShiftTy> #0, canonicalize it to lsl #0 (i.e. no shift).
4934 if (Imm == 0)
4935 St = ARM_AM::lsl;
4936 // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
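// (in the instruction encoding, a shift amount field of 0 with lsr/asr denotes a
// shift of 32)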
4937 if (Imm == 32)
4938 Imm = 0;
4939 Amount = Imm;
4940 }
4941
4942 return false;
4943 }
4944
4945 /// parseFPImm - A floating point immediate expression operand.
4946 ARMAsmParser::OperandMatchResultTy
4947 ARMAsmParser::parseFPImm(OperandVector &Operands) {
4948 MCAsmParser &Parser = getParser();
4949 // Anything that can accept a floating point constant as an operand
4950 // needs to go through here, as the regular parseExpression is
4951 // integer only.
4952 //
4953 // This routine still creates a generic Immediate operand, containing
4954 // a bitcast of the 64-bit floating point value. The various operands
4955 // that accept floats can check whether the value is valid for them
4956 // via the standard is*() predicates.
4957
4958 SMLoc S = Parser.getTok().getLoc();
4959
4960 if (Parser.getTok().isNot(AsmToken::Hash) &&
4961 Parser.getTok().isNot(AsmToken::Dollar))
4962 return MatchOperand_NoMatch;
4963
4964 // Disambiguate the VMOV forms that can accept an FP immediate.
4965 // vmov.f32 <sreg>, #imm
4966 // vmov.f64 <dreg>, #imm
4967 // vmov.f32 <dreg>, #imm @ vector f32x2
4968 // vmov.f32 <qreg>, #imm @ vector f32x4
4969 //
4970 // There are also the NEON VMOV instructions which expect an
4971 // integer constant. Make sure we don't try to parse an FPImm
4972 // for these:
4973 // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4974 ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
4975 bool isVmovf = TyOp.isToken() &&
4976 (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64");
4977 ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
4978 bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
4979 Mnemonic.getToken() == "fconsts");
4980 if (!(isVmovf || isFconst))
4981 return MatchOperand_NoMatch;
4982
4983 Parser.Lex(); // Eat '#' or '$'.
4984
4985 // Handle negation, as that still comes through as a separate token.
4986 bool isNegative = false;
4987 if (Parser.getTok().is(AsmToken::Minus)) {
4988 isNegative = true;
4989 Parser.Lex();
4990 }
4991 const AsmToken &Tok = Parser.getTok();
4992 SMLoc Loc = Tok.getLoc();
4993 if (Tok.is(AsmToken::Real) && isVmovf) {
4994 APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
4995 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4996 // If we had a '-' in front, toggle the sign bit.
4997 IntVal ^= (uint64_t)isNegative << 31;
4998 Parser.Lex(); // Eat the token.
4999 Operands.push_back(ARMOperand::CreateImm(
5000 MCConstantExpr::create(IntVal, getContext()),
5001 S, Parser.getTok().getLoc()));
5002 return MatchOperand_Success;
5003 }
5004 // Also handle plain integers. Instructions which allow floating point
5005 // immediates also allow a raw encoded 8-bit value.
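// e.g. "fconsts s0, #112", where 112 (0x70) is the 8-bit encoding of +1.0.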
5006 if (Tok.is(AsmToken::Integer) && isFconst) {
5007 int64_t Val = Tok.getIntVal();
5008 Parser.Lex(); // Eat the token.
5009 if (Val > 255 || Val < 0) {
5010 Error(Loc, "encoded floating point value out of range");
5011 return MatchOperand_ParseFail;
5012 }
5013 float RealVal = ARM_AM::getFPImmFloat(Val);
5014 Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();
5015
5016 Operands.push_back(ARMOperand::CreateImm(
5017 MCConstantExpr::create(Val, getContext()), S,
5018 Parser.getTok().getLoc()));
5019 return MatchOperand_Success;
5020 }
5021
5022 Error(Loc, "invalid floating point immediate");
5023 return MatchOperand_ParseFail;
5024 }
5025
5026 /// Parse an ARM instruction operand. For now this parses the operand regardless
5027 /// of the mnemonic.
5028 bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
5029 MCAsmParser &Parser = getParser();
5030 SMLoc S, E;
5031
5032 // Check if the current operand has a custom associated parser, if so, try to
5033 // custom parse the operand, or fallback to the general approach.
5034 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
5035 if (ResTy == MatchOperand_Success)
5036 return false;
5037 // If there wasn't a custom match, try the generic matcher below. Otherwise,
5038 // there was a match, but an error occurred, in which case, just return that
5039 // the operand parsing failed.
5040 if (ResTy == MatchOperand_ParseFail)
5041 return true;
5042
5043 switch (getLexer().getKind()) {
5044 default:
5045 Error(Parser.getTok().getLoc(), "unexpected token in operand");
5046 return true;
5047 case AsmToken::Identifier: {
5048 // If we've seen a branch mnemonic, the next operand must be a label. This
5049 // is true even if the label is a register name. So "b r1" means branch to
5050 // label "r1".
5051 bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
5052 if (!ExpectLabel) {
5053 if (!tryParseRegisterWithWriteBack(Operands))
5054 return false;
5055 int Res = tryParseShiftRegister(Operands);
5056 if (Res == 0) // success
5057 return false;
5058 else if (Res == -1) // irrecoverable error
5059 return true;
5060 // If this is VMRS, check for the apsr_nzcv operand.
5061 if (Mnemonic == "vmrs" &&
5062 Parser.getTok().getString().equals_lower("apsr_nzcv")) {
5063 S = Parser.getTok().getLoc();
5064 Parser.Lex();
5065 Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
5066 return false;
5067 }
5068 }
5069
5070 // Fall through for the Identifier case that is not a register or a
5071 // special name.
5072 }
5073 case AsmToken::LParen: // parenthesized expressions like (_strcmp-4)
5074 case AsmToken::Integer: // things like 1f and 2b as a branch targets
5075 case AsmToken::String: // quoted label names.
5076 case AsmToken::Dot: { // . as a branch target
5077 // This was not a register so parse other operands that start with an
5078 // identifier (like labels) as expressions and create them as immediates.
5079 const MCExpr *IdVal;
5080 S = Parser.getTok().getLoc();
5081 if (getParser().parseExpression(IdVal))
5082 return true;
5083 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5084 Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
5085 return false;
5086 }
5087 case AsmToken::LBrac:
5088 return parseMemory(Operands);
5089 case AsmToken::LCurly:
5090 return parseRegisterList(Operands);
5091 case AsmToken::Dollar:
5092 case AsmToken::Hash: {
5093 // #42 -> immediate.
5094 S = Parser.getTok().getLoc();
5095 Parser.Lex();
5096
5097 if (Parser.getTok().isNot(AsmToken::Colon)) {
5098 bool isNegative = Parser.getTok().is(AsmToken::Minus);
5099 const MCExpr *ImmVal;
5100 if (getParser().parseExpression(ImmVal))
5101 return true;
5102 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
5103 if (CE) {
5104 int32_t Val = CE->getValue();
5105 if (isNegative && Val == 0)
5106 ImmVal = MCConstantExpr::create(INT32_MIN, getContext());
5107 }
5108 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5109 Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
5110
5111 // There can be a trailing '!' on operands that we want as a separate
5112 // '!' Token operand. Handle that here. For example, the compatibility
5113 // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
5114 if (Parser.getTok().is(AsmToken::Exclaim)) {
5115 Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
5116 Parser.getTok().getLoc()));
5117 Parser.Lex(); // Eat exclaim token
5118 }
5119 return false;
5120 }
5121 // With a ':' after the '#', it's just like a plain ':'.
5122 // FALLTHROUGH
5123 }
5124 case AsmToken::Colon: {
5125 S = Parser.getTok().getLoc();
5126 // ":lower16:" and ":upper16:" expression prefixes
5127 // FIXME: Check it's an expression prefix,
5128 // e.g. (FOO - :lower16:BAR) isn't legal.
5129 ARMMCExpr::VariantKind RefKind;
5130 if (parsePrefix(RefKind))
5131 return true;
5132
5133 const MCExpr *SubExprVal;
5134 if (getParser().parseExpression(SubExprVal))
5135 return true;
5136
5137 const MCExpr *ExprVal = ARMMCExpr::create(RefKind, SubExprVal,
5138 getContext());
5139 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5140 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
5141 return false;
5142 }
5143 case AsmToken::Equal: {
5144 S = Parser.getTok().getLoc();
5145 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
5146 return Error(S, "unexpected token in operand");
5147
5148 Parser.Lex(); // Eat '='
5149 const MCExpr *SubExprVal;
5150 if (getParser().parseExpression(SubExprVal))
5151 return true;
5152 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5153
5154 const MCExpr *CPLoc =
5155 getTargetStreamer().addConstantPoolEntry(SubExprVal, S);
5156 Operands.push_back(ARMOperand::CreateImm(CPLoc, S, E));
5157 return false;
5158 }
5159 }
5160 }
5161
5162 // parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
5163 // :lower16: and :upper16:.
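// e.g. "movw r0, :lower16:foo" and "movt r0, :upper16:foo".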
5164 bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
5165 MCAsmParser &Parser = getParser();
5166 RefKind = ARMMCExpr::VK_ARM_None;
5167
5168 // consume an optional '#' (GNU compatibility)
5169 if (getLexer().is(AsmToken::Hash))
5170 Parser.Lex();
5171
5172 // :lower16: and :upper16: modifiers
5173 assert(getLexer().is(AsmToken::Colon) && "expected a :");
5174 Parser.Lex(); // Eat ':'
5175
5176 if (getLexer().isNot(AsmToken::Identifier)) {
5177 Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
5178 return true;
5179 }
5180
5181 enum {
5182 COFF = (1 << MCObjectFileInfo::IsCOFF),
5183 ELF = (1 << MCObjectFileInfo::IsELF),
5184 MACHO = (1 << MCObjectFileInfo::IsMachO)
5185 };
5186 static const struct PrefixEntry {
5187 const char *Spelling;
5188 ARMMCExpr::VariantKind VariantKind;
5189 uint8_t SupportedFormats;
5190 } PrefixEntries[] = {
5191 { "lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO },
5192 { "upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO },
5193 };
5194
5195 StringRef IDVal = Parser.getTok().getIdentifier();
5196
5197 const auto &Prefix =
5198 std::find_if(std::begin(PrefixEntries), std::end(PrefixEntries),
5199 [&IDVal](const PrefixEntry &PE) {
5200 return PE.Spelling == IDVal;
5201 });
5202 if (Prefix == std::end(PrefixEntries)) {
5203 Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
5204 return true;
5205 }
5206
5207 uint8_t CurrentFormat;
5208 switch (getContext().getObjectFileInfo()->getObjectFileType()) {
5209 case MCObjectFileInfo::IsMachO:
5210 CurrentFormat = MACHO;
5211 break;
5212 case MCObjectFileInfo::IsELF:
5213 CurrentFormat = ELF;
5214 break;
5215 case MCObjectFileInfo::IsCOFF:
5216 CurrentFormat = COFF;
5217 break;
5218 }
5219
5220 if (~Prefix->SupportedFormats & CurrentFormat) {
5221 Error(Parser.getTok().getLoc(),
5222 "cannot represent relocation in the current file format");
5223 return true;
5224 }
5225
5226 RefKind = Prefix->VariantKind;
5227 Parser.Lex();
5228
5229 if (getLexer().isNot(AsmToken::Colon)) {
5230 Error(Parser.getTok().getLoc(), "unexpected token after prefix");
5231 return true;
5232 }
5233 Parser.Lex(); // Eat the last ':'
5234
5235 return false;
5236 }
5237
5238 /// \brief Given a mnemonic, split out possible predication code and carry
5239 /// setting letters to form a canonical mnemonic and flags.
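/// e.g. "addseq" splits into the mnemonic "add" with the carry-setting flag and an
/// EQ predication code.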
5240 //
5241 // FIXME: Would be nice to autogen this.
5242 // FIXME: This is a bit of a maze of special cases.
5243 StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
5244 unsigned &PredicationCode,
5245 bool &CarrySetting,
5246 unsigned &ProcessorIMod,
5247 StringRef &ITMask) {
5248 PredicationCode = ARMCC::AL;
5249 CarrySetting = false;
5250 ProcessorIMod = 0;
5251
5252 // Ignore some mnemonics we know aren't predicated forms.
5253 //
5254 // FIXME: Would be nice to autogen this.
5255 if ((Mnemonic == "movs" && isThumb()) ||
5256 Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" ||
5257 Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" ||
5258 Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" ||
5259 Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" ||
5260 Mnemonic == "vaclt" || Mnemonic == "vacle" || Mnemonic == "hlt" ||
5261 Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" ||
5262 Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" ||
5263 Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
5264 Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" ||
5265 Mnemonic == "vcvta" || Mnemonic == "vcvtn" || Mnemonic == "vcvtp" ||
5266 Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
5267 Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic == "hvc" ||
5268 Mnemonic.startswith("vsel"))
5269 return Mnemonic;
5270
5271 // First, split out any predication code. Ignore mnemonics we know aren't
5272 // predicated but do have a carry-set and so weren't caught above.
5273 if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
5274 Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
5275 Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
5276 Mnemonic != "sbcs" && Mnemonic != "rscs") {
5277 unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
5278 .Case("eq", ARMCC::EQ)
5279 .Case("ne", ARMCC::NE)
5280 .Case("hs", ARMCC::HS)
5281 .Case("cs", ARMCC::HS)
5282 .Case("lo", ARMCC::LO)
5283 .Case("cc", ARMCC::LO)
5284 .Case("mi", ARMCC::MI)
5285 .Case("pl", ARMCC::PL)
5286 .Case("vs", ARMCC::VS)
5287 .Case("vc", ARMCC::VC)
5288 .Case("hi", ARMCC::HI)
5289 .Case("ls", ARMCC::LS)
5290 .Case("ge", ARMCC::GE)
5291 .Case("lt", ARMCC::LT)
5292 .Case("gt", ARMCC::GT)
5293 .Case("le", ARMCC::LE)
5294 .Case("al", ARMCC::AL)
5295 .Default(~0U);
5296 if (CC != ~0U) {
5297 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
5298 PredicationCode = CC;
5299 }
5300 }
5301
5302 // Next, determine if we have a carry setting bit. We explicitly ignore all
5303 // the instructions we know end in 's'.
5304 if (Mnemonic.endswith("s") &&
5305 !(Mnemonic == "cps" || Mnemonic == "mls" ||
5306 Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
5307 Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
5308 Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
5309 Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
5310 Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
5311 Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
5312 Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
5313 Mnemonic == "vfms" || Mnemonic == "vfnms" || Mnemonic == "fconsts" ||
5314 (Mnemonic == "movs" && isThumb()))) {
5315 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
5316 CarrySetting = true;
5317 }
5318
5319 // The "cps" instruction can have a interrupt mode operand which is glued into
5320 // the mnemonic. Check if this is the case, split it and parse the imod op
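// e.g. "cpsie" splits into "cps" with ARM_PROC::IE as the interrupt mode operand.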
5321 if (Mnemonic.startswith("cps")) {
5322 // Split out any imod code.
5323 unsigned IMod =
5324 StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
5325 .Case("ie", ARM_PROC::IE)
5326 .Case("id", ARM_PROC::ID)
5327 .Default(~0U);
5328 if (IMod != ~0U) {
5329 Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
5330 ProcessorIMod = IMod;
5331 }
5332 }
5333
5334 // The "it" instruction has the condition mask on the end of the mnemonic.
5335 if (Mnemonic.startswith("it")) {
5336 ITMask = Mnemonic.slice(2, Mnemonic.size());
5337 Mnemonic = Mnemonic.slice(0, 2);
5338 }
5339
5340 return Mnemonic;
5341 }
5342
5343 /// \brief Given a canonical mnemonic, determine if the instruction ever allows
5344 /// inclusion of carry set or predication code operands.
5345 //
5346 // FIXME: It would be nice to autogen this.
5347 void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
5348 bool &CanAcceptCarrySet,
5349 bool &CanAcceptPredicationCode) {
5350 CanAcceptCarrySet =
5351 Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
5352 Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
5353 Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" ||
5354 Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" ||
5355 Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" ||
5356 Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" ||
5357 Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" ||
5358 (!isThumb() &&
5359 (Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" ||
5360 Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull"));
5361
5362 if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
5363 Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
5364 Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
5365 Mnemonic.startswith("crc32") || Mnemonic.startswith("cps") ||
5366 Mnemonic.startswith("vsel") || Mnemonic == "vmaxnm" ||
5367 Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" ||
5368 Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" ||
5369 Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
5370 Mnemonic.startswith("aes") || Mnemonic == "hvc" || Mnemonic == "setpan" ||
5371 Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
5372 (FullInst.startswith("vmull") && FullInst.endswith(".p64"))) {
5373 // These mnemonics are never predicable
5374 CanAcceptPredicationCode = false;
5375 } else if (!isThumb()) {
5376 // Some instructions are only predicable in Thumb mode
5377 CanAcceptPredicationCode =
5378 Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
5379 Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
5380 Mnemonic != "dmb" && Mnemonic != "dsb" && Mnemonic != "isb" &&
5381 Mnemonic != "pld" && Mnemonic != "pli" && Mnemonic != "pldw" &&
5382 Mnemonic != "ldc2" && Mnemonic != "ldc2l" && Mnemonic != "stc2" &&
5383 Mnemonic != "stc2l" && !Mnemonic.startswith("rfe") &&
5384 !Mnemonic.startswith("srs");
5385 } else if (isThumbOne()) {
5386 if (hasV6MOps())
5387 CanAcceptPredicationCode = Mnemonic != "movs";
5388 else
5389 CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
5390 } else
5391 CanAcceptPredicationCode = true;
5392 }
5393
5394 // \brief Some Thumb instructions have two-operand forms that are not
5395 // available as three-operand forms; convert to the two-operand form if possible.
5396 //
5397 // FIXME: We would really like to be able to tablegen'erate this.
5398 void ARMAsmParser::tryConvertingToTwoOperandForm(StringRef Mnemonic,
5399 bool CarrySetting,
5400 OperandVector &Operands) {
5401 if (Operands.size() != 6)
5402 return;
5403
5404 const auto &Op3 = static_cast<ARMOperand &>(*Operands[3]);
5405 auto &Op4 = static_cast<ARMOperand &>(*Operands[4]);
5406 if (!Op3.isReg() || !Op4.isReg())
5407 return;
5408
5409 auto Op3Reg = Op3.getReg();
5410 auto Op4Reg = Op4.getReg();
5411
5412 // For most Thumb2 cases we just generate the 3 operand form and reduce
5413 // it in processInstruction(), but the 3 operand form of ADD (t2ADDrr)
5414 // won't accept SP or PC so we do the transformation here taking care
5415 // with immediate range in the 'add sp, sp #imm' case.
5416 auto &Op5 = static_cast<ARMOperand &>(*Operands[5]);
5417 if (isThumbTwo()) {
5418 if (Mnemonic != "add")
5419 return;
5420 bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
5421 (Op5.isReg() && Op5.getReg() == ARM::PC);
5422 if (!TryTransform) {
5423 TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
5424 (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
5425 !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
5426 Op5.isImm() && !Op5.isImm0_508s4());
5427 }
5428 if (!TryTransform)
5429 return;
5430 } else if (!isThumbOne())
5431 return;
5432
5433 if (!(Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" ||
5434 Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
5435 Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" ||
5436 Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic"))
5437 return;
5438
5439 // If first 2 operands of a 3 operand instruction are the same
5440 // then transform to 2 operand version of the same instruction
5441 // e.g. 'adds r0, r0, #1' transforms to 'adds r0, #1'
5442 bool Transform = Op3Reg == Op4Reg;
5443
5444 // For commutative operations, we might be able to transform if we swap
5445 // Op4 and Op5. The 'ADD Rdm, SP, Rdm' form is already handled specially
5446 // as tADDrsp.
5447 const ARMOperand *LastOp = &Op5;
5448 bool Swap = false;
5449 if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
5450 ((Mnemonic == "add" && Op4Reg != ARM::SP) ||
5451 Mnemonic == "and" || Mnemonic == "eor" ||
5452 Mnemonic == "adc" || Mnemonic == "orr")) {
5453 Swap = true;
5454 LastOp = &Op4;
5455 Transform = true;
5456 }
5457
5458 // If both registers are the same then remove one of them from
5459 // the operand list, with certain exceptions.
5460 if (Transform) {
5461 // Don't transform 'adds Rd, Rd, Rm' or 'sub{s} Rd, Rd, Rm' because the
5462 // 2 operand forms don't exist.
5463 if (((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub") &&
5464 LastOp->isReg())
5465 Transform = false;
5466
5467 // Don't transform 'add/sub{s} Rd, Rd, #imm' if the immediate fits into
5468 // 3-bits because the ARMARM says not to.
5469 if ((Mnemonic == "add" || Mnemonic == "sub") && LastOp->isImm0_7())
5470 Transform = false;
5471 }
5472
5473 if (Transform) {
5474 if (Swap)
5475 std::swap(Op4, Op5);
5476 Operands.erase(Operands.begin() + 3);
5477 }
5478 }
5479
5480 bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
5481 OperandVector &Operands) {
5482 // FIXME: This is all horribly hacky. We really need a better way to deal
5483 // with optional operands like this in the matcher table.
5484
5485 // The 'mov' mnemonic is special. One variant has a cc_out operand, while
5486 // another does not. Specifically, the MOVW instruction does not. So we
5487 // special case it here and remove the defaulted (non-setting) cc_out
5488 // operand if that's the instruction we're trying to match.
5489 //
5490 // We do this as post-processing of the explicit operands rather than just
5491 // conditionally adding the cc_out in the first place because we need
5492 // to check the type of the parsed immediate operand.
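// e.g. "mov r0, #0xF001" is only encodable as MOVW (a 16-bit immediate that is not
// a modified immediate), so the cc_out operand must be removed.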
5493 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
5494 !static_cast<ARMOperand &>(*Operands[4]).isModImm() &&
5495 static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() &&
5496 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
5497 return true;
5498
5499 // Register-register 'add' for thumb does not have a cc_out operand
5500 // when there are only two register operands.
5501 if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
5502 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5503 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5504 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
5505 return true;
5506 // Register-register 'add' for thumb does not have a cc_out operand
5507 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
5508 // have to check the immediate range here since Thumb2 has a variant
5509 // that can handle a different range and has a cc_out operand.
5510 if (((isThumb() && Mnemonic == "add") ||
5511 (isThumbTwo() && Mnemonic == "sub")) &&
5512 Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5513 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5514 static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP &&
5515 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5516 ((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) ||
5517 static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4()))
5518 return true;
5519 // For Thumb2, add/sub immediate does not have a cc_out operand for the
5520 // imm0_4095 variant. That's the least-preferred variant when
5521 // selecting via the generic "add" mnemonic, so to know that we
5522 // should remove the cc_out operand, we have to explicitly check that
5523 // it's not one of the other variants. Ugh.
5524 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
5525 Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5526 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5527 static_cast<ARMOperand &>(*Operands[5]).isImm()) {
5528 // Nest conditions rather than one big 'if' statement for readability.
5529 //
5530 // If both registers are low, we're in an IT block, and the immediate is
5531 // in range, we should use encoding T1 instead, which has a cc_out.
5532 if (inITBlock() &&
5533 isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) &&
5534 isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) &&
5535 static_cast<ARMOperand &>(*Operands[5]).isImm0_7())
5536 return false;
5537 // Check against T3. If the second register is the PC, this is an
5538 // alternate form of ADR, which uses encoding T4, so check for that too.
5539 if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC &&
5540 static_cast<ARMOperand &>(*Operands[5]).isT2SOImm())
5541 return false;
5542
5543 // Otherwise, we use encoding T4, which does not have a cc_out
5544 // operand.
5545 return true;
5546 }
5547
5548 // The thumb2 multiply instruction doesn't have a CCOut register, so
5549 // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
5550 // use the 16-bit encoding or not.
5551 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
5552 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5553 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5554 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5555 static_cast<ARMOperand &>(*Operands[5]).isReg() &&
5556 // If the registers aren't low regs, the destination reg isn't the
5557 // same as one of the source regs, or the cc_out operand is zero
5558 // outside of an IT block, we have to use the 32-bit encoding, so
5559 // remove the cc_out operand.
5560 (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
5561 !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
5562 !isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) ||
5563 !inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() !=
5564 static_cast<ARMOperand &>(*Operands[5]).getReg() &&
5565 static_cast<ARMOperand &>(*Operands[3]).getReg() !=
5566 static_cast<ARMOperand &>(*Operands[4]).getReg())))
5567 return true;
5568
5569 // Also check the 'mul' syntax variant that doesn't specify an explicit
5570 // destination register.
5571 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
5572 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5573 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5574 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5575 // If the registers aren't low regs or the cc_out operand is zero
5576 // outside of an IT block, we have to use the 32-bit encoding, so
5577 // remove the cc_out operand.
5578 (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
5579 !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
5580 !inITBlock()))
5581 return true;
5582
5583
5584
5585 // Register-register 'add/sub' for thumb does not have a cc_out operand
5586 // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
5587 // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
5588 // right, this will result in better diagnostics (which operand is off)
5589 // anyway.
5590 if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
5591 (Operands.size() == 5 || Operands.size() == 6) &&
5592 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5593 static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP &&
5594 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5595 (static_cast<ARMOperand &>(*Operands[4]).isImm() ||
5596 (Operands.size() == 6 &&
5597 static_cast<ARMOperand &>(*Operands[5]).isImm())))
5598 return true;
5599
5600 return false;
5601 }
5602
5603 bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
5604 OperandVector &Operands) {
5605 // VRINT{Z, R, X} have a predicate operand in VFP, but not in NEON
5606 unsigned RegIdx = 3;
5607 if ((Mnemonic == "vrintz" || Mnemonic == "vrintx" || Mnemonic == "vrintr") &&
5608 (static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32" ||
5609 static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f16")) {
5610 if (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
5611 (static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32" ||
5612 static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f16"))
5613 RegIdx = 4;
5614
5615 if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() &&
5616 (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
5617 static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) ||
5618 ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
5619 static_cast<ARMOperand &>(*Operands[RegIdx]).getReg())))
5620 return true;
5621 }
5622 return false;
5623 }
5624
5625 static bool isDataTypeToken(StringRef Tok) {
5626 return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
5627 Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
5628 Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
5629 Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
5630 Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
5631 Tok == ".f" || Tok == ".d";
5632 }
5633
5634 // FIXME: This bit should probably be handled via an explicit match class
5635 // in the .td files that matches the suffix instead of having it be
5636 // a literal string token the way it is now.
5637 static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
5638 return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
5639 }
5640 static void applyMnemonicAliases(StringRef &Mnemonic, uint64_t Features,
5641 unsigned VariantID);
5642
5643 static bool RequiresVFPRegListValidation(StringRef Inst,
5644 bool &AcceptSinglePrecisionOnly,
5645 bool &AcceptDoublePrecisionOnly) {
5646 if (Inst.size() < 7)
5647 return false;
5648
5649 if (Inst.startswith("fldm") || Inst.startswith("fstm")) {
5650 StringRef AddressingMode = Inst.substr(4, 2);
5651 if (AddressingMode == "ia" || AddressingMode == "db" ||
5652 AddressingMode == "ea" || AddressingMode == "fd") {
5653 AcceptSinglePrecisionOnly = Inst[6] == 's';
5654 AcceptDoublePrecisionOnly = Inst[6] == 'd' || Inst[6] == 'x';
5655 return true;
5656 }
5657 }
5658
5659 return false;
5660 }
5661
5662 /// Parse an ARM instruction mnemonic followed by its operands.
5663 bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
5664 SMLoc NameLoc, OperandVector &Operands) {
5665 MCAsmParser &Parser = getParser();
5666 // FIXME: Can this be done via tablegen in some fashion?
5667 bool RequireVFPRegisterListCheck;
5668 bool AcceptSinglePrecisionOnly;
5669 bool AcceptDoublePrecisionOnly;
5670 RequireVFPRegisterListCheck =
5671 RequiresVFPRegListValidation(Name, AcceptSinglePrecisionOnly,
5672 AcceptDoublePrecisionOnly);
5673
5674 // Apply mnemonic aliases before doing anything else, as the destination
5675 // mnemonic may include suffixes and we want to handle them normally.
5676 // The generic tblgen'erated code does this later, at the start of
5677 // MatchInstructionImpl(), but that's too late for aliases that include
5678 // any sort of suffix.
5679 uint64_t AvailableFeatures = getAvailableFeatures();
5680 unsigned AssemblerDialect = getParser().getAssemblerDialect();
5681 applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
5682
5683 // First check for the ARM-specific .req directive.
5684 if (Parser.getTok().is(AsmToken::Identifier) &&
5685 Parser.getTok().getIdentifier() == ".req") {
5686 parseDirectiveReq(Name, NameLoc);
5687 // We always return 'error' for this, as we're done with this
5688 // statement and don't need to match the instruction.
5689 return true;
5690 }
5691
5692 // Create the leading tokens for the mnemonic, split by '.' characters.
5693 size_t Start = 0, Next = Name.find('.');
5694 StringRef Mnemonic = Name.slice(Start, Next);
5695
5696 // Split out the predication code and carry setting flag from the mnemonic.
5697 unsigned PredicationCode;
5698 unsigned ProcessorIMod;
5699 bool CarrySetting;
5700 StringRef ITMask;
5701 Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
5702 ProcessorIMod, ITMask);
5703
5704 // In Thumb1, only the branch (B) instruction can be predicated.
5705 if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
5706 Parser.eatToEndOfStatement();
5707 return Error(NameLoc, "conditional execution not supported in Thumb1");
5708 }
5709
5710 Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
5711
5712 // Handle the IT instruction ITMask. Convert it to a bitmask. This
5713 // is the mask as it will be for the IT encoding if the conditional
5714 // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
5715 // where the conditional bit0 is zero, the instruction post-processing
5716 // will adjust the mask accordingly.
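// e.g. "itet" produces an initial mask of 0b0110 here (assuming bit0 of the
// condition is 1).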
5717 if (Mnemonic == "it") {
5718 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
5719 if (ITMask.size() > 3) {
5720 Parser.eatToEndOfStatement();
5721 return Error(Loc, "too many conditions on IT instruction");
5722 }
5723 unsigned Mask = 8;
5724 for (unsigned i = ITMask.size(); i != 0; --i) {
5725 char pos = ITMask[i - 1];
5726 if (pos != 't' && pos != 'e') {
5727 Parser.eatToEndOfStatement();
5728 return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
5729 }
5730 Mask >>= 1;
5731 if (ITMask[i - 1] == 't')
5732 Mask |= 8;
5733 }
5734 Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
5735 }
5736
5737 // FIXME: This is all a pretty gross hack. We should automatically handle
5738 // optional operands like this via tblgen.
5739
5740 // Next, add the CCOut and ConditionCode operands, if needed.
5741 //
5742 // For mnemonics which can ever incorporate a carry setting bit or predication
5743 // code, our matching model involves us always generating CCOut and
5744 // ConditionCode operands to match the mnemonic "as written" and then we let
5745 // the matcher deal with finding the right instruction or generating an
5746 // appropriate error.
5747 bool CanAcceptCarrySet, CanAcceptPredicationCode;
5748 getMnemonicAcceptInfo(Mnemonic, Name, CanAcceptCarrySet, CanAcceptPredicationCode);
5749
5750 // If we had a carry-set on an instruction that can't do that, issue an
5751 // error.
5752 if (!CanAcceptCarrySet && CarrySetting) {
5753 Parser.eatToEndOfStatement();
5754 return Error(NameLoc, "instruction '" + Mnemonic +
5755 "' can not set flags, but 's' suffix specified");
5756 }
5757 // If we had a predication code on an instruction that can't do that, issue an
5758 // error.
5759 if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
5760 Parser.eatToEndOfStatement();
5761 return Error(NameLoc, "instruction '" + Mnemonic +
5762 "' is not predicable, but condition code specified");
5763 }
5764
5765 // Add the carry setting operand, if necessary.
5766 if (CanAcceptCarrySet) {
5767 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
5768 Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
5769 Loc));
5770 }
5771
5772 // Add the predication code operand, if necessary.
5773 if (CanAcceptPredicationCode) {
5774 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
5775 CarrySetting);
5776 Operands.push_back(ARMOperand::CreateCondCode(
5777 ARMCC::CondCodes(PredicationCode), Loc));
5778 }
5779
5780 // Add the processor imod operand, if necessary.
5781 if (ProcessorIMod) {
5782 Operands.push_back(ARMOperand::CreateImm(
5783 MCConstantExpr::create(ProcessorIMod, getContext()),
5784 NameLoc, NameLoc));
5785 } else if (Mnemonic == "cps" && isMClass()) {
5786 return Error(NameLoc, "instruction 'cps' requires effect for M-class");
5787 }
5788
5789 // Add the remaining tokens in the mnemonic.
5790 while (Next != StringRef::npos) {
5791 Start = Next;
5792 Next = Name.find('.', Start + 1);
5793 StringRef ExtraToken = Name.slice(Start, Next);
5794
5795 // Some NEON instructions have an optional datatype suffix that is
5796 // completely ignored. Check for that.
5797 if (isDataTypeToken(ExtraToken) &&
5798 doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
5799 continue;
5800
5801 // In ARM mode, generate an error if the .n qualifier is used.
5802 if (ExtraToken == ".n" && !isThumb()) {
5803 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
5804 Parser.eatToEndOfStatement();
5805 return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
5806 "arm mode");
5807 }
5808
5809 // The .n qualifier is always discarded as that is what the tables
5810 // and matcher expect. In ARM mode the .w qualifier has no effect,
5811 // so discard it to avoid errors that can be caused by the matcher.
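  // For example, 'add.w r0, r1, r2' keeps the '.w' token in Thumb mode but
  // drops it in ARM mode; a '.n' suffix is never kept (and is rejected above
  // in ARM mode).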
5812 if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
5813 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
5814 Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
5815 }
5816 }
5817
5818 // Read the remaining operands.
5819 if (getLexer().isNot(AsmToken::EndOfStatement)) {
5820 // Read the first operand.
5821 if (parseOperand(Operands, Mnemonic)) {
5822 Parser.eatToEndOfStatement();
5823 return true;
5824 }
5825
5826 while (getLexer().is(AsmToken::Comma)) {
5827 Parser.Lex(); // Eat the comma.
5828
5829 // Parse and remember the operand.
5830 if (parseOperand(Operands, Mnemonic)) {
5831 Parser.eatToEndOfStatement();
5832 return true;
5833 }
5834 }
5835 }
5836
5837 if (getLexer().isNot(AsmToken::EndOfStatement)) {
5838 SMLoc Loc = getLexer().getLoc();
5839 Parser.eatToEndOfStatement();
5840 return Error(Loc, "unexpected token in argument list");
5841 }
5842
5843 Parser.Lex(); // Consume the EndOfStatement
5844
5845 if (RequireVFPRegisterListCheck) {
5846 ARMOperand &Op = static_cast<ARMOperand &>(*Operands.back());
5847 if (AcceptSinglePrecisionOnly && !Op.isSPRRegList())
5848 return Error(Op.getStartLoc(),
5849 "VFP/Neon single precision register expected");
5850 if (AcceptDoublePrecisionOnly && !Op.isDPRRegList())
5851 return Error(Op.getStartLoc(),
5852 "VFP/Neon double precision register expected");
5853 }
5854
5855 tryConvertingToTwoOperandForm(Mnemonic, CarrySetting, Operands);
5856
5857 // Some instructions, mostly Thumb, have forms for the same mnemonic that
5858 // do and don't have a cc_out optional-def operand. With some spot-checks
5859 // of the operand list, we can figure out which variant we're trying to
5860 // parse and adjust accordingly before actually matching. We shouldn't ever
5861 // try to remove a cc_out operand that was explicitly set on the
5862 // mnemonic, of course (CarrySetting == true). Reason #317 why the
5863 // table-driven matcher doesn't fit well with the ARM instruction set.
5864 if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
5865 Operands.erase(Operands.begin() + 1);
5866
5867 // Some instructions have the same mnemonic, but don't always
5868 // have a predicate. Distinguish them here and delete the
5869 // predicate if needed.
5870 if (shouldOmitPredicateOperand(Mnemonic, Operands))
5871 Operands.erase(Operands.begin() + 1);
5872
5873 // ARM mode 'blx' need special handling, as the register operand version
5874 // is predicable, but the label operand version is not. So, we can't rely
5875 // on the Mnemonic based checking to correctly figure out when to put
5876 // a k_CondCode operand in the list. If we're trying to match the label
5877 // version, remove the k_CondCode operand here.
5878 if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
5879 static_cast<ARMOperand &>(*Operands[2]).isImm())
5880 Operands.erase(Operands.begin() + 1);
5881
5882 // Adjust operands of ldrexd/strexd to MCK_GPRPair.
5883 // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
5884 // a single GPRPair reg operand is used in the .td file to replace the two
5885 // GPRs. However, when parsing from asm, the two GPRs cannot be automatically
5886 // expressed as a GPRPair, so we have to manually merge them.
5887 // FIXME: We would really like to be able to tablegen'erate this.
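  // For example, 'ldrexd r0, r1, [r2]' has r0/r1 folded into the single
  // GPRPair register R0_R1 below, while 'ldrexd r1, r2, [r3]' is rejected
  // because r1 is odd.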
5888 if (!isThumb() && Operands.size() > 4 &&
5889 (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
5890 Mnemonic == "stlexd")) {
5891 bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
5892 unsigned Idx = isLoad ? 2 : 3;
5893 ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
5894 ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);
5895
5896 const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID);
5897 // Adjust only if Op1 and Op2 are GPRs.
5898 if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
5899 MRC.contains(Op2.getReg())) {
5900 unsigned Reg1 = Op1.getReg();
5901 unsigned Reg2 = Op2.getReg();
5902 unsigned Rt = MRI->getEncodingValue(Reg1);
5903 unsigned Rt2 = MRI->getEncodingValue(Reg2);
5904
5905 // Rt2 must be Rt + 1 and Rt must be even.
5906 if (Rt + 1 != Rt2 || (Rt & 1)) {
5907 Error(Op2.getStartLoc(), isLoad
5908 ? "destination operands must be sequential"
5909 : "source operands must be sequential");
5910 return true;
5911 }
5912 unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0,
5913 &(MRI->getRegClass(ARM::GPRPairRegClassID)));
5914 Operands[Idx] =
5915 ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
5916 Operands.erase(Operands.begin() + Idx + 1);
5917 }
5918 }
5919
5920 // GNU Assembler extension (compatibility)
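  // For example, GAS accepts 'ldrd r0, [r4]' with the second destination
  // register implied; the paired register (r1 here) is inserted below so the
  // operand list matches the normal two-register form.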
5921 if ((Mnemonic == "ldrd" || Mnemonic == "strd")) {
5922 ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
5923 ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
5924 if (Op3.isMem()) {
5925 assert(Op2.isReg() && "expected register argument");
5926
5927 unsigned SuperReg = MRI->getMatchingSuperReg(
5928 Op2.getReg(), ARM::gsub_0, &MRI->getRegClass(ARM::GPRPairRegClassID));
5929
5930 assert(SuperReg && "expected register pair");
5931
5932 unsigned PairedReg = MRI->getSubReg(SuperReg, ARM::gsub_1);
5933
5934 Operands.insert(
5935 Operands.begin() + 3,
5936 ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
5937 }
5938 }
5939
5940 // FIXME: As said above, this is all a pretty gross hack. This instruction
5941 // does not fit cleanly with the other "subs" forms in tblgen.
5942 // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
5943 // so the Mnemonic is the original name "subs" and delete the predicate
5944 // operand so it will match the table entry.
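  // For example, 'subs pc, lr, #4' (the usual exception-return sequence) is
  // rewritten here so it matches that dedicated entry.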
5945 if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
5946 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5947 static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC &&
5948 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5949 static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR &&
5950 static_cast<ARMOperand &>(*Operands[5]).isImm()) {
5951 Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
5952 Operands.erase(Operands.begin() + 1);
5953 }
5954 return false;
5955 }
5956
5957 // Validate context-sensitive operand constraints.
5958
5959 // Return 'true' if the register list contains a register other than a low
5960 // register or the (optionally) permitted HiReg, 'false' otherwise. If Reg
5961 // appears in the register list, set 'containsReg' to true.
checkLowRegisterList(const MCInst & Inst,unsigned OpNo,unsigned Reg,unsigned HiReg,bool & containsReg)5962 static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo,
5963 unsigned Reg, unsigned HiReg,
5964 bool &containsReg) {
5965 containsReg = false;
5966 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5967 unsigned OpReg = Inst.getOperand(i).getReg();
5968 if (OpReg == Reg)
5969 containsReg = true;
5970 // Anything other than a low register isn't legal here.
5971 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5972 return true;
5973 }
5974 return false;
5975 }
5976
5977 // Check if the specified register is in the register list of the inst,
5978 // starting at the indicated operand number.
listContainsReg(const MCInst & Inst,unsigned OpNo,unsigned Reg)5979 static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg) {
5980 for (unsigned i = OpNo, e = Inst.getNumOperands(); i < e; ++i) {
5981 unsigned OpReg = Inst.getOperand(i).getReg();
5982 if (OpReg == Reg)
5983 return true;
5984 }
5985 return false;
5986 }
5987
5988 // Return true if instruction has the interesting property of being
5989 // allowed in IT blocks, but not being predicable.
instIsBreakpoint(const MCInst & Inst)5990 static bool instIsBreakpoint(const MCInst &Inst) {
5991 return Inst.getOpcode() == ARM::tBKPT ||
5992 Inst.getOpcode() == ARM::BKPT ||
5993 Inst.getOpcode() == ARM::tHLT ||
5994 Inst.getOpcode() == ARM::HLT;
5995
5996 }
5997
validatetLDMRegList(const MCInst & Inst,const OperandVector & Operands,unsigned ListNo,bool IsARPop)5998 bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst,
5999 const OperandVector &Operands,
6000 unsigned ListNo, bool IsARPop) {
6001 const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
6002 bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
6003
6004 bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
6005 bool ListContainsLR = listContainsReg(Inst, ListNo, ARM::LR);
6006 bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
6007
6008 if (!IsARPop && ListContainsSP)
6009 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6010 "SP may not be in the register list");
6011 else if (ListContainsPC && ListContainsLR)
6012 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6013 "PC and LR may not be in the register list simultaneously");
6014 else if (inITBlock() && !lastInITBlock() && ListContainsPC)
6015 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6016 "instruction must be outside of IT block or the last "
6017 "instruction in an IT block");
6018 return false;
6019 }
6020
validatetSTMRegList(const MCInst & Inst,const OperandVector & Operands,unsigned ListNo)6021 bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst,
6022 const OperandVector &Operands,
6023 unsigned ListNo) {
6024 const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
6025 bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
6026
6027 bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
6028 bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
6029
6030 if (ListContainsSP && ListContainsPC)
6031 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6032 "SP and PC may not be in the register list");
6033 else if (ListContainsSP)
6034 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6035 "SP may not be in the register list");
6036 else if (ListContainsPC)
6037 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6038 "PC may not be in the register list");
6039 return false;
6040 }
6041
6042 // FIXME: We would really like to be able to tablegen'erate this.
validateInstruction(MCInst & Inst,const OperandVector & Operands)6043 bool ARMAsmParser::validateInstruction(MCInst &Inst,
6044 const OperandVector &Operands) {
6045 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
6046 SMLoc Loc = Operands[0]->getStartLoc();
6047
6048 // Check the IT block state first.
6049 // NOTE: BKPT and HLT instructions have the interesting property of being
6050 // allowed in IT blocks, but not being predicable. They just always execute.
6051 if (inITBlock() && !instIsBreakpoint(Inst)) {
6052 unsigned Bit = 1;
6053 if (ITState.FirstCond)
6054 ITState.FirstCond = false;
6055 else
6056 Bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
6057 // The instruction must be predicable.
6058 if (!MCID.isPredicable())
6059 return Error(Loc, "instructions in IT block must be predicable");
6060 unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
6061 unsigned ITCond = Bit ? ITState.Cond :
6062 ARMCC::getOppositeCondition(ITState.Cond);
6063 if (Cond != ITCond) {
6064 // Find the condition code Operand to get its SMLoc information.
6065 SMLoc CondLoc;
6066 for (unsigned I = 1; I < Operands.size(); ++I)
6067 if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
6068 CondLoc = Operands[I]->getStartLoc();
6069 return Error(CondLoc, "incorrect condition in IT block; got '" +
6070 StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
6071 "', but expected '" +
6072 ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
6073 }
6074 // Check for non-'al' condition codes outside of the IT block.
6075 } else if (isThumbTwo() && MCID.isPredicable() &&
6076 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
6077 ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
6078 Inst.getOpcode() != ARM::t2Bcc)
6079 return Error(Loc, "predicated instructions must be in IT block");
6080
6081 const unsigned Opcode = Inst.getOpcode();
6082 switch (Opcode) {
6083 case ARM::LDRD:
6084 case ARM::LDRD_PRE:
6085 case ARM::LDRD_POST: {
6086 const unsigned RtReg = Inst.getOperand(0).getReg();
6087
6088 // Rt can't be R14.
6089 if (RtReg == ARM::LR)
6090 return Error(Operands[3]->getStartLoc(),
6091 "Rt can't be R14");
6092
6093 const unsigned Rt = MRI->getEncodingValue(RtReg);
6094 // Rt must be even-numbered.
6095 if ((Rt & 1) == 1)
6096 return Error(Operands[3]->getStartLoc(),
6097 "Rt must be even-numbered");
6098
6099 // Rt2 must be Rt + 1.
6100 const unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
6101 if (Rt2 != Rt + 1)
6102 return Error(Operands[3]->getStartLoc(),
6103 "destination operands must be sequential");
6104
6105 if (Opcode == ARM::LDRD_PRE || Opcode == ARM::LDRD_POST) {
6106 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
6107 // For addressing modes with writeback, the base register needs to be
6108 // different from the destination registers.
6109 if (Rn == Rt || Rn == Rt2)
6110 return Error(Operands[3]->getStartLoc(),
6111 "base register needs to be different from destination "
6112 "registers");
6113 }
6114
6115 return false;
6116 }
6117 case ARM::t2LDRDi8:
6118 case ARM::t2LDRD_PRE:
6119 case ARM::t2LDRD_POST: {
6120 // Rt2 must be different from Rt.
6121 unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
6122 unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
6123 if (Rt2 == Rt)
6124 return Error(Operands[3]->getStartLoc(),
6125 "destination operands can't be identical");
6126 return false;
6127 }
6128 case ARM::t2BXJ: {
6129 const unsigned RmReg = Inst.getOperand(0).getReg();
6130 // Rm = SP is no longer unpredictable in v8-A
6131 if (RmReg == ARM::SP && !hasV8Ops())
6132 return Error(Operands[2]->getStartLoc(),
6133 "r13 (SP) is an unpredictable operand to BXJ");
6134 return false;
6135 }
6136 case ARM::STRD: {
6137 // Rt2 must be Rt + 1.
6138 unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
6139 unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
6140 if (Rt2 != Rt + 1)
6141 return Error(Operands[3]->getStartLoc(),
6142 "source operands must be sequential");
6143 return false;
6144 }
6145 case ARM::STRD_PRE:
6146 case ARM::STRD_POST: {
6147 // Rt2 must be Rt + 1.
6148 unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
6149 unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(2).getReg());
6150 if (Rt2 != Rt + 1)
6151 return Error(Operands[3]->getStartLoc(),
6152 "source operands must be sequential");
6153 return false;
6154 }
6155 case ARM::STR_PRE_IMM:
6156 case ARM::STR_PRE_REG:
6157 case ARM::STR_POST_IMM:
6158 case ARM::STR_POST_REG:
6159 case ARM::STRH_PRE:
6160 case ARM::STRH_POST:
6161 case ARM::STRB_PRE_IMM:
6162 case ARM::STRB_PRE_REG:
6163 case ARM::STRB_POST_IMM:
6164 case ARM::STRB_POST_REG: {
6165 // Rt must be different from Rn.
6166 const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
6167 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
6168
6169 if (Rt == Rn)
6170 return Error(Operands[3]->getStartLoc(),
6171 "source register and base register can't be identical");
6172 return false;
6173 }
6174 case ARM::LDR_PRE_IMM:
6175 case ARM::LDR_PRE_REG:
6176 case ARM::LDR_POST_IMM:
6177 case ARM::LDR_POST_REG:
6178 case ARM::LDRH_PRE:
6179 case ARM::LDRH_POST:
6180 case ARM::LDRSH_PRE:
6181 case ARM::LDRSH_POST:
6182 case ARM::LDRB_PRE_IMM:
6183 case ARM::LDRB_PRE_REG:
6184 case ARM::LDRB_POST_IMM:
6185 case ARM::LDRB_POST_REG:
6186 case ARM::LDRSB_PRE:
6187 case ARM::LDRSB_POST: {
6188 // Rt must be different from Rn.
6189 const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
6190 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
6191
6192 if (Rt == Rn)
6193 return Error(Operands[3]->getStartLoc(),
6194 "destination register and base register can't be identical");
6195 return false;
6196 }
6197 case ARM::SBFX:
6198 case ARM::UBFX: {
6199 // Width must be in range [1, 32-lsb].
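    // For example, 'ubfx r0, r1, #28, #4' is accepted (28 + 4 == 32), while
    // 'ubfx r0, r1, #28, #5' is rejected below.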
6200 unsigned LSB = Inst.getOperand(2).getImm();
6201 unsigned Widthm1 = Inst.getOperand(3).getImm();
6202 if (Widthm1 >= 32 - LSB)
6203 return Error(Operands[5]->getStartLoc(),
6204 "bitfield width must be in range [1,32-lsb]");
6205 return false;
6206 }
6207 // Notionally handles ARM::tLDMIA_UPD too.
6208 case ARM::tLDMIA: {
6209 // If we're parsing Thumb2, the .w variant is available and handles
6210 // most cases that are normally illegal for a Thumb1 LDM instruction.
6211 // We'll make the transformation in processInstruction() if necessary.
6212 //
6213 // Thumb LDM instructions are writeback iff the base register is not
6214 // in the register list.
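    // For example, in Thumb1 'ldmia r3!, {r0, r1}' requires the '!' because
    // r3 is not in the list, while 'ldmia r3, {r2, r3}' must not have one.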
6215 unsigned Rn = Inst.getOperand(0).getReg();
6216 bool HasWritebackToken =
6217 (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
6218 static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
6219 bool ListContainsBase;
6220 if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
6221 return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
6222 "registers must be in range r0-r7");
6223 // If we should have writeback, then there should be a '!' token.
6224 if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
6225 return Error(Operands[2]->getStartLoc(),
6226 "writeback operator '!' expected");
6227 // If we should not have writeback, there must not be a '!'. This is
6228 // true even for the 32-bit wide encodings.
6229 if (ListContainsBase && HasWritebackToken)
6230 return Error(Operands[3]->getStartLoc(),
6231 "writeback operator '!' not allowed when base register "
6232 "in register list");
6233
6234 if (validatetLDMRegList(Inst, Operands, 3))
6235 return true;
6236 break;
6237 }
6238 case ARM::LDMIA_UPD:
6239 case ARM::LDMDB_UPD:
6240 case ARM::LDMIB_UPD:
6241 case ARM::LDMDA_UPD:
6242 // ARM variants loading and updating the same register are only officially
6243 // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
6244 if (!hasV7Ops())
6245 break;
6246 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
6247 return Error(Operands.back()->getStartLoc(),
6248 "writeback register not allowed in register list");
6249 break;
6250 case ARM::t2LDMIA:
6251 case ARM::t2LDMDB:
6252 if (validatetLDMRegList(Inst, Operands, 3))
6253 return true;
6254 break;
6255 case ARM::t2STMIA:
6256 case ARM::t2STMDB:
6257 if (validatetSTMRegList(Inst, Operands, 3))
6258 return true;
6259 break;
6260 case ARM::t2LDMIA_UPD:
6261 case ARM::t2LDMDB_UPD:
6262 case ARM::t2STMIA_UPD:
6263 case ARM::t2STMDB_UPD: {
6264 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
6265 return Error(Operands.back()->getStartLoc(),
6266 "writeback register not allowed in register list");
6267
6268 if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
6269 if (validatetLDMRegList(Inst, Operands, 3))
6270 return true;
6271 } else {
6272 if (validatetSTMRegList(Inst, Operands, 3))
6273 return true;
6274 }
6275 break;
6276 }
6277 case ARM::sysLDMIA_UPD:
6278 case ARM::sysLDMDA_UPD:
6279 case ARM::sysLDMDB_UPD:
6280 case ARM::sysLDMIB_UPD:
6281 if (!listContainsReg(Inst, 3, ARM::PC))
6282 return Error(Operands[4]->getStartLoc(),
6283 "writeback register only allowed on system LDM "
6284 "if PC in register-list");
6285 break;
6286 case ARM::sysSTMIA_UPD:
6287 case ARM::sysSTMDA_UPD:
6288 case ARM::sysSTMDB_UPD:
6289 case ARM::sysSTMIB_UPD:
6290 return Error(Operands[2]->getStartLoc(),
6291 "system STM cannot have writeback register");
6292 case ARM::tMUL: {
6293 // The second source operand must be the same register as the destination
6294 // operand.
6295 //
6296 // In this case, we must directly check the parsed operands because the
6297 // cvtThumbMultiply() function is written in such a way that it guarantees
6298 // the constraint above always holds for the new Inst. Essentially, the
6299 // destination is unconditionally copied into the second source operand
6300 // without checking to see if it matches what we actually parsed.
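    // For example, 'muls r0, r1, r0' (or 'muls r0, r0, r1') is accepted here,
    // while 'muls r0, r1, r2' is rejected.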
6301 if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() !=
6302 ((ARMOperand &)*Operands[5]).getReg()) &&
6303 (((ARMOperand &)*Operands[3]).getReg() !=
6304 ((ARMOperand &)*Operands[4]).getReg())) {
6305 return Error(Operands[3]->getStartLoc(),
6306 "destination register must match source register");
6307 }
6308 break;
6309 }
6310 // Like for ldm/stm, push and pop have a hi-reg handling version in Thumb2,
6311 // so only issue a diagnostic for Thumb1. The instructions will be
6312 // switched to the t2 encodings in processInstruction() if necessary.
6313 case ARM::tPOP: {
6314 bool ListContainsBase;
6315 if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
6316 !isThumbTwo())
6317 return Error(Operands[2]->getStartLoc(),
6318 "registers must be in range r0-r7 or pc");
6319 if (validatetLDMRegList(Inst, Operands, 2, !isMClass()))
6320 return true;
6321 break;
6322 }
6323 case ARM::tPUSH: {
6324 bool ListContainsBase;
6325 if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
6326 !isThumbTwo())
6327 return Error(Operands[2]->getStartLoc(),
6328 "registers must be in range r0-r7 or lr");
6329 if (validatetSTMRegList(Inst, Operands, 2))
6330 return true;
6331 break;
6332 }
6333 case ARM::tSTMIA_UPD: {
6334 bool ListContainsBase, InvalidLowList;
6335 InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
6336 0, ListContainsBase);
6337 if (InvalidLowList && !isThumbTwo())
6338 return Error(Operands[4]->getStartLoc(),
6339 "registers must be in range r0-r7");
6340
6341 // This would be converted to a 32-bit stm, but that's not valid if the
6342 // writeback register is in the list.
6343 if (InvalidLowList && ListContainsBase)
6344 return Error(Operands[4]->getStartLoc(),
6345 "writeback operator '!' not allowed when base register "
6346 "in register list");
6347
6348 if (validatetSTMRegList(Inst, Operands, 4))
6349 return true;
6350 break;
6351 }
6352 case ARM::tADDrSP: {
6353 // If the non-SP source operand and the destination operand are not the
6354 // same, we need thumb2 (for the wide encoding), or we have an error.
6355 if (!isThumbTwo() &&
6356 Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
6357 return Error(Operands[4]->getStartLoc(),
6358 "source register must be the same as destination");
6359 }
6360 break;
6361 }
6362 // Final range checking for Thumb unconditional branch instructions.
6363 case ARM::tB:
6364 if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>())
6365 return Error(Operands[2]->getStartLoc(), "branch target out of range");
6366 break;
6367 case ARM::t2B: {
6368 int Op = (Operands[2]->isImm()) ? 2 : 3;
6369 if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<24, 1>())
6370 return Error(Operands[Op]->getStartLoc(), "branch target out of range");
6371 break;
6372 }
6373 // Final range checking for Thumb conditional branch instructions.
6374 case ARM::tBcc:
6375 if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<8, 1>())
6376 return Error(Operands[2]->getStartLoc(), "branch target out of range");
6377 break;
6378 case ARM::t2Bcc: {
6379 int Op = (Operands[2]->isImm()) ? 2 : 3;
6380 if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
6381 return Error(Operands[Op]->getStartLoc(), "branch target out of range");
6382 break;
6383 }
6384 case ARM::MOVi16:
6385 case ARM::t2MOVi16:
6386 case ARM::t2MOVTi16:
6387 {
6388 // We want to avoid misleadingly allowing something like "mov r0, <symbol>"
6389 // especially when we turn it into a movw and the expression <symbol> does
6390 // not have a :lower16: or :upper16: as part of the expression. We don't
6391 // want the behavior of silently truncating, which can be unexpected and
6392 // lead to bugs that are difficult to find since this is an easy mistake
6393 // to make.
6394 int i = (Operands[3]->isImm()) ? 3 : 4;
6395 ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
6396 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
6397 if (CE) break;
6398 const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
6399 if (!E) break;
6400 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
6401 if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
6402 ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16))
6403 return Error(
6404 Op.getStartLoc(),
6405 "immediate expression for mov requires :lower16: or :upper16");
6406 break;
6407 }
6408 }
6409
6410 return false;
6411 }
6412
getRealVSTOpcode(unsigned Opc,unsigned & Spacing)6413 static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
6414 switch(Opc) {
6415 default: llvm_unreachable("unexpected opcode!");
6416 // VST1LN
6417 case ARM::VST1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
6418 case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
6419 case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
6420 case ARM::VST1LNdWB_register_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
6421 case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
6422 case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
6423 case ARM::VST1LNdAsm_8: Spacing = 1; return ARM::VST1LNd8;
6424 case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
6425 case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
6426
6427 // VST2LN
6428 case ARM::VST2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
6429 case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
6430 case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
6431 case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
6432 case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
6433
6434 case ARM::VST2LNdWB_register_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
6435 case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
6436 case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
6437 case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
6438 case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
6439
6440 case ARM::VST2LNdAsm_8: Spacing = 1; return ARM::VST2LNd8;
6441 case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
6442 case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
6443 case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
6444 case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
6445
6446 // VST3LN
6447 case ARM::VST3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
6448 case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
6449 case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
6450 case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
6451 case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
6452 case ARM::VST3LNdWB_register_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
6453 case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
6454 case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
6455 case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
6456 case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
6457 case ARM::VST3LNdAsm_8: Spacing = 1; return ARM::VST3LNd8;
6458 case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
6459 case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
6460 case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
6461 case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
6462
6463 // VST3
6464 case ARM::VST3dWB_fixed_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
6465 case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
6466 case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
6467 case ARM::VST3qWB_fixed_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
6468 case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
6469 case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
6470 case ARM::VST3dWB_register_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
6471 case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
6472 case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
6473 case ARM::VST3qWB_register_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
6474 case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
6475 case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
6476 case ARM::VST3dAsm_8: Spacing = 1; return ARM::VST3d8;
6477 case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
6478 case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
6479 case ARM::VST3qAsm_8: Spacing = 2; return ARM::VST3q8;
6480 case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
6481 case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
6482
6483 // VST4LN
6484 case ARM::VST4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
6485 case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
6486 case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
6487 case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
6488 case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
6489 case ARM::VST4LNdWB_register_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
6490 case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
6491 case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
6492 case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
6493 case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
6494 case ARM::VST4LNdAsm_8: Spacing = 1; return ARM::VST4LNd8;
6495 case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
6496 case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
6497 case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
6498 case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
6499
6500 // VST4
6501 case ARM::VST4dWB_fixed_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
6502 case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
6503 case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
6504 case ARM::VST4qWB_fixed_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
6505 case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
6506 case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
6507 case ARM::VST4dWB_register_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
6508 case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
6509 case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
6510 case ARM::VST4qWB_register_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
6511 case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
6512 case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
6513 case ARM::VST4dAsm_8: Spacing = 1; return ARM::VST4d8;
6514 case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
6515 case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
6516 case ARM::VST4qAsm_8: Spacing = 2; return ARM::VST4q8;
6517 case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
6518 case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
6519 }
6520 }
6521
getRealVLDOpcode(unsigned Opc,unsigned & Spacing)6522 static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
6523 switch(Opc) {
6524 default: llvm_unreachable("unexpected opcode!");
6525 // VLD1LN
6526 case ARM::VLD1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
6527 case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
6528 case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
6529 case ARM::VLD1LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
6530 case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
6531 case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
6532 case ARM::VLD1LNdAsm_8: Spacing = 1; return ARM::VLD1LNd8;
6533 case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
6534 case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
6535
6536 // VLD2LN
6537 case ARM::VLD2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
6538 case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
6539 case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
6540 case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
6541 case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
6542 case ARM::VLD2LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
6543 case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
6544 case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
6545 case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
6546 case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
6547 case ARM::VLD2LNdAsm_8: Spacing = 1; return ARM::VLD2LNd8;
6548 case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
6549 case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
6550 case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
6551 case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
6552
6553 // VLD3DUP
6554 case ARM::VLD3DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
6555 case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
6556 case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
6557 case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
6558 case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
6559 case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
6560 case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
6561 case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
6562 case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
6563 case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
6564 case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
6565 case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
6566 case ARM::VLD3DUPdAsm_8: Spacing = 1; return ARM::VLD3DUPd8;
6567 case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
6568 case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
6569 case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
6570 case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
6571 case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
6572
6573 // VLD3LN
6574 case ARM::VLD3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
6575 case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
6576 case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
6577 case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
6578 case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
6579 case ARM::VLD3LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
6580 case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
6581 case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
6582 case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
6583 case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
6584 case ARM::VLD3LNdAsm_8: Spacing = 1; return ARM::VLD3LNd8;
6585 case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
6586 case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
6587 case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
6588 case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
6589
6590 // VLD3
6591 case ARM::VLD3dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
6592 case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
6593 case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
6594 case ARM::VLD3qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
6595 case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
6596 case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
6597 case ARM::VLD3dWB_register_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
6598 case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
6599 case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
6600 case ARM::VLD3qWB_register_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
6601 case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
6602 case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
6603 case ARM::VLD3dAsm_8: Spacing = 1; return ARM::VLD3d8;
6604 case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
6605 case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
6606 case ARM::VLD3qAsm_8: Spacing = 2; return ARM::VLD3q8;
6607 case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
6608 case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
6609
6610 // VLD4LN
6611 case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
6612 case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
6613 case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
6614 case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
6615 case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
6616 case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
6617 case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
6618 case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
6619 case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
6620 case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
6621 case ARM::VLD4LNdAsm_8: Spacing = 1; return ARM::VLD4LNd8;
6622 case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
6623 case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
6624 case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
6625 case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
6626
6627 // VLD4DUP
6628 case ARM::VLD4DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
6629 case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
6630 case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
6631 case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
6632 case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
6633 case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
6634 case ARM::VLD4DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
6635 case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
6636 case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
6637 case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
6638 case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
6639 case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
6640 case ARM::VLD4DUPdAsm_8: Spacing = 1; return ARM::VLD4DUPd8;
6641 case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
6642 case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
6643 case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
6644 case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
6645 case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
6646
6647 // VLD4
6648 case ARM::VLD4dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
6649 case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
6650 case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
6651 case ARM::VLD4qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
6652 case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
6653 case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
6654 case ARM::VLD4dWB_register_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
6655 case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
6656 case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
6657 case ARM::VLD4qWB_register_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
6658 case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
6659 case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
6660 case ARM::VLD4dAsm_8: Spacing = 1; return ARM::VLD4d8;
6661 case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
6662 case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
6663 case ARM::VLD4qAsm_8: Spacing = 2; return ARM::VLD4q8;
6664 case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
6665 case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
6666 }
6667 }
6668
processInstruction(MCInst & Inst,const OperandVector & Operands,MCStreamer & Out)6669 bool ARMAsmParser::processInstruction(MCInst &Inst,
6670 const OperandVector &Operands,
6671 MCStreamer &Out) {
6672 switch (Inst.getOpcode()) {
6673 // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
6674 case ARM::LDRT_POST:
6675 case ARM::LDRBT_POST: {
6676 const unsigned Opcode =
6677 (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
6678 : ARM::LDRBT_POST_IMM;
6679 MCInst TmpInst;
6680 TmpInst.setOpcode(Opcode);
6681 TmpInst.addOperand(Inst.getOperand(0));
6682 TmpInst.addOperand(Inst.getOperand(1));
6683 TmpInst.addOperand(Inst.getOperand(1));
6684 TmpInst.addOperand(MCOperand::createReg(0));
6685 TmpInst.addOperand(MCOperand::createImm(0));
6686 TmpInst.addOperand(Inst.getOperand(2));
6687 TmpInst.addOperand(Inst.getOperand(3));
6688 Inst = TmpInst;
6689 return true;
6690 }
6691 // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
6692 case ARM::STRT_POST:
6693 case ARM::STRBT_POST: {
6694 const unsigned Opcode =
6695 (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
6696 : ARM::STRBT_POST_IMM;
6697 MCInst TmpInst;
6698 TmpInst.setOpcode(Opcode);
6699 TmpInst.addOperand(Inst.getOperand(1));
6700 TmpInst.addOperand(Inst.getOperand(0));
6701 TmpInst.addOperand(Inst.getOperand(1));
6702 TmpInst.addOperand(MCOperand::createReg(0));
6703 TmpInst.addOperand(MCOperand::createImm(0));
6704 TmpInst.addOperand(Inst.getOperand(2));
6705 TmpInst.addOperand(Inst.getOperand(3));
6706 Inst = TmpInst;
6707 return true;
6708 }
6709 // Alias for alternate form of 'ADR Rd, #imm' instruction.
6710 case ARM::ADDri: {
6711 if (Inst.getOperand(1).getReg() != ARM::PC ||
6712 Inst.getOperand(5).getReg() != 0 ||
6713 !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm()))
6714 return false;
6715 MCInst TmpInst;
6716 TmpInst.setOpcode(ARM::ADR);
6717 TmpInst.addOperand(Inst.getOperand(0));
6718 if (Inst.getOperand(2).isImm()) {
6719 // Immediate (mod_imm) will be in its encoded form, we must unencode it
6720 // before passing it to the ADR instruction.
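      // For example, an encoded value of 0x20A (imm8 = 0x0A, rot = 2) decodes
      // to rotr32(0x0A, 4) == 0xA0000000.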
6721 unsigned Enc = Inst.getOperand(2).getImm();
6722 TmpInst.addOperand(MCOperand::createImm(
6723 ARM_AM::rotr32(Enc & 0xFF, (Enc & 0xF00) >> 7)));
6724 } else {
6725 // Turn PC-relative expression into absolute expression.
6726 // Reading PC yields the address of the current instruction + 8, and
6727 // the transform to adr must be biased by that.
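      // The operand built below is therefore (Dot + 8) + <original expr>,
      // where Dot is a temporary label emitted at this instruction.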
6728 MCSymbol *Dot = getContext().createTempSymbol();
6729 Out.EmitLabel(Dot);
6730 const MCExpr *OpExpr = Inst.getOperand(2).getExpr();
6731 const MCExpr *InstPC = MCSymbolRefExpr::create(Dot,
6732 MCSymbolRefExpr::VK_None,
6733 getContext());
6734 const MCExpr *Const8 = MCConstantExpr::create(8, getContext());
6735 const MCExpr *ReadPC = MCBinaryExpr::createAdd(InstPC, Const8,
6736 getContext());
6737 const MCExpr *FixupAddr = MCBinaryExpr::createAdd(ReadPC, OpExpr,
6738 getContext());
6739 TmpInst.addOperand(MCOperand::createExpr(FixupAddr));
6740 }
6741 TmpInst.addOperand(Inst.getOperand(3));
6742 TmpInst.addOperand(Inst.getOperand(4));
6743 Inst = TmpInst;
6744 return true;
6745 }
6746 // Aliases for alternate PC+imm syntax of LDR instructions.
6747 case ARM::t2LDRpcrel:
6748 // Select the narrow version if the immediate will fit.
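    // For example, 'ldr r0, [pc, #16]' can use the 16-bit tLDRpci encoding,
    // while 'ldr.w r0, [pc, #16]' (or an offset outside 1-255) keeps the
    // 32-bit t2LDRpci form.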
6749 if (Inst.getOperand(1).getImm() > 0 &&
6750 Inst.getOperand(1).getImm() <= 0xff &&
6751 !(static_cast<ARMOperand &>(*Operands[2]).isToken() &&
6752 static_cast<ARMOperand &>(*Operands[2]).getToken() == ".w"))
6753 Inst.setOpcode(ARM::tLDRpci);
6754 else
6755 Inst.setOpcode(ARM::t2LDRpci);
6756 return true;
6757 case ARM::t2LDRBpcrel:
6758 Inst.setOpcode(ARM::t2LDRBpci);
6759 return true;
6760 case ARM::t2LDRHpcrel:
6761 Inst.setOpcode(ARM::t2LDRHpci);
6762 return true;
6763 case ARM::t2LDRSBpcrel:
6764 Inst.setOpcode(ARM::t2LDRSBpci);
6765 return true;
6766 case ARM::t2LDRSHpcrel:
6767 Inst.setOpcode(ARM::t2LDRSHpci);
6768 return true;
6769 // Handle NEON VST complex aliases.
6770 case ARM::VST1LNdWB_register_Asm_8:
6771 case ARM::VST1LNdWB_register_Asm_16:
6772 case ARM::VST1LNdWB_register_Asm_32: {
6773 MCInst TmpInst;
6774 // Shuffle the operands around so the lane index operand is in the
6775 // right place.
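    // The asm-alias operand order here is (Vd, lane, Rn, alignment, Rm,
    // pred); the real VST1LN*_UPD opcode expects (Rn_wb, Rn, alignment, Rm,
    // Vd, lane, pred).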
6776 unsigned Spacing;
6777 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6778 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6779 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6780 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6781 TmpInst.addOperand(Inst.getOperand(4)); // Rm
6782 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6783 TmpInst.addOperand(Inst.getOperand(1)); // lane
6784 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6785 TmpInst.addOperand(Inst.getOperand(6));
6786 Inst = TmpInst;
6787 return true;
6788 }
6789
6790 case ARM::VST2LNdWB_register_Asm_8:
6791 case ARM::VST2LNdWB_register_Asm_16:
6792 case ARM::VST2LNdWB_register_Asm_32:
6793 case ARM::VST2LNqWB_register_Asm_16:
6794 case ARM::VST2LNqWB_register_Asm_32: {
6795 MCInst TmpInst;
6796 // Shuffle the operands around so the lane index operand is in the
6797 // right place.
6798 unsigned Spacing;
6799 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6800 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6801 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6802 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6803 TmpInst.addOperand(Inst.getOperand(4)); // Rm
6804 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6805 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
6806 Spacing));
6807 TmpInst.addOperand(Inst.getOperand(1)); // lane
6808 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6809 TmpInst.addOperand(Inst.getOperand(6));
6810 Inst = TmpInst;
6811 return true;
6812 }
6813
6814 case ARM::VST3LNdWB_register_Asm_8:
6815 case ARM::VST3LNdWB_register_Asm_16:
6816 case ARM::VST3LNdWB_register_Asm_32:
6817 case ARM::VST3LNqWB_register_Asm_16:
6818 case ARM::VST3LNqWB_register_Asm_32: {
6819 MCInst TmpInst;
6820 // Shuffle the operands around so the lane index operand is in the
6821 // right place.
6822 unsigned Spacing;
6823 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6824 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6825 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6826 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6827 TmpInst.addOperand(Inst.getOperand(4)); // Rm
6828 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6829 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
6830 Spacing));
6831 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
6832 Spacing * 2));
6833 TmpInst.addOperand(Inst.getOperand(1)); // lane
6834 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6835 TmpInst.addOperand(Inst.getOperand(6));
6836 Inst = TmpInst;
6837 return true;
6838 }
6839
6840 case ARM::VST4LNdWB_register_Asm_8:
6841 case ARM::VST4LNdWB_register_Asm_16:
6842 case ARM::VST4LNdWB_register_Asm_32:
6843 case ARM::VST4LNqWB_register_Asm_16:
6844 case ARM::VST4LNqWB_register_Asm_32: {
6845 MCInst TmpInst;
6846 // Shuffle the operands around so the lane index operand is in the
6847 // right place.
6848 unsigned Spacing;
6849 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6850 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6851 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6852 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6853 TmpInst.addOperand(Inst.getOperand(4)); // Rm
6854 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6855 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
6856 Spacing));
6857 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
6858 Spacing * 2));
6859 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
6860 Spacing * 3));
6861 TmpInst.addOperand(Inst.getOperand(1)); // lane
6862 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6863 TmpInst.addOperand(Inst.getOperand(6));
6864 Inst = TmpInst;
6865 return true;
6866 }
6867
6868 case ARM::VST1LNdWB_fixed_Asm_8:
6869 case ARM::VST1LNdWB_fixed_Asm_16:
6870 case ARM::VST1LNdWB_fixed_Asm_32: {
6871 MCInst TmpInst;
6872 // Shuffle the operands around so the lane index operand is in the
6873 // right place.
6874 unsigned Spacing;
6875 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6876 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6877 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6878 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6879 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
6880 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6881 TmpInst.addOperand(Inst.getOperand(1)); // lane
6882 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6883 TmpInst.addOperand(Inst.getOperand(5));
6884 Inst = TmpInst;
6885 return true;
6886 }
6887
6888 case ARM::VST2LNdWB_fixed_Asm_8:
6889 case ARM::VST2LNdWB_fixed_Asm_16:
6890 case ARM::VST2LNdWB_fixed_Asm_32:
6891 case ARM::VST2LNqWB_fixed_Asm_16:
6892 case ARM::VST2LNqWB_fixed_Asm_32: {
6893 MCInst TmpInst;
6894 // Shuffle the operands around so the lane index operand is in the
6895 // right place.
6896 unsigned Spacing;
6897 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6898 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6899 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6900 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6901 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
6902 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6903 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
6904 Spacing));
6905 TmpInst.addOperand(Inst.getOperand(1)); // lane
6906 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6907 TmpInst.addOperand(Inst.getOperand(5));
6908 Inst = TmpInst;
6909 return true;
6910 }
6911
6912 case ARM::VST3LNdWB_fixed_Asm_8:
6913 case ARM::VST3LNdWB_fixed_Asm_16:
6914 case ARM::VST3LNdWB_fixed_Asm_32:
6915 case ARM::VST3LNqWB_fixed_Asm_16:
6916 case ARM::VST3LNqWB_fixed_Asm_32: {
6917 MCInst TmpInst;
6918 // Shuffle the operands around so the lane index operand is in the
6919 // right place.
6920 unsigned Spacing;
6921 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6922 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6923 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6924 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6925 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
6926 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6927 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
6928 Spacing));
6929 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
6930 Spacing * 2));
6931 TmpInst.addOperand(Inst.getOperand(1)); // lane
6932 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6933 TmpInst.addOperand(Inst.getOperand(5));
6934 Inst = TmpInst;
6935 return true;
6936 }
6937
6938 case ARM::VST4LNdWB_fixed_Asm_8:
6939 case ARM::VST4LNdWB_fixed_Asm_16:
6940 case ARM::VST4LNdWB_fixed_Asm_32:
6941 case ARM::VST4LNqWB_fixed_Asm_16:
6942 case ARM::VST4LNqWB_fixed_Asm_32: {
6943 MCInst TmpInst;
6944 // Shuffle the operands around so the lane index operand is in the
6945 // right place.
6946 unsigned Spacing;
6947 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6948 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6949 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6950 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6951 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
6952 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6953 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
6954 Spacing));
6955 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
6956 Spacing * 2));
6957 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
6958 Spacing * 3));
6959 TmpInst.addOperand(Inst.getOperand(1)); // lane
6960 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6961 TmpInst.addOperand(Inst.getOperand(5));
6962 Inst = TmpInst;
6963 return true;
6964 }
6965
6966 case ARM::VST1LNdAsm_8:
6967 case ARM::VST1LNdAsm_16:
6968 case ARM::VST1LNdAsm_32: {
6969 MCInst TmpInst;
6970 // Shuffle the operands around so the lane index operand is in the
6971 // right place.
6972 unsigned Spacing;
6973 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6974 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6975 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6976 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6977 TmpInst.addOperand(Inst.getOperand(1)); // lane
6978 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6979 TmpInst.addOperand(Inst.getOperand(5));
6980 Inst = TmpInst;
6981 return true;
6982 }
6983
6984 case ARM::VST2LNdAsm_8:
6985 case ARM::VST2LNdAsm_16:
6986 case ARM::VST2LNdAsm_32:
6987 case ARM::VST2LNqAsm_16:
6988 case ARM::VST2LNqAsm_32: {
6989 MCInst TmpInst;
6990 // Shuffle the operands around so the lane index operand is in the
6991 // right place.
6992 unsigned Spacing;
6993 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6994 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6995 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6996 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6997 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
6998 Spacing));
6999 TmpInst.addOperand(Inst.getOperand(1)); // lane
7000 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7001 TmpInst.addOperand(Inst.getOperand(5));
7002 Inst = TmpInst;
7003 return true;
7004 }
7005
7006 case ARM::VST3LNdAsm_8:
7007 case ARM::VST3LNdAsm_16:
7008 case ARM::VST3LNdAsm_32:
7009 case ARM::VST3LNqAsm_16:
7010 case ARM::VST3LNqAsm_32: {
7011 MCInst TmpInst;
7012 // Shuffle the operands around so the lane index operand is in the
7013 // right place.
7014 unsigned Spacing;
7015 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7016 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7017 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7018 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7019 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7020 Spacing));
7021 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7022 Spacing * 2));
7023 TmpInst.addOperand(Inst.getOperand(1)); // lane
7024 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7025 TmpInst.addOperand(Inst.getOperand(5));
7026 Inst = TmpInst;
7027 return true;
7028 }
7029
7030 case ARM::VST4LNdAsm_8:
7031 case ARM::VST4LNdAsm_16:
7032 case ARM::VST4LNdAsm_32:
7033 case ARM::VST4LNqAsm_16:
7034 case ARM::VST4LNqAsm_32: {
7035 MCInst TmpInst;
7036 // Shuffle the operands around so the lane index operand is in the
7037 // right place.
7038 unsigned Spacing;
7039 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7040 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7041 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7042 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7043 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7044 Spacing));
7045 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7046 Spacing * 2));
7047 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7048 Spacing * 3));
7049 TmpInst.addOperand(Inst.getOperand(1)); // lane
7050 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7051 TmpInst.addOperand(Inst.getOperand(5));
7052 Inst = TmpInst;
7053 return true;
7054 }
7055
7056 // Handle NEON VLD complex aliases.
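// Note: these "Asm" pseudo-instructions carry the register list as a single
// Vd operand. The expansions below rebuild the full list from Vd plus the
// Spacing value set by getRealVLDOpcode (the D-register stride between list
// elements, e.g. {d0, d1, d2} for the single-spaced forms vs. {d0, d2, d4}
// for the double-spaced ones) and reorder the operands to match the real
// instruction.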
7057 case ARM::VLD1LNdWB_register_Asm_8:
7058 case ARM::VLD1LNdWB_register_Asm_16:
7059 case ARM::VLD1LNdWB_register_Asm_32: {
7060 MCInst TmpInst;
7061 // Shuffle the operands around so the lane index operand is in the
7062 // right place.
7063 unsigned Spacing;
7064 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7065 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7066 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7067 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7068 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7069 TmpInst.addOperand(Inst.getOperand(4)); // Rm
7070 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7071 TmpInst.addOperand(Inst.getOperand(1)); // lane
7072 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7073 TmpInst.addOperand(Inst.getOperand(6));
7074 Inst = TmpInst;
7075 return true;
7076 }
7077
7078 case ARM::VLD2LNdWB_register_Asm_8:
7079 case ARM::VLD2LNdWB_register_Asm_16:
7080 case ARM::VLD2LNdWB_register_Asm_32:
7081 case ARM::VLD2LNqWB_register_Asm_16:
7082 case ARM::VLD2LNqWB_register_Asm_32: {
7083 MCInst TmpInst;
7084 // Shuffle the operands around so the lane index operand is in the
7085 // right place.
7086 unsigned Spacing;
7087 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7088 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7089 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7090 Spacing));
7091 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7092 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7093 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7094 TmpInst.addOperand(Inst.getOperand(4)); // Rm
7095 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7096 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7097 Spacing));
7098 TmpInst.addOperand(Inst.getOperand(1)); // lane
7099 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7100 TmpInst.addOperand(Inst.getOperand(6));
7101 Inst = TmpInst;
7102 return true;
7103 }
7104
7105 case ARM::VLD3LNdWB_register_Asm_8:
7106 case ARM::VLD3LNdWB_register_Asm_16:
7107 case ARM::VLD3LNdWB_register_Asm_32:
7108 case ARM::VLD3LNqWB_register_Asm_16:
7109 case ARM::VLD3LNqWB_register_Asm_32: {
7110 MCInst TmpInst;
7111 // Shuffle the operands around so the lane index operand is in the
7112 // right place.
7113 unsigned Spacing;
7114 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7115 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7116 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7117 Spacing));
7118 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7119 Spacing * 2));
7120 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7121 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7122 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7123 TmpInst.addOperand(Inst.getOperand(4)); // Rm
7124 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7125 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7126 Spacing));
7127 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7128 Spacing * 2));
7129 TmpInst.addOperand(Inst.getOperand(1)); // lane
7130 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7131 TmpInst.addOperand(Inst.getOperand(6));
7132 Inst = TmpInst;
7133 return true;
7134 }
7135
7136 case ARM::VLD4LNdWB_register_Asm_8:
7137 case ARM::VLD4LNdWB_register_Asm_16:
7138 case ARM::VLD4LNdWB_register_Asm_32:
7139 case ARM::VLD4LNqWB_register_Asm_16:
7140 case ARM::VLD4LNqWB_register_Asm_32: {
7141 MCInst TmpInst;
7142 // Shuffle the operands around so the lane index operand is in the
7143 // right place.
7144 unsigned Spacing;
7145 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7146 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7147 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7148 Spacing));
7149 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7150 Spacing * 2));
7151 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7152 Spacing * 3));
7153 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7154 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7155 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7156 TmpInst.addOperand(Inst.getOperand(4)); // Rm
7157 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7158 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7159 Spacing));
7160 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7161 Spacing * 2));
7162 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7163 Spacing * 3));
7164 TmpInst.addOperand(Inst.getOperand(1)); // lane
7165 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7166 TmpInst.addOperand(Inst.getOperand(6));
7167 Inst = TmpInst;
7168 return true;
7169 }
7170
7171 case ARM::VLD1LNdWB_fixed_Asm_8:
7172 case ARM::VLD1LNdWB_fixed_Asm_16:
7173 case ARM::VLD1LNdWB_fixed_Asm_32: {
7174 MCInst TmpInst;
7175 // Shuffle the operands around so the lane index operand is in the
7176 // right place.
7177 unsigned Spacing;
7178 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7179 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7180 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7181 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7182 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7183 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7184 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7185 TmpInst.addOperand(Inst.getOperand(1)); // lane
7186 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7187 TmpInst.addOperand(Inst.getOperand(5));
7188 Inst = TmpInst;
7189 return true;
7190 }
7191
7192 case ARM::VLD2LNdWB_fixed_Asm_8:
7193 case ARM::VLD2LNdWB_fixed_Asm_16:
7194 case ARM::VLD2LNdWB_fixed_Asm_32:
7195 case ARM::VLD2LNqWB_fixed_Asm_16:
7196 case ARM::VLD2LNqWB_fixed_Asm_32: {
7197 MCInst TmpInst;
7198 // Shuffle the operands around so the lane index operand is in the
7199 // right place.
7200 unsigned Spacing;
7201 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7202 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7203 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7204 Spacing));
7205 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7206 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7207 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7208 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7209 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7210 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7211 Spacing));
7212 TmpInst.addOperand(Inst.getOperand(1)); // lane
7213 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7214 TmpInst.addOperand(Inst.getOperand(5));
7215 Inst = TmpInst;
7216 return true;
7217 }
7218
7219 case ARM::VLD3LNdWB_fixed_Asm_8:
7220 case ARM::VLD3LNdWB_fixed_Asm_16:
7221 case ARM::VLD3LNdWB_fixed_Asm_32:
7222 case ARM::VLD3LNqWB_fixed_Asm_16:
7223 case ARM::VLD3LNqWB_fixed_Asm_32: {
7224 MCInst TmpInst;
7225 // Shuffle the operands around so the lane index operand is in the
7226 // right place.
7227 unsigned Spacing;
7228 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7229 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7230 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7231 Spacing));
7232 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7233 Spacing * 2));
7234 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7235 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7236 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7237 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7238 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7239 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7240 Spacing));
7241 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7242 Spacing * 2));
7243 TmpInst.addOperand(Inst.getOperand(1)); // lane
7244 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7245 TmpInst.addOperand(Inst.getOperand(5));
7246 Inst = TmpInst;
7247 return true;
7248 }
7249
7250 case ARM::VLD4LNdWB_fixed_Asm_8:
7251 case ARM::VLD4LNdWB_fixed_Asm_16:
7252 case ARM::VLD4LNdWB_fixed_Asm_32:
7253 case ARM::VLD4LNqWB_fixed_Asm_16:
7254 case ARM::VLD4LNqWB_fixed_Asm_32: {
7255 MCInst TmpInst;
7256 // Shuffle the operands around so the lane index operand is in the
7257 // right place.
7258 unsigned Spacing;
7259 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7260 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7261 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7262 Spacing));
7263 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7264 Spacing * 2));
7265 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7266 Spacing * 3));
7267 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7268 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7269 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7270 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7271 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7272 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7273 Spacing));
7274 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7275 Spacing * 2));
7276 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7277 Spacing * 3));
7278 TmpInst.addOperand(Inst.getOperand(1)); // lane
7279 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7280 TmpInst.addOperand(Inst.getOperand(5));
7281 Inst = TmpInst;
7282 return true;
7283 }
7284
7285 case ARM::VLD1LNdAsm_8:
7286 case ARM::VLD1LNdAsm_16:
7287 case ARM::VLD1LNdAsm_32: {
7288 MCInst TmpInst;
7289 // Shuffle the operands around so the lane index operand is in the
7290 // right place.
7291 unsigned Spacing;
7292 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7293 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7294 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7295 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7296 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7297 TmpInst.addOperand(Inst.getOperand(1)); // lane
7298 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7299 TmpInst.addOperand(Inst.getOperand(5));
7300 Inst = TmpInst;
7301 return true;
7302 }
7303
7304 case ARM::VLD2LNdAsm_8:
7305 case ARM::VLD2LNdAsm_16:
7306 case ARM::VLD2LNdAsm_32:
7307 case ARM::VLD2LNqAsm_16:
7308 case ARM::VLD2LNqAsm_32: {
7309 MCInst TmpInst;
7310 // Shuffle the operands around so the lane index operand is in the
7311 // right place.
7312 unsigned Spacing;
7313 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7314 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7315 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7316 Spacing));
7317 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7318 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7319 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7320 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7321 Spacing));
7322 TmpInst.addOperand(Inst.getOperand(1)); // lane
7323 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7324 TmpInst.addOperand(Inst.getOperand(5));
7325 Inst = TmpInst;
7326 return true;
7327 }
7328
7329 case ARM::VLD3LNdAsm_8:
7330 case ARM::VLD3LNdAsm_16:
7331 case ARM::VLD3LNdAsm_32:
7332 case ARM::VLD3LNqAsm_16:
7333 case ARM::VLD3LNqAsm_32: {
7334 MCInst TmpInst;
7335 // Shuffle the operands around so the lane index operand is in the
7336 // right place.
7337 unsigned Spacing;
7338 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7339 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7340 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7341 Spacing));
7342 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7343 Spacing * 2));
7344 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7345 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7346 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7347 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7348 Spacing));
7349 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7350 Spacing * 2));
7351 TmpInst.addOperand(Inst.getOperand(1)); // lane
7352 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7353 TmpInst.addOperand(Inst.getOperand(5));
7354 Inst = TmpInst;
7355 return true;
7356 }
7357
7358 case ARM::VLD4LNdAsm_8:
7359 case ARM::VLD4LNdAsm_16:
7360 case ARM::VLD4LNdAsm_32:
7361 case ARM::VLD4LNqAsm_16:
7362 case ARM::VLD4LNqAsm_32: {
7363 MCInst TmpInst;
7364 // Shuffle the operands around so the lane index operand is in the
7365 // right place.
7366 unsigned Spacing;
7367 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7368 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7369 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7370 Spacing));
7371 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7372 Spacing * 2));
7373 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7374 Spacing * 3));
7375 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7376 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7377 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7378 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7379 Spacing));
7380 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7381 Spacing * 2));
7382 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7383 Spacing * 3));
7384 TmpInst.addOperand(Inst.getOperand(1)); // lane
7385 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7386 TmpInst.addOperand(Inst.getOperand(5));
7387 Inst = TmpInst;
7388 return true;
7389 }
7390
7391 // VLD3DUP single 3-element structure to all lanes instructions.
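// For example, "vld3.8 {d0[], d1[], d2[]}, [r4]" loads one 3-element
// structure and replicates it into every lane of d0-d2.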
7392 case ARM::VLD3DUPdAsm_8:
7393 case ARM::VLD3DUPdAsm_16:
7394 case ARM::VLD3DUPdAsm_32:
7395 case ARM::VLD3DUPqAsm_8:
7396 case ARM::VLD3DUPqAsm_16:
7397 case ARM::VLD3DUPqAsm_32: {
7398 MCInst TmpInst;
7399 unsigned Spacing;
7400 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7401 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7402 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7403 Spacing));
7404 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7405 Spacing * 2));
7406 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7407 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7408 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7409 TmpInst.addOperand(Inst.getOperand(4));
7410 Inst = TmpInst;
7411 return true;
7412 }
7413
7414 case ARM::VLD3DUPdWB_fixed_Asm_8:
7415 case ARM::VLD3DUPdWB_fixed_Asm_16:
7416 case ARM::VLD3DUPdWB_fixed_Asm_32:
7417 case ARM::VLD3DUPqWB_fixed_Asm_8:
7418 case ARM::VLD3DUPqWB_fixed_Asm_16:
7419 case ARM::VLD3DUPqWB_fixed_Asm_32: {
7420 MCInst TmpInst;
7421 unsigned Spacing;
7422 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7423 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7424 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7425 Spacing));
7426 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7427 Spacing * 2));
7428 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7429 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7430 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7431 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7432 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7433 TmpInst.addOperand(Inst.getOperand(4));
7434 Inst = TmpInst;
7435 return true;
7436 }
7437
7438 case ARM::VLD3DUPdWB_register_Asm_8:
7439 case ARM::VLD3DUPdWB_register_Asm_16:
7440 case ARM::VLD3DUPdWB_register_Asm_32:
7441 case ARM::VLD3DUPqWB_register_Asm_8:
7442 case ARM::VLD3DUPqWB_register_Asm_16:
7443 case ARM::VLD3DUPqWB_register_Asm_32: {
7444 MCInst TmpInst;
7445 unsigned Spacing;
7446 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7447 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7448 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7449 Spacing));
7450 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7451 Spacing * 2));
7452 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7453 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7454 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7455 TmpInst.addOperand(Inst.getOperand(3)); // Rm
7456 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7457 TmpInst.addOperand(Inst.getOperand(5));
7458 Inst = TmpInst;
7459 return true;
7460 }
7461
7462 // VLD3 multiple 3-element structure instructions.
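// For example, "vld3.8 {d0, d1, d2}, [r4]"; the double-spaced
// {d0, d2, d4} list form maps to the q variants.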
7463 case ARM::VLD3dAsm_8:
7464 case ARM::VLD3dAsm_16:
7465 case ARM::VLD3dAsm_32:
7466 case ARM::VLD3qAsm_8:
7467 case ARM::VLD3qAsm_16:
7468 case ARM::VLD3qAsm_32: {
7469 MCInst TmpInst;
7470 unsigned Spacing;
7471 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7472 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7473 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7474 Spacing));
7475 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7476 Spacing * 2));
7477 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7478 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7479 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7480 TmpInst.addOperand(Inst.getOperand(4));
7481 Inst = TmpInst;
7482 return true;
7483 }
7484
7485 case ARM::VLD3dWB_fixed_Asm_8:
7486 case ARM::VLD3dWB_fixed_Asm_16:
7487 case ARM::VLD3dWB_fixed_Asm_32:
7488 case ARM::VLD3qWB_fixed_Asm_8:
7489 case ARM::VLD3qWB_fixed_Asm_16:
7490 case ARM::VLD3qWB_fixed_Asm_32: {
7491 MCInst TmpInst;
7492 unsigned Spacing;
7493 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7494 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7495 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7496 Spacing));
7497 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7498 Spacing * 2));
7499 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7500 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7501 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7502 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7503 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7504 TmpInst.addOperand(Inst.getOperand(4));
7505 Inst = TmpInst;
7506 return true;
7507 }
7508
7509 case ARM::VLD3dWB_register_Asm_8:
7510 case ARM::VLD3dWB_register_Asm_16:
7511 case ARM::VLD3dWB_register_Asm_32:
7512 case ARM::VLD3qWB_register_Asm_8:
7513 case ARM::VLD3qWB_register_Asm_16:
7514 case ARM::VLD3qWB_register_Asm_32: {
7515 MCInst TmpInst;
7516 unsigned Spacing;
7517 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7518 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7519 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7520 Spacing));
7521 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7522 Spacing * 2));
7523 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7524 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7525 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7526 TmpInst.addOperand(Inst.getOperand(3)); // Rm
7527 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7528 TmpInst.addOperand(Inst.getOperand(5));
7529 Inst = TmpInst;
7530 return true;
7531 }
7532
7533 // VLD4DUP single 4-element structure to all lanes instructions.
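// For example, "vld4.8 {d0[], d1[], d2[], d3[]}, [r4]".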
7534 case ARM::VLD4DUPdAsm_8:
7535 case ARM::VLD4DUPdAsm_16:
7536 case ARM::VLD4DUPdAsm_32:
7537 case ARM::VLD4DUPqAsm_8:
7538 case ARM::VLD4DUPqAsm_16:
7539 case ARM::VLD4DUPqAsm_32: {
7540 MCInst TmpInst;
7541 unsigned Spacing;
7542 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7543 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7544 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7545 Spacing));
7546 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7547 Spacing * 2));
7548 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7549 Spacing * 3));
7550 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7551 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7552 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7553 TmpInst.addOperand(Inst.getOperand(4));
7554 Inst = TmpInst;
7555 return true;
7556 }
7557
7558 case ARM::VLD4DUPdWB_fixed_Asm_8:
7559 case ARM::VLD4DUPdWB_fixed_Asm_16:
7560 case ARM::VLD4DUPdWB_fixed_Asm_32:
7561 case ARM::VLD4DUPqWB_fixed_Asm_8:
7562 case ARM::VLD4DUPqWB_fixed_Asm_16:
7563 case ARM::VLD4DUPqWB_fixed_Asm_32: {
7564 MCInst TmpInst;
7565 unsigned Spacing;
7566 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7567 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7568 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7569 Spacing));
7570 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7571 Spacing * 2));
7572 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7573 Spacing * 3));
7574 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7575 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7576 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7577 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7578 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7579 TmpInst.addOperand(Inst.getOperand(4));
7580 Inst = TmpInst;
7581 return true;
7582 }
7583
7584 case ARM::VLD4DUPdWB_register_Asm_8:
7585 case ARM::VLD4DUPdWB_register_Asm_16:
7586 case ARM::VLD4DUPdWB_register_Asm_32:
7587 case ARM::VLD4DUPqWB_register_Asm_8:
7588 case ARM::VLD4DUPqWB_register_Asm_16:
7589 case ARM::VLD4DUPqWB_register_Asm_32: {
7590 MCInst TmpInst;
7591 unsigned Spacing;
7592 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7593 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7594 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7595 Spacing));
7596 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7597 Spacing * 2));
7598 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7599 Spacing * 3));
7600 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7601 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7602 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7603 TmpInst.addOperand(Inst.getOperand(3)); // Rm
7604 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7605 TmpInst.addOperand(Inst.getOperand(5));
7606 Inst = TmpInst;
7607 return true;
7608 }
7609
7610 // VLD4 multiple 4-element structure instructions.
7611 case ARM::VLD4dAsm_8:
7612 case ARM::VLD4dAsm_16:
7613 case ARM::VLD4dAsm_32:
7614 case ARM::VLD4qAsm_8:
7615 case ARM::VLD4qAsm_16:
7616 case ARM::VLD4qAsm_32: {
7617 MCInst TmpInst;
7618 unsigned Spacing;
7619 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7620 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7621 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7622 Spacing));
7623 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7624 Spacing * 2));
7625 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7626 Spacing * 3));
7627 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7628 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7629 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7630 TmpInst.addOperand(Inst.getOperand(4));
7631 Inst = TmpInst;
7632 return true;
7633 }
7634
7635 case ARM::VLD4dWB_fixed_Asm_8:
7636 case ARM::VLD4dWB_fixed_Asm_16:
7637 case ARM::VLD4dWB_fixed_Asm_32:
7638 case ARM::VLD4qWB_fixed_Asm_8:
7639 case ARM::VLD4qWB_fixed_Asm_16:
7640 case ARM::VLD4qWB_fixed_Asm_32: {
7641 MCInst TmpInst;
7642 unsigned Spacing;
7643 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7644 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7645 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7646 Spacing));
7647 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7648 Spacing * 2));
7649 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7650 Spacing * 3));
7651 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7652 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7653 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7654 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7655 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7656 TmpInst.addOperand(Inst.getOperand(4));
7657 Inst = TmpInst;
7658 return true;
7659 }
7660
7661 case ARM::VLD4dWB_register_Asm_8:
7662 case ARM::VLD4dWB_register_Asm_16:
7663 case ARM::VLD4dWB_register_Asm_32:
7664 case ARM::VLD4qWB_register_Asm_8:
7665 case ARM::VLD4qWB_register_Asm_16:
7666 case ARM::VLD4qWB_register_Asm_32: {
7667 MCInst TmpInst;
7668 unsigned Spacing;
7669 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7670 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7671 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7672 Spacing));
7673 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7674 Spacing * 2));
7675 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7676 Spacing * 3));
7677 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7678 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7679 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7680 TmpInst.addOperand(Inst.getOperand(3)); // Rm
7681 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7682 TmpInst.addOperand(Inst.getOperand(5));
7683 Inst = TmpInst;
7684 return true;
7685 }
7686
7687 // VST3 multiple 3-element structure instructions.
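// For example, "vst3.8 {d0, d1, d2}, [r4]" stores 3-element structures
// from d0-d2.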
7688 case ARM::VST3dAsm_8:
7689 case ARM::VST3dAsm_16:
7690 case ARM::VST3dAsm_32:
7691 case ARM::VST3qAsm_8:
7692 case ARM::VST3qAsm_16:
7693 case ARM::VST3qAsm_32: {
7694 MCInst TmpInst;
7695 unsigned Spacing;
7696 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7697 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7698 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7699 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7700 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7701 Spacing));
7702 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7703 Spacing * 2));
7704 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7705 TmpInst.addOperand(Inst.getOperand(4));
7706 Inst = TmpInst;
7707 return true;
7708 }
7709
7710 case ARM::VST3dWB_fixed_Asm_8:
7711 case ARM::VST3dWB_fixed_Asm_16:
7712 case ARM::VST3dWB_fixed_Asm_32:
7713 case ARM::VST3qWB_fixed_Asm_8:
7714 case ARM::VST3qWB_fixed_Asm_16:
7715 case ARM::VST3qWB_fixed_Asm_32: {
7716 MCInst TmpInst;
7717 unsigned Spacing;
7718 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7719 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7720 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7721 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7722 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7723 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7724 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7725 Spacing));
7726 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7727 Spacing * 2));
7728 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7729 TmpInst.addOperand(Inst.getOperand(4));
7730 Inst = TmpInst;
7731 return true;
7732 }
7733
7734 case ARM::VST3dWB_register_Asm_8:
7735 case ARM::VST3dWB_register_Asm_16:
7736 case ARM::VST3dWB_register_Asm_32:
7737 case ARM::VST3qWB_register_Asm_8:
7738 case ARM::VST3qWB_register_Asm_16:
7739 case ARM::VST3qWB_register_Asm_32: {
7740 MCInst TmpInst;
7741 unsigned Spacing;
7742 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7743 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7744 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7745 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7746 TmpInst.addOperand(Inst.getOperand(3)); // Rm
7747 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7748 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7749 Spacing));
7750 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7751 Spacing * 2));
7752 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7753 TmpInst.addOperand(Inst.getOperand(5));
7754 Inst = TmpInst;
7755 return true;
7756 }
7757
7758 // VST4 multiple 4-element structure instructions.
7759 case ARM::VST4dAsm_8:
7760 case ARM::VST4dAsm_16:
7761 case ARM::VST4dAsm_32:
7762 case ARM::VST4qAsm_8:
7763 case ARM::VST4qAsm_16:
7764 case ARM::VST4qAsm_32: {
7765 MCInst TmpInst;
7766 unsigned Spacing;
7767 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7768 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7769 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7770 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7771 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7772 Spacing));
7773 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7774 Spacing * 2));
7775 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7776 Spacing * 3));
7777 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7778 TmpInst.addOperand(Inst.getOperand(4));
7779 Inst = TmpInst;
7780 return true;
7781 }
7782
7783 case ARM::VST4dWB_fixed_Asm_8:
7784 case ARM::VST4dWB_fixed_Asm_16:
7785 case ARM::VST4dWB_fixed_Asm_32:
7786 case ARM::VST4qWB_fixed_Asm_8:
7787 case ARM::VST4qWB_fixed_Asm_16:
7788 case ARM::VST4qWB_fixed_Asm_32: {
7789 MCInst TmpInst;
7790 unsigned Spacing;
7791 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7792 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7793 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7794 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7795 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7796 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7797 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7798 Spacing));
7799 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7800 Spacing * 2));
7801 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7802 Spacing * 3));
7803 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7804 TmpInst.addOperand(Inst.getOperand(4));
7805 Inst = TmpInst;
7806 return true;
7807 }
7808
7809 case ARM::VST4dWB_register_Asm_8:
7810 case ARM::VST4dWB_register_Asm_16:
7811 case ARM::VST4dWB_register_Asm_32:
7812 case ARM::VST4qWB_register_Asm_8:
7813 case ARM::VST4qWB_register_Asm_16:
7814 case ARM::VST4qWB_register_Asm_32: {
7815 MCInst TmpInst;
7816 unsigned Spacing;
7817 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7818 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7819 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7820 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7821 TmpInst.addOperand(Inst.getOperand(3)); // Rm
7822 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7823 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7824 Spacing));
7825 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7826 Spacing * 2));
7827 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7828 Spacing * 3));
7829 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7830 TmpInst.addOperand(Inst.getOperand(5));
7831 Inst = TmpInst;
7832 return true;
7833 }
7834
7835 // Handle encoding choice for the shift-immediate instructions.
7836 case ARM::t2LSLri:
7837 case ARM::t2LSRri:
7838 case ARM::t2ASRri: {
7839 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7840 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
7841 Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
7842 !(static_cast<ARMOperand &>(*Operands[3]).isToken() &&
7843 static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w")) {
7844 unsigned NewOpc;
7845 switch (Inst.getOpcode()) {
7846 default: llvm_unreachable("unexpected opcode");
7847 case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
7848 case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
7849 case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
7850 }
7851 // The Thumb1 operands aren't in the same order (cc_out comes right after Rd). Awesome, eh?
7852 MCInst TmpInst;
7853 TmpInst.setOpcode(NewOpc);
7854 TmpInst.addOperand(Inst.getOperand(0));
7855 TmpInst.addOperand(Inst.getOperand(5));
7856 TmpInst.addOperand(Inst.getOperand(1));
7857 TmpInst.addOperand(Inst.getOperand(2));
7858 TmpInst.addOperand(Inst.getOperand(3));
7859 TmpInst.addOperand(Inst.getOperand(4));
7860 Inst = TmpInst;
7861 return true;
7862 }
7863 return false;
7864 }
7865
7866 // Handle the Thumb2 mode MOV complex aliases.
7867 case ARM::t2MOVsr:
7868 case ARM::t2MOVSsr: {
7869 // Which instruction to expand to depends on the CCOut operand, on
7870 // whether we're in an IT block, and on whether the register operands
7871 // are low registers.
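// Roughly: "movs rd, rd, lsl rm" with low registers outside an IT block can
// use the 16-bit tLSLrr encoding, while the non-flag-setting "mov" form is
// only narrow inside an IT block.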
7872 bool isNarrow = false;
7873 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7874 isARMLowRegister(Inst.getOperand(1).getReg()) &&
7875 isARMLowRegister(Inst.getOperand(2).getReg()) &&
7876 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
7877 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
7878 isNarrow = true;
7879 MCInst TmpInst;
7880 unsigned newOpc;
7881 switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
7882 default: llvm_unreachable("unexpected opcode!");
7883 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
7884 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
7885 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
7886 case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr; break;
7887 }
7888 TmpInst.setOpcode(newOpc);
7889 TmpInst.addOperand(Inst.getOperand(0)); // Rd
7890 if (isNarrow)
7891 TmpInst.addOperand(MCOperand::createReg(
7892 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
7893 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7894 TmpInst.addOperand(Inst.getOperand(2)); // Rm
7895 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7896 TmpInst.addOperand(Inst.getOperand(5));
7897 if (!isNarrow)
7898 TmpInst.addOperand(MCOperand::createReg(
7899 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
7900 Inst = TmpInst;
7901 return true;
7902 }
7903 case ARM::t2MOVsi:
7904 case ARM::t2MOVSsi: {
7905 // Which instruction to expand to depends on the CCOut operand, on
7906 // whether we're in an IT block, and on whether the register operands
7907 // are low registers.
7908 bool isNarrow = false;
7909 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7910 isARMLowRegister(Inst.getOperand(1).getReg()) &&
7911 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
7912 isNarrow = true;
7913 MCInst TmpInst;
7914 unsigned newOpc;
7915 switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
7916 default: llvm_unreachable("unexpected opcode!");
7917 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
7918 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
7919 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
7920 case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
7921 case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
7922 }
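// A shift amount of #32 (only expressible for asr/lsr) is encoded as 0.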
7923 unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
7924 if (Amount == 32) Amount = 0;
7925 TmpInst.setOpcode(newOpc);
7926 TmpInst.addOperand(Inst.getOperand(0)); // Rd
7927 if (isNarrow)
7928 TmpInst.addOperand(MCOperand::createReg(
7929 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
7930 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7931 if (newOpc != ARM::t2RRX)
7932 TmpInst.addOperand(MCOperand::createImm(Amount));
7933 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7934 TmpInst.addOperand(Inst.getOperand(4));
7935 if (!isNarrow)
7936 TmpInst.addOperand(MCOperand::createReg(
7937 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
7938 Inst = TmpInst;
7939 return true;
7940 }
7941 // Handle the ARM mode MOV complex aliases.
7942 case ARM::ASRr:
7943 case ARM::LSRr:
7944 case ARM::LSLr:
7945 case ARM::RORr: {
7946 ARM_AM::ShiftOpc ShiftTy;
7947 switch(Inst.getOpcode()) {
7948 default: llvm_unreachable("unexpected opcode!");
7949 case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
7950 case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
7951 case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
7952 case ARM::RORr: ShiftTy = ARM_AM::ror; break;
7953 }
7954 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
7955 MCInst TmpInst;
7956 TmpInst.setOpcode(ARM::MOVsr);
7957 TmpInst.addOperand(Inst.getOperand(0)); // Rd
7958 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7959 TmpInst.addOperand(Inst.getOperand(2)); // Rm
7960 TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
7961 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7962 TmpInst.addOperand(Inst.getOperand(4));
7963 TmpInst.addOperand(Inst.getOperand(5)); // cc_out
7964 Inst = TmpInst;
7965 return true;
7966 }
7967 case ARM::ASRi:
7968 case ARM::LSRi:
7969 case ARM::LSLi:
7970 case ARM::RORi: {
7971 ARM_AM::ShiftOpc ShiftTy;
7972 switch(Inst.getOpcode()) {
7973 default: llvm_unreachable("unexpected opcode!");
7974 case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
7975 case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
7976 case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
7977 case ARM::RORi: ShiftTy = ARM_AM::ror; break;
7978 }
7979 // A shift by zero is a plain MOVr, not a MOVsi.
7980 unsigned Amt = Inst.getOperand(2).getImm();
7981 unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
7982 // A shift by 32 should be encoded as 0 when permitted
7983 if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
7984 Amt = 0;
7985 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
7986 MCInst TmpInst;
7987 TmpInst.setOpcode(Opc);
7988 TmpInst.addOperand(Inst.getOperand(0)); // Rd
7989 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7990 if (Opc == ARM::MOVsi)
7991 TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
7992 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7993 TmpInst.addOperand(Inst.getOperand(4));
7994 TmpInst.addOperand(Inst.getOperand(5)); // cc_out
7995 Inst = TmpInst;
7996 return true;
7997 }
7998 case ARM::RRXi: {
7999 unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
8000 MCInst TmpInst;
8001 TmpInst.setOpcode(ARM::MOVsi);
8002 TmpInst.addOperand(Inst.getOperand(0)); // Rd
8003 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8004 TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
8005 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8006 TmpInst.addOperand(Inst.getOperand(3));
8007 TmpInst.addOperand(Inst.getOperand(4)); // cc_out
8008 Inst = TmpInst;
8009 return true;
8010 }
8011 case ARM::t2LDMIA_UPD: {
8012 // If this is a load of a single register, then we should use
8013 // a post-indexed LDR instruction instead, per the ARM ARM.
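// For example, "ldmia r1!, {r2}" becomes "ldr r2, [r1], #4".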
8014 if (Inst.getNumOperands() != 5)
8015 return false;
8016 MCInst TmpInst;
8017 TmpInst.setOpcode(ARM::t2LDR_POST);
8018 TmpInst.addOperand(Inst.getOperand(4)); // Rt
8019 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8020 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8021 TmpInst.addOperand(MCOperand::createImm(4));
8022 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8023 TmpInst.addOperand(Inst.getOperand(3));
8024 Inst = TmpInst;
8025 return true;
8026 }
8027 case ARM::t2STMDB_UPD: {
8028 // If this is a store of a single register, then we should use
8029 // a pre-indexed STR instruction instead, per the ARM ARM.
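// For example, "stmdb r1!, {r2}" becomes "str r2, [r1, #-4]!".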
8030 if (Inst.getNumOperands() != 5)
8031 return false;
8032 MCInst TmpInst;
8033 TmpInst.setOpcode(ARM::t2STR_PRE);
8034 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8035 TmpInst.addOperand(Inst.getOperand(4)); // Rt
8036 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8037 TmpInst.addOperand(MCOperand::createImm(-4));
8038 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8039 TmpInst.addOperand(Inst.getOperand(3));
8040 Inst = TmpInst;
8041 return true;
8042 }
8043 case ARM::LDMIA_UPD:
8044 // If this is a load of a single register via a 'pop', then we should use
8045 // a post-indexed LDR instruction instead, per the ARM ARM.
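// For example, "pop {r0}" becomes "ldr r0, [sp], #4".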
8046 if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" &&
8047 Inst.getNumOperands() == 5) {
8048 MCInst TmpInst;
8049 TmpInst.setOpcode(ARM::LDR_POST_IMM);
8050 TmpInst.addOperand(Inst.getOperand(4)); // Rt
8051 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8052 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8053 TmpInst.addOperand(MCOperand::createReg(0)); // am2offset
8054 TmpInst.addOperand(MCOperand::createImm(4));
8055 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8056 TmpInst.addOperand(Inst.getOperand(3));
8057 Inst = TmpInst;
8058 return true;
8059 }
8060 break;
8061 case ARM::STMDB_UPD:
8062 // If this is a store of a single register via a 'push', then we should use
8063 // a pre-indexed STR instruction instead, per the ARM ARM.
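// For example, "push {r0}" becomes "str r0, [sp, #-4]!".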
8064 if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" &&
8065 Inst.getNumOperands() == 5) {
8066 MCInst TmpInst;
8067 TmpInst.setOpcode(ARM::STR_PRE_IMM);
8068 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8069 TmpInst.addOperand(Inst.getOperand(4)); // Rt
8070 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
8071 TmpInst.addOperand(MCOperand::createImm(-4));
8072 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8073 TmpInst.addOperand(Inst.getOperand(3));
8074 Inst = TmpInst;
8075 }
8076 break;
8077 case ARM::t2ADDri12:
8078 // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
8079 // mnemonic was used (not "addw"), encoding T3 is preferred.
8080 if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "add" ||
8081 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
8082 break;
8083 Inst.setOpcode(ARM::t2ADDri);
8084 Inst.addOperand(MCOperand::createReg(0)); // cc_out
8085 break;
8086 case ARM::t2SUBri12:
8087 // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
8088 // mnemonic was used (not "subw"), encoding T3 is preferred.
8089 if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "sub" ||
8090 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
8091 break;
8092 Inst.setOpcode(ARM::t2SUBri);
8093 Inst.addOperand(MCOperand::createReg(0)); // cc_out
8094 break;
8095 case ARM::tADDi8:
8096 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
8097 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
8098 // to encoding T2 if <Rd> is specified and encoding T2 is preferred
8099 // to encoding T1 if <Rd> is omitted."
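// For example, "adds r0, r0, #1" prefers T1 (tADDi3), while "adds r0, #1"
// keeps the T2 (tADDi8) encoding.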
8100 if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
8101 Inst.setOpcode(ARM::tADDi3);
8102 return true;
8103 }
8104 break;
8105 case ARM::tSUBi8:
8106 // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
8107 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
8108 // to encoding T2 if <Rd> is specified and encoding T2 is preferred
8109 // to encoding T1 if <Rd> is omitted."
8110 if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
8111 Inst.setOpcode(ARM::tSUBi3);
8112 return true;
8113 }
8114 break;
8115 case ARM::t2ADDri:
8116 case ARM::t2SUBri: {
8117 // If the destination and first source operand are the same, and
8118 // the flags are compatible with the current IT status, use encoding T2
8119 // instead of T3, for compatibility with the system 'as'. Make sure the
8120 // wide encoding wasn't explicitly requested.
8121 if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
8122 !isARMLowRegister(Inst.getOperand(0).getReg()) ||
8123 (unsigned)Inst.getOperand(2).getImm() > 255 ||
8124 ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
8125 (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
8126 (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
8127 static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w"))
8128 break;
8129 MCInst TmpInst;
8130 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
8131 ARM::tADDi8 : ARM::tSUBi8);
8132 TmpInst.addOperand(Inst.getOperand(0));
8133 TmpInst.addOperand(Inst.getOperand(5));
8134 TmpInst.addOperand(Inst.getOperand(0));
8135 TmpInst.addOperand(Inst.getOperand(2));
8136 TmpInst.addOperand(Inst.getOperand(3));
8137 TmpInst.addOperand(Inst.getOperand(4));
8138 Inst = TmpInst;
8139 return true;
8140 }
8141 case ARM::t2ADDrr: {
8142 // If the destination and first source operand are the same, and
8143 // there's no setting of the flags, use encoding T2 instead of T3.
8144 // Note that this is only for ADD, not SUB. This mirrors the system
8145 // 'as' behaviour. Also take advantage of ADD being commutative.
8146 // Make sure the wide encoding wasn't explicit.
8147 bool Swap = false;
8148 auto DestReg = Inst.getOperand(0).getReg();
8149 bool Transform = DestReg == Inst.getOperand(1).getReg();
8150 if (!Transform && DestReg == Inst.getOperand(2).getReg()) {
8151 Transform = true;
8152 Swap = true;
8153 }
8154 if (!Transform ||
8155 Inst.getOperand(5).getReg() != 0 ||
8156 (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
8157 static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w"))
8158 break;
8159 MCInst TmpInst;
8160 TmpInst.setOpcode(ARM::tADDhirr);
8161 TmpInst.addOperand(Inst.getOperand(0));
8162 TmpInst.addOperand(Inst.getOperand(0));
8163 TmpInst.addOperand(Inst.getOperand(Swap ? 1 : 2));
8164 TmpInst.addOperand(Inst.getOperand(3));
8165 TmpInst.addOperand(Inst.getOperand(4));
8166 Inst = TmpInst;
8167 return true;
8168 }
8169 case ARM::tADDrSP: {
8170 // If the non-SP source operand and the destination operand are not the
8171 // same, we need to use the 32-bit encoding if it's available.
8172 if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
8173 Inst.setOpcode(ARM::t2ADDrr);
8174 Inst.addOperand(MCOperand::createReg(0)); // cc_out
8175 return true;
8176 }
8177 break;
8178 }
8179 case ARM::tB:
8180 // A Thumb conditional branch outside of an IT block is a tBcc.
8181 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
8182 Inst.setOpcode(ARM::tBcc);
8183 return true;
8184 }
8185 break;
8186 case ARM::t2B:
8187 // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
8188 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
8189 Inst.setOpcode(ARM::t2Bcc);
8190 return true;
8191 }
8192 break;
8193 case ARM::t2Bcc:
8194 // If the conditional is AL or we're in an IT block, we really want t2B.
8195 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
8196 Inst.setOpcode(ARM::t2B);
8197 return true;
8198 }
8199 break;
8200 case ARM::tBcc:
8201 // If the conditional is AL, we really want tB.
8202 if (Inst.getOperand(1).getImm() == ARMCC::AL) {
8203 Inst.setOpcode(ARM::tB);
8204 return true;
8205 }
8206 break;
8207 case ARM::tLDMIA: {
8208 // If the register list contains any high registers, or if the writeback
8209 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
8210 // instead if we're in Thumb2. Otherwise, this should have generated
8211 // an error in validateInstruction().
8212 unsigned Rn = Inst.getOperand(0).getReg();
8213 bool hasWritebackToken =
8214 (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
8215 static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
8216 bool listContainsBase;
8217 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
8218 (!listContainsBase && !hasWritebackToken) ||
8219 (listContainsBase && hasWritebackToken)) {
8220 // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
8221 assert (isThumbTwo());
8222 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
8223 // If we're switching to the updating version, we need to insert
8224 // the writeback tied operand.
8225 if (hasWritebackToken)
8226 Inst.insert(Inst.begin(),
8227 MCOperand::createReg(Inst.getOperand(0).getReg()));
8228 return true;
8229 }
8230 break;
8231 }
8232 case ARM::tSTMIA_UPD: {
8233 // If the register list contains any high registers, we need to use
8234 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
8235 // should have generated an error in validateInstruction().
8236 unsigned Rn = Inst.getOperand(0).getReg();
8237 bool listContainsBase;
8238 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
8239 // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
8240 assert (isThumbTwo());
8241 Inst.setOpcode(ARM::t2STMIA_UPD);
8242 return true;
8243 }
8244 break;
8245 }
8246 case ARM::tPOP: {
8247 bool listContainsBase;
8248 // If the register list contains any high registers, we need to use
8249 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
8250 // should have generated an error in validateInstruction().
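// For example, "pop {r0, r8}" needs the 32-bit t2LDMIA_UPD form with SP as
// the written-back base register.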
8251 if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
8252 return false;
8253 assert (isThumbTwo());
8254 Inst.setOpcode(ARM::t2LDMIA_UPD);
8255 // Add the base register and writeback operands.
8256 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8257 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8258 return true;
8259 }
8260 case ARM::tPUSH: {
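// As with tPOP: if the register list needs anything other than low
// registers (plus LR), switch to the 32-bit t2STMDB_UPD with SP as the
// written-back base. Otherwise, this should have generated an error in
// validateInstruction().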
8261 bool listContainsBase;
8262 if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
8263 return false;
8264 assert (isThumbTwo());
8265 Inst.setOpcode(ARM::t2STMDB_UPD);
8266 // Add the base register and writeback operands.
8267 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8268 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8269 return true;
8270 }
8271 case ARM::t2MOVi: {
8272 // If we can use the 16-bit encoding and the user didn't explicitly
8273 // request the 32-bit variant, transform it here.
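// For example, "movs r0, #1" outside an IT block (or "mov r0, #1" inside
// one) can use the 16-bit tMOVi8 encoding.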
8274 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8275 (unsigned)Inst.getOperand(1).getImm() <= 255 &&
8276 ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
8277 Inst.getOperand(4).getReg() == ARM::CPSR) ||
8278 (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
8279 (!static_cast<ARMOperand &>(*Operands[2]).isToken() ||
8280 static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) {
8281 // The operands aren't in the same order for tMOVi8...
8282 MCInst TmpInst;
8283 TmpInst.setOpcode(ARM::tMOVi8);
8284 TmpInst.addOperand(Inst.getOperand(0));
8285 TmpInst.addOperand(Inst.getOperand(4));
8286 TmpInst.addOperand(Inst.getOperand(1));
8287 TmpInst.addOperand(Inst.getOperand(2));
8288 TmpInst.addOperand(Inst.getOperand(3));
8289 Inst = TmpInst;
8290 return true;
8291 }
8292 break;
8293 }
8294 case ARM::t2MOVr: {
8295 // If we can use the 16-bit encoding and the user didn't explicitly
8296 // request the 32-bit variant, transform it here.
8297 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8298 isARMLowRegister(Inst.getOperand(1).getReg()) &&
8299 Inst.getOperand(2).getImm() == ARMCC::AL &&
8300 Inst.getOperand(4).getReg() == ARM::CPSR &&
8301 (!static_cast<ARMOperand &>(*Operands[2]).isToken() ||
8302 static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) {
8303 // The operands aren't the same for tMOV[S]r... (no cc_out)
8304 MCInst TmpInst;
8305 TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
8306 TmpInst.addOperand(Inst.getOperand(0));
8307 TmpInst.addOperand(Inst.getOperand(1));
8308 TmpInst.addOperand(Inst.getOperand(2));
8309 TmpInst.addOperand(Inst.getOperand(3));
8310 Inst = TmpInst;
8311 return true;
8312 }
8313 break;
8314 }
8315 case ARM::t2SXTH:
8316 case ARM::t2SXTB:
8317 case ARM::t2UXTH:
8318 case ARM::t2UXTB: {
8319 // If we can use the 16-bit encoding and the user didn't explicitly
8320 // request the 32-bit variant, transform it here.
8321 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8322 isARMLowRegister(Inst.getOperand(1).getReg()) &&
8323 Inst.getOperand(2).getImm() == 0 &&
8324 (!static_cast<ARMOperand &>(*Operands[2]).isToken() ||
8325 static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) {
8326 unsigned NewOpc;
8327 switch (Inst.getOpcode()) {
8328 default: llvm_unreachable("Illegal opcode!");
8329 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
8330 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
8331 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
8332 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
8333 }
8334 // The operands aren't the same for thumb1 (no rotate operand).
8335 MCInst TmpInst;
8336 TmpInst.setOpcode(NewOpc);
8337 TmpInst.addOperand(Inst.getOperand(0));
8338 TmpInst.addOperand(Inst.getOperand(1));
8339 TmpInst.addOperand(Inst.getOperand(3));
8340 TmpInst.addOperand(Inst.getOperand(4));
8341 Inst = TmpInst;
8342 return true;
8343 }
8344 break;
8345 }
8346 case ARM::MOVsi: {
8347 ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
8348 // rrx shifts and asr/lsr of #32 are encoded as 0
8349 if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
8350 return false;
8351 if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
8352 // Shifting by zero is accepted as a vanilla 'MOVr'
8353 MCInst TmpInst;
8354 TmpInst.setOpcode(ARM::MOVr);
8355 TmpInst.addOperand(Inst.getOperand(0));
8356 TmpInst.addOperand(Inst.getOperand(1));
8357 TmpInst.addOperand(Inst.getOperand(3));
8358 TmpInst.addOperand(Inst.getOperand(4));
8359 TmpInst.addOperand(Inst.getOperand(5));
8360 Inst = TmpInst;
8361 return true;
8362 }
8363 return false;
8364 }
8365 case ARM::ANDrsi:
8366 case ARM::ORRrsi:
8367 case ARM::EORrsi:
8368 case ARM::BICrsi:
8369 case ARM::SUBrsi:
8370 case ARM::ADDrsi: {
8371 unsigned newOpc;
8372 ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
8373 if (SOpc == ARM_AM::rrx) return false;
8374 switch (Inst.getOpcode()) {
8375 default: llvm_unreachable("unexpected opcode!");
8376 case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
8377 case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
8378 case ARM::EORrsi: newOpc = ARM::EORrr; break;
8379 case ARM::BICrsi: newOpc = ARM::BICrr; break;
8380 case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
8381 case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
8382 }
8383 // If the shift is by zero, use the non-shifted instruction definition.
8384 // The exception is for right shifts, where 0 == 32
8385 if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
8386 !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
8387 MCInst TmpInst;
8388 TmpInst.setOpcode(newOpc);
8389 TmpInst.addOperand(Inst.getOperand(0));
8390 TmpInst.addOperand(Inst.getOperand(1));
8391 TmpInst.addOperand(Inst.getOperand(2));
8392 TmpInst.addOperand(Inst.getOperand(4));
8393 TmpInst.addOperand(Inst.getOperand(5));
8394 TmpInst.addOperand(Inst.getOperand(6));
8395 Inst = TmpInst;
8396 return true;
8397 }
8398 return false;
8399 }
8400 case ARM::ITasm:
8401 case ARM::t2IT: {
8402 // In the architectural encoding, a mask bit for each condition after
8403 // the first means 't' when it matches the low bit of the condition
8404 // code. The parser always uses 1 to mean 't', so XOR-toggle the bits
8405 // if the low bit of the condition code is zero.
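// Illustrative example (with the parser's 1-means-'t' convention): "itte eq"
// is parsed with mask 0b1010; EQ has a zero low bit, so the condition bits
// are flipped to give the architectural mask 0b0110.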
8406 MCOperand &MO = Inst.getOperand(1);
8407 unsigned Mask = MO.getImm();
8408 unsigned OrigMask = Mask;
8409 unsigned TZ = countTrailingZeros(Mask);
8410 if ((Inst.getOperand(0).getImm() & 1) == 0) {
8411 assert(Mask && TZ <= 3 && "illegal IT mask value!");
8412 Mask ^= (0xE << TZ) & 0xF;
8413 }
8414 MO.setImm(Mask);
8415
8416 // Set up the IT block state according to the IT instruction we just
8417 // matched.
8418 assert(!inITBlock() && "nested IT blocks?!");
8419 ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
8420 ITState.Mask = OrigMask; // Use the original mask, not the updated one.
8421 ITState.CurPosition = 0;
8422 ITState.FirstCond = true;
8423 break;
8424 }
8425 case ARM::t2LSLrr:
8426 case ARM::t2LSRrr:
8427 case ARM::t2ASRrr:
8428 case ARM::t2SBCrr:
8429 case ARM::t2RORrr:
8430 case ARM::t2BICrr:
8431 {
8432 // Assemblers should use the narrow encodings of these instructions when permissible.
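    // e.g. "lsls r0, r0, r1" outside an IT block, or the non-flag-setting
    // "lsl r0, r0, r1" inside one, can use the 16-bit register-shift encoding.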
8433 if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
8434 isARMLowRegister(Inst.getOperand(2).getReg())) &&
8435 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
8436 ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
8437 (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
8438 (!static_cast<ARMOperand &>(*Operands[3]).isToken() ||
8439 !static_cast<ARMOperand &>(*Operands[3]).getToken().equals_lower(
8440 ".w"))) {
8441 unsigned NewOpc;
8442 switch (Inst.getOpcode()) {
8443 default: llvm_unreachable("unexpected opcode");
8444 case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
8445 case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
8446 case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
8447 case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
8448 case ARM::t2RORrr: NewOpc = ARM::tROR; break;
8449 case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
8450 }
8451 MCInst TmpInst;
8452 TmpInst.setOpcode(NewOpc);
8453 TmpInst.addOperand(Inst.getOperand(0));
8454 TmpInst.addOperand(Inst.getOperand(5));
8455 TmpInst.addOperand(Inst.getOperand(1));
8456 TmpInst.addOperand(Inst.getOperand(2));
8457 TmpInst.addOperand(Inst.getOperand(3));
8458 TmpInst.addOperand(Inst.getOperand(4));
8459 Inst = TmpInst;
8460 return true;
8461 }
8462 return false;
8463 }
8464 case ARM::t2ANDrr:
8465 case ARM::t2EORrr:
8466 case ARM::t2ADCrr:
8467 case ARM::t2ORRrr:
8468 {
8469 // Assemblers should use the narrow encodings of these instructions when permissible.
8470 // These instructions are special in that they are commutable, so shorter encodings
8471 // are available more often.
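    // e.g. "ands r0, r1, r0" can still use the 16-bit tAND encoding because
    // the sources can be swapped so that the tied operand matches Rd.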
8472 if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
8473 isARMLowRegister(Inst.getOperand(2).getReg())) &&
8474 (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
8475 Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
8476 ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
8477 (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
8478 (!static_cast<ARMOperand &>(*Operands[3]).isToken() ||
8479 !static_cast<ARMOperand &>(*Operands[3]).getToken().equals_lower(
8480 ".w"))) {
8481 unsigned NewOpc;
8482 switch (Inst.getOpcode()) {
8483 default: llvm_unreachable("unexpected opcode");
8484 case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
8485 case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
8486 case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
8487 case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
8488 }
8489 MCInst TmpInst;
8490 TmpInst.setOpcode(NewOpc);
8491 TmpInst.addOperand(Inst.getOperand(0));
8492 TmpInst.addOperand(Inst.getOperand(5));
8493 if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
8494 TmpInst.addOperand(Inst.getOperand(1));
8495 TmpInst.addOperand(Inst.getOperand(2));
8496 } else {
8497 TmpInst.addOperand(Inst.getOperand(2));
8498 TmpInst.addOperand(Inst.getOperand(1));
8499 }
8500 TmpInst.addOperand(Inst.getOperand(3));
8501 TmpInst.addOperand(Inst.getOperand(4));
8502 Inst = TmpInst;
8503 return true;
8504 }
8505 return false;
8506 }
8507 }
8508 return false;
8509 }
8510
8511 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
8512 // 16-bit thumb arithmetic instructions either require or preclude the 'S'
8513 // suffix depending on whether they're in an IT block or not.
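  // e.g. the 16-bit "adds r0, r0, #1" encoding is only selectable outside an
  // IT block; inside an IT block only the non-flag-setting "add" form matches.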
8514 unsigned Opc = Inst.getOpcode();
8515 const MCInstrDesc &MCID = MII.get(Opc);
8516 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
8517 assert(MCID.hasOptionalDef() &&
8518 "optionally flag setting instruction missing optional def operand");
8519 assert(MCID.NumOperands == Inst.getNumOperands() &&
8520 "operand count mismatch!");
8521 // Find the optional-def operand (cc_out).
8522 unsigned OpNo;
8523 for (OpNo = 0;
8524 OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
8525 ++OpNo)
8526 ;
8527 // If we're parsing Thumb1, reject it completely.
8528 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
8529 return Match_MnemonicFail;
8530 // If we're parsing Thumb2, which form is legal depends on whether we're
8531 // in an IT block.
8532 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
8533 !inITBlock())
8534 return Match_RequiresITBlock;
8535 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
8536 inITBlock())
8537 return Match_RequiresNotITBlock;
8538 } else if (isThumbOne()) {
8539 // Some high-register supporting Thumb1 encodings only allow both registers
8540 // to be from r0-r7 when in Thumb2.
8541 if (Opc == ARM::tADDhirr && !hasV6MOps() &&
8542 isARMLowRegister(Inst.getOperand(1).getReg()) &&
8543 isARMLowRegister(Inst.getOperand(2).getReg()))
8544 return Match_RequiresThumb2;
8545 // Others only require ARMv6 or later.
8546 else if (Opc == ARM::tMOVr && !hasV6Ops() &&
8547 isARMLowRegister(Inst.getOperand(0).getReg()) &&
8548 isARMLowRegister(Inst.getOperand(1).getReg()))
8549 return Match_RequiresV6;
8550 }
8551
8552 for (unsigned I = 0; I < MCID.NumOperands; ++I)
8553 if (MCID.OpInfo[I].RegClass == ARM::rGPRRegClassID) {
8554 // rGPRRegClass excludes PC, and also excluded SP before ARMv8
8555 if ((Inst.getOperand(I).getReg() == ARM::SP) && !hasV8Ops())
8556 return Match_RequiresV8;
8557 else if (Inst.getOperand(I).getReg() == ARM::PC)
8558 return Match_InvalidOperand;
8559 }
8560
8561 return Match_Success;
8562 }
8563
8564 namespace llvm {
8565 template <> inline bool IsCPSRDead<MCInst>(MCInst *Instr) {
8566 return true; // In an assembly source, no need to second-guess
8567 }
8568 }
8569
8570 static const char *getSubtargetFeatureName(uint64_t Val);
8571 bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
8572 OperandVector &Operands,
8573 MCStreamer &Out, uint64_t &ErrorInfo,
8574 bool MatchingInlineAsm) {
8575 MCInst Inst;
8576 unsigned MatchResult;
8577
8578 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
8579 MatchingInlineAsm);
8580 switch (MatchResult) {
8581 case Match_Success:
8582 // Context sensitive operand constraints aren't handled by the matcher,
8583 // so check them here.
8584 if (validateInstruction(Inst, Operands)) {
8585 // Still progress the IT block, otherwise one wrong condition causes
8586 // nasty cascading errors.
8587 forwardITPosition();
8588 return true;
8589 }
8590
8591 { // processInstruction() updates inITBlock state, we need to save it away
8592 bool wasInITBlock = inITBlock();
8593
8594 // Some instructions need post-processing to, for example, tweak which
8595 // encoding is selected. Loop on it while changes happen so the
8596 // individual transformations can chain off each other. E.g.,
8597 // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
8598 while (processInstruction(Inst, Operands, Out))
8599 ;
8600
8601 // Only after the instruction is fully processed can we validate it
8602 if (wasInITBlock && hasV8Ops() && isThumb() &&
8603 !isV8EligibleForIT(&Inst)) {
8604 Warning(IDLoc, "deprecated instruction in IT block");
8605 }
8606 }
8607
8608 // Only move forward at the very end so that everything in validate
8609 // and process gets a consistent answer about whether we're in an IT
8610 // block.
8611 forwardITPosition();
8612
8613 // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
8614 // doesn't actually encode.
8615 if (Inst.getOpcode() == ARM::ITasm)
8616 return false;
8617
8618 Inst.setLoc(IDLoc);
8619 Out.EmitInstruction(Inst, getSTI());
8620 return false;
8621 case Match_MissingFeature: {
8622 assert(ErrorInfo && "Unknown missing feature!");
8623 // Special case the error message for the very common case where only
8624 // a single subtarget feature is missing (Thumb vs. ARM, e.g.).
8625 std::string Msg = "instruction requires:";
8626 uint64_t Mask = 1;
8627 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
8628 if (ErrorInfo & Mask) {
8629 Msg += " ";
8630 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
8631 }
8632 Mask <<= 1;
8633 }
8634 return Error(IDLoc, Msg);
8635 }
8636 case Match_InvalidOperand: {
8637 SMLoc ErrorLoc = IDLoc;
8638 if (ErrorInfo != ~0ULL) {
8639 if (ErrorInfo >= Operands.size())
8640 return Error(IDLoc, "too few operands for instruction");
8641
8642 ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc();
8643 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
8644 }
8645
8646 return Error(ErrorLoc, "invalid operand for instruction");
8647 }
8648 case Match_MnemonicFail:
8649 return Error(IDLoc, "invalid instruction",
8650 ((ARMOperand &)*Operands[0]).getLocRange());
8651 case Match_RequiresNotITBlock:
8652 return Error(IDLoc, "flag setting instruction only valid outside IT block");
8653 case Match_RequiresITBlock:
8654 return Error(IDLoc, "instruction only valid inside IT block");
8655 case Match_RequiresV6:
8656 return Error(IDLoc, "instruction variant requires ARMv6 or later");
8657 case Match_RequiresThumb2:
8658 return Error(IDLoc, "instruction variant requires Thumb2");
8659 case Match_RequiresV8:
8660 return Error(IDLoc, "instruction variant requires ARMv8 or later");
8661 case Match_ImmRange0_15: {
8662 SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc();
8663 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
8664 return Error(ErrorLoc, "immediate operand must be in the range [0,15]");
8665 }
8666 case Match_ImmRange0_239: {
8667 SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc();
8668 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
8669 return Error(ErrorLoc, "immediate operand must be in the range [0,239]");
8670 }
8671 case Match_AlignedMemoryRequiresNone:
8672 case Match_DupAlignedMemoryRequiresNone:
8673 case Match_AlignedMemoryRequires16:
8674 case Match_DupAlignedMemoryRequires16:
8675 case Match_AlignedMemoryRequires32:
8676 case Match_DupAlignedMemoryRequires32:
8677 case Match_AlignedMemoryRequires64:
8678 case Match_DupAlignedMemoryRequires64:
8679 case Match_AlignedMemoryRequires64or128:
8680 case Match_DupAlignedMemoryRequires64or128:
8681 case Match_AlignedMemoryRequires64or128or256:
8682 {
8683 SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getAlignmentLoc();
8684 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
8685 switch (MatchResult) {
8686 default:
8687 llvm_unreachable("Missing Match_Aligned type");
8688 case Match_AlignedMemoryRequiresNone:
8689 case Match_DupAlignedMemoryRequiresNone:
8690 return Error(ErrorLoc, "alignment must be omitted");
8691 case Match_AlignedMemoryRequires16:
8692 case Match_DupAlignedMemoryRequires16:
8693 return Error(ErrorLoc, "alignment must be 16 or omitted");
8694 case Match_AlignedMemoryRequires32:
8695 case Match_DupAlignedMemoryRequires32:
8696 return Error(ErrorLoc, "alignment must be 32 or omitted");
8697 case Match_AlignedMemoryRequires64:
8698 case Match_DupAlignedMemoryRequires64:
8699 return Error(ErrorLoc, "alignment must be 64 or omitted");
8700 case Match_AlignedMemoryRequires64or128:
8701 case Match_DupAlignedMemoryRequires64or128:
8702 return Error(ErrorLoc, "alignment must be 64, 128 or omitted");
8703 case Match_AlignedMemoryRequires64or128or256:
8704 return Error(ErrorLoc, "alignment must be 64, 128, 256 or omitted");
8705 }
8706 }
8707 }
8708
8709 llvm_unreachable("Implement any new match types added!");
8710 }
8711
8712 /// ParseDirective parses the ARM-specific directives.
8713 bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
8714 const MCObjectFileInfo::Environment Format =
8715 getContext().getObjectFileInfo()->getObjectFileType();
8716 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
8717 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
8718
8719 StringRef IDVal = DirectiveID.getIdentifier();
8720 if (IDVal == ".word")
8721 return parseLiteralValues(4, DirectiveID.getLoc());
8722 else if (IDVal == ".short" || IDVal == ".hword")
8723 return parseLiteralValues(2, DirectiveID.getLoc());
8724 else if (IDVal == ".thumb")
8725 return parseDirectiveThumb(DirectiveID.getLoc());
8726 else if (IDVal == ".arm")
8727 return parseDirectiveARM(DirectiveID.getLoc());
8728 else if (IDVal == ".thumb_func")
8729 return parseDirectiveThumbFunc(DirectiveID.getLoc());
8730 else if (IDVal == ".code")
8731 return parseDirectiveCode(DirectiveID.getLoc());
8732 else if (IDVal == ".syntax")
8733 return parseDirectiveSyntax(DirectiveID.getLoc());
8734 else if (IDVal == ".unreq")
8735 return parseDirectiveUnreq(DirectiveID.getLoc());
8736 else if (IDVal == ".fnend")
8737 return parseDirectiveFnEnd(DirectiveID.getLoc());
8738 else if (IDVal == ".cantunwind")
8739 return parseDirectiveCantUnwind(DirectiveID.getLoc());
8740 else if (IDVal == ".personality")
8741 return parseDirectivePersonality(DirectiveID.getLoc());
8742 else if (IDVal == ".handlerdata")
8743 return parseDirectiveHandlerData(DirectiveID.getLoc());
8744 else if (IDVal == ".setfp")
8745 return parseDirectiveSetFP(DirectiveID.getLoc());
8746 else if (IDVal == ".pad")
8747 return parseDirectivePad(DirectiveID.getLoc());
8748 else if (IDVal == ".save")
8749 return parseDirectiveRegSave(DirectiveID.getLoc(), false);
8750 else if (IDVal == ".vsave")
8751 return parseDirectiveRegSave(DirectiveID.getLoc(), true);
8752 else if (IDVal == ".ltorg" || IDVal == ".pool")
8753 return parseDirectiveLtorg(DirectiveID.getLoc());
8754 else if (IDVal == ".even")
8755 return parseDirectiveEven(DirectiveID.getLoc());
8756 else if (IDVal == ".personalityindex")
8757 return parseDirectivePersonalityIndex(DirectiveID.getLoc());
8758 else if (IDVal == ".unwind_raw")
8759 return parseDirectiveUnwindRaw(DirectiveID.getLoc());
8760 else if (IDVal == ".movsp")
8761 return parseDirectiveMovSP(DirectiveID.getLoc());
8762 else if (IDVal == ".arch_extension")
8763 return parseDirectiveArchExtension(DirectiveID.getLoc());
8764 else if (IDVal == ".align")
8765 return parseDirectiveAlign(DirectiveID.getLoc());
8766 else if (IDVal == ".thumb_set")
8767 return parseDirectiveThumbSet(DirectiveID.getLoc());
8768
8769 if (!IsMachO && !IsCOFF) {
8770 if (IDVal == ".arch")
8771 return parseDirectiveArch(DirectiveID.getLoc());
8772 else if (IDVal == ".cpu")
8773 return parseDirectiveCPU(DirectiveID.getLoc());
8774 else if (IDVal == ".eabi_attribute")
8775 return parseDirectiveEabiAttr(DirectiveID.getLoc());
8776 else if (IDVal == ".fpu")
8777 return parseDirectiveFPU(DirectiveID.getLoc());
8778 else if (IDVal == ".fnstart")
8779 return parseDirectiveFnStart(DirectiveID.getLoc());
8780 else if (IDVal == ".inst")
8781 return parseDirectiveInst(DirectiveID.getLoc());
8782 else if (IDVal == ".inst.n")
8783 return parseDirectiveInst(DirectiveID.getLoc(), 'n');
8784 else if (IDVal == ".inst.w")
8785 return parseDirectiveInst(DirectiveID.getLoc(), 'w');
8786 else if (IDVal == ".object_arch")
8787 return parseDirectiveObjectArch(DirectiveID.getLoc());
8788 else if (IDVal == ".tlsdescseq")
8789 return parseDirectiveTLSDescSeq(DirectiveID.getLoc());
8790 }
8791
8792 return true;
8793 }
8794
8795 /// parseLiteralValues
8796 /// ::= .hword expression [, expression]*
8797 /// ::= .short expression [, expression]*
8798 /// ::= .word expression [, expression]*
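/// e.g. ".word 0x12345678" emits a 4-byte value; ".short 0x1234" emits 2 bytes.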
8799 bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
8800 MCAsmParser &Parser = getParser();
8801 if (getLexer().isNot(AsmToken::EndOfStatement)) {
8802 for (;;) {
8803 const MCExpr *Value;
8804 if (getParser().parseExpression(Value)) {
8805 Parser.eatToEndOfStatement();
8806 return false;
8807 }
8808
8809 getParser().getStreamer().EmitValue(Value, Size, L);
8810
8811 if (getLexer().is(AsmToken::EndOfStatement))
8812 break;
8813
8814 // FIXME: Improve diagnostic.
8815 if (getLexer().isNot(AsmToken::Comma)) {
8816 Error(L, "unexpected token in directive");
8817 return false;
8818 }
8819 Parser.Lex();
8820 }
8821 }
8822
8823 Parser.Lex();
8824 return false;
8825 }
8826
8827 /// parseDirectiveThumb
8828 /// ::= .thumb
8829 bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
8830 MCAsmParser &Parser = getParser();
8831 if (getLexer().isNot(AsmToken::EndOfStatement)) {
8832 Error(L, "unexpected token in directive");
8833 return false;
8834 }
8835 Parser.Lex();
8836
8837 if (!hasThumb()) {
8838 Error(L, "target does not support Thumb mode");
8839 return false;
8840 }
8841
8842 if (!isThumb())
8843 SwitchMode();
8844
8845 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
8846 return false;
8847 }
8848
8849 /// parseDirectiveARM
8850 /// ::= .arm
8851 bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
8852 MCAsmParser &Parser = getParser();
8853 if (getLexer().isNot(AsmToken::EndOfStatement)) {
8854 Error(L, "unexpected token in directive");
8855 return false;
8856 }
8857 Parser.Lex();
8858
8859 if (!hasARM()) {
8860 Error(L, "target does not support ARM mode");
8861 return false;
8862 }
8863
8864 if (isThumb())
8865 SwitchMode();
8866
8867 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
8868 return false;
8869 }
8870
8871 void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
8872 if (NextSymbolIsThumb) {
8873 getParser().getStreamer().EmitThumbFunc(Symbol);
8874 NextSymbolIsThumb = false;
8875 }
8876 }
8877
8878 /// parseDirectiveThumbFunc
8879 /// ::= .thumb_func symbol_name
8880 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
8881 MCAsmParser &Parser = getParser();
8882 const auto Format = getContext().getObjectFileInfo()->getObjectFileType();
8883 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
8884
8885 // Darwin asm accepts an optional function name after the .thumb_func directive;
8886 // ELF doesn't.
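  // e.g. Darwin: ".thumb_func _foo"; ELF: a bare ".thumb_func" preceding the label.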
8887 if (IsMachO) {
8888 const AsmToken &Tok = Parser.getTok();
8889 if (Tok.isNot(AsmToken::EndOfStatement)) {
8890 if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String)) {
8891 Error(L, "unexpected token in .thumb_func directive");
8892 return false;
8893 }
8894
8895 MCSymbol *Func =
8896 getParser().getContext().getOrCreateSymbol(Tok.getIdentifier());
8897 getParser().getStreamer().EmitThumbFunc(Func);
8898 Parser.Lex(); // Consume the identifier token.
8899 return false;
8900 }
8901 }
8902
8903 if (getLexer().isNot(AsmToken::EndOfStatement)) {
8904 Error(Parser.getTok().getLoc(), "unexpected token in directive");
8905 Parser.eatToEndOfStatement();
8906 return false;
8907 }
8908
8909 NextSymbolIsThumb = true;
8910 return false;
8911 }
8912
8913 /// parseDirectiveSyntax
8914 /// ::= .syntax unified | divided
8915 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
8916 MCAsmParser &Parser = getParser();
8917 const AsmToken &Tok = Parser.getTok();
8918 if (Tok.isNot(AsmToken::Identifier)) {
8919 Error(L, "unexpected token in .syntax directive");
8920 return false;
8921 }
8922
8923 StringRef Mode = Tok.getString();
8924 if (Mode == "unified" || Mode == "UNIFIED") {
8925 Parser.Lex();
8926 } else if (Mode == "divided" || Mode == "DIVIDED") {
8927 Error(L, "'.syntax divided' arm asssembly not supported");
8928 return false;
8929 } else {
8930 Error(L, "unrecognized syntax mode in .syntax directive");
8931 return false;
8932 }
8933
8934 if (getLexer().isNot(AsmToken::EndOfStatement)) {
8935 Error(Parser.getTok().getLoc(), "unexpected token in directive");
8936 return false;
8937 }
8938 Parser.Lex();
8939
8940 // TODO tell the MC streamer the mode
8941 // getParser().getStreamer().Emit???();
8942 return false;
8943 }
8944
8945 /// parseDirectiveCode
8946 /// ::= .code 16 | 32
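/// ".code 16" switches the assembler to Thumb mode; ".code 32" switches to ARM.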
8947 bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
8948 MCAsmParser &Parser = getParser();
8949 const AsmToken &Tok = Parser.getTok();
8950 if (Tok.isNot(AsmToken::Integer)) {
8951 Error(L, "unexpected token in .code directive");
8952 return false;
8953 }
8954 int64_t Val = Parser.getTok().getIntVal();
8955 if (Val != 16 && Val != 32) {
8956 Error(L, "invalid operand to .code directive");
8957 return false;
8958 }
8959 Parser.Lex();
8960
8961 if (getLexer().isNot(AsmToken::EndOfStatement)) {
8962 Error(Parser.getTok().getLoc(), "unexpected token in directive");
8963 return false;
8964 }
8965 Parser.Lex();
8966
8967 if (Val == 16) {
8968 if (!hasThumb()) {
8969 Error(L, "target does not support Thumb mode");
8970 return false;
8971 }
8972
8973 if (!isThumb())
8974 SwitchMode();
8975 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
8976 } else {
8977 if (!hasARM()) {
8978 Error(L, "target does not support ARM mode");
8979 return false;
8980 }
8981
8982 if (isThumb())
8983 SwitchMode();
8984 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
8985 }
8986
8987 return false;
8988 }
8989
8990 /// parseDirectiveReq
8991 /// ::= name .req registername
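/// e.g. "acc .req r4" lets "acc" be written wherever r4 would be accepted.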
8992 bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
8993 MCAsmParser &Parser = getParser();
8994 Parser.Lex(); // Eat the '.req' token.
8995 unsigned Reg;
8996 SMLoc SRegLoc, ERegLoc;
8997 if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
8998 Parser.eatToEndOfStatement();
8999 Error(SRegLoc, "register name expected");
9000 return false;
9001 }
9002
9003 // Shouldn't be anything else.
9004 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
9005 Parser.eatToEndOfStatement();
9006 Error(Parser.getTok().getLoc(), "unexpected input in .req directive.");
9007 return false;
9008 }
9009
9010 Parser.Lex(); // Consume the EndOfStatement
9011
9012 if (RegisterReqs.insert(std::make_pair(Name, Reg)).first->second != Reg) {
9013 Error(SRegLoc, "redefinition of '" + Name + "' does not match original.");
9014 return false;
9015 }
9016
9017 return false;
9018 }
9019
9020 /// parseDirectiveUnreq
9021 /// ::= .unreq registername
9022 bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
9023 MCAsmParser &Parser = getParser();
9024 if (Parser.getTok().isNot(AsmToken::Identifier)) {
9025 Parser.eatToEndOfStatement();
9026 Error(L, "unexpected input in .unreq directive.");
9027 return false;
9028 }
9029 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
9030 Parser.Lex(); // Eat the identifier.
9031 return false;
9032 }
9033
9034 /// parseDirectiveArch
9035 /// ::= .arch token
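/// e.g. ".arch armv7-a"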
9036 bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
9037 StringRef Arch = getParser().parseStringToEndOfStatement().trim();
9038
9039 unsigned ID = ARM::parseArch(Arch);
9040
9041 if (ID == ARM::AK_INVALID) {
9042 Error(L, "Unknown arch name");
9043 return false;
9044 }
9045
9047 MCSubtargetInfo &STI = copySTI();
9048 STI.setDefaultFeatures("", ("+" + ARM::getArchName(ID)).str());
9049 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
9050
9051 getTargetStreamer().emitArch(ID);
9052 return false;
9053 }
9054
9055 /// parseDirectiveEabiAttr
9056 /// ::= .eabi_attribute int, int [, "str"]
9057 /// ::= .eabi_attribute Tag_name, int [, "str"]
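/// e.g. ".eabi_attribute 6, 10" sets Tag_CPU_arch (tag 6) to the ARMv7 value (10).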
9058 bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
9059 MCAsmParser &Parser = getParser();
9060 int64_t Tag;
9061 SMLoc TagLoc;
9062 TagLoc = Parser.getTok().getLoc();
9063 if (Parser.getTok().is(AsmToken::Identifier)) {
9064 StringRef Name = Parser.getTok().getIdentifier();
9065 Tag = ARMBuildAttrs::AttrTypeFromString(Name);
9066 if (Tag == -1) {
9067 Error(TagLoc, "attribute name not recognised: " + Name);
9068 Parser.eatToEndOfStatement();
9069 return false;
9070 }
9071 Parser.Lex();
9072 } else {
9073 const MCExpr *AttrExpr;
9074
9075 TagLoc = Parser.getTok().getLoc();
9076 if (Parser.parseExpression(AttrExpr)) {
9077 Parser.eatToEndOfStatement();
9078 return false;
9079 }
9080
9081 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
9082 if (!CE) {
9083 Error(TagLoc, "expected numeric constant");
9084 Parser.eatToEndOfStatement();
9085 return false;
9086 }
9087
9088 Tag = CE->getValue();
9089 }
9090
9091 if (Parser.getTok().isNot(AsmToken::Comma)) {
9092 Error(Parser.getTok().getLoc(), "comma expected");
9093 Parser.eatToEndOfStatement();
9094 return false;
9095 }
9096 Parser.Lex(); // skip comma
9097
9098 StringRef StringValue = "";
9099 bool IsStringValue = false;
9100
9101 int64_t IntegerValue = 0;
9102 bool IsIntegerValue = false;
9103
9104 if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name)
9105 IsStringValue = true;
9106 else if (Tag == ARMBuildAttrs::compatibility) {
9107 IsStringValue = true;
9108 IsIntegerValue = true;
9109 } else if (Tag < 32 || Tag % 2 == 0)
9110 IsIntegerValue = true;
9111 else if (Tag % 2 == 1)
9112 IsStringValue = true;
9113 else
9114 llvm_unreachable("invalid tag type");
9115
9116 if (IsIntegerValue) {
9117 const MCExpr *ValueExpr;
9118 SMLoc ValueExprLoc = Parser.getTok().getLoc();
9119 if (Parser.parseExpression(ValueExpr)) {
9120 Parser.eatToEndOfStatement();
9121 return false;
9122 }
9123
9124 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
9125 if (!CE) {
9126 Error(ValueExprLoc, "expected numeric constant");
9127 Parser.eatToEndOfStatement();
9128 return false;
9129 }
9130
9131 IntegerValue = CE->getValue();
9132 }
9133
9134 if (Tag == ARMBuildAttrs::compatibility) {
9135 if (Parser.getTok().isNot(AsmToken::Comma))
9136 IsStringValue = false;
9137 if (Parser.getTok().isNot(AsmToken::Comma)) {
9138 Error(Parser.getTok().getLoc(), "comma expected");
9139 Parser.eatToEndOfStatement();
9140 return false;
9141 } else {
9142 Parser.Lex();
9143 }
9144 }
9145
9146 if (IsStringValue) {
9147 if (Parser.getTok().isNot(AsmToken::String)) {
9148 Error(Parser.getTok().getLoc(), "bad string constant");
9149 Parser.eatToEndOfStatement();
9150 return false;
9151 }
9152
9153 StringValue = Parser.getTok().getStringContents();
9154 Parser.Lex();
9155 }
9156
9157 if (IsIntegerValue && IsStringValue) {
9158 assert(Tag == ARMBuildAttrs::compatibility);
9159 getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
9160 } else if (IsIntegerValue)
9161 getTargetStreamer().emitAttribute(Tag, IntegerValue);
9162 else if (IsStringValue)
9163 getTargetStreamer().emitTextAttribute(Tag, StringValue);
9164 return false;
9165 }
9166
9167 /// parseDirectiveCPU
9168 /// ::= .cpu str
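/// e.g. ".cpu cortex-a8"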
9169 bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
9170 StringRef CPU = getParser().parseStringToEndOfStatement().trim();
9171 getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
9172
9173 // FIXME: This is using table-gen data, but should be moved to
9174 // ARMTargetParser once that is table-gen'd.
9175 if (!getSTI().isCPUStringValid(CPU)) {
9176 Error(L, "Unknown CPU name");
9177 return false;
9178 }
9179
9180 MCSubtargetInfo &STI = copySTI();
9181 STI.setDefaultFeatures(CPU, "");
9182 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
9183
9184 return false;
9185 }
9186 /// parseDirectiveFPU
9187 /// ::= .fpu str
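/// e.g. ".fpu neon" or ".fpu vfpv3-d16"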
9188 bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
9189 SMLoc FPUNameLoc = getTok().getLoc();
9190 StringRef FPU = getParser().parseStringToEndOfStatement().trim();
9191
9192 unsigned ID = ARM::parseFPU(FPU);
9193 std::vector<const char *> Features;
9194 if (!ARM::getFPUFeatures(ID, Features)) {
9195 Error(FPUNameLoc, "Unknown FPU name");
9196 return false;
9197 }
9198
9199 MCSubtargetInfo &STI = copySTI();
9200 for (auto Feature : Features)
9201 STI.ApplyFeatureFlag(Feature);
9202 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
9203
9204 getTargetStreamer().emitFPU(ID);
9205 return false;
9206 }
9207
9208 /// parseDirectiveFnStart
9209 /// ::= .fnstart
9210 bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
9211 if (UC.hasFnStart()) {
9212 Error(L, ".fnstart starts before the end of previous one");
9213 UC.emitFnStartLocNotes();
9214 return false;
9215 }
9216
9217 // Reset the unwind directives parser state
9218 UC.reset();
9219
9220 getTargetStreamer().emitFnStart();
9221
9222 UC.recordFnStart(L);
9223 return false;
9224 }
9225
9226 /// parseDirectiveFnEnd
9227 /// ::= .fnend
9228 bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
9229 // Check the ordering of unwind directives
9230 if (!UC.hasFnStart()) {
9231 Error(L, ".fnstart must precede .fnend directive");
9232 return false;
9233 }
9234
9235 // Reset the unwind directives parser state
9236 getTargetStreamer().emitFnEnd();
9237
9238 UC.reset();
9239 return false;
9240 }
9241
9242 /// parseDirectiveCantUnwind
9243 /// ::= .cantunwind
9244 bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
9245 UC.recordCantUnwind(L);
9246
9247 // Check the ordering of unwind directives
9248 if (!UC.hasFnStart()) {
9249 Error(L, ".fnstart must precede .cantunwind directive");
9250 return false;
9251 }
9252 if (UC.hasHandlerData()) {
9253 Error(L, ".cantunwind can't be used with .handlerdata directive");
9254 UC.emitHandlerDataLocNotes();
9255 return false;
9256 }
9257 if (UC.hasPersonality()) {
9258 Error(L, ".cantunwind can't be used with .personality directive");
9259 UC.emitPersonalityLocNotes();
9260 return false;
9261 }
9262
9263 getTargetStreamer().emitCantUnwind();
9264 return false;
9265 }
9266
9267 /// parseDirectivePersonality
9268 /// ::= .personality name
9269 bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
9270 MCAsmParser &Parser = getParser();
9271 bool HasExistingPersonality = UC.hasPersonality();
9272
9273 UC.recordPersonality(L);
9274
9275 // Check the ordering of unwind directives
9276 if (!UC.hasFnStart()) {
9277 Error(L, ".fnstart must precede .personality directive");
9278 return false;
9279 }
9280 if (UC.cantUnwind()) {
9281 Error(L, ".personality can't be used with .cantunwind directive");
9282 UC.emitCantUnwindLocNotes();
9283 return false;
9284 }
9285 if (UC.hasHandlerData()) {
9286 Error(L, ".personality must precede .handlerdata directive");
9287 UC.emitHandlerDataLocNotes();
9288 return false;
9289 }
9290 if (HasExistingPersonality) {
9291 Parser.eatToEndOfStatement();
9292 Error(L, "multiple personality directives");
9293 UC.emitPersonalityLocNotes();
9294 return false;
9295 }
9296
9297 // Parse the name of the personality routine
9298 if (Parser.getTok().isNot(AsmToken::Identifier)) {
9299 Parser.eatToEndOfStatement();
9300 Error(L, "unexpected input in .personality directive.");
9301 return false;
9302 }
9303 StringRef Name(Parser.getTok().getIdentifier());
9304 Parser.Lex();
9305
9306 MCSymbol *PR = getParser().getContext().getOrCreateSymbol(Name);
9307 getTargetStreamer().emitPersonality(PR);
9308 return false;
9309 }
9310
9311 /// parseDirectiveHandlerData
9312 /// ::= .handlerdata
9313 bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
9314 UC.recordHandlerData(L);
9315
9316 // Check the ordering of unwind directives
9317 if (!UC.hasFnStart()) {
9318 Error(L, ".fnstart must precede .personality directive");
9319 return false;
9320 }
9321 if (UC.cantUnwind()) {
9322 Error(L, ".handlerdata can't be used with .cantunwind directive");
9323 UC.emitCantUnwindLocNotes();
9324 return false;
9325 }
9326
9327 getTargetStreamer().emitHandlerData();
9328 return false;
9329 }
9330
9331 /// parseDirectiveSetFP
9332 /// ::= .setfp fpreg, spreg [, offset]
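/// e.g. ".setfp fp, sp, #0" records that fp was set up from sp with no offset.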
9333 bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
9334 MCAsmParser &Parser = getParser();
9335 // Check the ordering of unwind directives
9336 if (!UC.hasFnStart()) {
9337 Error(L, ".fnstart must precede .setfp directive");
9338 return false;
9339 }
9340 if (UC.hasHandlerData()) {
9341 Error(L, ".setfp must precede .handlerdata directive");
9342 return false;
9343 }
9344
9345 // Parse fpreg
9346 SMLoc FPRegLoc = Parser.getTok().getLoc();
9347 int FPReg = tryParseRegister();
9348 if (FPReg == -1) {
9349 Error(FPRegLoc, "frame pointer register expected");
9350 return false;
9351 }
9352
9353 // Consume comma
9354 if (Parser.getTok().isNot(AsmToken::Comma)) {
9355 Error(Parser.getTok().getLoc(), "comma expected");
9356 return false;
9357 }
9358 Parser.Lex(); // skip comma
9359
9360 // Parse spreg
9361 SMLoc SPRegLoc = Parser.getTok().getLoc();
9362 int SPReg = tryParseRegister();
9363 if (SPReg == -1) {
9364 Error(SPRegLoc, "stack pointer register expected");
9365 return false;
9366 }
9367
9368 if (SPReg != ARM::SP && SPReg != UC.getFPReg()) {
9369 Error(SPRegLoc, "register should be either $sp or the latest fp register");
9370 return false;
9371 }
9372
9373 // Update the frame pointer register
9374 UC.saveFPReg(FPReg);
9375
9376 // Parse offset
9377 int64_t Offset = 0;
9378 if (Parser.getTok().is(AsmToken::Comma)) {
9379 Parser.Lex(); // skip comma
9380
9381 if (Parser.getTok().isNot(AsmToken::Hash) &&
9382 Parser.getTok().isNot(AsmToken::Dollar)) {
9383 Error(Parser.getTok().getLoc(), "'#' expected");
9384 return false;
9385 }
9386 Parser.Lex(); // skip hash token.
9387
9388 const MCExpr *OffsetExpr;
9389 SMLoc ExLoc = Parser.getTok().getLoc();
9390 SMLoc EndLoc;
9391 if (getParser().parseExpression(OffsetExpr, EndLoc)) {
9392 Error(ExLoc, "malformed setfp offset");
9393 return false;
9394 }
9395 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
9396 if (!CE) {
9397 Error(ExLoc, "setfp offset must be an immediate");
9398 return false;
9399 }
9400
9401 Offset = CE->getValue();
9402 }
9403
9404 getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg),
9405 static_cast<unsigned>(SPReg), Offset);
9406 return false;
9407 }
9408
9409 /// parseDirectivePad
9410 /// ::= .pad offset
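/// e.g. ".pad #16" records a 16-byte stack adjustment for the unwinder.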
9411 bool ARMAsmParser::parseDirectivePad(SMLoc L) {
9412 MCAsmParser &Parser = getParser();
9413 // Check the ordering of unwind directives
9414 if (!UC.hasFnStart()) {
9415 Error(L, ".fnstart must precede .pad directive");
9416 return false;
9417 }
9418 if (UC.hasHandlerData()) {
9419 Error(L, ".pad must precede .handlerdata directive");
9420 return false;
9421 }
9422
9423 // Parse the offset
9424 if (Parser.getTok().isNot(AsmToken::Hash) &&
9425 Parser.getTok().isNot(AsmToken::Dollar)) {
9426 Error(Parser.getTok().getLoc(), "'#' expected");
9427 return false;
9428 }
9429 Parser.Lex(); // skip hash token.
9430
9431 const MCExpr *OffsetExpr;
9432 SMLoc ExLoc = Parser.getTok().getLoc();
9433 SMLoc EndLoc;
9434 if (getParser().parseExpression(OffsetExpr, EndLoc)) {
9435 Error(ExLoc, "malformed pad offset");
9436 return false;
9437 }
9438 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
9439 if (!CE) {
9440 Error(ExLoc, "pad offset must be an immediate");
9441 return false;
9442 }
9443
9444 getTargetStreamer().emitPad(CE->getValue());
9445 return false;
9446 }
9447
9448 /// parseDirectiveRegSave
9449 /// ::= .save { registers }
9450 /// ::= .vsave { registers }
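/// e.g. ".save {r4-r6, lr}" or ".vsave {d8-d10}"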
9451 bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
9452 // Check the ordering of unwind directives
9453 if (!UC.hasFnStart()) {
9454 Error(L, ".fnstart must precede .save or .vsave directives");
9455 return false;
9456 }
9457 if (UC.hasHandlerData()) {
9458 Error(L, ".save or .vsave must precede .handlerdata directive");
9459 return false;
9460 }
9461
9462 // RAII object to make sure parsed operands are deleted.
9463 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
9464
9465 // Parse the register list
9466 if (parseRegisterList(Operands))
9467 return false;
9468 ARMOperand &Op = (ARMOperand &)*Operands[0];
9469 if (!IsVector && !Op.isRegList()) {
9470 Error(L, ".save expects GPR registers");
9471 return false;
9472 }
9473 if (IsVector && !Op.isDPRRegList()) {
9474 Error(L, ".vsave expects DPR registers");
9475 return false;
9476 }
9477
9478 getTargetStreamer().emitRegSave(Op.getRegList(), IsVector);
9479 return false;
9480 }
9481
9482 /// parseDirectiveInst
9483 /// ::= .inst opcode [, ...]
9484 /// ::= .inst.n opcode [, ...]
9485 /// ::= .inst.w opcode [, ...]
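/// e.g. ".inst.n 0x4668" emits one 2-byte Thumb opcode; ".inst.w 0xf3af8000"
/// emits one 4-byte opcode.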
9486 bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
9487 MCAsmParser &Parser = getParser();
9488 int Width;
9489
9490 if (isThumb()) {
9491 switch (Suffix) {
9492 case 'n':
9493 Width = 2;
9494 break;
9495 case 'w':
9496 Width = 4;
9497 break;
9498 default:
9499 Parser.eatToEndOfStatement();
9500 Error(Loc, "cannot determine Thumb instruction size, "
9501 "use inst.n/inst.w instead");
9502 return false;
9503 }
9504 } else {
9505 if (Suffix) {
9506 Parser.eatToEndOfStatement();
9507 Error(Loc, "width suffixes are invalid in ARM mode");
9508 return false;
9509 }
9510 Width = 4;
9511 }
9512
9513 if (getLexer().is(AsmToken::EndOfStatement)) {
9514 Parser.eatToEndOfStatement();
9515 Error(Loc, "expected expression following directive");
9516 return false;
9517 }
9518
9519 for (;;) {
9520 const MCExpr *Expr;
9521
9522 if (getParser().parseExpression(Expr)) {
9523 Error(Loc, "expected expression");
9524 return false;
9525 }
9526
9527 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
9528 if (!Value) {
9529 Error(Loc, "expected constant expression");
9530 return false;
9531 }
9532
9533 switch (Width) {
9534 case 2:
9535 if (Value->getValue() > 0xffff) {
9536 Error(Loc, "inst.n operand is too big, use inst.w instead");
9537 return false;
9538 }
9539 break;
9540 case 4:
9541 if (Value->getValue() > 0xffffffff) {
9542 Error(Loc,
9543 StringRef(Suffix ? "inst.w" : "inst") + " operand is too big");
9544 return false;
9545 }
9546 break;
9547 default:
9548 llvm_unreachable("only supported widths are 2 and 4");
9549 }
9550
9551 getTargetStreamer().emitInst(Value->getValue(), Suffix);
9552
9553 if (getLexer().is(AsmToken::EndOfStatement))
9554 break;
9555
9556 if (getLexer().isNot(AsmToken::Comma)) {
9557 Error(Loc, "unexpected token in directive");
9558 return false;
9559 }
9560
9561 Parser.Lex();
9562 }
9563
9564 Parser.Lex();
9565 return false;
9566 }
9567
9568 /// parseDirectiveLtorg
9569 /// ::= .ltorg | .pool
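/// Flushes the current literal pool, e.g. constants created by "ldr r0, =0x12345678".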
9570 bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
9571 getTargetStreamer().emitCurrentConstantPool();
9572 return false;
9573 }
9574
9575 bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
9576 const MCSection *Section = getStreamer().getCurrentSection().first;
9577
9578 if (getLexer().isNot(AsmToken::EndOfStatement)) {
9579 TokError("unexpected token in directive");
9580 return false;
9581 }
9582
9583 if (!Section) {
9584 getStreamer().InitSections(false);
9585 Section = getStreamer().getCurrentSection().first;
9586 }
9587
9588 assert(Section && "must have section to emit alignment");
9589 if (Section->UseCodeAlign())
9590 getStreamer().EmitCodeAlignment(2);
9591 else
9592 getStreamer().EmitValueToAlignment(2);
9593
9594 return false;
9595 }
9596
9597 /// parseDirectivePersonalityIndex
9598 /// ::= .personalityindex index
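/// e.g. ".personalityindex 0" selects the __aeabi_unwind_cpp_pr0 personality routine.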
9599 bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
9600 MCAsmParser &Parser = getParser();
9601 bool HasExistingPersonality = UC.hasPersonality();
9602
9603 UC.recordPersonalityIndex(L);
9604
9605 if (!UC.hasFnStart()) {
9606 Parser.eatToEndOfStatement();
9607 Error(L, ".fnstart must precede .personalityindex directive");
9608 return false;
9609 }
9610 if (UC.cantUnwind()) {
9611 Parser.eatToEndOfStatement();
9612 Error(L, ".personalityindex cannot be used with .cantunwind");
9613 UC.emitCantUnwindLocNotes();
9614 return false;
9615 }
9616 if (UC.hasHandlerData()) {
9617 Parser.eatToEndOfStatement();
9618 Error(L, ".personalityindex must precede .handlerdata directive");
9619 UC.emitHandlerDataLocNotes();
9620 return false;
9621 }
9622 if (HasExistingPersonality) {
9623 Parser.eatToEndOfStatement();
9624 Error(L, "multiple personality directives");
9625 UC.emitPersonalityLocNotes();
9626 return false;
9627 }
9628
9629 const MCExpr *IndexExpression;
9630 SMLoc IndexLoc = Parser.getTok().getLoc();
9631 if (Parser.parseExpression(IndexExpression)) {
9632 Parser.eatToEndOfStatement();
9633 return false;
9634 }
9635
9636 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
9637 if (!CE) {
9638 Parser.eatToEndOfStatement();
9639 Error(IndexLoc, "index must be a constant number");
9640 return false;
9641 }
9642 if (CE->getValue() < 0 ||
9643 CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX) {
9644 Parser.eatToEndOfStatement();
9645 Error(IndexLoc, "personality routine index should be in range [0-3]");
9646 return false;
9647 }
9648
9649 getTargetStreamer().emitPersonalityIndex(CE->getValue());
9650 return false;
9651 }
9652
9653 /// parseDirectiveUnwindRaw
9654 /// ::= .unwind_raw offset, opcode [, opcode...]
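/// e.g. ".unwind_raw 0, 0xb0" (0xb0 is the EHABI "finish" unwind opcode).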
9655 bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
9656 MCAsmParser &Parser = getParser();
9657 if (!UC.hasFnStart()) {
9658 Parser.eatToEndOfStatement();
9659 Error(L, ".fnstart must precede .unwind_raw directives");
9660 return false;
9661 }
9662
9663 int64_t StackOffset;
9664
9665 const MCExpr *OffsetExpr;
9666 SMLoc OffsetLoc = getLexer().getLoc();
9667 if (getLexer().is(AsmToken::EndOfStatement) ||
9668 getParser().parseExpression(OffsetExpr)) {
9669 Error(OffsetLoc, "expected expression");
9670 Parser.eatToEndOfStatement();
9671 return false;
9672 }
9673
9674 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
9675 if (!CE) {
9676 Error(OffsetLoc, "offset must be a constant");
9677 Parser.eatToEndOfStatement();
9678 return false;
9679 }
9680
9681 StackOffset = CE->getValue();
9682
9683 if (getLexer().isNot(AsmToken::Comma)) {
9684 Error(getLexer().getLoc(), "expected comma");
9685 Parser.eatToEndOfStatement();
9686 return false;
9687 }
9688 Parser.Lex();
9689
9690 SmallVector<uint8_t, 16> Opcodes;
9691 for (;;) {
9692 const MCExpr *OE;
9693
9694 SMLoc OpcodeLoc = getLexer().getLoc();
9695 if (getLexer().is(AsmToken::EndOfStatement) || Parser.parseExpression(OE)) {
9696 Error(OpcodeLoc, "expected opcode expression");
9697 Parser.eatToEndOfStatement();
9698 return false;
9699 }
9700
9701 const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE);
9702 if (!OC) {
9703 Error(OpcodeLoc, "opcode value must be a constant");
9704 Parser.eatToEndOfStatement();
9705 return false;
9706 }
9707
9708 const int64_t Opcode = OC->getValue();
9709 if (Opcode & ~0xff) {
9710 Error(OpcodeLoc, "invalid opcode");
9711 Parser.eatToEndOfStatement();
9712 return false;
9713 }
9714
9715 Opcodes.push_back(uint8_t(Opcode));
9716
9717 if (getLexer().is(AsmToken::EndOfStatement))
9718 break;
9719
9720 if (getLexer().isNot(AsmToken::Comma)) {
9721 Error(getLexer().getLoc(), "unexpected token in directive");
9722 Parser.eatToEndOfStatement();
9723 return false;
9724 }
9725
9726 Parser.Lex();
9727 }
9728
9729 getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
9730
9731 Parser.Lex();
9732 return false;
9733 }
9734
9735 /// parseDirectiveTLSDescSeq
9736 /// ::= .tlsdescseq tls-variable
9737 bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
9738 MCAsmParser &Parser = getParser();
9739
9740 if (getLexer().isNot(AsmToken::Identifier)) {
9741 TokError("expected variable after '.tlsdescseq' directive");
9742 Parser.eatToEndOfStatement();
9743 return false;
9744 }
9745
9746 const MCSymbolRefExpr *SRE =
9747 MCSymbolRefExpr::create(Parser.getTok().getIdentifier(),
9748 MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext());
9749 Lex();
9750
9751 if (getLexer().isNot(AsmToken::EndOfStatement)) {
9752 Error(Parser.getTok().getLoc(), "unexpected token");
9753 Parser.eatToEndOfStatement();
9754 return false;
9755 }
9756
9757 getTargetStreamer().AnnotateTLSDescriptorSequence(SRE);
9758 return false;
9759 }
9760
9761 /// parseDirectiveMovSP
9762 /// ::= .movsp reg [, #offset]
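/// e.g. ".movsp r7" records that the stack pointer is now kept in r7 for unwinding.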
9763 bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
9764 MCAsmParser &Parser = getParser();
9765 if (!UC.hasFnStart()) {
9766 Parser.eatToEndOfStatement();
9767 Error(L, ".fnstart must precede .movsp directives");
9768 return false;
9769 }
9770 if (UC.getFPReg() != ARM::SP) {
9771 Parser.eatToEndOfStatement();
9772 Error(L, "unexpected .movsp directive");
9773 return false;
9774 }
9775
9776 SMLoc SPRegLoc = Parser.getTok().getLoc();
9777 int SPReg = tryParseRegister();
9778 if (SPReg == -1) {
9779 Parser.eatToEndOfStatement();
9780 Error(SPRegLoc, "register expected");
9781 return false;
9782 }
9783
9784 if (SPReg == ARM::SP || SPReg == ARM::PC) {
9785 Parser.eatToEndOfStatement();
9786 Error(SPRegLoc, "sp and pc are not permitted in .movsp directive");
9787 return false;
9788 }
9789
9790 int64_t Offset = 0;
9791 if (Parser.getTok().is(AsmToken::Comma)) {
9792 Parser.Lex();
9793
9794 if (Parser.getTok().isNot(AsmToken::Hash)) {
9795 Error(Parser.getTok().getLoc(), "expected #constant");
9796 Parser.eatToEndOfStatement();
9797 return false;
9798 }
9799 Parser.Lex();
9800
9801 const MCExpr *OffsetExpr;
9802 SMLoc OffsetLoc = Parser.getTok().getLoc();
9803 if (Parser.parseExpression(OffsetExpr)) {
9804 Parser.eatToEndOfStatement();
9805 Error(OffsetLoc, "malformed offset expression");
9806 return false;
9807 }
9808
9809 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
9810 if (!CE) {
9811 Parser.eatToEndOfStatement();
9812 Error(OffsetLoc, "offset must be an immediate constant");
9813 return false;
9814 }
9815
9816 Offset = CE->getValue();
9817 }
9818
9819 getTargetStreamer().emitMovSP(SPReg, Offset);
9820 UC.saveFPReg(SPReg);
9821
9822 return false;
9823 }
9824
9825 /// parseDirectiveObjectArch
9826 /// ::= .object_arch name
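/// e.g. ".object_arch armv4t" overrides the architecture recorded in the object attributes.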
9827 bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
9828 MCAsmParser &Parser = getParser();
9829 if (getLexer().isNot(AsmToken::Identifier)) {
9830 Error(getLexer().getLoc(), "unexpected token");
9831 Parser.eatToEndOfStatement();
9832 return false;
9833 }
9834
9835 StringRef Arch = Parser.getTok().getString();
9836 SMLoc ArchLoc = Parser.getTok().getLoc();
9837 getLexer().Lex();
9838
9839 unsigned ID = ARM::parseArch(Arch);
9840
9841 if (ID == ARM::AK_INVALID) {
9842 Error(ArchLoc, "unknown architecture '" + Arch + "'");
9843 Parser.eatToEndOfStatement();
9844 return false;
9845 }
9846
9847 getTargetStreamer().emitObjectArch(ID);
9848
9849 if (getLexer().isNot(AsmToken::EndOfStatement)) {
9850 Error(getLexer().getLoc(), "unexpected token");
9851 Parser.eatToEndOfStatement();
9852 }
9853
9854 return false;
9855 }
9856
9857 /// parseDirectiveAlign
9858 /// ::= .align
9859 bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
9860 // NOTE: if this is not the end of the statement, fall back to the
9861 // target-agnostic handling for this directive, which will handle it correctly.
9862 if (getLexer().isNot(AsmToken::EndOfStatement))
9863 return true;
9864
9865 // For ARM, '.align' is handled target-specifically to mean 2**2-byte (4-byte) alignment.
9866 if (getStreamer().getCurrentSection().first->UseCodeAlign())
9867 getStreamer().EmitCodeAlignment(4, 0);
9868 else
9869 getStreamer().EmitValueToAlignment(4, 0, 1, 0);
9870
9871 return false;
9872 }
9873
9874 /// parseDirectiveThumbSet
9875 /// ::= .thumb_set name, value
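/// e.g. ".thumb_set alias, func" acts like ".set" and also marks the symbol as Thumb.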
9876 bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
9877 MCAsmParser &Parser = getParser();
9878
9879 StringRef Name;
9880 if (Parser.parseIdentifier(Name)) {
9881 TokError("expected identifier after '.thumb_set'");
9882 Parser.eatToEndOfStatement();
9883 return false;
9884 }
9885
9886 if (getLexer().isNot(AsmToken::Comma)) {
9887 TokError("expected comma after name '" + Name + "'");
9888 Parser.eatToEndOfStatement();
9889 return false;
9890 }
9891 Lex();
9892
9893 MCSymbol *Sym;
9894 const MCExpr *Value;
9895 if (MCParserUtils::parseAssignmentExpression(Name, /* allow_redef */ true,
9896 Parser, Sym, Value))
9897 return true;
9898
9899 getTargetStreamer().emitThumbSet(Sym, Value);
9900 return false;
9901 }
9902
9903 /// Force static initialization.
9904 extern "C" void LLVMInitializeARMAsmParser() {
9905 RegisterMCAsmParser<ARMAsmParser> X(TheARMLETarget);
9906 RegisterMCAsmParser<ARMAsmParser> Y(TheARMBETarget);
9907 RegisterMCAsmParser<ARMAsmParser> A(TheThumbLETarget);
9908 RegisterMCAsmParser<ARMAsmParser> B(TheThumbBETarget);
9909 }
9910
9911 #define GET_REGISTER_MATCHER
9912 #define GET_SUBTARGET_FEATURE_NAME
9913 #define GET_MATCHER_IMPLEMENTATION
9914 #include "ARMGenAsmMatcher.inc"
9915
9916 // FIXME: This structure should be moved inside ARMTargetParser
9917 // when we start to table-generate them, and we can use the ARM
9918 // flags below, that were generated by table-gen.
9919 static const struct {
9920 const unsigned Kind;
9921 const uint64_t ArchCheck;
9922 const FeatureBitset Features;
9923 } Extensions[] = {
9924 { ARM::AEK_CRC, Feature_HasV8, {ARM::FeatureCRC} },
9925 { ARM::AEK_CRYPTO, Feature_HasV8,
9926 {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8} },
9927 { ARM::AEK_FP, Feature_HasV8, {ARM::FeatureFPARMv8} },
9928 { (ARM::AEK_HWDIV | ARM::AEK_HWDIVARM), Feature_HasV7 | Feature_IsNotMClass,
9929 {ARM::FeatureHWDiv, ARM::FeatureHWDivARM} },
9930 { ARM::AEK_MP, Feature_HasV7 | Feature_IsNotMClass, {ARM::FeatureMP} },
9931 { ARM::AEK_SIMD, Feature_HasV8, {ARM::FeatureNEON, ARM::FeatureFPARMv8} },
9932 { ARM::AEK_SEC, Feature_HasV6K, {ARM::FeatureTrustZone} },
9933 // FIXME: Only available in A-class, isel not predicated
9934 { ARM::AEK_VIRT, Feature_HasV7, {ARM::FeatureVirtualization} },
9935 { ARM::AEK_FP16, Feature_HasV8_2a, {ARM::FeatureFPARMv8, ARM::FeatureFullFP16} },
9936 // FIXME: Unsupported extensions.
9937 { ARM::AEK_OS, Feature_None, {} },
9938 { ARM::AEK_IWMMXT, Feature_None, {} },
9939 { ARM::AEK_IWMMXT2, Feature_None, {} },
9940 { ARM::AEK_MAVERICK, Feature_None, {} },
9941 { ARM::AEK_XSCALE, Feature_None, {} },
9942 };
9943
9944 /// parseDirectiveArchExtension
9945 /// ::= .arch_extension [no]feature
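/// e.g. ".arch_extension crc" enables the CRC instructions; ".arch_extension nocrc" disables them.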
9946 bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
9947 MCAsmParser &Parser = getParser();
9948
9949 if (getLexer().isNot(AsmToken::Identifier)) {
9950 Error(getLexer().getLoc(), "unexpected token");
9951 Parser.eatToEndOfStatement();
9952 return false;
9953 }
9954
9955 StringRef Name = Parser.getTok().getString();
9956 SMLoc ExtLoc = Parser.getTok().getLoc();
9957 getLexer().Lex();
9958
9959 bool EnableFeature = true;
9960 if (Name.startswith_lower("no")) {
9961 EnableFeature = false;
9962 Name = Name.substr(2);
9963 }
9964 unsigned FeatureKind = ARM::parseArchExt(Name);
9965   if (FeatureKind == ARM::AEK_INVALID) {
9966     Error(ExtLoc, "unknown architectural extension: " + Name);
    Parser.eatToEndOfStatement();
    return false;
  }
9967
9968 for (const auto &Extension : Extensions) {
9969 if (Extension.Kind != FeatureKind)
9970 continue;
9971
9972 if (Extension.Features.none())
9973 report_fatal_error("unsupported architectural extension: " + Name);
9974
9975 if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck) {
9976 Error(ExtLoc, "architectural extension '" + Name + "' is not "
9977 "allowed for the current base architecture");
9978 return false;
9979 }
9980
9981 MCSubtargetInfo &STI = copySTI();
9982 FeatureBitset ToggleFeatures = EnableFeature
9983 ? (~STI.getFeatureBits() & Extension.Features)
9984 : ( STI.getFeatureBits() & Extension.Features);
9985
9986 uint64_t Features =
9987 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
9988 setAvailableFeatures(Features);
9989 return false;
9990 }
9991
9992 Error(ExtLoc, "unknown architectural extension: " + Name);
9993 Parser.eatToEndOfStatement();
9994 return false;
9995 }
9996
9997 // Define this matcher function after the auto-generated include so we
9998 // have the match class enum definitions.
9999 unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
10000 unsigned Kind) {
10001 ARMOperand &Op = static_cast<ARMOperand &>(AsmOp);
10002 // If the kind is a token for a literal immediate, check if our asm
10003 // operand matches. This is for InstAliases which have a fixed-value
10004 // immediate in the syntax.
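  // e.g. MCK__35_0 is the matcher class for a literal "#0" token ('_35_' is ASCII 35, '#').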
10005 switch (Kind) {
10006 default: break;
10007 case MCK__35_0:
10008 if (Op.isImm())
10009 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
10010 if (CE->getValue() == 0)
10011 return Match_Success;
10012 break;
10013 case MCK_ModImm:
10014 if (Op.isImm()) {
10015 const MCExpr *SOExpr = Op.getImm();
10016 int64_t Value;
10017 if (!SOExpr->evaluateAsAbsolute(Value))
10018 return Match_Success;
10019 assert((Value >= INT32_MIN && Value <= UINT32_MAX) &&
10020 "expression value must be representable in 32 bits");
10021 }
10022 break;
10023 case MCK_rGPR:
10024 if (hasV8Ops() && Op.isReg() && Op.getReg() == ARM::SP)
10025 return Match_Success;
10026 break;
10027 case MCK_GPRPair:
10028 if (Op.isReg() &&
10029 MRI->getRegClass(ARM::GPRRegClassID).contains(Op.getReg()))
10030 return Match_Success;
10031 break;
10032 }
10033 return Match_InvalidOperand;
10034 }
10035