1 //===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9
10 #include "ARMFPUName.h"
11 #include "ARMFeatures.h"
12 #include "MCTargetDesc/ARMAddressingModes.h"
13 #include "MCTargetDesc/ARMArchName.h"
14 #include "MCTargetDesc/ARMBaseInfo.h"
15 #include "MCTargetDesc/ARMMCExpr.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/StringExtras.h"
19 #include "llvm/ADT/StringSwitch.h"
20 #include "llvm/ADT/Twine.h"
21 #include "llvm/MC/MCAsmInfo.h"
22 #include "llvm/MC/MCAssembler.h"
23 #include "llvm/MC/MCContext.h"
24 #include "llvm/MC/MCDisassembler.h"
25 #include "llvm/MC/MCELFStreamer.h"
26 #include "llvm/MC/MCExpr.h"
27 #include "llvm/MC/MCInst.h"
28 #include "llvm/MC/MCInstrDesc.h"
29 #include "llvm/MC/MCInstrInfo.h"
30 #include "llvm/MC/MCObjectFileInfo.h"
31 #include "llvm/MC/MCParser/MCAsmLexer.h"
32 #include "llvm/MC/MCParser/MCAsmParser.h"
33 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
34 #include "llvm/MC/MCRegisterInfo.h"
35 #include "llvm/MC/MCSection.h"
36 #include "llvm/MC/MCStreamer.h"
37 #include "llvm/MC/MCSubtargetInfo.h"
38 #include "llvm/MC/MCSymbol.h"
39 #include "llvm/MC/MCTargetAsmParser.h"
40 #include "llvm/Support/ARMBuildAttributes.h"
41 #include "llvm/Support/ARMEHABI.h"
42 #include "llvm/Support/COFF.h"
43 #include "llvm/Support/Debug.h"
44 #include "llvm/Support/ELF.h"
45 #include "llvm/Support/MathExtras.h"
46 #include "llvm/Support/SourceMgr.h"
47 #include "llvm/Support/TargetRegistry.h"
48 #include "llvm/Support/raw_ostream.h"
49
50 using namespace llvm;
51
52 namespace {
53
54 class ARMOperand;
55
56 enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
57
58 class UnwindContext {
59 MCAsmParser &Parser;
60
61 typedef SmallVector<SMLoc, 4> Locs;
62
63 Locs FnStartLocs;
64 Locs CantUnwindLocs;
65 Locs PersonalityLocs;
66 Locs PersonalityIndexLocs;
67 Locs HandlerDataLocs;
68 int FPReg;
69
70 public:
UnwindContext(MCAsmParser & P)71 UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
72
hasFnStart() const73 bool hasFnStart() const { return !FnStartLocs.empty(); }
cantUnwind() const74 bool cantUnwind() const { return !CantUnwindLocs.empty(); }
hasHandlerData() const75 bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
hasPersonality() const76 bool hasPersonality() const {
77 return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
78 }
79
recordFnStart(SMLoc L)80 void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
recordCantUnwind(SMLoc L)81 void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
recordPersonality(SMLoc L)82 void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
recordHandlerData(SMLoc L)83 void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
recordPersonalityIndex(SMLoc L)84 void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
85
saveFPReg(int Reg)86 void saveFPReg(int Reg) { FPReg = Reg; }
getFPReg() const87 int getFPReg() const { return FPReg; }
88
emitFnStartLocNotes() const89 void emitFnStartLocNotes() const {
90 for (Locs::const_iterator FI = FnStartLocs.begin(), FE = FnStartLocs.end();
91 FI != FE; ++FI)
92 Parser.Note(*FI, ".fnstart was specified here");
93 }
emitCantUnwindLocNotes() const94 void emitCantUnwindLocNotes() const {
95 for (Locs::const_iterator UI = CantUnwindLocs.begin(),
96 UE = CantUnwindLocs.end(); UI != UE; ++UI)
97 Parser.Note(*UI, ".cantunwind was specified here");
98 }
emitHandlerDataLocNotes() const99 void emitHandlerDataLocNotes() const {
100 for (Locs::const_iterator HI = HandlerDataLocs.begin(),
101 HE = HandlerDataLocs.end(); HI != HE; ++HI)
102 Parser.Note(*HI, ".handlerdata was specified here");
103 }
emitPersonalityLocNotes() const104 void emitPersonalityLocNotes() const {
105 for (Locs::const_iterator PI = PersonalityLocs.begin(),
106 PE = PersonalityLocs.end(),
107 PII = PersonalityIndexLocs.begin(),
108 PIE = PersonalityIndexLocs.end();
109 PI != PE || PII != PIE;) {
110 if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
111 Parser.Note(*PI++, ".personality was specified here");
112 else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
113 Parser.Note(*PII++, ".personalityindex was specified here");
114 else
115 llvm_unreachable(".personality and .personalityindex cannot be "
116 "at the same location");
117 }
118 }
119
reset()120 void reset() {
121 FnStartLocs = Locs();
122 CantUnwindLocs = Locs();
123 PersonalityLocs = Locs();
124 HandlerDataLocs = Locs();
125 PersonalityIndexLocs = Locs();
126 FPReg = ARM::SP;
127 }
128 };
129
130 class ARMAsmParser : public MCTargetAsmParser {
131 MCSubtargetInfo &STI;
132 const MCInstrInfo &MII;
133 const MCRegisterInfo *MRI;
134 UnwindContext UC;
135
getTargetStreamer()136 ARMTargetStreamer &getTargetStreamer() {
137 assert(getParser().getStreamer().getTargetStreamer() &&
138 "do not have a target streamer");
139 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
140 return static_cast<ARMTargetStreamer &>(TS);
141 }
142
143 // Map of register aliases registers via the .req directive.
144 StringMap<unsigned> RegisterReqs;
145
146 bool NextSymbolIsThumb;
147
148 struct {
149 ARMCC::CondCodes Cond; // Condition for IT block.
150 unsigned Mask:4; // Condition mask for instructions.
151 // Starting at first 1 (from lsb).
152 // '1' condition as indicated in IT.
153 // '0' inverse of condition (else).
154 // Count of instructions in IT block is
155 // 4 - trailingzeroes(mask)
156
157 bool FirstCond; // Explicit flag for when we're parsing the
158 // First instruction in the IT block. It's
159 // implied in the mask, so needs special
160 // handling.
161
162 unsigned CurPosition; // Current position in parsing of IT
163 // block. In range [0,3]. Initialized
164 // according to count of instructions in block.
165 // ~0U if no active IT block.
166 } ITState;
inITBlock()167 bool inITBlock() { return ITState.CurPosition != ~0U; }
lastInITBlock()168 bool lastInITBlock() {
169 return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask);
170 }
forwardITPosition()171 void forwardITPosition() {
172 if (!inITBlock()) return;
173 // Move to the next instruction in the IT block, if there is one. If not,
174 // mark the block as done.
175 unsigned TZ = countTrailingZeros(ITState.Mask);
176 if (++ITState.CurPosition == 5 - TZ)
177 ITState.CurPosition = ~0U; // Done with the IT block after this.
178 }
179
Note(SMLoc L,const Twine & Msg,ArrayRef<SMRange> Ranges=None)180 void Note(SMLoc L, const Twine &Msg, ArrayRef<SMRange> Ranges = None) {
181 return getParser().Note(L, Msg, Ranges);
182 }
Warning(SMLoc L,const Twine & Msg,ArrayRef<SMRange> Ranges=None)183 bool Warning(SMLoc L, const Twine &Msg,
184 ArrayRef<SMRange> Ranges = None) {
185 return getParser().Warning(L, Msg, Ranges);
186 }
Error(SMLoc L,const Twine & Msg,ArrayRef<SMRange> Ranges=None)187 bool Error(SMLoc L, const Twine &Msg,
188 ArrayRef<SMRange> Ranges = None) {
189 return getParser().Error(L, Msg, Ranges);
190 }
191
192 bool validatetLDMRegList(MCInst Inst, const OperandVector &Operands,
193 unsigned ListNo, bool IsARPop = false);
194 bool validatetSTMRegList(MCInst Inst, const OperandVector &Operands,
195 unsigned ListNo);
196
197 int tryParseRegister();
198 bool tryParseRegisterWithWriteBack(OperandVector &);
199 int tryParseShiftRegister(OperandVector &);
200 bool parseRegisterList(OperandVector &);
201 bool parseMemory(OperandVector &);
202 bool parseOperand(OperandVector &, StringRef Mnemonic);
203 bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
204 bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
205 unsigned &ShiftAmount);
206 bool parseLiteralValues(unsigned Size, SMLoc L);
207 bool parseDirectiveThumb(SMLoc L);
208 bool parseDirectiveARM(SMLoc L);
209 bool parseDirectiveThumbFunc(SMLoc L);
210 bool parseDirectiveCode(SMLoc L);
211 bool parseDirectiveSyntax(SMLoc L);
212 bool parseDirectiveReq(StringRef Name, SMLoc L);
213 bool parseDirectiveUnreq(SMLoc L);
214 bool parseDirectiveArch(SMLoc L);
215 bool parseDirectiveEabiAttr(SMLoc L);
216 bool parseDirectiveCPU(SMLoc L);
217 bool parseDirectiveFPU(SMLoc L);
218 bool parseDirectiveFnStart(SMLoc L);
219 bool parseDirectiveFnEnd(SMLoc L);
220 bool parseDirectiveCantUnwind(SMLoc L);
221 bool parseDirectivePersonality(SMLoc L);
222 bool parseDirectiveHandlerData(SMLoc L);
223 bool parseDirectiveSetFP(SMLoc L);
224 bool parseDirectivePad(SMLoc L);
225 bool parseDirectiveRegSave(SMLoc L, bool IsVector);
226 bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
227 bool parseDirectiveLtorg(SMLoc L);
228 bool parseDirectiveEven(SMLoc L);
229 bool parseDirectivePersonalityIndex(SMLoc L);
230 bool parseDirectiveUnwindRaw(SMLoc L);
231 bool parseDirectiveTLSDescSeq(SMLoc L);
232 bool parseDirectiveMovSP(SMLoc L);
233 bool parseDirectiveObjectArch(SMLoc L);
234 bool parseDirectiveArchExtension(SMLoc L);
235 bool parseDirectiveAlign(SMLoc L);
236 bool parseDirectiveThumbSet(SMLoc L);
237
238 StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
239 bool &CarrySetting, unsigned &ProcessorIMod,
240 StringRef &ITMask);
241 void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
242 bool &CanAcceptCarrySet,
243 bool &CanAcceptPredicationCode);
244
isThumb() const245 bool isThumb() const {
246 // FIXME: Can tablegen auto-generate this?
247 return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
248 }
isThumbOne() const249 bool isThumbOne() const {
250 return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
251 }
isThumbTwo() const252 bool isThumbTwo() const {
253 return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
254 }
hasThumb() const255 bool hasThumb() const {
256 return STI.getFeatureBits() & ARM::HasV4TOps;
257 }
hasV6Ops() const258 bool hasV6Ops() const {
259 return STI.getFeatureBits() & ARM::HasV6Ops;
260 }
hasV6MOps() const261 bool hasV6MOps() const {
262 return STI.getFeatureBits() & ARM::HasV6MOps;
263 }
hasV7Ops() const264 bool hasV7Ops() const {
265 return STI.getFeatureBits() & ARM::HasV7Ops;
266 }
hasV8Ops() const267 bool hasV8Ops() const {
268 return STI.getFeatureBits() & ARM::HasV8Ops;
269 }
hasARM() const270 bool hasARM() const {
271 return !(STI.getFeatureBits() & ARM::FeatureNoARM);
272 }
hasThumb2DSP() const273 bool hasThumb2DSP() const {
274 return STI.getFeatureBits() & ARM::FeatureDSPThumb2;
275 }
hasD16() const276 bool hasD16() const {
277 return STI.getFeatureBits() & ARM::FeatureD16;
278 }
hasV8_1aOps() const279 bool hasV8_1aOps() const {
280 return STI.getFeatureBits() & ARM::HasV8_1aOps;
281 }
282
SwitchMode()283 void SwitchMode() {
284 uint64_t FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
285 setAvailableFeatures(FB);
286 }
isMClass() const287 bool isMClass() const {
288 return STI.getFeatureBits() & ARM::FeatureMClass;
289 }
290
291 /// @name Auto-generated Match Functions
292 /// {
293
294 #define GET_ASSEMBLER_HEADER
295 #include "ARMGenAsmMatcher.inc"
296
297 /// }
298
299 OperandMatchResultTy parseITCondCode(OperandVector &);
300 OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
301 OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
302 OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
303 OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
304 OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
305 OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
306 OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
307 OperandMatchResultTy parseBankedRegOperand(OperandVector &);
308 OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
309 int High);
parsePKHLSLImm(OperandVector & O)310 OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
311 return parsePKHImm(O, "lsl", 0, 31);
312 }
parsePKHASRImm(OperandVector & O)313 OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
314 return parsePKHImm(O, "asr", 1, 32);
315 }
316 OperandMatchResultTy parseSetEndImm(OperandVector &);
317 OperandMatchResultTy parseShifterImm(OperandVector &);
318 OperandMatchResultTy parseRotImm(OperandVector &);
319 OperandMatchResultTy parseModImm(OperandVector &);
320 OperandMatchResultTy parseBitfield(OperandVector &);
321 OperandMatchResultTy parsePostIdxReg(OperandVector &);
322 OperandMatchResultTy parseAM3Offset(OperandVector &);
323 OperandMatchResultTy parseFPImm(OperandVector &);
324 OperandMatchResultTy parseVectorList(OperandVector &);
325 OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
326 SMLoc &EndLoc);
327
328 // Asm Match Converter Methods
329 void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
330 void cvtThumbBranches(MCInst &Inst, const OperandVector &);
331
332 bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
333 bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
334 bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
335 bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
336
337 public:
338 enum ARMMatchResultTy {
339 Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
340 Match_RequiresNotITBlock,
341 Match_RequiresV6,
342 Match_RequiresThumb2,
343 #define GET_OPERAND_DIAGNOSTIC_TYPES
344 #include "ARMGenAsmMatcher.inc"
345
346 };
347
ARMAsmParser(MCSubtargetInfo & STI,MCAsmParser & Parser,const MCInstrInfo & MII,const MCTargetOptions & Options)348 ARMAsmParser(MCSubtargetInfo &STI, MCAsmParser &Parser,
349 const MCInstrInfo &MII, const MCTargetOptions &Options)
350 : STI(STI), MII(MII), UC(Parser) {
351 MCAsmParserExtension::Initialize(Parser);
352
353 // Cache the MCRegisterInfo.
354 MRI = getContext().getRegisterInfo();
355
356 // Initialize the set of available features.
357 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
358
359 // Not in an ITBlock to start with.
360 ITState.CurPosition = ~0U;
361
362 NextSymbolIsThumb = false;
363 }
364
365 // Implementation of the MCTargetAsmParser interface:
366 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
367 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
368 SMLoc NameLoc, OperandVector &Operands) override;
369 bool ParseDirective(AsmToken DirectiveID) override;
370
371 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
372 unsigned Kind) override;
373 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
374
375 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
376 OperandVector &Operands, MCStreamer &Out,
377 uint64_t &ErrorInfo,
378 bool MatchingInlineAsm) override;
379 void onLabelParsed(MCSymbol *Symbol) override;
380 };
381 } // end anonymous namespace
382
383 namespace {
384
385 /// ARMOperand - Instances of this class represent a parsed ARM machine
386 /// operand.
387 class ARMOperand : public MCParsedAsmOperand {
388 enum KindTy {
389 k_CondCode,
390 k_CCOut,
391 k_ITCondMask,
392 k_CoprocNum,
393 k_CoprocReg,
394 k_CoprocOption,
395 k_Immediate,
396 k_MemBarrierOpt,
397 k_InstSyncBarrierOpt,
398 k_Memory,
399 k_PostIndexRegister,
400 k_MSRMask,
401 k_BankedReg,
402 k_ProcIFlags,
403 k_VectorIndex,
404 k_Register,
405 k_RegisterList,
406 k_DPRRegisterList,
407 k_SPRRegisterList,
408 k_VectorList,
409 k_VectorListAllLanes,
410 k_VectorListIndexed,
411 k_ShiftedRegister,
412 k_ShiftedImmediate,
413 k_ShifterImmediate,
414 k_RotateImmediate,
415 k_ModifiedImmediate,
416 k_BitfieldDescriptor,
417 k_Token
418 } Kind;
419
420 SMLoc StartLoc, EndLoc, AlignmentLoc;
421 SmallVector<unsigned, 8> Registers;
422
423 struct CCOp {
424 ARMCC::CondCodes Val;
425 };
426
427 struct CopOp {
428 unsigned Val;
429 };
430
431 struct CoprocOptionOp {
432 unsigned Val;
433 };
434
435 struct ITMaskOp {
436 unsigned Mask:4;
437 };
438
439 struct MBOptOp {
440 ARM_MB::MemBOpt Val;
441 };
442
443 struct ISBOptOp {
444 ARM_ISB::InstSyncBOpt Val;
445 };
446
447 struct IFlagsOp {
448 ARM_PROC::IFlags Val;
449 };
450
451 struct MMaskOp {
452 unsigned Val;
453 };
454
455 struct BankedRegOp {
456 unsigned Val;
457 };
458
459 struct TokOp {
460 const char *Data;
461 unsigned Length;
462 };
463
464 struct RegOp {
465 unsigned RegNum;
466 };
467
468 // A vector register list is a sequential list of 1 to 4 registers.
469 struct VectorListOp {
470 unsigned RegNum;
471 unsigned Count;
472 unsigned LaneIndex;
473 bool isDoubleSpaced;
474 };
475
476 struct VectorIndexOp {
477 unsigned Val;
478 };
479
480 struct ImmOp {
481 const MCExpr *Val;
482 };
483
484 /// Combined record for all forms of ARM address expressions.
485 struct MemoryOp {
486 unsigned BaseRegNum;
487 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
488 // was specified.
489 const MCConstantExpr *OffsetImm; // Offset immediate value
490 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL
491 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
492 unsigned ShiftImm; // shift for OffsetReg.
493 unsigned Alignment; // 0 = no alignment specified
494 // n = alignment in bytes (2, 4, 8, 16, or 32)
495 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit)
496 };
497
498 struct PostIdxRegOp {
499 unsigned RegNum;
500 bool isAdd;
501 ARM_AM::ShiftOpc ShiftTy;
502 unsigned ShiftImm;
503 };
504
505 struct ShifterImmOp {
506 bool isASR;
507 unsigned Imm;
508 };
509
510 struct RegShiftedRegOp {
511 ARM_AM::ShiftOpc ShiftTy;
512 unsigned SrcReg;
513 unsigned ShiftReg;
514 unsigned ShiftImm;
515 };
516
517 struct RegShiftedImmOp {
518 ARM_AM::ShiftOpc ShiftTy;
519 unsigned SrcReg;
520 unsigned ShiftImm;
521 };
522
523 struct RotImmOp {
524 unsigned Imm;
525 };
526
527 struct ModImmOp {
528 unsigned Bits;
529 unsigned Rot;
530 };
531
532 struct BitfieldOp {
533 unsigned LSB;
534 unsigned Width;
535 };
536
537 union {
538 struct CCOp CC;
539 struct CopOp Cop;
540 struct CoprocOptionOp CoprocOption;
541 struct MBOptOp MBOpt;
542 struct ISBOptOp ISBOpt;
543 struct ITMaskOp ITMask;
544 struct IFlagsOp IFlags;
545 struct MMaskOp MMask;
546 struct BankedRegOp BankedReg;
547 struct TokOp Tok;
548 struct RegOp Reg;
549 struct VectorListOp VectorList;
550 struct VectorIndexOp VectorIndex;
551 struct ImmOp Imm;
552 struct MemoryOp Memory;
553 struct PostIdxRegOp PostIdxReg;
554 struct ShifterImmOp ShifterImm;
555 struct RegShiftedRegOp RegShiftedReg;
556 struct RegShiftedImmOp RegShiftedImm;
557 struct RotImmOp RotImm;
558 struct ModImmOp ModImm;
559 struct BitfieldOp Bitfield;
560 };
561
562 public:
ARMOperand(KindTy K)563 ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
ARMOperand(const ARMOperand & o)564 ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
565 Kind = o.Kind;
566 StartLoc = o.StartLoc;
567 EndLoc = o.EndLoc;
568 switch (Kind) {
569 case k_CondCode:
570 CC = o.CC;
571 break;
572 case k_ITCondMask:
573 ITMask = o.ITMask;
574 break;
575 case k_Token:
576 Tok = o.Tok;
577 break;
578 case k_CCOut:
579 case k_Register:
580 Reg = o.Reg;
581 break;
582 case k_RegisterList:
583 case k_DPRRegisterList:
584 case k_SPRRegisterList:
585 Registers = o.Registers;
586 break;
587 case k_VectorList:
588 case k_VectorListAllLanes:
589 case k_VectorListIndexed:
590 VectorList = o.VectorList;
591 break;
592 case k_CoprocNum:
593 case k_CoprocReg:
594 Cop = o.Cop;
595 break;
596 case k_CoprocOption:
597 CoprocOption = o.CoprocOption;
598 break;
599 case k_Immediate:
600 Imm = o.Imm;
601 break;
602 case k_MemBarrierOpt:
603 MBOpt = o.MBOpt;
604 break;
605 case k_InstSyncBarrierOpt:
606 ISBOpt = o.ISBOpt;
607 case k_Memory:
608 Memory = o.Memory;
609 break;
610 case k_PostIndexRegister:
611 PostIdxReg = o.PostIdxReg;
612 break;
613 case k_MSRMask:
614 MMask = o.MMask;
615 break;
616 case k_BankedReg:
617 BankedReg = o.BankedReg;
618 break;
619 case k_ProcIFlags:
620 IFlags = o.IFlags;
621 break;
622 case k_ShifterImmediate:
623 ShifterImm = o.ShifterImm;
624 break;
625 case k_ShiftedRegister:
626 RegShiftedReg = o.RegShiftedReg;
627 break;
628 case k_ShiftedImmediate:
629 RegShiftedImm = o.RegShiftedImm;
630 break;
631 case k_RotateImmediate:
632 RotImm = o.RotImm;
633 break;
634 case k_ModifiedImmediate:
635 ModImm = o.ModImm;
636 break;
637 case k_BitfieldDescriptor:
638 Bitfield = o.Bitfield;
639 break;
640 case k_VectorIndex:
641 VectorIndex = o.VectorIndex;
642 break;
643 }
644 }
645
646 /// getStartLoc - Get the location of the first token of this operand.
getStartLoc() const647 SMLoc getStartLoc() const override { return StartLoc; }
648 /// getEndLoc - Get the location of the last token of this operand.
getEndLoc() const649 SMLoc getEndLoc() const override { return EndLoc; }
650 /// getLocRange - Get the range between the first and last token of this
651 /// operand.
getLocRange() const652 SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
653
654 /// getAlignmentLoc - Get the location of the Alignment token of this operand.
getAlignmentLoc() const655 SMLoc getAlignmentLoc() const {
656 assert(Kind == k_Memory && "Invalid access!");
657 return AlignmentLoc;
658 }
659
getCondCode() const660 ARMCC::CondCodes getCondCode() const {
661 assert(Kind == k_CondCode && "Invalid access!");
662 return CC.Val;
663 }
664
getCoproc() const665 unsigned getCoproc() const {
666 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
667 return Cop.Val;
668 }
669
getToken() const670 StringRef getToken() const {
671 assert(Kind == k_Token && "Invalid access!");
672 return StringRef(Tok.Data, Tok.Length);
673 }
674
getReg() const675 unsigned getReg() const override {
676 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
677 return Reg.RegNum;
678 }
679
getRegList() const680 const SmallVectorImpl<unsigned> &getRegList() const {
681 assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
682 Kind == k_SPRRegisterList) && "Invalid access!");
683 return Registers;
684 }
685
getImm() const686 const MCExpr *getImm() const {
687 assert(isImm() && "Invalid access!");
688 return Imm.Val;
689 }
690
getVectorIndex() const691 unsigned getVectorIndex() const {
692 assert(Kind == k_VectorIndex && "Invalid access!");
693 return VectorIndex.Val;
694 }
695
getMemBarrierOpt() const696 ARM_MB::MemBOpt getMemBarrierOpt() const {
697 assert(Kind == k_MemBarrierOpt && "Invalid access!");
698 return MBOpt.Val;
699 }
700
getInstSyncBarrierOpt() const701 ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
702 assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
703 return ISBOpt.Val;
704 }
705
getProcIFlags() const706 ARM_PROC::IFlags getProcIFlags() const {
707 assert(Kind == k_ProcIFlags && "Invalid access!");
708 return IFlags.Val;
709 }
710
getMSRMask() const711 unsigned getMSRMask() const {
712 assert(Kind == k_MSRMask && "Invalid access!");
713 return MMask.Val;
714 }
715
getBankedReg() const716 unsigned getBankedReg() const {
717 assert(Kind == k_BankedReg && "Invalid access!");
718 return BankedReg.Val;
719 }
720
isCoprocNum() const721 bool isCoprocNum() const { return Kind == k_CoprocNum; }
isCoprocReg() const722 bool isCoprocReg() const { return Kind == k_CoprocReg; }
isCoprocOption() const723 bool isCoprocOption() const { return Kind == k_CoprocOption; }
isCondCode() const724 bool isCondCode() const { return Kind == k_CondCode; }
isCCOut() const725 bool isCCOut() const { return Kind == k_CCOut; }
isITMask() const726 bool isITMask() const { return Kind == k_ITCondMask; }
isITCondCode() const727 bool isITCondCode() const { return Kind == k_CondCode; }
isImm() const728 bool isImm() const override { return Kind == k_Immediate; }
729 // checks whether this operand is an unsigned offset which fits is a field
730 // of specified width and scaled by a specific number of bits
731 template<unsigned width, unsigned scale>
isUnsignedOffset() const732 bool isUnsignedOffset() const {
733 if (!isImm()) return false;
734 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
735 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
736 int64_t Val = CE->getValue();
737 int64_t Align = 1LL << scale;
738 int64_t Max = Align * ((1LL << width) - 1);
739 return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
740 }
741 return false;
742 }
743 // checks whether this operand is an signed offset which fits is a field
744 // of specified width and scaled by a specific number of bits
745 template<unsigned width, unsigned scale>
isSignedOffset() const746 bool isSignedOffset() const {
747 if (!isImm()) return false;
748 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
749 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
750 int64_t Val = CE->getValue();
751 int64_t Align = 1LL << scale;
752 int64_t Max = Align * ((1LL << (width-1)) - 1);
753 int64_t Min = -Align * (1LL << (width-1));
754 return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
755 }
756 return false;
757 }
758
759 // checks whether this operand is a memory operand computed as an offset
760 // applied to PC. the offset may have 8 bits of magnitude and is represented
761 // with two bits of shift. textually it may be either [pc, #imm], #imm or
762 // relocable expression...
isThumbMemPC() const763 bool isThumbMemPC() const {
764 int64_t Val = 0;
765 if (isImm()) {
766 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
767 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
768 if (!CE) return false;
769 Val = CE->getValue();
770 }
771 else if (isMem()) {
772 if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
773 if(Memory.BaseRegNum != ARM::PC) return false;
774 Val = Memory.OffsetImm->getValue();
775 }
776 else return false;
777 return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
778 }
isFPImm() const779 bool isFPImm() const {
780 if (!isImm()) return false;
781 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
782 if (!CE) return false;
783 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
784 return Val != -1;
785 }
isFBits16() const786 bool isFBits16() const {
787 if (!isImm()) return false;
788 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
789 if (!CE) return false;
790 int64_t Value = CE->getValue();
791 return Value >= 0 && Value <= 16;
792 }
isFBits32() const793 bool isFBits32() const {
794 if (!isImm()) return false;
795 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
796 if (!CE) return false;
797 int64_t Value = CE->getValue();
798 return Value >= 1 && Value <= 32;
799 }
isImm8s4() const800 bool isImm8s4() const {
801 if (!isImm()) return false;
802 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
803 if (!CE) return false;
804 int64_t Value = CE->getValue();
805 return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
806 }
isImm0_1020s4() const807 bool isImm0_1020s4() const {
808 if (!isImm()) return false;
809 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
810 if (!CE) return false;
811 int64_t Value = CE->getValue();
812 return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
813 }
isImm0_508s4() const814 bool isImm0_508s4() const {
815 if (!isImm()) return false;
816 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
817 if (!CE) return false;
818 int64_t Value = CE->getValue();
819 return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
820 }
isImm0_508s4Neg() const821 bool isImm0_508s4Neg() const {
822 if (!isImm()) return false;
823 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
824 if (!CE) return false;
825 int64_t Value = -CE->getValue();
826 // explicitly exclude zero. we want that to use the normal 0_508 version.
827 return ((Value & 3) == 0) && Value > 0 && Value <= 508;
828 }
isImm0_239() const829 bool isImm0_239() const {
830 if (!isImm()) return false;
831 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
832 if (!CE) return false;
833 int64_t Value = CE->getValue();
834 return Value >= 0 && Value < 240;
835 }
isImm0_255() const836 bool isImm0_255() const {
837 if (!isImm()) return false;
838 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
839 if (!CE) return false;
840 int64_t Value = CE->getValue();
841 return Value >= 0 && Value < 256;
842 }
isImm0_4095() const843 bool isImm0_4095() const {
844 if (!isImm()) return false;
845 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
846 if (!CE) return false;
847 int64_t Value = CE->getValue();
848 return Value >= 0 && Value < 4096;
849 }
isImm0_4095Neg() const850 bool isImm0_4095Neg() const {
851 if (!isImm()) return false;
852 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
853 if (!CE) return false;
854 int64_t Value = -CE->getValue();
855 return Value > 0 && Value < 4096;
856 }
isImm0_1() const857 bool isImm0_1() const {
858 if (!isImm()) return false;
859 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
860 if (!CE) return false;
861 int64_t Value = CE->getValue();
862 return Value >= 0 && Value < 2;
863 }
isImm0_3() const864 bool isImm0_3() const {
865 if (!isImm()) return false;
866 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
867 if (!CE) return false;
868 int64_t Value = CE->getValue();
869 return Value >= 0 && Value < 4;
870 }
isImm0_7() const871 bool isImm0_7() const {
872 if (!isImm()) return false;
873 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
874 if (!CE) return false;
875 int64_t Value = CE->getValue();
876 return Value >= 0 && Value < 8;
877 }
isImm0_15() const878 bool isImm0_15() const {
879 if (!isImm()) return false;
880 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
881 if (!CE) return false;
882 int64_t Value = CE->getValue();
883 return Value >= 0 && Value < 16;
884 }
isImm0_31() const885 bool isImm0_31() const {
886 if (!isImm()) return false;
887 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
888 if (!CE) return false;
889 int64_t Value = CE->getValue();
890 return Value >= 0 && Value < 32;
891 }
isImm0_63() const892 bool isImm0_63() const {
893 if (!isImm()) return false;
894 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
895 if (!CE) return false;
896 int64_t Value = CE->getValue();
897 return Value >= 0 && Value < 64;
898 }
isImm8() const899 bool isImm8() const {
900 if (!isImm()) return false;
901 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
902 if (!CE) return false;
903 int64_t Value = CE->getValue();
904 return Value == 8;
905 }
isImm16() const906 bool isImm16() const {
907 if (!isImm()) return false;
908 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
909 if (!CE) return false;
910 int64_t Value = CE->getValue();
911 return Value == 16;
912 }
isImm32() const913 bool isImm32() const {
914 if (!isImm()) return false;
915 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
916 if (!CE) return false;
917 int64_t Value = CE->getValue();
918 return Value == 32;
919 }
isShrImm8() const920 bool isShrImm8() const {
921 if (!isImm()) return false;
922 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
923 if (!CE) return false;
924 int64_t Value = CE->getValue();
925 return Value > 0 && Value <= 8;
926 }
isShrImm16() const927 bool isShrImm16() const {
928 if (!isImm()) return false;
929 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
930 if (!CE) return false;
931 int64_t Value = CE->getValue();
932 return Value > 0 && Value <= 16;
933 }
isShrImm32() const934 bool isShrImm32() const {
935 if (!isImm()) return false;
936 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
937 if (!CE) return false;
938 int64_t Value = CE->getValue();
939 return Value > 0 && Value <= 32;
940 }
isShrImm64() const941 bool isShrImm64() const {
942 if (!isImm()) return false;
943 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
944 if (!CE) return false;
945 int64_t Value = CE->getValue();
946 return Value > 0 && Value <= 64;
947 }
isImm1_7() const948 bool isImm1_7() const {
949 if (!isImm()) return false;
950 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
951 if (!CE) return false;
952 int64_t Value = CE->getValue();
953 return Value > 0 && Value < 8;
954 }
isImm1_15() const955 bool isImm1_15() const {
956 if (!isImm()) return false;
957 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
958 if (!CE) return false;
959 int64_t Value = CE->getValue();
960 return Value > 0 && Value < 16;
961 }
isImm1_31() const962 bool isImm1_31() const {
963 if (!isImm()) return false;
964 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
965 if (!CE) return false;
966 int64_t Value = CE->getValue();
967 return Value > 0 && Value < 32;
968 }
isImm1_16() const969 bool isImm1_16() const {
970 if (!isImm()) return false;
971 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
972 if (!CE) return false;
973 int64_t Value = CE->getValue();
974 return Value > 0 && Value < 17;
975 }
isImm1_32() const976 bool isImm1_32() const {
977 if (!isImm()) return false;
978 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
979 if (!CE) return false;
980 int64_t Value = CE->getValue();
981 return Value > 0 && Value < 33;
982 }
isImm0_32() const983 bool isImm0_32() const {
984 if (!isImm()) return false;
985 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
986 if (!CE) return false;
987 int64_t Value = CE->getValue();
988 return Value >= 0 && Value < 33;
989 }
isImm0_65535() const990 bool isImm0_65535() const {
991 if (!isImm()) return false;
992 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
993 if (!CE) return false;
994 int64_t Value = CE->getValue();
995 return Value >= 0 && Value < 65536;
996 }
isImm256_65535Expr() const997 bool isImm256_65535Expr() const {
998 if (!isImm()) return false;
999 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1000 // If it's not a constant expression, it'll generate a fixup and be
1001 // handled later.
1002 if (!CE) return true;
1003 int64_t Value = CE->getValue();
1004 return Value >= 256 && Value < 65536;
1005 }
isImm0_65535Expr() const1006 bool isImm0_65535Expr() const {
1007 if (!isImm()) return false;
1008 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1009 // If it's not a constant expression, it'll generate a fixup and be
1010 // handled later.
1011 if (!CE) return true;
1012 int64_t Value = CE->getValue();
1013 return Value >= 0 && Value < 65536;
1014 }
isImm24bit() const1015 bool isImm24bit() const {
1016 if (!isImm()) return false;
1017 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1018 if (!CE) return false;
1019 int64_t Value = CE->getValue();
1020 return Value >= 0 && Value <= 0xffffff;
1021 }
isImmThumbSR() const1022 bool isImmThumbSR() const {
1023 if (!isImm()) return false;
1024 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1025 if (!CE) return false;
1026 int64_t Value = CE->getValue();
1027 return Value > 0 && Value < 33;
1028 }
isPKHLSLImm() const1029 bool isPKHLSLImm() const {
1030 if (!isImm()) return false;
1031 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1032 if (!CE) return false;
1033 int64_t Value = CE->getValue();
1034 return Value >= 0 && Value < 32;
1035 }
isPKHASRImm() const1036 bool isPKHASRImm() const {
1037 if (!isImm()) return false;
1038 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1039 if (!CE) return false;
1040 int64_t Value = CE->getValue();
1041 return Value > 0 && Value <= 32;
1042 }
isAdrLabel() const1043 bool isAdrLabel() const {
1044 // If we have an immediate that's not a constant, treat it as a label
1045 // reference needing a fixup.
1046 if (isImm() && !isa<MCConstantExpr>(getImm()))
1047 return true;
1048
1049 // If it is a constant, it must fit into a modified immediate encoding.
1050 if (!isImm()) return false;
1051 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1052 if (!CE) return false;
1053 int64_t Value = CE->getValue();
1054 return (ARM_AM::getSOImmVal(Value) != -1 ||
1055 ARM_AM::getSOImmVal(-Value) != -1);;
1056 }
isT2SOImm() const1057 bool isT2SOImm() const {
1058 if (!isImm()) return false;
1059 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1060 if (!CE) return false;
1061 int64_t Value = CE->getValue();
1062 return ARM_AM::getT2SOImmVal(Value) != -1;
1063 }
isT2SOImmNot() const1064 bool isT2SOImmNot() const {
1065 if (!isImm()) return false;
1066 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1067 if (!CE) return false;
1068 int64_t Value = CE->getValue();
1069 return ARM_AM::getT2SOImmVal(Value) == -1 &&
1070 ARM_AM::getT2SOImmVal(~Value) != -1;
1071 }
isT2SOImmNeg() const1072 bool isT2SOImmNeg() const {
1073 if (!isImm()) return false;
1074 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1075 if (!CE) return false;
1076 int64_t Value = CE->getValue();
1077 // Only use this when not representable as a plain so_imm.
1078 return ARM_AM::getT2SOImmVal(Value) == -1 &&
1079 ARM_AM::getT2SOImmVal(-Value) != -1;
1080 }
isSetEndImm() const1081 bool isSetEndImm() const {
1082 if (!isImm()) return false;
1083 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1084 if (!CE) return false;
1085 int64_t Value = CE->getValue();
1086 return Value == 1 || Value == 0;
1087 }
isReg() const1088 bool isReg() const override { return Kind == k_Register; }
isRegList() const1089 bool isRegList() const { return Kind == k_RegisterList; }
isDPRRegList() const1090 bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
isSPRRegList() const1091 bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
isToken() const1092 bool isToken() const override { return Kind == k_Token; }
isMemBarrierOpt() const1093 bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
isInstSyncBarrierOpt() const1094 bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
isMem() const1095 bool isMem() const override { return Kind == k_Memory; }
isShifterImm() const1096 bool isShifterImm() const { return Kind == k_ShifterImmediate; }
isRegShiftedReg() const1097 bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
isRegShiftedImm() const1098 bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
isRotImm() const1099 bool isRotImm() const { return Kind == k_RotateImmediate; }
isModImm() const1100 bool isModImm() const { return Kind == k_ModifiedImmediate; }
isModImmNot() const1101 bool isModImmNot() const {
1102 if (!isImm()) return false;
1103 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1104 if (!CE) return false;
1105 int64_t Value = CE->getValue();
1106 return ARM_AM::getSOImmVal(~Value) != -1;
1107 }
isModImmNeg() const1108 bool isModImmNeg() const {
1109 if (!isImm()) return false;
1110 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1111 if (!CE) return false;
1112 int64_t Value = CE->getValue();
1113 return ARM_AM::getSOImmVal(Value) == -1 &&
1114 ARM_AM::getSOImmVal(-Value) != -1;
1115 }
isBitfield() const1116 bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
isPostIdxRegShifted() const1117 bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
isPostIdxReg() const1118 bool isPostIdxReg() const {
1119 return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
1120 }
isMemNoOffset(bool alignOK=false,unsigned Alignment=0) const1121 bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
1122 if (!isMem())
1123 return false;
1124 // No offset of any kind.
1125 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1126 (alignOK || Memory.Alignment == Alignment);
1127 }
isMemPCRelImm12() const1128 bool isMemPCRelImm12() const {
1129 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1130 return false;
1131 // Base register must be PC.
1132 if (Memory.BaseRegNum != ARM::PC)
1133 return false;
1134 // Immediate offset in range [-4095, 4095].
1135 if (!Memory.OffsetImm) return true;
1136 int64_t Val = Memory.OffsetImm->getValue();
1137 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1138 }
isAlignedMemory() const1139 bool isAlignedMemory() const {
1140 return isMemNoOffset(true);
1141 }
isAlignedMemoryNone() const1142 bool isAlignedMemoryNone() const {
1143 return isMemNoOffset(false, 0);
1144 }
isDupAlignedMemoryNone() const1145 bool isDupAlignedMemoryNone() const {
1146 return isMemNoOffset(false, 0);
1147 }
isAlignedMemory16() const1148 bool isAlignedMemory16() const {
1149 if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1150 return true;
1151 return isMemNoOffset(false, 0);
1152 }
isDupAlignedMemory16() const1153 bool isDupAlignedMemory16() const {
1154 if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1155 return true;
1156 return isMemNoOffset(false, 0);
1157 }
isAlignedMemory32() const1158 bool isAlignedMemory32() const {
1159 if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1160 return true;
1161 return isMemNoOffset(false, 0);
1162 }
isDupAlignedMemory32() const1163 bool isDupAlignedMemory32() const {
1164 if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1165 return true;
1166 return isMemNoOffset(false, 0);
1167 }
isAlignedMemory64() const1168 bool isAlignedMemory64() const {
1169 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1170 return true;
1171 return isMemNoOffset(false, 0);
1172 }
isDupAlignedMemory64() const1173 bool isDupAlignedMemory64() const {
1174 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1175 return true;
1176 return isMemNoOffset(false, 0);
1177 }
isAlignedMemory64or128() const1178 bool isAlignedMemory64or128() const {
1179 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1180 return true;
1181 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1182 return true;
1183 return isMemNoOffset(false, 0);
1184 }
isDupAlignedMemory64or128() const1185 bool isDupAlignedMemory64or128() const {
1186 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1187 return true;
1188 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1189 return true;
1190 return isMemNoOffset(false, 0);
1191 }
isAlignedMemory64or128or256() const1192 bool isAlignedMemory64or128or256() const {
1193 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1194 return true;
1195 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1196 return true;
1197 if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
1198 return true;
1199 return isMemNoOffset(false, 0);
1200 }
isAddrMode2() const1201 bool isAddrMode2() const {
1202 if (!isMem() || Memory.Alignment != 0) return false;
1203 // Check for register offset.
1204 if (Memory.OffsetRegNum) return true;
1205 // Immediate offset in range [-4095, 4095].
1206 if (!Memory.OffsetImm) return true;
1207 int64_t Val = Memory.OffsetImm->getValue();
1208 return Val > -4096 && Val < 4096;
1209 }
isAM2OffsetImm() const1210 bool isAM2OffsetImm() const {
1211 if (!isImm()) return false;
1212 // Immediate offset in range [-4095, 4095].
1213 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1214 if (!CE) return false;
1215 int64_t Val = CE->getValue();
1216 return (Val == INT32_MIN) || (Val > -4096 && Val < 4096);
1217 }
isAddrMode3() const1218 bool isAddrMode3() const {
1219 // If we have an immediate that's not a constant, treat it as a label
1220 // reference needing a fixup. If it is a constant, it's something else
1221 // and we reject it.
1222 if (isImm() && !isa<MCConstantExpr>(getImm()))
1223 return true;
1224 if (!isMem() || Memory.Alignment != 0) return false;
1225 // No shifts are legal for AM3.
1226 if (Memory.ShiftType != ARM_AM::no_shift) return false;
1227 // Check for register offset.
1228 if (Memory.OffsetRegNum) return true;
1229 // Immediate offset in range [-255, 255].
1230 if (!Memory.OffsetImm) return true;
1231 int64_t Val = Memory.OffsetImm->getValue();
1232 // The #-0 offset is encoded as INT32_MIN, and we have to check
1233 // for this too.
1234 return (Val > -256 && Val < 256) || Val == INT32_MIN;
1235 }
isAM3Offset() const1236 bool isAM3Offset() const {
1237 if (Kind != k_Immediate && Kind != k_PostIndexRegister)
1238 return false;
1239 if (Kind == k_PostIndexRegister)
1240 return PostIdxReg.ShiftTy == ARM_AM::no_shift;
1241 // Immediate offset in range [-255, 255].
1242 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1243 if (!CE) return false;
1244 int64_t Val = CE->getValue();
1245 // Special case, #-0 is INT32_MIN.
1246 return (Val > -256 && Val < 256) || Val == INT32_MIN;
1247 }
isAddrMode5() const1248 bool isAddrMode5() const {
1249 // If we have an immediate that's not a constant, treat it as a label
1250 // reference needing a fixup. If it is a constant, it's something else
1251 // and we reject it.
1252 if (isImm() && !isa<MCConstantExpr>(getImm()))
1253 return true;
1254 if (!isMem() || Memory.Alignment != 0) return false;
1255 // Check for register offset.
1256 if (Memory.OffsetRegNum) return false;
1257 // Immediate offset in range [-1020, 1020] and a multiple of 4.
1258 if (!Memory.OffsetImm) return true;
1259 int64_t Val = Memory.OffsetImm->getValue();
1260 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1261 Val == INT32_MIN;
1262 }
isMemTBB() const1263 bool isMemTBB() const {
1264 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1265 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1266 return false;
1267 return true;
1268 }
isMemTBH() const1269 bool isMemTBH() const {
1270 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1271 Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1272 Memory.Alignment != 0 )
1273 return false;
1274 return true;
1275 }
isMemRegOffset() const1276 bool isMemRegOffset() const {
1277 if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1278 return false;
1279 return true;
1280 }
isT2MemRegOffset() const1281 bool isT2MemRegOffset() const {
1282 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1283 Memory.Alignment != 0)
1284 return false;
1285 // Only lsl #{0, 1, 2, 3} allowed.
1286 if (Memory.ShiftType == ARM_AM::no_shift)
1287 return true;
1288 if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1289 return false;
1290 return true;
1291 }
isMemThumbRR() const1292 bool isMemThumbRR() const {
1293 // Thumb reg+reg addressing is simple. Just two registers, a base and
1294 // an offset. No shifts, negations or any other complicating factors.
1295 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1296 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1297 return false;
1298 return isARMLowRegister(Memory.BaseRegNum) &&
1299 (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
1300 }
isMemThumbRIs4() const1301 bool isMemThumbRIs4() const {
1302 if (!isMem() || Memory.OffsetRegNum != 0 ||
1303 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1304 return false;
1305 // Immediate offset, multiple of 4 in range [0, 124].
1306 if (!Memory.OffsetImm) return true;
1307 int64_t Val = Memory.OffsetImm->getValue();
1308 return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1309 }
isMemThumbRIs2() const1310 bool isMemThumbRIs2() const {
1311 if (!isMem() || Memory.OffsetRegNum != 0 ||
1312 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1313 return false;
1314 // Immediate offset, multiple of 4 in range [0, 62].
1315 if (!Memory.OffsetImm) return true;
1316 int64_t Val = Memory.OffsetImm->getValue();
1317 return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1318 }
isMemThumbRIs1() const1319 bool isMemThumbRIs1() const {
1320 if (!isMem() || Memory.OffsetRegNum != 0 ||
1321 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1322 return false;
1323 // Immediate offset in range [0, 31].
1324 if (!Memory.OffsetImm) return true;
1325 int64_t Val = Memory.OffsetImm->getValue();
1326 return Val >= 0 && Val <= 31;
1327 }
isMemThumbSPI() const1328 bool isMemThumbSPI() const {
1329 if (!isMem() || Memory.OffsetRegNum != 0 ||
1330 Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1331 return false;
1332 // Immediate offset, multiple of 4 in range [0, 1020].
1333 if (!Memory.OffsetImm) return true;
1334 int64_t Val = Memory.OffsetImm->getValue();
1335 return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1336 }
isMemImm8s4Offset() const1337 bool isMemImm8s4Offset() const {
1338 // If we have an immediate that's not a constant, treat it as a label
1339 // reference needing a fixup. If it is a constant, it's something else
1340 // and we reject it.
1341 if (isImm() && !isa<MCConstantExpr>(getImm()))
1342 return true;
1343 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1344 return false;
1345 // Immediate offset a multiple of 4 in range [-1020, 1020].
1346 if (!Memory.OffsetImm) return true;
1347 int64_t Val = Memory.OffsetImm->getValue();
1348 // Special case, #-0 is INT32_MIN.
1349 return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) || Val == INT32_MIN;
1350 }
isMemImm0_1020s4Offset() const1351 bool isMemImm0_1020s4Offset() const {
1352 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1353 return false;
1354 // Immediate offset a multiple of 4 in range [0, 1020].
1355 if (!Memory.OffsetImm) return true;
1356 int64_t Val = Memory.OffsetImm->getValue();
1357 return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1358 }
isMemImm8Offset() const1359 bool isMemImm8Offset() const {
1360 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1361 return false;
1362 // Base reg of PC isn't allowed for these encodings.
1363 if (Memory.BaseRegNum == ARM::PC) return false;
1364 // Immediate offset in range [-255, 255].
1365 if (!Memory.OffsetImm) return true;
1366 int64_t Val = Memory.OffsetImm->getValue();
1367 return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1368 }
isMemPosImm8Offset() const1369 bool isMemPosImm8Offset() const {
1370 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1371 return false;
1372 // Immediate offset in range [0, 255].
1373 if (!Memory.OffsetImm) return true;
1374 int64_t Val = Memory.OffsetImm->getValue();
1375 return Val >= 0 && Val < 256;
1376 }
isMemNegImm8Offset() const1377 bool isMemNegImm8Offset() const {
1378 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1379 return false;
1380 // Base reg of PC isn't allowed for these encodings.
1381 if (Memory.BaseRegNum == ARM::PC) return false;
1382 // Immediate offset in range [-255, -1].
1383 if (!Memory.OffsetImm) return false;
1384 int64_t Val = Memory.OffsetImm->getValue();
1385 return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1386 }
isMemUImm12Offset() const1387 bool isMemUImm12Offset() const {
1388 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1389 return false;
1390 // Immediate offset in range [0, 4095].
1391 if (!Memory.OffsetImm) return true;
1392 int64_t Val = Memory.OffsetImm->getValue();
1393 return (Val >= 0 && Val < 4096);
1394 }
isMemImm12Offset() const1395 bool isMemImm12Offset() const {
1396 // If we have an immediate that's not a constant, treat it as a label
1397 // reference needing a fixup. If it is a constant, it's something else
1398 // and we reject it.
1399 if (isImm() && !isa<MCConstantExpr>(getImm()))
1400 return true;
1401
1402 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1403 return false;
1404 // Immediate offset in range [-4095, 4095].
1405 if (!Memory.OffsetImm) return true;
1406 int64_t Val = Memory.OffsetImm->getValue();
1407 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1408 }
isPostIdxImm8() const1409 bool isPostIdxImm8() const {
1410 if (!isImm()) return false;
1411 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1412 if (!CE) return false;
1413 int64_t Val = CE->getValue();
1414 return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1415 }
isPostIdxImm8s4() const1416 bool isPostIdxImm8s4() const {
1417 if (!isImm()) return false;
1418 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1419 if (!CE) return false;
1420 int64_t Val = CE->getValue();
1421 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1422 (Val == INT32_MIN);
1423 }
1424
isMSRMask() const1425 bool isMSRMask() const { return Kind == k_MSRMask; }
isBankedReg() const1426 bool isBankedReg() const { return Kind == k_BankedReg; }
isProcIFlags() const1427 bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1428
1429 // NEON operands.
isSingleSpacedVectorList() const1430 bool isSingleSpacedVectorList() const {
1431 return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1432 }
isDoubleSpacedVectorList() const1433 bool isDoubleSpacedVectorList() const {
1434 return Kind == k_VectorList && VectorList.isDoubleSpaced;
1435 }
isVecListOneD() const1436 bool isVecListOneD() const {
1437 if (!isSingleSpacedVectorList()) return false;
1438 return VectorList.Count == 1;
1439 }
1440
isVecListDPair() const1441 bool isVecListDPair() const {
1442 if (!isSingleSpacedVectorList()) return false;
1443 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1444 .contains(VectorList.RegNum));
1445 }
1446
isVecListThreeD() const1447 bool isVecListThreeD() const {
1448 if (!isSingleSpacedVectorList()) return false;
1449 return VectorList.Count == 3;
1450 }
1451
isVecListFourD() const1452 bool isVecListFourD() const {
1453 if (!isSingleSpacedVectorList()) return false;
1454 return VectorList.Count == 4;
1455 }
1456
isVecListDPairSpaced() const1457 bool isVecListDPairSpaced() const {
1458 if (Kind != k_VectorList) return false;
1459 if (isSingleSpacedVectorList()) return false;
1460 return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1461 .contains(VectorList.RegNum));
1462 }
1463
isVecListThreeQ() const1464 bool isVecListThreeQ() const {
1465 if (!isDoubleSpacedVectorList()) return false;
1466 return VectorList.Count == 3;
1467 }
1468
isVecListFourQ() const1469 bool isVecListFourQ() const {
1470 if (!isDoubleSpacedVectorList()) return false;
1471 return VectorList.Count == 4;
1472 }
1473
isSingleSpacedVectorAllLanes() const1474 bool isSingleSpacedVectorAllLanes() const {
1475 return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1476 }
isDoubleSpacedVectorAllLanes() const1477 bool isDoubleSpacedVectorAllLanes() const {
1478 return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1479 }
isVecListOneDAllLanes() const1480 bool isVecListOneDAllLanes() const {
1481 if (!isSingleSpacedVectorAllLanes()) return false;
1482 return VectorList.Count == 1;
1483 }
1484
isVecListDPairAllLanes() const1485 bool isVecListDPairAllLanes() const {
1486 if (!isSingleSpacedVectorAllLanes()) return false;
1487 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1488 .contains(VectorList.RegNum));
1489 }
1490
isVecListDPairSpacedAllLanes() const1491 bool isVecListDPairSpacedAllLanes() const {
1492 if (!isDoubleSpacedVectorAllLanes()) return false;
1493 return VectorList.Count == 2;
1494 }
1495
isVecListThreeDAllLanes() const1496 bool isVecListThreeDAllLanes() const {
1497 if (!isSingleSpacedVectorAllLanes()) return false;
1498 return VectorList.Count == 3;
1499 }
1500
isVecListThreeQAllLanes() const1501 bool isVecListThreeQAllLanes() const {
1502 if (!isDoubleSpacedVectorAllLanes()) return false;
1503 return VectorList.Count == 3;
1504 }
1505
isVecListFourDAllLanes() const1506 bool isVecListFourDAllLanes() const {
1507 if (!isSingleSpacedVectorAllLanes()) return false;
1508 return VectorList.Count == 4;
1509 }
1510
isVecListFourQAllLanes() const1511 bool isVecListFourQAllLanes() const {
1512 if (!isDoubleSpacedVectorAllLanes()) return false;
1513 return VectorList.Count == 4;
1514 }
1515
isSingleSpacedVectorIndexed() const1516 bool isSingleSpacedVectorIndexed() const {
1517 return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1518 }
isDoubleSpacedVectorIndexed() const1519 bool isDoubleSpacedVectorIndexed() const {
1520 return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1521 }
isVecListOneDByteIndexed() const1522 bool isVecListOneDByteIndexed() const {
1523 if (!isSingleSpacedVectorIndexed()) return false;
1524 return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1525 }
1526
isVecListOneDHWordIndexed() const1527 bool isVecListOneDHWordIndexed() const {
1528 if (!isSingleSpacedVectorIndexed()) return false;
1529 return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1530 }
1531
isVecListOneDWordIndexed() const1532 bool isVecListOneDWordIndexed() const {
1533 if (!isSingleSpacedVectorIndexed()) return false;
1534 return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1535 }
1536
isVecListTwoDByteIndexed() const1537 bool isVecListTwoDByteIndexed() const {
1538 if (!isSingleSpacedVectorIndexed()) return false;
1539 return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1540 }
1541
isVecListTwoDHWordIndexed() const1542 bool isVecListTwoDHWordIndexed() const {
1543 if (!isSingleSpacedVectorIndexed()) return false;
1544 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1545 }
1546
isVecListTwoQWordIndexed() const1547 bool isVecListTwoQWordIndexed() const {
1548 if (!isDoubleSpacedVectorIndexed()) return false;
1549 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1550 }
1551
isVecListTwoQHWordIndexed() const1552 bool isVecListTwoQHWordIndexed() const {
1553 if (!isDoubleSpacedVectorIndexed()) return false;
1554 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1555 }
1556
isVecListTwoDWordIndexed() const1557 bool isVecListTwoDWordIndexed() const {
1558 if (!isSingleSpacedVectorIndexed()) return false;
1559 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1560 }
1561
isVecListThreeDByteIndexed() const1562 bool isVecListThreeDByteIndexed() const {
1563 if (!isSingleSpacedVectorIndexed()) return false;
1564 return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1565 }
1566
isVecListThreeDHWordIndexed() const1567 bool isVecListThreeDHWordIndexed() const {
1568 if (!isSingleSpacedVectorIndexed()) return false;
1569 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1570 }
1571
isVecListThreeQWordIndexed() const1572 bool isVecListThreeQWordIndexed() const {
1573 if (!isDoubleSpacedVectorIndexed()) return false;
1574 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1575 }
1576
isVecListThreeQHWordIndexed() const1577 bool isVecListThreeQHWordIndexed() const {
1578 if (!isDoubleSpacedVectorIndexed()) return false;
1579 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1580 }
1581
isVecListThreeDWordIndexed() const1582 bool isVecListThreeDWordIndexed() const {
1583 if (!isSingleSpacedVectorIndexed()) return false;
1584 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1585 }
1586
isVecListFourDByteIndexed() const1587 bool isVecListFourDByteIndexed() const {
1588 if (!isSingleSpacedVectorIndexed()) return false;
1589 return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1590 }
1591
isVecListFourDHWordIndexed() const1592 bool isVecListFourDHWordIndexed() const {
1593 if (!isSingleSpacedVectorIndexed()) return false;
1594 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1595 }
1596
isVecListFourQWordIndexed() const1597 bool isVecListFourQWordIndexed() const {
1598 if (!isDoubleSpacedVectorIndexed()) return false;
1599 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1600 }
1601
isVecListFourQHWordIndexed() const1602 bool isVecListFourQHWordIndexed() const {
1603 if (!isDoubleSpacedVectorIndexed()) return false;
1604 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1605 }
1606
isVecListFourDWordIndexed() const1607 bool isVecListFourDWordIndexed() const {
1608 if (!isSingleSpacedVectorIndexed()) return false;
1609 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1610 }
1611
isVectorIndex8() const1612 bool isVectorIndex8() const {
1613 if (Kind != k_VectorIndex) return false;
1614 return VectorIndex.Val < 8;
1615 }
isVectorIndex16() const1616 bool isVectorIndex16() const {
1617 if (Kind != k_VectorIndex) return false;
1618 return VectorIndex.Val < 4;
1619 }
isVectorIndex32() const1620 bool isVectorIndex32() const {
1621 if (Kind != k_VectorIndex) return false;
1622 return VectorIndex.Val < 2;
1623 }
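// Note (added for clarity): the lane bounds in the three predicates above
// correspond to a 64-bit D register holding the scalar: 8 byte lanes,
// 4 halfword lanes, or 2 word lanes.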
1624
isNEONi8splat() const1625 bool isNEONi8splat() const {
1626 if (!isImm()) return false;
1627 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1628 // Must be a constant.
1629 if (!CE) return false;
1630 int64_t Value = CE->getValue();
1631 // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1632 // value.
1633 return Value >= 0 && Value < 256;
1634 }
1635
isNEONi16splat() const1636 bool isNEONi16splat() const {
1637 if (isNEONByteReplicate(2))
1638 return false; // Leave that to the byte-replicate case and forbid it here by default.
1639 if (!isImm())
1640 return false;
1641 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1642 // Must be a constant.
1643 if (!CE) return false;
1644 unsigned Value = CE->getValue();
1645 return ARM_AM::isNEONi16splat(Value);
1646 }
1647
isNEONi16splatNot() const1648 bool isNEONi16splatNot() const {
1649 if (!isImm())
1650 return false;
1651 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1652 // Must be a constant.
1653 if (!CE) return false;
1654 unsigned Value = CE->getValue();
1655 return ARM_AM::isNEONi16splat(~Value & 0xffff);
1656 }
1657
isNEONi32splat() const1658 bool isNEONi32splat() const {
1659 if (isNEONByteReplicate(4))
1660 return false; // Leave that to the byte-replicate case and forbid it here by default.
1661 if (!isImm())
1662 return false;
1663 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1664 // Must be a constant.
1665 if (!CE) return false;
1666 unsigned Value = CE->getValue();
1667 return ARM_AM::isNEONi32splat(Value);
1668 }
1669
isNEONi32splatNot() const1670 bool isNEONi32splatNot() const {
1671 if (!isImm())
1672 return false;
1673 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1674 // Must be a constant.
1675 if (!CE) return false;
1676 unsigned Value = CE->getValue();
1677 return ARM_AM::isNEONi32splat(~Value);
1678 }
1679
isNEONByteReplicate(unsigned NumBytes) const1680 bool isNEONByteReplicate(unsigned NumBytes) const {
1681 if (!isImm())
1682 return false;
1683 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1684 // Must be a constant.
1685 if (!CE)
1686 return false;
1687 int64_t Value = CE->getValue();
1688 if (!Value)
1689 return false; // Don't bother with zero.
1690
1691 unsigned char B = Value & 0xff;
1692 for (unsigned i = 1; i < NumBytes; ++i) {
1693 Value >>= 8;
1694 if ((Value & 0xff) != B)
1695 return false;
1696 }
1697 return true;
1698 }
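// Illustrative example for isNEONByteReplicate() above: with NumBytes == 4,
// Value = 0xABABABAB passes (every byte is 0xAB), while 0x00ABABAB fails on
// the top byte and Value == 0 is rejected up front.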
isNEONi16ByteReplicate() const1699 bool isNEONi16ByteReplicate() const { return isNEONByteReplicate(2); }
isNEONi32ByteReplicate() const1700 bool isNEONi32ByteReplicate() const { return isNEONByteReplicate(4); }
isNEONi32vmov() const1701 bool isNEONi32vmov() const {
1702 if (isNEONByteReplicate(4))
1703 return false; // Let it be classified as the byte-replicate case.
1704 if (!isImm())
1705 return false;
1706 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1707 // Must be a constant.
1708 if (!CE)
1709 return false;
1710 int64_t Value = CE->getValue();
1711 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1712 // for VMOV/VMVN only; 00Xf or 0Xff are also accepted.
1713 // FIXME: This is probably wrong and a copy-and-paste from the previous example.
1714 return (Value >= 0 && Value < 256) ||
1715 (Value >= 0x0100 && Value <= 0xff00) ||
1716 (Value >= 0x010000 && Value <= 0xff0000) ||
1717 (Value >= 0x01000000 && Value <= 0xff000000) ||
1718 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1719 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1720 }
isNEONi32vmovNeg() const1721 bool isNEONi32vmovNeg() const {
1722 if (!isImm()) return false;
1723 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1724 // Must be a constant.
1725 if (!CE) return false;
1726 int64_t Value = ~CE->getValue();
1727 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1728 // for VMOV/VMVN only; 00Xf or 0Xff are also accepted.
1729 // FIXME: This is probably wrong and a copy-and-paste from the previous example.
1730 return (Value >= 0 && Value < 256) ||
1731 (Value >= 0x0100 && Value <= 0xff00) ||
1732 (Value >= 0x010000 && Value <= 0xff0000) ||
1733 (Value >= 0x01000000 && Value <= 0xff000000) ||
1734 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1735 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1736 }
1737
isNEONi64splat() const1738 bool isNEONi64splat() const {
1739 if (!isImm()) return false;
1740 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1741 // Must be a constant.
1742 if (!CE) return false;
1743 uint64_t Value = CE->getValue();
1744 // i64 value with each byte being either 0 or 0xff.
1745 for (unsigned i = 0; i < 8; ++i)
1746 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1747 return true;
1748 }
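// Illustrative example for isNEONi64splat(): 0x00ff00ff00ff00ff qualifies
// (each byte is 0x00 or 0xff), whereas 0x0123456789abcdef does not.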
1749
addExpr(MCInst & Inst,const MCExpr * Expr) const1750 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1751 // Add as immediates when possible. Null MCExpr = 0.
1752 if (!Expr)
1753 Inst.addOperand(MCOperand::CreateImm(0));
1754 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1755 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1756 else
1757 Inst.addOperand(MCOperand::CreateExpr(Expr));
1758 }
1759
addCondCodeOperands(MCInst & Inst,unsigned N) const1760 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1761 assert(N == 2 && "Invalid number of operands!");
1762 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1763 unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1764 Inst.addOperand(MCOperand::CreateReg(RegNum));
1765 }
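// Example of the two operands added above: a conditional form such as "addeq"
// yields <ARMCC::EQ, CPSR>, while an unconditional "add" (AL) yields
// <ARMCC::AL, reg 0>.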
1766
addCoprocNumOperands(MCInst & Inst,unsigned N) const1767 void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1768 assert(N == 1 && "Invalid number of operands!");
1769 Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1770 }
1771
addCoprocRegOperands(MCInst & Inst,unsigned N) const1772 void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1773 assert(N == 1 && "Invalid number of operands!");
1774 Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1775 }
1776
addCoprocOptionOperands(MCInst & Inst,unsigned N) const1777 void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1778 assert(N == 1 && "Invalid number of operands!");
1779 Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1780 }
1781
addITMaskOperands(MCInst & Inst,unsigned N) const1782 void addITMaskOperands(MCInst &Inst, unsigned N) const {
1783 assert(N == 1 && "Invalid number of operands!");
1784 Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1785 }
1786
addITCondCodeOperands(MCInst & Inst,unsigned N) const1787 void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1788 assert(N == 1 && "Invalid number of operands!");
1789 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1790 }
1791
addCCOutOperands(MCInst & Inst,unsigned N) const1792 void addCCOutOperands(MCInst &Inst, unsigned N) const {
1793 assert(N == 1 && "Invalid number of operands!");
1794 Inst.addOperand(MCOperand::CreateReg(getReg()));
1795 }
1796
addRegOperands(MCInst & Inst,unsigned N) const1797 void addRegOperands(MCInst &Inst, unsigned N) const {
1798 assert(N == 1 && "Invalid number of operands!");
1799 Inst.addOperand(MCOperand::CreateReg(getReg()));
1800 }
1801
addRegShiftedRegOperands(MCInst & Inst,unsigned N) const1802 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1803 assert(N == 3 && "Invalid number of operands!");
1804 assert(isRegShiftedReg() &&
1805 "addRegShiftedRegOperands() on non-RegShiftedReg!");
1806 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1807 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1808 Inst.addOperand(MCOperand::CreateImm(
1809 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1810 }
1811
addRegShiftedImmOperands(MCInst & Inst,unsigned N) const1812 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1813 assert(N == 2 && "Invalid number of operands!");
1814 assert(isRegShiftedImm() &&
1815 "addRegShiftedImmOperands() on non-RegShiftedImm!");
1816 Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1817 // Shift of #32 is encoded as 0 where permitted
1818 unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
1819 Inst.addOperand(MCOperand::CreateImm(
1820 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
1821 }
1822
addShifterImmOperands(MCInst & Inst,unsigned N) const1823 void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1824 assert(N == 1 && "Invalid number of operands!");
1825 Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1826 ShifterImm.Imm));
1827 }
1828
addRegListOperands(MCInst & Inst,unsigned N) const1829 void addRegListOperands(MCInst &Inst, unsigned N) const {
1830 assert(N == 1 && "Invalid number of operands!");
1831 const SmallVectorImpl<unsigned> &RegList = getRegList();
1832 for (SmallVectorImpl<unsigned>::const_iterator
1833 I = RegList.begin(), E = RegList.end(); I != E; ++I)
1834 Inst.addOperand(MCOperand::CreateReg(*I));
1835 }
1836
addDPRRegListOperands(MCInst & Inst,unsigned N) const1837 void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1838 addRegListOperands(Inst, N);
1839 }
1840
addSPRRegListOperands(MCInst & Inst,unsigned N) const1841 void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1842 addRegListOperands(Inst, N);
1843 }
1844
addRotImmOperands(MCInst & Inst,unsigned N) const1845 void addRotImmOperands(MCInst &Inst, unsigned N) const {
1846 assert(N == 1 && "Invalid number of operands!");
1847 // Encoded as val>>3. The printer handles display as 8, 16, 24.
1848 Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1849 }
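// Example: RotImm.Imm == 16 is stored as 16 >> 3 == 2 in the MCInst; the
// printer expands it back to 16 for display.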
1850
addModImmOperands(MCInst & Inst,unsigned N) const1851 void addModImmOperands(MCInst &Inst, unsigned N) const {
1852 assert(N == 1 && "Invalid number of operands!");
1853
1854 // Support for fixups (MCFixup)
1855 if (isImm())
1856 return addImmOperands(Inst, N);
1857
1858 Inst.addOperand(MCOperand::CreateImm(ModImm.Bits | (ModImm.Rot << 7)));
1859 }
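// Worked example (assuming ModImm.Rot holds the even rotate-right amount,
// 0-30): the A32 modified immediate 0x3f000000 is Bits = 0x3f rotated right
// by 8, so the operand becomes 0x3f | (8 << 7) == 0x43f.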
1860
addModImmNotOperands(MCInst & Inst,unsigned N) const1861 void addModImmNotOperands(MCInst &Inst, unsigned N) const {
1862 assert(N == 1 && "Invalid number of operands!");
1863 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1864 uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
1865 Inst.addOperand(MCOperand::CreateImm(Enc));
1866 }
1867
addModImmNegOperands(MCInst & Inst,unsigned N) const1868 void addModImmNegOperands(MCInst &Inst, unsigned N) const {
1869 assert(N == 1 && "Invalid number of operands!");
1870 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1871 uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
1872 Inst.addOperand(MCOperand::CreateImm(Enc));
1873 }
1874
addBitfieldOperands(MCInst & Inst,unsigned N) const1875 void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1876 assert(N == 1 && "Invalid number of operands!");
1877 // Munge the lsb/width into a bitfield mask.
1878 unsigned lsb = Bitfield.LSB;
1879 unsigned width = Bitfield.Width;
1880 // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1881 uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1882 (32 - (lsb + width)));
1883 Inst.addOperand(MCOperand::CreateImm(Mask));
1884 }
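// Worked example: lsb = 8, width = 4 gives
// Mask = ~(((0xffffffff >> 8) << 28) >> 20) == 0xfffff0ff, i.e. bits [11:8]
// clear and all other bits set (the inverted-mask form used by, e.g., BFC/BFI).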
1885
addImmOperands(MCInst & Inst,unsigned N) const1886 void addImmOperands(MCInst &Inst, unsigned N) const {
1887 assert(N == 1 && "Invalid number of operands!");
1888 addExpr(Inst, getImm());
1889 }
1890
addFBits16Operands(MCInst & Inst,unsigned N) const1891 void addFBits16Operands(MCInst &Inst, unsigned N) const {
1892 assert(N == 1 && "Invalid number of operands!");
1893 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1894 Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1895 }
1896
addFBits32Operands(MCInst & Inst,unsigned N) const1897 void addFBits32Operands(MCInst &Inst, unsigned N) const {
1898 assert(N == 1 && "Invalid number of operands!");
1899 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1900 Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1901 }
1902
addFPImmOperands(MCInst & Inst,unsigned N) const1903 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1904 assert(N == 1 && "Invalid number of operands!");
1905 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1906 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1907 Inst.addOperand(MCOperand::CreateImm(Val));
1908 }
1909
addImm8s4Operands(MCInst & Inst,unsigned N) const1910 void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1911 assert(N == 1 && "Invalid number of operands!");
1912 // FIXME: We really want to scale the value here, but the LDRD/STRD
1913 // instructions don't encode operands that way yet.
1914 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1915 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1916 }
1917
addImm0_1020s4Operands(MCInst & Inst,unsigned N) const1918 void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1919 assert(N == 1 && "Invalid number of operands!");
1920 // The immediate is scaled by four in the encoding and is stored
1921 // in the MCInst as such. Lop off the low two bits here.
1922 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1923 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1924 }
1925
addImm0_508s4NegOperands(MCInst & Inst,unsigned N) const1926 void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
1927 assert(N == 1 && "Invalid number of operands!");
1928 // The immediate is scaled by four in the encoding and is stored
1929 // in the MCInst as such. Lop off the low two bits here.
1930 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1931 Inst.addOperand(MCOperand::CreateImm(-(CE->getValue() / 4)));
1932 }
1933
addImm0_508s4Operands(MCInst & Inst,unsigned N) const1934 void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1935 assert(N == 1 && "Invalid number of operands!");
1936 // The immediate is scaled by four in the encoding and is stored
1937 // in the MCInst as such. Lop off the low two bits here.
1938 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1939 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1940 }
1941
addImm1_16Operands(MCInst & Inst,unsigned N) const1942 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1943 assert(N == 1 && "Invalid number of operands!");
1944 // The constant encodes as the immediate-1, and we store in the instruction
1945 // the bits as encoded, so subtract off one here.
1946 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1947 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1948 }
1949
addImm1_32Operands(MCInst & Inst,unsigned N) const1950 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1951 assert(N == 1 && "Invalid number of operands!");
1952 // The constant encodes as the immediate-1, and we store in the instruction
1953 // the bits as encoded, so subtract off one here.
1954 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1955 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1956 }
1957
addImmThumbSROperands(MCInst & Inst,unsigned N) const1958 void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1959 assert(N == 1 && "Invalid number of operands!");
1960 // The constant encodes as the immediate, except for 32, which encodes as
1961 // zero.
1962 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1963 unsigned Imm = CE->getValue();
1964 Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1965 }
1966
addPKHASRImmOperands(MCInst & Inst,unsigned N) const1967 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1968 assert(N == 1 && "Invalid number of operands!");
1969 // An ASR value of 32 encodes as 0, so that's how we want to add it to
1970 // the instruction as well.
1971 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1972 int Val = CE->getValue();
1973 Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1974 }
1975
addT2SOImmNotOperands(MCInst & Inst,unsigned N) const1976 void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1977 assert(N == 1 && "Invalid number of operands!");
1978 // The operand is actually a t2_so_imm, but we have its bitwise
1979 // negation in the assembly source, so twiddle it here.
1980 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1981 Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1982 }
1983
addT2SOImmNegOperands(MCInst & Inst,unsigned N) const1984 void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1985 assert(N == 1 && "Invalid number of operands!");
1986 // The operand is actually a t2_so_imm, but we have its
1987 // negation in the assembly source, so twiddle it here.
1988 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1989 Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1990 }
1991
addImm0_4095NegOperands(MCInst & Inst,unsigned N) const1992 void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
1993 assert(N == 1 && "Invalid number of operands!");
1994 // The operand is actually an imm0_4095, but we have its
1995 // negation in the assembly source, so twiddle it here.
1996 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1997 Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1998 }
1999
addUnsignedOffset_b8s2Operands(MCInst & Inst,unsigned N) const2000 void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
2001 if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2002 Inst.addOperand(MCOperand::CreateImm(CE->getValue() >> 2));
2003 return;
2004 }
2005
2006 const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
2007 assert(SR && "Unknown value type!");
2008 Inst.addOperand(MCOperand::CreateExpr(SR));
2009 }
2010
addThumbMemPCOperands(MCInst & Inst,unsigned N) const2011 void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
2012 assert(N == 1 && "Invalid number of operands!");
2013 if (isImm()) {
2014 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2015 if (CE) {
2016 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
2017 return;
2018 }
2019
2020 const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
2021 assert(SR && "Unknown value type!");
2022 Inst.addOperand(MCOperand::CreateExpr(SR));
2023 return;
2024 }
2025
2026 assert(isMem() && "Unknown value type!");
2027 assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
2028 Inst.addOperand(MCOperand::CreateImm(Memory.OffsetImm->getValue()));
2029 }
2030
addMemBarrierOptOperands(MCInst & Inst,unsigned N) const2031 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
2032 assert(N == 1 && "Invalid number of operands!");
2033 Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
2034 }
2035
addInstSyncBarrierOptOperands(MCInst & Inst,unsigned N) const2036 void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2037 assert(N == 1 && "Invalid number of operands!");
2038 Inst.addOperand(MCOperand::CreateImm(unsigned(getInstSyncBarrierOpt())));
2039 }
2040
addMemNoOffsetOperands(MCInst & Inst,unsigned N) const2041 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
2042 assert(N == 1 && "Invalid number of operands!");
2043 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2044 }
2045
addMemPCRelImm12Operands(MCInst & Inst,unsigned N) const2046 void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
2047 assert(N == 1 && "Invalid number of operands!");
2048 int32_t Imm = Memory.OffsetImm->getValue();
2049 Inst.addOperand(MCOperand::CreateImm(Imm));
2050 }
2051
addAdrLabelOperands(MCInst & Inst,unsigned N) const2052 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2053 assert(N == 1 && "Invalid number of operands!");
2054 assert(isImm() && "Not an immediate!");
2055
2056 // If we have an immediate that's not a constant, treat it as a label
2057 // reference needing a fixup.
2058 if (!isa<MCConstantExpr>(getImm())) {
2059 Inst.addOperand(MCOperand::CreateExpr(getImm()));
2060 return;
2061 }
2062
2063 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2064 int Val = CE->getValue();
2065 Inst.addOperand(MCOperand::CreateImm(Val));
2066 }
2067
addAlignedMemoryOperands(MCInst & Inst,unsigned N) const2068 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
2069 assert(N == 2 && "Invalid number of operands!");
2070 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2071 Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
2072 }
2073
addDupAlignedMemoryNoneOperands(MCInst & Inst,unsigned N) const2074 void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2075 addAlignedMemoryOperands(Inst, N);
2076 }
2077
addAlignedMemoryNoneOperands(MCInst & Inst,unsigned N) const2078 void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2079 addAlignedMemoryOperands(Inst, N);
2080 }
2081
addAlignedMemory16Operands(MCInst & Inst,unsigned N) const2082 void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2083 addAlignedMemoryOperands(Inst, N);
2084 }
2085
addDupAlignedMemory16Operands(MCInst & Inst,unsigned N) const2086 void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2087 addAlignedMemoryOperands(Inst, N);
2088 }
2089
addAlignedMemory32Operands(MCInst & Inst,unsigned N) const2090 void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2091 addAlignedMemoryOperands(Inst, N);
2092 }
2093
addDupAlignedMemory32Operands(MCInst & Inst,unsigned N) const2094 void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2095 addAlignedMemoryOperands(Inst, N);
2096 }
2097
addAlignedMemory64Operands(MCInst & Inst,unsigned N) const2098 void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2099 addAlignedMemoryOperands(Inst, N);
2100 }
2101
addDupAlignedMemory64Operands(MCInst & Inst,unsigned N) const2102 void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2103 addAlignedMemoryOperands(Inst, N);
2104 }
2105
addAlignedMemory64or128Operands(MCInst & Inst,unsigned N) const2106 void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2107 addAlignedMemoryOperands(Inst, N);
2108 }
2109
addDupAlignedMemory64or128Operands(MCInst & Inst,unsigned N) const2110 void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2111 addAlignedMemoryOperands(Inst, N);
2112 }
2113
addAlignedMemory64or128or256Operands(MCInst & Inst,unsigned N) const2114 void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
2115 addAlignedMemoryOperands(Inst, N);
2116 }
2117
addAddrMode2Operands(MCInst & Inst,unsigned N) const2118 void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
2119 assert(N == 3 && "Invalid number of operands!");
2120 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2121 if (!Memory.OffsetRegNum) {
2122 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2123 // Special case for #-0
2124 if (Val == INT32_MIN) Val = 0;
2125 if (Val < 0) Val = -Val;
2126 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2127 } else {
2128 // For register offset, we encode the shift type and negation flag
2129 // here.
2130 Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2131 Memory.ShiftImm, Memory.ShiftType);
2132 }
2133 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2134 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2135 Inst.addOperand(MCOperand::CreateImm(Val));
2136 }
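// Illustrative examples for the AM2 encoding above: "[r1, #-4]" has no offset
// register, so the immediate operand becomes getAM2Opc(sub, 4, no_shift) with
// offset reg 0; "[r1, r2, lsl #2]" keeps r2 and packs the add/sub flag, shift
// amount, and shift type via getAM2Opc instead.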
2137
addAM2OffsetImmOperands(MCInst & Inst,unsigned N) const2138 void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
2139 assert(N == 2 && "Invalid number of operands!");
2140 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2141 assert(CE && "non-constant AM2OffsetImm operand!");
2142 int32_t Val = CE->getValue();
2143 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2144 // Special case for #-0
2145 if (Val == INT32_MIN) Val = 0;
2146 if (Val < 0) Val = -Val;
2147 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2148 Inst.addOperand(MCOperand::CreateReg(0));
2149 Inst.addOperand(MCOperand::CreateImm(Val));
2150 }
2151
addAddrMode3Operands(MCInst & Inst,unsigned N) const2152 void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
2153 assert(N == 3 && "Invalid number of operands!");
2154 // If we have an immediate that's not a constant, treat it as a label
2155 // reference needing a fixup. If it is a constant, it's something else
2156 // and we reject it.
2157 if (isImm()) {
2158 Inst.addOperand(MCOperand::CreateExpr(getImm()));
2159 Inst.addOperand(MCOperand::CreateReg(0));
2160 Inst.addOperand(MCOperand::CreateImm(0));
2161 return;
2162 }
2163
2164 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2165 if (!Memory.OffsetRegNum) {
2166 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2167 // Special case for #-0
2168 if (Val == INT32_MIN) Val = 0;
2169 if (Val < 0) Val = -Val;
2170 Val = ARM_AM::getAM3Opc(AddSub, Val);
2171 } else {
2172 // For register offset, we encode the shift type and negation flag
2173 // here.
2174 Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
2175 }
2176 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2177 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2178 Inst.addOperand(MCOperand::CreateImm(Val));
2179 }
2180
addAM3OffsetOperands(MCInst & Inst,unsigned N) const2181 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
2182 assert(N == 2 && "Invalid number of operands!");
2183 if (Kind == k_PostIndexRegister) {
2184 int32_t Val =
2185 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
2186 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
2187 Inst.addOperand(MCOperand::CreateImm(Val));
2188 return;
2189 }
2190
2191 // Constant offset.
2192 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
2193 int32_t Val = CE->getValue();
2194 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2195 // Special case for #-0
2196 if (Val == INT32_MIN) Val = 0;
2197 if (Val < 0) Val = -Val;
2198 Val = ARM_AM::getAM3Opc(AddSub, Val);
2199 Inst.addOperand(MCOperand::CreateReg(0));
2200 Inst.addOperand(MCOperand::CreateImm(Val));
2201 }
2202
addAddrMode5Operands(MCInst & Inst,unsigned N) const2203 void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
2204 assert(N == 2 && "Invalid number of operands!");
2205 // If we have an immediate that's not a constant, treat it as a label
2206 // reference needing a fixup. If it is a constant, it's something else
2207 // and we reject it.
2208 if (isImm()) {
2209 Inst.addOperand(MCOperand::CreateExpr(getImm()));
2210 Inst.addOperand(MCOperand::CreateImm(0));
2211 return;
2212 }
2213
2214 // The lower two bits are always zero and as such are not encoded.
2215 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2216 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2217 // Special case for #-0
2218 if (Val == INT32_MIN) Val = 0;
2219 if (Val < 0) Val = -Val;
2220 Val = ARM_AM::getAM5Opc(AddSub, Val);
2221 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2222 Inst.addOperand(MCOperand::CreateImm(Val));
2223 }
2224
addMemImm8s4OffsetOperands(MCInst & Inst,unsigned N) const2225 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
2226 assert(N == 2 && "Invalid number of operands!");
2227 // If we have an immediate that's not a constant, treat it as a label
2228 // reference needing a fixup. If it is a constant, it's something else
2229 // and we reject it.
2230 if (isImm()) {
2231 Inst.addOperand(MCOperand::CreateExpr(getImm()));
2232 Inst.addOperand(MCOperand::CreateImm(0));
2233 return;
2234 }
2235
2236 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2237 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2238 Inst.addOperand(MCOperand::CreateImm(Val));
2239 }
2240
addMemImm0_1020s4OffsetOperands(MCInst & Inst,unsigned N) const2241 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
2242 assert(N == 2 && "Invalid number of operands!");
2243 // The lower two bits are always zero and as such are not encoded.
2244 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2245 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2246 Inst.addOperand(MCOperand::CreateImm(Val));
2247 }
2248
addMemImm8OffsetOperands(MCInst & Inst,unsigned N) const2249 void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2250 assert(N == 2 && "Invalid number of operands!");
2251 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2252 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2253 Inst.addOperand(MCOperand::CreateImm(Val));
2254 }
2255
addMemPosImm8OffsetOperands(MCInst & Inst,unsigned N) const2256 void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2257 addMemImm8OffsetOperands(Inst, N);
2258 }
2259
addMemNegImm8OffsetOperands(MCInst & Inst,unsigned N) const2260 void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2261 addMemImm8OffsetOperands(Inst, N);
2262 }
2263
addMemUImm12OffsetOperands(MCInst & Inst,unsigned N) const2264 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2265 assert(N == 2 && "Invalid number of operands!");
2266 // If this is an immediate, it's a label reference.
2267 if (isImm()) {
2268 addExpr(Inst, getImm());
2269 Inst.addOperand(MCOperand::CreateImm(0));
2270 return;
2271 }
2272
2273 // Otherwise, it's a normal memory reg+offset.
2274 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2275 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2276 Inst.addOperand(MCOperand::CreateImm(Val));
2277 }
2278
addMemImm12OffsetOperands(MCInst & Inst,unsigned N) const2279 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2280 assert(N == 2 && "Invalid number of operands!");
2281 // If this is an immediate, it's a label reference.
2282 if (isImm()) {
2283 addExpr(Inst, getImm());
2284 Inst.addOperand(MCOperand::CreateImm(0));
2285 return;
2286 }
2287
2288 // Otherwise, it's a normal memory reg+offset.
2289 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2290 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2291 Inst.addOperand(MCOperand::CreateImm(Val));
2292 }
2293
addMemTBBOperands(MCInst & Inst,unsigned N) const2294 void addMemTBBOperands(MCInst &Inst, unsigned N) const {
2295 assert(N == 2 && "Invalid number of operands!");
2296 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2297 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2298 }
2299
addMemTBHOperands(MCInst & Inst,unsigned N) const2300 void addMemTBHOperands(MCInst &Inst, unsigned N) const {
2301 assert(N == 2 && "Invalid number of operands!");
2302 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2303 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2304 }
2305
addMemRegOffsetOperands(MCInst & Inst,unsigned N) const2306 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2307 assert(N == 3 && "Invalid number of operands!");
2308 unsigned Val =
2309 ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2310 Memory.ShiftImm, Memory.ShiftType);
2311 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2312 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2313 Inst.addOperand(MCOperand::CreateImm(Val));
2314 }
2315
addT2MemRegOffsetOperands(MCInst & Inst,unsigned N) const2316 void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2317 assert(N == 3 && "Invalid number of operands!");
2318 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2319 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2320 Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
2321 }
2322
addMemThumbRROperands(MCInst & Inst,unsigned N) const2323 void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
2324 assert(N == 2 && "Invalid number of operands!");
2325 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2326 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2327 }
2328
addMemThumbRIs4Operands(MCInst & Inst,unsigned N) const2329 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
2330 assert(N == 2 && "Invalid number of operands!");
2331 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2332 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2333 Inst.addOperand(MCOperand::CreateImm(Val));
2334 }
2335
addMemThumbRIs2Operands(MCInst & Inst,unsigned N) const2336 void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
2337 assert(N == 2 && "Invalid number of operands!");
2338 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
2339 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2340 Inst.addOperand(MCOperand::CreateImm(Val));
2341 }
2342
addMemThumbRIs1Operands(MCInst & Inst,unsigned N) const2343 void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
2344 assert(N == 2 && "Invalid number of operands!");
2345 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
2346 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2347 Inst.addOperand(MCOperand::CreateImm(Val));
2348 }
2349
addMemThumbSPIOperands(MCInst & Inst,unsigned N) const2350 void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
2351 assert(N == 2 && "Invalid number of operands!");
2352 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2353 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2354 Inst.addOperand(MCOperand::CreateImm(Val));
2355 }
2356
addPostIdxImm8Operands(MCInst & Inst,unsigned N) const2357 void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
2358 assert(N == 1 && "Invalid number of operands!");
2359 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2360 assert(CE && "non-constant post-idx-imm8 operand!");
2361 int Imm = CE->getValue();
2362 bool isAdd = Imm >= 0;
2363 if (Imm == INT32_MIN) Imm = 0;
2364 Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
2365 Inst.addOperand(MCOperand::CreateImm(Imm));
2366 }
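// Example: a post-index immediate of #-4 gives isAdd == false and an operand
// value of 4, while #+4 gives 4 | (1 << 8) == 0x104.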
2367
addPostIdxImm8s4Operands(MCInst & Inst,unsigned N) const2368 void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
2369 assert(N == 1 && "Invalid number of operands!");
2370 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2371 assert(CE && "non-constant post-idx-imm8s4 operand!");
2372 int Imm = CE->getValue();
2373 bool isAdd = Imm >= 0;
2374 if (Imm == INT32_MIN) Imm = 0;
2375 // Immediate is scaled by 4.
2376 Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
2377 Inst.addOperand(MCOperand::CreateImm(Imm));
2378 }
2379
addPostIdxRegOperands(MCInst & Inst,unsigned N) const2380 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
2381 assert(N == 2 && "Invalid number of operands!");
2382 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
2383 Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
2384 }
2385
addPostIdxRegShiftedOperands(MCInst & Inst,unsigned N) const2386 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
2387 assert(N == 2 && "Invalid number of operands!");
2388 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
2389 // The sign, shift type, and shift amount are encoded in a single operand
2390 // using the AM2 encoding helpers.
2391 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
2392 unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
2393 PostIdxReg.ShiftTy);
2394 Inst.addOperand(MCOperand::CreateImm(Imm));
2395 }
2396
addMSRMaskOperands(MCInst & Inst,unsigned N) const2397 void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
2398 assert(N == 1 && "Invalid number of operands!");
2399 Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
2400 }
2401
addBankedRegOperands(MCInst & Inst,unsigned N) const2402 void addBankedRegOperands(MCInst &Inst, unsigned N) const {
2403 assert(N == 1 && "Invalid number of operands!");
2404 Inst.addOperand(MCOperand::CreateImm(unsigned(getBankedReg())));
2405 }
2406
addProcIFlagsOperands(MCInst & Inst,unsigned N) const2407 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
2408 assert(N == 1 && "Invalid number of operands!");
2409 Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
2410 }
2411
addVecListOperands(MCInst & Inst,unsigned N) const2412 void addVecListOperands(MCInst &Inst, unsigned N) const {
2413 assert(N == 1 && "Invalid number of operands!");
2414 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
2415 }
2416
addVecListIndexedOperands(MCInst & Inst,unsigned N) const2417 void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
2418 assert(N == 2 && "Invalid number of operands!");
2419 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
2420 Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
2421 }
2422
addVectorIndex8Operands(MCInst & Inst,unsigned N) const2423 void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
2424 assert(N == 1 && "Invalid number of operands!");
2425 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
2426 }
2427
addVectorIndex16Operands(MCInst & Inst,unsigned N) const2428 void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
2429 assert(N == 1 && "Invalid number of operands!");
2430 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
2431 }
2432
addVectorIndex32Operands(MCInst & Inst,unsigned N) const2433 void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
2434 assert(N == 1 && "Invalid number of operands!");
2435 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
2436 }
2437
addNEONi8splatOperands(MCInst & Inst,unsigned N) const2438 void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
2439 assert(N == 1 && "Invalid number of operands!");
2440 // The immediate encodes the type of constant as well as the value.
2441 // OR in the bits that mark this as an i8 splat.
2442 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2443 Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
2444 }
2445
addNEONi16splatOperands(MCInst & Inst,unsigned N) const2446 void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
2447 assert(N == 1 && "Invalid number of operands!");
2448 // The immediate encodes the type of constant as well as the value.
2449 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2450 unsigned Value = CE->getValue();
2451 Value = ARM_AM::encodeNEONi16splat(Value);
2452 Inst.addOperand(MCOperand::CreateImm(Value));
2453 }
2454
addNEONi16splatNotOperands(MCInst & Inst,unsigned N) const2455 void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
2456 assert(N == 1 && "Invalid number of operands!");
2457 // The immediate encodes the type of constant as well as the value.
2458 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2459 unsigned Value = CE->getValue();
2460 Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
2461 Inst.addOperand(MCOperand::CreateImm(Value));
2462 }
2463
addNEONi32splatOperands(MCInst & Inst,unsigned N) const2464 void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
2465 assert(N == 1 && "Invalid number of operands!");
2466 // The immediate encodes the type of constant as well as the value.
2467 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2468 unsigned Value = CE->getValue();
2469 Value = ARM_AM::encodeNEONi32splat(Value);
2470 Inst.addOperand(MCOperand::CreateImm(Value));
2471 }
2472
addNEONi32splatNotOperands(MCInst & Inst,unsigned N) const2473 void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
2474 assert(N == 1 && "Invalid number of operands!");
2475 // The immediate encodes the type of constant as well as the value.
2476 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2477 unsigned Value = CE->getValue();
2478 Value = ARM_AM::encodeNEONi32splat(~Value);
2479 Inst.addOperand(MCOperand::CreateImm(Value));
2480 }
2481
addNEONinvByteReplicateOperands(MCInst & Inst,unsigned N) const2482 void addNEONinvByteReplicateOperands(MCInst &Inst, unsigned N) const {
2483 assert(N == 1 && "Invalid number of operands!");
2484 // The immediate encodes the type of constant as well as the value.
2485 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2486 unsigned Value = CE->getValue();
2487 assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
2488 Inst.getOpcode() == ARM::VMOVv16i8) &&
2489 "All vmvn instructions that wants to replicate non-zero byte "
2490 "always must be replaced with VMOVv8i8 or VMOVv16i8.");
2491 unsigned B = ((~Value) & 0xff);
2492 B |= 0xe00; // cmode = 0b1110
2493 Inst.addOperand(MCOperand::CreateImm(B));
2494 }
addNEONi32vmovOperands(MCInst & Inst,unsigned N) const2495 void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
2496 assert(N == 1 && "Invalid number of operands!");
2497 // The immediate encodes the type of constant as well as the value.
2498 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2499 unsigned Value = CE->getValue();
2500 if (Value >= 256 && Value <= 0xffff)
2501 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2502 else if (Value > 0xffff && Value <= 0xffffff)
2503 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2504 else if (Value > 0xffffff)
2505 Value = (Value >> 24) | 0x600;
2506 Inst.addOperand(MCOperand::CreateImm(Value));
2507 }
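// Examples of the value packing above (assuming the 0xX00 bits select the
// NEON cmode): 0x0000ab00 becomes (0xab00 >> 8) | 0x200 == 0x2ab, and
// 0x00abffff becomes (0xabffff >> 16) | 0xd00 == 0xdab.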
2508
addNEONvmovByteReplicateOperands(MCInst & Inst,unsigned N) const2509 void addNEONvmovByteReplicateOperands(MCInst &Inst, unsigned N) const {
2510 assert(N == 1 && "Invalid number of operands!");
2511 // The immediate encodes the type of constant as well as the value.
2512 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2513 unsigned Value = CE->getValue();
2514 assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
2515 Inst.getOpcode() == ARM::VMOVv16i8) &&
2516 "All instructions that wants to replicate non-zero byte "
2517 "always must be replaced with VMOVv8i8 or VMOVv16i8.");
2518 unsigned B = Value & 0xff;
2519 B |= 0xe00; // cmode = 0b1110
2520 Inst.addOperand(MCOperand::CreateImm(B));
2521 }
addNEONi32vmovNegOperands(MCInst & Inst,unsigned N) const2522 void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
2523 assert(N == 1 && "Invalid number of operands!");
2524 // The immediate encodes the type of constant as well as the value.
2525 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2526 unsigned Value = ~CE->getValue();
2527 if (Value >= 256 && Value <= 0xffff)
2528 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2529 else if (Value > 0xffff && Value <= 0xffffff)
2530 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2531 else if (Value > 0xffffff)
2532 Value = (Value >> 24) | 0x600;
2533 Inst.addOperand(MCOperand::CreateImm(Value));
2534 }
2535
addNEONi64splatOperands(MCInst & Inst,unsigned N) const2536 void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
2537 assert(N == 1 && "Invalid number of operands!");
2538 // The immediate encodes the type of constant as well as the value.
2539 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2540 uint64_t Value = CE->getValue();
2541 unsigned Imm = 0;
2542 for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
2543 Imm |= (Value & 1) << i;
2544 }
2545 Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
2546 }
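// Worked example: Value = 0x00ff00ff00ff00ff collects one bit per byte,
// giving Imm == 0x55, so the operand is 0x55 | 0x1e00 == 0x1e55.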
2547
2548 void print(raw_ostream &OS) const override;
2549
CreateITMask(unsigned Mask,SMLoc S)2550 static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
2551 auto Op = make_unique<ARMOperand>(k_ITCondMask);
2552 Op->ITMask.Mask = Mask;
2553 Op->StartLoc = S;
2554 Op->EndLoc = S;
2555 return Op;
2556 }
2557
CreateCondCode(ARMCC::CondCodes CC,SMLoc S)2558 static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
2559 SMLoc S) {
2560 auto Op = make_unique<ARMOperand>(k_CondCode);
2561 Op->CC.Val = CC;
2562 Op->StartLoc = S;
2563 Op->EndLoc = S;
2564 return Op;
2565 }
2566
CreateCoprocNum(unsigned CopVal,SMLoc S)2567 static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
2568 auto Op = make_unique<ARMOperand>(k_CoprocNum);
2569 Op->Cop.Val = CopVal;
2570 Op->StartLoc = S;
2571 Op->EndLoc = S;
2572 return Op;
2573 }
2574
CreateCoprocReg(unsigned CopVal,SMLoc S)2575 static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
2576 auto Op = make_unique<ARMOperand>(k_CoprocReg);
2577 Op->Cop.Val = CopVal;
2578 Op->StartLoc = S;
2579 Op->EndLoc = S;
2580 return Op;
2581 }
2582
CreateCoprocOption(unsigned Val,SMLoc S,SMLoc E)2583 static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
2584 SMLoc E) {
2585 auto Op = make_unique<ARMOperand>(k_CoprocOption);
2586 Op->Cop.Val = Val;
2587 Op->StartLoc = S;
2588 Op->EndLoc = E;
2589 return Op;
2590 }
2591
CreateCCOut(unsigned RegNum,SMLoc S)2592 static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
2593 auto Op = make_unique<ARMOperand>(k_CCOut);
2594 Op->Reg.RegNum = RegNum;
2595 Op->StartLoc = S;
2596 Op->EndLoc = S;
2597 return Op;
2598 }
2599
CreateToken(StringRef Str,SMLoc S)2600 static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
2601 auto Op = make_unique<ARMOperand>(k_Token);
2602 Op->Tok.Data = Str.data();
2603 Op->Tok.Length = Str.size();
2604 Op->StartLoc = S;
2605 Op->EndLoc = S;
2606 return Op;
2607 }
2608
CreateReg(unsigned RegNum,SMLoc S,SMLoc E)2609 static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
2610 SMLoc E) {
2611 auto Op = make_unique<ARMOperand>(k_Register);
2612 Op->Reg.RegNum = RegNum;
2613 Op->StartLoc = S;
2614 Op->EndLoc = E;
2615 return Op;
2616 }
2617
2618 static std::unique_ptr<ARMOperand>
CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,unsigned SrcReg,unsigned ShiftReg,unsigned ShiftImm,SMLoc S,SMLoc E)2619 CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
2620 unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
2621 SMLoc E) {
2622 auto Op = make_unique<ARMOperand>(k_ShiftedRegister);
2623 Op->RegShiftedReg.ShiftTy = ShTy;
2624 Op->RegShiftedReg.SrcReg = SrcReg;
2625 Op->RegShiftedReg.ShiftReg = ShiftReg;
2626 Op->RegShiftedReg.ShiftImm = ShiftImm;
2627 Op->StartLoc = S;
2628 Op->EndLoc = E;
2629 return Op;
2630 }
2631
2632 static std::unique_ptr<ARMOperand>
CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,unsigned SrcReg,unsigned ShiftImm,SMLoc S,SMLoc E)2633 CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
2634 unsigned ShiftImm, SMLoc S, SMLoc E) {
2635 auto Op = make_unique<ARMOperand>(k_ShiftedImmediate);
2636 Op->RegShiftedImm.ShiftTy = ShTy;
2637 Op->RegShiftedImm.SrcReg = SrcReg;
2638 Op->RegShiftedImm.ShiftImm = ShiftImm;
2639 Op->StartLoc = S;
2640 Op->EndLoc = E;
2641 return Op;
2642 }
2643
CreateShifterImm(bool isASR,unsigned Imm,SMLoc S,SMLoc E)2644 static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
2645 SMLoc S, SMLoc E) {
2646 auto Op = make_unique<ARMOperand>(k_ShifterImmediate);
2647 Op->ShifterImm.isASR = isASR;
2648 Op->ShifterImm.Imm = Imm;
2649 Op->StartLoc = S;
2650 Op->EndLoc = E;
2651 return Op;
2652 }
2653
CreateRotImm(unsigned Imm,SMLoc S,SMLoc E)2654 static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
2655 SMLoc E) {
2656 auto Op = make_unique<ARMOperand>(k_RotateImmediate);
2657 Op->RotImm.Imm = Imm;
2658 Op->StartLoc = S;
2659 Op->EndLoc = E;
2660 return Op;
2661 }
2662
CreateModImm(unsigned Bits,unsigned Rot,SMLoc S,SMLoc E)2663 static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
2664 SMLoc S, SMLoc E) {
2665 auto Op = make_unique<ARMOperand>(k_ModifiedImmediate);
2666 Op->ModImm.Bits = Bits;
2667 Op->ModImm.Rot = Rot;
2668 Op->StartLoc = S;
2669 Op->EndLoc = E;
2670 return Op;
2671 }
2672
2673 static std::unique_ptr<ARMOperand>
CreateBitfield(unsigned LSB,unsigned Width,SMLoc S,SMLoc E)2674 CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
2675 auto Op = make_unique<ARMOperand>(k_BitfieldDescriptor);
2676 Op->Bitfield.LSB = LSB;
2677 Op->Bitfield.Width = Width;
2678 Op->StartLoc = S;
2679 Op->EndLoc = E;
2680 return Op;
2681 }
2682
2683 static std::unique_ptr<ARMOperand>
CreateRegList(SmallVectorImpl<std::pair<unsigned,unsigned>> & Regs,SMLoc StartLoc,SMLoc EndLoc)2684 CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
2685 SMLoc StartLoc, SMLoc EndLoc) {
2686 assert (Regs.size() > 0 && "RegList contains no registers?");
2687 KindTy Kind = k_RegisterList;
2688
2689 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().second))
2690 Kind = k_DPRRegisterList;
2691 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2692 contains(Regs.front().second))
2693 Kind = k_SPRRegisterList;
2694
2695 // Sort based on the register encoding values.
2696 array_pod_sort(Regs.begin(), Regs.end());
2697
2698 auto Op = make_unique<ARMOperand>(Kind);
2699 for (SmallVectorImpl<std::pair<unsigned, unsigned> >::const_iterator
2700 I = Regs.begin(), E = Regs.end(); I != E; ++I)
2701 Op->Registers.push_back(I->second);
2702 Op->StartLoc = StartLoc;
2703 Op->EndLoc = EndLoc;
2704 return Op;
2705 }
2706
CreateVectorList(unsigned RegNum,unsigned Count,bool isDoubleSpaced,SMLoc S,SMLoc E)2707 static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
2708 unsigned Count,
2709 bool isDoubleSpaced,
2710 SMLoc S, SMLoc E) {
2711 auto Op = make_unique<ARMOperand>(k_VectorList);
2712 Op->VectorList.RegNum = RegNum;
2713 Op->VectorList.Count = Count;
2714 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2715 Op->StartLoc = S;
2716 Op->EndLoc = E;
2717 return Op;
2718 }
2719
2720 static std::unique_ptr<ARMOperand>
CreateVectorListAllLanes(unsigned RegNum,unsigned Count,bool isDoubleSpaced,SMLoc S,SMLoc E)2721 CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
2722 SMLoc S, SMLoc E) {
2723 auto Op = make_unique<ARMOperand>(k_VectorListAllLanes);
2724 Op->VectorList.RegNum = RegNum;
2725 Op->VectorList.Count = Count;
2726 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2727 Op->StartLoc = S;
2728 Op->EndLoc = E;
2729 return Op;
2730 }
2731
2732 static std::unique_ptr<ARMOperand>
CreateVectorListIndexed(unsigned RegNum,unsigned Count,unsigned Index,bool isDoubleSpaced,SMLoc S,SMLoc E)2733 CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
2734 bool isDoubleSpaced, SMLoc S, SMLoc E) {
2735 auto Op = make_unique<ARMOperand>(k_VectorListIndexed);
2736 Op->VectorList.RegNum = RegNum;
2737 Op->VectorList.Count = Count;
2738 Op->VectorList.LaneIndex = Index;
2739 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2740 Op->StartLoc = S;
2741 Op->EndLoc = E;
2742 return Op;
2743 }
2744
2745 static std::unique_ptr<ARMOperand>
CreateVectorIndex(unsigned Idx,SMLoc S,SMLoc E,MCContext & Ctx)2746 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2747 auto Op = make_unique<ARMOperand>(k_VectorIndex);
2748 Op->VectorIndex.Val = Idx;
2749 Op->StartLoc = S;
2750 Op->EndLoc = E;
2751 return Op;
2752 }
2753
CreateImm(const MCExpr * Val,SMLoc S,SMLoc E)2754 static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
2755 SMLoc E) {
2756 auto Op = make_unique<ARMOperand>(k_Immediate);
2757 Op->Imm.Val = Val;
2758 Op->StartLoc = S;
2759 Op->EndLoc = E;
2760 return Op;
2761 }
2762
2763 static std::unique_ptr<ARMOperand>
CreateMem(unsigned BaseRegNum,const MCConstantExpr * OffsetImm,unsigned OffsetRegNum,ARM_AM::ShiftOpc ShiftType,unsigned ShiftImm,unsigned Alignment,bool isNegative,SMLoc S,SMLoc E,SMLoc AlignmentLoc=SMLoc ())2764 CreateMem(unsigned BaseRegNum, const MCConstantExpr *OffsetImm,
2765 unsigned OffsetRegNum, ARM_AM::ShiftOpc ShiftType,
2766 unsigned ShiftImm, unsigned Alignment, bool isNegative, SMLoc S,
2767 SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
2768 auto Op = make_unique<ARMOperand>(k_Memory);
2769 Op->Memory.BaseRegNum = BaseRegNum;
2770 Op->Memory.OffsetImm = OffsetImm;
2771 Op->Memory.OffsetRegNum = OffsetRegNum;
2772 Op->Memory.ShiftType = ShiftType;
2773 Op->Memory.ShiftImm = ShiftImm;
2774 Op->Memory.Alignment = Alignment;
2775 Op->Memory.isNegative = isNegative;
2776 Op->StartLoc = S;
2777 Op->EndLoc = E;
2778 Op->AlignmentLoc = AlignmentLoc;
2779 return Op;
2780 }
2781
2782 static std::unique_ptr<ARMOperand>
CreatePostIdxReg(unsigned RegNum,bool isAdd,ARM_AM::ShiftOpc ShiftTy,unsigned ShiftImm,SMLoc S,SMLoc E)2783 CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
2784 unsigned ShiftImm, SMLoc S, SMLoc E) {
2785 auto Op = make_unique<ARMOperand>(k_PostIndexRegister);
2786 Op->PostIdxReg.RegNum = RegNum;
2787 Op->PostIdxReg.isAdd = isAdd;
2788 Op->PostIdxReg.ShiftTy = ShiftTy;
2789 Op->PostIdxReg.ShiftImm = ShiftImm;
2790 Op->StartLoc = S;
2791 Op->EndLoc = E;
2792 return Op;
2793 }
2794
CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,SMLoc S)2795 static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
2796 SMLoc S) {
2797 auto Op = make_unique<ARMOperand>(k_MemBarrierOpt);
2798 Op->MBOpt.Val = Opt;
2799 Op->StartLoc = S;
2800 Op->EndLoc = S;
2801 return Op;
2802 }
2803
2804 static std::unique_ptr<ARMOperand>
2805 CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
2806 auto Op = make_unique<ARMOperand>(k_InstSyncBarrierOpt);
2807 Op->ISBOpt.Val = Opt;
2808 Op->StartLoc = S;
2809 Op->EndLoc = S;
2810 return Op;
2811 }
2812
2813 static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
2814 SMLoc S) {
2815 auto Op = make_unique<ARMOperand>(k_ProcIFlags);
2816 Op->IFlags.Val = IFlags;
2817 Op->StartLoc = S;
2818 Op->EndLoc = S;
2819 return Op;
2820 }
2821
2822 static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
2823 auto Op = make_unique<ARMOperand>(k_MSRMask);
2824 Op->MMask.Val = MMask;
2825 Op->StartLoc = S;
2826 Op->EndLoc = S;
2827 return Op;
2828 }
2829
2830 static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
2831 auto Op = make_unique<ARMOperand>(k_BankedReg);
2832 Op->BankedReg.Val = Reg;
2833 Op->StartLoc = S;
2834 Op->EndLoc = S;
2835 return Op;
2836 }
2837 };
2838
2839 } // end anonymous namespace.
2840
2841 void ARMOperand::print(raw_ostream &OS) const {
2842 switch (Kind) {
2843 case k_CondCode:
2844 OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2845 break;
2846 case k_CCOut:
2847 OS << "<ccout " << getReg() << ">";
2848 break;
2849 case k_ITCondMask: {
2850 static const char *const MaskStr[] = {
2851 "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2852 "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2853 };
2854 assert((ITMask.Mask & 0xf) == ITMask.Mask);
2855 OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2856 break;
2857 }
2858 case k_CoprocNum:
2859 OS << "<coprocessor number: " << getCoproc() << ">";
2860 break;
2861 case k_CoprocReg:
2862 OS << "<coprocessor register: " << getCoproc() << ">";
2863 break;
2864 case k_CoprocOption:
2865 OS << "<coprocessor option: " << CoprocOption.Val << ">";
2866 break;
2867 case k_MSRMask:
2868 OS << "<mask: " << getMSRMask() << ">";
2869 break;
2870 case k_BankedReg:
2871 OS << "<banked reg: " << getBankedReg() << ">";
2872 break;
2873 case k_Immediate:
2874 getImm()->print(OS);
2875 break;
2876 case k_MemBarrierOpt:
2877 OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
2878 break;
2879 case k_InstSyncBarrierOpt:
2880 OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
2881 break;
2882 case k_Memory:
2883 OS << "<memory "
2884 << " base:" << Memory.BaseRegNum;
2885 OS << ">";
2886 break;
2887 case k_PostIndexRegister:
2888 OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2889 << PostIdxReg.RegNum;
2890 if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2891 OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2892 << PostIdxReg.ShiftImm;
2893 OS << ">";
2894 break;
2895 case k_ProcIFlags: {
2896 OS << "<ARM_PROC::";
2897 unsigned IFlags = getProcIFlags();
2898 for (int i=2; i >= 0; --i)
2899 if (IFlags & (1 << i))
2900 OS << ARM_PROC::IFlagsToString(1 << i);
2901 OS << ">";
2902 break;
2903 }
2904 case k_Register:
2905 OS << "<register " << getReg() << ">";
2906 break;
2907 case k_ShifterImmediate:
2908 OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2909 << " #" << ShifterImm.Imm << ">";
2910 break;
2911 case k_ShiftedRegister:
2912 OS << "<so_reg_reg "
2913 << RegShiftedReg.SrcReg << " "
2914 << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2915 << " " << RegShiftedReg.ShiftReg << ">";
2916 break;
2917 case k_ShiftedImmediate:
2918 OS << "<so_reg_imm "
2919 << RegShiftedImm.SrcReg << " "
2920 << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2921 << " #" << RegShiftedImm.ShiftImm << ">";
2922 break;
2923 case k_RotateImmediate:
2924 OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2925 break;
2926 case k_ModifiedImmediate:
2927 OS << "<mod_imm #" << ModImm.Bits << ", #"
2928 << ModImm.Rot << ")>";
2929 break;
2930 case k_BitfieldDescriptor:
2931 OS << "<bitfield " << "lsb: " << Bitfield.LSB
2932 << ", width: " << Bitfield.Width << ">";
2933 break;
2934 case k_RegisterList:
2935 case k_DPRRegisterList:
2936 case k_SPRRegisterList: {
2937 OS << "<register_list ";
2938
2939 const SmallVectorImpl<unsigned> &RegList = getRegList();
2940 for (SmallVectorImpl<unsigned>::const_iterator
2941 I = RegList.begin(), E = RegList.end(); I != E; ) {
2942 OS << *I;
2943 if (++I < E) OS << ", ";
2944 }
2945
2946 OS << ">";
2947 break;
2948 }
2949 case k_VectorList:
2950 OS << "<vector_list " << VectorList.Count << " * "
2951 << VectorList.RegNum << ">";
2952 break;
2953 case k_VectorListAllLanes:
2954 OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2955 << VectorList.RegNum << ">";
2956 break;
2957 case k_VectorListIndexed:
2958 OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2959 << VectorList.Count << " * " << VectorList.RegNum << ">";
2960 break;
2961 case k_Token:
2962 OS << "'" << getToken() << "'";
2963 break;
2964 case k_VectorIndex:
2965 OS << "<vectorindex " << getVectorIndex() << ">";
2966 break;
2967 }
2968 }
2969
2970 /// @name Auto-generated Match Functions
2971 /// {
2972
2973 static unsigned MatchRegisterName(StringRef Name);
2974
2975 /// }
2976
2977 bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2978 SMLoc &StartLoc, SMLoc &EndLoc) {
2979 const AsmToken &Tok = getParser().getTok();
2980 StartLoc = Tok.getLoc();
2981 EndLoc = Tok.getEndLoc();
2982 RegNo = tryParseRegister();
2983
2984 return (RegNo == (unsigned)-1);
2985 }
2986
2987 /// Try to parse a register name. The token must be an Identifier when called,
2988 /// and if it is a register name the token is eaten and the register number is
2989 /// returned. Otherwise return -1.
2990 ///
2991 int ARMAsmParser::tryParseRegister() {
2992 MCAsmParser &Parser = getParser();
2993 const AsmToken &Tok = Parser.getTok();
2994 if (Tok.isNot(AsmToken::Identifier)) return -1;
2995
2996 std::string lowerCase = Tok.getString().lower();
2997 unsigned RegNum = MatchRegisterName(lowerCase);
2998 if (!RegNum) {
2999 RegNum = StringSwitch<unsigned>(lowerCase)
3000 .Case("r13", ARM::SP)
3001 .Case("r14", ARM::LR)
3002 .Case("r15", ARM::PC)
3003 .Case("ip", ARM::R12)
3004 // Additional register name aliases for 'gas' compatibility.
3005 .Case("a1", ARM::R0)
3006 .Case("a2", ARM::R1)
3007 .Case("a3", ARM::R2)
3008 .Case("a4", ARM::R3)
3009 .Case("v1", ARM::R4)
3010 .Case("v2", ARM::R5)
3011 .Case("v3", ARM::R6)
3012 .Case("v4", ARM::R7)
3013 .Case("v5", ARM::R8)
3014 .Case("v6", ARM::R9)
3015 .Case("v7", ARM::R10)
3016 .Case("v8", ARM::R11)
3017 .Case("sb", ARM::R9)
3018 .Case("sl", ARM::R10)
3019 .Case("fp", ARM::R11)
3020 .Default(0);
3021 }
3022 if (!RegNum) {
3023 // Check for aliases registered via .req. Canonicalize to lower case.
3024 // That's more consistent since register names are case insensitive, and
3025 // it's how the original entry was passed in from MC/MCParser/AsmParser.
3026 StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
3027 // If no match, return failure.
3028 if (Entry == RegisterReqs.end())
3029 return -1;
3030 Parser.Lex(); // Eat identifier token.
3031 return Entry->getValue();
3032 }
3033
3034 // Some FPUs only have 16 D registers, so D16-D31 are invalid
3035 if (hasD16() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
3036 return -1;
3037
3038 Parser.Lex(); // Eat identifier token.
3039
3040 return RegNum;
3041 }
3042
3043 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
3044 // If a recoverable error occurs, return 1. If an irrecoverable error
3045 // occurs, return -1. An irrecoverable error is one where tokens have been
3046 // consumed in the process of trying to parse the shifter (i.e., when it is
3047 // indeed a shifter operand, but malformed).
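// e.g. the "lsl #3" in "add r0, r1, r2, lsl #3", the "asr r3" in
// "mov r0, r1, asr r3", or a bare "rrx".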
3048 int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
3049 MCAsmParser &Parser = getParser();
3050 SMLoc S = Parser.getTok().getLoc();
3051 const AsmToken &Tok = Parser.getTok();
3052 if (Tok.isNot(AsmToken::Identifier))
3053 return -1;
3054
3055 std::string lowerCase = Tok.getString().lower();
3056 ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
3057 .Case("asl", ARM_AM::lsl)
3058 .Case("lsl", ARM_AM::lsl)
3059 .Case("lsr", ARM_AM::lsr)
3060 .Case("asr", ARM_AM::asr)
3061 .Case("ror", ARM_AM::ror)
3062 .Case("rrx", ARM_AM::rrx)
3063 .Default(ARM_AM::no_shift);
3064
3065 if (ShiftTy == ARM_AM::no_shift)
3066 return 1;
3067
3068 Parser.Lex(); // Eat the operator.
3069
3070 // The source register for the shift has already been added to the
3071 // operand list, so we need to pop it off and combine it into the shifted
3072 // register operand instead.
3073 std::unique_ptr<ARMOperand> PrevOp(
3074 (ARMOperand *)Operands.pop_back_val().release());
3075 if (!PrevOp->isReg())
3076 return Error(PrevOp->getStartLoc(), "shift must be of a register");
3077 int SrcReg = PrevOp->getReg();
3078
3079 SMLoc EndLoc;
3080 int64_t Imm = 0;
3081 int ShiftReg = 0;
3082 if (ShiftTy == ARM_AM::rrx) {
3083 // RRX doesn't have an explicit shift amount. The encoder expects
3084 // the shift register to be the same as the source register. Seems odd,
3085 // but OK.
3086 ShiftReg = SrcReg;
3087 } else {
3088 // Figure out if this is shifted by a constant or a register (for non-RRX).
3089 if (Parser.getTok().is(AsmToken::Hash) ||
3090 Parser.getTok().is(AsmToken::Dollar)) {
3091 Parser.Lex(); // Eat hash.
3092 SMLoc ImmLoc = Parser.getTok().getLoc();
3093 const MCExpr *ShiftExpr = nullptr;
3094 if (getParser().parseExpression(ShiftExpr, EndLoc)) {
3095 Error(ImmLoc, "invalid immediate shift value");
3096 return -1;
3097 }
3098 // The expression must be evaluatable as an immediate.
3099 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
3100 if (!CE) {
3101 Error(ImmLoc, "invalid immediate shift value");
3102 return -1;
3103 }
3104 // Range check the immediate.
3105 // lsl, ror: 0 <= imm <= 31
3106 // lsr, asr: 0 <= imm <= 32
3107 Imm = CE->getValue();
3108 if (Imm < 0 ||
3109 ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
3110 ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
3111 Error(ImmLoc, "immediate shift value out of range");
3112 return -1;
3113 }
3114 // shift by zero is a nop. Always send it through as lsl.
3115 // ('as' compatibility)
3116 if (Imm == 0)
3117 ShiftTy = ARM_AM::lsl;
3118 } else if (Parser.getTok().is(AsmToken::Identifier)) {
3119 SMLoc L = Parser.getTok().getLoc();
3120 EndLoc = Parser.getTok().getEndLoc();
3121 ShiftReg = tryParseRegister();
3122 if (ShiftReg == -1) {
3123 Error(L, "expected immediate or register in shift operand");
3124 return -1;
3125 }
3126 } else {
3127 Error(Parser.getTok().getLoc(),
3128 "expected immediate or register in shift operand");
3129 return -1;
3130 }
3131 }
3132
3133 if (ShiftReg && ShiftTy != ARM_AM::rrx)
3134 Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
3135 ShiftReg, Imm,
3136 S, EndLoc));
3137 else
3138 Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
3139 S, EndLoc));
3140
3141 return 0;
3142 }
3143
3144
3145 /// Try to parse a register name. The token must be an Identifier when called.
3146 /// If it's a register, an AsmOperand is created. Another AsmOperand is created
3147 /// if there is a "writeback". Returns 'true' if it's not a register.
3148 ///
3149 /// TODO this is likely to change to allow different register types and/or to
3150 /// parse for a specific register type.
3151 bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
3152 MCAsmParser &Parser = getParser();
3153 const AsmToken &RegTok = Parser.getTok();
3154 int RegNo = tryParseRegister();
3155 if (RegNo == -1)
3156 return true;
3157
3158 Operands.push_back(ARMOperand::CreateReg(RegNo, RegTok.getLoc(),
3159 RegTok.getEndLoc()));
3160
3161 const AsmToken &ExclaimTok = Parser.getTok();
3162 if (ExclaimTok.is(AsmToken::Exclaim)) {
3163 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
3164 ExclaimTok.getLoc()));
3165 Parser.Lex(); // Eat exclaim token
3166 return false;
3167 }
3168
3169 // Also check for an index operand. This is only legal for vector registers,
3170 // but that'll get caught OK in operand matching, so we don't need to
3171 // explicitly filter everything else out here.
3172 if (Parser.getTok().is(AsmToken::LBrac)) {
3173 SMLoc SIdx = Parser.getTok().getLoc();
3174 Parser.Lex(); // Eat left bracket token.
3175
3176 const MCExpr *ImmVal;
3177 if (getParser().parseExpression(ImmVal))
3178 return true;
3179 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3180 if (!MCE)
3181 return TokError("immediate value expected for vector index");
3182
3183 if (Parser.getTok().isNot(AsmToken::RBrac))
3184 return Error(Parser.getTok().getLoc(), "']' expected");
3185
3186 SMLoc E = Parser.getTok().getEndLoc();
3187 Parser.Lex(); // Eat right bracket token.
3188
3189 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
3190 SIdx, E,
3191 getContext()));
3192 }
3193
3194 return false;
3195 }
3196
3197 /// MatchCoprocessorOperandName - Try to parse a coprocessor related
3198 /// instruction with a symbolic operand name.
3199 /// We accept "crN" syntax for GAS compatibility.
3200 /// <operand-name> ::= <prefix><number>
3201 /// If CoprocOp is 'c', then:
3202 /// <prefix> ::= c | cr
3203 /// If CoprocOp is 'p', then :
3204 /// <prefix> ::= p
3205 /// <number> ::= integer in range [0, 15]
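/// e.g. "p15" -> 15 when CoprocOp is 'p'; "c3" and "cr3" both -> 3 when
/// CoprocOp is 'c'. Returns -1 if the name does not match.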
3206 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
3207 // Use the same layout as the tablegen'erated register name matcher. Ugly,
3208 // but efficient.
3209 if (Name.size() < 2 || Name[0] != CoprocOp)
3210 return -1;
3211 Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
3212
3213 switch (Name.size()) {
3214 default: return -1;
3215 case 1:
3216 switch (Name[0]) {
3217 default: return -1;
3218 case '0': return 0;
3219 case '1': return 1;
3220 case '2': return 2;
3221 case '3': return 3;
3222 case '4': return 4;
3223 case '5': return 5;
3224 case '6': return 6;
3225 case '7': return 7;
3226 case '8': return 8;
3227 case '9': return 9;
3228 }
3229 case 2:
3230 if (Name[0] != '1')
3231 return -1;
3232 switch (Name[1]) {
3233 default: return -1;
3234 // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
3235 // However, old cores (v5/v6) did use them in that way.
3236 case '0': return 10;
3237 case '1': return 11;
3238 case '2': return 12;
3239 case '3': return 13;
3240 case '4': return 14;
3241 case '5': return 15;
3242 }
3243 }
3244 }
3245
3246 /// parseITCondCode - Try to parse a condition code for an IT instruction.
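/// e.g. the "eq" in "it eq".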
3247 ARMAsmParser::OperandMatchResultTy
3248 ARMAsmParser::parseITCondCode(OperandVector &Operands) {
3249 MCAsmParser &Parser = getParser();
3250 SMLoc S = Parser.getTok().getLoc();
3251 const AsmToken &Tok = Parser.getTok();
3252 if (!Tok.is(AsmToken::Identifier))
3253 return MatchOperand_NoMatch;
3254 unsigned CC = StringSwitch<unsigned>(Tok.getString().lower())
3255 .Case("eq", ARMCC::EQ)
3256 .Case("ne", ARMCC::NE)
3257 .Case("hs", ARMCC::HS)
3258 .Case("cs", ARMCC::HS)
3259 .Case("lo", ARMCC::LO)
3260 .Case("cc", ARMCC::LO)
3261 .Case("mi", ARMCC::MI)
3262 .Case("pl", ARMCC::PL)
3263 .Case("vs", ARMCC::VS)
3264 .Case("vc", ARMCC::VC)
3265 .Case("hi", ARMCC::HI)
3266 .Case("ls", ARMCC::LS)
3267 .Case("ge", ARMCC::GE)
3268 .Case("lt", ARMCC::LT)
3269 .Case("gt", ARMCC::GT)
3270 .Case("le", ARMCC::LE)
3271 .Case("al", ARMCC::AL)
3272 .Default(~0U);
3273 if (CC == ~0U)
3274 return MatchOperand_NoMatch;
3275 Parser.Lex(); // Eat the token.
3276
3277 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
3278
3279 return MatchOperand_Success;
3280 }
3281
3282 /// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
3283 /// token must be an Identifier when called, and if it is a coprocessor
3284 /// number, the token is eaten and the operand is added to the operand list.
3285 ARMAsmParser::OperandMatchResultTy
3286 ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
3287 MCAsmParser &Parser = getParser();
3288 SMLoc S = Parser.getTok().getLoc();
3289 const AsmToken &Tok = Parser.getTok();
3290 if (Tok.isNot(AsmToken::Identifier))
3291 return MatchOperand_NoMatch;
3292
3293 int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
3294 if (Num == -1)
3295 return MatchOperand_NoMatch;
3296 // ARMv7 and v8 don't allow cp10/cp11 due to VFP/NEON specific instructions
3297 if ((hasV7Ops() || hasV8Ops()) && (Num == 10 || Num == 11))
3298 return MatchOperand_NoMatch;
3299
3300 Parser.Lex(); // Eat identifier token.
3301 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
3302 return MatchOperand_Success;
3303 }
3304
3305 /// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
3306 /// token must be an Identifier when called, and if it is a coprocessor
3307 /// register, the token is eaten and the operand is added to the operand list.
3308 ARMAsmParser::OperandMatchResultTy
3309 ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
3310 MCAsmParser &Parser = getParser();
3311 SMLoc S = Parser.getTok().getLoc();
3312 const AsmToken &Tok = Parser.getTok();
3313 if (Tok.isNot(AsmToken::Identifier))
3314 return MatchOperand_NoMatch;
3315
3316 int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
3317 if (Reg == -1)
3318 return MatchOperand_NoMatch;
3319
3320 Parser.Lex(); // Eat identifier token.
3321 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
3322 return MatchOperand_Success;
3323 }
3324
3325 /// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
3326 /// coproc_option : '{' imm0_255 '}'
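/// e.g. the "{4}" in "ldc p14, c5, [r1], {4}".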
3327 ARMAsmParser::OperandMatchResultTy
3328 ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
3329 MCAsmParser &Parser = getParser();
3330 SMLoc S = Parser.getTok().getLoc();
3331
3332 // If this isn't a '{', this isn't a coprocessor immediate operand.
3333 if (Parser.getTok().isNot(AsmToken::LCurly))
3334 return MatchOperand_NoMatch;
3335 Parser.Lex(); // Eat the '{'
3336
3337 const MCExpr *Expr;
3338 SMLoc Loc = Parser.getTok().getLoc();
3339 if (getParser().parseExpression(Expr)) {
3340 Error(Loc, "illegal expression");
3341 return MatchOperand_ParseFail;
3342 }
3343 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3344 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
3345 Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
3346 return MatchOperand_ParseFail;
3347 }
3348 int Val = CE->getValue();
3349
3350 // Check for and consume the closing '}'
3351 if (Parser.getTok().isNot(AsmToken::RCurly))
3352 return MatchOperand_ParseFail;
3353 SMLoc E = Parser.getTok().getEndLoc();
3354 Parser.Lex(); // Eat the '}'
3355
3356 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
3357 return MatchOperand_Success;
3358 }
3359
3360 // For register list parsing, we need to map from raw GPR register numbering
3361 // to the enumeration values. The enumeration values aren't sorted by
3362 // register number due to our using "sp", "lr" and "pc" as canonical names.
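// e.g. R12 is followed by SP, then LR, then PC, matching the encoding order
// 12, 13, 14, 15.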
3363 static unsigned getNextRegister(unsigned Reg) {
3364 // If this is a GPR, we need to do it manually, otherwise we can rely
3365 // on the sort ordering of the enumeration since the other reg-classes
3366 // are sane.
3367 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3368 return Reg + 1;
3369 switch(Reg) {
3370 default: llvm_unreachable("Invalid GPR number!");
3371 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2;
3372 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4;
3373 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6;
3374 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8;
3375 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10;
3376 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
3377 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR;
3378 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0;
3379 }
3380 }
3381
3382 // Return the low-subreg of a given Q register.
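// e.g. Q3 -> D6; Qn maps onto the D register pair D(2n), D(2n+1).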
3383 static unsigned getDRegFromQReg(unsigned QReg) {
3384 switch (QReg) {
3385 default: llvm_unreachable("expected a Q register!");
3386 case ARM::Q0: return ARM::D0;
3387 case ARM::Q1: return ARM::D2;
3388 case ARM::Q2: return ARM::D4;
3389 case ARM::Q3: return ARM::D6;
3390 case ARM::Q4: return ARM::D8;
3391 case ARM::Q5: return ARM::D10;
3392 case ARM::Q6: return ARM::D12;
3393 case ARM::Q7: return ARM::D14;
3394 case ARM::Q8: return ARM::D16;
3395 case ARM::Q9: return ARM::D18;
3396 case ARM::Q10: return ARM::D20;
3397 case ARM::Q11: return ARM::D22;
3398 case ARM::Q12: return ARM::D24;
3399 case ARM::Q13: return ARM::D26;
3400 case ARM::Q14: return ARM::D28;
3401 case ARM::Q15: return ARM::D30;
3402 }
3403 }
3404
3405 /// Parse a register list.
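/// e.g. "{r0, r1, r4-r7}", "{r4-r11, lr}^", "{d0-d3}", or "{q0, q1}" (the Q
/// registers are interpreted as their two D sub-registers).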
3406 bool ARMAsmParser::parseRegisterList(OperandVector &Operands) {
3407 MCAsmParser &Parser = getParser();
3408 assert(Parser.getTok().is(AsmToken::LCurly) &&
3409 "Token is not a Left Curly Brace");
3410 SMLoc S = Parser.getTok().getLoc();
3411 Parser.Lex(); // Eat '{' token.
3412 SMLoc RegLoc = Parser.getTok().getLoc();
3413
3414 // Check the first register in the list to see what register class
3415 // this is a list of.
3416 int Reg = tryParseRegister();
3417 if (Reg == -1)
3418 return Error(RegLoc, "register expected");
3419
3420 // The reglist instructions have at most 16 registers, so reserve
3421 // space for that many.
3422 int EReg = 0;
3423 SmallVector<std::pair<unsigned, unsigned>, 16> Registers;
3424
3425 // Allow Q regs and just interpret them as the two D sub-registers.
3426 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3427 Reg = getDRegFromQReg(Reg);
3428 EReg = MRI->getEncodingValue(Reg);
3429 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3430 ++Reg;
3431 }
3432 const MCRegisterClass *RC;
3433 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3434 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
3435 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
3436 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
3437 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
3438 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
3439 else
3440 return Error(RegLoc, "invalid register in register list");
3441
3442 // Store the register.
3443 EReg = MRI->getEncodingValue(Reg);
3444 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3445
3446 // This starts immediately after the first register token in the list,
3447 // so we can see either a comma or a minus (range separator) as a legal
3448 // next token.
3449 while (Parser.getTok().is(AsmToken::Comma) ||
3450 Parser.getTok().is(AsmToken::Minus)) {
3451 if (Parser.getTok().is(AsmToken::Minus)) {
3452 Parser.Lex(); // Eat the minus.
3453 SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3454 int EndReg = tryParseRegister();
3455 if (EndReg == -1)
3456 return Error(AfterMinusLoc, "register expected");
3457 // Allow Q regs and just interpret them as the two D sub-registers.
3458 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3459 EndReg = getDRegFromQReg(EndReg) + 1;
3460 // If the register is the same as the start reg, there's nothing
3461 // more to do.
3462 if (Reg == EndReg)
3463 continue;
3464 // The register must be in the same register class as the first.
3465 if (!RC->contains(EndReg))
3466 return Error(AfterMinusLoc, "invalid register in register list");
3467 // Ranges must go from low to high.
3468 if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
3469 return Error(AfterMinusLoc, "bad range in register list");
3470
3471 // Add all the registers in the range to the register list.
3472 while (Reg != EndReg) {
3473 Reg = getNextRegister(Reg);
3474 EReg = MRI->getEncodingValue(Reg);
3475 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3476 }
3477 continue;
3478 }
3479 Parser.Lex(); // Eat the comma.
3480 RegLoc = Parser.getTok().getLoc();
3481 int OldReg = Reg;
3482 const AsmToken RegTok = Parser.getTok();
3483 Reg = tryParseRegister();
3484 if (Reg == -1)
3485 return Error(RegLoc, "register expected");
3486 // Allow Q regs and just interpret them as the two D sub-registers.
3487 bool isQReg = false;
3488 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3489 Reg = getDRegFromQReg(Reg);
3490 isQReg = true;
3491 }
3492 // The register must be in the same register class as the first.
3493 if (!RC->contains(Reg))
3494 return Error(RegLoc, "invalid register in register list");
3495 // List must be monotonically increasing.
3496 if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
3497 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3498 Warning(RegLoc, "register list not in ascending order");
3499 else
3500 return Error(RegLoc, "register list not in ascending order");
3501 }
3502 if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
3503 Warning(RegLoc, "duplicated register (" + RegTok.getString() +
3504 ") in register list");
3505 continue;
3506 }
3507 // VFP register lists must also be contiguous.
3508 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
3509 Reg != OldReg + 1)
3510 return Error(RegLoc, "non-contiguous register range");
3511 EReg = MRI->getEncodingValue(Reg);
3512 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3513 if (isQReg) {
3514 EReg = MRI->getEncodingValue(++Reg);
3515 Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3516 }
3517 }
3518
3519 if (Parser.getTok().isNot(AsmToken::RCurly))
3520 return Error(Parser.getTok().getLoc(), "'}' expected");
3521 SMLoc E = Parser.getTok().getEndLoc();
3522 Parser.Lex(); // Eat '}' token.
3523
3524 // Push the register list operand.
3525 Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
3526
3527 // The ARM system instruction variants for LDM/STM have a '^' token here.
3528 if (Parser.getTok().is(AsmToken::Caret)) {
3529 Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
3530 Parser.Lex(); // Eat '^' token.
3531 }
3532
3533 return false;
3534 }
3535
3536 // Helper function to parse the lane index for vector lists.
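// Accepts "[]" for the all-lanes form, "[<n>]" for an indexed lane, or
// nothing at all (NoLanes).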
3537 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3538 parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
3539 MCAsmParser &Parser = getParser();
3540 Index = 0; // Always return a defined index value.
3541 if (Parser.getTok().is(AsmToken::LBrac)) {
3542 Parser.Lex(); // Eat the '['.
3543 if (Parser.getTok().is(AsmToken::RBrac)) {
3544 // "Dn[]" is the 'all lanes' syntax.
3545 LaneKind = AllLanes;
3546 EndLoc = Parser.getTok().getEndLoc();
3547 Parser.Lex(); // Eat the ']'.
3548 return MatchOperand_Success;
3549 }
3550
3551 // There's an optional '#' token here. Normally there wouldn't be, but
3552 // inline assembly puts one in, and it's friendly to accept that.
3553 if (Parser.getTok().is(AsmToken::Hash))
3554 Parser.Lex(); // Eat the '#'.
3555
3556 const MCExpr *LaneIndex;
3557 SMLoc Loc = Parser.getTok().getLoc();
3558 if (getParser().parseExpression(LaneIndex)) {
3559 Error(Loc, "illegal expression");
3560 return MatchOperand_ParseFail;
3561 }
3562 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
3563 if (!CE) {
3564 Error(Loc, "lane index must be empty or an integer");
3565 return MatchOperand_ParseFail;
3566 }
3567 if (Parser.getTok().isNot(AsmToken::RBrac)) {
3568 Error(Parser.getTok().getLoc(), "']' expected");
3569 return MatchOperand_ParseFail;
3570 }
3571 EndLoc = Parser.getTok().getEndLoc();
3572 Parser.Lex(); // Eat the ']'.
3573 int64_t Val = CE->getValue();
3574
3575 // FIXME: Make this range check context sensitive for .8, .16, .32.
3576 if (Val < 0 || Val > 7) {
3577 Error(Parser.getTok().getLoc(), "lane index out of range");
3578 return MatchOperand_ParseFail;
3579 }
3580 Index = Val;
3581 LaneKind = IndexedLane;
3582 return MatchOperand_Success;
3583 }
3584 LaneKind = NoLanes;
3585 return MatchOperand_Success;
3586 }
3587
3588 // parse a vector register list
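// e.g. "{d0, d1}", "{d0-d3}", "{q0, q1}", "{d0[1], d1[1]}", or a bare
// "d0" / "q0" accepted as a gas extension.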
3589 ARMAsmParser::OperandMatchResultTy
3590 ARMAsmParser::parseVectorList(OperandVector &Operands) {
3591 MCAsmParser &Parser = getParser();
3592 VectorLaneTy LaneKind;
3593 unsigned LaneIndex;
3594 SMLoc S = Parser.getTok().getLoc();
3595 // As an extension (to match gas), support a plain D register or Q register
3596 // (without enclosing curly braces) as a single or double entry list,
3597 // respectively.
3598 if (Parser.getTok().is(AsmToken::Identifier)) {
3599 SMLoc E = Parser.getTok().getEndLoc();
3600 int Reg = tryParseRegister();
3601 if (Reg == -1)
3602 return MatchOperand_NoMatch;
3603 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
3604 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3605 if (Res != MatchOperand_Success)
3606 return Res;
3607 switch (LaneKind) {
3608 case NoLanes:
3609 Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
3610 break;
3611 case AllLanes:
3612 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
3613 S, E));
3614 break;
3615 case IndexedLane:
3616 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
3617 LaneIndex,
3618 false, S, E));
3619 break;
3620 }
3621 return MatchOperand_Success;
3622 }
3623 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3624 Reg = getDRegFromQReg(Reg);
3625 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3626 if (Res != MatchOperand_Success)
3627 return Res;
3628 switch (LaneKind) {
3629 case NoLanes:
3630 Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3631 &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3632 Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
3633 break;
3634 case AllLanes:
3635 Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3636 &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3637 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
3638 S, E));
3639 break;
3640 case IndexedLane:
3641 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3642 LaneIndex,
3643 false, S, E));
3644 break;
3645 }
3646 return MatchOperand_Success;
3647 }
3648 Error(S, "vector register expected");
3649 return MatchOperand_ParseFail;
3650 }
3651
3652 if (Parser.getTok().isNot(AsmToken::LCurly))
3653 return MatchOperand_NoMatch;
3654
3655 Parser.Lex(); // Eat '{' token.
3656 SMLoc RegLoc = Parser.getTok().getLoc();
3657
3658 int Reg = tryParseRegister();
3659 if (Reg == -1) {
3660 Error(RegLoc, "register expected");
3661 return MatchOperand_ParseFail;
3662 }
3663 unsigned Count = 1;
3664 int Spacing = 0;
3665 unsigned FirstReg = Reg;
3666 // The list is of D registers, but we also allow Q regs and just interpret
3667 // them as the two D sub-registers.
3668 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3669 FirstReg = Reg = getDRegFromQReg(Reg);
3670 Spacing = 1; // double-spacing requires explicit D registers, otherwise
3671 // it's ambiguous with four-register single spaced.
3672 ++Reg;
3673 ++Count;
3674 }
3675
3676 SMLoc E;
3677 if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
3678 return MatchOperand_ParseFail;
3679
3680 while (Parser.getTok().is(AsmToken::Comma) ||
3681 Parser.getTok().is(AsmToken::Minus)) {
3682 if (Parser.getTok().is(AsmToken::Minus)) {
3683 if (!Spacing)
3684 Spacing = 1; // Register range implies a single spaced list.
3685 else if (Spacing == 2) {
3686 Error(Parser.getTok().getLoc(),
3687 "sequential registers in double spaced list");
3688 return MatchOperand_ParseFail;
3689 }
3690 Parser.Lex(); // Eat the minus.
3691 SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3692 int EndReg = tryParseRegister();
3693 if (EndReg == -1) {
3694 Error(AfterMinusLoc, "register expected");
3695 return MatchOperand_ParseFail;
3696 }
3697 // Allow Q regs and just interpret them as the two D sub-registers.
3698 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3699 EndReg = getDRegFromQReg(EndReg) + 1;
3700 // If the register is the same as the start reg, there's nothing
3701 // more to do.
3702 if (Reg == EndReg)
3703 continue;
3704 // The register must be in the same register class as the first.
3705 if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3706 Error(AfterMinusLoc, "invalid register in register list");
3707 return MatchOperand_ParseFail;
3708 }
3709 // Ranges must go from low to high.
3710 if (Reg > EndReg) {
3711 Error(AfterMinusLoc, "bad range in register list");
3712 return MatchOperand_ParseFail;
3713 }
3714 // Parse the lane specifier if present.
3715 VectorLaneTy NextLaneKind;
3716 unsigned NextLaneIndex;
3717 if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3718 MatchOperand_Success)
3719 return MatchOperand_ParseFail;
3720 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3721 Error(AfterMinusLoc, "mismatched lane index in register list");
3722 return MatchOperand_ParseFail;
3723 }
3724
3725 // Add all the registers in the range to the register list.
3726 Count += EndReg - Reg;
3727 Reg = EndReg;
3728 continue;
3729 }
3730 Parser.Lex(); // Eat the comma.
3731 RegLoc = Parser.getTok().getLoc();
3732 int OldReg = Reg;
3733 Reg = tryParseRegister();
3734 if (Reg == -1) {
3735 Error(RegLoc, "register expected");
3736 return MatchOperand_ParseFail;
3737 }
3738 // vector register lists must be contiguous.
3739 // It's OK to use the enumeration values directly here, as the
3740 // VFP register classes have the enum sorted properly.
3741 //
3742 // The list is of D registers, but we also allow Q regs and just interpret
3743 // them as the two D sub-registers.
3744 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3745 if (!Spacing)
3746 Spacing = 1; // Register range implies a single spaced list.
3747 else if (Spacing == 2) {
3748 Error(RegLoc,
3749 "invalid register in double-spaced list (must be 'D' register')");
3750 return MatchOperand_ParseFail;
3751 }
3752 Reg = getDRegFromQReg(Reg);
3753 if (Reg != OldReg + 1) {
3754 Error(RegLoc, "non-contiguous register range");
3755 return MatchOperand_ParseFail;
3756 }
3757 ++Reg;
3758 Count += 2;
3759 // Parse the lane specifier if present.
3760 VectorLaneTy NextLaneKind;
3761 unsigned NextLaneIndex;
3762 SMLoc LaneLoc = Parser.getTok().getLoc();
3763 if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3764 MatchOperand_Success)
3765 return MatchOperand_ParseFail;
3766 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3767 Error(LaneLoc, "mismatched lane index in register list");
3768 return MatchOperand_ParseFail;
3769 }
3770 continue;
3771 }
3772 // Normal D register.
3773 // Figure out the register spacing (single or double) of the list if
3774 // we don't know it already.
3775 if (!Spacing)
3776 Spacing = 1 + (Reg == OldReg + 2);
3777
3778 // Just check that it's contiguous and keep going.
3779 if (Reg != OldReg + Spacing) {
3780 Error(RegLoc, "non-contiguous register range");
3781 return MatchOperand_ParseFail;
3782 }
3783 ++Count;
3784 // Parse the lane specifier if present.
3785 VectorLaneTy NextLaneKind;
3786 unsigned NextLaneIndex;
3787 SMLoc EndLoc = Parser.getTok().getLoc();
3788 if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
3789 return MatchOperand_ParseFail;
3790 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3791 Error(EndLoc, "mismatched lane index in register list");
3792 return MatchOperand_ParseFail;
3793 }
3794 }
3795
3796 if (Parser.getTok().isNot(AsmToken::RCurly)) {
3797 Error(Parser.getTok().getLoc(), "'}' expected");
3798 return MatchOperand_ParseFail;
3799 }
3800 E = Parser.getTok().getEndLoc();
3801 Parser.Lex(); // Eat '}' token.
3802
3803 switch (LaneKind) {
3804 case NoLanes:
3805 // Two-register operands have been converted to the
3806 // composite register classes.
3807 if (Count == 2) {
3808 const MCRegisterClass *RC = (Spacing == 1) ?
3809 &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3810 &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3811 FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3812 }
3813
3814 Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3815 (Spacing == 2), S, E));
3816 break;
3817 case AllLanes:
3818 // Two-register operands have been converted to the
3819 // composite register classes.
3820 if (Count == 2) {
3821 const MCRegisterClass *RC = (Spacing == 1) ?
3822 &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3823 &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3824 FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3825 }
3826 Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3827 (Spacing == 2),
3828 S, E));
3829 break;
3830 case IndexedLane:
3831 Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3832 LaneIndex,
3833 (Spacing == 2),
3834 S, E));
3835 break;
3836 }
3837 return MatchOperand_Success;
3838 }
3839
3840 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
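/// e.g. the "ish" in "dmb ish", the "sy" in "dsb sy", or a raw immediate
/// such as "dmb #15".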
3841 ARMAsmParser::OperandMatchResultTy
3842 ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
3843 MCAsmParser &Parser = getParser();
3844 SMLoc S = Parser.getTok().getLoc();
3845 const AsmToken &Tok = Parser.getTok();
3846 unsigned Opt;
3847
3848 if (Tok.is(AsmToken::Identifier)) {
3849 StringRef OptStr = Tok.getString();
3850
3851 Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower())
3852 .Case("sy", ARM_MB::SY)
3853 .Case("st", ARM_MB::ST)
3854 .Case("ld", ARM_MB::LD)
3855 .Case("sh", ARM_MB::ISH)
3856 .Case("ish", ARM_MB::ISH)
3857 .Case("shst", ARM_MB::ISHST)
3858 .Case("ishst", ARM_MB::ISHST)
3859 .Case("ishld", ARM_MB::ISHLD)
3860 .Case("nsh", ARM_MB::NSH)
3861 .Case("un", ARM_MB::NSH)
3862 .Case("nshst", ARM_MB::NSHST)
3863 .Case("nshld", ARM_MB::NSHLD)
3864 .Case("unst", ARM_MB::NSHST)
3865 .Case("osh", ARM_MB::OSH)
3866 .Case("oshst", ARM_MB::OSHST)
3867 .Case("oshld", ARM_MB::OSHLD)
3868 .Default(~0U);
3869
3870 // ishld, oshld, nshld and ld are only available from ARMv8.
3871 if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
3872 Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
3873 Opt = ~0U;
3874
3875 if (Opt == ~0U)
3876 return MatchOperand_NoMatch;
3877
3878 Parser.Lex(); // Eat identifier token.
3879 } else if (Tok.is(AsmToken::Hash) ||
3880 Tok.is(AsmToken::Dollar) ||
3881 Tok.is(AsmToken::Integer)) {
3882 if (Parser.getTok().isNot(AsmToken::Integer))
3883 Parser.Lex(); // Eat '#' or '$'.
3884 SMLoc Loc = Parser.getTok().getLoc();
3885
3886 const MCExpr *MemBarrierID;
3887 if (getParser().parseExpression(MemBarrierID)) {
3888 Error(Loc, "illegal expression");
3889 return MatchOperand_ParseFail;
3890 }
3891
3892 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
3893 if (!CE) {
3894 Error(Loc, "constant expression expected");
3895 return MatchOperand_ParseFail;
3896 }
3897
3898 int Val = CE->getValue();
3899 if (Val & ~0xf) {
3900 Error(Loc, "immediate value out of range");
3901 return MatchOperand_ParseFail;
3902 }
3903
3904 Opt = ARM_MB::RESERVED_0 + Val;
3905 } else
3906 return MatchOperand_ParseFail;
3907
3908 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3909 return MatchOperand_Success;
3910 }
3911
3912 /// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
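/// e.g. the "sy" in "isb sy", or a raw immediate such as "isb #15".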
3913 ARMAsmParser::OperandMatchResultTy
3914 ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
3915 MCAsmParser &Parser = getParser();
3916 SMLoc S = Parser.getTok().getLoc();
3917 const AsmToken &Tok = Parser.getTok();
3918 unsigned Opt;
3919
3920 if (Tok.is(AsmToken::Identifier)) {
3921 StringRef OptStr = Tok.getString();
3922
3923 if (OptStr.equals_lower("sy"))
3924 Opt = ARM_ISB::SY;
3925 else
3926 return MatchOperand_NoMatch;
3927
3928 Parser.Lex(); // Eat identifier token.
3929 } else if (Tok.is(AsmToken::Hash) ||
3930 Tok.is(AsmToken::Dollar) ||
3931 Tok.is(AsmToken::Integer)) {
3932 if (Parser.getTok().isNot(AsmToken::Integer))
3933 Parser.Lex(); // Eat '#' or '$'.
3934 SMLoc Loc = Parser.getTok().getLoc();
3935
3936 const MCExpr *ISBarrierID;
3937 if (getParser().parseExpression(ISBarrierID)) {
3938 Error(Loc, "illegal expression");
3939 return MatchOperand_ParseFail;
3940 }
3941
3942 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
3943 if (!CE) {
3944 Error(Loc, "constant expression expected");
3945 return MatchOperand_ParseFail;
3946 }
3947
3948 int Val = CE->getValue();
3949 if (Val & ~0xf) {
3950 Error(Loc, "immediate value out of range");
3951 return MatchOperand_ParseFail;
3952 }
3953
3954 Opt = ARM_ISB::RESERVED_0 + Val;
3955 } else
3956 return MatchOperand_ParseFail;
3957
3958 Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
3959 (ARM_ISB::InstSyncBOpt)Opt, S));
3960 return MatchOperand_Success;
3961 }
3962
3963
3964 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
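/// e.g. the "if" in "cpsid if", or "none" for no flags at all.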
3965 ARMAsmParser::OperandMatchResultTy
3966 ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
3967 MCAsmParser &Parser = getParser();
3968 SMLoc S = Parser.getTok().getLoc();
3969 const AsmToken &Tok = Parser.getTok();
3970 if (!Tok.is(AsmToken::Identifier))
3971 return MatchOperand_NoMatch;
3972 StringRef IFlagsStr = Tok.getString();
3973
3974 // An iflags string of "none" is interpreted to mean that none of the AIF
3975 // bits are set. Not a terribly useful instruction, but a valid encoding.
3976 unsigned IFlags = 0;
3977 if (IFlagsStr != "none") {
3978 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3979 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3980 .Case("a", ARM_PROC::A)
3981 .Case("i", ARM_PROC::I)
3982 .Case("f", ARM_PROC::F)
3983 .Default(~0U);
3984
3985 // If some specific iflag is already set, it means that some letter is
3986 // present more than once, this is not acceptable.
3987 if (Flag == ~0U || (IFlags & Flag))
3988 return MatchOperand_NoMatch;
3989
3990 IFlags |= Flag;
3991 }
3992 }
3993
3994 Parser.Lex(); // Eat identifier token.
3995 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3996 return MatchOperand_Success;
3997 }
3998
3999 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
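/// e.g. the "apsr_nzcvq" in "msr apsr_nzcvq, r0" (M-class), or the "cpsr_fc"
/// in "msr cpsr_fc, r0" (A/R-class).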
4000 ARMAsmParser::OperandMatchResultTy
4001 ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
4002 MCAsmParser &Parser = getParser();
4003 SMLoc S = Parser.getTok().getLoc();
4004 const AsmToken &Tok = Parser.getTok();
4005 if (!Tok.is(AsmToken::Identifier))
4006 return MatchOperand_NoMatch;
4007 StringRef Mask = Tok.getString();
4008
4009 if (isMClass()) {
4010 // See ARMv6-M 10.1.1
4011 std::string Name = Mask.lower();
4012 unsigned FlagsVal = StringSwitch<unsigned>(Name)
4013 // Note: in the documentation,
4014 // ARM deprecates using MSR APSR without a _<bits> qualifier as an alias
4015 // for MSR APSR_nzcvq,
4016 // but we do make it an alias here in order to get the "mask encoding"
4017 // bits correct on MSR APSR writes.
4018 //
4019 // FIXME: Note the 0xc00 "mask encoding" bits version of the registers
4020 // should really only be allowed when writing a special register. Note
4021 // they get dropped in the MRS instruction reading a special register as
4022 // the SYSm field is only 8 bits.
4023 .Case("apsr", 0x800)
4024 .Case("apsr_nzcvq", 0x800)
4025 .Case("apsr_g", 0x400)
4026 .Case("apsr_nzcvqg", 0xc00)
4027 .Case("iapsr", 0x801)
4028 .Case("iapsr_nzcvq", 0x801)
4029 .Case("iapsr_g", 0x401)
4030 .Case("iapsr_nzcvqg", 0xc01)
4031 .Case("eapsr", 0x802)
4032 .Case("eapsr_nzcvq", 0x802)
4033 .Case("eapsr_g", 0x402)
4034 .Case("eapsr_nzcvqg", 0xc02)
4035 .Case("xpsr", 0x803)
4036 .Case("xpsr_nzcvq", 0x803)
4037 .Case("xpsr_g", 0x403)
4038 .Case("xpsr_nzcvqg", 0xc03)
4039 .Case("ipsr", 0x805)
4040 .Case("epsr", 0x806)
4041 .Case("iepsr", 0x807)
4042 .Case("msp", 0x808)
4043 .Case("psp", 0x809)
4044 .Case("primask", 0x810)
4045 .Case("basepri", 0x811)
4046 .Case("basepri_max", 0x812)
4047 .Case("faultmask", 0x813)
4048 .Case("control", 0x814)
4049 .Default(~0U);
4050
4051 if (FlagsVal == ~0U)
4052 return MatchOperand_NoMatch;
4053
4054 if (!hasThumb2DSP() && (FlagsVal & 0x400))
4055 // The _g and _nzcvqg versions are only valid if the DSP extension is
4056 // available.
4057 return MatchOperand_NoMatch;
4058
4059 if (!hasV7Ops() && FlagsVal >= 0x811 && FlagsVal <= 0x813)
4060 // basepri, basepri_max and faultmask are only valid for v7-M.
4061 return MatchOperand_NoMatch;
4062
4063 Parser.Lex(); // Eat identifier token.
4064 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
4065 return MatchOperand_Success;
4066 }
4067
4068 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
4069 size_t Start = 0, Next = Mask.find('_');
4070 StringRef Flags = "";
4071 std::string SpecReg = Mask.slice(Start, Next).lower();
4072 if (Next != StringRef::npos)
4073 Flags = Mask.slice(Next+1, Mask.size());
4074
4075 // FlagsVal contains the complete mask:
4076 // 3-0: Mask
4077 // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
4078 unsigned FlagsVal = 0;
4079
4080 if (SpecReg == "apsr") {
4081 FlagsVal = StringSwitch<unsigned>(Flags)
4082 .Case("nzcvq", 0x8) // same as CPSR_f
4083 .Case("g", 0x4) // same as CPSR_s
4084 .Case("nzcvqg", 0xc) // same as CPSR_fs
4085 .Default(~0U);
4086
4087 if (FlagsVal == ~0U) {
4088 if (!Flags.empty())
4089 return MatchOperand_NoMatch;
4090 else
4091 FlagsVal = 8; // No flag
4092 }
4093 } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
4094 // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
4095 if (Flags == "all" || Flags == "")
4096 Flags = "fc";
4097 for (int i = 0, e = Flags.size(); i != e; ++i) {
4098 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
4099 .Case("c", 1)
4100 .Case("x", 2)
4101 .Case("s", 4)
4102 .Case("f", 8)
4103 .Default(~0U);
4104
4105 // If some specific flag is already set, it means that some letter is
4106 // present more than once, this is not acceptable.
4107 if (FlagsVal == ~0U || (FlagsVal & Flag))
4108 return MatchOperand_NoMatch;
4109 FlagsVal |= Flag;
4110 }
4111 } else // No match for special register.
4112 return MatchOperand_NoMatch;
4113
4114 // Special register without flags is NOT equivalent to "fc" flags.
4115 // NOTE: This is a divergence from gas' behavior. Uncommenting the following
4116 // two lines would enable gas compatibility at the expense of breaking
4117 // round-tripping.
4118 //
4119 // if (!FlagsVal)
4120 // FlagsVal = 0x9;
4121
4122 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
4123 if (SpecReg == "spsr")
4124 FlagsVal |= 16;
4125
4126 Parser.Lex(); // Eat identifier token.
4127 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
4128 return MatchOperand_Success;
4129 }
4130
4131 /// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
4132 /// use in the MRS/MSR instructions added to support virtualization.
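/// e.g. the "lr_irq" in "mrs r0, lr_irq" or the "sp_usr" in "msr sp_usr, r1".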
4133 ARMAsmParser::OperandMatchResultTy
4134 ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
4135 MCAsmParser &Parser = getParser();
4136 SMLoc S = Parser.getTok().getLoc();
4137 const AsmToken &Tok = Parser.getTok();
4138 if (!Tok.is(AsmToken::Identifier))
4139 return MatchOperand_NoMatch;
4140 StringRef RegName = Tok.getString();
4141
4142 // The values here come from B9.2.3 of the ARM ARM, where bits 4-0 are SysM
4143 // and bit 5 is R.
4144 unsigned Encoding = StringSwitch<unsigned>(RegName.lower())
4145 .Case("r8_usr", 0x00)
4146 .Case("r9_usr", 0x01)
4147 .Case("r10_usr", 0x02)
4148 .Case("r11_usr", 0x03)
4149 .Case("r12_usr", 0x04)
4150 .Case("sp_usr", 0x05)
4151 .Case("lr_usr", 0x06)
4152 .Case("r8_fiq", 0x08)
4153 .Case("r9_fiq", 0x09)
4154 .Case("r10_fiq", 0x0a)
4155 .Case("r11_fiq", 0x0b)
4156 .Case("r12_fiq", 0x0c)
4157 .Case("sp_fiq", 0x0d)
4158 .Case("lr_fiq", 0x0e)
4159 .Case("lr_irq", 0x10)
4160 .Case("sp_irq", 0x11)
4161 .Case("lr_svc", 0x12)
4162 .Case("sp_svc", 0x13)
4163 .Case("lr_abt", 0x14)
4164 .Case("sp_abt", 0x15)
4165 .Case("lr_und", 0x16)
4166 .Case("sp_und", 0x17)
4167 .Case("lr_mon", 0x1c)
4168 .Case("sp_mon", 0x1d)
4169 .Case("elr_hyp", 0x1e)
4170 .Case("sp_hyp", 0x1f)
4171 .Case("spsr_fiq", 0x2e)
4172 .Case("spsr_irq", 0x30)
4173 .Case("spsr_svc", 0x32)
4174 .Case("spsr_abt", 0x34)
4175 .Case("spsr_und", 0x36)
4176 .Case("spsr_mon", 0x3c)
4177 .Case("spsr_hyp", 0x3e)
4178 .Default(~0U);
4179
4180 if (Encoding == ~0U)
4181 return MatchOperand_NoMatch;
4182
4183 Parser.Lex(); // Eat identifier token.
4184 Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
4185 return MatchOperand_Success;
4186 }
4187
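/// parsePKHImm - Parse the shift specifier operand of the PKH instructions,
/// e.g. the "lsl #3" in "pkhbt r0, r1, r2, lsl #3" or the "asr #15" in
/// "pkhtb r0, r1, r2, asr #15".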
4188 ARMAsmParser::OperandMatchResultTy
4189 ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op, int Low,
4190 int High) {
4191 MCAsmParser &Parser = getParser();
4192 const AsmToken &Tok = Parser.getTok();
4193 if (Tok.isNot(AsmToken::Identifier)) {
4194 Error(Parser.getTok().getLoc(), Op + " operand expected.");
4195 return MatchOperand_ParseFail;
4196 }
4197 StringRef ShiftName = Tok.getString();
4198 std::string LowerOp = Op.lower();
4199 std::string UpperOp = Op.upper();
4200 if (ShiftName != LowerOp && ShiftName != UpperOp) {
4201 Error(Parser.getTok().getLoc(), Op + " operand expected.");
4202 return MatchOperand_ParseFail;
4203 }
4204 Parser.Lex(); // Eat shift type token.
4205
4206 // There must be a '#' and a shift amount.
4207 if (Parser.getTok().isNot(AsmToken::Hash) &&
4208 Parser.getTok().isNot(AsmToken::Dollar)) {
4209 Error(Parser.getTok().getLoc(), "'#' expected");
4210 return MatchOperand_ParseFail;
4211 }
4212 Parser.Lex(); // Eat hash token.
4213
4214 const MCExpr *ShiftAmount;
4215 SMLoc Loc = Parser.getTok().getLoc();
4216 SMLoc EndLoc;
4217 if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4218 Error(Loc, "illegal expression");
4219 return MatchOperand_ParseFail;
4220 }
4221 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4222 if (!CE) {
4223 Error(Loc, "constant expression expected");
4224 return MatchOperand_ParseFail;
4225 }
4226 int Val = CE->getValue();
4227 if (Val < Low || Val > High) {
4228 Error(Loc, "immediate value out of range");
4229 return MatchOperand_ParseFail;
4230 }
4231
4232 Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
4233
4234 return MatchOperand_Success;
4235 }
4236
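/// parseSetEndImm - Parse the endianness specifier of the SETEND instruction:
/// "be" or "le".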
4237 ARMAsmParser::OperandMatchResultTy
4238 ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
4239 MCAsmParser &Parser = getParser();
4240 const AsmToken &Tok = Parser.getTok();
4241 SMLoc S = Tok.getLoc();
4242 if (Tok.isNot(AsmToken::Identifier)) {
4243 Error(S, "'be' or 'le' operand expected");
4244 return MatchOperand_ParseFail;
4245 }
4246 int Val = StringSwitch<int>(Tok.getString().lower())
4247 .Case("be", 1)
4248 .Case("le", 0)
4249 .Default(-1);
4250 Parser.Lex(); // Eat the token.
4251
4252 if (Val == -1) {
4253 Error(S, "'be' or 'le' operand expected");
4254 return MatchOperand_ParseFail;
4255 }
4256 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
4257 getContext()),
4258 S, Tok.getEndLoc()));
4259 return MatchOperand_Success;
4260 }
4261
4262 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
4263 /// instructions. Legal values are:
4264 /// lsl #n 'n' in [0,31]
4265 /// asr #n 'n' in [1,32]
4266 /// n == 32 encoded as n == 0.
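/// e.g. the "lsl #4" in "ssat r0, #8, r1, lsl #4", or the "asr #32" in
/// "ssat r0, #8, r1, asr #32" (encoded as asr #0).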
4267 ARMAsmParser::OperandMatchResultTy
4268 ARMAsmParser::parseShifterImm(OperandVector &Operands) {
4269 MCAsmParser &Parser = getParser();
4270 const AsmToken &Tok = Parser.getTok();
4271 SMLoc S = Tok.getLoc();
4272 if (Tok.isNot(AsmToken::Identifier)) {
4273 Error(S, "shift operator 'asr' or 'lsl' expected");
4274 return MatchOperand_ParseFail;
4275 }
4276 StringRef ShiftName = Tok.getString();
4277 bool isASR;
4278 if (ShiftName == "lsl" || ShiftName == "LSL")
4279 isASR = false;
4280 else if (ShiftName == "asr" || ShiftName == "ASR")
4281 isASR = true;
4282 else {
4283 Error(S, "shift operator 'asr' or 'lsl' expected");
4284 return MatchOperand_ParseFail;
4285 }
4286 Parser.Lex(); // Eat the operator.
4287
4288 // A '#' and a shift amount.
4289 if (Parser.getTok().isNot(AsmToken::Hash) &&
4290 Parser.getTok().isNot(AsmToken::Dollar)) {
4291 Error(Parser.getTok().getLoc(), "'#' expected");
4292 return MatchOperand_ParseFail;
4293 }
4294 Parser.Lex(); // Eat hash token.
4295 SMLoc ExLoc = Parser.getTok().getLoc();
4296
4297 const MCExpr *ShiftAmount;
4298 SMLoc EndLoc;
4299 if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4300 Error(ExLoc, "malformed shift expression");
4301 return MatchOperand_ParseFail;
4302 }
4303 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4304 if (!CE) {
4305 Error(ExLoc, "shift amount must be an immediate");
4306 return MatchOperand_ParseFail;
4307 }
4308
4309 int64_t Val = CE->getValue();
4310 if (isASR) {
4311 // Shift amount must be in [1,32]
4312 if (Val < 1 || Val > 32) {
4313 Error(ExLoc, "'asr' shift amount must be in range [1,32]");
4314 return MatchOperand_ParseFail;
4315 }
4316 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
4317 if (isThumb() && Val == 32) {
4318 Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
4319 return MatchOperand_ParseFail;
4320 }
4321 if (Val == 32) Val = 0;
4322 } else {
4323 // Shift amount must be in [0,31]
4324 if (Val < 0 || Val > 31) {
4325 Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
4326 return MatchOperand_ParseFail;
4327 }
4328 }
4329
4330 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
4331
4332 return MatchOperand_Success;
4333 }
4334
4335 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
4336 /// of instructions. Legal values are:
4337 /// ror #n 'n' in {0, 8, 16, 24}
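/// e.g. the "ror #16" in "sxtb r0, r1, ror #16".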
4338 ARMAsmParser::OperandMatchResultTy
4339 ARMAsmParser::parseRotImm(OperandVector &Operands) {
4340 MCAsmParser &Parser = getParser();
4341 const AsmToken &Tok = Parser.getTok();
4342 SMLoc S = Tok.getLoc();
4343 if (Tok.isNot(AsmToken::Identifier))
4344 return MatchOperand_NoMatch;
4345 StringRef ShiftName = Tok.getString();
4346 if (ShiftName != "ror" && ShiftName != "ROR")
4347 return MatchOperand_NoMatch;
4348 Parser.Lex(); // Eat the operator.
4349
4350 // A '#' and a rotate amount.
4351 if (Parser.getTok().isNot(AsmToken::Hash) &&
4352 Parser.getTok().isNot(AsmToken::Dollar)) {
4353 Error(Parser.getTok().getLoc(), "'#' expected");
4354 return MatchOperand_ParseFail;
4355 }
4356 Parser.Lex(); // Eat hash token.
4357 SMLoc ExLoc = Parser.getTok().getLoc();
4358
4359 const MCExpr *ShiftAmount;
4360 SMLoc EndLoc;
4361 if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4362 Error(ExLoc, "malformed rotate expression");
4363 return MatchOperand_ParseFail;
4364 }
4365 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4366 if (!CE) {
4367 Error(ExLoc, "rotate amount must be an immediate");
4368 return MatchOperand_ParseFail;
4369 }
4370
4371 int64_t Val = CE->getValue();
4372 // Rotate amount must be in {0, 8, 16, 24} (0 is an undocumented extension);
4373 // normally, zero is represented in asm by omitting the rotate operand
4374 // entirely.
4375 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
4376 Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
4377 return MatchOperand_ParseFail;
4378 }
4379
4380 Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
4381
4382 return MatchOperand_Success;
4383 }
4384
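/// parseModImm - Parse an ARM modified immediate operand: an 8-bit value
/// rotated right by an even amount in [0,30]. It can be written either as a
/// single constant (e.g. "#0xff00") or as an explicit "#bits, #rot" pair
/// (e.g. "#255, #24").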
4385 ARMAsmParser::OperandMatchResultTy
4386 ARMAsmParser::parseModImm(OperandVector &Operands) {
4387 MCAsmParser &Parser = getParser();
4388 MCAsmLexer &Lexer = getLexer();
4389 int64_t Imm1, Imm2;
4390
4391 SMLoc S = Parser.getTok().getLoc();
4392
4393 // 1) A mod_imm operand can appear in the place of a register name:
4394 // add r0, #mod_imm
4395 // add r0, r0, #mod_imm
4396 // to correctly handle the latter, we bail out as soon as we see an
4397 // identifier.
4398 //
4399 // 2) Similarly, we do not want to parse into complex operands:
4400 // mov r0, #mod_imm
4401 // mov r0, :lower16:(_foo)
4402 if (Parser.getTok().is(AsmToken::Identifier) ||
4403 Parser.getTok().is(AsmToken::Colon))
4404 return MatchOperand_NoMatch;
4405
4406 // Hash (dollar) is optional as per the ARMARM
4407 if (Parser.getTok().is(AsmToken::Hash) ||
4408 Parser.getTok().is(AsmToken::Dollar)) {
4409 // Avoid parsing into complex operands (#:)
4410 if (Lexer.peekTok().is(AsmToken::Colon))
4411 return MatchOperand_NoMatch;
4412
4413 // Eat the hash (dollar)
4414 Parser.Lex();
4415 }
4416
4417 SMLoc Sx1, Ex1;
4418 Sx1 = Parser.getTok().getLoc();
4419 const MCExpr *Imm1Exp;
4420 if (getParser().parseExpression(Imm1Exp, Ex1)) {
4421 Error(Sx1, "malformed expression");
4422 return MatchOperand_ParseFail;
4423 }
4424
4425 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
4426
4427 if (CE) {
4428 // Immediate must fit within 32 bits.
4429 Imm1 = CE->getValue();
4430 int Enc = ARM_AM::getSOImmVal(Imm1);
4431 if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
4432 // We have a match!
4433 Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
4434 (Enc & 0xF00) >> 7,
4435 Sx1, Ex1));
4436 return MatchOperand_Success;
4437 }
4438
4439 // We have parsed an immediate which is not for us, so fall back to a plain
4440 // immediate. This can happen for instruction aliases. For example,
4441 // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
4442 // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
4443 // instruction with a mod_imm operand. The alias is defined such that the
4444 // parser method is shared, that's why we have to do this here.
4445 if (Parser.getTok().is(AsmToken::EndOfStatement)) {
4446 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
4447 return MatchOperand_Success;
4448 }
4449 } else {
4450 // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
4451 // MCFixup). Fall back to a plain immediate.
4452 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
4453 return MatchOperand_Success;
4454 }
4455
4456 // From this point onward, we expect the input to be a (#bits, #rot) pair
4457 if (Parser.getTok().isNot(AsmToken::Comma)) {
4458 Error(Sx1, "expected modified immediate operand: #[0, 255], #even[0-30]");
4459 return MatchOperand_ParseFail;
4460 }
4461
4462 if (Imm1 & ~0xFF) {
4463 Error(Sx1, "immediate operand must be a number in the range [0, 255]");
4464 return MatchOperand_ParseFail;
4465 }
4466
4467 // Eat the comma
4468 Parser.Lex();
4469
4470 // Repeat for #rot
4471 SMLoc Sx2, Ex2;
4472 Sx2 = Parser.getTok().getLoc();
4473
4474 // Eat the optional hash (dollar)
4475 if (Parser.getTok().is(AsmToken::Hash) ||
4476 Parser.getTok().is(AsmToken::Dollar))
4477 Parser.Lex();
4478
4479 const MCExpr *Imm2Exp;
4480 if (getParser().parseExpression(Imm2Exp, Ex2)) {
4481 Error(Sx2, "malformed expression");
4482 return MatchOperand_ParseFail;
4483 }
4484
4485 CE = dyn_cast<MCConstantExpr>(Imm2Exp);
4486
4487 if (CE) {
4488 Imm2 = CE->getValue();
4489 if (!(Imm2 & ~0x1E)) {
4490 // We have a match!
4491 Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
4492 return MatchOperand_Success;
4493 }
4494 Error(Sx2, "immediate operand must be an even number in the range [0, 30]");
4495 return MatchOperand_ParseFail;
4496 } else {
4497 Error(Sx2, "constant expression expected");
4498 return MatchOperand_ParseFail;
4499 }
4500 }
4501
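// parseBitfield - Parse the "#<lsb>, #<width>" operand pair used by the
// bitfield instructions (BFC/BFI/SBFX/UBFX). For example (illustrative,
// assuming UAL syntax): "bfi r0, r1, #8, #4" has lsb = 8 and width = 4.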
4502 ARMAsmParser::OperandMatchResultTy
4503 ARMAsmParser::parseBitfield(OperandVector &Operands) {
4504 MCAsmParser &Parser = getParser();
4505 SMLoc S = Parser.getTok().getLoc();
4506 // The bitfield descriptor is really two operands, the LSB and the width.
4507 if (Parser.getTok().isNot(AsmToken::Hash) &&
4508 Parser.getTok().isNot(AsmToken::Dollar)) {
4509 Error(Parser.getTok().getLoc(), "'#' expected");
4510 return MatchOperand_ParseFail;
4511 }
4512 Parser.Lex(); // Eat hash token.
4513
4514 const MCExpr *LSBExpr;
4515 SMLoc E = Parser.getTok().getLoc();
4516 if (getParser().parseExpression(LSBExpr)) {
4517 Error(E, "malformed immediate expression");
4518 return MatchOperand_ParseFail;
4519 }
4520 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
4521 if (!CE) {
4522 Error(E, "'lsb' operand must be an immediate");
4523 return MatchOperand_ParseFail;
4524 }
4525
4526 int64_t LSB = CE->getValue();
4527 // The LSB must be in the range [0,31]
4528 if (LSB < 0 || LSB > 31) {
4529 Error(E, "'lsb' operand must be in the range [0,31]");
4530 return MatchOperand_ParseFail;
4531 }
4532 E = Parser.getTok().getLoc();
4533
4534 // Expect another immediate operand.
4535 if (Parser.getTok().isNot(AsmToken::Comma)) {
4536 Error(Parser.getTok().getLoc(), "too few operands");
4537 return MatchOperand_ParseFail;
4538 }
4539 Parser.Lex(); // Eat the comma.
4540 if (Parser.getTok().isNot(AsmToken::Hash) &&
4541 Parser.getTok().isNot(AsmToken::Dollar)) {
4542 Error(Parser.getTok().getLoc(), "'#' expected");
4543 return MatchOperand_ParseFail;
4544 }
4545 Parser.Lex(); // Eat hash token.
4546
4547 const MCExpr *WidthExpr;
4548 SMLoc EndLoc;
4549 if (getParser().parseExpression(WidthExpr, EndLoc)) {
4550 Error(E, "malformed immediate expression");
4551 return MatchOperand_ParseFail;
4552 }
4553 CE = dyn_cast<MCConstantExpr>(WidthExpr);
4554 if (!CE) {
4555 Error(E, "'width' operand must be an immediate");
4556 return MatchOperand_ParseFail;
4557 }
4558
4559 int64_t Width = CE->getValue();
4560 // The width must be in the range [1,32-lsb]
4561 if (Width < 1 || Width > 32 - LSB) {
4562 Error(E, "'width' operand must be in the range [1,32-lsb]");
4563 return MatchOperand_ParseFail;
4564 }
4565
4566 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
4567
4568 return MatchOperand_Success;
4569 }
4570
4571 ARMAsmParser::OperandMatchResultTy
4572 ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
4573 // Check for a post-index addressing register operand. Specifically:
4574 // postidx_reg := '+' register {, shift}
4575 // | '-' register {, shift}
4576 // | register {, shift}
4577
4578 // This method must return MatchOperand_NoMatch without consuming any tokens
4579 // in the case where there is no match, as other alternatives take other
4580 // parse methods.
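// Illustrative post-indexed forms whose offset part is parsed here (register
// names are arbitrary examples, assuming UAL syntax):
//   ldr r0, [r1], r2
//   ldr r0, [r1], -r2, lsl #2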
4581 MCAsmParser &Parser = getParser();
4582 AsmToken Tok = Parser.getTok();
4583 SMLoc S = Tok.getLoc();
4584 bool haveEaten = false;
4585 bool isAdd = true;
4586 if (Tok.is(AsmToken::Plus)) {
4587 Parser.Lex(); // Eat the '+' token.
4588 haveEaten = true;
4589 } else if (Tok.is(AsmToken::Minus)) {
4590 Parser.Lex(); // Eat the '-' token.
4591 isAdd = false;
4592 haveEaten = true;
4593 }
4594
4595 SMLoc E = Parser.getTok().getEndLoc();
4596 int Reg = tryParseRegister();
4597 if (Reg == -1) {
4598 if (!haveEaten)
4599 return MatchOperand_NoMatch;
4600 Error(Parser.getTok().getLoc(), "register expected");
4601 return MatchOperand_ParseFail;
4602 }
4603
4604 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
4605 unsigned ShiftImm = 0;
4606 if (Parser.getTok().is(AsmToken::Comma)) {
4607 Parser.Lex(); // Eat the ','.
4608 if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
4609 return MatchOperand_ParseFail;
4610
4611 // FIXME: Only approximates end...may include intervening whitespace.
4612 E = Parser.getTok().getLoc();
4613 }
4614
4615 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
4616 ShiftImm, S, E));
4617
4618 return MatchOperand_Success;
4619 }
4620
4621 ARMAsmParser::OperandMatchResultTy
4622 ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
4623 // Check for a post-index addressing register operand. Specifically:
4624 // am3offset := '+' register
4625 // | '-' register
4626 // | register
4627 // | # imm
4628 // | # + imm
4629 // | # - imm
4630
4631 // This method must return MatchOperand_NoMatch without consuming any tokens
4632 // in the case where there is no match, as other alternatives take other
4633 // parse methods.
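// Addrmode3 is used by instructions such as ldrh/strh/ldrd, so illustrative
// uses of the offset parsed here would be (assuming UAL syntax):
//   ldrh r0, [r1], #2
//   ldrh r0, [r1], -r2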
4634 MCAsmParser &Parser = getParser();
4635 AsmToken Tok = Parser.getTok();
4636 SMLoc S = Tok.getLoc();
4637
4638 // Do immediates first, as we always parse those if we have a '#'.
4639 if (Parser.getTok().is(AsmToken::Hash) ||
4640 Parser.getTok().is(AsmToken::Dollar)) {
4641 Parser.Lex(); // Eat '#' or '$'.
4642 // Explicitly look for a '-', as we need to encode negative zero
4643 // differently.
4644 bool isNegative = Parser.getTok().is(AsmToken::Minus);
4645 const MCExpr *Offset;
4646 SMLoc E;
4647 if (getParser().parseExpression(Offset, E))
4648 return MatchOperand_ParseFail;
4649 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4650 if (!CE) {
4651 Error(S, "constant expression expected");
4652 return MatchOperand_ParseFail;
4653 }
4654 // Negative zero is encoded as the flag value INT32_MIN.
4655 int32_t Val = CE->getValue();
4656 if (isNegative && Val == 0)
4657 Val = INT32_MIN;
4658
4659 Operands.push_back(
4660 ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
4661
4662 return MatchOperand_Success;
4663 }
4664
4665
4666 bool haveEaten = false;
4667 bool isAdd = true;
4668 if (Tok.is(AsmToken::Plus)) {
4669 Parser.Lex(); // Eat the '+' token.
4670 haveEaten = true;
4671 } else if (Tok.is(AsmToken::Minus)) {
4672 Parser.Lex(); // Eat the '-' token.
4673 isAdd = false;
4674 haveEaten = true;
4675 }
4676
4677 Tok = Parser.getTok();
4678 int Reg = tryParseRegister();
4679 if (Reg == -1) {
4680 if (!haveEaten)
4681 return MatchOperand_NoMatch;
4682 Error(Tok.getLoc(), "register expected");
4683 return MatchOperand_ParseFail;
4684 }
4685
4686 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
4687 0, S, Tok.getEndLoc()));
4688
4689 return MatchOperand_Success;
4690 }
4691
4692 /// Convert parsed operands to MCInst. Needed here because this instruction
4693 /// only has two register operands, but multiplication is commutative so
4694 /// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
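/// For example (illustrative, assuming Thumb mode), both "muls r0, r1, r0"
/// and "muls r0, r0, r1" should assemble to the same two-register encoding.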
4695 void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
4696 const OperandVector &Operands) {
4697 ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
4698 ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
4699 // If we have a three-operand form, make sure to set Rn to be the operand
4700 // that isn't the same as Rd.
4701 unsigned RegOp = 4;
4702 if (Operands.size() == 6 &&
4703 ((ARMOperand &)*Operands[4]).getReg() ==
4704 ((ARMOperand &)*Operands[3]).getReg())
4705 RegOp = 5;
4706 ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
4707 Inst.addOperand(Inst.getOperand(0));
4708 ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
4709 }
4710
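/// Convert parsed Thumb branch operands to an MCInst, picking between the
/// conditional/unconditional and narrow/wide (tB/tBcc vs. t2B/t2Bcc) forms.
/// This summary is inferred from the logic below: inside an IT block the
/// branch is forced to the unconditional opcode, and a wide encoding is
/// selected when the target offset does not fit the narrow form.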
4711 void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
4712 const OperandVector &Operands) {
4713 int CondOp = -1, ImmOp = -1;
4714 switch(Inst.getOpcode()) {
4715 case ARM::tB:
4716 case ARM::tBcc: CondOp = 1; ImmOp = 2; break;
4717
4718 case ARM::t2B:
4719 case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;
4720
4721 default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
4722 }
4723 // first decide whether or not the branch should be conditional
4724 // by looking at its location relative to an IT block
4725 if(inITBlock()) {
4726 // inside an IT block we cannot have any conditional branches. Any
4727 // such instruction needs to be converted to unconditional form.
4728 switch(Inst.getOpcode()) {
4729 case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
4730 case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
4731 }
4732 } else {
4733 // outside IT blocks we can only have unconditional branches with AL
4734 // condition code or conditional branches with non-AL condition code
4735 unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
4736 switch(Inst.getOpcode()) {
4737 case ARM::tB:
4738 case ARM::tBcc:
4739 Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
4740 break;
4741 case ARM::t2B:
4742 case ARM::t2Bcc:
4743 Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
4744 break;
4745 }
4746 }
4747
4748 // now decide on encoding size based on branch target range
4749 switch(Inst.getOpcode()) {
4750 // classify tB as either t2B or t1B based on range of immediate operand
4751 case ARM::tB: {
4752 ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
4753 if (!op.isSignedOffset<11, 1>() && isThumbTwo())
4754 Inst.setOpcode(ARM::t2B);
4755 break;
4756 }
4757 // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
4758 case ARM::tBcc: {
4759 ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
4760 if (!op.isSignedOffset<8, 1>() && isThumbTwo())
4761 Inst.setOpcode(ARM::t2Bcc);
4762 break;
4763 }
4764 }
4765 ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
4766 ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
4767 }
4768
4769 /// Parse an ARM memory expression. Return false on success; on failure,
4770 /// emit a diagnostic and return true. The first token must be a '[' when called.
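/// Representative operand forms handled below (illustrative, assuming UAL
/// syntax; register names and constants are arbitrary examples):
///   [r0]               plain base register
///   [r0, #-4]          immediate offset
///   [r0, r1, lsl #2]   register offset with optional shift
///   [r0, :128]         alignment specifier (NEON element/structure accesses)
///   [r0]!              trailing '!' is kept as a separate writeback token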
4771 bool ARMAsmParser::parseMemory(OperandVector &Operands) {
4772 MCAsmParser &Parser = getParser();
4773 SMLoc S, E;
4774 assert(Parser.getTok().is(AsmToken::LBrac) &&
4775 "Token is not a Left Bracket");
4776 S = Parser.getTok().getLoc();
4777 Parser.Lex(); // Eat left bracket token.
4778
4779 const AsmToken &BaseRegTok = Parser.getTok();
4780 int BaseRegNum = tryParseRegister();
4781 if (BaseRegNum == -1)
4782 return Error(BaseRegTok.getLoc(), "register expected");
4783
4784 // The next token must either be a comma, a colon or a closing bracket.
4785 const AsmToken &Tok = Parser.getTok();
4786 if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
4787 !Tok.is(AsmToken::RBrac))
4788 return Error(Tok.getLoc(), "malformed memory operand");
4789
4790 if (Tok.is(AsmToken::RBrac)) {
4791 E = Tok.getEndLoc();
4792 Parser.Lex(); // Eat right bracket token.
4793
4794 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
4795 ARM_AM::no_shift, 0, 0, false,
4796 S, E));
4797
4798 // If there's a pre-indexing writeback marker, '!', just add it as a token
4799 // operand. It's rather odd, but syntactically valid.
4800 if (Parser.getTok().is(AsmToken::Exclaim)) {
4801 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4802 Parser.Lex(); // Eat the '!'.
4803 }
4804
4805 return false;
4806 }
4807
4808 assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
4809 "Lost colon or comma in memory operand?!");
4810 if (Tok.is(AsmToken::Comma)) {
4811 Parser.Lex(); // Eat the comma.
4812 }
4813
4814 // If we have a ':', it's an alignment specifier.
4815 if (Parser.getTok().is(AsmToken::Colon)) {
4816 Parser.Lex(); // Eat the ':'.
4817 E = Parser.getTok().getLoc();
4818 SMLoc AlignmentLoc = Tok.getLoc();
4819
4820 const MCExpr *Expr;
4821 if (getParser().parseExpression(Expr))
4822 return true;
4823
4824 // The expression has to be a constant. Memory references with relocations
4825 // don't come through here, as they use the <label> forms of the relevant
4826 // instructions.
4827 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4828 if (!CE)
4829 return Error (E, "constant expression expected");
4830
4831 unsigned Align = 0;
4832 switch (CE->getValue()) {
4833 default:
4834 return Error(E,
4835 "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4836 case 16: Align = 2; break;
4837 case 32: Align = 4; break;
4838 case 64: Align = 8; break;
4839 case 128: Align = 16; break;
4840 case 256: Align = 32; break;
4841 }
4842
4843 // Now we should have the closing ']'
4844 if (Parser.getTok().isNot(AsmToken::RBrac))
4845 return Error(Parser.getTok().getLoc(), "']' expected");
4846 E = Parser.getTok().getEndLoc();
4847 Parser.Lex(); // Eat right bracket token.
4848
4849 // Don't worry about range checking the value here. That's handled by
4850 // the is*() predicates.
4851 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
4852 ARM_AM::no_shift, 0, Align,
4853 false, S, E, AlignmentLoc));
4854
4855 // If there's a pre-indexing writeback marker, '!', just add it as a token
4856 // operand.
4857 if (Parser.getTok().is(AsmToken::Exclaim)) {
4858 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4859 Parser.Lex(); // Eat the '!'.
4860 }
4861
4862 return false;
4863 }
4864
4865 // If we have a '#', it's an immediate offset, else assume it's a register
4866 // offset. Be friendly and also accept a plain integer (without a leading
4867 // hash) for gas compatibility.
4868 if (Parser.getTok().is(AsmToken::Hash) ||
4869 Parser.getTok().is(AsmToken::Dollar) ||
4870 Parser.getTok().is(AsmToken::Integer)) {
4871 if (Parser.getTok().isNot(AsmToken::Integer))
4872 Parser.Lex(); // Eat '#' or '$'.
4873 E = Parser.getTok().getLoc();
4874
4875 bool isNegative = getParser().getTok().is(AsmToken::Minus);
4876 const MCExpr *Offset;
4877 if (getParser().parseExpression(Offset))
4878 return true;
4879
4880 // The expression has to be a constant. Memory references with relocations
4881 // don't come through here, as they use the <label> forms of the relevant
4882 // instructions.
4883 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4884 if (!CE)
4885 return Error (E, "constant expression expected");
4886
4887 // If the constant was #-0, represent it as INT32_MIN.
4888 int32_t Val = CE->getValue();
4889 if (isNegative && Val == 0)
4890 CE = MCConstantExpr::Create(INT32_MIN, getContext());
4891
4892 // Now we should have the closing ']'
4893 if (Parser.getTok().isNot(AsmToken::RBrac))
4894 return Error(Parser.getTok().getLoc(), "']' expected");
4895 E = Parser.getTok().getEndLoc();
4896 Parser.Lex(); // Eat right bracket token.
4897
4898 // Don't worry about range checking the value here. That's handled by
4899 // the is*() predicates.
4900 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4901 ARM_AM::no_shift, 0, 0,
4902 false, S, E));
4903
4904 // If there's a pre-indexing writeback marker, '!', just add it as a token
4905 // operand.
4906 if (Parser.getTok().is(AsmToken::Exclaim)) {
4907 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4908 Parser.Lex(); // Eat the '!'.
4909 }
4910
4911 return false;
4912 }
4913
4914 // The register offset is optionally preceded by a '+' or '-'
4915 bool isNegative = false;
4916 if (Parser.getTok().is(AsmToken::Minus)) {
4917 isNegative = true;
4918 Parser.Lex(); // Eat the '-'.
4919 } else if (Parser.getTok().is(AsmToken::Plus)) {
4920 // Nothing to do.
4921 Parser.Lex(); // Eat the '+'.
4922 }
4923
4924 E = Parser.getTok().getLoc();
4925 int OffsetRegNum = tryParseRegister();
4926 if (OffsetRegNum == -1)
4927 return Error(E, "register expected");
4928
4929 // If there's a shift operator, handle it.
4930 ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4931 unsigned ShiftImm = 0;
4932 if (Parser.getTok().is(AsmToken::Comma)) {
4933 Parser.Lex(); // Eat the ','.
4934 if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4935 return true;
4936 }
4937
4938 // Now we should have the closing ']'
4939 if (Parser.getTok().isNot(AsmToken::RBrac))
4940 return Error(Parser.getTok().getLoc(), "']' expected");
4941 E = Parser.getTok().getEndLoc();
4942 Parser.Lex(); // Eat right bracket token.
4943
4944 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
4945 ShiftType, ShiftImm, 0, isNegative,
4946 S, E));
4947
4948 // If there's a pre-indexing writeback marker, '!', just add it as a token
4949 // operand.
4950 if (Parser.getTok().is(AsmToken::Exclaim)) {
4951 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4952 Parser.Lex(); // Eat the '!'.
4953 }
4954
4955 return false;
4956 }
4957
4958 /// parseMemRegOffsetShift - one of these two:
4959 /// ( lsl | lsr | asr | ror ) , # shift_amount
4960 /// rrx
4961 /// Return false if a shift was successfully parsed, true otherwise.
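/// For example (illustrative), this parses the "lsl #2" in
/// "ldr r0, [r1, r2, lsl #2]" or the "rrx" in "ldr r0, [r1, r2, rrx]".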
4962 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4963 unsigned &Amount) {
4964 MCAsmParser &Parser = getParser();
4965 SMLoc Loc = Parser.getTok().getLoc();
4966 const AsmToken &Tok = Parser.getTok();
4967 if (Tok.isNot(AsmToken::Identifier))
4968 return true;
4969 StringRef ShiftName = Tok.getString();
4970 if (ShiftName == "lsl" || ShiftName == "LSL" ||
4971 ShiftName == "asl" || ShiftName == "ASL")
4972 St = ARM_AM::lsl;
4973 else if (ShiftName == "lsr" || ShiftName == "LSR")
4974 St = ARM_AM::lsr;
4975 else if (ShiftName == "asr" || ShiftName == "ASR")
4976 St = ARM_AM::asr;
4977 else if (ShiftName == "ror" || ShiftName == "ROR")
4978 St = ARM_AM::ror;
4979 else if (ShiftName == "rrx" || ShiftName == "RRX")
4980 St = ARM_AM::rrx;
4981 else
4982 return Error(Loc, "illegal shift operator");
4983 Parser.Lex(); // Eat shift type token.
4984
4985 // rrx stands alone.
4986 Amount = 0;
4987 if (St != ARM_AM::rrx) {
4988 Loc = Parser.getTok().getLoc();
4989 // A '#' and a shift amount.
4990 const AsmToken &HashTok = Parser.getTok();
4991 if (HashTok.isNot(AsmToken::Hash) &&
4992 HashTok.isNot(AsmToken::Dollar))
4993 return Error(HashTok.getLoc(), "'#' expected");
4994 Parser.Lex(); // Eat hash token.
4995
4996 const MCExpr *Expr;
4997 if (getParser().parseExpression(Expr))
4998 return true;
4999 // Range check the immediate.
5000 // lsl, ror: 0 <= imm <= 31
5001 // lsr, asr: 0 <= imm <= 32
5002 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5003 if (!CE)
5004 return Error(Loc, "shift amount must be an immediate");
5005 int64_t Imm = CE->getValue();
5006 if (Imm < 0 ||
5007 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
5008 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
5009 return Error(Loc, "immediate shift value out of range");
5010 // If <ShiftTy> #0, turn it into a no_shift.
5011 if (Imm == 0)
5012 St = ARM_AM::lsl;
5013 // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
5014 if (Imm == 32)
5015 Imm = 0;
5016 Amount = Imm;
5017 }
5018
5019 return false;
5020 }
5021
5022 /// parseFPImm - A floating point immediate expression operand.
5023 ARMAsmParser::OperandMatchResultTy
5024 ARMAsmParser::parseFPImm(OperandVector &Operands) {
5025 MCAsmParser &Parser = getParser();
5026 // Anything that can accept a floating point constant as an operand
5027 // needs to go through here, as the regular parseExpression is
5028 // integer only.
5029 //
5030 // This routine still creates a generic Immediate operand, containing
5031 // a bitcast of the 64-bit floating point value. The various operands
5032 // that accept floats can check whether the value is valid for them
5033 // via the standard is*() predicates.
5034
5035 SMLoc S = Parser.getTok().getLoc();
5036
5037 if (Parser.getTok().isNot(AsmToken::Hash) &&
5038 Parser.getTok().isNot(AsmToken::Dollar))
5039 return MatchOperand_NoMatch;
5040
5041 // Disambiguate the VMOV forms that can accept an FP immediate.
5042 // vmov.f32 <sreg>, #imm
5043 // vmov.f64 <dreg>, #imm
5044 // vmov.f32 <dreg>, #imm @ vector f32x2
5045 // vmov.f32 <qreg>, #imm @ vector f32x4
5046 //
5047 // There are also the NEON VMOV instructions which expect an
5048 // integer constant. Make sure we don't try to parse an FPImm
5049 // for these:
5050 // vmov.i{8|16|32|64} <dreg|qreg>, #imm
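// For instance (illustrative examples, assuming UAL syntax),
// "vmov.f32 s0, #1.0" and "fconsts s0, #112" are handled here, while
// something like "vmov.i32 d0, #0x10" falls through to the generic immediate
// parsing (MatchOperand_NoMatch is returned before any token is consumed).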
5051 ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
5052 bool isVmovf = TyOp.isToken() &&
5053 (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64");
5054 ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
5055 bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
5056 Mnemonic.getToken() == "fconsts");
5057 if (!(isVmovf || isFconst))
5058 return MatchOperand_NoMatch;
5059
5060 Parser.Lex(); // Eat '#' or '$'.
5061
5062 // Handle negation, as that still comes through as a separate token.
5063 bool isNegative = false;
5064 if (Parser.getTok().is(AsmToken::Minus)) {
5065 isNegative = true;
5066 Parser.Lex();
5067 }
5068 const AsmToken &Tok = Parser.getTok();
5069 SMLoc Loc = Tok.getLoc();
5070 if (Tok.is(AsmToken::Real) && isVmovf) {
5071 APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
5072 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
5073 // If we had a '-' in front, toggle the sign bit.
5074 IntVal ^= (uint64_t)isNegative << 31;
5075 Parser.Lex(); // Eat the token.
5076 Operands.push_back(ARMOperand::CreateImm(
5077 MCConstantExpr::Create(IntVal, getContext()),
5078 S, Parser.getTok().getLoc()));
5079 return MatchOperand_Success;
5080 }
5081 // Also handle plain integers. Instructions which allow floating point
5082 // immediates also allow a raw encoded 8-bit value.
5083 if (Tok.is(AsmToken::Integer) && isFconst) {
5084 int64_t Val = Tok.getIntVal();
5085 Parser.Lex(); // Eat the token.
5086 if (Val > 255 || Val < 0) {
5087 Error(Loc, "encoded floating point value out of range");
5088 return MatchOperand_ParseFail;
5089 }
5090 float RealVal = ARM_AM::getFPImmFloat(Val);
5091 Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();
5092
5093 Operands.push_back(ARMOperand::CreateImm(
5094 MCConstantExpr::Create(Val, getContext()), S,
5095 Parser.getTok().getLoc()));
5096 return MatchOperand_Success;
5097 }
5098
5099 Error(Loc, "invalid floating point immediate");
5100 return MatchOperand_ParseFail;
5101 }
5102
5103 /// Parse an ARM instruction operand. For now this parses the operand regardless
5104 /// of the mnemonic.
5105 bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
5106 MCAsmParser &Parser = getParser();
5107 SMLoc S, E;
5108
5109 // Check if the current operand has a custom associated parser, if so, try to
5110 // custom parse the operand, or fallback to the general approach.
5111 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
5112 if (ResTy == MatchOperand_Success)
5113 return false;
5114 // If there wasn't a custom match, try the generic matcher below. Otherwise,
5115 // there was a match, but an error occurred, in which case, just return that
5116 // the operand parsing failed.
5117 if (ResTy == MatchOperand_ParseFail)
5118 return true;
5119
5120 switch (getLexer().getKind()) {
5121 default:
5122 Error(Parser.getTok().getLoc(), "unexpected token in operand");
5123 return true;
5124 case AsmToken::Identifier: {
5125 // If we've seen a branch mnemonic, the next operand must be a label. This
5126 // is true even if the label is a register name. So "b r1" means branch to
5127 // label "r1".
5128 bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
5129 if (!ExpectLabel) {
5130 if (!tryParseRegisterWithWriteBack(Operands))
5131 return false;
5132 int Res = tryParseShiftRegister(Operands);
5133 if (Res == 0) // success
5134 return false;
5135 else if (Res == -1) // irrecoverable error
5136 return true;
5137 // If this is VMRS, check for the apsr_nzcv operand.
5138 if (Mnemonic == "vmrs" &&
5139 Parser.getTok().getString().equals_lower("apsr_nzcv")) {
5140 S = Parser.getTok().getLoc();
5141 Parser.Lex();
5142 Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
5143 return false;
5144 }
5145 }
5146
5147 // Fall through for the Identifier case that is not a register or a
5148 // special name.
5149 }
5150 case AsmToken::LParen: // parenthesized expressions like (_strcmp-4)
5151 case AsmToken::Integer: // things like 1f and 2b as branch targets
5152 case AsmToken::String: // quoted label names.
5153 case AsmToken::Dot: { // . as a branch target
5154 // This was not a register so parse other operands that start with an
5155 // identifier (like labels) as expressions and create them as immediates.
5156 const MCExpr *IdVal;
5157 S = Parser.getTok().getLoc();
5158 if (getParser().parseExpression(IdVal))
5159 return true;
5160 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5161 Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
5162 return false;
5163 }
5164 case AsmToken::LBrac:
5165 return parseMemory(Operands);
5166 case AsmToken::LCurly:
5167 return parseRegisterList(Operands);
5168 case AsmToken::Dollar:
5169 case AsmToken::Hash: {
5170 // #42 -> immediate.
5171 S = Parser.getTok().getLoc();
5172 Parser.Lex();
5173
5174 if (Parser.getTok().isNot(AsmToken::Colon)) {
5175 bool isNegative = Parser.getTok().is(AsmToken::Minus);
5176 const MCExpr *ImmVal;
5177 if (getParser().parseExpression(ImmVal))
5178 return true;
5179 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
5180 if (CE) {
5181 int32_t Val = CE->getValue();
5182 if (isNegative && Val == 0)
5183 ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
5184 }
5185 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5186 Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
5187
5188 // There can be a trailing '!' on operands that we want as a separate
5189 // '!' Token operand. Handle that here. For example, the compatibility
5190 // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
5191 if (Parser.getTok().is(AsmToken::Exclaim)) {
5192 Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
5193 Parser.getTok().getLoc()));
5194 Parser.Lex(); // Eat exclaim token
5195 }
5196 return false;
5197 }
5198 // w/ a ':' after the '#', it's just like a plain ':'.
5199 // FALLTHROUGH
5200 }
5201 case AsmToken::Colon: {
5202 // ":lower16:" and ":upper16:" expression prefixes
5203 // FIXME: Check it's an expression prefix,
5204 // e.g. (FOO - :lower16:BAR) isn't legal.
5205 ARMMCExpr::VariantKind RefKind;
5206 if (parsePrefix(RefKind))
5207 return true;
5208
5209 const MCExpr *SubExprVal;
5210 if (getParser().parseExpression(SubExprVal))
5211 return true;
5212
5213 const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
5214 getContext());
5215 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5216 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
5217 return false;
5218 }
5219 case AsmToken::Equal: {
5220 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
5221 return Error(Parser.getTok().getLoc(), "unexpected token in operand");
5222
5223 Parser.Lex(); // Eat '='
5224 const MCExpr *SubExprVal;
5225 if (getParser().parseExpression(SubExprVal))
5226 return true;
5227 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5228
5229 const MCExpr *CPLoc = getTargetStreamer().addConstantPoolEntry(SubExprVal);
5230 Operands.push_back(ARMOperand::CreateImm(CPLoc, S, E));
5231 return false;
5232 }
5233 }
5234 }
5235
5236 // parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
5237 // :lower16: and :upper16:.
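// For example (illustrative): "movw r0, :lower16:foo" and
// "movt r0, :upper16:foo" use these prefixes to materialize the two halves
// of the address of foo.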
5238 bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
5239 MCAsmParser &Parser = getParser();
5240 RefKind = ARMMCExpr::VK_ARM_None;
5241
5242 // consume an optional '#' (GNU compatibility)
5243 if (getLexer().is(AsmToken::Hash))
5244 Parser.Lex();
5245
5246 // :lower16: and :upper16: modifiers
5247 assert(getLexer().is(AsmToken::Colon) && "expected a :");
5248 Parser.Lex(); // Eat ':'
5249
5250 if (getLexer().isNot(AsmToken::Identifier)) {
5251 Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
5252 return true;
5253 }
5254
5255 enum {
5256 COFF = (1 << MCObjectFileInfo::IsCOFF),
5257 ELF = (1 << MCObjectFileInfo::IsELF),
5258 MACHO = (1 << MCObjectFileInfo::IsMachO)
5259 };
5260 static const struct PrefixEntry {
5261 const char *Spelling;
5262 ARMMCExpr::VariantKind VariantKind;
5263 uint8_t SupportedFormats;
5264 } PrefixEntries[] = {
5265 { "lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO },
5266 { "upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO },
5267 };
5268
5269 StringRef IDVal = Parser.getTok().getIdentifier();
5270
5271 const auto &Prefix =
5272 std::find_if(std::begin(PrefixEntries), std::end(PrefixEntries),
5273 [&IDVal](const PrefixEntry &PE) {
5274 return PE.Spelling == IDVal;
5275 });
5276 if (Prefix == std::end(PrefixEntries)) {
5277 Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
5278 return true;
5279 }
5280
5281 uint8_t CurrentFormat;
5282 switch (getContext().getObjectFileInfo()->getObjectFileType()) {
5283 case MCObjectFileInfo::IsMachO:
5284 CurrentFormat = MACHO;
5285 break;
5286 case MCObjectFileInfo::IsELF:
5287 CurrentFormat = ELF;
5288 break;
5289 case MCObjectFileInfo::IsCOFF:
5290 CurrentFormat = COFF;
5291 break;
5292 }
5293
5294 if (~Prefix->SupportedFormats & CurrentFormat) {
5295 Error(Parser.getTok().getLoc(),
5296 "cannot represent relocation in the current file format");
5297 return true;
5298 }
5299
5300 RefKind = Prefix->VariantKind;
5301 Parser.Lex();
5302
5303 if (getLexer().isNot(AsmToken::Colon)) {
5304 Error(Parser.getTok().getLoc(), "unexpected token after prefix");
5305 return true;
5306 }
5307 Parser.Lex(); // Eat the last ':'
5308
5309 return false;
5310 }
5311
5312 /// \brief Given a mnemonic, split out possible predication code and carry
5313 /// setting letters to form a canonical mnemonic and flags.
5314 //
5315 // FIXME: Would be nice to autogen this.
5316 // FIXME: This is a bit of a maze of special cases.
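// For example (as implemented below): "addseq" splits into mnemonic "add",
// predication code EQ and the carry-setting 's'; "cpsie" splits into "cps"
// with the IE interrupt-mode operand; "ittee" splits into "it" with the
// "tee" condition mask.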
5317 StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
5318 unsigned &PredicationCode,
5319 bool &CarrySetting,
5320 unsigned &ProcessorIMod,
5321 StringRef &ITMask) {
5322 PredicationCode = ARMCC::AL;
5323 CarrySetting = false;
5324 ProcessorIMod = 0;
5325
5326 // Ignore some mnemonics we know aren't predicated forms.
5327 //
5328 // FIXME: Would be nice to autogen this.
5329 if ((Mnemonic == "movs" && isThumb()) ||
5330 Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" ||
5331 Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" ||
5332 Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" ||
5333 Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" ||
5334 Mnemonic == "vaclt" || Mnemonic == "vacle" || Mnemonic == "hlt" ||
5335 Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" ||
5336 Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" ||
5337 Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
5338 Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" ||
5339 Mnemonic == "vcvta" || Mnemonic == "vcvtn" || Mnemonic == "vcvtp" ||
5340 Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
5341 Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic == "hvc" ||
5342 Mnemonic.startswith("vsel"))
5343 return Mnemonic;
5344
5345 // First, split out any predication code. Ignore mnemonics we know aren't
5346 // predicated but do have a carry-set and so weren't caught above.
5347 if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
5348 Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
5349 Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
5350 Mnemonic != "sbcs" && Mnemonic != "rscs") {
5351 unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
5352 .Case("eq", ARMCC::EQ)
5353 .Case("ne", ARMCC::NE)
5354 .Case("hs", ARMCC::HS)
5355 .Case("cs", ARMCC::HS)
5356 .Case("lo", ARMCC::LO)
5357 .Case("cc", ARMCC::LO)
5358 .Case("mi", ARMCC::MI)
5359 .Case("pl", ARMCC::PL)
5360 .Case("vs", ARMCC::VS)
5361 .Case("vc", ARMCC::VC)
5362 .Case("hi", ARMCC::HI)
5363 .Case("ls", ARMCC::LS)
5364 .Case("ge", ARMCC::GE)
5365 .Case("lt", ARMCC::LT)
5366 .Case("gt", ARMCC::GT)
5367 .Case("le", ARMCC::LE)
5368 .Case("al", ARMCC::AL)
5369 .Default(~0U);
5370 if (CC != ~0U) {
5371 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
5372 PredicationCode = CC;
5373 }
5374 }
5375
5376 // Next, determine if we have a carry setting bit. We explicitly ignore all
5377 // the instructions we know end in 's'.
5378 if (Mnemonic.endswith("s") &&
5379 !(Mnemonic == "cps" || Mnemonic == "mls" ||
5380 Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
5381 Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
5382 Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
5383 Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
5384 Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
5385 Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
5386 Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
5387 Mnemonic == "vfms" || Mnemonic == "vfnms" || Mnemonic == "fconsts" ||
5388 (Mnemonic == "movs" && isThumb()))) {
5389 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
5390 CarrySetting = true;
5391 }
5392
5393 // The "cps" instruction can have a interrupt mode operand which is glued into
5394 // the mnemonic. Check if this is the case, split it and parse the imod op
5395 if (Mnemonic.startswith("cps")) {
5396 // Split out any imod code.
5397 unsigned IMod =
5398 StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
5399 .Case("ie", ARM_PROC::IE)
5400 .Case("id", ARM_PROC::ID)
5401 .Default(~0U);
5402 if (IMod != ~0U) {
5403 Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
5404 ProcessorIMod = IMod;
5405 }
5406 }
5407
5408 // The "it" instruction has the condition mask on the end of the mnemonic.
5409 if (Mnemonic.startswith("it")) {
5410 ITMask = Mnemonic.slice(2, Mnemonic.size());
5411 Mnemonic = Mnemonic.slice(0, 2);
5412 }
5413
5414 return Mnemonic;
5415 }
5416
5417 /// \brief Given a canonical mnemonic, determine if the instruction ever allows
5418 /// inclusion of carry set or predication code operands.
5419 //
5420 // FIXME: It would be nice to autogen this.
5421 void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
5422 bool &CanAcceptCarrySet,
5423 bool &CanAcceptPredicationCode) {
5424 CanAcceptCarrySet =
5425 Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
5426 Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
5427 Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" ||
5428 Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" ||
5429 Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" ||
5430 Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" ||
5431 Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" ||
5432 (!isThumb() &&
5433 (Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" ||
5434 Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull"));
5435
5436 if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
5437 Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
5438 Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
5439 Mnemonic.startswith("crc32") || Mnemonic.startswith("cps") ||
5440 Mnemonic.startswith("vsel") || Mnemonic == "vmaxnm" ||
5441 Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" ||
5442 Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" ||
5443 Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
5444 Mnemonic.startswith("aes") || Mnemonic == "hvc" || Mnemonic == "setpan" ||
5445 Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
5446 (FullInst.startswith("vmull") && FullInst.endswith(".p64"))) {
5447 // These mnemonics are never predicable
5448 CanAcceptPredicationCode = false;
5449 } else if (!isThumb()) {
5450 // Some instructions are only predicable in Thumb mode
5451 CanAcceptPredicationCode =
5452 Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
5453 Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
5454 Mnemonic != "dmb" && Mnemonic != "dsb" && Mnemonic != "isb" &&
5455 Mnemonic != "pld" && Mnemonic != "pli" && Mnemonic != "pldw" &&
5456 Mnemonic != "ldc2" && Mnemonic != "ldc2l" && Mnemonic != "stc2" &&
5457 Mnemonic != "stc2l" && !Mnemonic.startswith("rfe") &&
5458 !Mnemonic.startswith("srs");
5459 } else if (isThumbOne()) {
5460 if (hasV6MOps())
5461 CanAcceptPredicationCode = Mnemonic != "movs";
5462 else
5463 CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
5464 } else
5465 CanAcceptPredicationCode = true;
5466 }
5467
5468 bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
5469 OperandVector &Operands) {
5470 // FIXME: This is all horribly hacky. We really need a better way to deal
5471 // with optional operands like this in the matcher table.
5472
5473 // The 'mov' mnemonic is special. One variant has a cc_out operand, while
5474 // another does not. Specifically, the MOVW instruction does not. So we
5475 // special case it here and remove the defaulted (non-setting) cc_out
5476 // operand if that's the instruction we're trying to match.
5477 //
5478 // We do this as post-processing of the explicit operands rather than just
5479 // conditionally adding the cc_out in the first place because we need
5480 // to check the type of the parsed immediate operand.
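// For example (illustrative), "mov r1, #0xffff" in ARM mode can only be a
// MOVW (0xffff is not a modified immediate), so the defaulted cc_out
// operand added earlier is removed here.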
5481 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
5482 !static_cast<ARMOperand &>(*Operands[4]).isModImm() &&
5483 static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() &&
5484 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
5485 return true;
5486
5487 // Register-register 'add' for thumb does not have a cc_out operand
5488 // when there are only two register operands.
5489 if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
5490 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5491 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5492 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
5493 return true;
5494 // Register-register 'add' for thumb does not have a cc_out operand
5495 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
5496 // have to check the immediate range here since Thumb2 has a variant
5497 // that can handle a different range and has a cc_out operand.
5498 if (((isThumb() && Mnemonic == "add") ||
5499 (isThumbTwo() && Mnemonic == "sub")) &&
5500 Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5501 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5502 static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP &&
5503 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5504 ((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) ||
5505 static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4()))
5506 return true;
5507 // For Thumb2, add/sub immediate does not have a cc_out operand for the
5508 // imm0_4095 variant. That's the least-preferred variant when
5509 // selecting via the generic "add" mnemonic, so to know that we
5510 // should remove the cc_out operand, we have to explicitly check that
5511 // it's not one of the other variants. Ugh.
5512 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
5513 Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5514 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5515 static_cast<ARMOperand &>(*Operands[5]).isImm()) {
5516 // Nest conditions rather than one big 'if' statement for readability.
5517 //
5518 // If both registers are low, we're in an IT block, and the immediate is
5519 // in range, we should use encoding T1 instead, which has a cc_out.
5520 if (inITBlock() &&
5521 isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) &&
5522 isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) &&
5523 static_cast<ARMOperand &>(*Operands[5]).isImm0_7())
5524 return false;
5525 // Check against T3. If the second register is the PC, this is an
5526 // alternate form of ADR, which uses encoding T4, so check for that too.
5527 if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC &&
5528 static_cast<ARMOperand &>(*Operands[5]).isT2SOImm())
5529 return false;
5530
5531 // Otherwise, we use encoding T4, which does not have a cc_out
5532 // operand.
5533 return true;
5534 }
5535
5536 // The thumb2 multiply instruction doesn't have a CCOut register, so
5537 // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
5538 // use the 16-bit encoding or not.
5539 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
5540 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5541 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5542 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5543 static_cast<ARMOperand &>(*Operands[5]).isReg() &&
5544 // If the registers aren't low regs, the destination reg isn't the
5545 // same as one of the source regs, or the cc_out operand is zero
5546 // outside of an IT block, we have to use the 32-bit encoding, so
5547 // remove the cc_out operand.
5548 (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
5549 !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
5550 !isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) ||
5551 !inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() !=
5552 static_cast<ARMOperand &>(*Operands[5]).getReg() &&
5553 static_cast<ARMOperand &>(*Operands[3]).getReg() !=
5554 static_cast<ARMOperand &>(*Operands[4]).getReg())))
5555 return true;
5556
5557 // Also check the 'mul' syntax variant that doesn't specify an explicit
5558 // destination register.
5559 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
5560 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5561 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5562 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5563 // If the registers aren't low regs or the cc_out operand is zero
5564 // outside of an IT block, we have to use the 32-bit encoding, so
5565 // remove the cc_out operand.
5566 (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
5567 !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
5568 !inITBlock()))
5569 return true;
5570
5571
5572
5573 // Register-register 'add/sub' for thumb does not have a cc_out operand
5574 // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
5575 // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
5576 // right, this will result in better diagnostics (which operand is off)
5577 // anyway.
5578 if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
5579 (Operands.size() == 5 || Operands.size() == 6) &&
5580 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5581 static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP &&
5582 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5583 (static_cast<ARMOperand &>(*Operands[4]).isImm() ||
5584 (Operands.size() == 6 &&
5585 static_cast<ARMOperand &>(*Operands[5]).isImm())))
5586 return true;
5587
5588 return false;
5589 }
5590
5591 bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
5592 OperandVector &Operands) {
5593 // VRINT{Z, R, X} have a predicate operand in VFP, but not in NEON
5594 unsigned RegIdx = 3;
5595 if ((Mnemonic == "vrintz" || Mnemonic == "vrintx" || Mnemonic == "vrintr") &&
5596 static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32") {
5597 if (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
5598 static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32")
5599 RegIdx = 4;
5600
5601 if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() &&
5602 (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
5603 static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) ||
5604 ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
5605 static_cast<ARMOperand &>(*Operands[RegIdx]).getReg())))
5606 return true;
5607 }
5608 return false;
5609 }
5610
5611 static bool isDataTypeToken(StringRef Tok) {
5612 return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
5613 Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
5614 Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
5615 Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
5616 Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
5617 Tok == ".f" || Tok == ".d";
5618 }
5619
5620 // FIXME: This bit should probably be handled via an explicit match class
5621 // in the .td files that matches the suffix instead of having it be
5622 // a literal string token the way it is now.
5623 static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
5624 return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
5625 }
5626 static void applyMnemonicAliases(StringRef &Mnemonic, uint64_t Features,
5627 unsigned VariantID);
5628
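// The pre-UAL VFP load/store-multiple mnemonics encode the register precision
// in the mnemonic itself, so the parsed register list has to be checked
// against it. For example (illustrative): "fldmias r0!, {s0-s3}" may only
// take S registers, while "fldmiad r0!, {d0-d3}" may only take D registers.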
5629 static bool RequiresVFPRegListValidation(StringRef Inst,
5630 bool &AcceptSinglePrecisionOnly,
5631 bool &AcceptDoublePrecisionOnly) {
5632 if (Inst.size() < 7)
5633 return false;
5634
5635 if (Inst.startswith("fldm") || Inst.startswith("fstm")) {
5636 StringRef AddressingMode = Inst.substr(4, 2);
5637 if (AddressingMode == "ia" || AddressingMode == "db" ||
5638 AddressingMode == "ea" || AddressingMode == "fd") {
5639 AcceptSinglePrecisionOnly = Inst[6] == 's';
5640 AcceptDoublePrecisionOnly = Inst[6] == 'd' || Inst[6] == 'x';
5641 return true;
5642 }
5643 }
5644
5645 return false;
5646 }
5647
5648 /// Parse an arm instruction mnemonic followed by its operands.
5649 bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
5650 SMLoc NameLoc, OperandVector &Operands) {
5651 MCAsmParser &Parser = getParser();
5652 // FIXME: Can this be done via tablegen in some fashion?
5653 bool RequireVFPRegisterListCheck;
5654 bool AcceptSinglePrecisionOnly;
5655 bool AcceptDoublePrecisionOnly;
5656 RequireVFPRegisterListCheck =
5657 RequiresVFPRegListValidation(Name, AcceptSinglePrecisionOnly,
5658 AcceptDoublePrecisionOnly);
5659
5660 // Apply mnemonic aliases before doing anything else, as the destination
5661 // mnemonic may include suffixes and we want to handle them normally.
5662 // The generic tblgen'erated code does this later, at the start of
5663 // MatchInstructionImpl(), but that's too late for aliases that include
5664 // any sort of suffix.
5665 uint64_t AvailableFeatures = getAvailableFeatures();
5666 unsigned AssemblerDialect = getParser().getAssemblerDialect();
5667 applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
5668
5669 // First check for the ARM-specific .req directive.
5670 if (Parser.getTok().is(AsmToken::Identifier) &&
5671 Parser.getTok().getIdentifier() == ".req") {
5672 parseDirectiveReq(Name, NameLoc);
5673 // We always return 'error' for this, as we're done with this
5674 // statement and don't need to match the instruction.
5675 return true;
5676 }
5677
5678 // Create the leading tokens for the mnemonic, split by '.' characters.
5679 size_t Start = 0, Next = Name.find('.');
5680 StringRef Mnemonic = Name.slice(Start, Next);
5681
5682 // Split out the predication code and carry setting flag from the mnemonic.
5683 unsigned PredicationCode;
5684 unsigned ProcessorIMod;
5685 bool CarrySetting;
5686 StringRef ITMask;
5687 Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
5688 ProcessorIMod, ITMask);
5689
5690 // In Thumb1, only the branch (B) instruction can be predicated.
5691 if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
5692 Parser.eatToEndOfStatement();
5693 return Error(NameLoc, "conditional execution not supported in Thumb1");
5694 }
5695
5696 Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
5697
5698 // Handle the IT instruction ITMask. Convert it to a bitmask. This
5699 // is the mask as it will be for the IT encoding if the conditional
5700 // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
5701 // where the conditional bit0 is zero, the instruction post-processing
5702 // will adjust the mask accordingly.
5703 if (Mnemonic == "it") {
5704 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
5705 if (ITMask.size() > 3) {
5706 Parser.eatToEndOfStatement();
5707 return Error(Loc, "too many conditions on IT instruction");
5708 }
5709 unsigned Mask = 8;
5710 for (unsigned i = ITMask.size(); i != 0; --i) {
5711 char pos = ITMask[i - 1];
5712 if (pos != 't' && pos != 'e') {
5713 Parser.eatToEndOfStatement();
5714 return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
5715 }
5716 Mask >>= 1;
5717 if (ITMask[i - 1] == 't')
5718 Mask |= 8;
5719 }
5720 Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
5721 }
5722
5723 // FIXME: This is all a pretty gross hack. We should automatically handle
5724 // optional operands like this via tblgen.
5725
5726 // Next, add the CCOut and ConditionCode operands, if needed.
5727 //
5728 // For mnemonics which can ever incorporate a carry setting bit or predication
5729 // code, our matching model involves us always generating CCOut and
5730 // ConditionCode operands to match the mnemonic "as written" and then we let
5731 // the matcher deal with finding the right instruction or generating an
5732 // appropriate error.
5733 bool CanAcceptCarrySet, CanAcceptPredicationCode;
5734 getMnemonicAcceptInfo(Mnemonic, Name, CanAcceptCarrySet, CanAcceptPredicationCode);
5735
5736 // If we had a carry-set on an instruction that can't do that, issue an
5737 // error.
5738 if (!CanAcceptCarrySet && CarrySetting) {
5739 Parser.eatToEndOfStatement();
5740 return Error(NameLoc, "instruction '" + Mnemonic +
5741 "' can not set flags, but 's' suffix specified");
5742 }
5743 // If we had a predication code on an instruction that can't do that, issue an
5744 // error.
5745 if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
5746 Parser.eatToEndOfStatement();
5747 return Error(NameLoc, "instruction '" + Mnemonic +
5748 "' is not predicable, but condition code specified");
5749 }
5750
5751 // Add the carry setting operand, if necessary.
5752 if (CanAcceptCarrySet) {
5753 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
5754 Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
5755 Loc));
5756 }
5757
5758 // Add the predication code operand, if necessary.
5759 if (CanAcceptPredicationCode) {
5760 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
5761 CarrySetting);
5762 Operands.push_back(ARMOperand::CreateCondCode(
5763 ARMCC::CondCodes(PredicationCode), Loc));
5764 }
5765
5766 // Add the processor imod operand, if necessary.
5767 if (ProcessorIMod) {
5768 Operands.push_back(ARMOperand::CreateImm(
5769 MCConstantExpr::Create(ProcessorIMod, getContext()),
5770 NameLoc, NameLoc));
5771 } else if (Mnemonic == "cps" && isMClass()) {
5772 return Error(NameLoc, "instruction 'cps' requires effect for M-class");
5773 }
5774
5775 // Add the remaining tokens in the mnemonic.
5776 while (Next != StringRef::npos) {
5777 Start = Next;
5778 Next = Name.find('.', Start + 1);
5779 StringRef ExtraToken = Name.slice(Start, Next);
5780
5781 // Some NEON instructions have an optional datatype suffix that is
5782 // completely ignored. Check for that.
5783 if (isDataTypeToken(ExtraToken) &&
5784 doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
5785 continue;
5786
5787 // For ARM mode, generate an error if the .n qualifier is used.
5788 if (ExtraToken == ".n" && !isThumb()) {
5789 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
5790 Parser.eatToEndOfStatement();
5791 return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
5792 "arm mode");
5793 }
5794
5795 // The .n qualifier is always discarded as that is what the tables
5796 // and matcher expect. In ARM mode the .w qualifier has no effect,
5797 // so discard it to avoid errors that can be caused by the matcher.
5798 if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
5799 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
5800 Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
5801 }
5802 }
5803
5804 // Read the remaining operands.
5805 if (getLexer().isNot(AsmToken::EndOfStatement)) {
5806 // Read the first operand.
5807 if (parseOperand(Operands, Mnemonic)) {
5808 Parser.eatToEndOfStatement();
5809 return true;
5810 }
5811
5812 while (getLexer().is(AsmToken::Comma)) {
5813 Parser.Lex(); // Eat the comma.
5814
5815 // Parse and remember the operand.
5816 if (parseOperand(Operands, Mnemonic)) {
5817 Parser.eatToEndOfStatement();
5818 return true;
5819 }
5820 }
5821 }
5822
5823 if (getLexer().isNot(AsmToken::EndOfStatement)) {
5824 SMLoc Loc = getLexer().getLoc();
5825 Parser.eatToEndOfStatement();
5826 return Error(Loc, "unexpected token in argument list");
5827 }
5828
5829 Parser.Lex(); // Consume the EndOfStatement
5830
5831 if (RequireVFPRegisterListCheck) {
5832 ARMOperand &Op = static_cast<ARMOperand &>(*Operands.back());
5833 if (AcceptSinglePrecisionOnly && !Op.isSPRRegList())
5834 return Error(Op.getStartLoc(),
5835 "VFP/Neon single precision register expected");
5836 if (AcceptDoublePrecisionOnly && !Op.isDPRRegList())
5837 return Error(Op.getStartLoc(),
5838 "VFP/Neon double precision register expected");
5839 }
5840
5841 // Some instructions, mostly Thumb, have forms for the same mnemonic that
5842 // do and don't have a cc_out optional-def operand. With some spot-checks
5843 // of the operand list, we can figure out which variant we're trying to
5844 // parse and adjust accordingly before actually matching. We shouldn't ever
5845 // try to remove a cc_out operand that was explicitly set on the
5846 // mnemonic, of course (CarrySetting == true). Reason number #317 why the
5847 // table driven matcher doesn't fit well with the ARM instruction set.
5848 if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
5849 Operands.erase(Operands.begin() + 1);
5850
5851 // Some instructions have the same mnemonic, but don't always
5852 // have a predicate. Distinguish them here and delete the
5853 // predicate if needed.
5854 if (shouldOmitPredicateOperand(Mnemonic, Operands))
5855 Operands.erase(Operands.begin() + 1);
5856
5857 // ARM mode 'blx' needs special handling, as the register operand version
5858 // is predicable, but the label operand version is not. So, we can't rely
5859 // on the Mnemonic based checking to correctly figure out when to put
5860 // a k_CondCode operand in the list. If we're trying to match the label
5861 // version, remove the k_CondCode operand here.
5862 if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
5863 static_cast<ARMOperand &>(*Operands[2]).isImm())
5864 Operands.erase(Operands.begin() + 1);
5865
5866 // Adjust operands of ldrexd/strexd to MCK_GPRPair.
5867 // ldrexd/strexd require an even/odd GPR pair. To enforce this constraint,
5868 // a single GPRPair reg operand is used in the .td file to replace the two
5869 // GPRs. However, when parsing from asm, the two GPRs cannot be automatically
5870 // expressed as a GPRPair, so we have to manually merge them.
5871 // FIXME: We would really like to be able to tablegen'erate this.
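  // E.g. for 'ldrexd r0, r1, [r2]', the r0 and r1 operands are folded below
  // into the single GPRPair super-register covering both.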
5872 if (!isThumb() && Operands.size() > 4 &&
5873 (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
5874 Mnemonic == "stlexd")) {
5875 bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
5876 unsigned Idx = isLoad ? 2 : 3;
5877 ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
5878 ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);
5879
5880 const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID);
5881 // Adjust only if Op1 and Op2 are GPRs.
5882 if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
5883 MRC.contains(Op2.getReg())) {
5884 unsigned Reg1 = Op1.getReg();
5885 unsigned Reg2 = Op2.getReg();
5886 unsigned Rt = MRI->getEncodingValue(Reg1);
5887 unsigned Rt2 = MRI->getEncodingValue(Reg2);
5888
5889 // Rt2 must be Rt + 1 and Rt must be even.
5890 if (Rt + 1 != Rt2 || (Rt & 1)) {
5891 Error(Op2.getStartLoc(), isLoad
5892 ? "destination operands must be sequential"
5893 : "source operands must be sequential");
5894 return true;
5895 }
5896 unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0,
5897 &(MRI->getRegClass(ARM::GPRPairRegClassID)));
5898 Operands[Idx] =
5899 ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
5900 Operands.erase(Operands.begin() + Idx + 1);
5901 }
5902 }
5903
5904 // If first 2 operands of a 3 operand instruction are the same
5905 // then transform to 2 operand version of the same instruction
5906 // e.g. 'adds r0, r0, #1' transforms to 'adds r0, #1'
5907 // FIXME: We would really like to be able to tablegen'erate this.
5908 if (isThumbOne() && Operands.size() == 6 &&
5909 (Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" ||
5910 Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
5911 Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" ||
5912 Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic")) {
5913 ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
5914 ARMOperand &Op4 = static_cast<ARMOperand &>(*Operands[4]);
5915 ARMOperand &Op5 = static_cast<ARMOperand &>(*Operands[5]);
5916
5917 // If both registers are the same then remove one of them from
5918 // the operand list.
5919 if (Op3.isReg() && Op4.isReg() && Op3.getReg() == Op4.getReg()) {
5920 // If the 3rd operand (Op5) is a register and the instruction is 'adds' or
5921 // 'sub', do not transform; the backend already handles these instructions
5922 // correctly.
5923 if (!Op5.isReg() || !((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub")) {
5924 Operands.erase(Operands.begin() + 3);
5925 if (Mnemonic == "add" && !CarrySetting) {
5926 // Special case for 'add' (not 'adds') instruction must
5927 // remove the CCOut operand as well.
5928 Operands.erase(Operands.begin() + 1);
5929 }
5930 }
5931 }
5932 }
5933
5934 // If instruction is 'add' and first two register operands
5935 // use SP register, then remove one of the SP registers from
5936 // the instruction.
5937 // FIXME: We would really like to be able to tablegen'erate this.
5938 if (isThumbOne() && Operands.size() == 5 && Mnemonic == "add" && !CarrySetting) {
5939 ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
5940 ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
5941 if (Op2.isReg() && Op3.isReg() && Op2.getReg() == ARM::SP && Op3.getReg() == ARM::SP) {
5942 Operands.erase(Operands.begin() + 2);
5943 }
5944 }
5945
5946 // GNU Assembler extension (compatibility)
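  // Accept the two-operand form 'ldrd/strd Rt, [addr]' and insert the implied
  // second transfer register, e.g. 'ldrd r0, [r4]' becomes 'ldrd r0, r1, [r4]'.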
5947 if ((Mnemonic == "ldrd" || Mnemonic == "strd")) {
5948 ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
5949 ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
5950 if (Op3.isMem()) {
5951 assert(Op2.isReg() && "expected register argument");
5952
5953 unsigned SuperReg = MRI->getMatchingSuperReg(
5954 Op2.getReg(), ARM::gsub_0, &MRI->getRegClass(ARM::GPRPairRegClassID));
5955
5956 assert(SuperReg && "expected register pair");
5957
5958 unsigned PairedReg = MRI->getSubReg(SuperReg, ARM::gsub_1);
5959
5960 Operands.insert(
5961 Operands.begin() + 3,
5962 ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
5963 }
5964 }
5965
5966 // FIXME: As said above, this is all a pretty gross hack. This instruction
5967 // does not fit with other "subs" and tblgen.
5968 // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
5969 // so the Mnemonic is the original name "subs" and delete the predicate
5970 // operand so it will match the table entry.
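  // E.g. 'subs pc, lr, #4', the exception-return form.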
5971 if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
5972 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5973 static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC &&
5974 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5975 static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR &&
5976 static_cast<ARMOperand &>(*Operands[5]).isImm()) {
5977 Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
5978 Operands.erase(Operands.begin() + 1);
5979 }
5980 return false;
5981 }
5982
5983 // Validate context-sensitive operand constraints.
5984
5985 // Return 'true' if the register list contains non-low GPR registers
5986 // (HiReg, if nonzero, is also allowed), 'false' otherwise. If Reg is in the
5987 // register list, set 'containsReg' to true.
5988 static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
5989 unsigned HiReg, bool &containsReg) {
5990 containsReg = false;
5991 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5992 unsigned OpReg = Inst.getOperand(i).getReg();
5993 if (OpReg == Reg)
5994 containsReg = true;
5995 // Anything other than a low register isn't legal here.
5996 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5997 return true;
5998 }
5999 return false;
6000 }
6001
6002 // Check if the specified register is in the register list of the instruction,
6003 // starting at the indicated operand number.
6004 static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
6005 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
6006 unsigned OpReg = Inst.getOperand(i).getReg();
6007 if (OpReg == Reg)
6008 return true;
6009 }
6010 return false;
6011 }
6012
6013 // Return true if instruction has the interesting property of being
6014 // allowed in IT blocks, but not being predicable.
6015 static bool instIsBreakpoint(const MCInst &Inst) {
6016 return Inst.getOpcode() == ARM::tBKPT ||
6017 Inst.getOpcode() == ARM::BKPT ||
6018 Inst.getOpcode() == ARM::tHLT ||
6019 Inst.getOpcode() == ARM::HLT;
6021 }
6022
6023 bool ARMAsmParser::validatetLDMRegList(MCInst Inst,
6024 const OperandVector &Operands,
6025 unsigned ListNo, bool IsARPop) {
6026 const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
6027 bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
6028
6029 bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
6030 bool ListContainsLR = listContainsReg(Inst, ListNo, ARM::LR);
6031 bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
6032
6033 if (!IsARPop && ListContainsSP)
6034 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6035 "SP may not be in the register list");
6036 else if (ListContainsPC && ListContainsLR)
6037 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6038 "PC and LR may not be in the register list simultaneously");
6039 else if (inITBlock() && !lastInITBlock() && ListContainsPC)
6040 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6041 "instruction must be outside of IT block or the last "
6042 "instruction in an IT block");
6043 return false;
6044 }
6045
6046 bool ARMAsmParser::validatetSTMRegList(MCInst Inst,
6047 const OperandVector &Operands,
6048 unsigned ListNo) {
6049 const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
6050 bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
6051
6052 bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
6053 bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
6054
6055 if (ListContainsSP && ListContainsPC)
6056 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6057 "SP and PC may not be in the register list");
6058 else if (ListContainsSP)
6059 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6060 "SP may not be in the register list");
6061 else if (ListContainsPC)
6062 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6063 "PC may not be in the register list");
6064 return false;
6065 }
6066
6067 // FIXME: We would really like to be able to tablegen'erate this.
6068 bool ARMAsmParser::validateInstruction(MCInst &Inst,
6069 const OperandVector &Operands) {
6070 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
6071 SMLoc Loc = Operands[0]->getStartLoc();
6072
6073 // Check the IT block state first.
6074 // NOTE: BKPT and HLT instructions have the interesting property of being
6075 // allowed in IT blocks, but not being predicable. They just always execute.
6076 if (inITBlock() && !instIsBreakpoint(Inst)) {
6077 unsigned Bit = 1;
6078 if (ITState.FirstCond)
6079 ITState.FirstCond = false;
6080 else
6081 Bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
6082 // The instruction must be predicable.
6083 if (!MCID.isPredicable())
6084 return Error(Loc, "instructions in IT block must be predicable");
6085 unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
6086 unsigned ITCond = Bit ? ITState.Cond :
6087 ARMCC::getOppositeCondition(ITState.Cond);
6088 if (Cond != ITCond) {
6089 // Find the condition code Operand to get its SMLoc information.
6090 SMLoc CondLoc;
6091 for (unsigned I = 1; I < Operands.size(); ++I)
6092 if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
6093 CondLoc = Operands[I]->getStartLoc();
6094 return Error(CondLoc, "incorrect condition in IT block; got '" +
6095 StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
6096 "', but expected '" +
6097 ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
6098 }
6099 // Check for non-'al' condition codes outside of the IT block.
6100 } else if (isThumbTwo() && MCID.isPredicable() &&
6101 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
6102 ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
6103 Inst.getOpcode() != ARM::t2Bcc)
6104 return Error(Loc, "predicated instructions must be in IT block");
6105
6106 const unsigned Opcode = Inst.getOpcode();
6107 switch (Opcode) {
6108 case ARM::LDRD:
6109 case ARM::LDRD_PRE:
6110 case ARM::LDRD_POST: {
6111 const unsigned RtReg = Inst.getOperand(0).getReg();
6112
6113 // Rt can't be R14.
6114 if (RtReg == ARM::LR)
6115 return Error(Operands[3]->getStartLoc(),
6116 "Rt can't be R14");
6117
6118 const unsigned Rt = MRI->getEncodingValue(RtReg);
6119 // Rt must be even-numbered.
6120 if ((Rt & 1) == 1)
6121 return Error(Operands[3]->getStartLoc(),
6122 "Rt must be even-numbered");
6123
6124 // Rt2 must be Rt + 1.
6125 const unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
6126 if (Rt2 != Rt + 1)
6127 return Error(Operands[3]->getStartLoc(),
6128 "destination operands must be sequential");
6129
6130 if (Opcode == ARM::LDRD_PRE || Opcode == ARM::LDRD_POST) {
6131 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
6132 // For addressing modes with writeback, the base register needs to be
6133 // different from the destination registers.
6134 if (Rn == Rt || Rn == Rt2)
6135 return Error(Operands[3]->getStartLoc(),
6136 "base register needs to be different from destination "
6137 "registers");
6138 }
6139
6140 return false;
6141 }
6142 case ARM::t2LDRDi8:
6143 case ARM::t2LDRD_PRE:
6144 case ARM::t2LDRD_POST: {
6145 // Rt2 must be different from Rt.
6146 unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
6147 unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
6148 if (Rt2 == Rt)
6149 return Error(Operands[3]->getStartLoc(),
6150 "destination operands can't be identical");
6151 return false;
6152 }
6153 case ARM::t2BXJ: {
6154 const unsigned RmReg = Inst.getOperand(0).getReg();
6155 // Rm = SP is no longer unpredictable in v8-A
6156 if (RmReg == ARM::SP && !hasV8Ops())
6157 return Error(Operands[2]->getStartLoc(),
6158 "r13 (SP) is an unpredictable operand to BXJ");
6159 return false;
6160 }
6161 case ARM::STRD: {
6162 // Rt2 must be Rt + 1.
6163 unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
6164 unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
6165 if (Rt2 != Rt + 1)
6166 return Error(Operands[3]->getStartLoc(),
6167 "source operands must be sequential");
6168 return false;
6169 }
6170 case ARM::STRD_PRE:
6171 case ARM::STRD_POST: {
6172 // Rt2 must be Rt + 1.
6173 unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
6174 unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(2).getReg());
6175 if (Rt2 != Rt + 1)
6176 return Error(Operands[3]->getStartLoc(),
6177 "source operands must be sequential");
6178 return false;
6179 }
6180 case ARM::STR_PRE_IMM:
6181 case ARM::STR_PRE_REG:
6182 case ARM::STR_POST_IMM:
6183 case ARM::STR_POST_REG:
6184 case ARM::STRH_PRE:
6185 case ARM::STRH_POST:
6186 case ARM::STRB_PRE_IMM:
6187 case ARM::STRB_PRE_REG:
6188 case ARM::STRB_POST_IMM:
6189 case ARM::STRB_POST_REG: {
6190 // Rt must be different from Rn.
6191 const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
6192 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
6193
6194 if (Rt == Rn)
6195 return Error(Operands[3]->getStartLoc(),
6196 "source register and base register can't be identical");
6197 return false;
6198 }
6199 case ARM::LDR_PRE_IMM:
6200 case ARM::LDR_PRE_REG:
6201 case ARM::LDR_POST_IMM:
6202 case ARM::LDR_POST_REG:
6203 case ARM::LDRH_PRE:
6204 case ARM::LDRH_POST:
6205 case ARM::LDRSH_PRE:
6206 case ARM::LDRSH_POST:
6207 case ARM::LDRB_PRE_IMM:
6208 case ARM::LDRB_PRE_REG:
6209 case ARM::LDRB_POST_IMM:
6210 case ARM::LDRB_POST_REG:
6211 case ARM::LDRSB_PRE:
6212 case ARM::LDRSB_POST: {
6213 // Rt must be different from Rn.
6214 const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
6215 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
6216
6217 if (Rt == Rn)
6218 return Error(Operands[3]->getStartLoc(),
6219 "destination register and base register can't be identical");
6220 return false;
6221 }
6222 case ARM::SBFX:
6223 case ARM::UBFX: {
6224 // Width must be in range [1, 32-lsb].
6225 unsigned LSB = Inst.getOperand(2).getImm();
6226 unsigned Widthm1 = Inst.getOperand(3).getImm();
6227 if (Widthm1 >= 32 - LSB)
6228 return Error(Operands[5]->getStartLoc(),
6229 "bitfield width must be in range [1,32-lsb]");
6230 return false;
6231 }
6232 // Notionally handles ARM::tLDMIA_UPD too.
6233 case ARM::tLDMIA: {
6234 // If we're parsing Thumb2, the .w variant is available and handles
6235 // most cases that are normally illegal for a Thumb1 LDM instruction.
6236 // We'll make the transformation in processInstruction() if necessary.
6237 //
6238 // Thumb LDM instructions are writeback iff the base register is not
6239 // in the register list.
6240 unsigned Rn = Inst.getOperand(0).getReg();
6241 bool HasWritebackToken =
6242 (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
6243 static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
6244 bool ListContainsBase;
6245 if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
6246 return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
6247 "registers must be in range r0-r7");
6248 // If we should have writeback, then there should be a '!' token.
6249 if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
6250 return Error(Operands[2]->getStartLoc(),
6251 "writeback operator '!' expected");
6252 // If we should not have writeback, there must not be a '!'. This is
6253 // true even for the 32-bit wide encodings.
6254 if (ListContainsBase && HasWritebackToken)
6255 return Error(Operands[3]->getStartLoc(),
6256 "writeback operator '!' not allowed when base register "
6257 "in register list");
6258
6259 if (validatetLDMRegList(Inst, Operands, 3))
6260 return true;
6261 break;
6262 }
6263 case ARM::LDMIA_UPD:
6264 case ARM::LDMDB_UPD:
6265 case ARM::LDMIB_UPD:
6266 case ARM::LDMDA_UPD:
6267 // ARM variants loading and updating the same register are only officially
6268 // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
6269 if (!hasV7Ops())
6270 break;
6271 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
6272 return Error(Operands.back()->getStartLoc(),
6273 "writeback register not allowed in register list");
6274 break;
6275 case ARM::t2LDMIA:
6276 case ARM::t2LDMDB:
6277 if (validatetLDMRegList(Inst, Operands, 3))
6278 return true;
6279 break;
6280 case ARM::t2STMIA:
6281 case ARM::t2STMDB:
6282 if (validatetSTMRegList(Inst, Operands, 3))
6283 return true;
6284 break;
6285 case ARM::t2LDMIA_UPD:
6286 case ARM::t2LDMDB_UPD:
6287 case ARM::t2STMIA_UPD:
6288 case ARM::t2STMDB_UPD: {
6289 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
6290 return Error(Operands.back()->getStartLoc(),
6291 "writeback register not allowed in register list");
6292
6293 if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
6294 if (validatetLDMRegList(Inst, Operands, 3))
6295 return true;
6296 } else {
6297 if (validatetSTMRegList(Inst, Operands, 3))
6298 return true;
6299 }
6300 break;
6301 }
6302 case ARM::sysLDMIA_UPD:
6303 case ARM::sysLDMDA_UPD:
6304 case ARM::sysLDMDB_UPD:
6305 case ARM::sysLDMIB_UPD:
6306 if (!listContainsReg(Inst, 3, ARM::PC))
6307 return Error(Operands[4]->getStartLoc(),
6308 "writeback register only allowed on system LDM "
6309 "if PC in register-list");
6310 break;
6311 case ARM::sysSTMIA_UPD:
6312 case ARM::sysSTMDA_UPD:
6313 case ARM::sysSTMDB_UPD:
6314 case ARM::sysSTMIB_UPD:
6315 return Error(Operands[2]->getStartLoc(),
6316 "system STM cannot have writeback register");
6317 case ARM::tMUL: {
6318 // The second source operand must be the same register as the destination
6319 // operand.
6320 //
6321 // In this case, we must directly check the parsed operands because the
6322 // cvtThumbMultiply() function is written in such a way that it guarantees
6323 // this first statement is always true for the new Inst. Essentially, the
6324 // destination is unconditionally copied into the second source operand
6325 // without checking to see if it matches what we actually parsed.
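    // E.g. 'muls r0, r1, r0' is accepted, while 'muls r0, r1, r2' is rejected
    // because the destination matches neither source register.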
6326 if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() !=
6327 ((ARMOperand &)*Operands[5]).getReg()) &&
6328 (((ARMOperand &)*Operands[3]).getReg() !=
6329 ((ARMOperand &)*Operands[4]).getReg())) {
6330 return Error(Operands[3]->getStartLoc(),
6331 "destination register must match source register");
6332 }
6333 break;
6334 }
6335 // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
6336 // so only issue a diagnostic for thumb1. The instructions will be
6337 // switched to the t2 encodings in processInstruction() if necessary.
6338 case ARM::tPOP: {
6339 bool ListContainsBase;
6340 if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
6341 !isThumbTwo())
6342 return Error(Operands[2]->getStartLoc(),
6343 "registers must be in range r0-r7 or pc");
6344 if (validatetLDMRegList(Inst, Operands, 2, !isMClass()))
6345 return true;
6346 break;
6347 }
6348 case ARM::tPUSH: {
6349 bool ListContainsBase;
6350 if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
6351 !isThumbTwo())
6352 return Error(Operands[2]->getStartLoc(),
6353 "registers must be in range r0-r7 or lr");
6354 if (validatetSTMRegList(Inst, Operands, 2))
6355 return true;
6356 break;
6357 }
6358 case ARM::tSTMIA_UPD: {
6359 bool ListContainsBase, InvalidLowList;
6360 InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
6361 0, ListContainsBase);
6362 if (InvalidLowList && !isThumbTwo())
6363 return Error(Operands[4]->getStartLoc(),
6364 "registers must be in range r0-r7");
6365
6366 // This would be converted to a 32-bit stm, but that's not valid if the
6367 // writeback register is in the list.
6368 if (InvalidLowList && ListContainsBase)
6369 return Error(Operands[4]->getStartLoc(),
6370 "writeback operator '!' not allowed when base register "
6371 "in register list");
6372
6373 if (validatetSTMRegList(Inst, Operands, 4))
6374 return true;
6375 break;
6376 }
6377 case ARM::tADDrSP: {
6378 // If the non-SP source operand and the destination operand are not the
6379 // same, we need thumb2 (for the wide encoding), or we have an error.
6380 if (!isThumbTwo() &&
6381 Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
6382 return Error(Operands[4]->getStartLoc(),
6383 "source register must be the same as destination");
6384 }
6385 break;
6386 }
6387 // Final range checking for Thumb unconditional branch instructions.
6388 case ARM::tB:
6389 if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>())
6390 return Error(Operands[2]->getStartLoc(), "branch target out of range");
6391 break;
6392 case ARM::t2B: {
6393 int op = (Operands[2]->isImm()) ? 2 : 3;
6394 if (!static_cast<ARMOperand &>(*Operands[op]).isSignedOffset<24, 1>())
6395 return Error(Operands[op]->getStartLoc(), "branch target out of range");
6396 break;
6397 }
6398 // Final range checking for Thumb conditional branch instructions.
6399 case ARM::tBcc:
6400 if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<8, 1>())
6401 return Error(Operands[2]->getStartLoc(), "branch target out of range");
6402 break;
6403 case ARM::t2Bcc: {
6404 int Op = (Operands[2]->isImm()) ? 2 : 3;
6405 if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
6406 return Error(Operands[Op]->getStartLoc(), "branch target out of range");
6407 break;
6408 }
6409 case ARM::MOVi16:
6410 case ARM::t2MOVi16:
6411 case ARM::t2MOVTi16:
6412 {
6413 // We want to avoid misleadingly allowing something like "mov r0, <symbol>"
6414 // especially when we turn it into a movw and the expression <symbol> does
6415 // not have a :lower16: or :upper16: as part of the expression. We don't
6416 // want the behavior of silently truncating, which can be unexpected and
6417 // lead to bugs that are difficult to find since this is an easy mistake
6418 // to make.
6419 int i = (Operands[3]->isImm()) ? 3 : 4;
6420 ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
6421 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
6422 if (CE) break;
6423 const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
6424 if (!E) break;
6425 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
6426 if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
6427 ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16))
6428 return Error(
6429 Op.getStartLoc(),
6430 "immediate expression for mov requires :lower16: or :upper16");
6431 break;
6432 }
6433 }
6434
6435 return false;
6436 }
6437
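// Map the pseudo '..._Asm_*' opcodes used only for parsing onto the real VST
// instructions. Spacing reports the stride of the register list: 1 for
// consecutive D registers, 2 for the double-spaced (Q register) forms.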
6438 static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
6439 switch(Opc) {
6440 default: llvm_unreachable("unexpected opcode!");
6441 // VST1LN
6442 case ARM::VST1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
6443 case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
6444 case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
6445 case ARM::VST1LNdWB_register_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
6446 case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
6447 case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
6448 case ARM::VST1LNdAsm_8: Spacing = 1; return ARM::VST1LNd8;
6449 case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
6450 case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
6451
6452 // VST2LN
6453 case ARM::VST2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
6454 case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
6455 case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
6456 case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
6457 case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
6458
6459 case ARM::VST2LNdWB_register_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
6460 case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
6461 case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
6462 case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
6463 case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
6464
6465 case ARM::VST2LNdAsm_8: Spacing = 1; return ARM::VST2LNd8;
6466 case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
6467 case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
6468 case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
6469 case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
6470
6471 // VST3LN
6472 case ARM::VST3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
6473 case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
6474 case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
6475 case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
6476 case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
6477 case ARM::VST3LNdWB_register_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
6478 case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
6479 case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
6480 case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
6481 case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
6482 case ARM::VST3LNdAsm_8: Spacing = 1; return ARM::VST3LNd8;
6483 case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
6484 case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
6485 case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
6486 case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
6487
6488 // VST3
6489 case ARM::VST3dWB_fixed_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
6490 case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
6491 case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
6492 case ARM::VST3qWB_fixed_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
6493 case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
6494 case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
6495 case ARM::VST3dWB_register_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
6496 case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
6497 case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
6498 case ARM::VST3qWB_register_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
6499 case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
6500 case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
6501 case ARM::VST3dAsm_8: Spacing = 1; return ARM::VST3d8;
6502 case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
6503 case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
6504 case ARM::VST3qAsm_8: Spacing = 2; return ARM::VST3q8;
6505 case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
6506 case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
6507
6508 // VST4LN
6509 case ARM::VST4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
6510 case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
6511 case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
6512 case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
6513 case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
6514 case ARM::VST4LNdWB_register_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
6515 case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
6516 case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
6517 case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
6518 case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
6519 case ARM::VST4LNdAsm_8: Spacing = 1; return ARM::VST4LNd8;
6520 case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
6521 case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
6522 case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
6523 case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
6524
6525 // VST4
6526 case ARM::VST4dWB_fixed_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
6527 case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
6528 case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
6529 case ARM::VST4qWB_fixed_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
6530 case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
6531 case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
6532 case ARM::VST4dWB_register_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
6533 case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
6534 case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
6535 case ARM::VST4qWB_register_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
6536 case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
6537 case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
6538 case ARM::VST4dAsm_8: Spacing = 1; return ARM::VST4d8;
6539 case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
6540 case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
6541 case ARM::VST4qAsm_8: Spacing = 2; return ARM::VST4q8;
6542 case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
6543 case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
6544 }
6545 }
6546
6547 static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
6548 switch(Opc) {
6549 default: llvm_unreachable("unexpected opcode!");
6550 // VLD1LN
6551 case ARM::VLD1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
6552 case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
6553 case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
6554 case ARM::VLD1LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
6555 case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
6556 case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
6557 case ARM::VLD1LNdAsm_8: Spacing = 1; return ARM::VLD1LNd8;
6558 case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
6559 case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
6560
6561 // VLD2LN
6562 case ARM::VLD2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
6563 case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
6564 case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
6565 case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
6566 case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
6567 case ARM::VLD2LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
6568 case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
6569 case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
6570 case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
6571 case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
6572 case ARM::VLD2LNdAsm_8: Spacing = 1; return ARM::VLD2LNd8;
6573 case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
6574 case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
6575 case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
6576 case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
6577
6578 // VLD3DUP
6579 case ARM::VLD3DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
6580 case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
6581 case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
6582 case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
6583 case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
6584 case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
6585 case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
6586 case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
6587 case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
6588 case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
6589 case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
6590 case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
6591 case ARM::VLD3DUPdAsm_8: Spacing = 1; return ARM::VLD3DUPd8;
6592 case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
6593 case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
6594 case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
6595 case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
6596 case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
6597
6598 // VLD3LN
6599 case ARM::VLD3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
6600 case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
6601 case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
6602 case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
6603 case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
6604 case ARM::VLD3LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
6605 case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
6606 case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
6607 case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
6608 case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
6609 case ARM::VLD3LNdAsm_8: Spacing = 1; return ARM::VLD3LNd8;
6610 case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
6611 case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
6612 case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
6613 case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
6614
6615 // VLD3
6616 case ARM::VLD3dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
6617 case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
6618 case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
6619 case ARM::VLD3qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
6620 case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
6621 case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
6622 case ARM::VLD3dWB_register_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
6623 case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
6624 case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
6625 case ARM::VLD3qWB_register_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
6626 case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
6627 case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
6628 case ARM::VLD3dAsm_8: Spacing = 1; return ARM::VLD3d8;
6629 case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
6630 case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
6631 case ARM::VLD3qAsm_8: Spacing = 2; return ARM::VLD3q8;
6632 case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
6633 case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
6634
6635 // VLD4LN
6636 case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
6637 case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
6638 case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
6639 case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
6640 case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
6641 case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
6642 case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
6643 case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
6644 case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
6645 case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
6646 case ARM::VLD4LNdAsm_8: Spacing = 1; return ARM::VLD4LNd8;
6647 case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
6648 case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
6649 case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
6650 case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
6651
6652 // VLD4DUP
6653 case ARM::VLD4DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
6654 case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
6655 case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
6656 case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
6657 case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
6658 case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
6659 case ARM::VLD4DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
6660 case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
6661 case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
6662 case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
6663 case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
6664 case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
6665 case ARM::VLD4DUPdAsm_8: Spacing = 1; return ARM::VLD4DUPd8;
6666 case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
6667 case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
6668 case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
6669 case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
6670 case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
6671
6672 // VLD4
6673 case ARM::VLD4dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
6674 case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
6675 case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
6676 case ARM::VLD4qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
6677 case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
6678 case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
6679 case ARM::VLD4dWB_register_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
6680 case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
6681 case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
6682 case ARM::VLD4qWB_register_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
6683 case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
6684 case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
6685 case ARM::VLD4dAsm_8: Spacing = 1; return ARM::VLD4d8;
6686 case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
6687 case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
6688 case ARM::VLD4qAsm_8: Spacing = 2; return ARM::VLD4q8;
6689 case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
6690 case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
6691 }
6692 }
6693
6694 bool ARMAsmParser::processInstruction(MCInst &Inst,
6695 const OperandVector &Operands,
6696 MCStreamer &Out) {
6697 switch (Inst.getOpcode()) {
6698 // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
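  // E.g. 'ldrt r0, [r1]' with no post-index offset is rewritten to the
  // *_POST_IMM form with a zero offset register and zero immediate.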
6699 case ARM::LDRT_POST:
6700 case ARM::LDRBT_POST: {
6701 const unsigned Opcode =
6702 (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
6703 : ARM::LDRBT_POST_IMM;
6704 MCInst TmpInst;
6705 TmpInst.setOpcode(Opcode);
6706 TmpInst.addOperand(Inst.getOperand(0));
6707 TmpInst.addOperand(Inst.getOperand(1));
6708 TmpInst.addOperand(Inst.getOperand(1));
6709 TmpInst.addOperand(MCOperand::CreateReg(0));
6710 TmpInst.addOperand(MCOperand::CreateImm(0));
6711 TmpInst.addOperand(Inst.getOperand(2));
6712 TmpInst.addOperand(Inst.getOperand(3));
6713 Inst = TmpInst;
6714 return true;
6715 }
6716 // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
6717 case ARM::STRT_POST:
6718 case ARM::STRBT_POST: {
6719 const unsigned Opcode =
6720 (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
6721 : ARM::STRBT_POST_IMM;
6722 MCInst TmpInst;
6723 TmpInst.setOpcode(Opcode);
6724 TmpInst.addOperand(Inst.getOperand(1));
6725 TmpInst.addOperand(Inst.getOperand(0));
6726 TmpInst.addOperand(Inst.getOperand(1));
6727 TmpInst.addOperand(MCOperand::CreateReg(0));
6728 TmpInst.addOperand(MCOperand::CreateImm(0));
6729 TmpInst.addOperand(Inst.getOperand(2));
6730 TmpInst.addOperand(Inst.getOperand(3));
6731 Inst = TmpInst;
6732 return true;
6733 }
6734 // Alias for alternate form of 'ADR Rd, #imm' instruction.
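  // Accept 'add Rd, pc, #imm' (or a PC-relative expression) and rewrite it as
  // ADR, un-rotating the encoded mod_imm when the operand is an immediate.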
6735 case ARM::ADDri: {
6736 if (Inst.getOperand(1).getReg() != ARM::PC ||
6737 Inst.getOperand(5).getReg() != 0 ||
6738 !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm()))
6739 return false;
6740 MCInst TmpInst;
6741 TmpInst.setOpcode(ARM::ADR);
6742 TmpInst.addOperand(Inst.getOperand(0));
6743 if (Inst.getOperand(2).isImm()) {
6744 // Immediate (mod_imm) will be in its encoded form; we must unencode it
6745 // before passing it to the ADR instruction.
6746 unsigned Enc = Inst.getOperand(2).getImm();
6747 TmpInst.addOperand(MCOperand::CreateImm(
6748 ARM_AM::rotr32(Enc & 0xFF, (Enc & 0xF00) >> 7)));
6749 } else {
6750 // Turn PC-relative expression into absolute expression.
6751 // Reading PC provides the start of the current instruction + 8 and
6752 // the transform to adr is biased by that.
6753 MCSymbol *Dot = getContext().CreateTempSymbol();
6754 Out.EmitLabel(Dot);
6755 const MCExpr *OpExpr = Inst.getOperand(2).getExpr();
6756 const MCExpr *InstPC = MCSymbolRefExpr::Create(Dot,
6757 MCSymbolRefExpr::VK_None,
6758 getContext());
6759 const MCExpr *Const8 = MCConstantExpr::Create(8, getContext());
6760 const MCExpr *ReadPC = MCBinaryExpr::CreateAdd(InstPC, Const8,
6761 getContext());
6762 const MCExpr *FixupAddr = MCBinaryExpr::CreateAdd(ReadPC, OpExpr,
6763 getContext());
6764 TmpInst.addOperand(MCOperand::CreateExpr(FixupAddr));
6765 }
6766 TmpInst.addOperand(Inst.getOperand(3));
6767 TmpInst.addOperand(Inst.getOperand(4));
6768 Inst = TmpInst;
6769 return true;
6770 }
6771 // Aliases for alternate PC+imm syntax of LDR instructions.
6772 case ARM::t2LDRpcrel:
6773 // Select the narrow version if the immediate will fit.
6774 if (Inst.getOperand(1).getImm() > 0 &&
6775 Inst.getOperand(1).getImm() <= 0xff &&
6776 !(static_cast<ARMOperand &>(*Operands[2]).isToken() &&
6777 static_cast<ARMOperand &>(*Operands[2]).getToken() == ".w"))
6778 Inst.setOpcode(ARM::tLDRpci);
6779 else
6780 Inst.setOpcode(ARM::t2LDRpci);
6781 return true;
6782 case ARM::t2LDRBpcrel:
6783 Inst.setOpcode(ARM::t2LDRBpci);
6784 return true;
6785 case ARM::t2LDRHpcrel:
6786 Inst.setOpcode(ARM::t2LDRHpci);
6787 return true;
6788 case ARM::t2LDRSBpcrel:
6789 Inst.setOpcode(ARM::t2LDRSBpci);
6790 return true;
6791 case ARM::t2LDRSHpcrel:
6792 Inst.setOpcode(ARM::t2LDRSHpci);
6793 return true;
6794 // Handle NEON VST complex aliases.
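  // The parsed '..._Asm' forms carry the operands in source order (Vd, lane,
  // Rn, alignment[, Rm], pred); the real instructions expect the address
  // operands (and the writeback result, where present) first, so each case
  // below rebuilds the MCInst in that order.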
6795 case ARM::VST1LNdWB_register_Asm_8:
6796 case ARM::VST1LNdWB_register_Asm_16:
6797 case ARM::VST1LNdWB_register_Asm_32: {
6798 MCInst TmpInst;
6799 // Shuffle the operands around so the lane index operand is in the
6800 // right place.
6801 unsigned Spacing;
6802 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6803 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6804 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6805 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6806 TmpInst.addOperand(Inst.getOperand(4)); // Rm
6807 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6808 TmpInst.addOperand(Inst.getOperand(1)); // lane
6809 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6810 TmpInst.addOperand(Inst.getOperand(6));
6811 Inst = TmpInst;
6812 return true;
6813 }
6814
6815 case ARM::VST2LNdWB_register_Asm_8:
6816 case ARM::VST2LNdWB_register_Asm_16:
6817 case ARM::VST2LNdWB_register_Asm_32:
6818 case ARM::VST2LNqWB_register_Asm_16:
6819 case ARM::VST2LNqWB_register_Asm_32: {
6820 MCInst TmpInst;
6821 // Shuffle the operands around so the lane index operand is in the
6822 // right place.
6823 unsigned Spacing;
6824 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6825 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6826 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6827 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6828 TmpInst.addOperand(Inst.getOperand(4)); // Rm
6829 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6830 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6831 Spacing));
6832 TmpInst.addOperand(Inst.getOperand(1)); // lane
6833 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6834 TmpInst.addOperand(Inst.getOperand(6));
6835 Inst = TmpInst;
6836 return true;
6837 }
6838
6839 case ARM::VST3LNdWB_register_Asm_8:
6840 case ARM::VST3LNdWB_register_Asm_16:
6841 case ARM::VST3LNdWB_register_Asm_32:
6842 case ARM::VST3LNqWB_register_Asm_16:
6843 case ARM::VST3LNqWB_register_Asm_32: {
6844 MCInst TmpInst;
6845 // Shuffle the operands around so the lane index operand is in the
6846 // right place.
6847 unsigned Spacing;
6848 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6849 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6850 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6851 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6852 TmpInst.addOperand(Inst.getOperand(4)); // Rm
6853 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6854 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6855 Spacing));
6856 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6857 Spacing * 2));
6858 TmpInst.addOperand(Inst.getOperand(1)); // lane
6859 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6860 TmpInst.addOperand(Inst.getOperand(6));
6861 Inst = TmpInst;
6862 return true;
6863 }
6864
6865 case ARM::VST4LNdWB_register_Asm_8:
6866 case ARM::VST4LNdWB_register_Asm_16:
6867 case ARM::VST4LNdWB_register_Asm_32:
6868 case ARM::VST4LNqWB_register_Asm_16:
6869 case ARM::VST4LNqWB_register_Asm_32: {
6870 MCInst TmpInst;
6871 // Shuffle the operands around so the lane index operand is in the
6872 // right place.
6873 unsigned Spacing;
6874 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6875 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6876 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6877 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6878 TmpInst.addOperand(Inst.getOperand(4)); // Rm
6879 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6880 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6881 Spacing));
6882 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6883 Spacing * 2));
6884 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6885 Spacing * 3));
6886 TmpInst.addOperand(Inst.getOperand(1)); // lane
6887 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6888 TmpInst.addOperand(Inst.getOperand(6));
6889 Inst = TmpInst;
6890 return true;
6891 }
6892
6893 case ARM::VST1LNdWB_fixed_Asm_8:
6894 case ARM::VST1LNdWB_fixed_Asm_16:
6895 case ARM::VST1LNdWB_fixed_Asm_32: {
6896 MCInst TmpInst;
6897 // Shuffle the operands around so the lane index operand is in the
6898 // right place.
6899 unsigned Spacing;
6900 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6901 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6902 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6903 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6904 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6905 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6906 TmpInst.addOperand(Inst.getOperand(1)); // lane
6907 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6908 TmpInst.addOperand(Inst.getOperand(5));
6909 Inst = TmpInst;
6910 return true;
6911 }
6912
6913 case ARM::VST2LNdWB_fixed_Asm_8:
6914 case ARM::VST2LNdWB_fixed_Asm_16:
6915 case ARM::VST2LNdWB_fixed_Asm_32:
6916 case ARM::VST2LNqWB_fixed_Asm_16:
6917 case ARM::VST2LNqWB_fixed_Asm_32: {
6918 MCInst TmpInst;
6919 // Shuffle the operands around so the lane index operand is in the
6920 // right place.
6921 unsigned Spacing;
6922 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6923 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6924 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6925 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6926 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6927 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6928 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6929 Spacing));
6930 TmpInst.addOperand(Inst.getOperand(1)); // lane
6931 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6932 TmpInst.addOperand(Inst.getOperand(5));
6933 Inst = TmpInst;
6934 return true;
6935 }
6936
6937 case ARM::VST3LNdWB_fixed_Asm_8:
6938 case ARM::VST3LNdWB_fixed_Asm_16:
6939 case ARM::VST3LNdWB_fixed_Asm_32:
6940 case ARM::VST3LNqWB_fixed_Asm_16:
6941 case ARM::VST3LNqWB_fixed_Asm_32: {
6942 MCInst TmpInst;
6943 // Shuffle the operands around so the lane index operand is in the
6944 // right place.
6945 unsigned Spacing;
6946 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6947 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6948 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6949 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6950 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6951 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6952 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6953 Spacing));
6954 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6955 Spacing * 2));
6956 TmpInst.addOperand(Inst.getOperand(1)); // lane
6957 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6958 TmpInst.addOperand(Inst.getOperand(5));
6959 Inst = TmpInst;
6960 return true;
6961 }
6962
6963 case ARM::VST4LNdWB_fixed_Asm_8:
6964 case ARM::VST4LNdWB_fixed_Asm_16:
6965 case ARM::VST4LNdWB_fixed_Asm_32:
6966 case ARM::VST4LNqWB_fixed_Asm_16:
6967 case ARM::VST4LNqWB_fixed_Asm_32: {
6968 MCInst TmpInst;
6969 // Shuffle the operands around so the lane index operand is in the
6970 // right place.
6971 unsigned Spacing;
6972 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6973 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6974 TmpInst.addOperand(Inst.getOperand(2)); // Rn
6975 TmpInst.addOperand(Inst.getOperand(3)); // alignment
6976 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6977 TmpInst.addOperand(Inst.getOperand(0)); // Vd
6978 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6979 Spacing));
6980 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6981 Spacing * 2));
6982 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6983 Spacing * 3));
6984 TmpInst.addOperand(Inst.getOperand(1)); // lane
6985 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6986 TmpInst.addOperand(Inst.getOperand(5));
6987 Inst = TmpInst;
6988 return true;
6989 }
6990
6991 case ARM::VST1LNdAsm_8:
6992 case ARM::VST1LNdAsm_16:
6993 case ARM::VST1LNdAsm_32: {
6994 MCInst TmpInst;
6995 // Shuffle the operands around so the lane index operand is in the
6996 // right place.
6997 unsigned Spacing;
6998 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6999 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7000 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7001 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7002 TmpInst.addOperand(Inst.getOperand(1)); // lane
7003 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7004 TmpInst.addOperand(Inst.getOperand(5));
7005 Inst = TmpInst;
7006 return true;
7007 }
7008
7009 case ARM::VST2LNdAsm_8:
7010 case ARM::VST2LNdAsm_16:
7011 case ARM::VST2LNdAsm_32:
7012 case ARM::VST2LNqAsm_16:
7013 case ARM::VST2LNqAsm_32: {
7014 MCInst TmpInst;
7015 // Shuffle the operands around so the lane index operand is in the
7016 // right place.
7017 unsigned Spacing;
7018 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7019 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7020 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7021 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7022 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7023 Spacing));
7024 TmpInst.addOperand(Inst.getOperand(1)); // lane
7025 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7026 TmpInst.addOperand(Inst.getOperand(5));
7027 Inst = TmpInst;
7028 return true;
7029 }
7030
7031 case ARM::VST3LNdAsm_8:
7032 case ARM::VST3LNdAsm_16:
7033 case ARM::VST3LNdAsm_32:
7034 case ARM::VST3LNqAsm_16:
7035 case ARM::VST3LNqAsm_32: {
7036 MCInst TmpInst;
7037 // Shuffle the operands around so the lane index operand is in the
7038 // right place.
7039 unsigned Spacing;
7040 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7041 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7042 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7043 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7044 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7045 Spacing));
7046 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7047 Spacing * 2));
7048 TmpInst.addOperand(Inst.getOperand(1)); // lane
7049 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7050 TmpInst.addOperand(Inst.getOperand(5));
7051 Inst = TmpInst;
7052 return true;
7053 }
7054
7055 case ARM::VST4LNdAsm_8:
7056 case ARM::VST4LNdAsm_16:
7057 case ARM::VST4LNdAsm_32:
7058 case ARM::VST4LNqAsm_16:
7059 case ARM::VST4LNqAsm_32: {
7060 MCInst TmpInst;
7061 // Shuffle the operands around so the lane index operand is in the
7062 // right place.
7063 unsigned Spacing;
7064 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7065 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7066 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7067 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7068 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7069 Spacing));
7070 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7071 Spacing * 2));
7072 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7073 Spacing * 3));
7074 TmpInst.addOperand(Inst.getOperand(1)); // lane
7075 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7076 TmpInst.addOperand(Inst.getOperand(5));
7077 Inst = TmpInst;
7078 return true;
7079 }
7080
7081 // Handle NEON VLD complex aliases.
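// Each of these *_Asm_* opcodes is an assembly-only pseudo; it is rewritten
// here into the real VLDn lane instruction, with the remaining registers of
// the list synthesized from the first register plus the lane spacing
// returned by getRealVLDOpcode().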
7082 case ARM::VLD1LNdWB_register_Asm_8:
7083 case ARM::VLD1LNdWB_register_Asm_16:
7084 case ARM::VLD1LNdWB_register_Asm_32: {
7085 MCInst TmpInst;
7086 // Shuffle the operands around so the lane index operand is in the
7087 // right place.
7088 unsigned Spacing;
7089 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7090 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7091 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7092 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7093 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7094 TmpInst.addOperand(Inst.getOperand(4)); // Rm
7095 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7096 TmpInst.addOperand(Inst.getOperand(1)); // lane
7097 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7098 TmpInst.addOperand(Inst.getOperand(6));
7099 Inst = TmpInst;
7100 return true;
7101 }
7102
7103 case ARM::VLD2LNdWB_register_Asm_8:
7104 case ARM::VLD2LNdWB_register_Asm_16:
7105 case ARM::VLD2LNdWB_register_Asm_32:
7106 case ARM::VLD2LNqWB_register_Asm_16:
7107 case ARM::VLD2LNqWB_register_Asm_32: {
7108 MCInst TmpInst;
7109 // Shuffle the operands around so the lane index operand is in the
7110 // right place.
7111 unsigned Spacing;
7112 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7113 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7114 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7115 Spacing));
7116 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7117 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7118 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7119 TmpInst.addOperand(Inst.getOperand(4)); // Rm
7120 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7121 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7122 Spacing));
7123 TmpInst.addOperand(Inst.getOperand(1)); // lane
7124 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7125 TmpInst.addOperand(Inst.getOperand(6));
7126 Inst = TmpInst;
7127 return true;
7128 }
7129
7130 case ARM::VLD3LNdWB_register_Asm_8:
7131 case ARM::VLD3LNdWB_register_Asm_16:
7132 case ARM::VLD3LNdWB_register_Asm_32:
7133 case ARM::VLD3LNqWB_register_Asm_16:
7134 case ARM::VLD3LNqWB_register_Asm_32: {
7135 MCInst TmpInst;
7136 // Shuffle the operands around so the lane index operand is in the
7137 // right place.
7138 unsigned Spacing;
7139 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7140 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7141 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7142 Spacing));
7143 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7144 Spacing * 2));
7145 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7146 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7147 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7148 TmpInst.addOperand(Inst.getOperand(4)); // Rm
7149 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7150 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7151 Spacing));
7152 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7153 Spacing * 2));
7154 TmpInst.addOperand(Inst.getOperand(1)); // lane
7155 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7156 TmpInst.addOperand(Inst.getOperand(6));
7157 Inst = TmpInst;
7158 return true;
7159 }
7160
7161 case ARM::VLD4LNdWB_register_Asm_8:
7162 case ARM::VLD4LNdWB_register_Asm_16:
7163 case ARM::VLD4LNdWB_register_Asm_32:
7164 case ARM::VLD4LNqWB_register_Asm_16:
7165 case ARM::VLD4LNqWB_register_Asm_32: {
7166 MCInst TmpInst;
7167 // Shuffle the operands around so the lane index operand is in the
7168 // right place.
7169 unsigned Spacing;
7170 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7171 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7172 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7173 Spacing));
7174 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7175 Spacing * 2));
7176 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7177 Spacing * 3));
7178 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7179 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7180 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7181 TmpInst.addOperand(Inst.getOperand(4)); // Rm
7182 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7183 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7184 Spacing));
7185 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7186 Spacing * 2));
7187 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7188 Spacing * 3));
7189 TmpInst.addOperand(Inst.getOperand(1)); // lane
7190 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7191 TmpInst.addOperand(Inst.getOperand(6));
7192 Inst = TmpInst;
7193 return true;
7194 }
7195
7196 case ARM::VLD1LNdWB_fixed_Asm_8:
7197 case ARM::VLD1LNdWB_fixed_Asm_16:
7198 case ARM::VLD1LNdWB_fixed_Asm_32: {
7199 MCInst TmpInst;
7200 // Shuffle the operands around so the lane index operand is in the
7201 // right place.
7202 unsigned Spacing;
7203 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7204 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7205 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7206 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7207 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7208 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
7209 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7210 TmpInst.addOperand(Inst.getOperand(1)); // lane
7211 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7212 TmpInst.addOperand(Inst.getOperand(5));
7213 Inst = TmpInst;
7214 return true;
7215 }
7216
7217 case ARM::VLD2LNdWB_fixed_Asm_8:
7218 case ARM::VLD2LNdWB_fixed_Asm_16:
7219 case ARM::VLD2LNdWB_fixed_Asm_32:
7220 case ARM::VLD2LNqWB_fixed_Asm_16:
7221 case ARM::VLD2LNqWB_fixed_Asm_32: {
7222 MCInst TmpInst;
7223 // Shuffle the operands around so the lane index operand is in the
7224 // right place.
7225 unsigned Spacing;
7226 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7227 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7228 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7229 Spacing));
7230 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7231 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7232 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7233 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
7234 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7235 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7236 Spacing));
7237 TmpInst.addOperand(Inst.getOperand(1)); // lane
7238 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7239 TmpInst.addOperand(Inst.getOperand(5));
7240 Inst = TmpInst;
7241 return true;
7242 }
7243
7244 case ARM::VLD3LNdWB_fixed_Asm_8:
7245 case ARM::VLD3LNdWB_fixed_Asm_16:
7246 case ARM::VLD3LNdWB_fixed_Asm_32:
7247 case ARM::VLD3LNqWB_fixed_Asm_16:
7248 case ARM::VLD3LNqWB_fixed_Asm_32: {
7249 MCInst TmpInst;
7250 // Shuffle the operands around so the lane index operand is in the
7251 // right place.
7252 unsigned Spacing;
7253 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7254 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7255 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7256 Spacing));
7257 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7258 Spacing * 2));
7259 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7260 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7261 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7262 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
7263 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7264 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7265 Spacing));
7266 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7267 Spacing * 2));
7268 TmpInst.addOperand(Inst.getOperand(1)); // lane
7269 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7270 TmpInst.addOperand(Inst.getOperand(5));
7271 Inst = TmpInst;
7272 return true;
7273 }
7274
7275 case ARM::VLD4LNdWB_fixed_Asm_8:
7276 case ARM::VLD4LNdWB_fixed_Asm_16:
7277 case ARM::VLD4LNdWB_fixed_Asm_32:
7278 case ARM::VLD4LNqWB_fixed_Asm_16:
7279 case ARM::VLD4LNqWB_fixed_Asm_32: {
7280 MCInst TmpInst;
7281 // Shuffle the operands around so the lane index operand is in the
7282 // right place.
7283 unsigned Spacing;
7284 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7285 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7286 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7287 Spacing));
7288 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7289 Spacing * 2));
7290 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7291 Spacing * 3));
7292 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7293 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7294 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7295 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
7296 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7297 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7298 Spacing));
7299 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7300 Spacing * 2));
7301 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7302 Spacing * 3));
7303 TmpInst.addOperand(Inst.getOperand(1)); // lane
7304 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7305 TmpInst.addOperand(Inst.getOperand(5));
7306 Inst = TmpInst;
7307 return true;
7308 }
7309
7310 case ARM::VLD1LNdAsm_8:
7311 case ARM::VLD1LNdAsm_16:
7312 case ARM::VLD1LNdAsm_32: {
7313 MCInst TmpInst;
7314 // Shuffle the operands around so the lane index operand is in the
7315 // right place.
7316 unsigned Spacing;
7317 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7318 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7319 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7320 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7321 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7322 TmpInst.addOperand(Inst.getOperand(1)); // lane
7323 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7324 TmpInst.addOperand(Inst.getOperand(5));
7325 Inst = TmpInst;
7326 return true;
7327 }
7328
7329 case ARM::VLD2LNdAsm_8:
7330 case ARM::VLD2LNdAsm_16:
7331 case ARM::VLD2LNdAsm_32:
7332 case ARM::VLD2LNqAsm_16:
7333 case ARM::VLD2LNqAsm_32: {
7334 MCInst TmpInst;
7335 // Shuffle the operands around so the lane index operand is in the
7336 // right place.
7337 unsigned Spacing;
7338 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7339 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7340 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7341 Spacing));
7342 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7343 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7344 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7345 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7346 Spacing));
7347 TmpInst.addOperand(Inst.getOperand(1)); // lane
7348 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7349 TmpInst.addOperand(Inst.getOperand(5));
7350 Inst = TmpInst;
7351 return true;
7352 }
7353
7354 case ARM::VLD3LNdAsm_8:
7355 case ARM::VLD3LNdAsm_16:
7356 case ARM::VLD3LNdAsm_32:
7357 case ARM::VLD3LNqAsm_16:
7358 case ARM::VLD3LNqAsm_32: {
7359 MCInst TmpInst;
7360 // Shuffle the operands around so the lane index operand is in the
7361 // right place.
7362 unsigned Spacing;
7363 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7364 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7365 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7366 Spacing));
7367 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7368 Spacing * 2));
7369 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7370 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7371 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7372 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7373 Spacing));
7374 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7375 Spacing * 2));
7376 TmpInst.addOperand(Inst.getOperand(1)); // lane
7377 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7378 TmpInst.addOperand(Inst.getOperand(5));
7379 Inst = TmpInst;
7380 return true;
7381 }
7382
7383 case ARM::VLD4LNdAsm_8:
7384 case ARM::VLD4LNdAsm_16:
7385 case ARM::VLD4LNdAsm_32:
7386 case ARM::VLD4LNqAsm_16:
7387 case ARM::VLD4LNqAsm_32: {
7388 MCInst TmpInst;
7389 // Shuffle the operands around so the lane index operand is in the
7390 // right place.
7391 unsigned Spacing;
7392 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7393 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7394 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7395 Spacing));
7396 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7397 Spacing * 2));
7398 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7399 Spacing * 3));
7400 TmpInst.addOperand(Inst.getOperand(2)); // Rn
7401 TmpInst.addOperand(Inst.getOperand(3)); // alignment
7402 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7403 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7404 Spacing));
7405 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7406 Spacing * 2));
7407 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7408 Spacing * 3));
7409 TmpInst.addOperand(Inst.getOperand(1)); // lane
7410 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7411 TmpInst.addOperand(Inst.getOperand(5));
7412 Inst = TmpInst;
7413 return true;
7414 }
7415
7416 // VLD3DUP single 3-element structure to all lanes instructions.
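// e.g. (illustrative) "vld3.8 {d0[], d1[], d2[]}, [r0]" loads one element
// and replicates it into every lane of each listed register.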
7417 case ARM::VLD3DUPdAsm_8:
7418 case ARM::VLD3DUPdAsm_16:
7419 case ARM::VLD3DUPdAsm_32:
7420 case ARM::VLD3DUPqAsm_8:
7421 case ARM::VLD3DUPqAsm_16:
7422 case ARM::VLD3DUPqAsm_32: {
7423 MCInst TmpInst;
7424 unsigned Spacing;
7425 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7426 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7427 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7428 Spacing));
7429 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7430 Spacing * 2));
7431 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7432 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7433 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7434 TmpInst.addOperand(Inst.getOperand(4));
7435 Inst = TmpInst;
7436 return true;
7437 }
7438
7439 case ARM::VLD3DUPdWB_fixed_Asm_8:
7440 case ARM::VLD3DUPdWB_fixed_Asm_16:
7441 case ARM::VLD3DUPdWB_fixed_Asm_32:
7442 case ARM::VLD3DUPqWB_fixed_Asm_8:
7443 case ARM::VLD3DUPqWB_fixed_Asm_16:
7444 case ARM::VLD3DUPqWB_fixed_Asm_32: {
7445 MCInst TmpInst;
7446 unsigned Spacing;
7447 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7448 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7449 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7450 Spacing));
7451 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7452 Spacing * 2));
7453 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7454 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7455 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7456 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
7457 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7458 TmpInst.addOperand(Inst.getOperand(4));
7459 Inst = TmpInst;
7460 return true;
7461 }
7462
7463 case ARM::VLD3DUPdWB_register_Asm_8:
7464 case ARM::VLD3DUPdWB_register_Asm_16:
7465 case ARM::VLD3DUPdWB_register_Asm_32:
7466 case ARM::VLD3DUPqWB_register_Asm_8:
7467 case ARM::VLD3DUPqWB_register_Asm_16:
7468 case ARM::VLD3DUPqWB_register_Asm_32: {
7469 MCInst TmpInst;
7470 unsigned Spacing;
7471 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7472 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7473 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7474 Spacing));
7475 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7476 Spacing * 2));
7477 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7478 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7479 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7480 TmpInst.addOperand(Inst.getOperand(3)); // Rm
7481 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7482 TmpInst.addOperand(Inst.getOperand(5));
7483 Inst = TmpInst;
7484 return true;
7485 }
7486
7487 // VLD3 multiple 3-element structure instructions.
7488 case ARM::VLD3dAsm_8:
7489 case ARM::VLD3dAsm_16:
7490 case ARM::VLD3dAsm_32:
7491 case ARM::VLD3qAsm_8:
7492 case ARM::VLD3qAsm_16:
7493 case ARM::VLD3qAsm_32: {
7494 MCInst TmpInst;
7495 unsigned Spacing;
7496 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7497 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7498 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7499 Spacing));
7500 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7501 Spacing * 2));
7502 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7503 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7504 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7505 TmpInst.addOperand(Inst.getOperand(4));
7506 Inst = TmpInst;
7507 return true;
7508 }
7509
7510 case ARM::VLD3dWB_fixed_Asm_8:
7511 case ARM::VLD3dWB_fixed_Asm_16:
7512 case ARM::VLD3dWB_fixed_Asm_32:
7513 case ARM::VLD3qWB_fixed_Asm_8:
7514 case ARM::VLD3qWB_fixed_Asm_16:
7515 case ARM::VLD3qWB_fixed_Asm_32: {
7516 MCInst TmpInst;
7517 unsigned Spacing;
7518 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7519 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7520 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7521 Spacing));
7522 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7523 Spacing * 2));
7524 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7525 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7526 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7527 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
7528 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7529 TmpInst.addOperand(Inst.getOperand(4));
7530 Inst = TmpInst;
7531 return true;
7532 }
7533
7534 case ARM::VLD3dWB_register_Asm_8:
7535 case ARM::VLD3dWB_register_Asm_16:
7536 case ARM::VLD3dWB_register_Asm_32:
7537 case ARM::VLD3qWB_register_Asm_8:
7538 case ARM::VLD3qWB_register_Asm_16:
7539 case ARM::VLD3qWB_register_Asm_32: {
7540 MCInst TmpInst;
7541 unsigned Spacing;
7542 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7543 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7544 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7545 Spacing));
7546 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7547 Spacing * 2));
7548 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7549 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7550 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7551 TmpInst.addOperand(Inst.getOperand(3)); // Rm
7552 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7553 TmpInst.addOperand(Inst.getOperand(5));
7554 Inst = TmpInst;
7555 return true;
7556 }
7557
7558 // VLD4DUP single 4-element structure to all lanes instructions.
7559 case ARM::VLD4DUPdAsm_8:
7560 case ARM::VLD4DUPdAsm_16:
7561 case ARM::VLD4DUPdAsm_32:
7562 case ARM::VLD4DUPqAsm_8:
7563 case ARM::VLD4DUPqAsm_16:
7564 case ARM::VLD4DUPqAsm_32: {
7565 MCInst TmpInst;
7566 unsigned Spacing;
7567 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7568 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7569 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7570 Spacing));
7571 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7572 Spacing * 2));
7573 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7574 Spacing * 3));
7575 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7576 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7577 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7578 TmpInst.addOperand(Inst.getOperand(4));
7579 Inst = TmpInst;
7580 return true;
7581 }
7582
7583 case ARM::VLD4DUPdWB_fixed_Asm_8:
7584 case ARM::VLD4DUPdWB_fixed_Asm_16:
7585 case ARM::VLD4DUPdWB_fixed_Asm_32:
7586 case ARM::VLD4DUPqWB_fixed_Asm_8:
7587 case ARM::VLD4DUPqWB_fixed_Asm_16:
7588 case ARM::VLD4DUPqWB_fixed_Asm_32: {
7589 MCInst TmpInst;
7590 unsigned Spacing;
7591 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7592 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7593 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7594 Spacing));
7595 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7596 Spacing * 2));
7597 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7598 Spacing * 3));
7599 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7600 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7601 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7602 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
7603 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7604 TmpInst.addOperand(Inst.getOperand(4));
7605 Inst = TmpInst;
7606 return true;
7607 }
7608
7609 case ARM::VLD4DUPdWB_register_Asm_8:
7610 case ARM::VLD4DUPdWB_register_Asm_16:
7611 case ARM::VLD4DUPdWB_register_Asm_32:
7612 case ARM::VLD4DUPqWB_register_Asm_8:
7613 case ARM::VLD4DUPqWB_register_Asm_16:
7614 case ARM::VLD4DUPqWB_register_Asm_32: {
7615 MCInst TmpInst;
7616 unsigned Spacing;
7617 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7618 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7619 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7620 Spacing));
7621 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7622 Spacing * 2));
7623 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7624 Spacing * 3));
7625 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7626 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7627 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7628 TmpInst.addOperand(Inst.getOperand(3)); // Rm
7629 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7630 TmpInst.addOperand(Inst.getOperand(5));
7631 Inst = TmpInst;
7632 return true;
7633 }
7634
7635 // VLD4 multiple 4-element structure instructions.
7636 case ARM::VLD4dAsm_8:
7637 case ARM::VLD4dAsm_16:
7638 case ARM::VLD4dAsm_32:
7639 case ARM::VLD4qAsm_8:
7640 case ARM::VLD4qAsm_16:
7641 case ARM::VLD4qAsm_32: {
7642 MCInst TmpInst;
7643 unsigned Spacing;
7644 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7645 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7646 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7647 Spacing));
7648 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7649 Spacing * 2));
7650 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7651 Spacing * 3));
7652 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7653 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7654 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7655 TmpInst.addOperand(Inst.getOperand(4));
7656 Inst = TmpInst;
7657 return true;
7658 }
7659
7660 case ARM::VLD4dWB_fixed_Asm_8:
7661 case ARM::VLD4dWB_fixed_Asm_16:
7662 case ARM::VLD4dWB_fixed_Asm_32:
7663 case ARM::VLD4qWB_fixed_Asm_8:
7664 case ARM::VLD4qWB_fixed_Asm_16:
7665 case ARM::VLD4qWB_fixed_Asm_32: {
7666 MCInst TmpInst;
7667 unsigned Spacing;
7668 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7669 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7670 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7671 Spacing));
7672 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7673 Spacing * 2));
7674 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7675 Spacing * 3));
7676 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7677 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7678 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7679 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
7680 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7681 TmpInst.addOperand(Inst.getOperand(4));
7682 Inst = TmpInst;
7683 return true;
7684 }
7685
7686 case ARM::VLD4dWB_register_Asm_8:
7687 case ARM::VLD4dWB_register_Asm_16:
7688 case ARM::VLD4dWB_register_Asm_32:
7689 case ARM::VLD4qWB_register_Asm_8:
7690 case ARM::VLD4qWB_register_Asm_16:
7691 case ARM::VLD4qWB_register_Asm_32: {
7692 MCInst TmpInst;
7693 unsigned Spacing;
7694 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7695 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7696 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7697 Spacing));
7698 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7699 Spacing * 2));
7700 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7701 Spacing * 3));
7702 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7703 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7704 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7705 TmpInst.addOperand(Inst.getOperand(3)); // Rm
7706 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7707 TmpInst.addOperand(Inst.getOperand(5));
7708 Inst = TmpInst;
7709 return true;
7710 }
7711
7712 // VST3 multiple 3-element structure instructions.
7713 case ARM::VST3dAsm_8:
7714 case ARM::VST3dAsm_16:
7715 case ARM::VST3dAsm_32:
7716 case ARM::VST3qAsm_8:
7717 case ARM::VST3qAsm_16:
7718 case ARM::VST3qAsm_32: {
7719 MCInst TmpInst;
7720 unsigned Spacing;
7721 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7722 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7723 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7724 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7725 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7726 Spacing));
7727 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7728 Spacing * 2));
7729 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7730 TmpInst.addOperand(Inst.getOperand(4));
7731 Inst = TmpInst;
7732 return true;
7733 }
7734
7735 case ARM::VST3dWB_fixed_Asm_8:
7736 case ARM::VST3dWB_fixed_Asm_16:
7737 case ARM::VST3dWB_fixed_Asm_32:
7738 case ARM::VST3qWB_fixed_Asm_8:
7739 case ARM::VST3qWB_fixed_Asm_16:
7740 case ARM::VST3qWB_fixed_Asm_32: {
7741 MCInst TmpInst;
7742 unsigned Spacing;
7743 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7744 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7745 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7746 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7747 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
7748 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7749 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7750 Spacing));
7751 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7752 Spacing * 2));
7753 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7754 TmpInst.addOperand(Inst.getOperand(4));
7755 Inst = TmpInst;
7756 return true;
7757 }
7758
7759 case ARM::VST3dWB_register_Asm_8:
7760 case ARM::VST3dWB_register_Asm_16:
7761 case ARM::VST3dWB_register_Asm_32:
7762 case ARM::VST3qWB_register_Asm_8:
7763 case ARM::VST3qWB_register_Asm_16:
7764 case ARM::VST3qWB_register_Asm_32: {
7765 MCInst TmpInst;
7766 unsigned Spacing;
7767 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7768 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7769 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7770 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7771 TmpInst.addOperand(Inst.getOperand(3)); // Rm
7772 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7773 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7774 Spacing));
7775 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7776 Spacing * 2));
7777 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7778 TmpInst.addOperand(Inst.getOperand(5));
7779 Inst = TmpInst;
7780 return true;
7781 }
7782
7783 // VST4 multiple 4-element structure instructions.
7784 case ARM::VST4dAsm_8:
7785 case ARM::VST4dAsm_16:
7786 case ARM::VST4dAsm_32:
7787 case ARM::VST4qAsm_8:
7788 case ARM::VST4qAsm_16:
7789 case ARM::VST4qAsm_32: {
7790 MCInst TmpInst;
7791 unsigned Spacing;
7792 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7793 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7794 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7795 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7796 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7797 Spacing));
7798 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7799 Spacing * 2));
7800 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7801 Spacing * 3));
7802 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7803 TmpInst.addOperand(Inst.getOperand(4));
7804 Inst = TmpInst;
7805 return true;
7806 }
7807
7808 case ARM::VST4dWB_fixed_Asm_8:
7809 case ARM::VST4dWB_fixed_Asm_16:
7810 case ARM::VST4dWB_fixed_Asm_32:
7811 case ARM::VST4qWB_fixed_Asm_8:
7812 case ARM::VST4qWB_fixed_Asm_16:
7813 case ARM::VST4qWB_fixed_Asm_32: {
7814 MCInst TmpInst;
7815 unsigned Spacing;
7816 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7817 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7818 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7819 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7820 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
7821 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7822 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7823 Spacing));
7824 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7825 Spacing * 2));
7826 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7827 Spacing * 3));
7828 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7829 TmpInst.addOperand(Inst.getOperand(4));
7830 Inst = TmpInst;
7831 return true;
7832 }
7833
7834 case ARM::VST4dWB_register_Asm_8:
7835 case ARM::VST4dWB_register_Asm_16:
7836 case ARM::VST4dWB_register_Asm_32:
7837 case ARM::VST4qWB_register_Asm_8:
7838 case ARM::VST4qWB_register_Asm_16:
7839 case ARM::VST4qWB_register_Asm_32: {
7840 MCInst TmpInst;
7841 unsigned Spacing;
7842 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7843 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7844 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7845 TmpInst.addOperand(Inst.getOperand(2)); // alignment
7846 TmpInst.addOperand(Inst.getOperand(3)); // Rm
7847 TmpInst.addOperand(Inst.getOperand(0)); // Vd
7848 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7849 Spacing));
7850 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7851 Spacing * 2));
7852 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
7853 Spacing * 3));
7854 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7855 TmpInst.addOperand(Inst.getOperand(5));
7856 Inst = TmpInst;
7857 return true;
7858 }
7859
7860 // Handle encoding choice for the shift-immediate instructions.
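// e.g. (illustrative) "lsls r0, r0, #2" outside an IT block can use the
// 16-bit tLSLri encoding, as long as no ".w" qualifier was written.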
7861 case ARM::t2LSLri:
7862 case ARM::t2LSRri:
7863 case ARM::t2ASRri: {
7864 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7865 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
7866 Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
7867 !(static_cast<ARMOperand &>(*Operands[3]).isToken() &&
7868 static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w")) {
7869 unsigned NewOpc;
7870 switch (Inst.getOpcode()) {
7871 default: llvm_unreachable("unexpected opcode");
7872 case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
7873 case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
7874 case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
7875 }
7876 // The Thumb1 operands aren't in the same order. Awesome, eh?
7877 MCInst TmpInst;
7878 TmpInst.setOpcode(NewOpc);
7879 TmpInst.addOperand(Inst.getOperand(0));
7880 TmpInst.addOperand(Inst.getOperand(5));
7881 TmpInst.addOperand(Inst.getOperand(1));
7882 TmpInst.addOperand(Inst.getOperand(2));
7883 TmpInst.addOperand(Inst.getOperand(3));
7884 TmpInst.addOperand(Inst.getOperand(4));
7885 Inst = TmpInst;
7886 return true;
7887 }
7888 return false;
7889 }
7890
7891 // Handle the Thumb2 mode MOV complex aliases.
7892 case ARM::t2MOVsr:
7893 case ARM::t2MOVSsr: {
7894 // Which instruction to expand to depends on the CCOut operand, on
7895 // whether we're in an IT block, and on whether the register operands
7896 // are low registers.
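// e.g. (illustrative) the pre-UAL alias "movs r0, r0, lsl r1" outside an
// IT block can be emitted as the 16-bit tLSLrr.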
7897 bool isNarrow = false;
7898 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7899 isARMLowRegister(Inst.getOperand(1).getReg()) &&
7900 isARMLowRegister(Inst.getOperand(2).getReg()) &&
7901 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
7902 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
7903 isNarrow = true;
7904 MCInst TmpInst;
7905 unsigned newOpc;
7906 switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
7907 default: llvm_unreachable("unexpected opcode!");
7908 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
7909 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
7910 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
7911 case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr; break;
7912 }
7913 TmpInst.setOpcode(newOpc);
7914 TmpInst.addOperand(Inst.getOperand(0)); // Rd
7915 if (isNarrow)
7916 TmpInst.addOperand(MCOperand::CreateReg(
7917 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
7918 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7919 TmpInst.addOperand(Inst.getOperand(2)); // Rm
7920 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7921 TmpInst.addOperand(Inst.getOperand(5));
7922 if (!isNarrow)
7923 TmpInst.addOperand(MCOperand::CreateReg(
7924 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
7925 Inst = TmpInst;
7926 return true;
7927 }
7928 case ARM::t2MOVsi:
7929 case ARM::t2MOVSsi: {
7930 // Which instruction to expand to depends on the CCOut operand, on
7931 // whether we're in an IT block, and on whether the register operands
7932 // are low registers.
7933 bool isNarrow = false;
7934 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7935 isARMLowRegister(Inst.getOperand(1).getReg()) &&
7936 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
7937 isNarrow = true;
7938 MCInst TmpInst;
7939 unsigned newOpc;
7940 switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
7941 default: llvm_unreachable("unexpected opcode!");
7942 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
7943 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
7944 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
7945 case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
7946 case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
7947 }
7948 unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
7949 if (Amount == 32) Amount = 0;
7950 TmpInst.setOpcode(newOpc);
7951 TmpInst.addOperand(Inst.getOperand(0)); // Rd
7952 if (isNarrow)
7953 TmpInst.addOperand(MCOperand::CreateReg(
7954 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
7955 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7956 if (newOpc != ARM::t2RRX)
7957 TmpInst.addOperand(MCOperand::CreateImm(Amount));
7958 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7959 TmpInst.addOperand(Inst.getOperand(4));
7960 if (!isNarrow)
7961 TmpInst.addOperand(MCOperand::CreateReg(
7962 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
7963 Inst = TmpInst;
7964 return true;
7965 }
7966 // Handle the ARM mode MOV complex aliases.
7967 case ARM::ASRr:
7968 case ARM::LSRr:
7969 case ARM::LSLr:
7970 case ARM::RORr: {
7971 ARM_AM::ShiftOpc ShiftTy;
7972 switch(Inst.getOpcode()) {
7973 default: llvm_unreachable("unexpected opcode!");
7974 case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
7975 case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
7976 case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
7977 case ARM::RORr: ShiftTy = ARM_AM::ror; break;
7978 }
7979 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
7980 MCInst TmpInst;
7981 TmpInst.setOpcode(ARM::MOVsr);
7982 TmpInst.addOperand(Inst.getOperand(0)); // Rd
7983 TmpInst.addOperand(Inst.getOperand(1)); // Rn
7984 TmpInst.addOperand(Inst.getOperand(2)); // Rm
7985 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
7986 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7987 TmpInst.addOperand(Inst.getOperand(4));
7988 TmpInst.addOperand(Inst.getOperand(5)); // cc_out
7989 Inst = TmpInst;
7990 return true;
7991 }
7992 case ARM::ASRi:
7993 case ARM::LSRi:
7994 case ARM::LSLi:
7995 case ARM::RORi: {
7996 ARM_AM::ShiftOpc ShiftTy;
7997 switch(Inst.getOpcode()) {
7998 default: llvm_unreachable("unexpected opcode!");
7999 case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
8000 case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
8001 case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
8002 case ARM::RORi: ShiftTy = ARM_AM::ror; break;
8003 }
8004 // A shift by zero is a plain MOVr, not a MOVsi.
8005 unsigned Amt = Inst.getOperand(2).getImm();
8006 unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
8007 // A shift by 32 should be encoded as 0 when permitted.
8008 if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
8009 Amt = 0;
8010 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
8011 MCInst TmpInst;
8012 TmpInst.setOpcode(Opc);
8013 TmpInst.addOperand(Inst.getOperand(0)); // Rd
8014 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8015 if (Opc == ARM::MOVsi)
8016 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
8017 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8018 TmpInst.addOperand(Inst.getOperand(4));
8019 TmpInst.addOperand(Inst.getOperand(5)); // cc_out
8020 Inst = TmpInst;
8021 return true;
8022 }
8023 case ARM::RRXi: {
8024 unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
8025 MCInst TmpInst;
8026 TmpInst.setOpcode(ARM::MOVsi);
8027 TmpInst.addOperand(Inst.getOperand(0)); // Rd
8028 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8029 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
8030 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8031 TmpInst.addOperand(Inst.getOperand(3));
8032 TmpInst.addOperand(Inst.getOperand(4)); // cc_out
8033 Inst = TmpInst;
8034 return true;
8035 }
8036 case ARM::t2LDMIA_UPD: {
8037 // If this is a load of a single register, then we should use
8038 // a post-indexed LDR instruction instead, per the ARM ARM.
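// e.g. (illustrative) "ldmia r0!, {r1}" becomes the post-indexed
// "ldr r1, [r0], #4".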
8039 if (Inst.getNumOperands() != 5)
8040 return false;
8041 MCInst TmpInst;
8042 TmpInst.setOpcode(ARM::t2LDR_POST);
8043 TmpInst.addOperand(Inst.getOperand(4)); // Rt
8044 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8045 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8046 TmpInst.addOperand(MCOperand::CreateImm(4));
8047 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8048 TmpInst.addOperand(Inst.getOperand(3));
8049 Inst = TmpInst;
8050 return true;
8051 }
8052 case ARM::t2STMDB_UPD: {
8053 // If this is a store of a single register, then we should use
8054 // a pre-indexed STR instruction instead, per the ARM ARM.
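// e.g. (illustrative) "stmdb r0!, {r1}" becomes the pre-indexed
// "str r1, [r0, #-4]!".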
8055 if (Inst.getNumOperands() != 5)
8056 return false;
8057 MCInst TmpInst;
8058 TmpInst.setOpcode(ARM::t2STR_PRE);
8059 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8060 TmpInst.addOperand(Inst.getOperand(4)); // Rt
8061 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8062 TmpInst.addOperand(MCOperand::CreateImm(-4));
8063 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8064 TmpInst.addOperand(Inst.getOperand(3));
8065 Inst = TmpInst;
8066 return true;
8067 }
8068 case ARM::LDMIA_UPD:
8069 // If this is a load of a single register via a 'pop', then we should use
8070 // a post-indexed LDR instruction instead, per the ARM ARM.
8071 if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" &&
8072 Inst.getNumOperands() == 5) {
8073 MCInst TmpInst;
8074 TmpInst.setOpcode(ARM::LDR_POST_IMM);
8075 TmpInst.addOperand(Inst.getOperand(4)); // Rt
8076 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8077 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8078 TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset
8079 TmpInst.addOperand(MCOperand::CreateImm(4));
8080 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8081 TmpInst.addOperand(Inst.getOperand(3));
8082 Inst = TmpInst;
8083 return true;
8084 }
8085 break;
8086 case ARM::STMDB_UPD:
8087 // If this is a store of a single register via a 'push', then we should use
8088 // a pre-indexed STR instruction instead, per the ARM ARM.
8089 if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" &&
8090 Inst.getNumOperands() == 5) {
8091 MCInst TmpInst;
8092 TmpInst.setOpcode(ARM::STR_PRE_IMM);
8093 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8094 TmpInst.addOperand(Inst.getOperand(4)); // Rt
8095 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
8096 TmpInst.addOperand(MCOperand::CreateImm(-4));
8097 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8098 TmpInst.addOperand(Inst.getOperand(3));
8099 Inst = TmpInst;
8100 }
8101 break;
8102 case ARM::t2ADDri12:
8103 // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
8104 // mnemonic was used (not "addw"), encoding T3 is preferred.
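// e.g. (illustrative) "add r0, r1, #16" is emitted as t2ADDri (T3), while
// writing "addw" keeps the T4 encoding.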
8105 if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "add" ||
8106 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
8107 break;
8108 Inst.setOpcode(ARM::t2ADDri);
8109 Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
8110 break;
8111 case ARM::t2SUBri12:
8112 // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
8113 // mnemonic was used (not "subw"), encoding T3 is preferred.
8114 if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "sub" ||
8115 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
8116 break;
8117 Inst.setOpcode(ARM::t2SUBri);
8118 Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
8119 break;
8120 case ARM::tADDi8:
8121 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
8122 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
8123 // to encoding T2 if <Rd> is specified and encoding T2 is preferred
8124 // to encoding T1 if <Rd> is omitted."
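// e.g. (illustrative) "adds r0, r0, #3" (Rd written explicitly) selects
// tADDi3, while "adds r0, #3" keeps tADDi8.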
8125 if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
8126 Inst.setOpcode(ARM::tADDi3);
8127 return true;
8128 }
8129 break;
8130 case ARM::tSUBi8:
8131 // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
8132 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
8133 // to encoding T2 if <Rd> is specified and encoding T2 is preferred
8134 // to encoding T1 if <Rd> is omitted."
8135 if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
8136 Inst.setOpcode(ARM::tSUBi3);
8137 return true;
8138 }
8139 break;
8140 case ARM::t2ADDri:
8141 case ARM::t2SUBri: {
8142 // If the destination and first source operand are the same, and
8143 // the flags are compatible with the current IT status, use encoding T2
8144 // instead of T3, for compatibility with the system 'as'. Make sure the
8145 // wide encoding wasn't explicitly requested with ".w".
8146 if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
8147 !isARMLowRegister(Inst.getOperand(0).getReg()) ||
8148 (unsigned)Inst.getOperand(2).getImm() > 255 ||
8149 ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
8150 (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
8151 (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
8152 static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w"))
8153 break;
8154 MCInst TmpInst;
8155 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
8156 ARM::tADDi8 : ARM::tSUBi8);
8157 TmpInst.addOperand(Inst.getOperand(0));
8158 TmpInst.addOperand(Inst.getOperand(5));
8159 TmpInst.addOperand(Inst.getOperand(0));
8160 TmpInst.addOperand(Inst.getOperand(2));
8161 TmpInst.addOperand(Inst.getOperand(3));
8162 TmpInst.addOperand(Inst.getOperand(4));
8163 Inst = TmpInst;
8164 return true;
8165 }
8166 case ARM::t2ADDrr: {
8167 // If the destination and first source operand are the same, and
8168 // there's no setting of the flags, use encoding T2 instead of T3.
8169 // Note that this is only for ADD, not SUB. This mirrors the system
8170 // 'as' behaviour. Make sure the wide encoding wasn't explicit.
8171 if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
8172 Inst.getOperand(5).getReg() != 0 ||
8173 (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
8174 static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w"))
8175 break;
8176 MCInst TmpInst;
8177 TmpInst.setOpcode(ARM::tADDhirr);
8178 TmpInst.addOperand(Inst.getOperand(0));
8179 TmpInst.addOperand(Inst.getOperand(0));
8180 TmpInst.addOperand(Inst.getOperand(2));
8181 TmpInst.addOperand(Inst.getOperand(3));
8182 TmpInst.addOperand(Inst.getOperand(4));
8183 Inst = TmpInst;
8184 return true;
8185 }
8186 case ARM::tADDrSP: {
8187 // If the non-SP source operand and the destination operand are not the
8188 // same, we need to use the 32-bit encoding if it's available.
8189 if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
8190 Inst.setOpcode(ARM::t2ADDrr);
8191 Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
8192 return true;
8193 }
8194 break;
8195 }
8196 case ARM::tB:
8197 // A Thumb conditional branch outside of an IT block is a tBcc.
8198 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
8199 Inst.setOpcode(ARM::tBcc);
8200 return true;
8201 }
8202 break;
8203 case ARM::t2B:
8204 // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
8205 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
8206 Inst.setOpcode(ARM::t2Bcc);
8207 return true;
8208 }
8209 break;
8210 case ARM::t2Bcc:
8211 // If the conditional is AL or we're in an IT block, we really want t2B.
8212 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
8213 Inst.setOpcode(ARM::t2B);
8214 return true;
8215 }
8216 break;
8217 case ARM::tBcc:
8218 // If the conditional is AL, we really want tB.
8219 if (Inst.getOperand(1).getImm() == ARMCC::AL) {
8220 Inst.setOpcode(ARM::tB);
8221 return true;
8222 }
8223 break;
8224 case ARM::tLDMIA: {
8225 // If the register list contains any high registers, or if the writeback
8226 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
8227 // instead if we're in Thumb2. Otherwise, this should have generated
8228 // an error in validateInstruction().
8229 unsigned Rn = Inst.getOperand(0).getReg();
8230 bool hasWritebackToken =
8231 (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
8232 static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
8233 bool listContainsBase;
8234 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
8235 (!listContainsBase && !hasWritebackToken) ||
8236 (listContainsBase && hasWritebackToken)) {
8237 // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
8238 assert (isThumbTwo());
8239 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
8240 // If we're switching to the updating version, we need to insert
8241 // the writeback tied operand.
8242 if (hasWritebackToken)
8243 Inst.insert(Inst.begin(),
8244 MCOperand::CreateReg(Inst.getOperand(0).getReg()));
8245 return true;
8246 }
8247 break;
8248 }
8249 case ARM::tSTMIA_UPD: {
8250 // If the register list contains any high registers, we need to use
8251 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
8252 // should have generated an error in validateInstruction().
8253 unsigned Rn = Inst.getOperand(0).getReg();
8254 bool listContainsBase;
8255 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
8256 // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
8257 assert (isThumbTwo());
8258 Inst.setOpcode(ARM::t2STMIA_UPD);
8259 return true;
8260 }
8261 break;
8262 }
8263 case ARM::tPOP: {
8264 bool listContainsBase;
8265 // If the register list contains any high registers, we need to use
8266 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
8267 // should have generated an error in validateInstruction().
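// e.g. (illustrative) "pop {r4, r8}" must become t2LDMIA_UPD with SP as
// the base register.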
8268 if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
8269 return false;
8270 assert (isThumbTwo());
8271 Inst.setOpcode(ARM::t2LDMIA_UPD);
8272 // Add the base register and writeback operands.
8273 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
8274 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
8275 return true;
8276 }
8277 case ARM::tPUSH: {
8278 bool listContainsBase;
8279 if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
8280 return false;
8281 assert (isThumbTwo());
8282 Inst.setOpcode(ARM::t2STMDB_UPD);
8283 // Add the base register and writeback operands.
8284 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
8285 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
8286 return true;
8287 }
8288 case ARM::t2MOVi: {
8289 // If we can use the 16-bit encoding and the user didn't explicitly
8290 // request the 32-bit variant, transform it here.
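// e.g. (illustrative) "movs r0, #1" outside an IT block, or "mov r0, #1"
// inside one, can use the 16-bit tMOVi8 unless ".w" was written.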
8291 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8292 (unsigned)Inst.getOperand(1).getImm() <= 255 &&
8293 ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
8294 Inst.getOperand(4).getReg() == ARM::CPSR) ||
8295 (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
8296 (!static_cast<ARMOperand &>(*Operands[2]).isToken() ||
8297 static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) {
8298 // The operands aren't in the same order for tMOVi8...
8299 MCInst TmpInst;
8300 TmpInst.setOpcode(ARM::tMOVi8);
8301 TmpInst.addOperand(Inst.getOperand(0));
8302 TmpInst.addOperand(Inst.getOperand(4));
8303 TmpInst.addOperand(Inst.getOperand(1));
8304 TmpInst.addOperand(Inst.getOperand(2));
8305 TmpInst.addOperand(Inst.getOperand(3));
8306 Inst = TmpInst;
8307 return true;
8308 }
8309 break;
8310 }
8311 case ARM::t2MOVr: {
8312 // If we can use the 16-bit encoding and the user didn't explicitly
8313 // request the 32-bit variant, transform it here.
8314 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8315 isARMLowRegister(Inst.getOperand(1).getReg()) &&
8316 Inst.getOperand(2).getImm() == ARMCC::AL &&
8317 Inst.getOperand(4).getReg() == ARM::CPSR &&
8318 (!static_cast<ARMOperand &>(*Operands[2]).isToken() ||
8319 static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) {
8320 // The operands aren't the same for tMOV[S]r... (no cc_out)
8321 MCInst TmpInst;
8322 TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
8323 TmpInst.addOperand(Inst.getOperand(0));
8324 TmpInst.addOperand(Inst.getOperand(1));
8325 TmpInst.addOperand(Inst.getOperand(2));
8326 TmpInst.addOperand(Inst.getOperand(3));
8327 Inst = TmpInst;
8328 return true;
8329 }
8330 break;
8331 }
8332 case ARM::t2SXTH:
8333 case ARM::t2SXTB:
8334 case ARM::t2UXTH:
8335 case ARM::t2UXTB: {
8336 // If we can use the 16-bit encoding and the user didn't explicitly
8337 // request the 32-bit variant, transform it here.
8338 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8339 isARMLowRegister(Inst.getOperand(1).getReg()) &&
8340 Inst.getOperand(2).getImm() == 0 &&
8341 (!static_cast<ARMOperand &>(*Operands[2]).isToken() ||
8342 static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) {
8343 unsigned NewOpc;
8344 switch (Inst.getOpcode()) {
8345 default: llvm_unreachable("Illegal opcode!");
8346 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
8347 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
8348 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
8349 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
8350 }
8351 // The operands aren't the same for thumb1 (no rotate operand).
8352 MCInst TmpInst;
8353 TmpInst.setOpcode(NewOpc);
8354 TmpInst.addOperand(Inst.getOperand(0));
8355 TmpInst.addOperand(Inst.getOperand(1));
8356 TmpInst.addOperand(Inst.getOperand(3));
8357 TmpInst.addOperand(Inst.getOperand(4));
8358 Inst = TmpInst;
8359 return true;
8360 }
8361 break;
8362 }
8363 case ARM::MOVsi: {
8364 ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
8365 // rrx shifts and asr/lsr of #32 are encoded as 0
8366 if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
8367 return false;
8368 if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
8369 // Shifting by zero is accepted as a vanilla 'MOVr'
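// e.g. (illustrative) "mov r0, r1, lsl #0" is emitted as a plain
// "mov r0, r1".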
8370 MCInst TmpInst;
8371 TmpInst.setOpcode(ARM::MOVr);
8372 TmpInst.addOperand(Inst.getOperand(0));
8373 TmpInst.addOperand(Inst.getOperand(1));
8374 TmpInst.addOperand(Inst.getOperand(3));
8375 TmpInst.addOperand(Inst.getOperand(4));
8376 TmpInst.addOperand(Inst.getOperand(5));
8377 Inst = TmpInst;
8378 return true;
8379 }
8380 return false;
8381 }
8382 case ARM::ANDrsi:
8383 case ARM::ORRrsi:
8384 case ARM::EORrsi:
8385 case ARM::BICrsi:
8386 case ARM::SUBrsi:
8387 case ARM::ADDrsi: {
8388 unsigned newOpc;
8389 ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
8390 if (SOpc == ARM_AM::rrx) return false;
8391 switch (Inst.getOpcode()) {
8392 default: llvm_unreachable("unexpected opcode!");
8393 case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
8394 case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
8395 case ARM::EORrsi: newOpc = ARM::EORrr; break;
8396 case ARM::BICrsi: newOpc = ARM::BICrr; break;
8397 case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
8398 case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
8399 }
8400 // If the shift is by zero, use the non-shifted instruction definition.
8401 // The exception is right shifts, where an encoded amount of 0 means 32.
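// e.g. (illustrative) "and r0, r1, r2, lsl #0" collapses to ANDrr, but an
// lsr/asr amount of 32 (encoded as 0) must keep the shifted form.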
8402 if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
8403 !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
8404 MCInst TmpInst;
8405 TmpInst.setOpcode(newOpc);
8406 TmpInst.addOperand(Inst.getOperand(0));
8407 TmpInst.addOperand(Inst.getOperand(1));
8408 TmpInst.addOperand(Inst.getOperand(2));
8409 TmpInst.addOperand(Inst.getOperand(4));
8410 TmpInst.addOperand(Inst.getOperand(5));
8411 TmpInst.addOperand(Inst.getOperand(6));
8412 Inst = TmpInst;
8413 return true;
8414 }
8415 return false;
8416 }
8417 case ARM::ITasm:
8418 case ARM::t2IT: {
8419 // In the encoding, a mask bit equal to the low bit of the condition
8420 // code means 't' and a differing bit means 'e'. The parser always
8421 // builds the mask with 1 meaning 't', so XOR-toggle the t/e bits if
8422 // the low bit of the condition code is zero.
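// Illustrative example (assumed values): a mask of 0b0100 has TZ == 2,
// (0xE << 2) & 0xF == 0b1000, and the toggled mask is 0b1100.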
8423 MCOperand &MO = Inst.getOperand(1);
8424 unsigned Mask = MO.getImm();
8425 unsigned OrigMask = Mask;
8426 unsigned TZ = countTrailingZeros(Mask);
8427 if ((Inst.getOperand(0).getImm() & 1) == 0) {
8428 assert(Mask && TZ <= 3 && "illegal IT mask value!");
8429 Mask ^= (0xE << TZ) & 0xF;
8430 }
8431 MO.setImm(Mask);
8432
8433 // Set up the IT block state according to the IT instruction we just
8434 // matched.
8435 assert(!inITBlock() && "nested IT blocks?!");
8436 ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
8437 ITState.Mask = OrigMask; // Use the original mask, not the updated one.
8438 ITState.CurPosition = 0;
8439 ITState.FirstCond = true;
8440 break;
8441 }
8442 case ARM::t2LSLrr:
8443 case ARM::t2LSRrr:
8444 case ARM::t2ASRrr:
8445 case ARM::t2SBCrr:
8446 case ARM::t2RORrr:
8447 case ARM::t2BICrr:
8448 {
8449 // Assemblers should use the narrow encodings of these instructions when permissible.
8450 if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
8451 isARMLowRegister(Inst.getOperand(2).getReg())) &&
8452 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
8453 ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
8454 (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
8455 (!static_cast<ARMOperand &>(*Operands[3]).isToken() ||
8456 !static_cast<ARMOperand &>(*Operands[3]).getToken().equals_lower(
8457 ".w"))) {
8458 unsigned NewOpc;
8459 switch (Inst.getOpcode()) {
8460 default: llvm_unreachable("unexpected opcode");
8461 case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
8462 case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
8463 case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
8464 case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
8465 case ARM::t2RORrr: NewOpc = ARM::tROR; break;
8466 case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
8467 }
8468 MCInst TmpInst;
8469 TmpInst.setOpcode(NewOpc);
8470 TmpInst.addOperand(Inst.getOperand(0));
8471 TmpInst.addOperand(Inst.getOperand(5));
8472 TmpInst.addOperand(Inst.getOperand(1));
8473 TmpInst.addOperand(Inst.getOperand(2));
8474 TmpInst.addOperand(Inst.getOperand(3));
8475 TmpInst.addOperand(Inst.getOperand(4));
8476 Inst = TmpInst;
8477 return true;
8478 }
8479 return false;
8480 }
8481 case ARM::t2ANDrr:
8482 case ARM::t2EORrr:
8483 case ARM::t2ADCrr:
8484 case ARM::t2ORRrr:
8485 {
8486 // Assemblers should use the narrow encodings of these instructions when permissible.
8487 // These instructions are special in that they are commutable, so shorter encodings
8488 // are available more often.
8489 if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
8490 isARMLowRegister(Inst.getOperand(2).getReg())) &&
8491 (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
8492 Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
8493 ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
8494 (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
8495 (!static_cast<ARMOperand &>(*Operands[3]).isToken() ||
8496 !static_cast<ARMOperand &>(*Operands[3]).getToken().equals_lower(
8497 ".w"))) {
8498 unsigned NewOpc;
8499 switch (Inst.getOpcode()) {
8500 default: llvm_unreachable("unexpected opcode");
8501 case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
8502 case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
8503 case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
8504 case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
8505 }
8506 MCInst TmpInst;
8507 TmpInst.setOpcode(NewOpc);
8508 TmpInst.addOperand(Inst.getOperand(0));
8509 TmpInst.addOperand(Inst.getOperand(5));
8510 if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
8511 TmpInst.addOperand(Inst.getOperand(1));
8512 TmpInst.addOperand(Inst.getOperand(2));
8513 } else {
8514 TmpInst.addOperand(Inst.getOperand(2));
8515 TmpInst.addOperand(Inst.getOperand(1));
8516 }
8517 TmpInst.addOperand(Inst.getOperand(3));
8518 TmpInst.addOperand(Inst.getOperand(4));
8519 Inst = TmpInst;
8520 return true;
8521 }
8522 return false;
8523 }
8524 }
8525 return false;
8526 }
8527
8528 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
8529 // 16-bit thumb arithmetic instructions either require or preclude the 'S'
8530 // suffix depending on whether they're in an IT block or not.
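 // Illustrative example: for the 16-bit encodings, 'adds r0, r1, r2' is the
 // form required outside an IT block, and 'addeq r0, r1, r2' the form
 // required inside one.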
8531 unsigned Opc = Inst.getOpcode();
8532 const MCInstrDesc &MCID = MII.get(Opc);
8533 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
8534 assert(MCID.hasOptionalDef() &&
8535 "optionally flag setting instruction missing optional def operand");
8536 assert(MCID.NumOperands == Inst.getNumOperands() &&
8537 "operand count mismatch!");
8538 // Find the optional-def operand (cc_out).
8539 unsigned OpNo;
8540 for (OpNo = 0;
8541 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands;
8542 ++OpNo)
8543 ;
8544 // If we're parsing Thumb1, reject it completely.
8545 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
8546 return Match_MnemonicFail;
8547 // If we're parsing Thumb2, which form is legal depends on whether we're
8548 // in an IT block.
8549 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
8550 !inITBlock())
8551 return Match_RequiresITBlock;
8552 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
8553 inITBlock())
8554 return Match_RequiresNotITBlock;
8555 }
8556 // Some high-register supporting Thumb1 encodings only allow both registers
8557 // to be from r0-r7 when in Thumb2.
8558 else if (Opc == ARM::tADDhirr && isThumbOne() && !hasV6MOps() &&
8559 isARMLowRegister(Inst.getOperand(1).getReg()) &&
8560 isARMLowRegister(Inst.getOperand(2).getReg()))
8561 return Match_RequiresThumb2;
8562 // Others only require ARMv6 or later.
8563 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
8564 isARMLowRegister(Inst.getOperand(0).getReg()) &&
8565 isARMLowRegister(Inst.getOperand(1).getReg()))
8566 return Match_RequiresV6;
8567 return Match_Success;
8568 }
8569
8570 namespace llvm {
8571 template <> inline bool IsCPSRDead<MCInst>(MCInst *Instr) {
8572 return true; // In an assembly source, no need to second-guess
8573 }
8574 }
8575
8576 static const char *getSubtargetFeatureName(uint64_t Val);
8577 bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
8578 OperandVector &Operands,
8579 MCStreamer &Out, uint64_t &ErrorInfo,
8580 bool MatchingInlineAsm) {
8581 MCInst Inst;
8582 unsigned MatchResult;
8583
8584 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
8585 MatchingInlineAsm);
8586 switch (MatchResult) {
8587 case Match_Success:
8588 // Context sensitive operand constraints aren't handled by the matcher,
8589 // so check them here.
8590 if (validateInstruction(Inst, Operands)) {
8591 // Still progress the IT block, otherwise one wrong condition causes
8592 // nasty cascading errors.
8593 forwardITPosition();
8594 return true;
8595 }
8596
8597 { // processInstruction() updates inITBlock state, we need to save it away
8598 bool wasInITBlock = inITBlock();
8599
8600 // Some instructions need post-processing to, for example, tweak which
8601 // encoding is selected. Loop on it while changes happen so the
8602 // individual transformations can chain off each other. E.g.,
8603 // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
8604 while (processInstruction(Inst, Operands, Out))
8605 ;
8606
8607 // Only after the instruction is fully processed can we validate it
8608 if (wasInITBlock && hasV8Ops() && isThumb() &&
8609 !isV8EligibleForIT(&Inst)) {
8610 Warning(IDLoc, "deprecated instruction in IT block");
8611 }
8612 }
8613
8614 // Only move forward at the very end so that everything in validate
8615 // and process gets a consistent answer about whether we're in an IT
8616 // block.
8617 forwardITPosition();
8618
8619 // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
8620 // doesn't actually encode.
8621 if (Inst.getOpcode() == ARM::ITasm)
8622 return false;
8623
8624 Inst.setLoc(IDLoc);
8625 Out.EmitInstruction(Inst, STI);
8626 return false;
8627 case Match_MissingFeature: {
8628 assert(ErrorInfo && "Unknown missing feature!");
8629 // Special case the error message for the very common case where only
8630 // a single subtarget feature is missing (Thumb vs. ARM, e.g.).
8631 std::string Msg = "instruction requires:";
8632 uint64_t Mask = 1;
8633 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
8634 if (ErrorInfo & Mask) {
8635 Msg += " ";
8636 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
8637 }
8638 Mask <<= 1;
8639 }
8640 return Error(IDLoc, Msg);
8641 }
8642 case Match_InvalidOperand: {
8643 SMLoc ErrorLoc = IDLoc;
8644 if (ErrorInfo != ~0ULL) {
8645 if (ErrorInfo >= Operands.size())
8646 return Error(IDLoc, "too few operands for instruction");
8647
8648 ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc();
8649 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
8650 }
8651
8652 return Error(ErrorLoc, "invalid operand for instruction");
8653 }
8654 case Match_MnemonicFail:
8655 return Error(IDLoc, "invalid instruction",
8656 ((ARMOperand &)*Operands[0]).getLocRange());
8657 case Match_RequiresNotITBlock:
8658 return Error(IDLoc, "flag setting instruction only valid outside IT block");
8659 case Match_RequiresITBlock:
8660 return Error(IDLoc, "instruction only valid inside IT block");
8661 case Match_RequiresV6:
8662 return Error(IDLoc, "instruction variant requires ARMv6 or later");
8663 case Match_RequiresThumb2:
8664 return Error(IDLoc, "instruction variant requires Thumb2");
8665 case Match_ImmRange0_15: {
8666 SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc();
8667 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
8668 return Error(ErrorLoc, "immediate operand must be in the range [0,15]");
8669 }
8670 case Match_ImmRange0_239: {
8671 SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc();
8672 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
8673 return Error(ErrorLoc, "immediate operand must be in the range [0,239]");
8674 }
8675 case Match_AlignedMemoryRequiresNone:
8676 case Match_DupAlignedMemoryRequiresNone:
8677 case Match_AlignedMemoryRequires16:
8678 case Match_DupAlignedMemoryRequires16:
8679 case Match_AlignedMemoryRequires32:
8680 case Match_DupAlignedMemoryRequires32:
8681 case Match_AlignedMemoryRequires64:
8682 case Match_DupAlignedMemoryRequires64:
8683 case Match_AlignedMemoryRequires64or128:
8684 case Match_DupAlignedMemoryRequires64or128:
8685 case Match_AlignedMemoryRequires64or128or256:
8686 {
8687 SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getAlignmentLoc();
8688 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
8689 switch (MatchResult) {
8690 default:
8691 llvm_unreachable("Missing Match_Aligned type");
8692 case Match_AlignedMemoryRequiresNone:
8693 case Match_DupAlignedMemoryRequiresNone:
8694 return Error(ErrorLoc, "alignment must be omitted");
8695 case Match_AlignedMemoryRequires16:
8696 case Match_DupAlignedMemoryRequires16:
8697 return Error(ErrorLoc, "alignment must be 16 or omitted");
8698 case Match_AlignedMemoryRequires32:
8699 case Match_DupAlignedMemoryRequires32:
8700 return Error(ErrorLoc, "alignment must be 32 or omitted");
8701 case Match_AlignedMemoryRequires64:
8702 case Match_DupAlignedMemoryRequires64:
8703 return Error(ErrorLoc, "alignment must be 64 or omitted");
8704 case Match_AlignedMemoryRequires64or128:
8705 case Match_DupAlignedMemoryRequires64or128:
8706 return Error(ErrorLoc, "alignment must be 64, 128 or omitted");
8707 case Match_AlignedMemoryRequires64or128or256:
8708 return Error(ErrorLoc, "alignment must be 64, 128, 256 or omitted");
8709 }
8710 }
8711 }
8712
8713 llvm_unreachable("Implement any new match types added!");
8714 }
8715
8716 /// ParseDirective parses the ARM-specific directives
8717 bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
8718 const MCObjectFileInfo::Environment Format =
8719 getContext().getObjectFileInfo()->getObjectFileType();
8720 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
8721 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
8722
8723 StringRef IDVal = DirectiveID.getIdentifier();
8724 if (IDVal == ".word")
8725 return parseLiteralValues(4, DirectiveID.getLoc());
8726 else if (IDVal == ".short" || IDVal == ".hword")
8727 return parseLiteralValues(2, DirectiveID.getLoc());
8728 else if (IDVal == ".thumb")
8729 return parseDirectiveThumb(DirectiveID.getLoc());
8730 else if (IDVal == ".arm")
8731 return parseDirectiveARM(DirectiveID.getLoc());
8732 else if (IDVal == ".thumb_func")
8733 return parseDirectiveThumbFunc(DirectiveID.getLoc());
8734 else if (IDVal == ".code")
8735 return parseDirectiveCode(DirectiveID.getLoc());
8736 else if (IDVal == ".syntax")
8737 return parseDirectiveSyntax(DirectiveID.getLoc());
8738 else if (IDVal == ".unreq")
8739 return parseDirectiveUnreq(DirectiveID.getLoc());
8740 else if (IDVal == ".fnend")
8741 return parseDirectiveFnEnd(DirectiveID.getLoc());
8742 else if (IDVal == ".cantunwind")
8743 return parseDirectiveCantUnwind(DirectiveID.getLoc());
8744 else if (IDVal == ".personality")
8745 return parseDirectivePersonality(DirectiveID.getLoc());
8746 else if (IDVal == ".handlerdata")
8747 return parseDirectiveHandlerData(DirectiveID.getLoc());
8748 else if (IDVal == ".setfp")
8749 return parseDirectiveSetFP(DirectiveID.getLoc());
8750 else if (IDVal == ".pad")
8751 return parseDirectivePad(DirectiveID.getLoc());
8752 else if (IDVal == ".save")
8753 return parseDirectiveRegSave(DirectiveID.getLoc(), false);
8754 else if (IDVal == ".vsave")
8755 return parseDirectiveRegSave(DirectiveID.getLoc(), true);
8756 else if (IDVal == ".ltorg" || IDVal == ".pool")
8757 return parseDirectiveLtorg(DirectiveID.getLoc());
8758 else if (IDVal == ".even")
8759 return parseDirectiveEven(DirectiveID.getLoc());
8760 else if (IDVal == ".personalityindex")
8761 return parseDirectivePersonalityIndex(DirectiveID.getLoc());
8762 else if (IDVal == ".unwind_raw")
8763 return parseDirectiveUnwindRaw(DirectiveID.getLoc());
8764 else if (IDVal == ".movsp")
8765 return parseDirectiveMovSP(DirectiveID.getLoc());
8766 else if (IDVal == ".arch_extension")
8767 return parseDirectiveArchExtension(DirectiveID.getLoc());
8768 else if (IDVal == ".align")
8769 return parseDirectiveAlign(DirectiveID.getLoc());
8770 else if (IDVal == ".thumb_set")
8771 return parseDirectiveThumbSet(DirectiveID.getLoc());
8772
8773 if (!IsMachO && !IsCOFF) {
8774 if (IDVal == ".arch")
8775 return parseDirectiveArch(DirectiveID.getLoc());
8776 else if (IDVal == ".cpu")
8777 return parseDirectiveCPU(DirectiveID.getLoc());
8778 else if (IDVal == ".eabi_attribute")
8779 return parseDirectiveEabiAttr(DirectiveID.getLoc());
8780 else if (IDVal == ".fpu")
8781 return parseDirectiveFPU(DirectiveID.getLoc());
8782 else if (IDVal == ".fnstart")
8783 return parseDirectiveFnStart(DirectiveID.getLoc());
8784 else if (IDVal == ".inst")
8785 return parseDirectiveInst(DirectiveID.getLoc());
8786 else if (IDVal == ".inst.n")
8787 return parseDirectiveInst(DirectiveID.getLoc(), 'n');
8788 else if (IDVal == ".inst.w")
8789 return parseDirectiveInst(DirectiveID.getLoc(), 'w');
8790 else if (IDVal == ".object_arch")
8791 return parseDirectiveObjectArch(DirectiveID.getLoc());
8792 else if (IDVal == ".tlsdescseq")
8793 return parseDirectiveTLSDescSeq(DirectiveID.getLoc());
8794 }
8795
8796 return true;
8797 }
8798
8799 /// parseLiteralValues
8800 /// ::= .hword expression [, expression]*
8801 /// ::= .short expression [, expression]*
8802 /// ::= .word expression [, expression]*
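/// Illustrative uses:
///   .word  0x12345678
///   .short 0x1234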
8803 bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
8804 MCAsmParser &Parser = getParser();
8805 if (getLexer().isNot(AsmToken::EndOfStatement)) {
8806 for (;;) {
8807 const MCExpr *Value;
8808 if (getParser().parseExpression(Value)) {
8809 Parser.eatToEndOfStatement();
8810 return false;
8811 }
8812
8813 getParser().getStreamer().EmitValue(Value, Size);
8814
8815 if (getLexer().is(AsmToken::EndOfStatement))
8816 break;
8817
8818 // FIXME: Improve diagnostic.
8819 if (getLexer().isNot(AsmToken::Comma)) {
8820 Error(L, "unexpected token in directive");
8821 return false;
8822 }
8823 Parser.Lex();
8824 }
8825 }
8826
8827 Parser.Lex();
8828 return false;
8829 }
8830
8831 /// parseDirectiveThumb
8832 /// ::= .thumb
8833 bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
8834 MCAsmParser &Parser = getParser();
8835 if (getLexer().isNot(AsmToken::EndOfStatement)) {
8836 Error(L, "unexpected token in directive");
8837 return false;
8838 }
8839 Parser.Lex();
8840
8841 if (!hasThumb()) {
8842 Error(L, "target does not support Thumb mode");
8843 return false;
8844 }
8845
8846 if (!isThumb())
8847 SwitchMode();
8848
8849 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
8850 return false;
8851 }
8852
8853 /// parseDirectiveARM
8854 /// ::= .arm
8855 bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
8856 MCAsmParser &Parser = getParser();
8857 if (getLexer().isNot(AsmToken::EndOfStatement)) {
8858 Error(L, "unexpected token in directive");
8859 return false;
8860 }
8861 Parser.Lex();
8862
8863 if (!hasARM()) {
8864 Error(L, "target does not support ARM mode");
8865 return false;
8866 }
8867
8868 if (isThumb())
8869 SwitchMode();
8870
8871 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
8872 return false;
8873 }
8874
8875 void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
8876 if (NextSymbolIsThumb) {
8877 getParser().getStreamer().EmitThumbFunc(Symbol);
8878 NextSymbolIsThumb = false;
8879 }
8880 }
8881
8882 /// parseDirectiveThumbFunc
8883 /// ::= .thumb_func symbol_name
8884 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
8885 MCAsmParser &Parser = getParser();
8886 const auto Format = getContext().getObjectFileInfo()->getObjectFileType();
8887 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
8888
8889 // Darwin asm has an optional function name after the .thumb_func directive;
8890 // ELF doesn't.
8891 if (IsMachO) {
8892 const AsmToken &Tok = Parser.getTok();
8893 if (Tok.isNot(AsmToken::EndOfStatement)) {
8894 if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String)) {
8895 Error(L, "unexpected token in .thumb_func directive");
8896 return false;
8897 }
8898
8899 MCSymbol *Func =
8900 getParser().getContext().GetOrCreateSymbol(Tok.getIdentifier());
8901 getParser().getStreamer().EmitThumbFunc(Func);
8902 Parser.Lex(); // Consume the identifier token.
8903 return false;
8904 }
8905 }
8906
8907 if (getLexer().isNot(AsmToken::EndOfStatement)) {
8908 Error(Parser.getTok().getLoc(), "unexpected token in directive");
8909 Parser.eatToEndOfStatement();
8910 return false;
8911 }
8912
8913 NextSymbolIsThumb = true;
8914 return false;
8915 }
8916
8917 /// parseDirectiveSyntax
8918 /// ::= .syntax unified | divided
8919 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
8920 MCAsmParser &Parser = getParser();
8921 const AsmToken &Tok = Parser.getTok();
8922 if (Tok.isNot(AsmToken::Identifier)) {
8923 Error(L, "unexpected token in .syntax directive");
8924 return false;
8925 }
8926
8927 StringRef Mode = Tok.getString();
8928 if (Mode == "unified" || Mode == "UNIFIED") {
8929 Parser.Lex();
8930 } else if (Mode == "divided" || Mode == "DIVIDED") {
8931 Error(L, "'.syntax divided' arm assembly not supported");
8932 return false;
8933 } else {
8934 Error(L, "unrecognized syntax mode in .syntax directive");
8935 return false;
8936 }
8937
8938 if (getLexer().isNot(AsmToken::EndOfStatement)) {
8939 Error(Parser.getTok().getLoc(), "unexpected token in directive");
8940 return false;
8941 }
8942 Parser.Lex();
8943
8944 // TODO tell the MC streamer the mode
8945 // getParser().getStreamer().Emit???();
8946 return false;
8947 }
8948
8949 /// parseDirectiveCode
8950 /// ::= .code 16 | 32
8951 bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
8952 MCAsmParser &Parser = getParser();
8953 const AsmToken &Tok = Parser.getTok();
8954 if (Tok.isNot(AsmToken::Integer)) {
8955 Error(L, "unexpected token in .code directive");
8956 return false;
8957 }
8958 int64_t Val = Parser.getTok().getIntVal();
8959 if (Val != 16 && Val != 32) {
8960 Error(L, "invalid operand to .code directive");
8961 return false;
8962 }
8963 Parser.Lex();
8964
8965 if (getLexer().isNot(AsmToken::EndOfStatement)) {
8966 Error(Parser.getTok().getLoc(), "unexpected token in directive");
8967 return false;
8968 }
8969 Parser.Lex();
8970
8971 if (Val == 16) {
8972 if (!hasThumb()) {
8973 Error(L, "target does not support Thumb mode");
8974 return false;
8975 }
8976
8977 if (!isThumb())
8978 SwitchMode();
8979 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
8980 } else {
8981 if (!hasARM()) {
8982 Error(L, "target does not support ARM mode");
8983 return false;
8984 }
8985
8986 if (isThumb())
8987 SwitchMode();
8988 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
8989 }
8990
8991 return false;
8992 }
8993
8994 /// parseDirectiveReq
8995 /// ::= name .req registername
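/// Illustrative use:
///   fp .req r11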
8996 bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
8997 MCAsmParser &Parser = getParser();
8998 Parser.Lex(); // Eat the '.req' token.
8999 unsigned Reg;
9000 SMLoc SRegLoc, ERegLoc;
9001 if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
9002 Parser.eatToEndOfStatement();
9003 Error(SRegLoc, "register name expected");
9004 return false;
9005 }
9006
9007 // Shouldn't be anything else.
9008 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
9009 Parser.eatToEndOfStatement();
9010 Error(Parser.getTok().getLoc(), "unexpected input in .req directive.");
9011 return false;
9012 }
9013
9014 Parser.Lex(); // Consume the EndOfStatement
9015
9016 if (RegisterReqs.insert(std::make_pair(Name, Reg)).first->second != Reg) {
9017 Error(SRegLoc, "redefinition of '" + Name + "' does not match original.");
9018 return false;
9019 }
9020
9021 return false;
9022 }
9023
9024 /// parseDirectiveUnreq
9025 /// ::= .unreq registername
9026 bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
9027 MCAsmParser &Parser = getParser();
9028 if (Parser.getTok().isNot(AsmToken::Identifier)) {
9029 Parser.eatToEndOfStatement();
9030 Error(L, "unexpected input in .unreq directive.");
9031 return false;
9032 }
9033 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
9034 Parser.Lex(); // Eat the identifier.
9035 return false;
9036 }
9037
9038 /// parseDirectiveArch
9039 /// ::= .arch token
9040 bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
9041 StringRef Arch = getParser().parseStringToEndOfStatement().trim();
9042
9043 unsigned ID = StringSwitch<unsigned>(Arch)
9044 #define ARM_ARCH_NAME(NAME, ID, DEFAULT_CPU_NAME, DEFAULT_CPU_ARCH) \
9045 .Case(NAME, ARM::ID)
9046 #define ARM_ARCH_ALIAS(NAME, ID) \
9047 .Case(NAME, ARM::ID)
9048 #include "MCTargetDesc/ARMArchName.def"
9049 .Default(ARM::INVALID_ARCH);
9050
9051 if (ID == ARM::INVALID_ARCH) {
9052 Error(L, "Unknown arch name");
9053 return false;
9054 }
9055
9056 getTargetStreamer().emitArch(ID);
9057 return false;
9058 }
9059
9060 /// parseDirectiveEabiAttr
9061 /// ::= .eabi_attribute int, int [, "str"]
9062 /// ::= .eabi_attribute Tag_name, int [, "str"]
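/// Illustrative use (numeric tag and value; the specific numbers are only an
/// example):
///   .eabi_attribute 6, 10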
9063 bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
9064 MCAsmParser &Parser = getParser();
9065 int64_t Tag;
9066 SMLoc TagLoc;
9067 TagLoc = Parser.getTok().getLoc();
9068 if (Parser.getTok().is(AsmToken::Identifier)) {
9069 StringRef Name = Parser.getTok().getIdentifier();
9070 Tag = ARMBuildAttrs::AttrTypeFromString(Name);
9071 if (Tag == -1) {
9072 Error(TagLoc, "attribute name not recognised: " + Name);
9073 Parser.eatToEndOfStatement();
9074 return false;
9075 }
9076 Parser.Lex();
9077 } else {
9078 const MCExpr *AttrExpr;
9079
9080 TagLoc = Parser.getTok().getLoc();
9081 if (Parser.parseExpression(AttrExpr)) {
9082 Parser.eatToEndOfStatement();
9083 return false;
9084 }
9085
9086 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
9087 if (!CE) {
9088 Error(TagLoc, "expected numeric constant");
9089 Parser.eatToEndOfStatement();
9090 return false;
9091 }
9092
9093 Tag = CE->getValue();
9094 }
9095
9096 if (Parser.getTok().isNot(AsmToken::Comma)) {
9097 Error(Parser.getTok().getLoc(), "comma expected");
9098 Parser.eatToEndOfStatement();
9099 return false;
9100 }
9101 Parser.Lex(); // skip comma
9102
9103 StringRef StringValue = "";
9104 bool IsStringValue = false;
9105
9106 int64_t IntegerValue = 0;
9107 bool IsIntegerValue = false;
9108
9109 if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name)
9110 IsStringValue = true;
9111 else if (Tag == ARMBuildAttrs::compatibility) {
9112 IsStringValue = true;
9113 IsIntegerValue = true;
9114 } else if (Tag < 32 || Tag % 2 == 0)
9115 IsIntegerValue = true;
9116 else if (Tag % 2 == 1)
9117 IsStringValue = true;
9118 else
9119 llvm_unreachable("invalid tag type");
9120
9121 if (IsIntegerValue) {
9122 const MCExpr *ValueExpr;
9123 SMLoc ValueExprLoc = Parser.getTok().getLoc();
9124 if (Parser.parseExpression(ValueExpr)) {
9125 Parser.eatToEndOfStatement();
9126 return false;
9127 }
9128
9129 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
9130 if (!CE) {
9131 Error(ValueExprLoc, "expected numeric constant");
9132 Parser.eatToEndOfStatement();
9133 return false;
9134 }
9135
9136 IntegerValue = CE->getValue();
9137 }
9138
9139 if (Tag == ARMBuildAttrs::compatibility) {
9140 if (Parser.getTok().isNot(AsmToken::Comma))
9141 IsStringValue = false;
9142 if (Parser.getTok().isNot(AsmToken::Comma)) {
9143 Error(Parser.getTok().getLoc(), "comma expected");
9144 Parser.eatToEndOfStatement();
9145 return false;
9146 } else {
9147 Parser.Lex();
9148 }
9149 }
9150
9151 if (IsStringValue) {
9152 if (Parser.getTok().isNot(AsmToken::String)) {
9153 Error(Parser.getTok().getLoc(), "bad string constant");
9154 Parser.eatToEndOfStatement();
9155 return false;
9156 }
9157
9158 StringValue = Parser.getTok().getStringContents();
9159 Parser.Lex();
9160 }
9161
9162 if (IsIntegerValue && IsStringValue) {
9163 assert(Tag == ARMBuildAttrs::compatibility);
9164 getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
9165 } else if (IsIntegerValue)
9166 getTargetStreamer().emitAttribute(Tag, IntegerValue);
9167 else if (IsStringValue)
9168 getTargetStreamer().emitTextAttribute(Tag, StringValue);
9169 return false;
9170 }
9171
9172 /// parseDirectiveCPU
9173 /// ::= .cpu str
9174 bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
9175 StringRef CPU = getParser().parseStringToEndOfStatement().trim();
9176 getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
9177
9178 if (!STI.isCPUStringValid(CPU)) {
9179 Error(L, "Unknown CPU name");
9180 return false;
9181 }
9182
9183 // FIXME: This switches the CPU features globally, so code that you would
9184 // not expect to assemble might assemble anyway. For details
9185 // see: http://llvm.org/bugs/show_bug.cgi?id=20757
9186 STI.InitMCProcessorInfo(CPU, "");
9187 STI.InitCPUSchedModel(CPU);
9188 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
9189
9190 return false;
9191 }
9192
9193 // FIXME: This is duplicated in getARMFPUFeatures() in
9194 // tools/clang/lib/Driver/Tools.cpp
9195 static const struct {
9196 const unsigned ID;
9197 const uint64_t Enabled;
9198 const uint64_t Disabled;
9199 } FPUs[] = {
9200 {/* ID */ ARM::VFP,
9201 /* Enabled */ ARM::FeatureVFP2,
9202 /* Disabled */ ARM::FeatureNEON},
9203 {/* ID */ ARM::VFPV2,
9204 /* Enabled */ ARM::FeatureVFP2,
9205 /* Disabled */ ARM::FeatureNEON},
9206 {/* ID */ ARM::VFPV3,
9207 /* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3,
9208 /* Disabled */ ARM::FeatureNEON | ARM::FeatureD16},
9209 {/* ID */ ARM::VFPV3_D16,
9210 /* Enable */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureD16,
9211 /* Disabled */ ARM::FeatureNEON},
9212 {/* ID */ ARM::VFPV4,
9213 /* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4,
9214 /* Disabled */ ARM::FeatureNEON | ARM::FeatureD16},
9215 {/* ID */ ARM::VFPV4_D16,
9216 /* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 |
9217 ARM::FeatureD16,
9218 /* Disabled */ ARM::FeatureNEON},
9219 {/* ID */ ARM::FPV5_D16,
9220 /* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 |
9221 ARM::FeatureFPARMv8 | ARM::FeatureD16,
9222 /* Disabled */ ARM::FeatureNEON | ARM::FeatureCrypto},
9223 {/* ID */ ARM::FP_ARMV8,
9224 /* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 |
9225 ARM::FeatureFPARMv8,
9226 /* Disabled */ ARM::FeatureNEON | ARM::FeatureCrypto | ARM::FeatureD16},
9227 {/* ID */ ARM::NEON,
9228 /* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureNEON,
9229 /* Disabled */ ARM::FeatureD16},
9230 {/* ID */ ARM::NEON_VFPV4,
9231 /* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 |
9232 ARM::FeatureNEON,
9233 /* Disabled */ ARM::FeatureD16},
9234 {/* ID */ ARM::NEON_FP_ARMV8,
9235 /* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 |
9236 ARM::FeatureFPARMv8 | ARM::FeatureNEON,
9237 /* Disabled */ ARM::FeatureCrypto | ARM::FeatureD16},
9238 {/* ID */ ARM::CRYPTO_NEON_FP_ARMV8,
9239 /* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 |
9240 ARM::FeatureFPARMv8 | ARM::FeatureNEON | ARM::FeatureCrypto,
9241 /* Disabled */ ARM::FeatureD16},
9242 {ARM::SOFTVFP, 0, 0},
9243 };
9244
9245 /// parseDirectiveFPU
9246 /// ::= .fpu str
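/// Illustrative use:
///   .fpu vfpv3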
9247 bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
9248 SMLoc FPUNameLoc = getTok().getLoc();
9249 StringRef FPU = getParser().parseStringToEndOfStatement().trim();
9250
9251 unsigned ID = StringSwitch<unsigned>(FPU)
9252 #define ARM_FPU_NAME(NAME, ID) .Case(NAME, ARM::ID)
9253 #include "ARMFPUName.def"
9254 .Default(ARM::INVALID_FPU);
9255
9256 if (ID == ARM::INVALID_FPU) {
9257 Error(FPUNameLoc, "Unknown FPU name");
9258 return false;
9259 }
9260
9261 for (const auto &Entry : FPUs) {
9262 if (Entry.ID != ID)
9263 continue;
9264
9265 // Need to toggle features that should be on but are off, and features
9266 // that should be off but are on.
9267 uint64_t Toggle = (Entry.Enabled & ~STI.getFeatureBits()) |
9268 (Entry.Disabled & STI.getFeatureBits());
9269 setAvailableFeatures(ComputeAvailableFeatures(STI.ToggleFeature(Toggle)));
9270 break;
9271 }
9272
9273 getTargetStreamer().emitFPU(ID);
9274 return false;
9275 }
9276
9277 /// parseDirectiveFnStart
9278 /// ::= .fnstart
9279 bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
9280 if (UC.hasFnStart()) {
9281 Error(L, ".fnstart starts before the end of the previous one");
9282 UC.emitFnStartLocNotes();
9283 return false;
9284 }
9285
9286 // Reset the unwind directives parser state
9287 UC.reset();
9288
9289 getTargetStreamer().emitFnStart();
9290
9291 UC.recordFnStart(L);
9292 return false;
9293 }
9294
9295 /// parseDirectiveFnEnd
9296 /// ::= .fnend
9297 bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
9298 // Check the ordering of unwind directives
9299 if (!UC.hasFnStart()) {
9300 Error(L, ".fnstart must precede .fnend directive");
9301 return false;
9302 }
9303
9304 // Reset the unwind directives parser state
9305 getTargetStreamer().emitFnEnd();
9306
9307 UC.reset();
9308 return false;
9309 }
9310
9311 /// parseDirectiveCantUnwind
9312 /// ::= .cantunwind
9313 bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
9314 UC.recordCantUnwind(L);
9315
9316 // Check the ordering of unwind directives
9317 if (!UC.hasFnStart()) {
9318 Error(L, ".fnstart must precede .cantunwind directive");
9319 return false;
9320 }
9321 if (UC.hasHandlerData()) {
9322 Error(L, ".cantunwind can't be used with .handlerdata directive");
9323 UC.emitHandlerDataLocNotes();
9324 return false;
9325 }
9326 if (UC.hasPersonality()) {
9327 Error(L, ".cantunwind can't be used with .personality directive");
9328 UC.emitPersonalityLocNotes();
9329 return false;
9330 }
9331
9332 getTargetStreamer().emitCantUnwind();
9333 return false;
9334 }
9335
9336 /// parseDirectivePersonality
9337 /// ::= .personality name
9338 bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
9339 MCAsmParser &Parser = getParser();
9340 bool HasExistingPersonality = UC.hasPersonality();
9341
9342 UC.recordPersonality(L);
9343
9344 // Check the ordering of unwind directives
9345 if (!UC.hasFnStart()) {
9346 Error(L, ".fnstart must precede .personality directive");
9347 return false;
9348 }
9349 if (UC.cantUnwind()) {
9350 Error(L, ".personality can't be used with .cantunwind directive");
9351 UC.emitCantUnwindLocNotes();
9352 return false;
9353 }
9354 if (UC.hasHandlerData()) {
9355 Error(L, ".personality must precede .handlerdata directive");
9356 UC.emitHandlerDataLocNotes();
9357 return false;
9358 }
9359 if (HasExistingPersonality) {
9360 Parser.eatToEndOfStatement();
9361 Error(L, "multiple personality directives");
9362 UC.emitPersonalityLocNotes();
9363 return false;
9364 }
9365
9366 // Parse the name of the personality routine
9367 if (Parser.getTok().isNot(AsmToken::Identifier)) {
9368 Parser.eatToEndOfStatement();
9369 Error(L, "unexpected input in .personality directive.");
9370 return false;
9371 }
9372 StringRef Name(Parser.getTok().getIdentifier());
9373 Parser.Lex();
9374
9375 MCSymbol *PR = getParser().getContext().GetOrCreateSymbol(Name);
9376 getTargetStreamer().emitPersonality(PR);
9377 return false;
9378 }
9379
9380 /// parseDirectiveHandlerData
9381 /// ::= .handlerdata
9382 bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
9383 UC.recordHandlerData(L);
9384
9385 // Check the ordering of unwind directives
9386 if (!UC.hasFnStart()) {
9387 Error(L, ".fnstart must precede .handlerdata directive");
9388 return false;
9389 }
9390 if (UC.cantUnwind()) {
9391 Error(L, ".handlerdata can't be used with .cantunwind directive");
9392 UC.emitCantUnwindLocNotes();
9393 return false;
9394 }
9395
9396 getTargetStreamer().emitHandlerData();
9397 return false;
9398 }
9399
9400 /// parseDirectiveSetFP
9401 /// ::= .setfp fpreg, spreg [, offset]
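/// Illustrative use:
///   .setfp fp, sp, #16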
9402 bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
9403 MCAsmParser &Parser = getParser();
9404 // Check the ordering of unwind directives
9405 if (!UC.hasFnStart()) {
9406 Error(L, ".fnstart must precede .setfp directive");
9407 return false;
9408 }
9409 if (UC.hasHandlerData()) {
9410 Error(L, ".setfp must precede .handlerdata directive");
9411 return false;
9412 }
9413
9414 // Parse fpreg
9415 SMLoc FPRegLoc = Parser.getTok().getLoc();
9416 int FPReg = tryParseRegister();
9417 if (FPReg == -1) {
9418 Error(FPRegLoc, "frame pointer register expected");
9419 return false;
9420 }
9421
9422 // Consume comma
9423 if (Parser.getTok().isNot(AsmToken::Comma)) {
9424 Error(Parser.getTok().getLoc(), "comma expected");
9425 return false;
9426 }
9427 Parser.Lex(); // skip comma
9428
9429 // Parse spreg
9430 SMLoc SPRegLoc = Parser.getTok().getLoc();
9431 int SPReg = tryParseRegister();
9432 if (SPReg == -1) {
9433 Error(SPRegLoc, "stack pointer register expected");
9434 return false;
9435 }
9436
9437 if (SPReg != ARM::SP && SPReg != UC.getFPReg()) {
9438 Error(SPRegLoc, "register should be either $sp or the latest fp register");
9439 return false;
9440 }
9441
9442 // Update the frame pointer register
9443 UC.saveFPReg(FPReg);
9444
9445 // Parse offset
9446 int64_t Offset = 0;
9447 if (Parser.getTok().is(AsmToken::Comma)) {
9448 Parser.Lex(); // skip comma
9449
9450 if (Parser.getTok().isNot(AsmToken::Hash) &&
9451 Parser.getTok().isNot(AsmToken::Dollar)) {
9452 Error(Parser.getTok().getLoc(), "'#' expected");
9453 return false;
9454 }
9455 Parser.Lex(); // skip hash token.
9456
9457 const MCExpr *OffsetExpr;
9458 SMLoc ExLoc = Parser.getTok().getLoc();
9459 SMLoc EndLoc;
9460 if (getParser().parseExpression(OffsetExpr, EndLoc)) {
9461 Error(ExLoc, "malformed setfp offset");
9462 return false;
9463 }
9464 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
9465 if (!CE) {
9466 Error(ExLoc, "setfp offset must be an immediate");
9467 return false;
9468 }
9469
9470 Offset = CE->getValue();
9471 }
9472
9473 getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg),
9474 static_cast<unsigned>(SPReg), Offset);
9475 return false;
9476 }
9477
9478 /// parseDirectivePad
9479 /// ::= .pad offset
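/// Illustrative use:
///   .pad #16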
9480 bool ARMAsmParser::parseDirectivePad(SMLoc L) {
9481 MCAsmParser &Parser = getParser();
9482 // Check the ordering of unwind directives
9483 if (!UC.hasFnStart()) {
9484 Error(L, ".fnstart must precede .pad directive");
9485 return false;
9486 }
9487 if (UC.hasHandlerData()) {
9488 Error(L, ".pad must precede .handlerdata directive");
9489 return false;
9490 }
9491
9492 // Parse the offset
9493 if (Parser.getTok().isNot(AsmToken::Hash) &&
9494 Parser.getTok().isNot(AsmToken::Dollar)) {
9495 Error(Parser.getTok().getLoc(), "'#' expected");
9496 return false;
9497 }
9498 Parser.Lex(); // skip hash token.
9499
9500 const MCExpr *OffsetExpr;
9501 SMLoc ExLoc = Parser.getTok().getLoc();
9502 SMLoc EndLoc;
9503 if (getParser().parseExpression(OffsetExpr, EndLoc)) {
9504 Error(ExLoc, "malformed pad offset");
9505 return false;
9506 }
9507 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
9508 if (!CE) {
9509 Error(ExLoc, "pad offset must be an immediate");
9510 return false;
9511 }
9512
9513 getTargetStreamer().emitPad(CE->getValue());
9514 return false;
9515 }
9516
9517 /// parseDirectiveRegSave
9518 /// ::= .save { registers }
9519 /// ::= .vsave { registers }
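/// Illustrative uses:
///   .save  {r4-r7, lr}
///   .vsave {d8-d15}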
9520 bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
9521 // Check the ordering of unwind directives
9522 if (!UC.hasFnStart()) {
9523 Error(L, ".fnstart must precede .save or .vsave directives");
9524 return false;
9525 }
9526 if (UC.hasHandlerData()) {
9527 Error(L, ".save or .vsave must precede .handlerdata directive");
9528 return false;
9529 }
9530
9531 // RAII object to make sure parsed operands are deleted.
9532 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
9533
9534 // Parse the register list
9535 if (parseRegisterList(Operands))
9536 return false;
9537 ARMOperand &Op = (ARMOperand &)*Operands[0];
9538 if (!IsVector && !Op.isRegList()) {
9539 Error(L, ".save expects GPR registers");
9540 return false;
9541 }
9542 if (IsVector && !Op.isDPRRegList()) {
9543 Error(L, ".vsave expects DPR registers");
9544 return false;
9545 }
9546
9547 getTargetStreamer().emitRegSave(Op.getRegList(), IsVector);
9548 return false;
9549 }
9550
9551 /// parseDirectiveInst
9552 /// ::= .inst opcode [, ...]
9553 /// ::= .inst.n opcode [, ...]
9554 /// ::= .inst.w opcode [, ...]
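/// Illustrative uses (Thumb NOP encodings):
///   .inst.n 0xbf00
///   .inst.w 0xf3af8000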
9555 bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
9556 MCAsmParser &Parser = getParser();
9557 int Width;
9558
9559 if (isThumb()) {
9560 switch (Suffix) {
9561 case 'n':
9562 Width = 2;
9563 break;
9564 case 'w':
9565 Width = 4;
9566 break;
9567 default:
9568 Parser.eatToEndOfStatement();
9569 Error(Loc, "cannot determine Thumb instruction size, "
9570 "use inst.n/inst.w instead");
9571 return false;
9572 }
9573 } else {
9574 if (Suffix) {
9575 Parser.eatToEndOfStatement();
9576 Error(Loc, "width suffixes are invalid in ARM mode");
9577 return false;
9578 }
9579 Width = 4;
9580 }
9581
9582 if (getLexer().is(AsmToken::EndOfStatement)) {
9583 Parser.eatToEndOfStatement();
9584 Error(Loc, "expected expression following directive");
9585 return false;
9586 }
9587
9588 for (;;) {
9589 const MCExpr *Expr;
9590
9591 if (getParser().parseExpression(Expr)) {
9592 Error(Loc, "expected expression");
9593 return false;
9594 }
9595
9596 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
9597 if (!Value) {
9598 Error(Loc, "expected constant expression");
9599 return false;
9600 }
9601
9602 switch (Width) {
9603 case 2:
9604 if (Value->getValue() > 0xffff) {
9605 Error(Loc, "inst.n operand is too big, use inst.w instead");
9606 return false;
9607 }
9608 break;
9609 case 4:
9610 if (Value->getValue() > 0xffffffff) {
9611 Error(Loc,
9612 StringRef(Suffix ? "inst.w" : "inst") + " operand is too big");
9613 return false;
9614 }
9615 break;
9616 default:
9617 llvm_unreachable("only supported widths are 2 and 4");
9618 }
9619
9620 getTargetStreamer().emitInst(Value->getValue(), Suffix);
9621
9622 if (getLexer().is(AsmToken::EndOfStatement))
9623 break;
9624
9625 if (getLexer().isNot(AsmToken::Comma)) {
9626 Error(Loc, "unexpected token in directive");
9627 return false;
9628 }
9629
9630 Parser.Lex();
9631 }
9632
9633 Parser.Lex();
9634 return false;
9635 }
9636
9637 /// parseDirectiveLtorg
9638 /// ::= .ltorg | .pool
9639 bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
9640 getTargetStreamer().emitCurrentConstantPool();
9641 return false;
9642 }
9643
9644 bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
9645 const MCSection *Section = getStreamer().getCurrentSection().first;
9646
9647 if (getLexer().isNot(AsmToken::EndOfStatement)) {
9648 TokError("unexpected token in directive");
9649 return false;
9650 }
9651
9652 if (!Section) {
9653 getStreamer().InitSections(false);
9654 Section = getStreamer().getCurrentSection().first;
9655 }
9656
9657 assert(Section && "must have section to emit alignment");
9658 if (Section->UseCodeAlign())
9659 getStreamer().EmitCodeAlignment(2);
9660 else
9661 getStreamer().EmitValueToAlignment(2);
9662
9663 return false;
9664 }
9665
9666 /// parseDirectivePersonalityIndex
9667 /// ::= .personalityindex index
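/// Illustrative use:
///   .personalityindex 0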
9668 bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
9669 MCAsmParser &Parser = getParser();
9670 bool HasExistingPersonality = UC.hasPersonality();
9671
9672 UC.recordPersonalityIndex(L);
9673
9674 if (!UC.hasFnStart()) {
9675 Parser.eatToEndOfStatement();
9676 Error(L, ".fnstart must precede .personalityindex directive");
9677 return false;
9678 }
9679 if (UC.cantUnwind()) {
9680 Parser.eatToEndOfStatement();
9681 Error(L, ".personalityindex cannot be used with .cantunwind");
9682 UC.emitCantUnwindLocNotes();
9683 return false;
9684 }
9685 if (UC.hasHandlerData()) {
9686 Parser.eatToEndOfStatement();
9687 Error(L, ".personalityindex must precede .handlerdata directive");
9688 UC.emitHandlerDataLocNotes();
9689 return false;
9690 }
9691 if (HasExistingPersonality) {
9692 Parser.eatToEndOfStatement();
9693 Error(L, "multiple personality directives");
9694 UC.emitPersonalityLocNotes();
9695 return false;
9696 }
9697
9698 const MCExpr *IndexExpression;
9699 SMLoc IndexLoc = Parser.getTok().getLoc();
9700 if (Parser.parseExpression(IndexExpression)) {
9701 Parser.eatToEndOfStatement();
9702 return false;
9703 }
9704
9705 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
9706 if (!CE) {
9707 Parser.eatToEndOfStatement();
9708 Error(IndexLoc, "index must be a constant number");
9709 return false;
9710 }
9711 if (CE->getValue() < 0 ||
9712 CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX) {
9713 Parser.eatToEndOfStatement();
9714 Error(IndexLoc, "personality routine index should be in range [0-3]");
9715 return false;
9716 }
9717
9718 getTargetStreamer().emitPersonalityIndex(CE->getValue());
9719 return false;
9720 }
9721
9722 /// parseDirectiveUnwindRaw
9723 /// ::= .unwind_raw offset, opcode [, opcode...]
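/// Illustrative use (0xb0 is the EHABI 'finish' unwind opcode):
///   .unwind_raw 4, 0xb0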
9724 bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
9725 MCAsmParser &Parser = getParser();
9726 if (!UC.hasFnStart()) {
9727 Parser.eatToEndOfStatement();
9728 Error(L, ".fnstart must precede .unwind_raw directives");
9729 return false;
9730 }
9731
9732 int64_t StackOffset;
9733
9734 const MCExpr *OffsetExpr;
9735 SMLoc OffsetLoc = getLexer().getLoc();
9736 if (getLexer().is(AsmToken::EndOfStatement) ||
9737 getParser().parseExpression(OffsetExpr)) {
9738 Error(OffsetLoc, "expected expression");
9739 Parser.eatToEndOfStatement();
9740 return false;
9741 }
9742
9743 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
9744 if (!CE) {
9745 Error(OffsetLoc, "offset must be a constant");
9746 Parser.eatToEndOfStatement();
9747 return false;
9748 }
9749
9750 StackOffset = CE->getValue();
9751
9752 if (getLexer().isNot(AsmToken::Comma)) {
9753 Error(getLexer().getLoc(), "expected comma");
9754 Parser.eatToEndOfStatement();
9755 return false;
9756 }
9757 Parser.Lex();
9758
9759 SmallVector<uint8_t, 16> Opcodes;
9760 for (;;) {
9761 const MCExpr *OE;
9762
9763 SMLoc OpcodeLoc = getLexer().getLoc();
9764 if (getLexer().is(AsmToken::EndOfStatement) || Parser.parseExpression(OE)) {
9765 Error(OpcodeLoc, "expected opcode expression");
9766 Parser.eatToEndOfStatement();
9767 return false;
9768 }
9769
9770 const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE);
9771 if (!OC) {
9772 Error(OpcodeLoc, "opcode value must be a constant");
9773 Parser.eatToEndOfStatement();
9774 return false;
9775 }
9776
9777 const int64_t Opcode = OC->getValue();
9778 if (Opcode & ~0xff) {
9779 Error(OpcodeLoc, "invalid opcode");
9780 Parser.eatToEndOfStatement();
9781 return false;
9782 }
9783
9784 Opcodes.push_back(uint8_t(Opcode));
9785
9786 if (getLexer().is(AsmToken::EndOfStatement))
9787 break;
9788
9789 if (getLexer().isNot(AsmToken::Comma)) {
9790 Error(getLexer().getLoc(), "unexpected token in directive");
9791 Parser.eatToEndOfStatement();
9792 return false;
9793 }
9794
9795 Parser.Lex();
9796 }
9797
9798 getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
9799
9800 Parser.Lex();
9801 return false;
9802 }
9803
9804 /// parseDirectiveTLSDescSeq
9805 /// ::= .tlsdescseq tls-variable
9806 bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
9807 MCAsmParser &Parser = getParser();
9808
9809 if (getLexer().isNot(AsmToken::Identifier)) {
9810 TokError("expected variable after '.tlsdescseq' directive");
9811 Parser.eatToEndOfStatement();
9812 return false;
9813 }
9814
9815 const MCSymbolRefExpr *SRE =
9816 MCSymbolRefExpr::Create(Parser.getTok().getIdentifier(),
9817 MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext());
9818 Lex();
9819
9820 if (getLexer().isNot(AsmToken::EndOfStatement)) {
9821 Error(Parser.getTok().getLoc(), "unexpected token");
9822 Parser.eatToEndOfStatement();
9823 return false;
9824 }
9825
9826 getTargetStreamer().AnnotateTLSDescriptorSequence(SRE);
9827 return false;
9828 }
9829
9830 /// parseDirectiveMovSP
9831 /// ::= .movsp reg [, #offset]
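/// Illustrative use:
///   .movsp r11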
9832 bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
9833 MCAsmParser &Parser = getParser();
9834 if (!UC.hasFnStart()) {
9835 Parser.eatToEndOfStatement();
9836 Error(L, ".fnstart must precede .movsp directives");
9837 return false;
9838 }
9839 if (UC.getFPReg() != ARM::SP) {
9840 Parser.eatToEndOfStatement();
9841 Error(L, "unexpected .movsp directive");
9842 return false;
9843 }
9844
9845 SMLoc SPRegLoc = Parser.getTok().getLoc();
9846 int SPReg = tryParseRegister();
9847 if (SPReg == -1) {
9848 Parser.eatToEndOfStatement();
9849 Error(SPRegLoc, "register expected");
9850 return false;
9851 }
9852
9853 if (SPReg == ARM::SP || SPReg == ARM::PC) {
9854 Parser.eatToEndOfStatement();
9855 Error(SPRegLoc, "sp and pc are not permitted in .movsp directive");
9856 return false;
9857 }
9858
9859 int64_t Offset = 0;
9860 if (Parser.getTok().is(AsmToken::Comma)) {
9861 Parser.Lex();
9862
9863 if (Parser.getTok().isNot(AsmToken::Hash)) {
9864 Error(Parser.getTok().getLoc(), "expected #constant");
9865 Parser.eatToEndOfStatement();
9866 return false;
9867 }
9868 Parser.Lex();
9869
9870 const MCExpr *OffsetExpr;
9871 SMLoc OffsetLoc = Parser.getTok().getLoc();
9872 if (Parser.parseExpression(OffsetExpr)) {
9873 Parser.eatToEndOfStatement();
9874 Error(OffsetLoc, "malformed offset expression");
9875 return false;
9876 }
9877
9878 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
9879 if (!CE) {
9880 Parser.eatToEndOfStatement();
9881 Error(OffsetLoc, "offset must be an immediate constant");
9882 return false;
9883 }
9884
9885 Offset = CE->getValue();
9886 }
9887
9888 getTargetStreamer().emitMovSP(SPReg, Offset);
9889 UC.saveFPReg(SPReg);
9890
9891 return false;
9892 }
9893
9894 /// parseDirectiveObjectArch
9895 /// ::= .object_arch name
9896 bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
9897 MCAsmParser &Parser = getParser();
9898 if (getLexer().isNot(AsmToken::Identifier)) {
9899 Error(getLexer().getLoc(), "unexpected token");
9900 Parser.eatToEndOfStatement();
9901 return false;
9902 }
9903
9904 StringRef Arch = Parser.getTok().getString();
9905 SMLoc ArchLoc = Parser.getTok().getLoc();
9906 getLexer().Lex();
9907
9908 unsigned ID = StringSwitch<unsigned>(Arch)
9909 #define ARM_ARCH_NAME(NAME, ID, DEFAULT_CPU_NAME, DEFAULT_CPU_ARCH) \
9910 .Case(NAME, ARM::ID)
9911 #define ARM_ARCH_ALIAS(NAME, ID) \
9912 .Case(NAME, ARM::ID)
9913 #include "MCTargetDesc/ARMArchName.def"
9914 #undef ARM_ARCH_NAME
9915 #undef ARM_ARCH_ALIAS
9916 .Default(ARM::INVALID_ARCH);
9917
9918 if (ID == ARM::INVALID_ARCH) {
9919 Error(ArchLoc, "unknown architecture '" + Arch + "'");
9920 Parser.eatToEndOfStatement();
9921 return false;
9922 }
9923
9924 getTargetStreamer().emitObjectArch(ID);
9925
9926 if (getLexer().isNot(AsmToken::EndOfStatement)) {
9927 Error(getLexer().getLoc(), "unexpected token");
9928 Parser.eatToEndOfStatement();
9929 }
9930
9931 return false;
9932 }
9933
9934 /// parseDirectiveAlign
9935 /// ::= .align
9936 bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
9937 // NOTE: if this is not the end of the statement, fall back to the
9938 // target-agnostic handling for this directive, which handles it correctly.
9939 if (getLexer().isNot(AsmToken::EndOfStatement))
9940 return true;
9941
9942 // '.align' is handled target-specifically here to mean 2**2 byte alignment.
9943 if (getStreamer().getCurrentSection().first->UseCodeAlign())
9944 getStreamer().EmitCodeAlignment(4, 0);
9945 else
9946 getStreamer().EmitValueToAlignment(4, 0, 1, 0);
9947
9948 return false;
9949 }
9950
9951 /// parseDirectiveThumbSet
9952 /// ::= .thumb_set name, value
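/// Illustrative use (symbol names are only an example):
///   .thumb_set alias_func, target_func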
9953 bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
9954 MCAsmParser &Parser = getParser();
9955
9956 StringRef Name;
9957 if (Parser.parseIdentifier(Name)) {
9958 TokError("expected identifier after '.thumb_set'");
9959 Parser.eatToEndOfStatement();
9960 return false;
9961 }
9962
9963 if (getLexer().isNot(AsmToken::Comma)) {
9964 TokError("expected comma after name '" + Name + "'");
9965 Parser.eatToEndOfStatement();
9966 return false;
9967 }
9968 Lex();
9969
9970 const MCExpr *Value;
9971 if (Parser.parseExpression(Value)) {
9972 TokError("missing expression");
9973 Parser.eatToEndOfStatement();
9974 return false;
9975 }
9976
9977 if (getLexer().isNot(AsmToken::EndOfStatement)) {
9978 TokError("unexpected token");
9979 Parser.eatToEndOfStatement();
9980 return false;
9981 }
9982 Lex();
9983
9984 MCSymbol *Alias = getContext().GetOrCreateSymbol(Name);
9985 getTargetStreamer().emitThumbSet(Alias, Value);
9986 return false;
9987 }
9988
9989 /// Force static initialization.
9990 extern "C" void LLVMInitializeARMAsmParser() {
9991 RegisterMCAsmParser<ARMAsmParser> X(TheARMLETarget);
9992 RegisterMCAsmParser<ARMAsmParser> Y(TheARMBETarget);
9993 RegisterMCAsmParser<ARMAsmParser> A(TheThumbLETarget);
9994 RegisterMCAsmParser<ARMAsmParser> B(TheThumbBETarget);
9995 }
9996
9997 #define GET_REGISTER_MATCHER
9998 #define GET_SUBTARGET_FEATURE_NAME
9999 #define GET_MATCHER_IMPLEMENTATION
10000 #include "ARMGenAsmMatcher.inc"
10001
10002 static const struct {
10003 const char *Name;
10004 const unsigned ArchCheck;
10005 const uint64_t Features;
10006 } Extensions[] = {
10007 { "crc", Feature_HasV8, ARM::FeatureCRC },
10008 { "crypto", Feature_HasV8,
10009 ARM::FeatureCrypto | ARM::FeatureNEON | ARM::FeatureFPARMv8 },
10010 { "fp", Feature_HasV8, ARM::FeatureFPARMv8 },
10011 { "idiv", Feature_HasV7 | Feature_IsNotMClass,
10012 ARM::FeatureHWDiv | ARM::FeatureHWDivARM },
10013 // FIXME: iWMMXT not supported
10014 { "iwmmxt", Feature_None, 0 },
10015 // FIXME: iWMMXT2 not supported
10016 { "iwmmxt2", Feature_None, 0 },
10017 // FIXME: Maverick not supported
10018 { "maverick", Feature_None, 0 },
10019 { "mp", Feature_HasV7 | Feature_IsNotMClass, ARM::FeatureMP },
10020 // FIXME: ARMv6-m OS Extensions feature not checked
10021 { "os", Feature_None, 0 },
10022 // FIXME: Also available in ARMv6-K
10023 { "sec", Feature_HasV7, ARM::FeatureTrustZone },
10024 { "simd", Feature_HasV8, ARM::FeatureNEON | ARM::FeatureFPARMv8 },
10025 // FIXME: Only available in A-class, isel not predicated
10026 { "virt", Feature_HasV7, ARM::FeatureVirtualization },
10027 // FIXME: xscale not supported
10028 { "xscale", Feature_None, 0 },
10029 };
10030
10031 /// parseDirectiveArchExtension
10032 /// ::= .arch_extension [no]feature
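/// Illustrative uses:
///   .arch_extension crc
///   .arch_extension nocrc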
10033 bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
10034 MCAsmParser &Parser = getParser();
10035
10036 if (getLexer().isNot(AsmToken::Identifier)) {
10037 Error(getLexer().getLoc(), "unexpected token");
10038 Parser.eatToEndOfStatement();
10039 return false;
10040 }
10041
10042 StringRef Name = Parser.getTok().getString();
10043 SMLoc ExtLoc = Parser.getTok().getLoc();
10044 getLexer().Lex();
10045
10046 bool EnableFeature = true;
10047 if (Name.startswith_lower("no")) {
10048 EnableFeature = false;
10049 Name = Name.substr(2);
10050 }
10051
10052 for (const auto &Extension : Extensions) {
10053 if (Extension.Name != Name)
10054 continue;
10055
10056 if (!Extension.Features)
10057 report_fatal_error("unsupported architectural extension: " + Name);
10058
10059 if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck) {
10060 Error(ExtLoc, "architectural extension '" + Name + "' is not "
10061 "allowed for the current base architecture");
10062 return false;
10063 }
10064
10065 uint64_t ToggleFeatures = EnableFeature
10066 ? (~STI.getFeatureBits() & Extension.Features)
10067 : ( STI.getFeatureBits() & Extension.Features);
10068 uint64_t Features =
10069 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
10070 setAvailableFeatures(Features);
10071 return false;
10072 }
10073
10074 Error(ExtLoc, "unknown architectural extension: " + Name);
10075 Parser.eatToEndOfStatement();
10076 return false;
10077 }
10078
10079 // Define this matcher function after the auto-generated include so we
10080 // have the match class enum definitions.
10081 unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
10082 unsigned Kind) {
10083 ARMOperand &Op = static_cast<ARMOperand &>(AsmOp);
10084 // If the kind is a token for a literal immediate, check if our asm
10085 // operand matches. This is for InstAliases which have a fixed-value
10086 // immediate in the syntax.
10087 switch (Kind) {
10088 default: break;
10089 case MCK__35_0:
10090 if (Op.isImm())
10091 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
10092 if (CE->getValue() == 0)
10093 return Match_Success;
10094 break;
10095 case MCK_ModImm:
10096 if (Op.isImm()) {
10097 const MCExpr *SOExpr = Op.getImm();
10098 int64_t Value;
10099 if (!SOExpr->EvaluateAsAbsolute(Value))
10100 return Match_Success;
10101 assert((Value >= INT32_MIN && Value <= UINT32_MAX) &&
10102 "expression value must be representable in 32 bits");
10103 }
10104 break;
10105 case MCK_GPRPair:
10106 if (Op.isReg() &&
10107 MRI->getRegClass(ARM::GPRRegClassID).contains(Op.getReg()))
10108 return Match_Success;
10109 break;
10110 }
10111 return Match_InvalidOperand;
10112 }
10113