1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "Utils/AArch64BaseInfo.h"
13 #include "llvm/ADT/APInt.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SmallString.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/ADT/StringSwitch.h"
18 #include "llvm/ADT/Twine.h"
19 #include "llvm/MC/MCContext.h"
20 #include "llvm/MC/MCExpr.h"
21 #include "llvm/MC/MCInst.h"
22 #include "llvm/MC/MCObjectFileInfo.h"
23 #include "llvm/MC/MCParser/MCAsmLexer.h"
24 #include "llvm/MC/MCParser/MCAsmParser.h"
25 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
26 #include "llvm/MC/MCRegisterInfo.h"
27 #include "llvm/MC/MCStreamer.h"
28 #include "llvm/MC/MCSubtargetInfo.h"
29 #include "llvm/MC/MCSymbol.h"
30 #include "llvm/MC/MCTargetAsmParser.h"
31 #include "llvm/Support/ErrorHandling.h"
32 #include "llvm/Support/SourceMgr.h"
33 #include "llvm/Support/TargetRegistry.h"
34 #include "llvm/Support/raw_ostream.h"
35 #include <cstdio>
36 using namespace llvm;
37
38 namespace {
39
40 class AArch64Operand;
41
42 class AArch64AsmParser : public MCTargetAsmParser {
43 private:
44 StringRef Mnemonic; ///< Instruction mnemonic.
45 MCSubtargetInfo &STI;
46
47 // Map of register aliases registers via the .req directive.
48 StringMap<std::pair<bool, unsigned> > RegisterReqs;
49
getTargetStreamer()50 AArch64TargetStreamer &getTargetStreamer() {
51 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
52 return static_cast<AArch64TargetStreamer &>(TS);
53 }
54
getLoc() const55 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
56
57 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
58 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
59 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
60 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
61 int tryParseRegister();
62 int tryMatchVectorRegister(StringRef &Kind, bool expected);
63 bool parseRegister(OperandVector &Operands);
64 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
65 bool parseVectorList(OperandVector &Operands);
66 bool parseOperand(OperandVector &Operands, bool isCondCode,
67 bool invertCondCode);
68
Warning(SMLoc L,const Twine & Msg)69 void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
Error(SMLoc L,const Twine & Msg)70 bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
71 bool showMatchError(SMLoc Loc, unsigned ErrCode);
72
73 bool parseDirectiveWord(unsigned Size, SMLoc L);
74 bool parseDirectiveInst(SMLoc L);
75
76 bool parseDirectiveTLSDescCall(SMLoc L);
77
78 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
79 bool parseDirectiveLtorg(SMLoc L);
80
81 bool parseDirectiveReq(StringRef Name, SMLoc L);
82 bool parseDirectiveUnreq(SMLoc L);
83
84 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
85 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
86 OperandVector &Operands, MCStreamer &Out,
87 uint64_t &ErrorInfo,
88 bool MatchingInlineAsm) override;
89 /// @name Auto-generated Match Functions
90 /// {
91
92 #define GET_ASSEMBLER_HEADER
93 #include "AArch64GenAsmMatcher.inc"
94
95 /// }
96
97 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
98 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
99 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
100 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
101 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
102 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
103 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
104 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
105 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
106 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
107 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
108 bool tryParseVectorRegister(OperandVector &Operands);
109
110 public:
111 enum AArch64MatchResultTy {
112 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
113 #define GET_OPERAND_DIAGNOSTIC_TYPES
114 #include "AArch64GenAsmMatcher.inc"
115 };
AArch64AsmParser(MCSubtargetInfo & STI,MCAsmParser & Parser,const MCInstrInfo & MII,const MCTargetOptions & Options)116 AArch64AsmParser(MCSubtargetInfo &STI, MCAsmParser &Parser,
117 const MCInstrInfo &MII, const MCTargetOptions &Options)
118 : MCTargetAsmParser(), STI(STI) {
119 MCAsmParserExtension::Initialize(Parser);
120 MCStreamer &S = getParser().getStreamer();
121 if (S.getTargetStreamer() == nullptr)
122 new AArch64TargetStreamer(S);
123
124 // Initialize the set of available features.
125 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
126 }
127
128 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
129 SMLoc NameLoc, OperandVector &Operands) override;
130 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
131 bool ParseDirective(AsmToken DirectiveID) override;
132 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
133 unsigned Kind) override;
134
135 static bool classifySymbolRef(const MCExpr *Expr,
136 AArch64MCExpr::VariantKind &ELFRefKind,
137 MCSymbolRefExpr::VariantKind &DarwinRefKind,
138 int64_t &Addend);
139 };
140 } // end anonymous namespace
141
142 namespace {
143
144 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
145 /// instruction.
146 class AArch64Operand : public MCParsedAsmOperand {
147 private:
148 enum KindTy {
149 k_Immediate,
150 k_ShiftedImm,
151 k_CondCode,
152 k_Register,
153 k_VectorList,
154 k_VectorIndex,
155 k_Token,
156 k_SysReg,
157 k_SysCR,
158 k_Prefetch,
159 k_ShiftExtend,
160 k_FPImm,
161 k_Barrier
162 } Kind;
163
164 SMLoc StartLoc, EndLoc;
165
166 struct TokOp {
167 const char *Data;
168 unsigned Length;
169 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
170 };
171
172 struct RegOp {
173 unsigned RegNum;
174 bool isVector;
175 };
176
177 struct VectorListOp {
178 unsigned RegNum;
179 unsigned Count;
180 unsigned NumElements;
181 unsigned ElementKind;
182 };
183
184 struct VectorIndexOp {
185 unsigned Val;
186 };
187
188 struct ImmOp {
189 const MCExpr *Val;
190 };
191
192 struct ShiftedImmOp {
193 const MCExpr *Val;
194 unsigned ShiftAmount;
195 };
196
197 struct CondCodeOp {
198 AArch64CC::CondCode Code;
199 };
200
201 struct FPImmOp {
202 unsigned Val; // Encoded 8-bit representation.
203 };
204
205 struct BarrierOp {
206 unsigned Val; // Not the enum since not all values have names.
207 const char *Data;
208 unsigned Length;
209 };
210
211 struct SysRegOp {
212 const char *Data;
213 unsigned Length;
214 uint32_t MRSReg;
215 uint32_t MSRReg;
216 uint32_t PStateField;
217 };
218
219 struct SysCRImmOp {
220 unsigned Val;
221 };
222
223 struct PrefetchOp {
224 unsigned Val;
225 const char *Data;
226 unsigned Length;
227 };
228
229 struct ShiftExtendOp {
230 AArch64_AM::ShiftExtendType Type;
231 unsigned Amount;
232 bool HasExplicitAmount;
233 };
234
235 struct ExtendOp {
236 unsigned Val;
237 };
238
239 union {
240 struct TokOp Tok;
241 struct RegOp Reg;
242 struct VectorListOp VectorList;
243 struct VectorIndexOp VectorIndex;
244 struct ImmOp Imm;
245 struct ShiftedImmOp ShiftedImm;
246 struct CondCodeOp CondCode;
247 struct FPImmOp FPImm;
248 struct BarrierOp Barrier;
249 struct SysRegOp SysReg;
250 struct SysCRImmOp SysCRImm;
251 struct PrefetchOp Prefetch;
252 struct ShiftExtendOp ShiftExtend;
253 };
254
255 // Keep the MCContext around as the MCExprs may need manipulated during
256 // the add<>Operands() calls.
257 MCContext &Ctx;
258
259 public:
AArch64Operand(KindTy K,MCContext & Ctx)260 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
261
AArch64Operand(const AArch64Operand & o)262 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
263 Kind = o.Kind;
264 StartLoc = o.StartLoc;
265 EndLoc = o.EndLoc;
266 switch (Kind) {
267 case k_Token:
268 Tok = o.Tok;
269 break;
270 case k_Immediate:
271 Imm = o.Imm;
272 break;
273 case k_ShiftedImm:
274 ShiftedImm = o.ShiftedImm;
275 break;
276 case k_CondCode:
277 CondCode = o.CondCode;
278 break;
279 case k_FPImm:
280 FPImm = o.FPImm;
281 break;
282 case k_Barrier:
283 Barrier = o.Barrier;
284 break;
285 case k_Register:
286 Reg = o.Reg;
287 break;
288 case k_VectorList:
289 VectorList = o.VectorList;
290 break;
291 case k_VectorIndex:
292 VectorIndex = o.VectorIndex;
293 break;
294 case k_SysReg:
295 SysReg = o.SysReg;
296 break;
297 case k_SysCR:
298 SysCRImm = o.SysCRImm;
299 break;
300 case k_Prefetch:
301 Prefetch = o.Prefetch;
302 break;
303 case k_ShiftExtend:
304 ShiftExtend = o.ShiftExtend;
305 break;
306 }
307 }
308
309 /// getStartLoc - Get the location of the first token of this operand.
getStartLoc() const310 SMLoc getStartLoc() const override { return StartLoc; }
311 /// getEndLoc - Get the location of the last token of this operand.
getEndLoc() const312 SMLoc getEndLoc() const override { return EndLoc; }
313
getToken() const314 StringRef getToken() const {
315 assert(Kind == k_Token && "Invalid access!");
316 return StringRef(Tok.Data, Tok.Length);
317 }
318
isTokenSuffix() const319 bool isTokenSuffix() const {
320 assert(Kind == k_Token && "Invalid access!");
321 return Tok.IsSuffix;
322 }
323
getImm() const324 const MCExpr *getImm() const {
325 assert(Kind == k_Immediate && "Invalid access!");
326 return Imm.Val;
327 }
328
getShiftedImmVal() const329 const MCExpr *getShiftedImmVal() const {
330 assert(Kind == k_ShiftedImm && "Invalid access!");
331 return ShiftedImm.Val;
332 }
333
getShiftedImmShift() const334 unsigned getShiftedImmShift() const {
335 assert(Kind == k_ShiftedImm && "Invalid access!");
336 return ShiftedImm.ShiftAmount;
337 }
338
getCondCode() const339 AArch64CC::CondCode getCondCode() const {
340 assert(Kind == k_CondCode && "Invalid access!");
341 return CondCode.Code;
342 }
343
getFPImm() const344 unsigned getFPImm() const {
345 assert(Kind == k_FPImm && "Invalid access!");
346 return FPImm.Val;
347 }
348
getBarrier() const349 unsigned getBarrier() const {
350 assert(Kind == k_Barrier && "Invalid access!");
351 return Barrier.Val;
352 }
353
getBarrierName() const354 StringRef getBarrierName() const {
355 assert(Kind == k_Barrier && "Invalid access!");
356 return StringRef(Barrier.Data, Barrier.Length);
357 }
358
getReg() const359 unsigned getReg() const override {
360 assert(Kind == k_Register && "Invalid access!");
361 return Reg.RegNum;
362 }
363
getVectorListStart() const364 unsigned getVectorListStart() const {
365 assert(Kind == k_VectorList && "Invalid access!");
366 return VectorList.RegNum;
367 }
368
getVectorListCount() const369 unsigned getVectorListCount() const {
370 assert(Kind == k_VectorList && "Invalid access!");
371 return VectorList.Count;
372 }
373
getVectorIndex() const374 unsigned getVectorIndex() const {
375 assert(Kind == k_VectorIndex && "Invalid access!");
376 return VectorIndex.Val;
377 }
378
getSysReg() const379 StringRef getSysReg() const {
380 assert(Kind == k_SysReg && "Invalid access!");
381 return StringRef(SysReg.Data, SysReg.Length);
382 }
383
getSysCR() const384 unsigned getSysCR() const {
385 assert(Kind == k_SysCR && "Invalid access!");
386 return SysCRImm.Val;
387 }
388
getPrefetch() const389 unsigned getPrefetch() const {
390 assert(Kind == k_Prefetch && "Invalid access!");
391 return Prefetch.Val;
392 }
393
getPrefetchName() const394 StringRef getPrefetchName() const {
395 assert(Kind == k_Prefetch && "Invalid access!");
396 return StringRef(Prefetch.Data, Prefetch.Length);
397 }
398
getShiftExtendType() const399 AArch64_AM::ShiftExtendType getShiftExtendType() const {
400 assert(Kind == k_ShiftExtend && "Invalid access!");
401 return ShiftExtend.Type;
402 }
403
getShiftExtendAmount() const404 unsigned getShiftExtendAmount() const {
405 assert(Kind == k_ShiftExtend && "Invalid access!");
406 return ShiftExtend.Amount;
407 }
408
hasShiftExtendAmount() const409 bool hasShiftExtendAmount() const {
410 assert(Kind == k_ShiftExtend && "Invalid access!");
411 return ShiftExtend.HasExplicitAmount;
412 }
413
isImm() const414 bool isImm() const override { return Kind == k_Immediate; }
isMem() const415 bool isMem() const override { return false; }
isSImm9() const416 bool isSImm9() const {
417 if (!isImm())
418 return false;
419 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
420 if (!MCE)
421 return false;
422 int64_t Val = MCE->getValue();
423 return (Val >= -256 && Val < 256);
424 }
isSImm7s4() const425 bool isSImm7s4() const {
426 if (!isImm())
427 return false;
428 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
429 if (!MCE)
430 return false;
431 int64_t Val = MCE->getValue();
432 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
433 }
isSImm7s8() const434 bool isSImm7s8() const {
435 if (!isImm())
436 return false;
437 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
438 if (!MCE)
439 return false;
440 int64_t Val = MCE->getValue();
441 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
442 }
isSImm7s16() const443 bool isSImm7s16() const {
444 if (!isImm())
445 return false;
446 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
447 if (!MCE)
448 return false;
449 int64_t Val = MCE->getValue();
450 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
451 }
452
isSymbolicUImm12Offset(const MCExpr * Expr,unsigned Scale) const453 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
454 AArch64MCExpr::VariantKind ELFRefKind;
455 MCSymbolRefExpr::VariantKind DarwinRefKind;
456 int64_t Addend;
457 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
458 Addend)) {
459 // If we don't understand the expression, assume the best and
460 // let the fixup and relocation code deal with it.
461 return true;
462 }
463
464 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
465 ELFRefKind == AArch64MCExpr::VK_LO12 ||
466 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
467 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
468 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
469 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
470 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
471 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
472 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
473 // Note that we don't range-check the addend. It's adjusted modulo page
474 // size when converted, so there is no "out of range" condition when using
475 // @pageoff.
476 return Addend >= 0 && (Addend % Scale) == 0;
477 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
478 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
479 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
480 return Addend == 0;
481 }
482
483 return false;
484 }
485
isUImm12Offset() const486 template <int Scale> bool isUImm12Offset() const {
487 if (!isImm())
488 return false;
489
490 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
491 if (!MCE)
492 return isSymbolicUImm12Offset(getImm(), Scale);
493
494 int64_t Val = MCE->getValue();
495 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
496 }
497
isImm0_7() const498 bool isImm0_7() const {
499 if (!isImm())
500 return false;
501 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
502 if (!MCE)
503 return false;
504 int64_t Val = MCE->getValue();
505 return (Val >= 0 && Val < 8);
506 }
isImm1_8() const507 bool isImm1_8() const {
508 if (!isImm())
509 return false;
510 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
511 if (!MCE)
512 return false;
513 int64_t Val = MCE->getValue();
514 return (Val > 0 && Val < 9);
515 }
isImm0_15() const516 bool isImm0_15() const {
517 if (!isImm())
518 return false;
519 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
520 if (!MCE)
521 return false;
522 int64_t Val = MCE->getValue();
523 return (Val >= 0 && Val < 16);
524 }
isImm1_16() const525 bool isImm1_16() const {
526 if (!isImm())
527 return false;
528 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
529 if (!MCE)
530 return false;
531 int64_t Val = MCE->getValue();
532 return (Val > 0 && Val < 17);
533 }
isImm0_31() const534 bool isImm0_31() const {
535 if (!isImm())
536 return false;
537 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
538 if (!MCE)
539 return false;
540 int64_t Val = MCE->getValue();
541 return (Val >= 0 && Val < 32);
542 }
isImm1_31() const543 bool isImm1_31() const {
544 if (!isImm())
545 return false;
546 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
547 if (!MCE)
548 return false;
549 int64_t Val = MCE->getValue();
550 return (Val >= 1 && Val < 32);
551 }
isImm1_32() const552 bool isImm1_32() const {
553 if (!isImm())
554 return false;
555 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
556 if (!MCE)
557 return false;
558 int64_t Val = MCE->getValue();
559 return (Val >= 1 && Val < 33);
560 }
isImm0_63() const561 bool isImm0_63() const {
562 if (!isImm())
563 return false;
564 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
565 if (!MCE)
566 return false;
567 int64_t Val = MCE->getValue();
568 return (Val >= 0 && Val < 64);
569 }
isImm1_63() const570 bool isImm1_63() const {
571 if (!isImm())
572 return false;
573 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
574 if (!MCE)
575 return false;
576 int64_t Val = MCE->getValue();
577 return (Val >= 1 && Val < 64);
578 }
isImm1_64() const579 bool isImm1_64() const {
580 if (!isImm())
581 return false;
582 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
583 if (!MCE)
584 return false;
585 int64_t Val = MCE->getValue();
586 return (Val >= 1 && Val < 65);
587 }
isImm0_127() const588 bool isImm0_127() const {
589 if (!isImm())
590 return false;
591 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
592 if (!MCE)
593 return false;
594 int64_t Val = MCE->getValue();
595 return (Val >= 0 && Val < 128);
596 }
isImm0_255() const597 bool isImm0_255() const {
598 if (!isImm())
599 return false;
600 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
601 if (!MCE)
602 return false;
603 int64_t Val = MCE->getValue();
604 return (Val >= 0 && Val < 256);
605 }
isImm0_65535() const606 bool isImm0_65535() const {
607 if (!isImm())
608 return false;
609 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
610 if (!MCE)
611 return false;
612 int64_t Val = MCE->getValue();
613 return (Val >= 0 && Val < 65536);
614 }
isImm32_63() const615 bool isImm32_63() const {
616 if (!isImm())
617 return false;
618 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
619 if (!MCE)
620 return false;
621 int64_t Val = MCE->getValue();
622 return (Val >= 32 && Val < 64);
623 }
isLogicalImm32() const624 bool isLogicalImm32() const {
625 if (!isImm())
626 return false;
627 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
628 if (!MCE)
629 return false;
630 int64_t Val = MCE->getValue();
631 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
632 return false;
633 Val &= 0xFFFFFFFF;
634 return AArch64_AM::isLogicalImmediate(Val, 32);
635 }
isLogicalImm64() const636 bool isLogicalImm64() const {
637 if (!isImm())
638 return false;
639 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
640 if (!MCE)
641 return false;
642 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
643 }
isLogicalImm32Not() const644 bool isLogicalImm32Not() const {
645 if (!isImm())
646 return false;
647 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
648 if (!MCE)
649 return false;
650 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
651 return AArch64_AM::isLogicalImmediate(Val, 32);
652 }
isLogicalImm64Not() const653 bool isLogicalImm64Not() const {
654 if (!isImm())
655 return false;
656 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
657 if (!MCE)
658 return false;
659 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
660 }
isShiftedImm() const661 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
isAddSubImm() const662 bool isAddSubImm() const {
663 if (!isShiftedImm() && !isImm())
664 return false;
665
666 const MCExpr *Expr;
667
668 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
669 if (isShiftedImm()) {
670 unsigned Shift = ShiftedImm.ShiftAmount;
671 Expr = ShiftedImm.Val;
672 if (Shift != 0 && Shift != 12)
673 return false;
674 } else {
675 Expr = getImm();
676 }
677
678 AArch64MCExpr::VariantKind ELFRefKind;
679 MCSymbolRefExpr::VariantKind DarwinRefKind;
680 int64_t Addend;
681 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
682 DarwinRefKind, Addend)) {
683 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
684 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
685 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
686 || ELFRefKind == AArch64MCExpr::VK_LO12
687 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
688 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
689 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
690 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
691 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
692 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
693 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
694 }
695
696 // Otherwise it should be a real immediate in range:
697 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
698 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
699 }
isCondCode() const700 bool isCondCode() const { return Kind == k_CondCode; }
isSIMDImmType10() const701 bool isSIMDImmType10() const {
702 if (!isImm())
703 return false;
704 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
705 if (!MCE)
706 return false;
707 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
708 }
isBranchTarget26() const709 bool isBranchTarget26() const {
710 if (!isImm())
711 return false;
712 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
713 if (!MCE)
714 return true;
715 int64_t Val = MCE->getValue();
716 if (Val & 0x3)
717 return false;
718 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
719 }
isPCRelLabel19() const720 bool isPCRelLabel19() const {
721 if (!isImm())
722 return false;
723 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
724 if (!MCE)
725 return true;
726 int64_t Val = MCE->getValue();
727 if (Val & 0x3)
728 return false;
729 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
730 }
isBranchTarget14() const731 bool isBranchTarget14() const {
732 if (!isImm())
733 return false;
734 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
735 if (!MCE)
736 return true;
737 int64_t Val = MCE->getValue();
738 if (Val & 0x3)
739 return false;
740 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
741 }
742
743 bool
isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const744 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
745 if (!isImm())
746 return false;
747
748 AArch64MCExpr::VariantKind ELFRefKind;
749 MCSymbolRefExpr::VariantKind DarwinRefKind;
750 int64_t Addend;
751 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
752 DarwinRefKind, Addend)) {
753 return false;
754 }
755 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
756 return false;
757
758 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
759 if (ELFRefKind == AllowedModifiers[i])
760 return Addend == 0;
761 }
762
763 return false;
764 }
765
isMovZSymbolG3() const766 bool isMovZSymbolG3() const {
767 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
768 }
769
isMovZSymbolG2() const770 bool isMovZSymbolG2() const {
771 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
772 AArch64MCExpr::VK_TPREL_G2,
773 AArch64MCExpr::VK_DTPREL_G2});
774 }
775
isMovZSymbolG1() const776 bool isMovZSymbolG1() const {
777 return isMovWSymbol({
778 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
779 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
780 AArch64MCExpr::VK_DTPREL_G1,
781 });
782 }
783
isMovZSymbolG0() const784 bool isMovZSymbolG0() const {
785 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
786 AArch64MCExpr::VK_TPREL_G0,
787 AArch64MCExpr::VK_DTPREL_G0});
788 }
789
isMovKSymbolG3() const790 bool isMovKSymbolG3() const {
791 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
792 }
793
isMovKSymbolG2() const794 bool isMovKSymbolG2() const {
795 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
796 }
797
isMovKSymbolG1() const798 bool isMovKSymbolG1() const {
799 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
800 AArch64MCExpr::VK_TPREL_G1_NC,
801 AArch64MCExpr::VK_DTPREL_G1_NC});
802 }
803
isMovKSymbolG0() const804 bool isMovKSymbolG0() const {
805 return isMovWSymbol(
806 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
807 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
808 }
809
810 template<int RegWidth, int Shift>
isMOVZMovAlias() const811 bool isMOVZMovAlias() const {
812 if (!isImm()) return false;
813
814 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
815 if (!CE) return false;
816 uint64_t Value = CE->getValue();
817
818 if (RegWidth == 32)
819 Value &= 0xffffffffULL;
820
821 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
822 if (Value == 0 && Shift != 0)
823 return false;
824
825 return (Value & ~(0xffffULL << Shift)) == 0;
826 }
827
828 template<int RegWidth, int Shift>
isMOVNMovAlias() const829 bool isMOVNMovAlias() const {
830 if (!isImm()) return false;
831
832 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
833 if (!CE) return false;
834 uint64_t Value = CE->getValue();
835
836 // MOVZ takes precedence over MOVN.
837 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
838 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
839 return false;
840
841 Value = ~Value;
842 if (RegWidth == 32)
843 Value &= 0xffffffffULL;
844
845 return (Value & ~(0xffffULL << Shift)) == 0;
846 }
847
isFPImm() const848 bool isFPImm() const { return Kind == k_FPImm; }
isBarrier() const849 bool isBarrier() const { return Kind == k_Barrier; }
isSysReg() const850 bool isSysReg() const { return Kind == k_SysReg; }
isMRSSystemRegister() const851 bool isMRSSystemRegister() const {
852 if (!isSysReg()) return false;
853
854 return SysReg.MRSReg != -1U;
855 }
isMSRSystemRegister() const856 bool isMSRSystemRegister() const {
857 if (!isSysReg()) return false;
858
859 return SysReg.MSRReg != -1U;
860 }
isSystemPStateField() const861 bool isSystemPStateField() const {
862 if (!isSysReg()) return false;
863
864 return SysReg.PStateField != -1U;
865 }
isReg() const866 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
isVectorReg() const867 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
isVectorRegLo() const868 bool isVectorRegLo() const {
869 return Kind == k_Register && Reg.isVector &&
870 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
871 Reg.RegNum);
872 }
isGPR32as64() const873 bool isGPR32as64() const {
874 return Kind == k_Register && !Reg.isVector &&
875 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
876 }
877
isGPR64sp0() const878 bool isGPR64sp0() const {
879 return Kind == k_Register && !Reg.isVector &&
880 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
881 }
882
883 /// Is this a vector list with the type implicit (presumably attached to the
884 /// instruction itself)?
isImplicitlyTypedVectorList() const885 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
886 return Kind == k_VectorList && VectorList.Count == NumRegs &&
887 !VectorList.ElementKind;
888 }
889
890 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
isTypedVectorList() const891 bool isTypedVectorList() const {
892 if (Kind != k_VectorList)
893 return false;
894 if (VectorList.Count != NumRegs)
895 return false;
896 if (VectorList.ElementKind != ElementKind)
897 return false;
898 return VectorList.NumElements == NumElements;
899 }
900
isVectorIndex1() const901 bool isVectorIndex1() const {
902 return Kind == k_VectorIndex && VectorIndex.Val == 1;
903 }
isVectorIndexB() const904 bool isVectorIndexB() const {
905 return Kind == k_VectorIndex && VectorIndex.Val < 16;
906 }
isVectorIndexH() const907 bool isVectorIndexH() const {
908 return Kind == k_VectorIndex && VectorIndex.Val < 8;
909 }
isVectorIndexS() const910 bool isVectorIndexS() const {
911 return Kind == k_VectorIndex && VectorIndex.Val < 4;
912 }
isVectorIndexD() const913 bool isVectorIndexD() const {
914 return Kind == k_VectorIndex && VectorIndex.Val < 2;
915 }
isToken() const916 bool isToken() const override { return Kind == k_Token; }
isTokenEqual(StringRef Str) const917 bool isTokenEqual(StringRef Str) const {
918 return Kind == k_Token && getToken() == Str;
919 }
isSysCR() const920 bool isSysCR() const { return Kind == k_SysCR; }
isPrefetch() const921 bool isPrefetch() const { return Kind == k_Prefetch; }
isShiftExtend() const922 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
isShifter() const923 bool isShifter() const {
924 if (!isShiftExtend())
925 return false;
926
927 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
928 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
929 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
930 ST == AArch64_AM::MSL);
931 }
isExtend() const932 bool isExtend() const {
933 if (!isShiftExtend())
934 return false;
935
936 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
937 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
938 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
939 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
940 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
941 ET == AArch64_AM::LSL) &&
942 getShiftExtendAmount() <= 4;
943 }
944
isExtend64() const945 bool isExtend64() const {
946 if (!isExtend())
947 return false;
948 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
949 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
950 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
951 }
isExtendLSL64() const952 bool isExtendLSL64() const {
953 if (!isExtend())
954 return false;
955 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
956 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
957 ET == AArch64_AM::LSL) &&
958 getShiftExtendAmount() <= 4;
959 }
960
isMemXExtend() const961 template<int Width> bool isMemXExtend() const {
962 if (!isExtend())
963 return false;
964 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
965 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
966 (getShiftExtendAmount() == Log2_32(Width / 8) ||
967 getShiftExtendAmount() == 0);
968 }
969
isMemWExtend() const970 template<int Width> bool isMemWExtend() const {
971 if (!isExtend())
972 return false;
973 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
974 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
975 (getShiftExtendAmount() == Log2_32(Width / 8) ||
976 getShiftExtendAmount() == 0);
977 }
978
979 template <unsigned width>
isArithmeticShifter() const980 bool isArithmeticShifter() const {
981 if (!isShifter())
982 return false;
983
984 // An arithmetic shifter is LSL, LSR, or ASR.
985 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
986 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
987 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
988 }
989
  // True for a shifter usable by logical (and/orr/eor) instructions: LSL,
  // LSR, ASR, or ROR with an amount strictly below the register width.
  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }
1001
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16 (the 32-bit form has only two
    // 16-bit halfword positions).
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }
1013
  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48 (one per 16-bit
    // halfword position).
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1025
  // True for the shifter of vector logical immediates (byte-granular LSL).
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }
1035
  // Halfword variant: restricts the vector logical shifter to the two
  // positions that fit a 16-bit element.
  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A halfword logical vector shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }
1045
  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL (shift-with-ones) by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1055
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm9() && !isUImm12Offset<Width / 8>();
  }
1065
isAdrpLabel() const1066 bool isAdrpLabel() const {
1067 // Validation was handled during parsing, so we just sanity check that
1068 // something didn't go haywire.
1069 if (!isImm())
1070 return false;
1071
1072 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1073 int64_t Val = CE->getValue();
1074 int64_t Min = - (4096 * (1LL << (21 - 1)));
1075 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1076 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1077 }
1078
1079 return true;
1080 }
1081
isAdrLabel() const1082 bool isAdrLabel() const {
1083 // Validation was handled during parsing, so we just sanity check that
1084 // something didn't go haywire.
1085 if (!isImm())
1086 return false;
1087
1088 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1089 int64_t Val = CE->getValue();
1090 int64_t Min = - (1LL << (21 - 1));
1091 int64_t Max = ((1LL << (21 - 1)) - 1);
1092 return Val >= Min && Val <= Max;
1093 }
1094
1095 return true;
1096 }
1097
  // Append Expr to Inst, folding constants to plain immediates. A null Expr
  // is treated as immediate 0.
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible. Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::CreateImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }
1107
  // Append this operand's register to Inst unchanged.
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }
1112
  // The operand holds a 64-bit GPR but the instruction wants its 32-bit
  // alias; translate via matching encoding values in the two register
  // classes.
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::CreateReg(Reg));
  }
1124
  // Vector registers are stored Q-numbered; emit the corresponding
  // D register for 64-bit vector forms.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));
  }
1131
  // 128-bit vector form: the stored Q register is emitted directly.
  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }
1138
  // Emit the register as-is for the restricted "lo" vector register class.
  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }
1143
1144 template <unsigned NumRegs>
addVectorList64Operands(MCInst & Inst,unsigned N) const1145 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1146 assert(N == 1 && "Invalid number of operands!");
1147 static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
1148 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1149 unsigned FirstReg = FirstRegs[NumRegs - 1];
1150
1151 Inst.addOperand(
1152 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1153 }
1154
1155 template <unsigned NumRegs>
addVectorList128Operands(MCInst & Inst,unsigned N) const1156 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1157 assert(N == 1 && "Invalid number of operands!");
1158 static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
1159 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1160 unsigned FirstReg = FirstRegs[NumRegs - 1];
1161
1162 Inst.addOperand(
1163 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1164 }
1165
  // Emit the vector lane index as an immediate.
  void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }
1170
  // Emit the byte-element lane index as an immediate.
  void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }
1175
  // Emit the halfword-element lane index as an immediate.
  void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }
1180
  // Emit the word-element lane index as an immediate.
  void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }
1185
  // Emit the doubleword-element lane index as an immediate.
  void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }
1190
  // Emit a generic immediate operand, folding constants via addExpr.
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }
1198
  // Emit the two-operand add/sub immediate form: the value plus its LSL
  // shift amount (0 when the operand is an unshifted immediate).
  void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
    }
  }
1209
  // Emit the condition code as an immediate.
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCondCode()));
  }
1214
  // ADRP encodes a page count, so constant byte offsets are scaled down by
  // 4096; non-constant expressions are added unchanged for later fixup.
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
  }
1223
  // ADR targets are byte offsets; no scaling needed, reuse addImmOperands.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
1227
  // Emit a scaled unsigned 12-bit offset: constants are divided by Scale
  // (the access size in bytes); other expressions pass through for fixups.
  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
  }
1239
  // Emit an unscaled signed 9-bit offset; cast<> asserts the operand is a
  // constant expression.
  void addSImm9Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1245
  // Emit a signed 7-bit offset scaled by 4 (value divided by 4 on encode).
  void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
  }
1251
  // Emit a signed 7-bit offset scaled by 8 (value divided by 8 on encode).
  void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
  }
1257
  // Emit a signed 7-bit offset scaled by 16 (value divided by 16 on encode).
  void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
  }
1263
  // Emit the constant immediate; cast<> asserts it is an MCConstantExpr.
  void addImm0_7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1269
  // Emit the constant immediate; cast<> asserts it is an MCConstantExpr.
  void addImm1_8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1275
  // Emit the constant immediate; cast<> asserts it is an MCConstantExpr.
  void addImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1281
  // Emit the constant immediate; cast<> asserts it is an MCConstantExpr.
  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1288
  // Emit the constant immediate; cast<> asserts it is an MCConstantExpr.
  void addImm0_31Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1294
  // Emit the constant immediate; cast<> asserts it is an MCConstantExpr.
  void addImm1_31Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1300
  // Emit the constant immediate; cast<> asserts it is an MCConstantExpr.
  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1306
  // Emit the constant immediate; cast<> asserts it is an MCConstantExpr.
  void addImm0_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1312
  // Emit the constant immediate; cast<> asserts it is an MCConstantExpr.
  void addImm1_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1318
  // Emit the constant immediate; cast<> asserts it is an MCConstantExpr.
  void addImm1_64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1324
  // Emit the constant immediate; cast<> asserts it is an MCConstantExpr.
  void addImm0_127Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1330
  // Emit the constant immediate; cast<> asserts it is an MCConstantExpr.
  void addImm0_255Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1336
  // Emit the constant immediate; cast<> asserts it is an MCConstantExpr.
  void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1342
  // Emit the constant immediate; cast<> asserts it is an MCConstantExpr.
  void addImm32_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1348
  // Encode a 32-bit logical (bitmask) immediate into its N:immr:imms form.
  // The value is masked to 32 bits before encoding.
  void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding =
        AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }
1356
  // Encode a 64-bit logical (bitmask) immediate into its N:immr:imms form.
  void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }
1363
  // Encode the bitwise complement of a 32-bit logical immediate (used by
  // aliases like BIC that invert the written value).
  void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }
1371
  // Encode the bitwise complement of a 64-bit logical immediate.
  void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding =
        AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }
1379
  // Encode an AdvSIMD modified-immediate of type 10 (the 64-bit
  // byte-mask form) from the constant value.
  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }
1386
addBranchTarget26Operands(MCInst & Inst,unsigned N) const1387 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1388 // Branch operands don't encode the low bits, so shift them off
1389 // here. If it's a label, however, just put it on directly as there's
1390 // not enough information now to do anything.
1391 assert(N == 1 && "Invalid number of operands!");
1392 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1393 if (!MCE) {
1394 addExpr(Inst, getImm());
1395 return;
1396 }
1397 assert(MCE && "Invalid constant immediate operand!");
1398 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1399 }
1400
addPCRelLabel19Operands(MCInst & Inst,unsigned N) const1401 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1402 // Branch operands don't encode the low bits, so shift them off
1403 // here. If it's a label, however, just put it on directly as there's
1404 // not enough information now to do anything.
1405 assert(N == 1 && "Invalid number of operands!");
1406 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1407 if (!MCE) {
1408 addExpr(Inst, getImm());
1409 return;
1410 }
1411 assert(MCE && "Invalid constant immediate operand!");
1412 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1413 }
1414
addBranchTarget14Operands(MCInst & Inst,unsigned N) const1415 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1416 // Branch operands don't encode the low bits, so shift them off
1417 // here. If it's a label, however, just put it on directly as there's
1418 // not enough information now to do anything.
1419 assert(N == 1 && "Invalid number of operands!");
1420 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1421 if (!MCE) {
1422 addExpr(Inst, getImm());
1423 return;
1424 }
1425 assert(MCE && "Invalid constant immediate operand!");
1426 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1427 }
1428
  // Emit the encoded 8-bit FP immediate value.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
  }
1433
  // Emit the barrier option value (DMB/DSB/ISB operand).
  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getBarrier()));
  }
1438
  // Emit the MRS (read) encoding of the system register.
  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(SysReg.MRSReg));
  }
1444
  // Emit the MSR (write) encoding of the system register.
  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(SysReg.MSRReg));
  }
1450
  // Emit the PState field encoding (MSR immediate form).
  void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(SysReg.PStateField));
  }
1456
  // Emit the system control register (Cn/Cm) field value.
  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getSysCR()));
  }
1461
  // Emit the prefetch operation (prfop) value.
  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
  }
1466
  // Pack shift type and amount into the single shifter-immediate encoding.
  void addShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm =
        AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }
1473
  // Pack an arithmetic extend into its immediate encoding. A plain LSL on a
  // 32-bit source is canonicalized to UXTW before encoding.
  void addExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }
1481
  // 64-bit variant: a plain LSL is canonicalized to UXTX before encoding.
  void addExtend64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }
1489
  // Emit the two memory-extend operands: a signed-extend flag (SXTW/SXTX)
  // and a do-shift flag (non-zero shift amount).
  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::CreateImm(IsSigned));
    Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
  }
1497
  // For 8-bit load/store instructions with a register offset, both the
  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
  // they're disambiguated by whether the shift was explicit or implicit rather
  // than its size.
  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::CreateImm(IsSigned));
    // Second operand records whether the programmer wrote "#0" explicitly.
    Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
  }
1509
  // For the MOV alias of MOVZ: extract the 16-bit field of the value at the
  // given shift position.
  template<int Shift>
  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
  }
1518
  // For the MOV alias of MOVN: extract the 16-bit field of the *inverted*
  // value at the given shift position (MOVN writes the complement).
  template<int Shift>
  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
  }
1527
1528 void print(raw_ostream &OS) const override;
1529
  // Build a token operand over Str (not copied — Str must outlive the
  // operand). IsSuffix marks mnemonic-suffix tokens.
  static std::unique_ptr<AArch64Operand>
  CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->Tok.IsSuffix = IsSuffix;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
1540
  // Build a register operand; isVector distinguishes vector registers from
  // general-purpose ones.
  static std::unique_ptr<AArch64Operand>
  CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
    Op->Reg.RegNum = RegNum;
    Op->Reg.isVector = isVector;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1550
  // Build a vector-list operand: Count consecutive registers starting at
  // RegNum, with the given per-register element count and element kind
  // (e.g. 'b', 'h', 's', 'd').
  static std::unique_ptr<AArch64Operand>
  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
                   char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.NumElements = NumElements;
    Op->VectorList.ElementKind = ElementKind;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1563
  // Build a vector lane-index operand.
  static std::unique_ptr<AArch64Operand>
  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1572
  // Build an immediate operand holding an arbitrary MCExpr.
  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
                                                   SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1581
CreateShiftedImm(const MCExpr * Val,unsigned ShiftAmount,SMLoc S,SMLoc E,MCContext & Ctx)1582 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1583 unsigned ShiftAmount,
1584 SMLoc S, SMLoc E,
1585 MCContext &Ctx) {
1586 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1587 Op->ShiftedImm .Val = Val;
1588 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1589 Op->StartLoc = S;
1590 Op->EndLoc = E;
1591 return Op;
1592 }
1593
  // Build a condition-code operand.
  static std::unique_ptr<AArch64Operand>
  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
    Op->CondCode.Code = Code;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1602
  // Build an FP-immediate operand from its 8-bit encoded value.
  static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
                                                     MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
    Op->FPImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
1611
  // Build a barrier-option operand; Str (the spelling) is referenced, not
  // copied, so it must outlive the operand.
  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
    Op->Barrier.Val = Val;
    Op->Barrier.Data = Str.data();
    Op->Barrier.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
1624
  // Build a system-register operand carrying all three possible encodings
  // (MRS read form, MSR write form, and the MSR-immediate PState field);
  // which one is used depends on the instruction that matches.
  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
                                                      uint32_t MRSReg,
                                                      uint32_t MSRReg,
                                                      uint32_t PStateField,
                                                      MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
    Op->SysReg.Data = Str.data();
    Op->SysReg.Length = Str.size();
    Op->SysReg.MRSReg = MRSReg;
    Op->SysReg.MSRReg = MSRReg;
    Op->SysReg.PStateField = PStateField;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
1640
  // Build a system control-register field operand (the Cn/Cm of SYS/SYSL).
  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
                                                     SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
    Op->SysCRImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1649
  // Build a prefetch-operation (prfop) operand from its encoded value and
  // spelling.
  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
                                                        StringRef Str,
                                                        SMLoc S,
                                                        MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
    Op->Prefetch.Val = Val;
    // NOTE(review): the name string is stored through the Barrier union
    // member even though this is a k_Prefetch operand — presumably the two
    // structs share a layout. Confirm against the union declaration that
    // getPrefetchName() reads the same fields.
    Op->Barrier.Data = Str.data();
    Op->Barrier.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
1662
  // Build a shift/extend operand. HasExplicitAmount records whether the
  // amount was written by the programmer (needed to disambiguate some
  // load/store forms).
  static std::unique_ptr<AArch64Operand>
  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
                    bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
    Op->ShiftExtend.Type = ShOp;
    Op->ShiftExtend.Amount = Val;
    Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1674 };
1675
1676 } // end anonymous namespace.
1677
// Debug-dump this operand in a human-readable "<kind ...>" form. The exact
// text is only for diagnostics; nothing parses it back.
void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImm:
    // Print both the 8-bit encoding and its decoded float value.
    OS << "<fpimm " << getFPImm() << "("
       << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
    break;
  case k_Barrier: {
    // Prefer the symbolic name; fall back to the raw value for unknown
    // encodings.
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    getShiftedImmVal()->print(OS);
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_VectorList: {
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    // Prefer the symbolic prfop name; fall back to the raw value.
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_ShiftExtend: {
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    // "<imp>" flags an implicit (unwritten) shift amount.
    if (!hasShiftExtendAmount())
      OS << "<imp>";
    OS << '>';
    break;
  }
  }
}
1746
1747 /// @name Auto-generated Match Functions
1748 /// {
1749
1750 static unsigned MatchRegisterName(StringRef Name);
1751
1752 /// }
1753
matchVectorRegName(StringRef Name)1754 static unsigned matchVectorRegName(StringRef Name) {
1755 return StringSwitch<unsigned>(Name)
1756 .Case("v0", AArch64::Q0)
1757 .Case("v1", AArch64::Q1)
1758 .Case("v2", AArch64::Q2)
1759 .Case("v3", AArch64::Q3)
1760 .Case("v4", AArch64::Q4)
1761 .Case("v5", AArch64::Q5)
1762 .Case("v6", AArch64::Q6)
1763 .Case("v7", AArch64::Q7)
1764 .Case("v8", AArch64::Q8)
1765 .Case("v9", AArch64::Q9)
1766 .Case("v10", AArch64::Q10)
1767 .Case("v11", AArch64::Q11)
1768 .Case("v12", AArch64::Q12)
1769 .Case("v13", AArch64::Q13)
1770 .Case("v14", AArch64::Q14)
1771 .Case("v15", AArch64::Q15)
1772 .Case("v16", AArch64::Q16)
1773 .Case("v17", AArch64::Q17)
1774 .Case("v18", AArch64::Q18)
1775 .Case("v19", AArch64::Q19)
1776 .Case("v20", AArch64::Q20)
1777 .Case("v21", AArch64::Q21)
1778 .Case("v22", AArch64::Q22)
1779 .Case("v23", AArch64::Q23)
1780 .Case("v24", AArch64::Q24)
1781 .Case("v25", AArch64::Q25)
1782 .Case("v26", AArch64::Q26)
1783 .Case("v27", AArch64::Q27)
1784 .Case("v28", AArch64::Q28)
1785 .Case("v29", AArch64::Q29)
1786 .Case("v30", AArch64::Q30)
1787 .Case("v31", AArch64::Q31)
1788 .Default(0);
1789 }
1790
isValidVectorKind(StringRef Name)1791 static bool isValidVectorKind(StringRef Name) {
1792 return StringSwitch<bool>(Name.lower())
1793 .Case(".8b", true)
1794 .Case(".16b", true)
1795 .Case(".4h", true)
1796 .Case(".8h", true)
1797 .Case(".2s", true)
1798 .Case(".4s", true)
1799 .Case(".1d", true)
1800 .Case(".2d", true)
1801 .Case(".1q", true)
1802 // Accept the width neutral ones, too, for verbose syntax. If those
1803 // aren't used in the right places, the token operand won't match so
1804 // all will work out.
1805 .Case(".b", true)
1806 .Case(".h", true)
1807 .Case(".s", true)
1808 .Case(".d", true)
1809 .Default(false);
1810 }
1811
parseValidVectorKind(StringRef Name,unsigned & NumElements,char & ElementKind)1812 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1813 char &ElementKind) {
1814 assert(isValidVectorKind(Name));
1815
1816 ElementKind = Name.lower()[Name.size() - 1];
1817 NumElements = 0;
1818
1819 if (Name.size() == 2)
1820 return;
1821
1822 // Parse the lane count
1823 Name = Name.drop_front();
1824 while (isdigit(Name.front())) {
1825 NumElements = 10 * NumElements + (Name.front() - '0');
1826 Name = Name.drop_front();
1827 }
1828 }
1829
ParseRegister(unsigned & RegNo,SMLoc & StartLoc,SMLoc & EndLoc)1830 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1831 SMLoc &EndLoc) {
1832 StartLoc = getLoc();
1833 RegNo = tryParseRegister();
1834 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1835 return (RegNo == (unsigned)-1);
1836 }
1837
1838 // Matches a register name or register alias previously defined by '.req'
matchRegisterNameAlias(StringRef Name,bool isVector)1839 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1840 bool isVector) {
1841 unsigned RegNum = isVector ? matchVectorRegName(Name)
1842 : MatchRegisterName(Name);
1843
1844 if (RegNum == 0) {
1845 // Check for aliases registered via .req. Canonicalize to lower case.
1846 // That's more consistent since register names are case insensitive, and
1847 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1848 auto Entry = RegisterReqs.find(Name.lower());
1849 if (Entry == RegisterReqs.end())
1850 return 0;
1851 // set RegNum if the match is the right kind of register
1852 if (isVector == Entry->getValue().first)
1853 RegNum = Entry->getValue().second;
1854 }
1855 return RegNum;
1856 }
1857
1858 /// tryParseRegister - Try to parse a register name. The token must be an
1859 /// Identifier when called, and if it is a register name the token is eaten and
1860 /// the register is added to the operand list.
tryParseRegister()1861 int AArch64AsmParser::tryParseRegister() {
1862 MCAsmParser &Parser = getParser();
1863 const AsmToken &Tok = Parser.getTok();
1864 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1865
1866 std::string lowerCase = Tok.getString().lower();
1867 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1868 // Also handle a few aliases of registers.
1869 if (RegNum == 0)
1870 RegNum = StringSwitch<unsigned>(lowerCase)
1871 .Case("fp", AArch64::FP)
1872 .Case("lr", AArch64::LR)
1873 .Case("x31", AArch64::XZR)
1874 .Case("w31", AArch64::WZR)
1875 .Default(0);
1876
1877 if (RegNum == 0)
1878 return -1;
1879
1880 Parser.Lex(); // Eat identifier token.
1881 return RegNum;
1882 }
1883
1884 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1885 /// kind specifier. If it is a register specifier, eat the token and return it.
tryMatchVectorRegister(StringRef & Kind,bool expected)1886 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1887 MCAsmParser &Parser = getParser();
1888 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1889 TokError("vector register expected");
1890 return -1;
1891 }
1892
1893 StringRef Name = Parser.getTok().getString();
1894 // If there is a kind specifier, it's separated from the register name by
1895 // a '.'.
1896 size_t Start = 0, Next = Name.find('.');
1897 StringRef Head = Name.slice(Start, Next);
1898 unsigned RegNum = matchRegisterNameAlias(Head, true);
1899
1900 if (RegNum) {
1901 if (Next != StringRef::npos) {
1902 Kind = Name.slice(Next, StringRef::npos);
1903 if (!isValidVectorKind(Kind)) {
1904 TokError("invalid vector kind qualifier");
1905 return -1;
1906 }
1907 }
1908 Parser.Lex(); // Eat the register token.
1909 return RegNum;
1910 }
1911
1912 if (expected)
1913 TokError("vector register expected");
1914 return -1;
1915 }
1916
1917 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1918 AArch64AsmParser::OperandMatchResultTy
tryParseSysCROperand(OperandVector & Operands)1919 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1920 MCAsmParser &Parser = getParser();
1921 SMLoc S = getLoc();
1922
1923 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1924 Error(S, "Expected cN operand where 0 <= N <= 15");
1925 return MatchOperand_ParseFail;
1926 }
1927
1928 StringRef Tok = Parser.getTok().getIdentifier();
1929 if (Tok[0] != 'c' && Tok[0] != 'C') {
1930 Error(S, "Expected cN operand where 0 <= N <= 15");
1931 return MatchOperand_ParseFail;
1932 }
1933
1934 uint32_t CRNum;
1935 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1936 if (BadNum || CRNum > 15) {
1937 Error(S, "Expected cN operand where 0 <= N <= 15");
1938 return MatchOperand_ParseFail;
1939 }
1940
1941 Parser.Lex(); // Eat identifier token.
1942 Operands.push_back(
1943 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1944 return MatchOperand_Success;
1945 }
1946
/// tryParsePrefetch - Try to parse a prefetch operand: either a
/// (possibly '#'-prefixed) immediate in [0,31] or a named prefetch hint.
/// Pushes a prefetch operand on success.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const AsmToken &Tok = Parser.getTok();
  // Either an identifier for named values or a 5-bit immediate.
  bool Hash = Tok.is(AsmToken::Hash);
  if (Hash || Tok.is(AsmToken::Integer)) {
    if (Hash)
      Parser.Lex(); // Eat hash token.
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;

    // Only a fully-resolved constant is meaningful as a prefetch operation.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for prefetch operand");
      return MatchOperand_ParseFail;
    }
    unsigned prfop = MCE->getValue();
    if (prfop > 31) {
      TokError("prefetch operand out of range, [0,31] expected");
      return MatchOperand_ParseFail;
    }

    // Map the value back to its symbolic name (may be empty for values with
    // no named hint) so printing can use the mnemonic form.
    bool Valid;
    auto Mapper = AArch64PRFM::PRFMMapper();
    StringRef Name =
        Mapper.toString(MCE->getValue(), STI.getFeatureBits(), Valid);
    Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Name,
                                                      S, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("pre-fetch hint expected");
    return MatchOperand_ParseFail;
  }

  // Named hint: map the identifier to its 5-bit encoding.
  bool Valid;
  auto Mapper = AArch64PRFM::PRFMMapper();
  unsigned prfop =
      Mapper.fromString(Tok.getString(), STI.getFeatureBits(), Valid);
  if (!Valid) {
    TokError("pre-fetch hint expected");
    return MatchOperand_ParseFail;
  }

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Tok.getString(),
                                                    S, getContext()));
  return MatchOperand_Success;
}
2001
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction. The label may carry a @page/@gotpage-style modifier; an
/// unmodified symbol is treated as the basic ELF ADRP relocation.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const MCExpr *Expr;

  // An optional '#' may precede the expression.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  // Classify the reference and validate that the modifier (if any) is one
  // that makes sense on an ADRP. Non-symbolic expressions fall through
  // unchecked.
  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      // Darwin @gotpage/@tlvppage references must not carry an addend.
      Error(S, "gotpage label reference not allowed an addend");
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      Error(S, "page or gotpage label reference expected");
      return MatchOperand_ParseFail;
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
}
2052
2053 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2054 /// instruction.
2055 AArch64AsmParser::OperandMatchResultTy
tryParseAdrLabel(OperandVector & Operands)2056 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2057 MCAsmParser &Parser = getParser();
2058 SMLoc S = getLoc();
2059 const MCExpr *Expr;
2060
2061 if (Parser.getTok().is(AsmToken::Hash)) {
2062 Parser.Lex(); // Eat hash token.
2063 }
2064
2065 if (getParser().parseExpression(Expr))
2066 return MatchOperand_ParseFail;
2067
2068 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2069 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2070
2071 return MatchOperand_Success;
2072 }
2073
/// tryParseFPImm - A floating point immediate expression operand.
/// Accepts "#1.0"-style reals, "#0x.."-style pre-encoded 8-bit values, and
/// bare decimal integers interpreted as FP values.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  bool Hash = false;
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat '#'
    Hash = true;
  }

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  if (Tok.is(AsmToken::Real)) {
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
    if (isNegative)
      RealVal.changeSign();

    // Re-encode the double's bit pattern as the 8-bit FP immediate;
    // getFP64Imm returns -1 when the value is not exactly representable.
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
    Parser.Lex(); // Eat the token.
    // Check for out of range values. As an exception, we let Zero through,
    // as we handle that special case in post-processing before matching in
    // order to use the zero register for it.
    if (Val == -1 && !RealVal.isPosZero()) {
      TokError("expected compatible register or floating-point constant");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }
  if (Tok.is(AsmToken::Integer)) {
    int64_t Val;
    if (!isNegative && Tok.getString().startswith("0x")) {
      // A hex literal is the already-encoded 8-bit immediate value.
      Val = Tok.getIntVal();
      if (Val > 255 || Val < 0) {
        TokError("encoded floating point value out of range");
        return MatchOperand_ParseFail;
      }
    } else {
      // A decimal integer (e.g. "#1") is interpreted as an FP value and
      // re-encoded through the same path as a Real token.
      APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      // If we had a '-' in front, toggle the sign bit.
      IntVal ^= (uint64_t)isNegative << 63;
      Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
    }
    Parser.Lex(); // Eat the token.
    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }

  // Without a leading '#' there is nothing committing us to an FP immediate,
  // so let other parsers have a go.
  if (!Hash)
    return MatchOperand_NoMatch;

  TokError("invalid floating point immediate");
  return MatchOperand_ParseFail;
}
2137
/// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand: an immediate
/// optionally followed by ", lsl #N". Large constants with clear low 12 bits
/// are folded into the "imm, lsl #12" form automatically.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat '#'
  else if (Parser.getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;

  const MCExpr *Imm;
  if (parseSymbolicImmVal(Imm))
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    // No explicit shift follows.
    uint64_t ShiftAmount = 0;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
    if (MCE) {
      // A constant that doesn't fit in 12 bits but whose low 12 bits are
      // zero is rewritten as the equivalent shifted form.
      int64_t Val = MCE->getValue();
      if (Val > 0xfff && (Val & 0xfff) == 0) {
        Imm = MCConstantExpr::Create(Val >> 12, getContext());
        ShiftAmount = 12;
      }
    }
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
                                                        getContext()));
    return MatchOperand_Success;
  }

  // Eat ','
  Parser.Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!Parser.getTok().is(AsmToken::Identifier) ||
      !Parser.getTok().getIdentifier().equals_lower("lsl")) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  // Eat 'lsl'
  Parser.Lex();

  // The '#' before the shift amount is optional.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex();
  }

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
                                                      S, E, getContext()));
  return MatchOperand_Success;
}
2204
2205 /// parseCondCodeString - Parse a Condition Code string.
parseCondCodeString(StringRef Cond)2206 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2207 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2208 .Case("eq", AArch64CC::EQ)
2209 .Case("ne", AArch64CC::NE)
2210 .Case("cs", AArch64CC::HS)
2211 .Case("hs", AArch64CC::HS)
2212 .Case("cc", AArch64CC::LO)
2213 .Case("lo", AArch64CC::LO)
2214 .Case("mi", AArch64CC::MI)
2215 .Case("pl", AArch64CC::PL)
2216 .Case("vs", AArch64CC::VS)
2217 .Case("vc", AArch64CC::VC)
2218 .Case("hi", AArch64CC::HI)
2219 .Case("ls", AArch64CC::LS)
2220 .Case("ge", AArch64CC::GE)
2221 .Case("lt", AArch64CC::LT)
2222 .Case("gt", AArch64CC::GT)
2223 .Case("le", AArch64CC::LE)
2224 .Case("al", AArch64CC::AL)
2225 .Case("nv", AArch64CC::NV)
2226 .Default(AArch64CC::Invalid);
2227 return CC;
2228 }
2229
2230 /// parseCondCode - Parse a Condition Code operand.
parseCondCode(OperandVector & Operands,bool invertCondCode)2231 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2232 bool invertCondCode) {
2233 MCAsmParser &Parser = getParser();
2234 SMLoc S = getLoc();
2235 const AsmToken &Tok = Parser.getTok();
2236 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2237
2238 StringRef Cond = Tok.getString();
2239 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2240 if (CC == AArch64CC::Invalid)
2241 return TokError("invalid condition code");
2242 Parser.Lex(); // Eat identifier token.
2243
2244 if (invertCondCode) {
2245 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2246 return TokError("condition codes AL and NV are invalid for this instruction");
2247 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2248 }
2249
2250 Operands.push_back(
2251 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2252 return false;
2253 }
2254
/// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
/// argument ("lsl #2", "uxtw", ...). Parse them if present. Shift types
/// require an explicit immediate; extend types default to an implicit #0.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Parser.Lex();

  bool Hash = getLexer().is(AsmToken::Hash);
  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  if (Hash)
    Parser.Lex(); // Eat the '#'.

  // Make sure we do actually have a number or a parenthesized expression.
  SMLoc E = Parser.getTok().getLoc();
  if (!Parser.getTok().is(AsmToken::Integer) &&
      !Parser.getTok().is(AsmToken::LParen)) {
    Error(E, "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  // The amount must be a resolved constant.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    Error(E, "expected constant '#imm' after shift specifier");
    return MatchOperand_ParseFail;
  }

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
2328
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases
/// for the SYS instruction. Parse them specially so that we create a SYS
/// MCInst with the (op1, Cn, Cm, op2) encoding from the ARMv8 ARM, plus the
/// optional Xt register operand.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands) {
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

  Mnemonic = Name;
  Operands.push_back(
      AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));

  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  const MCExpr *Expr = nullptr;

// Push the four SYS operands (op1 imm, Cn, Cm, op2 imm) for one alias.
#define SYS_ALIAS(op1, Cn, Cm, op2)                                            \
  do {                                                                         \
    Expr = MCConstantExpr::Create(op1, getContext());                          \
    Operands.push_back(                                                        \
        AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));           \
    Operands.push_back(                                                        \
        AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));           \
    Operands.push_back(                                                        \
        AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));           \
    Expr = MCConstantExpr::Create(op2, getContext());                          \
    Operands.push_back(                                                        \
        AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));           \
  } while (0)

  if (Mnemonic == "ic") {
    if (!Op.compare_lower("ialluis")) {
      // SYS #0, C7, C1, #0
      SYS_ALIAS(0, 7, 1, 0);
    } else if (!Op.compare_lower("iallu")) {
      // SYS #0, C7, C5, #0
      SYS_ALIAS(0, 7, 5, 0);
    } else if (!Op.compare_lower("ivau")) {
      // SYS #3, C7, C5, #1
      SYS_ALIAS(3, 7, 5, 1);
    } else {
      return TokError("invalid operand for IC instruction");
    }
  } else if (Mnemonic == "dc") {
    if (!Op.compare_lower("zva")) {
      // SYS #3, C7, C4, #1
      SYS_ALIAS(3, 7, 4, 1);
    } else if (!Op.compare_lower("ivac")) {
      // SYS #0, C7, C6, #1
      SYS_ALIAS(0, 7, 6, 1);
    } else if (!Op.compare_lower("isw")) {
      // SYS #0, C7, C6, #2
      SYS_ALIAS(0, 7, 6, 2);
    } else if (!Op.compare_lower("cvac")) {
      // SYS #3, C7, C10, #1
      SYS_ALIAS(3, 7, 10, 1);
    } else if (!Op.compare_lower("csw")) {
      // SYS #0, C7, C10, #2
      SYS_ALIAS(0, 7, 10, 2);
    } else if (!Op.compare_lower("cvau")) {
      // SYS #3, C7, C11, #1
      SYS_ALIAS(3, 7, 11, 1);
    } else if (!Op.compare_lower("civac")) {
      // SYS #3, C7, C14, #1
      SYS_ALIAS(3, 7, 14, 1);
    } else if (!Op.compare_lower("cisw")) {
      // SYS #0, C7, C14, #2
      SYS_ALIAS(0, 7, 14, 2);
    } else {
      return TokError("invalid operand for DC instruction");
    }
  } else if (Mnemonic == "at") {
    if (!Op.compare_lower("s1e1r")) {
      // SYS #0, C7, C8, #0
      SYS_ALIAS(0, 7, 8, 0);
    } else if (!Op.compare_lower("s1e2r")) {
      // SYS #4, C7, C8, #0
      SYS_ALIAS(4, 7, 8, 0);
    } else if (!Op.compare_lower("s1e3r")) {
      // SYS #6, C7, C8, #0
      SYS_ALIAS(6, 7, 8, 0);
    } else if (!Op.compare_lower("s1e1w")) {
      // SYS #0, C7, C8, #1
      SYS_ALIAS(0, 7, 8, 1);
    } else if (!Op.compare_lower("s1e2w")) {
      // SYS #4, C7, C8, #1
      SYS_ALIAS(4, 7, 8, 1);
    } else if (!Op.compare_lower("s1e3w")) {
      // SYS #6, C7, C8, #1
      SYS_ALIAS(6, 7, 8, 1);
    } else if (!Op.compare_lower("s1e0r")) {
      // SYS #0, C7, C8, #2
      SYS_ALIAS(0, 7, 8, 2);
    } else if (!Op.compare_lower("s1e0w")) {
      // SYS #0, C7, C8, #3
      SYS_ALIAS(0, 7, 8, 3);
    } else if (!Op.compare_lower("s12e1r")) {
      // SYS #4, C7, C8, #4
      SYS_ALIAS(4, 7, 8, 4);
    } else if (!Op.compare_lower("s12e1w")) {
      // SYS #4, C7, C8, #5
      SYS_ALIAS(4, 7, 8, 5);
    } else if (!Op.compare_lower("s12e0r")) {
      // SYS #4, C7, C8, #6
      SYS_ALIAS(4, 7, 8, 6);
    } else if (!Op.compare_lower("s12e0w")) {
      // SYS #4, C7, C8, #7
      SYS_ALIAS(4, 7, 8, 7);
    } else {
      return TokError("invalid operand for AT instruction");
    }
  } else if (Mnemonic == "tlbi") {
    if (!Op.compare_lower("vmalle1is")) {
      // SYS #0, C8, C3, #0
      SYS_ALIAS(0, 8, 3, 0);
    } else if (!Op.compare_lower("alle2is")) {
      // SYS #4, C8, C3, #0
      SYS_ALIAS(4, 8, 3, 0);
    } else if (!Op.compare_lower("alle3is")) {
      // SYS #6, C8, C3, #0
      SYS_ALIAS(6, 8, 3, 0);
    } else if (!Op.compare_lower("vae1is")) {
      // SYS #0, C8, C3, #1
      SYS_ALIAS(0, 8, 3, 1);
    } else if (!Op.compare_lower("vae2is")) {
      // SYS #4, C8, C3, #1
      SYS_ALIAS(4, 8, 3, 1);
    } else if (!Op.compare_lower("vae3is")) {
      // SYS #6, C8, C3, #1
      SYS_ALIAS(6, 8, 3, 1);
    } else if (!Op.compare_lower("aside1is")) {
      // SYS #0, C8, C3, #2
      SYS_ALIAS(0, 8, 3, 2);
    } else if (!Op.compare_lower("vaae1is")) {
      // SYS #0, C8, C3, #3
      SYS_ALIAS(0, 8, 3, 3);
    } else if (!Op.compare_lower("alle1is")) {
      // SYS #4, C8, C3, #4
      SYS_ALIAS(4, 8, 3, 4);
    } else if (!Op.compare_lower("vale1is")) {
      // SYS #0, C8, C3, #5
      SYS_ALIAS(0, 8, 3, 5);
    } else if (!Op.compare_lower("vaale1is")) {
      // SYS #0, C8, C3, #7
      SYS_ALIAS(0, 8, 3, 7);
    } else if (!Op.compare_lower("vmalle1")) {
      // SYS #0, C8, C7, #0
      SYS_ALIAS(0, 8, 7, 0);
    } else if (!Op.compare_lower("alle2")) {
      // SYS #4, C8, C7, #0
      SYS_ALIAS(4, 8, 7, 0);
    } else if (!Op.compare_lower("vale2is")) {
      // SYS #4, C8, C3, #5
      SYS_ALIAS(4, 8, 3, 5);
    } else if (!Op.compare_lower("vale3is")) {
      // SYS #6, C8, C3, #5
      SYS_ALIAS(6, 8, 3, 5);
    } else if (!Op.compare_lower("alle3")) {
      // SYS #6, C8, C7, #0
      SYS_ALIAS(6, 8, 7, 0);
    } else if (!Op.compare_lower("vae1")) {
      // SYS #0, C8, C7, #1
      SYS_ALIAS(0, 8, 7, 1);
    } else if (!Op.compare_lower("vae2")) {
      // SYS #4, C8, C7, #1
      SYS_ALIAS(4, 8, 7, 1);
    } else if (!Op.compare_lower("vae3")) {
      // SYS #6, C8, C7, #1
      SYS_ALIAS(6, 8, 7, 1);
    } else if (!Op.compare_lower("aside1")) {
      // SYS #0, C8, C7, #2
      SYS_ALIAS(0, 8, 7, 2);
    } else if (!Op.compare_lower("vaae1")) {
      // SYS #0, C8, C7, #3
      SYS_ALIAS(0, 8, 7, 3);
    } else if (!Op.compare_lower("alle1")) {
      // SYS #4, C8, C7, #4
      SYS_ALIAS(4, 8, 7, 4);
    } else if (!Op.compare_lower("vale1")) {
      // SYS #0, C8, C7, #5
      SYS_ALIAS(0, 8, 7, 5);
    } else if (!Op.compare_lower("vale2")) {
      // SYS #4, C8, C7, #5
      SYS_ALIAS(4, 8, 7, 5);
    } else if (!Op.compare_lower("vale3")) {
      // SYS #6, C8, C7, #5
      SYS_ALIAS(6, 8, 7, 5);
    } else if (!Op.compare_lower("vaale1")) {
      // SYS #0, C8, C7, #7
      SYS_ALIAS(0, 8, 7, 7);
    } else if (!Op.compare_lower("ipas2e1")) {
      // SYS #4, C8, C4, #1
      SYS_ALIAS(4, 8, 4, 1);
    } else if (!Op.compare_lower("ipas2le1")) {
      // SYS #4, C8, C4, #5
      SYS_ALIAS(4, 8, 4, 5);
    } else if (!Op.compare_lower("ipas2e1is")) {
      // SYS #4, C8, C0, #1
      SYS_ALIAS(4, 8, 0, 1);
    } else if (!Op.compare_lower("ipas2le1is")) {
      // SYS #4, C8, C0, #5
      SYS_ALIAS(4, 8, 0, 5);
    } else if (!Op.compare_lower("vmalls12e1")) {
      // SYS #4, C8, C7, #6
      SYS_ALIAS(4, 8, 7, 6);
    } else if (!Op.compare_lower("vmalls12e1is")) {
      // SYS #4, C8, C3, #6
      SYS_ALIAS(4, 8, 3, 6);
    } else {
      return TokError("invalid operand for TLBI instruction");
    }
  }

#undef SYS_ALIAS

  Parser.Lex(); // Eat operand.

  // Operations whose name contains "all" act on everything and take no
  // register; all others require an Xt register operand.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (getLexer().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat comma.

    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");

    HasRegister = true;
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    Parser.eatToEndOfStatement();
    return TokError("unexpected token in argument list");
  }

  if (ExpectRegister && !HasRegister) {
    return TokError("specified " + Mnemonic + " op requires a register");
  }
  else if (!ExpectRegister && HasRegister) {
    return TokError("specified " + Mnemonic + " op does not use a register");
  }

  Parser.Lex(); // Consume the EndOfStatement
  return false;
}
2576
2577 AArch64AsmParser::OperandMatchResultTy
tryParseBarrierOperand(OperandVector & Operands)2578 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2579 MCAsmParser &Parser = getParser();
2580 const AsmToken &Tok = Parser.getTok();
2581
2582 // Can be either a #imm style literal or an option name
2583 bool Hash = Tok.is(AsmToken::Hash);
2584 if (Hash || Tok.is(AsmToken::Integer)) {
2585 // Immediate operand.
2586 if (Hash)
2587 Parser.Lex(); // Eat the '#'
2588 const MCExpr *ImmVal;
2589 SMLoc ExprLoc = getLoc();
2590 if (getParser().parseExpression(ImmVal))
2591 return MatchOperand_ParseFail;
2592 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2593 if (!MCE) {
2594 Error(ExprLoc, "immediate value expected for barrier operand");
2595 return MatchOperand_ParseFail;
2596 }
2597 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2598 Error(ExprLoc, "barrier operand out of range");
2599 return MatchOperand_ParseFail;
2600 }
2601 bool Valid;
2602 auto Mapper = AArch64DB::DBarrierMapper();
2603 StringRef Name =
2604 Mapper.toString(MCE->getValue(), STI.getFeatureBits(), Valid);
2605 Operands.push_back( AArch64Operand::CreateBarrier(MCE->getValue(), Name,
2606 ExprLoc, getContext()));
2607 return MatchOperand_Success;
2608 }
2609
2610 if (Tok.isNot(AsmToken::Identifier)) {
2611 TokError("invalid operand for instruction");
2612 return MatchOperand_ParseFail;
2613 }
2614
2615 bool Valid;
2616 auto Mapper = AArch64DB::DBarrierMapper();
2617 unsigned Opt =
2618 Mapper.fromString(Tok.getString(), STI.getFeatureBits(), Valid);
2619 if (!Valid) {
2620 TokError("invalid barrier option name");
2621 return MatchOperand_ParseFail;
2622 }
2623
2624 // The only valid named option for ISB is 'sy'
2625 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2626 TokError("'sy' or #imm operand expected");
2627 return MatchOperand_ParseFail;
2628 }
2629
2630 Operands.push_back( AArch64Operand::CreateBarrier(Opt, Tok.getString(),
2631 getLoc(), getContext()));
2632 Parser.Lex(); // Consume the option
2633
2634 return MatchOperand_Success;
2635 }
2636
2637 AArch64AsmParser::OperandMatchResultTy
tryParseSysReg(OperandVector & Operands)2638 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2639 MCAsmParser &Parser = getParser();
2640 const AsmToken &Tok = Parser.getTok();
2641
2642 if (Tok.isNot(AsmToken::Identifier))
2643 return MatchOperand_NoMatch;
2644
2645 bool IsKnown;
2646 auto MRSMapper = AArch64SysReg::MRSMapper();
2647 uint32_t MRSReg = MRSMapper.fromString(Tok.getString(), STI.getFeatureBits(),
2648 IsKnown);
2649 assert(IsKnown == (MRSReg != -1U) &&
2650 "register should be -1 if and only if it's unknown");
2651
2652 auto MSRMapper = AArch64SysReg::MSRMapper();
2653 uint32_t MSRReg = MSRMapper.fromString(Tok.getString(), STI.getFeatureBits(),
2654 IsKnown);
2655 assert(IsKnown == (MSRReg != -1U) &&
2656 "register should be -1 if and only if it's unknown");
2657
2658 auto PStateMapper = AArch64PState::PStateMapper();
2659 uint32_t PStateField =
2660 PStateMapper.fromString(Tok.getString(), STI.getFeatureBits(), IsKnown);
2661 assert(IsKnown == (PStateField != -1U) &&
2662 "register should be -1 if and only if it's unknown");
2663
2664 Operands.push_back(AArch64Operand::CreateSysReg(
2665 Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
2666 Parser.Lex(); // Eat identifier
2667
2668 return MatchOperand_Success;
2669 }
2670
/// tryParseVectorRegister - Parse a vector register operand, optionally
/// followed by an element-kind suffix (e.g. ".8b") and/or a lane index in
/// square brackets.
///
/// Returns true if no vector register was found. Returns false both on
/// success and after a diagnosed error inside the index expression: by then
/// the register operand has already been pushed, so the error is reported
/// here (or by the sub-parser) rather than propagated as 'true'.
bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  int64_t Reg = tryMatchVectorRegister(Kind, false);
  if (Reg == -1)
    return true;
  Operands.push_back(
      AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(
        AArch64Operand::CreateToken(Kind, false, S, getContext()));

  // If there is an index specifier following the register, parse that too.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    // NOTE: from here on, errors return false (see function comment);
    // parseExpression is assumed to have emitted its own diagnostic.
    if (getParser().parseExpression(ImmVal))
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for vector index");
      return false;
    }

    SMLoc E = getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(E, "']' expected");
      return false;
    }

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
                                                         E, getContext()));
  }

  return false;
}
2719
2720 /// parseRegister - Parse a non-vector register operand.
parseRegister(OperandVector & Operands)2721 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2722 MCAsmParser &Parser = getParser();
2723 SMLoc S = getLoc();
2724 // Try for a vector register.
2725 if (!tryParseVectorRegister(Operands))
2726 return false;
2727
2728 // Try for a scalar register.
2729 int64_t Reg = tryParseRegister();
2730 if (Reg == -1)
2731 return true;
2732 Operands.push_back(
2733 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2734
2735 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2736 // as a string token in the instruction itself.
2737 if (getLexer().getKind() == AsmToken::LBrac) {
2738 SMLoc LBracS = getLoc();
2739 Parser.Lex();
2740 const AsmToken &Tok = Parser.getTok();
2741 if (Tok.is(AsmToken::Integer)) {
2742 SMLoc IntS = getLoc();
2743 int64_t Val = Tok.getIntVal();
2744 if (Val == 1) {
2745 Parser.Lex();
2746 if (getLexer().getKind() == AsmToken::RBrac) {
2747 SMLoc RBracS = getLoc();
2748 Parser.Lex();
2749 Operands.push_back(
2750 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2751 Operands.push_back(
2752 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2753 Operands.push_back(
2754 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
2755 return false;
2756 }
2757 }
2758 }
2759 }
2760
2761 return false;
2762 }
2763
parseSymbolicImmVal(const MCExpr * & ImmVal)2764 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2765 MCAsmParser &Parser = getParser();
2766 bool HasELFModifier = false;
2767 AArch64MCExpr::VariantKind RefKind;
2768
2769 if (Parser.getTok().is(AsmToken::Colon)) {
2770 Parser.Lex(); // Eat ':"
2771 HasELFModifier = true;
2772
2773 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2774 Error(Parser.getTok().getLoc(),
2775 "expect relocation specifier in operand after ':'");
2776 return true;
2777 }
2778
2779 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2780 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2781 .Case("lo12", AArch64MCExpr::VK_LO12)
2782 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2783 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2784 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2785 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2786 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2787 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2788 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2789 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2790 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2791 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2792 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2793 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2794 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2795 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2796 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2797 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2798 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2799 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2800 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2801 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2802 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2803 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2804 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2805 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2806 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2807 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2808 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2809 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2810 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2811 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2812 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2813 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2814 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2815 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2816 .Default(AArch64MCExpr::VK_INVALID);
2817
2818 if (RefKind == AArch64MCExpr::VK_INVALID) {
2819 Error(Parser.getTok().getLoc(),
2820 "expect relocation specifier in operand after ':'");
2821 return true;
2822 }
2823
2824 Parser.Lex(); // Eat identifier
2825
2826 if (Parser.getTok().isNot(AsmToken::Colon)) {
2827 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2828 return true;
2829 }
2830 Parser.Lex(); // Eat ':'
2831 }
2832
2833 if (getParser().parseExpression(ImmVal))
2834 return true;
2835
2836 if (HasELFModifier)
2837 ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());
2838
2839 return false;
2840 }
2841
/// parseVectorList - Parse a vector list operand for AdvSIMD instructions,
/// e.g. "{v0.8b, v1.8b}" or "{v0.4s-v3.4s}", optionally followed by a lane
/// index such as "[2]". Registers may be given either as a comma-separated
/// sequence (which must be consecutive, wrapping at 31) or as a range.
/// Returns true on a reported error before any operand is pushed; returns
/// false on success and also after errors diagnosed inside the index
/// expression (the list operand has already been pushed by then).
bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
  SMLoc S = getLoc();
  Parser.Lex(); // Eat left bracket token.
  StringRef Kind;
  int64_t FirstReg = tryMatchVectorRegister(Kind, true);
  if (FirstReg == -1)
    return true;
  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  if (Parser.getTok().is(AsmToken::Minus)) {
    // Range form: "{vN.T - vM.T}".
    Parser.Lex(); // Eat the minus.

    SMLoc Loc = getLoc();
    StringRef NextKind;
    int64_t Reg = tryMatchVectorRegister(NextKind, true);
    if (Reg == -1)
      return true;
    // Any kind suffixes must match on all regs in the list.
    if (Kind != NextKind)
      return Error(Loc, "mismatched register size suffix");

    // Distance from first to last register, wrapping at 32 (v31 -> v0).
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      return Error(Loc, "invalid number of vectors");
    }

    Count += Space;
  }
  else {
    // Comma-separated form: "{vN.T, vN+1.T, ...}".
    while (Parser.getTok().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma token.

      SMLoc Loc = getLoc();
      StringRef NextKind;
      int64_t Reg = tryMatchVectorRegister(NextKind, true);
      if (Reg == -1)
        return true;
      // Any kind suffixes must match on all regs in the list.
      if (Kind != NextKind)
        return Error(Loc, "mismatched register size suffix");

      // Registers must be incremental (with wraparound at 31)
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
        return Error(Loc, "registers must be sequential");

      PrevReg = Reg;
      ++Count;
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(getLoc(), "'}' expected");
  Parser.Lex(); // Eat the '}' token.

  // AdvSIMD lists hold at most four registers.
  if (Count > 4)
    return Error(S, "invalid number of vectors");

  unsigned NumElements = 0;
  char ElementKind = 0;
  if (!Kind.empty())
    parseValidVectorKind(Kind, NumElements, ElementKind);

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));

  // If there is an index specifier following the list, parse that too.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    // From here on errors return false: the list operand was already pushed
    // and the diagnostic is assumed to have been emitted.
    if (getParser().parseExpression(ImmVal))
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for vector index");
      return false;
    }

    SMLoc E = getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(E, "']' expected");
      return false;
    }

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
                                                         E, getContext()));
  }
  return false;
}
2940
2941 AArch64AsmParser::OperandMatchResultTy
tryParseGPR64sp0Operand(OperandVector & Operands)2942 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2943 MCAsmParser &Parser = getParser();
2944 const AsmToken &Tok = Parser.getTok();
2945 if (!Tok.is(AsmToken::Identifier))
2946 return MatchOperand_NoMatch;
2947
2948 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
2949
2950 MCContext &Ctx = getContext();
2951 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2952 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2953 return MatchOperand_NoMatch;
2954
2955 SMLoc S = getLoc();
2956 Parser.Lex(); // Eat register
2957
2958 if (Parser.getTok().isNot(AsmToken::Comma)) {
2959 Operands.push_back(
2960 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2961 return MatchOperand_Success;
2962 }
2963 Parser.Lex(); // Eat comma.
2964
2965 if (Parser.getTok().is(AsmToken::Hash))
2966 Parser.Lex(); // Eat hash
2967
2968 if (Parser.getTok().isNot(AsmToken::Integer)) {
2969 Error(getLoc(), "index must be absent or #0");
2970 return MatchOperand_ParseFail;
2971 }
2972
2973 const MCExpr *ImmVal;
2974 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2975 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2976 Error(getLoc(), "index must be absent or #0");
2977 return MatchOperand_ParseFail;
2978 }
2979
2980 Operands.push_back(
2981 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2982 return MatchOperand_Success;
2983 }
2984
2985 /// parseOperand - Parse a arm instruction operand. For now this parses the
2986 /// operand regardless of the mnemonic.
parseOperand(OperandVector & Operands,bool isCondCode,bool invertCondCode)2987 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
2988 bool invertCondCode) {
2989 MCAsmParser &Parser = getParser();
2990 // Check if the current operand has a custom associated parser, if so, try to
2991 // custom parse the operand, or fallback to the general approach.
2992 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2993 if (ResTy == MatchOperand_Success)
2994 return false;
2995 // If there wasn't a custom match, try the generic matcher below. Otherwise,
2996 // there was a match, but an error occurred, in which case, just return that
2997 // the operand parsing failed.
2998 if (ResTy == MatchOperand_ParseFail)
2999 return true;
3000
3001 // Nothing custom, so do general case parsing.
3002 SMLoc S, E;
3003 switch (getLexer().getKind()) {
3004 default: {
3005 SMLoc S = getLoc();
3006 const MCExpr *Expr;
3007 if (parseSymbolicImmVal(Expr))
3008 return Error(S, "invalid operand");
3009
3010 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3011 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3012 return false;
3013 }
3014 case AsmToken::LBrac: {
3015 SMLoc Loc = Parser.getTok().getLoc();
3016 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3017 getContext()));
3018 Parser.Lex(); // Eat '['
3019
3020 // There's no comma after a '[', so we can parse the next operand
3021 // immediately.
3022 return parseOperand(Operands, false, false);
3023 }
3024 case AsmToken::LCurly:
3025 return parseVectorList(Operands);
3026 case AsmToken::Identifier: {
3027 // If we're expecting a Condition Code operand, then just parse that.
3028 if (isCondCode)
3029 return parseCondCode(Operands, invertCondCode);
3030
3031 // If it's a register name, parse it.
3032 if (!parseRegister(Operands))
3033 return false;
3034
3035 // This could be an optional "shift" or "extend" operand.
3036 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3037 // We can only continue if no tokens were eaten.
3038 if (GotShift != MatchOperand_NoMatch)
3039 return GotShift;
3040
3041 // This was not a register so parse other operands that start with an
3042 // identifier (like labels) as expressions and create them as immediates.
3043 const MCExpr *IdVal;
3044 S = getLoc();
3045 if (getParser().parseExpression(IdVal))
3046 return true;
3047
3048 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3049 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3050 return false;
3051 }
3052 case AsmToken::Integer:
3053 case AsmToken::Real:
3054 case AsmToken::Hash: {
3055 // #42 -> immediate.
3056 S = getLoc();
3057 if (getLexer().is(AsmToken::Hash))
3058 Parser.Lex();
3059
3060 // Parse a negative sign
3061 bool isNegative = false;
3062 if (Parser.getTok().is(AsmToken::Minus)) {
3063 isNegative = true;
3064 // We need to consume this token only when we have a Real, otherwise
3065 // we let parseSymbolicImmVal take care of it
3066 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3067 Parser.Lex();
3068 }
3069
3070 // The only Real that should come through here is a literal #0.0 for
3071 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3072 // so convert the value.
3073 const AsmToken &Tok = Parser.getTok();
3074 if (Tok.is(AsmToken::Real)) {
3075 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3076 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3077 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3078 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3079 Mnemonic != "fcmlt")
3080 return TokError("unexpected floating point literal");
3081 else if (IntVal != 0 || isNegative)
3082 return TokError("expected floating-point constant #0.0");
3083 Parser.Lex(); // Eat the token.
3084
3085 Operands.push_back(
3086 AArch64Operand::CreateToken("#0", false, S, getContext()));
3087 Operands.push_back(
3088 AArch64Operand::CreateToken(".0", false, S, getContext()));
3089 return false;
3090 }
3091
3092 const MCExpr *ImmVal;
3093 if (parseSymbolicImmVal(ImmVal))
3094 return true;
3095
3096 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3097 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3098 return false;
3099 }
3100 case AsmToken::Equal: {
3101 SMLoc Loc = Parser.getTok().getLoc();
3102 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3103 return Error(Loc, "unexpected token in operand");
3104 Parser.Lex(); // Eat '='
3105 const MCExpr *SubExprVal;
3106 if (getParser().parseExpression(SubExprVal))
3107 return true;
3108
3109 if (Operands.size() < 2 ||
3110 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3111 return true;
3112
3113 bool IsXReg =
3114 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3115 Operands[1]->getReg());
3116
3117 MCContext& Ctx = getContext();
3118 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3119 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3120 if (isa<MCConstantExpr>(SubExprVal)) {
3121 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3122 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3123 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3124 ShiftAmt += 16;
3125 Imm >>= 16;
3126 }
3127 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3128 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3129 Operands.push_back(AArch64Operand::CreateImm(
3130 MCConstantExpr::Create(Imm, Ctx), S, E, Ctx));
3131 if (ShiftAmt)
3132 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3133 ShiftAmt, true, S, E, Ctx));
3134 return false;
3135 }
3136 APInt Simm = APInt(64, Imm << ShiftAmt);
3137 // check if the immediate is an unsigned or signed 32-bit int for W regs
3138 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3139 return Error(Loc, "Immediate too large for register");
3140 }
3141 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3142 const MCExpr *CPLoc =
3143 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4);
3144 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3145 return false;
3146 }
3147 }
3148 }
3149
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands. Handles the legacy "bcc" branch spellings, the AArch64-specific
/// .req directive, SYS-alias instructions (ic/dc/at/tlbi), splitting of
/// '.'-suffixed mnemonics into separate tokens, and condition-code operands
/// of the conditional-compare/select families.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // Canonicalize legacy "bcc"-style conditional branches to the
  // architectural "b.cc" spelling before any further processing.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction'.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
    bool IsError = parseSysAlias(Head, NameLoc, Operands);
    if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
      Parser.eatToEndOfStatement();
    return IsError;
  }

  Operands.push_back(
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    // The '.' is materialized as its own token so the printer can
    // reconstruct "b.cc" faithfully.
    Operands.push_back(
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, false, false)) {
      Parser.eatToEndOfStatement();
      return true;
    }

    // N counts operand positions (1-based, position 1 already parsed above)
    // so the condCode*Operand flags can be applied at the right slot.
    unsigned N = 2;
    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma.

      // Parse and remember the operand.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        Parser.eatToEndOfStatement();
        return true;
      }

      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!
      if (Parser.getTok().is(AsmToken::RBrac)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
                                                       getContext()));
        Parser.Lex();
      }

      if (Parser.getTok().is(AsmToken::Exclaim)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
                                                       getContext()));
        Parser.Lex();
      }

      ++N;
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = Parser.getTok().getLoc();
    Parser.eatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement
  return false;
}
3303
3304 // FIXME: This entire function is a giant hack to provide us with decent
3305 // operand range validation/diagnostics until TableGen/MC can be extended
3306 // to support autogeneration of this kind of validation.
validateInstruction(MCInst & Inst,SmallVectorImpl<SMLoc> & Loc)3307 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3308 SmallVectorImpl<SMLoc> &Loc) {
3309 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3310 // Check for indexed addressing modes w/ the base register being the
3311 // same as a destination/source register or pair load where
3312 // the Rt == Rt2. All of those are undefined behaviour.
3313 switch (Inst.getOpcode()) {
3314 case AArch64::LDPSWpre:
3315 case AArch64::LDPWpost:
3316 case AArch64::LDPWpre:
3317 case AArch64::LDPXpost:
3318 case AArch64::LDPXpre: {
3319 unsigned Rt = Inst.getOperand(1).getReg();
3320 unsigned Rt2 = Inst.getOperand(2).getReg();
3321 unsigned Rn = Inst.getOperand(3).getReg();
3322 if (RI->isSubRegisterEq(Rn, Rt))
3323 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3324 "is also a destination");
3325 if (RI->isSubRegisterEq(Rn, Rt2))
3326 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3327 "is also a destination");
3328 // FALLTHROUGH
3329 }
3330 case AArch64::LDPDi:
3331 case AArch64::LDPQi:
3332 case AArch64::LDPSi:
3333 case AArch64::LDPSWi:
3334 case AArch64::LDPWi:
3335 case AArch64::LDPXi: {
3336 unsigned Rt = Inst.getOperand(0).getReg();
3337 unsigned Rt2 = Inst.getOperand(1).getReg();
3338 if (Rt == Rt2)
3339 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3340 break;
3341 }
3342 case AArch64::LDPDpost:
3343 case AArch64::LDPDpre:
3344 case AArch64::LDPQpost:
3345 case AArch64::LDPQpre:
3346 case AArch64::LDPSpost:
3347 case AArch64::LDPSpre:
3348 case AArch64::LDPSWpost: {
3349 unsigned Rt = Inst.getOperand(1).getReg();
3350 unsigned Rt2 = Inst.getOperand(2).getReg();
3351 if (Rt == Rt2)
3352 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3353 break;
3354 }
3355 case AArch64::STPDpost:
3356 case AArch64::STPDpre:
3357 case AArch64::STPQpost:
3358 case AArch64::STPQpre:
3359 case AArch64::STPSpost:
3360 case AArch64::STPSpre:
3361 case AArch64::STPWpost:
3362 case AArch64::STPWpre:
3363 case AArch64::STPXpost:
3364 case AArch64::STPXpre: {
3365 unsigned Rt = Inst.getOperand(1).getReg();
3366 unsigned Rt2 = Inst.getOperand(2).getReg();
3367 unsigned Rn = Inst.getOperand(3).getReg();
3368 if (RI->isSubRegisterEq(Rn, Rt))
3369 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3370 "is also a source");
3371 if (RI->isSubRegisterEq(Rn, Rt2))
3372 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3373 "is also a source");
3374 break;
3375 }
3376 case AArch64::LDRBBpre:
3377 case AArch64::LDRBpre:
3378 case AArch64::LDRHHpre:
3379 case AArch64::LDRHpre:
3380 case AArch64::LDRSBWpre:
3381 case AArch64::LDRSBXpre:
3382 case AArch64::LDRSHWpre:
3383 case AArch64::LDRSHXpre:
3384 case AArch64::LDRSWpre:
3385 case AArch64::LDRWpre:
3386 case AArch64::LDRXpre:
3387 case AArch64::LDRBBpost:
3388 case AArch64::LDRBpost:
3389 case AArch64::LDRHHpost:
3390 case AArch64::LDRHpost:
3391 case AArch64::LDRSBWpost:
3392 case AArch64::LDRSBXpost:
3393 case AArch64::LDRSHWpost:
3394 case AArch64::LDRSHXpost:
3395 case AArch64::LDRSWpost:
3396 case AArch64::LDRWpost:
3397 case AArch64::LDRXpost: {
3398 unsigned Rt = Inst.getOperand(1).getReg();
3399 unsigned Rn = Inst.getOperand(2).getReg();
3400 if (RI->isSubRegisterEq(Rn, Rt))
3401 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3402 "is also a source");
3403 break;
3404 }
3405 case AArch64::STRBBpost:
3406 case AArch64::STRBpost:
3407 case AArch64::STRHHpost:
3408 case AArch64::STRHpost:
3409 case AArch64::STRWpost:
3410 case AArch64::STRXpost:
3411 case AArch64::STRBBpre:
3412 case AArch64::STRBpre:
3413 case AArch64::STRHHpre:
3414 case AArch64::STRHpre:
3415 case AArch64::STRWpre:
3416 case AArch64::STRXpre: {
3417 unsigned Rt = Inst.getOperand(1).getReg();
3418 unsigned Rn = Inst.getOperand(2).getReg();
3419 if (RI->isSubRegisterEq(Rn, Rt))
3420 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3421 "is also a source");
3422 break;
3423 }
3424 }
3425
3426 // Now check immediate ranges. Separate from the above as there is overlap
3427 // in the instructions being checked and this keeps the nested conditionals
3428 // to a minimum.
3429 switch (Inst.getOpcode()) {
3430 case AArch64::ADDSWri:
3431 case AArch64::ADDSXri:
3432 case AArch64::ADDWri:
3433 case AArch64::ADDXri:
3434 case AArch64::SUBSWri:
3435 case AArch64::SUBSXri:
3436 case AArch64::SUBWri:
3437 case AArch64::SUBXri: {
3438 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3439 // some slight duplication here.
3440 if (Inst.getOperand(2).isExpr()) {
3441 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3442 AArch64MCExpr::VariantKind ELFRefKind;
3443 MCSymbolRefExpr::VariantKind DarwinRefKind;
3444 int64_t Addend;
3445 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3446 return Error(Loc[2], "invalid immediate expression");
3447 }
3448
3449 // Only allow these with ADDXri.
3450 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3451 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3452 Inst.getOpcode() == AArch64::ADDXri)
3453 return false;
3454
3455 // Only allow these with ADDXri/ADDWri
3456 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3457 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3458 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3459 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3460 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3461 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3462 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3463 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3464 (Inst.getOpcode() == AArch64::ADDXri ||
3465 Inst.getOpcode() == AArch64::ADDWri))
3466 return false;
3467
3468 // Don't allow expressions in the immediate field otherwise
3469 return Error(Loc[2], "invalid immediate expression");
3470 }
3471 return false;
3472 }
3473 default:
3474 return false;
3475 }
3476 }
3477
showMatchError(SMLoc Loc,unsigned ErrCode)3478 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3479 switch (ErrCode) {
3480 case Match_MissingFeature:
3481 return Error(Loc,
3482 "instruction requires a CPU feature not currently enabled");
3483 case Match_InvalidOperand:
3484 return Error(Loc, "invalid operand for instruction");
3485 case Match_InvalidSuffix:
3486 return Error(Loc, "invalid type suffix for instruction");
3487 case Match_InvalidCondCode:
3488 return Error(Loc, "expected AArch64 condition code");
3489 case Match_AddSubRegExtendSmall:
3490 return Error(Loc,
3491 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3492 case Match_AddSubRegExtendLarge:
3493 return Error(Loc,
3494 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3495 case Match_AddSubSecondSource:
3496 return Error(Loc,
3497 "expected compatible register, symbol or integer in range [0, 4095]");
3498 case Match_LogicalSecondSource:
3499 return Error(Loc, "expected compatible register or logical immediate");
3500 case Match_InvalidMovImm32Shift:
3501 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3502 case Match_InvalidMovImm64Shift:
3503 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3504 case Match_AddSubRegShift32:
3505 return Error(Loc,
3506 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3507 case Match_AddSubRegShift64:
3508 return Error(Loc,
3509 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3510 case Match_InvalidFPImm:
3511 return Error(Loc,
3512 "expected compatible register or floating-point constant");
3513 case Match_InvalidMemoryIndexedSImm9:
3514 return Error(Loc, "index must be an integer in range [-256, 255].");
3515 case Match_InvalidMemoryIndexed4SImm7:
3516 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3517 case Match_InvalidMemoryIndexed8SImm7:
3518 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3519 case Match_InvalidMemoryIndexed16SImm7:
3520 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3521 case Match_InvalidMemoryWExtend8:
3522 return Error(Loc,
3523 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3524 case Match_InvalidMemoryWExtend16:
3525 return Error(Loc,
3526 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3527 case Match_InvalidMemoryWExtend32:
3528 return Error(Loc,
3529 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3530 case Match_InvalidMemoryWExtend64:
3531 return Error(Loc,
3532 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3533 case Match_InvalidMemoryWExtend128:
3534 return Error(Loc,
3535 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3536 case Match_InvalidMemoryXExtend8:
3537 return Error(Loc,
3538 "expected 'lsl' or 'sxtx' with optional shift of #0");
3539 case Match_InvalidMemoryXExtend16:
3540 return Error(Loc,
3541 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3542 case Match_InvalidMemoryXExtend32:
3543 return Error(Loc,
3544 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3545 case Match_InvalidMemoryXExtend64:
3546 return Error(Loc,
3547 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3548 case Match_InvalidMemoryXExtend128:
3549 return Error(Loc,
3550 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3551 case Match_InvalidMemoryIndexed1:
3552 return Error(Loc, "index must be an integer in range [0, 4095].");
3553 case Match_InvalidMemoryIndexed2:
3554 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3555 case Match_InvalidMemoryIndexed4:
3556 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3557 case Match_InvalidMemoryIndexed8:
3558 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3559 case Match_InvalidMemoryIndexed16:
3560 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3561 case Match_InvalidImm0_7:
3562 return Error(Loc, "immediate must be an integer in range [0, 7].");
3563 case Match_InvalidImm0_15:
3564 return Error(Loc, "immediate must be an integer in range [0, 15].");
3565 case Match_InvalidImm0_31:
3566 return Error(Loc, "immediate must be an integer in range [0, 31].");
3567 case Match_InvalidImm0_63:
3568 return Error(Loc, "immediate must be an integer in range [0, 63].");
3569 case Match_InvalidImm0_127:
3570 return Error(Loc, "immediate must be an integer in range [0, 127].");
3571 case Match_InvalidImm0_65535:
3572 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3573 case Match_InvalidImm1_8:
3574 return Error(Loc, "immediate must be an integer in range [1, 8].");
3575 case Match_InvalidImm1_16:
3576 return Error(Loc, "immediate must be an integer in range [1, 16].");
3577 case Match_InvalidImm1_32:
3578 return Error(Loc, "immediate must be an integer in range [1, 32].");
3579 case Match_InvalidImm1_64:
3580 return Error(Loc, "immediate must be an integer in range [1, 64].");
3581 case Match_InvalidIndex1:
3582 return Error(Loc, "expected lane specifier '[1]'");
3583 case Match_InvalidIndexB:
3584 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3585 case Match_InvalidIndexH:
3586 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3587 case Match_InvalidIndexS:
3588 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3589 case Match_InvalidIndexD:
3590 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3591 case Match_InvalidLabel:
3592 return Error(Loc, "expected label or encodable integer pc offset");
3593 case Match_MRS:
3594 return Error(Loc, "expected readable system register");
3595 case Match_MSR:
3596 return Error(Loc, "expected writable system register or pstate");
3597 case Match_MnemonicFail:
3598 return Error(Loc, "unrecognized instruction mnemonic");
3599 default:
3600 llvm_unreachable("unexpected error code!");
3601 }
3602 }
3603
3604 static const char *getSubtargetFeatureName(uint64_t Val);
3605
MatchAndEmitInstruction(SMLoc IDLoc,unsigned & Opcode,OperandVector & Operands,MCStreamer & Out,uint64_t & ErrorInfo,bool MatchingInlineAsm)3606 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3607 OperandVector &Operands,
3608 MCStreamer &Out,
3609 uint64_t &ErrorInfo,
3610 bool MatchingInlineAsm) {
3611 assert(!Operands.empty() && "Unexpect empty operand list!");
3612 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3613 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3614
3615 StringRef Tok = Op.getToken();
3616 unsigned NumOperands = Operands.size();
3617
3618 if (NumOperands == 4 && Tok == "lsl") {
3619 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3620 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3621 if (Op2.isReg() && Op3.isImm()) {
3622 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3623 if (Op3CE) {
3624 uint64_t Op3Val = Op3CE->getValue();
3625 uint64_t NewOp3Val = 0;
3626 uint64_t NewOp4Val = 0;
3627 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3628 Op2.getReg())) {
3629 NewOp3Val = (32 - Op3Val) & 0x1f;
3630 NewOp4Val = 31 - Op3Val;
3631 } else {
3632 NewOp3Val = (64 - Op3Val) & 0x3f;
3633 NewOp4Val = 63 - Op3Val;
3634 }
3635
3636 const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
3637 const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
3638
3639 Operands[0] = AArch64Operand::CreateToken(
3640 "ubfm", false, Op.getStartLoc(), getContext());
3641 Operands.push_back(AArch64Operand::CreateImm(
3642 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3643 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3644 Op3.getEndLoc(), getContext());
3645 }
3646 }
3647 } else if (NumOperands == 5) {
3648 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3649 // UBFIZ -> UBFM aliases.
3650 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3651 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3652 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3653 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3654
3655 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3656 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3657 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3658
3659 if (Op3CE && Op4CE) {
3660 uint64_t Op3Val = Op3CE->getValue();
3661 uint64_t Op4Val = Op4CE->getValue();
3662
3663 uint64_t RegWidth = 0;
3664 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3665 Op1.getReg()))
3666 RegWidth = 64;
3667 else
3668 RegWidth = 32;
3669
3670 if (Op3Val >= RegWidth)
3671 return Error(Op3.getStartLoc(),
3672 "expected integer in range [0, 31]");
3673 if (Op4Val < 1 || Op4Val > RegWidth)
3674 return Error(Op4.getStartLoc(),
3675 "expected integer in range [1, 32]");
3676
3677 uint64_t NewOp3Val = 0;
3678 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3679 Op1.getReg()))
3680 NewOp3Val = (32 - Op3Val) & 0x1f;
3681 else
3682 NewOp3Val = (64 - Op3Val) & 0x3f;
3683
3684 uint64_t NewOp4Val = Op4Val - 1;
3685
3686 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3687 return Error(Op4.getStartLoc(),
3688 "requested insert overflows register");
3689
3690 const MCExpr *NewOp3 =
3691 MCConstantExpr::Create(NewOp3Val, getContext());
3692 const MCExpr *NewOp4 =
3693 MCConstantExpr::Create(NewOp4Val, getContext());
3694 Operands[3] = AArch64Operand::CreateImm(
3695 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3696 Operands[4] = AArch64Operand::CreateImm(
3697 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3698 if (Tok == "bfi")
3699 Operands[0] = AArch64Operand::CreateToken(
3700 "bfm", false, Op.getStartLoc(), getContext());
3701 else if (Tok == "sbfiz")
3702 Operands[0] = AArch64Operand::CreateToken(
3703 "sbfm", false, Op.getStartLoc(), getContext());
3704 else if (Tok == "ubfiz")
3705 Operands[0] = AArch64Operand::CreateToken(
3706 "ubfm", false, Op.getStartLoc(), getContext());
3707 else
3708 llvm_unreachable("No valid mnemonic for alias?");
3709 }
3710 }
3711
3712 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3713 // UBFX -> UBFM aliases.
3714 } else if (NumOperands == 5 &&
3715 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3716 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3717 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3718 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3719
3720 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3721 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3722 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3723
3724 if (Op3CE && Op4CE) {
3725 uint64_t Op3Val = Op3CE->getValue();
3726 uint64_t Op4Val = Op4CE->getValue();
3727
3728 uint64_t RegWidth = 0;
3729 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3730 Op1.getReg()))
3731 RegWidth = 64;
3732 else
3733 RegWidth = 32;
3734
3735 if (Op3Val >= RegWidth)
3736 return Error(Op3.getStartLoc(),
3737 "expected integer in range [0, 31]");
3738 if (Op4Val < 1 || Op4Val > RegWidth)
3739 return Error(Op4.getStartLoc(),
3740 "expected integer in range [1, 32]");
3741
3742 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3743
3744 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3745 return Error(Op4.getStartLoc(),
3746 "requested extract overflows register");
3747
3748 const MCExpr *NewOp4 =
3749 MCConstantExpr::Create(NewOp4Val, getContext());
3750 Operands[4] = AArch64Operand::CreateImm(
3751 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3752 if (Tok == "bfxil")
3753 Operands[0] = AArch64Operand::CreateToken(
3754 "bfm", false, Op.getStartLoc(), getContext());
3755 else if (Tok == "sbfx")
3756 Operands[0] = AArch64Operand::CreateToken(
3757 "sbfm", false, Op.getStartLoc(), getContext());
3758 else if (Tok == "ubfx")
3759 Operands[0] = AArch64Operand::CreateToken(
3760 "ubfm", false, Op.getStartLoc(), getContext());
3761 else
3762 llvm_unreachable("No valid mnemonic for alias?");
3763 }
3764 }
3765 }
3766 }
3767 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3768 // InstAlias can't quite handle this since the reg classes aren't
3769 // subclasses.
3770 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3771 // The source register can be Wn here, but the matcher expects a
3772 // GPR64. Twiddle it here if necessary.
3773 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3774 if (Op.isReg()) {
3775 unsigned Reg = getXRegFromWReg(Op.getReg());
3776 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3777 Op.getEndLoc(), getContext());
3778 }
3779 }
3780 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3781 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3782 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3783 if (Op.isReg() &&
3784 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3785 Op.getReg())) {
3786 // The source register can be Wn here, but the matcher expects a
3787 // GPR64. Twiddle it here if necessary.
3788 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3789 if (Op.isReg()) {
3790 unsigned Reg = getXRegFromWReg(Op.getReg());
3791 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3792 Op.getEndLoc(), getContext());
3793 }
3794 }
3795 }
3796 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3797 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3798 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3799 if (Op.isReg() &&
3800 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3801 Op.getReg())) {
3802 // The source register can be Wn here, but the matcher expects a
3803 // GPR32. Twiddle it here if necessary.
3804 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3805 if (Op.isReg()) {
3806 unsigned Reg = getWRegFromXReg(Op.getReg());
3807 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3808 Op.getEndLoc(), getContext());
3809 }
3810 }
3811 }
3812
3813 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3814 if (NumOperands == 3 && Tok == "fmov") {
3815 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3816 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
3817 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3818 unsigned zreg =
3819 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3820 RegOp.getReg())
3821 ? AArch64::WZR
3822 : AArch64::XZR;
3823 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3824 Op.getEndLoc(), getContext());
3825 }
3826 }
3827
3828 MCInst Inst;
3829 // First try to match against the secondary set of tables containing the
3830 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3831 unsigned MatchResult =
3832 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3833
3834 // If that fails, try against the alternate table containing long-form NEON:
3835 // "fadd v0.2s, v1.2s, v2.2s"
3836 if (MatchResult != Match_Success)
3837 MatchResult =
3838 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3839
3840 switch (MatchResult) {
3841 case Match_Success: {
3842 // Perform range checking and other semantic validations
3843 SmallVector<SMLoc, 8> OperandLocs;
3844 NumOperands = Operands.size();
3845 for (unsigned i = 1; i < NumOperands; ++i)
3846 OperandLocs.push_back(Operands[i]->getStartLoc());
3847 if (validateInstruction(Inst, OperandLocs))
3848 return true;
3849
3850 Inst.setLoc(IDLoc);
3851 Out.EmitInstruction(Inst, STI);
3852 return false;
3853 }
3854 case Match_MissingFeature: {
3855 assert(ErrorInfo && "Unknown missing feature!");
3856 // Special case the error message for the very common case where only
3857 // a single subtarget feature is missing (neon, e.g.).
3858 std::string Msg = "instruction requires:";
3859 uint64_t Mask = 1;
3860 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3861 if (ErrorInfo & Mask) {
3862 Msg += " ";
3863 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3864 }
3865 Mask <<= 1;
3866 }
3867 return Error(IDLoc, Msg);
3868 }
3869 case Match_MnemonicFail:
3870 return showMatchError(IDLoc, MatchResult);
3871 case Match_InvalidOperand: {
3872 SMLoc ErrorLoc = IDLoc;
3873 if (ErrorInfo != ~0ULL) {
3874 if (ErrorInfo >= Operands.size())
3875 return Error(IDLoc, "too few operands for instruction");
3876
3877 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3878 if (ErrorLoc == SMLoc())
3879 ErrorLoc = IDLoc;
3880 }
3881 // If the match failed on a suffix token operand, tweak the diagnostic
3882 // accordingly.
3883 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3884 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
3885 MatchResult = Match_InvalidSuffix;
3886
3887 return showMatchError(ErrorLoc, MatchResult);
3888 }
3889 case Match_InvalidMemoryIndexed1:
3890 case Match_InvalidMemoryIndexed2:
3891 case Match_InvalidMemoryIndexed4:
3892 case Match_InvalidMemoryIndexed8:
3893 case Match_InvalidMemoryIndexed16:
3894 case Match_InvalidCondCode:
3895 case Match_AddSubRegExtendSmall:
3896 case Match_AddSubRegExtendLarge:
3897 case Match_AddSubSecondSource:
3898 case Match_LogicalSecondSource:
3899 case Match_AddSubRegShift32:
3900 case Match_AddSubRegShift64:
3901 case Match_InvalidMovImm32Shift:
3902 case Match_InvalidMovImm64Shift:
3903 case Match_InvalidFPImm:
3904 case Match_InvalidMemoryWExtend8:
3905 case Match_InvalidMemoryWExtend16:
3906 case Match_InvalidMemoryWExtend32:
3907 case Match_InvalidMemoryWExtend64:
3908 case Match_InvalidMemoryWExtend128:
3909 case Match_InvalidMemoryXExtend8:
3910 case Match_InvalidMemoryXExtend16:
3911 case Match_InvalidMemoryXExtend32:
3912 case Match_InvalidMemoryXExtend64:
3913 case Match_InvalidMemoryXExtend128:
3914 case Match_InvalidMemoryIndexed4SImm7:
3915 case Match_InvalidMemoryIndexed8SImm7:
3916 case Match_InvalidMemoryIndexed16SImm7:
3917 case Match_InvalidMemoryIndexedSImm9:
3918 case Match_InvalidImm0_7:
3919 case Match_InvalidImm0_15:
3920 case Match_InvalidImm0_31:
3921 case Match_InvalidImm0_63:
3922 case Match_InvalidImm0_127:
3923 case Match_InvalidImm0_65535:
3924 case Match_InvalidImm1_8:
3925 case Match_InvalidImm1_16:
3926 case Match_InvalidImm1_32:
3927 case Match_InvalidImm1_64:
3928 case Match_InvalidIndex1:
3929 case Match_InvalidIndexB:
3930 case Match_InvalidIndexH:
3931 case Match_InvalidIndexS:
3932 case Match_InvalidIndexD:
3933 case Match_InvalidLabel:
3934 case Match_MSR:
3935 case Match_MRS: {
3936 if (ErrorInfo >= Operands.size())
3937 return Error(IDLoc, "too few operands for instruction");
3938 // Any time we get here, there's nothing fancy to do. Just get the
3939 // operand SMLoc and display the diagnostic.
3940 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3941 if (ErrorLoc == SMLoc())
3942 ErrorLoc = IDLoc;
3943 return showMatchError(ErrorLoc, MatchResult);
3944 }
3945 }
3946
3947 llvm_unreachable("Implement any new match types added!");
3948 }
3949
3950 /// ParseDirective parses the arm specific directives
ParseDirective(AsmToken DirectiveID)3951 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
3952 const MCObjectFileInfo::Environment Format =
3953 getContext().getObjectFileInfo()->getObjectFileType();
3954 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
3955 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
3956
3957 StringRef IDVal = DirectiveID.getIdentifier();
3958 SMLoc Loc = DirectiveID.getLoc();
3959 if (IDVal == ".hword")
3960 return parseDirectiveWord(2, Loc);
3961 if (IDVal == ".word")
3962 return parseDirectiveWord(4, Loc);
3963 if (IDVal == ".xword")
3964 return parseDirectiveWord(8, Loc);
3965 if (IDVal == ".tlsdesccall")
3966 return parseDirectiveTLSDescCall(Loc);
3967 if (IDVal == ".ltorg" || IDVal == ".pool")
3968 return parseDirectiveLtorg(Loc);
3969 if (IDVal == ".unreq")
3970 return parseDirectiveUnreq(DirectiveID.getLoc());
3971
3972 if (!IsMachO && !IsCOFF) {
3973 if (IDVal == ".inst")
3974 return parseDirectiveInst(Loc);
3975 }
3976
3977 return parseDirectiveLOH(IDVal, Loc);
3978 }
3979
3980 /// parseDirectiveWord
3981 /// ::= .word [ expression (, expression)* ]
parseDirectiveWord(unsigned Size,SMLoc L)3982 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
3983 MCAsmParser &Parser = getParser();
3984 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3985 for (;;) {
3986 const MCExpr *Value;
3987 if (getParser().parseExpression(Value))
3988 return true;
3989
3990 getParser().getStreamer().EmitValue(Value, Size);
3991
3992 if (getLexer().is(AsmToken::EndOfStatement))
3993 break;
3994
3995 // FIXME: Improve diagnostic.
3996 if (getLexer().isNot(AsmToken::Comma))
3997 return Error(L, "unexpected token in directive");
3998 Parser.Lex();
3999 }
4000 }
4001
4002 Parser.Lex();
4003 return false;
4004 }
4005
4006 /// parseDirectiveInst
4007 /// ::= .inst opcode [, ...]
parseDirectiveInst(SMLoc Loc)4008 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4009 MCAsmParser &Parser = getParser();
4010 if (getLexer().is(AsmToken::EndOfStatement)) {
4011 Parser.eatToEndOfStatement();
4012 Error(Loc, "expected expression following directive");
4013 return false;
4014 }
4015
4016 for (;;) {
4017 const MCExpr *Expr;
4018
4019 if (getParser().parseExpression(Expr)) {
4020 Error(Loc, "expected expression");
4021 return false;
4022 }
4023
4024 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4025 if (!Value) {
4026 Error(Loc, "expected constant expression");
4027 return false;
4028 }
4029
4030 getTargetStreamer().emitInst(Value->getValue());
4031
4032 if (getLexer().is(AsmToken::EndOfStatement))
4033 break;
4034
4035 if (getLexer().isNot(AsmToken::Comma)) {
4036 Error(Loc, "unexpected token in directive");
4037 return false;
4038 }
4039
4040 Parser.Lex(); // Eat comma.
4041 }
4042
4043 Parser.Lex();
4044 return false;
4045 }
4046
4047 // parseDirectiveTLSDescCall:
4048 // ::= .tlsdesccall symbol
parseDirectiveTLSDescCall(SMLoc L)4049 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4050 StringRef Name;
4051 if (getParser().parseIdentifier(Name))
4052 return Error(L, "expected symbol after directive");
4053
4054 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
4055 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
4056 Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4057
4058 MCInst Inst;
4059 Inst.setOpcode(AArch64::TLSDESCCALL);
4060 Inst.addOperand(MCOperand::CreateExpr(Expr));
4061
4062 getParser().getStreamer().EmitInstruction(Inst, STI);
4063 return false;
4064 }
4065
/// parseDirectiveLOH
///  ::= .loh <lohName | lohId> label1, ..., labelN
/// The number of arguments depends on the loh identifier.
/// Parses a MachO linker-optimization-hint directive and forwards it to the
/// streamer. Returning true for an unrecognized identifier hands the
/// directive back to the caller (this is the last handler tried by
/// ParseDirective — presumably the generic parser then reports it; TODO
/// confirm against the MCTargetAsmParser::ParseDirective contract).
bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
  if (IDVal != MCLOHDirectiveName())
    return true;
  MCLOHType Kind;
  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    if (getParser().getTok().isNot(AsmToken::Integer))
      return TokError("expected an identifier or a number in directive");
    // We successfully get a numeric value for the identifier.
    // Check if it is valid.
    int64_t Id = getParser().getTok().getIntVal();
    // NOTE(review): `Id <= -1U` mixes int64_t with an unsigned literal;
    // after the usual conversions it holds for any Id up to 4294967295, so
    // the guard effectively reduces to !isValidMCLOHType(Id). Confirm the
    // intended upper bound.
    if (Id <= -1U && !isValidMCLOHType(Id))
      return TokError("invalid numeric identifier in directive");
    Kind = (MCLOHType)Id;
  } else {
    StringRef Name = getTok().getIdentifier();
    // We successfully parse an identifier.
    // Check if it is a recognized one.
    int Id = MCLOHNameToId(Name);

    if (Id == -1)
      return TokError("invalid identifier in directive");
    Kind = (MCLOHType)Id;
  }
  // Consume the identifier.
  Lex();
  // Get the number of arguments of this LOH.
  int NbArgs = MCLOHIdToNbArgs(Kind);

  assert(NbArgs != -1 && "Invalid number of arguments");

  // Collect exactly NbArgs comma-separated label names.
  SmallVector<MCSymbol *, 3> Args;
  for (int Idx = 0; Idx < NbArgs; ++Idx) {
    StringRef Name;
    if (getParser().parseIdentifier(Name))
      return TokError("expected identifier in directive");
    Args.push_back(getContext().GetOrCreateSymbol(Name));

    // No separator is expected after the last argument.
    if (Idx + 1 == NbArgs)
      break;
    if (getLexer().isNot(AsmToken::Comma))
      return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
    Lex();
  }
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return TokError("unexpected token in '" + Twine(IDVal) + "' directive");

  getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
  return false;
}
4117
/// parseDirectiveLtorg
///  ::= .ltorg | .pool
/// Flushes the literals accumulated so far by dumping the current constant
/// pool at this point in the output.
bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
  getTargetStreamer().emitCurrentConstantPool();
  return false;
}
4124
4125 /// parseDirectiveReq
4126 /// ::= name .req registername
parseDirectiveReq(StringRef Name,SMLoc L)4127 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4128 MCAsmParser &Parser = getParser();
4129 Parser.Lex(); // Eat the '.req' token.
4130 SMLoc SRegLoc = getLoc();
4131 unsigned RegNum = tryParseRegister();
4132 bool IsVector = false;
4133
4134 if (RegNum == static_cast<unsigned>(-1)) {
4135 StringRef Kind;
4136 RegNum = tryMatchVectorRegister(Kind, false);
4137 if (!Kind.empty()) {
4138 Error(SRegLoc, "vector register without type specifier expected");
4139 return false;
4140 }
4141 IsVector = true;
4142 }
4143
4144 if (RegNum == static_cast<unsigned>(-1)) {
4145 Parser.eatToEndOfStatement();
4146 Error(SRegLoc, "register name or alias expected");
4147 return false;
4148 }
4149
4150 // Shouldn't be anything else.
4151 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4152 Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4153 Parser.eatToEndOfStatement();
4154 return false;
4155 }
4156
4157 Parser.Lex(); // Consume the EndOfStatement
4158
4159 auto pair = std::make_pair(IsVector, RegNum);
4160 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4161 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4162
4163 return true;
4164 }
4165
4166 /// parseDirectiveUneq
4167 /// ::= .unreq registername
parseDirectiveUnreq(SMLoc L)4168 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4169 MCAsmParser &Parser = getParser();
4170 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4171 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4172 Parser.eatToEndOfStatement();
4173 return false;
4174 }
4175 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4176 Parser.Lex(); // Eat the identifier.
4177 return false;
4178 }
4179
4180 bool
classifySymbolRef(const MCExpr * Expr,AArch64MCExpr::VariantKind & ELFRefKind,MCSymbolRefExpr::VariantKind & DarwinRefKind,int64_t & Addend)4181 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4182 AArch64MCExpr::VariantKind &ELFRefKind,
4183 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4184 int64_t &Addend) {
4185 ELFRefKind = AArch64MCExpr::VK_INVALID;
4186 DarwinRefKind = MCSymbolRefExpr::VK_None;
4187 Addend = 0;
4188
4189 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4190 ELFRefKind = AE->getKind();
4191 Expr = AE->getSubExpr();
4192 }
4193
4194 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4195 if (SE) {
4196 // It's a simple symbol reference with no addend.
4197 DarwinRefKind = SE->getKind();
4198 return true;
4199 }
4200
4201 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4202 if (!BE)
4203 return false;
4204
4205 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4206 if (!SE)
4207 return false;
4208 DarwinRefKind = SE->getKind();
4209
4210 if (BE->getOpcode() != MCBinaryExpr::Add &&
4211 BE->getOpcode() != MCBinaryExpr::Sub)
4212 return false;
4213
4214 // See if the addend is is a constant, otherwise there's more going
4215 // on here than we can deal with.
4216 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4217 if (!AddendExpr)
4218 return false;
4219
4220 Addend = AddendExpr->getValue();
4221 if (BE->getOpcode() == MCBinaryExpr::Sub)
4222 Addend = -Addend;
4223
4224 // It's some symbol reference + a constant addend, but really
4225 // shouldn't use both Darwin and ELF syntax.
4226 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4227 DarwinRefKind == MCSymbolRefExpr::VK_None;
4228 }
4229
4230 /// Force static initialization.
LLVMInitializeAArch64AsmParser()4231 extern "C" void LLVMInitializeAArch64AsmParser() {
4232 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4233 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4234 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4235 }
4236
4237 #define GET_REGISTER_MATCHER
4238 #define GET_SUBTARGET_FEATURE_NAME
4239 #define GET_MATCHER_IMPLEMENTATION
4240 #include "AArch64GenAsmMatcher.inc"
4241
4242 // Define this matcher function after the auto-generated include so we
4243 // have the match class enum definitions.
validateTargetOperandClass(MCParsedAsmOperand & AsmOp,unsigned Kind)4244 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4245 unsigned Kind) {
4246 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4247 // If the kind is a token for a literal immediate, check if our asm
4248 // operand matches. This is for InstAliases which have a fixed-value
4249 // immediate in the syntax.
4250 int64_t ExpectedVal;
4251 switch (Kind) {
4252 default:
4253 return Match_InvalidOperand;
4254 case MCK__35_0:
4255 ExpectedVal = 0;
4256 break;
4257 case MCK__35_1:
4258 ExpectedVal = 1;
4259 break;
4260 case MCK__35_12:
4261 ExpectedVal = 12;
4262 break;
4263 case MCK__35_16:
4264 ExpectedVal = 16;
4265 break;
4266 case MCK__35_2:
4267 ExpectedVal = 2;
4268 break;
4269 case MCK__35_24:
4270 ExpectedVal = 24;
4271 break;
4272 case MCK__35_3:
4273 ExpectedVal = 3;
4274 break;
4275 case MCK__35_32:
4276 ExpectedVal = 32;
4277 break;
4278 case MCK__35_4:
4279 ExpectedVal = 4;
4280 break;
4281 case MCK__35_48:
4282 ExpectedVal = 48;
4283 break;
4284 case MCK__35_6:
4285 ExpectedVal = 6;
4286 break;
4287 case MCK__35_64:
4288 ExpectedVal = 64;
4289 break;
4290 case MCK__35_8:
4291 ExpectedVal = 8;
4292 break;
4293 }
4294 if (!Op.isImm())
4295 return Match_InvalidOperand;
4296 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4297 if (!CE)
4298 return Match_InvalidOperand;
4299 if (CE->getValue() == ExpectedVal)
4300 return Match_Success;
4301 return Match_InvalidOperand;
4302 }
4303