//=- AArch64RegisterInfo.td - Describe the AArch64 Registers -*- tablegen -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//


class AArch64Reg<bits<16> enc, string n, list<Register> subregs = [],
                 list<string> altNames = []>
        : Register<n, altNames> {
  let HWEncoding = enc;
  let Namespace = "AArch64";
  let SubRegs = subregs;
}

let Namespace = "AArch64" in {
  def sub_32 : SubRegIndex<32>;

  def bsub : SubRegIndex<8>;
  def hsub : SubRegIndex<16>;
  def ssub : SubRegIndex<32>;
  def dsub : SubRegIndex<32>;
  def sube32 : SubRegIndex<32>;
  def subo32 : SubRegIndex<32>;
  def qhisub : SubRegIndex<64>;
  def qsub : SubRegIndex<64>;
  def sube64 : SubRegIndex<64>;
  def subo64 : SubRegIndex<64>;
  // SVE
  def zsub : SubRegIndex<128>;
  // Note: zsub_hi should never be used directly because it represents
  // the scalable part of the SVE vector and cannot be manipulated as a
  // subvector in the same way the lower 128bits can.
  def zsub_hi : SubRegIndex<128>;
  // Note: Code depends on these having consecutive numbers
  def dsub0 : SubRegIndex<64>;
  def dsub1 : SubRegIndex<64>;
  def dsub2 : SubRegIndex<64>;
  def dsub3 : SubRegIndex<64>;
  // Note: Code depends on these having consecutive numbers
  def qsub0 : SubRegIndex<128>;
  def qsub1 : SubRegIndex<128>;
  def qsub2 : SubRegIndex<128>;
  def qsub3 : SubRegIndex<128>;
}

let Namespace = "AArch64" in {
  def vreg : RegAltNameIndex;
  def vlist1 : RegAltNameIndex;
}

//===----------------------------------------------------------------------===//
// Registers
//===----------------------------------------------------------------------===//
def W0 : AArch64Reg<0, "w0" >, DwarfRegNum<[0]>;
def W1 : AArch64Reg<1, "w1" >, DwarfRegNum<[1]>;
def W2 : AArch64Reg<2, "w2" >, DwarfRegNum<[2]>;
def W3 : AArch64Reg<3, "w3" >, DwarfRegNum<[3]>;
def W4 : AArch64Reg<4, "w4" >, DwarfRegNum<[4]>;
def W5 : AArch64Reg<5, "w5" >, DwarfRegNum<[5]>;
def W6 : AArch64Reg<6, "w6" >, DwarfRegNum<[6]>;
def W7 : AArch64Reg<7, "w7" >, DwarfRegNum<[7]>;
def W8 : AArch64Reg<8, "w8" >, DwarfRegNum<[8]>;
def W9 : AArch64Reg<9, "w9" >, DwarfRegNum<[9]>;
def W10 : AArch64Reg<10, "w10">, DwarfRegNum<[10]>;
def W11 : AArch64Reg<11, "w11">, DwarfRegNum<[11]>;
def W12 : AArch64Reg<12, "w12">, DwarfRegNum<[12]>;
def W13 : AArch64Reg<13, "w13">, DwarfRegNum<[13]>;
def W14 : AArch64Reg<14, "w14">, DwarfRegNum<[14]>;
def W15 : AArch64Reg<15, "w15">, DwarfRegNum<[15]>;
def W16 : AArch64Reg<16, "w16">, DwarfRegNum<[16]>;
def W17 : AArch64Reg<17, "w17">, DwarfRegNum<[17]>;
def W18 : AArch64Reg<18, "w18">, DwarfRegNum<[18]>;
def W19 : AArch64Reg<19, "w19">, DwarfRegNum<[19]>;
def W20 : AArch64Reg<20, "w20">, DwarfRegNum<[20]>;
def W21 : AArch64Reg<21, "w21">, DwarfRegNum<[21]>;
def W22 : AArch64Reg<22, "w22">, DwarfRegNum<[22]>;
def W23 : AArch64Reg<23, "w23">, DwarfRegNum<[23]>;
def W24 : AArch64Reg<24, "w24">, DwarfRegNum<[24]>;
def W25 : AArch64Reg<25, "w25">, DwarfRegNum<[25]>;
def W26 : AArch64Reg<26, "w26">, DwarfRegNum<[26]>;
def W27 : AArch64Reg<27, "w27">, DwarfRegNum<[27]>;
def W28 : AArch64Reg<28, "w28">, DwarfRegNum<[28]>;
def W29 : AArch64Reg<29, "w29">, DwarfRegNum<[29]>;
"w29">, DwarfRegNum<[29]>; 91def W30 : AArch64Reg<30, "w30">, DwarfRegNum<[30]>; 92def WSP : AArch64Reg<31, "wsp">, DwarfRegNum<[31]>; 93def WZR : AArch64Reg<31, "wzr">, DwarfRegAlias<WSP>; 94 95let SubRegIndices = [sub_32] in { 96def X0 : AArch64Reg<0, "x0", [W0]>, DwarfRegAlias<W0>; 97def X1 : AArch64Reg<1, "x1", [W1]>, DwarfRegAlias<W1>; 98def X2 : AArch64Reg<2, "x2", [W2]>, DwarfRegAlias<W2>; 99def X3 : AArch64Reg<3, "x3", [W3]>, DwarfRegAlias<W3>; 100def X4 : AArch64Reg<4, "x4", [W4]>, DwarfRegAlias<W4>; 101def X5 : AArch64Reg<5, "x5", [W5]>, DwarfRegAlias<W5>; 102def X6 : AArch64Reg<6, "x6", [W6]>, DwarfRegAlias<W6>; 103def X7 : AArch64Reg<7, "x7", [W7]>, DwarfRegAlias<W7>; 104def X8 : AArch64Reg<8, "x8", [W8]>, DwarfRegAlias<W8>; 105def X9 : AArch64Reg<9, "x9", [W9]>, DwarfRegAlias<W9>; 106def X10 : AArch64Reg<10, "x10", [W10]>, DwarfRegAlias<W10>; 107def X11 : AArch64Reg<11, "x11", [W11]>, DwarfRegAlias<W11>; 108def X12 : AArch64Reg<12, "x12", [W12]>, DwarfRegAlias<W12>; 109def X13 : AArch64Reg<13, "x13", [W13]>, DwarfRegAlias<W13>; 110def X14 : AArch64Reg<14, "x14", [W14]>, DwarfRegAlias<W14>; 111def X15 : AArch64Reg<15, "x15", [W15]>, DwarfRegAlias<W15>; 112def X16 : AArch64Reg<16, "x16", [W16]>, DwarfRegAlias<W16>; 113def X17 : AArch64Reg<17, "x17", [W17]>, DwarfRegAlias<W17>; 114def X18 : AArch64Reg<18, "x18", [W18]>, DwarfRegAlias<W18>; 115def X19 : AArch64Reg<19, "x19", [W19]>, DwarfRegAlias<W19>; 116def X20 : AArch64Reg<20, "x20", [W20]>, DwarfRegAlias<W20>; 117def X21 : AArch64Reg<21, "x21", [W21]>, DwarfRegAlias<W21>; 118def X22 : AArch64Reg<22, "x22", [W22]>, DwarfRegAlias<W22>; 119def X23 : AArch64Reg<23, "x23", [W23]>, DwarfRegAlias<W23>; 120def X24 : AArch64Reg<24, "x24", [W24]>, DwarfRegAlias<W24>; 121def X25 : AArch64Reg<25, "x25", [W25]>, DwarfRegAlias<W25>; 122def X26 : AArch64Reg<26, "x26", [W26]>, DwarfRegAlias<W26>; 123def X27 : AArch64Reg<27, "x27", [W27]>, DwarfRegAlias<W27>; 124def X28 : AArch64Reg<28, "x28", [W28]>, DwarfRegAlias<W28>; 125def FP : AArch64Reg<29, "x29", [W29]>, DwarfRegAlias<W29>; 126def LR : AArch64Reg<30, "x30", [W30]>, DwarfRegAlias<W30>; 127def SP : AArch64Reg<31, "sp", [WSP]>, DwarfRegAlias<WSP>; 128def XZR : AArch64Reg<31, "xzr", [WZR]>, DwarfRegAlias<WSP>; 129} 130 131// Condition code register. 132def NZCV : AArch64Reg<0, "nzcv">; 133 134// First fault status register 135def FFR : AArch64Reg<0, "ffr">, DwarfRegNum<[47]>; 136 137// GPR register classes with the intersections of GPR32/GPR32sp and 138// GPR64/GPR64sp for use by the coalescer. 139def GPR32common : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 0, 30)> { 140 let AltOrders = [(rotl GPR32common, 8)]; 141 let AltOrderSelect = [{ return 1; }]; 142} 143def GPR64common : RegisterClass<"AArch64", [i64], 64, 144 (add (sequence "X%u", 0, 28), FP, LR)> { 145 let AltOrders = [(rotl GPR64common, 8)]; 146 let AltOrderSelect = [{ return 1; }]; 147} 148// GPR register classes which exclude SP/WSP. 149def GPR32 : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WZR)> { 150 let AltOrders = [(rotl GPR32, 8)]; 151 let AltOrderSelect = [{ return 1; }]; 152} 153def GPR64 : RegisterClass<"AArch64", [i64], 64, (add GPR64common, XZR)> { 154 let AltOrders = [(rotl GPR64, 8)]; 155 let AltOrderSelect = [{ return 1; }]; 156} 157 158// GPR register classes which include SP/WSP. 
def GPR32sp : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WSP)> {
  let AltOrders = [(rotl GPR32sp, 8)];
  let AltOrderSelect = [{ return 1; }];
}
def GPR64sp : RegisterClass<"AArch64", [i64], 64, (add GPR64common, SP)> {
  let AltOrders = [(rotl GPR64sp, 8)];
  let AltOrderSelect = [{ return 1; }];
}

def GPR32sponly : RegisterClass<"AArch64", [i32], 32, (add WSP)>;
def GPR64sponly : RegisterClass<"AArch64", [i64], 64, (add SP)>;

def GPR64spPlus0Operand : AsmOperandClass {
  let Name = "GPR64sp0";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isGPR64<AArch64::GPR64spRegClassID>";
  let ParserMethod = "tryParseGPR64sp0Operand";
}

def GPR64sp0 : RegisterOperand<GPR64sp> {
  let ParserMatchClass = GPR64spPlus0Operand;
}

// GPR32/GPR64 but with zero-register substitution enabled.
// TODO: Roll this out to GPR32/GPR64/GPR32all/GPR64all.
def GPR32z : RegisterOperand<GPR32> {
  let GIZeroRegister = WZR;
}
def GPR64z : RegisterOperand<GPR64> {
  let GIZeroRegister = XZR;
}

// GPR register classes which include WZR/XZR AND SP/WSP. This is not a
// constraint used by any instructions, it is used as a common super-class.
def GPR32all : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WZR, WSP)>;
def GPR64all : RegisterClass<"AArch64", [i64], 64, (add GPR64common, XZR, SP)>;

// For tail calls, we can't use callee-saved registers, as they are restored
// to the saved value before the tail call, which would clobber a call address.
// This is for indirect tail calls to store the address of the destination.
def tcGPR64 : RegisterClass<"AArch64", [i64], 64, (sub GPR64common, X19, X20, X21,
                                                        X22, X23, X24, X25, X26,
                                                        X27, X28, FP, LR)>;

// GPR register classes for post increment amount of vector load/store that
// has alternate printing when Rm=31 and prints a constant immediate value
// equal to the total number of bytes transferred.

// FIXME: TableGen *should* be able to do these itself now. There appears to be
// a bug in counting how many operands a Post-indexed MCInst should have which
// means the aliases don't trigger.
def GPR64pi1 : RegisterOperand<GPR64, "printPostIncOperand<1>">;
def GPR64pi2 : RegisterOperand<GPR64, "printPostIncOperand<2>">;
def GPR64pi3 : RegisterOperand<GPR64, "printPostIncOperand<3>">;
def GPR64pi4 : RegisterOperand<GPR64, "printPostIncOperand<4>">;
def GPR64pi6 : RegisterOperand<GPR64, "printPostIncOperand<6>">;
def GPR64pi8 : RegisterOperand<GPR64, "printPostIncOperand<8>">;
def GPR64pi12 : RegisterOperand<GPR64, "printPostIncOperand<12>">;
def GPR64pi16 : RegisterOperand<GPR64, "printPostIncOperand<16>">;
def GPR64pi24 : RegisterOperand<GPR64, "printPostIncOperand<24>">;
def GPR64pi32 : RegisterOperand<GPR64, "printPostIncOperand<32>">;
def GPR64pi48 : RegisterOperand<GPR64, "printPostIncOperand<48>">;
def GPR64pi64 : RegisterOperand<GPR64, "printPostIncOperand<64>">;

// Condition code regclass.
def CCR : RegisterClass<"AArch64", [i32], 32, (add NZCV)> {
  let CopyCost = -1; // Don't allow copying of status registers.

  // CCR is not allocatable.
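  // NZCV is only ever written and read through implicit operands on
  // flag-setting and flag-using instructions, so the register allocator
  // never needs to (and must not) assign it.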
  let isAllocatable = 0;
}

//===----------------------------------------------------------------------===//
// Floating Point Scalar Registers
//===----------------------------------------------------------------------===//

def B0 : AArch64Reg<0, "b0">, DwarfRegNum<[64]>;
def B1 : AArch64Reg<1, "b1">, DwarfRegNum<[65]>;
def B2 : AArch64Reg<2, "b2">, DwarfRegNum<[66]>;
def B3 : AArch64Reg<3, "b3">, DwarfRegNum<[67]>;
def B4 : AArch64Reg<4, "b4">, DwarfRegNum<[68]>;
def B5 : AArch64Reg<5, "b5">, DwarfRegNum<[69]>;
def B6 : AArch64Reg<6, "b6">, DwarfRegNum<[70]>;
def B7 : AArch64Reg<7, "b7">, DwarfRegNum<[71]>;
def B8 : AArch64Reg<8, "b8">, DwarfRegNum<[72]>;
def B9 : AArch64Reg<9, "b9">, DwarfRegNum<[73]>;
def B10 : AArch64Reg<10, "b10">, DwarfRegNum<[74]>;
def B11 : AArch64Reg<11, "b11">, DwarfRegNum<[75]>;
def B12 : AArch64Reg<12, "b12">, DwarfRegNum<[76]>;
def B13 : AArch64Reg<13, "b13">, DwarfRegNum<[77]>;
def B14 : AArch64Reg<14, "b14">, DwarfRegNum<[78]>;
def B15 : AArch64Reg<15, "b15">, DwarfRegNum<[79]>;
def B16 : AArch64Reg<16, "b16">, DwarfRegNum<[80]>;
def B17 : AArch64Reg<17, "b17">, DwarfRegNum<[81]>;
def B18 : AArch64Reg<18, "b18">, DwarfRegNum<[82]>;
def B19 : AArch64Reg<19, "b19">, DwarfRegNum<[83]>;
def B20 : AArch64Reg<20, "b20">, DwarfRegNum<[84]>;
def B21 : AArch64Reg<21, "b21">, DwarfRegNum<[85]>;
def B22 : AArch64Reg<22, "b22">, DwarfRegNum<[86]>;
def B23 : AArch64Reg<23, "b23">, DwarfRegNum<[87]>;
def B24 : AArch64Reg<24, "b24">, DwarfRegNum<[88]>;
def B25 : AArch64Reg<25, "b25">, DwarfRegNum<[89]>;
def B26 : AArch64Reg<26, "b26">, DwarfRegNum<[90]>;
def B27 : AArch64Reg<27, "b27">, DwarfRegNum<[91]>;
def B28 : AArch64Reg<28, "b28">, DwarfRegNum<[92]>;
def B29 : AArch64Reg<29, "b29">, DwarfRegNum<[93]>;
def B30 : AArch64Reg<30, "b30">, DwarfRegNum<[94]>;
def B31 : AArch64Reg<31, "b31">, DwarfRegNum<[95]>;

let SubRegIndices = [bsub] in {
def H0 : AArch64Reg<0, "h0", [B0]>, DwarfRegAlias<B0>;
def H1 : AArch64Reg<1, "h1", [B1]>, DwarfRegAlias<B1>;
def H2 : AArch64Reg<2, "h2", [B2]>, DwarfRegAlias<B2>;
def H3 : AArch64Reg<3, "h3", [B3]>, DwarfRegAlias<B3>;
def H4 : AArch64Reg<4, "h4", [B4]>, DwarfRegAlias<B4>;
def H5 : AArch64Reg<5, "h5", [B5]>, DwarfRegAlias<B5>;
def H6 : AArch64Reg<6, "h6", [B6]>, DwarfRegAlias<B6>;
def H7 : AArch64Reg<7, "h7", [B7]>, DwarfRegAlias<B7>;
def H8 : AArch64Reg<8, "h8", [B8]>, DwarfRegAlias<B8>;
def H9 : AArch64Reg<9, "h9", [B9]>, DwarfRegAlias<B9>;
def H10 : AArch64Reg<10, "h10", [B10]>, DwarfRegAlias<B10>;
def H11 : AArch64Reg<11, "h11", [B11]>, DwarfRegAlias<B11>;
def H12 : AArch64Reg<12, "h12", [B12]>, DwarfRegAlias<B12>;
def H13 : AArch64Reg<13, "h13", [B13]>, DwarfRegAlias<B13>;
def H14 : AArch64Reg<14, "h14", [B14]>, DwarfRegAlias<B14>;
def H15 : AArch64Reg<15, "h15", [B15]>, DwarfRegAlias<B15>;
def H16 : AArch64Reg<16, "h16", [B16]>, DwarfRegAlias<B16>;
def H17 : AArch64Reg<17, "h17", [B17]>, DwarfRegAlias<B17>;
def H18 : AArch64Reg<18, "h18", [B18]>, DwarfRegAlias<B18>;
def H19 : AArch64Reg<19, "h19", [B19]>, DwarfRegAlias<B19>;
def H20 : AArch64Reg<20, "h20", [B20]>, DwarfRegAlias<B20>;
def H21 : AArch64Reg<21, "h21", [B21]>, DwarfRegAlias<B21>;
def H22 : AArch64Reg<22, "h22", [B22]>, DwarfRegAlias<B22>;
def H23 : AArch64Reg<23, "h23", [B23]>, DwarfRegAlias<B23>;
def H24 : AArch64Reg<24, "h24", [B24]>, DwarfRegAlias<B24>;
def H25 : AArch64Reg<25, "h25", [B25]>, DwarfRegAlias<B25>;
AArch64Reg<25, "h25", [B25]>, DwarfRegAlias<B25>; 295def H26 : AArch64Reg<26, "h26", [B26]>, DwarfRegAlias<B26>; 296def H27 : AArch64Reg<27, "h27", [B27]>, DwarfRegAlias<B27>; 297def H28 : AArch64Reg<28, "h28", [B28]>, DwarfRegAlias<B28>; 298def H29 : AArch64Reg<29, "h29", [B29]>, DwarfRegAlias<B29>; 299def H30 : AArch64Reg<30, "h30", [B30]>, DwarfRegAlias<B30>; 300def H31 : AArch64Reg<31, "h31", [B31]>, DwarfRegAlias<B31>; 301} 302 303let SubRegIndices = [hsub] in { 304def S0 : AArch64Reg<0, "s0", [H0]>, DwarfRegAlias<B0>; 305def S1 : AArch64Reg<1, "s1", [H1]>, DwarfRegAlias<B1>; 306def S2 : AArch64Reg<2, "s2", [H2]>, DwarfRegAlias<B2>; 307def S3 : AArch64Reg<3, "s3", [H3]>, DwarfRegAlias<B3>; 308def S4 : AArch64Reg<4, "s4", [H4]>, DwarfRegAlias<B4>; 309def S5 : AArch64Reg<5, "s5", [H5]>, DwarfRegAlias<B5>; 310def S6 : AArch64Reg<6, "s6", [H6]>, DwarfRegAlias<B6>; 311def S7 : AArch64Reg<7, "s7", [H7]>, DwarfRegAlias<B7>; 312def S8 : AArch64Reg<8, "s8", [H8]>, DwarfRegAlias<B8>; 313def S9 : AArch64Reg<9, "s9", [H9]>, DwarfRegAlias<B9>; 314def S10 : AArch64Reg<10, "s10", [H10]>, DwarfRegAlias<B10>; 315def S11 : AArch64Reg<11, "s11", [H11]>, DwarfRegAlias<B11>; 316def S12 : AArch64Reg<12, "s12", [H12]>, DwarfRegAlias<B12>; 317def S13 : AArch64Reg<13, "s13", [H13]>, DwarfRegAlias<B13>; 318def S14 : AArch64Reg<14, "s14", [H14]>, DwarfRegAlias<B14>; 319def S15 : AArch64Reg<15, "s15", [H15]>, DwarfRegAlias<B15>; 320def S16 : AArch64Reg<16, "s16", [H16]>, DwarfRegAlias<B16>; 321def S17 : AArch64Reg<17, "s17", [H17]>, DwarfRegAlias<B17>; 322def S18 : AArch64Reg<18, "s18", [H18]>, DwarfRegAlias<B18>; 323def S19 : AArch64Reg<19, "s19", [H19]>, DwarfRegAlias<B19>; 324def S20 : AArch64Reg<20, "s20", [H20]>, DwarfRegAlias<B20>; 325def S21 : AArch64Reg<21, "s21", [H21]>, DwarfRegAlias<B21>; 326def S22 : AArch64Reg<22, "s22", [H22]>, DwarfRegAlias<B22>; 327def S23 : AArch64Reg<23, "s23", [H23]>, DwarfRegAlias<B23>; 328def S24 : AArch64Reg<24, "s24", [H24]>, DwarfRegAlias<B24>; 329def S25 : AArch64Reg<25, "s25", [H25]>, DwarfRegAlias<B25>; 330def S26 : AArch64Reg<26, "s26", [H26]>, DwarfRegAlias<B26>; 331def S27 : AArch64Reg<27, "s27", [H27]>, DwarfRegAlias<B27>; 332def S28 : AArch64Reg<28, "s28", [H28]>, DwarfRegAlias<B28>; 333def S29 : AArch64Reg<29, "s29", [H29]>, DwarfRegAlias<B29>; 334def S30 : AArch64Reg<30, "s30", [H30]>, DwarfRegAlias<B30>; 335def S31 : AArch64Reg<31, "s31", [H31]>, DwarfRegAlias<B31>; 336} 337 338let SubRegIndices = [ssub], RegAltNameIndices = [vreg, vlist1] in { 339def D0 : AArch64Reg<0, "d0", [S0], ["v0", ""]>, DwarfRegAlias<B0>; 340def D1 : AArch64Reg<1, "d1", [S1], ["v1", ""]>, DwarfRegAlias<B1>; 341def D2 : AArch64Reg<2, "d2", [S2], ["v2", ""]>, DwarfRegAlias<B2>; 342def D3 : AArch64Reg<3, "d3", [S3], ["v3", ""]>, DwarfRegAlias<B3>; 343def D4 : AArch64Reg<4, "d4", [S4], ["v4", ""]>, DwarfRegAlias<B4>; 344def D5 : AArch64Reg<5, "d5", [S5], ["v5", ""]>, DwarfRegAlias<B5>; 345def D6 : AArch64Reg<6, "d6", [S6], ["v6", ""]>, DwarfRegAlias<B6>; 346def D7 : AArch64Reg<7, "d7", [S7], ["v7", ""]>, DwarfRegAlias<B7>; 347def D8 : AArch64Reg<8, "d8", [S8], ["v8", ""]>, DwarfRegAlias<B8>; 348def D9 : AArch64Reg<9, "d9", [S9], ["v9", ""]>, DwarfRegAlias<B9>; 349def D10 : AArch64Reg<10, "d10", [S10], ["v10", ""]>, DwarfRegAlias<B10>; 350def D11 : AArch64Reg<11, "d11", [S11], ["v11", ""]>, DwarfRegAlias<B11>; 351def D12 : AArch64Reg<12, "d12", [S12], ["v12", ""]>, DwarfRegAlias<B12>; 352def D13 : AArch64Reg<13, "d13", [S13], ["v13", ""]>, DwarfRegAlias<B13>; 353def D14 : AArch64Reg<14, 
"d14", [S14], ["v14", ""]>, DwarfRegAlias<B14>; 354def D15 : AArch64Reg<15, "d15", [S15], ["v15", ""]>, DwarfRegAlias<B15>; 355def D16 : AArch64Reg<16, "d16", [S16], ["v16", ""]>, DwarfRegAlias<B16>; 356def D17 : AArch64Reg<17, "d17", [S17], ["v17", ""]>, DwarfRegAlias<B17>; 357def D18 : AArch64Reg<18, "d18", [S18], ["v18", ""]>, DwarfRegAlias<B18>; 358def D19 : AArch64Reg<19, "d19", [S19], ["v19", ""]>, DwarfRegAlias<B19>; 359def D20 : AArch64Reg<20, "d20", [S20], ["v20", ""]>, DwarfRegAlias<B20>; 360def D21 : AArch64Reg<21, "d21", [S21], ["v21", ""]>, DwarfRegAlias<B21>; 361def D22 : AArch64Reg<22, "d22", [S22], ["v22", ""]>, DwarfRegAlias<B22>; 362def D23 : AArch64Reg<23, "d23", [S23], ["v23", ""]>, DwarfRegAlias<B23>; 363def D24 : AArch64Reg<24, "d24", [S24], ["v24", ""]>, DwarfRegAlias<B24>; 364def D25 : AArch64Reg<25, "d25", [S25], ["v25", ""]>, DwarfRegAlias<B25>; 365def D26 : AArch64Reg<26, "d26", [S26], ["v26", ""]>, DwarfRegAlias<B26>; 366def D27 : AArch64Reg<27, "d27", [S27], ["v27", ""]>, DwarfRegAlias<B27>; 367def D28 : AArch64Reg<28, "d28", [S28], ["v28", ""]>, DwarfRegAlias<B28>; 368def D29 : AArch64Reg<29, "d29", [S29], ["v29", ""]>, DwarfRegAlias<B29>; 369def D30 : AArch64Reg<30, "d30", [S30], ["v30", ""]>, DwarfRegAlias<B30>; 370def D31 : AArch64Reg<31, "d31", [S31], ["v31", ""]>, DwarfRegAlias<B31>; 371} 372 373let SubRegIndices = [dsub], RegAltNameIndices = [vreg, vlist1] in { 374def Q0 : AArch64Reg<0, "q0", [D0], ["v0", ""]>, DwarfRegAlias<B0>; 375def Q1 : AArch64Reg<1, "q1", [D1], ["v1", ""]>, DwarfRegAlias<B1>; 376def Q2 : AArch64Reg<2, "q2", [D2], ["v2", ""]>, DwarfRegAlias<B2>; 377def Q3 : AArch64Reg<3, "q3", [D3], ["v3", ""]>, DwarfRegAlias<B3>; 378def Q4 : AArch64Reg<4, "q4", [D4], ["v4", ""]>, DwarfRegAlias<B4>; 379def Q5 : AArch64Reg<5, "q5", [D5], ["v5", ""]>, DwarfRegAlias<B5>; 380def Q6 : AArch64Reg<6, "q6", [D6], ["v6", ""]>, DwarfRegAlias<B6>; 381def Q7 : AArch64Reg<7, "q7", [D7], ["v7", ""]>, DwarfRegAlias<B7>; 382def Q8 : AArch64Reg<8, "q8", [D8], ["v8", ""]>, DwarfRegAlias<B8>; 383def Q9 : AArch64Reg<9, "q9", [D9], ["v9", ""]>, DwarfRegAlias<B9>; 384def Q10 : AArch64Reg<10, "q10", [D10], ["v10", ""]>, DwarfRegAlias<B10>; 385def Q11 : AArch64Reg<11, "q11", [D11], ["v11", ""]>, DwarfRegAlias<B11>; 386def Q12 : AArch64Reg<12, "q12", [D12], ["v12", ""]>, DwarfRegAlias<B12>; 387def Q13 : AArch64Reg<13, "q13", [D13], ["v13", ""]>, DwarfRegAlias<B13>; 388def Q14 : AArch64Reg<14, "q14", [D14], ["v14", ""]>, DwarfRegAlias<B14>; 389def Q15 : AArch64Reg<15, "q15", [D15], ["v15", ""]>, DwarfRegAlias<B15>; 390def Q16 : AArch64Reg<16, "q16", [D16], ["v16", ""]>, DwarfRegAlias<B16>; 391def Q17 : AArch64Reg<17, "q17", [D17], ["v17", ""]>, DwarfRegAlias<B17>; 392def Q18 : AArch64Reg<18, "q18", [D18], ["v18", ""]>, DwarfRegAlias<B18>; 393def Q19 : AArch64Reg<19, "q19", [D19], ["v19", ""]>, DwarfRegAlias<B19>; 394def Q20 : AArch64Reg<20, "q20", [D20], ["v20", ""]>, DwarfRegAlias<B20>; 395def Q21 : AArch64Reg<21, "q21", [D21], ["v21", ""]>, DwarfRegAlias<B21>; 396def Q22 : AArch64Reg<22, "q22", [D22], ["v22", ""]>, DwarfRegAlias<B22>; 397def Q23 : AArch64Reg<23, "q23", [D23], ["v23", ""]>, DwarfRegAlias<B23>; 398def Q24 : AArch64Reg<24, "q24", [D24], ["v24", ""]>, DwarfRegAlias<B24>; 399def Q25 : AArch64Reg<25, "q25", [D25], ["v25", ""]>, DwarfRegAlias<B25>; 400def Q26 : AArch64Reg<26, "q26", [D26], ["v26", ""]>, DwarfRegAlias<B26>; 401def Q27 : AArch64Reg<27, "q27", [D27], ["v27", ""]>, DwarfRegAlias<B27>; 402def Q28 : AArch64Reg<28, "q28", [D28], ["v28", ""]>, 
def Q29 : AArch64Reg<29, "q29", [D29], ["v29", ""]>, DwarfRegAlias<B29>;
def Q30 : AArch64Reg<30, "q30", [D30], ["v30", ""]>, DwarfRegAlias<B30>;
def Q31 : AArch64Reg<31, "q31", [D31], ["v31", ""]>, DwarfRegAlias<B31>;
}

def FPR8 : RegisterClass<"AArch64", [untyped], 8, (sequence "B%u", 0, 31)> {
  let Size = 8;
}
def FPR16 : RegisterClass<"AArch64", [f16], 16, (sequence "H%u", 0, 31)> {
  let Size = 16;
}
def FPR32 : RegisterClass<"AArch64", [f32, i32], 32, (sequence "S%u", 0, 31)>;
def FPR64 : RegisterClass<"AArch64", [f64, i64, v2f32, v1f64, v8i8, v4i16, v2i32,
                                      v1i64, v4f16],
                          64, (sequence "D%u", 0, 31)>;
// We don't (yet) have an f128 legal type, so don't use that here. We
// normalize 128-bit vectors to v2f64 for arg passing and such, so use
// that here.
def FPR128 : RegisterClass<"AArch64",
                           [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, f128,
                            v8f16],
                           128, (sequence "Q%u", 0, 31)>;

// The lower 16 vector registers. Some instructions can only take registers
// in this range.
def FPR128_lo : RegisterClass<"AArch64",
                              [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, v8f16],
                              128, (trunc FPR128, 16)>;

// Pairs, triples, and quads of 64-bit vector registers.
def DSeqPairs : RegisterTuples<[dsub0, dsub1], [(rotl FPR64, 0), (rotl FPR64, 1)]>;
def DSeqTriples : RegisterTuples<[dsub0, dsub1, dsub2],
                                 [(rotl FPR64, 0), (rotl FPR64, 1),
                                  (rotl FPR64, 2)]>;
def DSeqQuads : RegisterTuples<[dsub0, dsub1, dsub2, dsub3],
                               [(rotl FPR64, 0), (rotl FPR64, 1),
                                (rotl FPR64, 2), (rotl FPR64, 3)]>;
def DD : RegisterClass<"AArch64", [untyped], 64, (add DSeqPairs)> {
  let Size = 128;
}
def DDD : RegisterClass<"AArch64", [untyped], 64, (add DSeqTriples)> {
  let Size = 192;
}
def DDDD : RegisterClass<"AArch64", [untyped], 64, (add DSeqQuads)> {
  let Size = 256;
}

// Pairs, triples, and quads of 128-bit vector registers.
def QSeqPairs : RegisterTuples<[qsub0, qsub1], [(rotl FPR128, 0), (rotl FPR128, 1)]>;
def QSeqTriples : RegisterTuples<[qsub0, qsub1, qsub2],
                                 [(rotl FPR128, 0), (rotl FPR128, 1),
                                  (rotl FPR128, 2)]>;
def QSeqQuads : RegisterTuples<[qsub0, qsub1, qsub2, qsub3],
                               [(rotl FPR128, 0), (rotl FPR128, 1),
                                (rotl FPR128, 2), (rotl FPR128, 3)]>;
def QQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqPairs)> {
  let Size = 256;
}
def QQQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqTriples)> {
  let Size = 384;
}
def QQQQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqQuads)> {
  let Size = 512;
}


// Vector operand versions of the FP registers. Alternate name printing and
// assembler matching.
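// For example, D0 used through the V64 operand below parses and prints as
// "v0" (as in "add v0.8b, v1.8b, v2.8b") rather than as the scalar name "d0".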
def VectorReg64AsmOperand : AsmOperandClass {
  let Name = "VectorReg64";
  let PredicateMethod = "isNeonVectorReg";
}
def VectorReg128AsmOperand : AsmOperandClass {
  let Name = "VectorReg128";
  let PredicateMethod = "isNeonVectorReg";
}

def V64 : RegisterOperand<FPR64, "printVRegOperand"> {
  let ParserMatchClass = VectorReg64AsmOperand;
}

def V128 : RegisterOperand<FPR128, "printVRegOperand"> {
  let ParserMatchClass = VectorReg128AsmOperand;
}

def VectorRegLoAsmOperand : AsmOperandClass {
  let Name = "VectorRegLo";
  let PredicateMethod = "isNeonVectorRegLo";
}
def V128_lo : RegisterOperand<FPR128_lo, "printVRegOperand"> {
  let ParserMatchClass = VectorRegLoAsmOperand;
}

class TypedVecListAsmOperand<int count, string vecty, int lanes, int eltsize>
    : AsmOperandClass {
  let Name = "TypedVectorList" # count # "_" # lanes # eltsize;

  let PredicateMethod
      = "isTypedVectorList<RegKind::NeonVector, " # count # ", " # lanes # ", " # eltsize # ">";
  let RenderMethod = "addVectorListOperands<" # vecty # ", " # count # ">";
}

class TypedVecListRegOperand<RegisterClass Reg, int lanes, string eltsize>
    : RegisterOperand<Reg, "printTypedVectorList<" # lanes # ", '"
                           # eltsize # "'>">;

multiclass VectorList<int count, RegisterClass Reg64, RegisterClass Reg128> {
  // With implicit types (probably on instruction instead). E.g. { v0, v1 }
  def _64AsmOperand : AsmOperandClass {
    let Name = NAME # "64";
    let PredicateMethod = "isImplicitlyTypedVectorList<RegKind::NeonVector, " # count # ">";
    let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_DReg, " # count # ">";
  }

  def "64" : RegisterOperand<Reg64, "printImplicitlyTypedVectorList"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_64AsmOperand");
  }

  def _128AsmOperand : AsmOperandClass {
    let Name = NAME # "128";
    let PredicateMethod = "isImplicitlyTypedVectorList<RegKind::NeonVector, " # count # ">";
    let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_QReg, " # count # ">";
  }

  def "128" : RegisterOperand<Reg128, "printImplicitlyTypedVectorList"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_128AsmOperand");
  }

  // 64-bit register lists with explicit type.

  // { v0.8b, v1.8b }
  def _8bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 8, 8>;
  def "8b" : TypedVecListRegOperand<Reg64, 8, "b"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8bAsmOperand");
  }

  // { v0.4h, v1.4h }
  def _4hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 4, 16>;
  def "4h" : TypedVecListRegOperand<Reg64, 4, "h"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4hAsmOperand");
  }

  // { v0.2s, v1.2s }
  def _2sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 2, 32>;
  def "2s" : TypedVecListRegOperand<Reg64, 2, "s"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2sAsmOperand");
  }

  // { v0.1d, v1.1d }
  def _1dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 1, 64>;
  def "1d" : TypedVecListRegOperand<Reg64, 1, "d"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_1dAsmOperand");
  }

  // 128-bit register lists with explicit type

  // { v0.16b, v1.16b }
  def _16bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 16, 8>;
  def "16b" : TypedVecListRegOperand<Reg128, 16, "b"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_16bAsmOperand");
  }

  // { v0.8h, v1.8h }
  def _8hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 8, 16>;
  def "8h" : TypedVecListRegOperand<Reg128, 8, "h"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8hAsmOperand");
  }

  // { v0.4s, v1.4s }
  def _4sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 4, 32>;
  def "4s" : TypedVecListRegOperand<Reg128, 4, "s"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4sAsmOperand");
  }

  // { v0.2d, v1.2d }
  def _2dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 2, 64>;
  def "2d" : TypedVecListRegOperand<Reg128, 2, "d"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2dAsmOperand");
  }

  // { v0.b, v1.b }
  def _bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 8>;
  def "b" : TypedVecListRegOperand<Reg128, 0, "b"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_bAsmOperand");
  }

  // { v0.h, v1.h }
  def _hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 16>;
  def "h" : TypedVecListRegOperand<Reg128, 0, "h"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_hAsmOperand");
  }

  // { v0.s, v1.s }
  def _sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 32>;
  def "s" : TypedVecListRegOperand<Reg128, 0, "s"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_sAsmOperand");
  }

  // { v0.d, v1.d }
  def _dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 64>;
  def "d" : TypedVecListRegOperand<Reg128, 0, "d"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_dAsmOperand");
  }


}

defm VecListOne : VectorList<1, FPR64, FPR128>;
defm VecListTwo : VectorList<2, DD, QQ>;
defm VecListThree : VectorList<3, DDD, QQQ>;
defm VecListFour : VectorList<4, DDDD, QQQQ>;

class FPRAsmOperand<string RC> : AsmOperandClass {
  let Name = "FPRAsmOperand" # RC;
  let PredicateMethod = "isGPR64<AArch64::" # RC # "RegClassID>";
let RenderMethod = "addRegOperands"; 619} 620 621// Register operand versions of the scalar FP registers. 622def FPR8Op : RegisterOperand<FPR8, "printOperand"> { 623 let ParserMatchClass = FPRAsmOperand<"FPR8">; 624} 625 626def FPR16Op : RegisterOperand<FPR16, "printOperand"> { 627 let ParserMatchClass = FPRAsmOperand<"FPR16">; 628} 629 630def FPR32Op : RegisterOperand<FPR32, "printOperand"> { 631 let ParserMatchClass = FPRAsmOperand<"FPR32">; 632} 633 634def FPR64Op : RegisterOperand<FPR64, "printOperand"> { 635 let ParserMatchClass = FPRAsmOperand<"FPR64">; 636} 637 638def FPR128Op : RegisterOperand<FPR128, "printOperand"> { 639 let ParserMatchClass = FPRAsmOperand<"FPR128">; 640} 641 642//===----------------------------------------------------------------------===// 643// ARMv8.1a atomic CASP register operands 644 645 646def WSeqPairs : RegisterTuples<[sube32, subo32], 647 [(rotl GPR32, 0), (rotl GPR32, 1)]>; 648def XSeqPairs : RegisterTuples<[sube64, subo64], 649 [(rotl GPR64, 0), (rotl GPR64, 1)]>; 650 651def WSeqPairsClass : RegisterClass<"AArch64", [untyped], 32, 652 (add WSeqPairs)>{ 653 let Size = 64; 654} 655def XSeqPairsClass : RegisterClass<"AArch64", [untyped], 64, 656 (add XSeqPairs)>{ 657 let Size = 128; 658} 659 660 661let RenderMethod = "addRegOperands", ParserMethod="tryParseGPRSeqPair" in { 662 def WSeqPairsAsmOperandClass : AsmOperandClass { let Name = "WSeqPair"; } 663 def XSeqPairsAsmOperandClass : AsmOperandClass { let Name = "XSeqPair"; } 664} 665 666def WSeqPairClassOperand : 667 RegisterOperand<WSeqPairsClass, "printGPRSeqPairsClassOperand<32>"> { 668 let ParserMatchClass = WSeqPairsAsmOperandClass; 669} 670def XSeqPairClassOperand : 671 RegisterOperand<XSeqPairsClass, "printGPRSeqPairsClassOperand<64>"> { 672 let ParserMatchClass = XSeqPairsAsmOperandClass; 673} 674 675 676//===----- END: v8.1a atomic CASP register operands -----------------------===// 677 678// SVE predicate registers 679def P0 : AArch64Reg<0, "p0">, DwarfRegNum<[48]>; 680def P1 : AArch64Reg<1, "p1">, DwarfRegNum<[49]>; 681def P2 : AArch64Reg<2, "p2">, DwarfRegNum<[50]>; 682def P3 : AArch64Reg<3, "p3">, DwarfRegNum<[51]>; 683def P4 : AArch64Reg<4, "p4">, DwarfRegNum<[52]>; 684def P5 : AArch64Reg<5, "p5">, DwarfRegNum<[53]>; 685def P6 : AArch64Reg<6, "p6">, DwarfRegNum<[54]>; 686def P7 : AArch64Reg<7, "p7">, DwarfRegNum<[55]>; 687def P8 : AArch64Reg<8, "p8">, DwarfRegNum<[56]>; 688def P9 : AArch64Reg<9, "p9">, DwarfRegNum<[57]>; 689def P10 : AArch64Reg<10, "p10">, DwarfRegNum<[58]>; 690def P11 : AArch64Reg<11, "p11">, DwarfRegNum<[59]>; 691def P12 : AArch64Reg<12, "p12">, DwarfRegNum<[60]>; 692def P13 : AArch64Reg<13, "p13">, DwarfRegNum<[61]>; 693def P14 : AArch64Reg<14, "p14">, DwarfRegNum<[62]>; 694def P15 : AArch64Reg<15, "p15">, DwarfRegNum<[63]>; 695 696// The part of SVE registers that don't overlap Neon registers. 697// These are only used as part of clobber lists. 
def Z0_HI : AArch64Reg<0, "z0_hi">;
def Z1_HI : AArch64Reg<1, "z1_hi">;
def Z2_HI : AArch64Reg<2, "z2_hi">;
def Z3_HI : AArch64Reg<3, "z3_hi">;
def Z4_HI : AArch64Reg<4, "z4_hi">;
def Z5_HI : AArch64Reg<5, "z5_hi">;
def Z6_HI : AArch64Reg<6, "z6_hi">;
def Z7_HI : AArch64Reg<7, "z7_hi">;
def Z8_HI : AArch64Reg<8, "z8_hi">;
def Z9_HI : AArch64Reg<9, "z9_hi">;
def Z10_HI : AArch64Reg<10, "z10_hi">;
def Z11_HI : AArch64Reg<11, "z11_hi">;
def Z12_HI : AArch64Reg<12, "z12_hi">;
def Z13_HI : AArch64Reg<13, "z13_hi">;
def Z14_HI : AArch64Reg<14, "z14_hi">;
def Z15_HI : AArch64Reg<15, "z15_hi">;
def Z16_HI : AArch64Reg<16, "z16_hi">;
def Z17_HI : AArch64Reg<17, "z17_hi">;
def Z18_HI : AArch64Reg<18, "z18_hi">;
def Z19_HI : AArch64Reg<19, "z19_hi">;
def Z20_HI : AArch64Reg<20, "z20_hi">;
def Z21_HI : AArch64Reg<21, "z21_hi">;
def Z22_HI : AArch64Reg<22, "z22_hi">;
def Z23_HI : AArch64Reg<23, "z23_hi">;
def Z24_HI : AArch64Reg<24, "z24_hi">;
def Z25_HI : AArch64Reg<25, "z25_hi">;
def Z26_HI : AArch64Reg<26, "z26_hi">;
def Z27_HI : AArch64Reg<27, "z27_hi">;
def Z28_HI : AArch64Reg<28, "z28_hi">;
def Z29_HI : AArch64Reg<29, "z29_hi">;
def Z30_HI : AArch64Reg<30, "z30_hi">;
def Z31_HI : AArch64Reg<31, "z31_hi">;

// SVE variable-size vector registers
let SubRegIndices = [zsub, zsub_hi] in {
def Z0 : AArch64Reg<0, "z0", [Q0, Z0_HI]>, DwarfRegNum<[96]>;
def Z1 : AArch64Reg<1, "z1", [Q1, Z1_HI]>, DwarfRegNum<[97]>;
def Z2 : AArch64Reg<2, "z2", [Q2, Z2_HI]>, DwarfRegNum<[98]>;
def Z3 : AArch64Reg<3, "z3", [Q3, Z3_HI]>, DwarfRegNum<[99]>;
def Z4 : AArch64Reg<4, "z4", [Q4, Z4_HI]>, DwarfRegNum<[100]>;
def Z5 : AArch64Reg<5, "z5", [Q5, Z5_HI]>, DwarfRegNum<[101]>;
def Z6 : AArch64Reg<6, "z6", [Q6, Z6_HI]>, DwarfRegNum<[102]>;
def Z7 : AArch64Reg<7, "z7", [Q7, Z7_HI]>, DwarfRegNum<[103]>;
def Z8 : AArch64Reg<8, "z8", [Q8, Z8_HI]>, DwarfRegNum<[104]>;
def Z9 : AArch64Reg<9, "z9", [Q9, Z9_HI]>, DwarfRegNum<[105]>;
def Z10 : AArch64Reg<10, "z10", [Q10, Z10_HI]>, DwarfRegNum<[106]>;
def Z11 : AArch64Reg<11, "z11", [Q11, Z11_HI]>, DwarfRegNum<[107]>;
def Z12 : AArch64Reg<12, "z12", [Q12, Z12_HI]>, DwarfRegNum<[108]>;
def Z13 : AArch64Reg<13, "z13", [Q13, Z13_HI]>, DwarfRegNum<[109]>;
def Z14 : AArch64Reg<14, "z14", [Q14, Z14_HI]>, DwarfRegNum<[110]>;
def Z15 : AArch64Reg<15, "z15", [Q15, Z15_HI]>, DwarfRegNum<[111]>;
def Z16 : AArch64Reg<16, "z16", [Q16, Z16_HI]>, DwarfRegNum<[112]>;
def Z17 : AArch64Reg<17, "z17", [Q17, Z17_HI]>, DwarfRegNum<[113]>;
def Z18 : AArch64Reg<18, "z18", [Q18, Z18_HI]>, DwarfRegNum<[114]>;
def Z19 : AArch64Reg<19, "z19", [Q19, Z19_HI]>, DwarfRegNum<[115]>;
def Z20 : AArch64Reg<20, "z20", [Q20, Z20_HI]>, DwarfRegNum<[116]>;
def Z21 : AArch64Reg<21, "z21", [Q21, Z21_HI]>, DwarfRegNum<[117]>;
def Z22 : AArch64Reg<22, "z22", [Q22, Z22_HI]>, DwarfRegNum<[118]>;
def Z23 : AArch64Reg<23, "z23", [Q23, Z23_HI]>, DwarfRegNum<[119]>;
def Z24 : AArch64Reg<24, "z24", [Q24, Z24_HI]>, DwarfRegNum<[120]>;
def Z25 : AArch64Reg<25, "z25", [Q25, Z25_HI]>, DwarfRegNum<[121]>;
def Z26 : AArch64Reg<26, "z26", [Q26, Z26_HI]>, DwarfRegNum<[122]>;
def Z27 : AArch64Reg<27, "z27", [Q27, Z27_HI]>, DwarfRegNum<[123]>;
def Z28 : AArch64Reg<28, "z28", [Q28, Z28_HI]>, DwarfRegNum<[124]>;
def Z29 : AArch64Reg<29, "z29", [Q29, Z29_HI]>, DwarfRegNum<[125]>;
def Z30 : AArch64Reg<30, "z30", [Q30, Z30_HI]>, DwarfRegNum<[126]>;
def Z31 : AArch64Reg<31, "z31", [Q31, Z31_HI]>, DwarfRegNum<[127]>;
}

// Enum describing the element size for destructive
// operations.
class ElementSizeEnum<bits<3> val> {
  bits<3> Value = val;
}

def ElementSizeNone : ElementSizeEnum<0>;
def ElementSizeB : ElementSizeEnum<1>;
def ElementSizeH : ElementSizeEnum<2>;
def ElementSizeS : ElementSizeEnum<3>;
def ElementSizeD : ElementSizeEnum<4>;
def ElementSizeQ : ElementSizeEnum<5>; // Unused

class SVERegOp <string Suffix, AsmOperandClass C,
                ElementSizeEnum Size,
                RegisterClass RC> : RegisterOperand<RC> {
  ElementSizeEnum ElementSize;

  let ElementSize = Size;
  let PrintMethod = !if(!eq(Suffix, ""),
                        "printSVERegOp<>",
                        "printSVERegOp<'" # Suffix # "'>");
  let ParserMatchClass = C;
}

class PPRRegOp <string Suffix, AsmOperandClass C, ElementSizeEnum Size,
                RegisterClass RC> : SVERegOp<Suffix, C, Size, RC> {}
class ZPRRegOp <string Suffix, AsmOperandClass C, ElementSizeEnum Size,
                RegisterClass RC> : SVERegOp<Suffix, C, Size, RC> {}

//******************************************************************************

// SVE predicate register classes.
class PPRClass<int lastreg> : RegisterClass<
                                "AArch64",
                                [ nxv16i1, nxv8i1, nxv4i1, nxv2i1 ], 16,
                                (sequence "P%u", 0, lastreg)> {
  let Size = 16;
}

def PPR : PPRClass<15>;
def PPR_3b : PPRClass<7>; // Restricted 3 bit SVE predicate register class.

class PPRAsmOperand <string name, string RegClass, int Width> : AsmOperandClass {
  let Name = "SVE" # name # "Reg";
  let PredicateMethod = "isSVEPredicateVectorRegOfWidth<"
                            # Width # ", " # "AArch64::" # RegClass # "RegClassID>";
  let DiagnosticType = "InvalidSVE" # name # "Reg";
  let RenderMethod = "addRegOperands";
  let ParserMethod = "tryParseSVEPredicateVector";
}

def PPRAsmOpAny : PPRAsmOperand<"PredicateAny", "PPR", 0>;
def PPRAsmOp8 : PPRAsmOperand<"PredicateB", "PPR", 8>;
def PPRAsmOp16 : PPRAsmOperand<"PredicateH", "PPR", 16>;
def PPRAsmOp32 : PPRAsmOperand<"PredicateS", "PPR", 32>;
def PPRAsmOp64 : PPRAsmOperand<"PredicateD", "PPR", 64>;

def PPRAny : PPRRegOp<"", PPRAsmOpAny, ElementSizeNone, PPR>;
def PPR8 : PPRRegOp<"b", PPRAsmOp8, ElementSizeB, PPR>;
def PPR16 : PPRRegOp<"h", PPRAsmOp16, ElementSizeH, PPR>;
def PPR32 : PPRRegOp<"s", PPRAsmOp32, ElementSizeS, PPR>;
def PPR64 : PPRRegOp<"d", PPRAsmOp64, ElementSizeD, PPR>;

def PPRAsmOp3bAny : PPRAsmOperand<"Predicate3bAny", "PPR_3b", 0>;
def PPRAsmOp3b8 : PPRAsmOperand<"Predicate3bB", "PPR_3b", 8>;
def PPRAsmOp3b16 : PPRAsmOperand<"Predicate3bH", "PPR_3b", 16>;
def PPRAsmOp3b32 : PPRAsmOperand<"Predicate3bS", "PPR_3b", 32>;
def PPRAsmOp3b64 : PPRAsmOperand<"Predicate3bD", "PPR_3b", 64>;

def PPR3bAny : PPRRegOp<"", PPRAsmOp3bAny, ElementSizeNone, PPR_3b>;
def PPR3b8 : PPRRegOp<"b", PPRAsmOp3b8, ElementSizeB, PPR_3b>;
def PPR3b16 : PPRRegOp<"h", PPRAsmOp3b16, ElementSizeH, PPR_3b>;
def PPR3b32 : PPRRegOp<"s", PPRAsmOp3b32, ElementSizeS, PPR_3b>;
def PPR3b64 : PPRRegOp<"d", PPRAsmOp3b64, ElementSizeD, PPR_3b>;

//******************************************************************************

// SVE vector register class
def ZPR : RegisterClass<"AArch64",
                        [nxv16i8, nxv8i16, nxv4i32, nxv2i64,
                         nxv2f16, nxv4f16, nxv8f16,
                         nxv1f32, nxv2f32, nxv4f32,
                         nxv1f64, nxv2f64],
                        128, (sequence "Z%u", 0, 31)> {
  let Size = 128;
}

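// The SVE architecture only guarantees a vector length of at least 128 bits
// (always a multiple of 128), which is presumably why the fixed Size above
// and in the restricted classes below is 128.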
// SVE restricted 4 bit scalable vector register class
def ZPR_4b : RegisterClass<"AArch64",
                           [nxv16i8, nxv8i16, nxv4i32, nxv2i64,
                            nxv2f16, nxv4f16, nxv8f16,
                            nxv1f32, nxv2f32, nxv4f32,
                            nxv1f64, nxv2f64],
                           128, (sequence "Z%u", 0, 15)> {
  let Size = 128;
}

// SVE restricted 3 bit scalable vector register class
def ZPR_3b : RegisterClass<"AArch64",
                           [nxv16i8, nxv8i16, nxv4i32, nxv2i64,
                            nxv2f16, nxv4f16, nxv8f16,
                            nxv1f32, nxv2f32, nxv4f32,
                            nxv1f64, nxv2f64],
                           128, (sequence "Z%u", 0, 7)> {
  let Size = 128;
}

class ZPRAsmOperand<string name, int Width, string RegClassSuffix = "">
    : AsmOperandClass {
  let Name = "SVE" # name # "Reg";
  let PredicateMethod = "isSVEDataVectorRegOfWidth<"
                            # Width # ", AArch64::ZPR"
                            # RegClassSuffix # "RegClassID>";
  let RenderMethod = "addRegOperands";
  let DiagnosticType = "InvalidZPR" # RegClassSuffix # Width;
  let ParserMethod = "tryParseSVEDataVector<false, "
                         # !if(!eq(Width, 0), "false", "true") # ">";
}

def ZPRAsmOpAny : ZPRAsmOperand<"VectorAny", 0>;
def ZPRAsmOp8 : ZPRAsmOperand<"VectorB", 8>;
def ZPRAsmOp16 : ZPRAsmOperand<"VectorH", 16>;
def ZPRAsmOp32 : ZPRAsmOperand<"VectorS", 32>;
def ZPRAsmOp64 : ZPRAsmOperand<"VectorD", 64>;
def ZPRAsmOp128 : ZPRAsmOperand<"VectorQ", 128>;

def ZPRAny : ZPRRegOp<"", ZPRAsmOpAny, ElementSizeNone, ZPR>;
def ZPR8 : ZPRRegOp<"b", ZPRAsmOp8, ElementSizeB, ZPR>;
def ZPR16 : ZPRRegOp<"h", ZPRAsmOp16, ElementSizeH, ZPR>;
def ZPR32 : ZPRRegOp<"s", ZPRAsmOp32, ElementSizeS, ZPR>;
def ZPR64 : ZPRRegOp<"d", ZPRAsmOp64, ElementSizeD, ZPR>;
def ZPR128 : ZPRRegOp<"q", ZPRAsmOp128, ElementSizeQ, ZPR>;

def ZPRAsmOp3b8 : ZPRAsmOperand<"Vector3bB", 8, "_3b">;
def ZPRAsmOp3b16 : ZPRAsmOperand<"Vector3bH", 16, "_3b">;
def ZPRAsmOp3b32 : ZPRAsmOperand<"Vector3bS", 32, "_3b">;

def ZPR3b8 : ZPRRegOp<"b", ZPRAsmOp3b8, ElementSizeB, ZPR_3b>;
def ZPR3b16 : ZPRRegOp<"h", ZPRAsmOp3b16, ElementSizeH, ZPR_3b>;
def ZPR3b32 : ZPRRegOp<"s", ZPRAsmOp3b32, ElementSizeS, ZPR_3b>;

def ZPRAsmOp4b16 : ZPRAsmOperand<"Vector4bH", 16, "_4b">;
def ZPRAsmOp4b32 : ZPRAsmOperand<"Vector4bS", 32, "_4b">;
def ZPRAsmOp4b64 : ZPRAsmOperand<"Vector4bD", 64, "_4b">;

def ZPR4b16 : ZPRRegOp<"h", ZPRAsmOp4b16, ElementSizeH, ZPR_4b>;
def ZPR4b32 : ZPRRegOp<"s", ZPRAsmOp4b32, ElementSizeS, ZPR_4b>;
def ZPR4b64 : ZPRRegOp<"d", ZPRAsmOp4b64, ElementSizeD, ZPR_4b>;

class FPRasZPR<int Width> : AsmOperandClass {
  let Name = "FPR" # Width # "asZPR";
  let PredicateMethod = "isFPRasZPR<AArch64::FPR" # Width # "RegClassID>";
  let RenderMethod = "addFPRasZPRRegOperands<" # Width # ">";
}

class FPRasZPROperand<int Width> : RegisterOperand<ZPR> {
  let ParserMatchClass = FPRasZPR<Width>;
  let PrintMethod = "printZPRasFPR<" # Width # ">";
}

def FPR8asZPR : FPRasZPROperand<8>;
def FPR16asZPR : FPRasZPROperand<16>;
def FPR32asZPR : FPRasZPROperand<32>;
def FPR64asZPR : FPRasZPROperand<64>;
def FPR128asZPR : FPRasZPROperand<128>;

let Namespace = "AArch64" in {
  def zsub0 : SubRegIndex<128, -1>;
  def zsub1 : SubRegIndex<128, -1>;
  def zsub2 : SubRegIndex<128, -1>;
  def zsub3 : SubRegIndex<128, -1>;
}

// Pairs, triples, and quads of SVE vector registers.
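// These model the consecutive-register lists used by SVE structured memory
// instructions; e.g. "ld2b { z0.b, z1.b }, p0/z, [x0]" consumes a ZPR2 pair.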
def ZSeqPairs : RegisterTuples<[zsub0, zsub1], [(rotl ZPR, 0), (rotl ZPR, 1)]>;
def ZSeqTriples : RegisterTuples<[zsub0, zsub1, zsub2], [(rotl ZPR, 0), (rotl ZPR, 1), (rotl ZPR, 2)]>;
def ZSeqQuads : RegisterTuples<[zsub0, zsub1, zsub2, zsub3], [(rotl ZPR, 0), (rotl ZPR, 1), (rotl ZPR, 2), (rotl ZPR, 3)]>;

def ZPR2 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqPairs)> {
  let Size = 256;
}
def ZPR3 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqTriples)> {
  let Size = 384;
}
def ZPR4 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqQuads)> {
  let Size = 512;
}

class ZPRVectorList<int ElementWidth, int NumRegs> : AsmOperandClass {
  let Name = "SVEVectorList" # NumRegs # ElementWidth;
  let ParserMethod = "tryParseVectorList<RegKind::SVEDataVector>";
  let PredicateMethod =
      "isTypedVectorList<RegKind::SVEDataVector, " # NumRegs # ", 0, " # ElementWidth # ">";
  let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_ZReg, " # NumRegs # ">";
}

def Z_b : RegisterOperand<ZPR, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 1>;
}

def Z_h : RegisterOperand<ZPR, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = ZPRVectorList<16, 1>;
}

def Z_s : RegisterOperand<ZPR, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 1>;
}

def Z_d : RegisterOperand<ZPR, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 1>;
}

def ZZ_b : RegisterOperand<ZPR2, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 2>;
}

def ZZ_h : RegisterOperand<ZPR2, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = ZPRVectorList<16, 2>;
}

def ZZ_s : RegisterOperand<ZPR2, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 2>;
}

def ZZ_d : RegisterOperand<ZPR2, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 2>;
}

def ZZZ_b : RegisterOperand<ZPR3, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 3>;
}

def ZZZ_h : RegisterOperand<ZPR3, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = ZPRVectorList<16, 3>;
}

def ZZZ_s : RegisterOperand<ZPR3, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 3>;
}

def ZZZ_d : RegisterOperand<ZPR3, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 3>;
}

def ZZZZ_b : RegisterOperand<ZPR4, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 4>;
}

def ZZZZ_h : RegisterOperand<ZPR4, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = ZPRVectorList<16, 4>;
}

def ZZZZ_s : RegisterOperand<ZPR4, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 4>;
}

def ZZZZ_d : RegisterOperand<ZPR4, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 4>;
}

class ZPRExtendAsmOperand<string ShiftExtend, int RegWidth, int Scale,
                          bit ScaleAlwaysSame = 0b0> : AsmOperandClass {
  let Name = "ZPRExtend" # ShiftExtend # RegWidth # Scale
                         # !if(ScaleAlwaysSame, "Only", "");

  let PredicateMethod = "isSVEDataVectorRegWithShiftExtend<"
                            # RegWidth # ", AArch64::ZPRRegClassID, "
                            # "AArch64_AM::" # ShiftExtend # ", "
                            # Scale # ", "
                            # !if(ScaleAlwaysSame, "true", "false")
"false") 1038 # ">"; 1039 let DiagnosticType = "InvalidZPR" # RegWidth # ShiftExtend # Scale; 1040 let RenderMethod = "addRegOperands"; 1041 let ParserMethod = "tryParseSVEDataVector<true, true>"; 1042} 1043 1044class ZPRExtendRegisterOperand<bit SignExtend, bit IsLSL, string Repr, 1045 int RegWidth, int Scale, string Suffix = ""> 1046 : RegisterOperand<ZPR> { 1047 let ParserMatchClass = 1048 !cast<AsmOperandClass>("ZPR" # RegWidth # "AsmOpndExt" # Repr # Scale # Suffix); 1049 let PrintMethod = "printRegWithShiftExtend<" 1050 # !if(SignExtend, "true", "false") # ", " 1051 # Scale # ", " 1052 # !if(IsLSL, "'x'", "'w'") # ", " 1053 # !if(!eq(RegWidth, 32), "'s'", "'d'") # ">"; 1054} 1055 1056foreach RegWidth = [32, 64] in { 1057 // UXTW(8|16|32|64) 1058 def ZPR#RegWidth#AsmOpndExtUXTW8Only : ZPRExtendAsmOperand<"UXTW", RegWidth, 8, 0b1>; 1059 def ZPR#RegWidth#AsmOpndExtUXTW8 : ZPRExtendAsmOperand<"UXTW", RegWidth, 8>; 1060 def ZPR#RegWidth#AsmOpndExtUXTW16 : ZPRExtendAsmOperand<"UXTW", RegWidth, 16>; 1061 def ZPR#RegWidth#AsmOpndExtUXTW32 : ZPRExtendAsmOperand<"UXTW", RegWidth, 32>; 1062 def ZPR#RegWidth#AsmOpndExtUXTW64 : ZPRExtendAsmOperand<"UXTW", RegWidth, 64>; 1063 1064 def ZPR#RegWidth#ExtUXTW8Only : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 8, "Only">; 1065 def ZPR#RegWidth#ExtUXTW8 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 8>; 1066 def ZPR#RegWidth#ExtUXTW16 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 16>; 1067 def ZPR#RegWidth#ExtUXTW32 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 32>; 1068 def ZPR#RegWidth#ExtUXTW64 : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 64>; 1069 1070 // SXTW(8|16|32|64) 1071 def ZPR#RegWidth#AsmOpndExtSXTW8Only : ZPRExtendAsmOperand<"SXTW", RegWidth, 8, 0b1>; 1072 def ZPR#RegWidth#AsmOpndExtSXTW8 : ZPRExtendAsmOperand<"SXTW", RegWidth, 8>; 1073 def ZPR#RegWidth#AsmOpndExtSXTW16 : ZPRExtendAsmOperand<"SXTW", RegWidth, 16>; 1074 def ZPR#RegWidth#AsmOpndExtSXTW32 : ZPRExtendAsmOperand<"SXTW", RegWidth, 32>; 1075 def ZPR#RegWidth#AsmOpndExtSXTW64 : ZPRExtendAsmOperand<"SXTW", RegWidth, 64>; 1076 1077 def ZPR#RegWidth#ExtSXTW8Only : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 8, "Only">; 1078 def ZPR#RegWidth#ExtSXTW8 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 8>; 1079 def ZPR#RegWidth#ExtSXTW16 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 16>; 1080 def ZPR#RegWidth#ExtSXTW32 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 32>; 1081 def ZPR#RegWidth#ExtSXTW64 : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 64>; 1082 1083 // LSL(8|16|32|64) 1084 def ZPR#RegWidth#AsmOpndExtLSL8 : ZPRExtendAsmOperand<"LSL", RegWidth, 8>; 1085 def ZPR#RegWidth#AsmOpndExtLSL16 : ZPRExtendAsmOperand<"LSL", RegWidth, 16>; 1086 def ZPR#RegWidth#AsmOpndExtLSL32 : ZPRExtendAsmOperand<"LSL", RegWidth, 32>; 1087 def ZPR#RegWidth#AsmOpndExtLSL64 : ZPRExtendAsmOperand<"LSL", RegWidth, 64>; 1088 def ZPR#RegWidth#ExtLSL8 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 8>; 1089 def ZPR#RegWidth#ExtLSL16 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 16>; 1090 def ZPR#RegWidth#ExtLSL32 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 32>; 1091 def ZPR#RegWidth#ExtLSL64 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 64>; 1092} 1093 1094class GPR64ShiftExtendAsmOperand <string AsmOperandName, int Scale, string RegClass> : AsmOperandClass { 1095 let Name = AsmOperandName # Scale; 1096 let PredicateMethod = "isGPR64WithShiftExtend<AArch64::"#RegClass#"RegClassID, " # Scale # 
">"; 1097 let DiagnosticType = "Invalid" # AsmOperandName # Scale; 1098 let RenderMethod = "addRegOperands"; 1099 let ParserMethod = "tryParseGPROperand<true>"; 1100} 1101 1102class GPR64ExtendRegisterOperand<string Name, int Scale, RegisterClass RegClass> : RegisterOperand<RegClass>{ 1103 let ParserMatchClass = !cast<AsmOperandClass>(Name); 1104 let PrintMethod = "printRegWithShiftExtend<false, " # Scale # ", 'x', 0>"; 1105} 1106 1107foreach Scale = [8, 16, 32, 64] in { 1108 def GPR64shiftedAsmOpnd # Scale : GPR64ShiftExtendAsmOperand<"GPR64shifted", Scale, "GPR64">; 1109 def GPR64shifted # Scale : GPR64ExtendRegisterOperand<"GPR64shiftedAsmOpnd" # Scale, Scale, GPR64>; 1110 1111 def GPR64NoXZRshiftedAsmOpnd # Scale : GPR64ShiftExtendAsmOperand<"GPR64NoXZRshifted", Scale, "GPR64common">; 1112 def GPR64NoXZRshifted # Scale : GPR64ExtendRegisterOperand<"GPR64NoXZRshiftedAsmOpnd" # Scale, Scale, GPR64common>; 1113} 1114