//===-- ARMInstrVFP.td - VFP support for ARM ---------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the ARM VFP instruction set.
//
//===----------------------------------------------------------------------===//

def SDT_CMPFP0  : SDTypeProfile<0, 1, [SDTCisFP<0>]>;
def SDT_VMOVDRR : SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
                                       SDTCisSameAs<1, 2>]>;

def arm_fmstat : SDNode<"ARMISD::FMSTAT",  SDTNone, [SDNPInGlue, SDNPOutGlue]>;
def arm_cmpfp  : SDNode<"ARMISD::CMPFP",   SDT_ARMCmp, [SDNPOutGlue]>;
def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0", SDT_CMPFP0, [SDNPOutGlue]>;
def arm_fmdrr  : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;


//===----------------------------------------------------------------------===//
// Operand Definitions.
//

// 8-bit floating-point immediate encodings.
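// Only constants representable in the VFPv3 8-bit immediate format are
// accepted (roughly, values of the form +/-(16..31)/16 * 2^(-3..4));
// ARM_AM::getFP32Imm/getFP64Imm return -1 for anything else, which is what
// the PatLeaf predicates below check.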
def FPImmOperand : AsmOperandClass {
  let Name = "FPImm";
  let ParserMethod = "parseFPImm";
}

def vfp_f32imm : Operand<f32>,
                 PatLeaf<(f32 fpimm), [{
      return ARM_AM::getFP32Imm(N->getValueAPF()) != -1;
    }], SDNodeXForm<fpimm, [{
      APFloat InVal = N->getValueAPF();
      uint32_t enc = ARM_AM::getFP32Imm(InVal);
      return CurDAG->getTargetConstant(enc, MVT::i32);
    }]>> {
  let PrintMethod = "printFPImmOperand";
  let ParserMatchClass = FPImmOperand;
}

def vfp_f64imm : Operand<f64>,
                 PatLeaf<(f64 fpimm), [{
      return ARM_AM::getFP64Imm(N->getValueAPF()) != -1;
    }], SDNodeXForm<fpimm, [{
      APFloat InVal = N->getValueAPF();
      uint32_t enc = ARM_AM::getFP64Imm(InVal);
      return CurDAG->getTargetConstant(enc, MVT::i32);
    }]>> {
  let PrintMethod = "printFPImmOperand";
  let ParserMatchClass = FPImmOperand;
}

def alignedload32 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 4;
}]>;

def alignedstore32 : PatFrag<(ops node:$val, node:$ptr),
                             (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 4;
}]>;

// The VCVT to/from fixed-point instructions encode the 'fbits' operand
// (the number of fixed bits) differently than it appears in the assembly
// source. It's encoded as "Size - fbits" where Size is the size of the
// fixed-point representation (32 or 16) and fbits is the value appearing
// in the assembly source, an integer in [0,16] or (0,32], depending on size.
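// For example, "vcvt.s32.f32 s0, s0, #1" has Size = 32 and fbits = 1, so the
// field actually encoded in the instruction is 32 - 1 = 31.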
def fbits32_asm_operand : AsmOperandClass { let Name = "FBits32"; }
def fbits32 : Operand<i32> {
  let PrintMethod = "printFBits32";
  let ParserMatchClass = fbits32_asm_operand;
}

def fbits16_asm_operand : AsmOperandClass { let Name = "FBits16"; }
def fbits16 : Operand<i32> {
  let PrintMethod = "printFBits16";
  let ParserMatchClass = fbits16_asm_operand;
}

//===----------------------------------------------------------------------===//
//  Load / store Instructions.
//

let canFoldAsLoad = 1, isReMaterializable = 1 in {

def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$Dd), (ins addrmode5:$addr),
                 IIC_fpLoad64, "vldr", "\t$Dd, $addr",
                 [(set DPR:$Dd, (f64 (alignedload32 addrmode5:$addr)))]>;

def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
                 IIC_fpLoad32, "vldr", "\t$Sd, $addr",
                 [(set SPR:$Sd, (load addrmode5:$addr))]> {
  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

} // End of 'let canFoldAsLoad = 1, isReMaterializable = 1 in'

def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$Dd, addrmode5:$addr),
                 IIC_fpStore64, "vstr", "\t$Dd, $addr",
                 [(alignedstore32 (f64 DPR:$Dd), addrmode5:$addr)]>;

def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$Sd, addrmode5:$addr),
                 IIC_fpStore32, "vstr", "\t$Sd, $addr",
                 [(store SPR:$Sd, addrmode5:$addr)]> {
  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

//===----------------------------------------------------------------------===//
//  Load / store multiple Instructions.
//

multiclass vfp_ldst_mult<string asm, bit L_bit,
                         InstrItinClass itin, InstrItinClass itin_upd> {
  // Double Precision
  def DIA :
    AXDI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeNone, itin,
          !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 0;          // No writeback
    let Inst{20}    = L_bit;
  }
  def DIA_UPD :
    AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
                               variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }
  def DDB_UPD :
    AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
                               variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;       // Decrement Before
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }

  // Single Precision
  def SIA :
    AXSI4<(outs), (ins GPR:$Rn, pred:$p, spr_reglist:$regs, variable_ops),
          IndexModeNone, itin,
          !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 0;          // No writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  def SIA_UPD :
    AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
                               variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  def SDB_UPD :
    AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
                               variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;       // Decrement Before
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
}

let hasSideEffects = 0 in {

let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
defm VLDM : vfp_ldst_mult<"vldm", 1, IIC_fpLoad_m, IIC_fpLoad_mu>;

let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
defm VSTM : vfp_ldst_mult<"vstm", 0, IIC_fpStore_m, IIC_fpStore_mu>;

} // hasSideEffects

def : MnemonicAlias<"vldm", "vldmia">;
def : MnemonicAlias<"vstm", "vstmia">;

// FLDM/FSTM - Load / Store multiple single / double precision registers for
// pre-ARMv6 cores.
// These instructions are deprecated!
def : VFP2MnemonicAlias<"fldmias", "vldmia">;
def : VFP2MnemonicAlias<"fldmdbs", "vldmdb">;
def : VFP2MnemonicAlias<"fldmeas", "vldmdb">;
def : VFP2MnemonicAlias<"fldmfds", "vldmia">;
def : VFP2MnemonicAlias<"fldmiad", "vldmia">;
def : VFP2MnemonicAlias<"fldmdbd", "vldmdb">;
def : VFP2MnemonicAlias<"fldmead", "vldmdb">;
def : VFP2MnemonicAlias<"fldmfdd", "vldmia">;

def : VFP2MnemonicAlias<"fstmias", "vstmia">;
def : VFP2MnemonicAlias<"fstmdbs", "vstmdb">;
def : VFP2MnemonicAlias<"fstmeas", "vstmia">;
def : VFP2MnemonicAlias<"fstmfds", "vstmdb">;
def : VFP2MnemonicAlias<"fstmiad", "vstmia">;
def : VFP2MnemonicAlias<"fstmdbd", "vstmdb">;
def : VFP2MnemonicAlias<"fstmead", "vstmia">;
def : VFP2MnemonicAlias<"fstmfdd", "vstmdb">;
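// The 'ea' (empty ascending) and 'fd' (full descending) stack suffixes map
// onto ia/db depending on the transfer direction, as the aliases above show:
// for loads fd == ia and ea == db, for stores fd == db and ea == ia.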

def : InstAlias<"vpush${p} $r", (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r)>,
                Requires<[HasVFP2]>;
def : InstAlias<"vpush${p} $r", (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r)>,
                Requires<[HasVFP2]>;
def : InstAlias<"vpop${p} $r",  (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r)>,
                Requires<[HasVFP2]>;
def : InstAlias<"vpop${p} $r",  (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r)>,
                Requires<[HasVFP2]>;
defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
                         (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r)>;
defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
                         (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r)>;
defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
                         (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r)>;
defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
                         (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r)>;

// FLDMX, FSTMX - Load and store multiple unknown precision registers for
// pre-ARMv6 cores.
// These instructions are deprecated, so we don't want them to get selected.
multiclass vfp_ldstx_mult<string asm, bit L_bit> {
  // Unknown precision
  def XIA :
    AXXI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeNone, !strconcat(asm, "iax${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 0;          // No writeback
    let Inst{20}    = L_bit;
  }
  def XIA_UPD :
    AXXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeUpd, !strconcat(asm, "iax${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;         // Increment After
    let Inst{21}    = 1;            // Writeback
    let Inst{20}    = L_bit;
  }
  def XDB_UPD :
    AXXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeUpd, !strconcat(asm, "dbx${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;         // Decrement Before
    let Inst{21}    = 1;            // Writeback
    let Inst{20}    = L_bit;
  }
}

defm FLDM : vfp_ldstx_mult<"fldm", 1>;
defm FSTM : vfp_ldstx_mult<"fstm", 0>;

def : VFP2MnemonicAlias<"fldmeax", "fldmdbx">;
def : VFP2MnemonicAlias<"fldmfdx", "fldmiax">;

def : VFP2MnemonicAlias<"fstmeax", "fstmiax">;
def : VFP2MnemonicAlias<"fstmfdx", "fstmdbx">;

//===----------------------------------------------------------------------===//
// FP Binary Operations.
//

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VADDD  : ADbI<0b11100, 0b11, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpALU64, "vadd", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fadd DPR:$Dn, (f64 DPR:$Dm)))]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VADDS  : ASbIn<0b11100, 0b11, 0, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   IIC_fpALU32, "vadd", ".f32\t$Sd, $Sn, $Sm",
                   [(set SPR:$Sd, (fadd SPR:$Sn, SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VSUBD  : ADbI<0b11100, 0b11, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpALU64, "vsub", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fsub DPR:$Dn, (f64 DPR:$Dm)))]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VSUBS  : ASbIn<0b11100, 0b11, 1, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   IIC_fpALU32, "vsub", ".f32\t$Sd, $Sn, $Sm",
                   [(set SPR:$Sd, (fsub SPR:$Sn, SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VDIVD  : ADbI<0b11101, 0b00, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpDIV64, "vdiv", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fdiv DPR:$Dn, (f64 DPR:$Dm)))]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VDIVS  : ASbI<0b11101, 0b00, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpDIV32, "vdiv", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fdiv SPR:$Sn, SPR:$Sm))]>;

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VMULD  : ADbI<0b11100, 0b10, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpMUL64, "vmul", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fmul DPR:$Dn, (f64 DPR:$Dm)))]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VMULS  : ASbIn<0b11100, 0b10, 0, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   IIC_fpMUL32, "vmul", ".f32\t$Sd, $Sn, $Sm",
                   [(set SPR:$Sd, (fmul SPR:$Sn, SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VNMULD : ADbI<0b11100, 0b10, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpMUL64, "vnmul", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fneg (fmul DPR:$Dn, (f64 DPR:$Dm))))]>;

def VNMULS : ASbI<0b11100, 0b10, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpMUL32, "vnmul", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fneg (fmul SPR:$Sn, SPR:$Sm)))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

multiclass vsel_inst<string op, bits<2> opc, int CC> {
  let DecoderNamespace = "VFPV8", PostEncoderMethod = "",
      Uses = [CPSR], AddedComplexity = 4 in {
    def S : ASbInp<0b11100, opc, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   NoItinerary, !strconcat("vsel", op, ".f32\t$Sd, $Sn, $Sm"),
                   [(set SPR:$Sd, (ARMcmov SPR:$Sm, SPR:$Sn, CC))]>,
                   Requires<[HasFPARMv8]>;

    def D : ADbInp<0b11100, opc, 0,
                   (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                   NoItinerary, !strconcat("vsel", op, ".f64\t$Dd, $Dn, $Dm"),
                   [(set DPR:$Dd, (ARMcmov (f64 DPR:$Dm), (f64 DPR:$Dn), CC))]>,
                   Requires<[HasFPARMv8, HasDPVFP]>;
  }
}

// The CC constants here match ARMCC::CondCodes.
defm VSELGT : vsel_inst<"gt", 0b11, 12>;
defm VSELGE : vsel_inst<"ge", 0b10, 10>;
defm VSELEQ : vsel_inst<"eq", 0b00, 0>;
defm VSELVS : vsel_inst<"vs", 0b01, 6>;
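// Note that the vsel patterns list the register operands as ($Sm, $Sn):
// ARMcmov takes the value used when the condition is false first, whereas
// vsel writes its $n operand only when the condition holds.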

multiclass vmaxmin_inst<string op, bit opc, SDNode SD> {
  let DecoderNamespace = "VFPV8", PostEncoderMethod = "" in {
    def S : ASbInp<0b11101, 0b00, opc,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   NoItinerary, !strconcat(op, ".f32\t$Sd, $Sn, $Sm"),
                   [(set SPR:$Sd, (SD SPR:$Sn, SPR:$Sm))]>,
                   Requires<[HasFPARMv8]>;

    def D : ADbInp<0b11101, 0b00, opc,
                   (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                   NoItinerary, !strconcat(op, ".f64\t$Dd, $Dn, $Dm"),
                   [(set DPR:$Dd, (f64 (SD (f64 DPR:$Dn), (f64 DPR:$Dm))))]>,
                   Requires<[HasFPARMv8, HasDPVFP]>;
  }
}

defm VMAXNM : vmaxmin_inst<"vmaxnm", 0, ARMvmaxnm>;
defm VMINNM : vmaxmin_inst<"vminnm", 1, ARMvminnm>;

// Match the reassociated forms only when sign-dependent rounding is not
// honored: (fneg x) * y and fneg(x * y) can round differently under the
// directed (toward +/- infinity) rounding modes.
def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
          (VNMULD DPR:$a, DPR:$b)>,
          Requires<[NoHonorSignDependentRounding,HasDPVFP]>;
def : Pat<(fmul (fneg SPR:$a), SPR:$b),
          (VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;

// These are encoded as unary instructions.
let Defs = [FPSCR_NZCV] in {
def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins DPR:$Dd, DPR:$Dm),
                  IIC_fpCMP64, "vcmpe", ".f64\t$Dd, $Dm",
                  [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm))]>;

def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins SPR:$Sd, SPR:$Sm),
                  IIC_fpCMP32, "vcmpe", ".f32\t$Sd, $Sm",
                  [(arm_cmpfp SPR:$Sd, SPR:$Sm)]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

// FIXME: Verify encoding after integrated assembler is working.
def VCMPD  : ADuI<0b11101, 0b11, 0b0100, 0b01, 0,
                  (outs), (ins DPR:$Dd, DPR:$Dm),
                  IIC_fpCMP64, "vcmp", ".f64\t$Dd, $Dm",
                  [/* For disassembly only; pattern left blank */]>;

def VCMPS  : ASuI<0b11101, 0b11, 0b0100, 0b01, 0,
                  (outs), (ins SPR:$Sd, SPR:$Sm),
                  IIC_fpCMP32, "vcmp", ".f32\t$Sd, $Sm",
                  [/* For disassembly only; pattern left blank */]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
} // Defs = [FPSCR_NZCV]

//===----------------------------------------------------------------------===//
// FP Unary Operations.
//

def VABSD  : ADuI<0b11101, 0b11, 0b0000, 0b11, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpUNA64, "vabs", ".f64\t$Dd, $Dm",
                  [(set DPR:$Dd, (fabs (f64 DPR:$Dm)))]>;

def VABSS  : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   IIC_fpUNA32, "vabs", ".f32\t$Sd, $Sm",
                   [(set SPR:$Sd, (fabs SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let Defs = [FPSCR_NZCV] in {
def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins DPR:$Dd),
                   IIC_fpCMP64, "vcmpe", ".f64\t$Dd, #0",
                   [(arm_cmpfp0 (f64 DPR:$Dd))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}

def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins SPR:$Sd),
                   IIC_fpCMP32, "vcmpe", ".f32\t$Sd, #0",
                   [(arm_cmpfp0 SPR:$Sd)]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

// FIXME: Verify encoding after integrated assembler is working.
def VCMPZD  : ADuI<0b11101, 0b11, 0b0101, 0b01, 0,
                   (outs), (ins DPR:$Dd),
                   IIC_fpCMP64, "vcmp", ".f64\t$Dd, #0",
                   [/* For disassembly only; pattern left blank */]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}

def VCMPZS  : ASuI<0b11101, 0b11, 0b0101, 0b01, 0,
                   (outs), (ins SPR:$Sd),
                   IIC_fpCMP32, "vcmp", ".f32\t$Sd, #0",
                   [/* For disassembly only; pattern left blank */]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
} // Defs = [FPSCR_NZCV]

def VCVTDS  : ASuI<0b11101, 0b11, 0b0111, 0b11, 0,
                   (outs DPR:$Dd), (ins SPR:$Sm),
                   IIC_fpCVTDS, "vcvt", ".f64.f32\t$Dd, $Sm",
                   [(set DPR:$Dd, (fextend SPR:$Sm))]> {
  // Instruction operands.
  bits<5> Dd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Dd{3-0};
  let Inst{22}    = Dd{4};

  let Predicates = [HasVFP2, HasDPVFP];
}

// Special case encoding: bits 11-8 is 0b1011.
def VCVTSD  : VFPAI<(outs SPR:$Sd), (ins DPR:$Dm), VFPUnaryFrm,
                    IIC_fpCVTSD, "vcvt", ".f32.f64\t$Sd, $Dm",
                    [(set SPR:$Sd, (fround DPR:$Dm))]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let Inst{27-23} = 0b11101;
  let Inst{21-16} = 0b110111;
  let Inst{11-8}  = 0b1011;
  let Inst{7-6}   = 0b11;
  let Inst{4}     = 0;

  let Predicates = [HasVFP2, HasDPVFP];
}

// Between half, single and double-precision.  For disassembly only.
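// The VCVTB* forms operate on the f16 value held in the bottom half of the
// 32-bit S register; the VCVTT* forms operate on the top half.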

// FIXME: Verify encoding after integrated assembler is working.
def VCVTBHS: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                 /* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$Sd, $Sm",
                 [/* For disassembly only; pattern left blank */]>;

def VCVTBSH: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                 /* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$Sd, $Sm",
                 [/* For disassembly only; pattern left blank */]>;

def VCVTTHS: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                 /* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$Sd, $Sm",
                 [/* For disassembly only; pattern left blank */]>;

def VCVTTSH: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                 /* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$Sd, $Sm",
                 [/* For disassembly only; pattern left blank */]>;

def VCVTBHD : ADuI<0b11101, 0b11, 0b0010, 0b01, 0,
                   (outs DPR:$Dd), (ins SPR:$Sm),
                   NoItinerary, "vcvtb", ".f64.f16\t$Dd, $Sm",
                   []>, Requires<[HasFPARMv8, HasDPVFP]> {
  // Instruction operands.
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0} = Sm{4-1};
  let Inst{5}   = Sm{0};
}

def VCVTBDH : ADuI<0b11101, 0b11, 0b0011, 0b01, 0,
                   (outs SPR:$Sd), (ins DPR:$Dm),
                   NoItinerary, "vcvtb", ".f16.f64\t$Sd, $Dm",
                   []>, Requires<[HasFPARMv8, HasDPVFP]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0}     = Dm{3-0};
  let Inst{5}       = Dm{4};
  let Inst{15-12}   = Sd{4-1};
  let Inst{22}      = Sd{0};
}

def VCVTTHD : ADuI<0b11101, 0b11, 0b0010, 0b11, 0,
                   (outs DPR:$Dd), (ins SPR:$Sm),
                   NoItinerary, "vcvtt", ".f64.f16\t$Dd, $Sm",
                   []>, Requires<[HasFPARMv8, HasDPVFP]> {
  // Instruction operands.
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0} = Sm{4-1};
  let Inst{5}   = Sm{0};
}

def VCVTTDH : ADuI<0b11101, 0b11, 0b0011, 0b11, 0,
                   (outs SPR:$Sd), (ins DPR:$Dm),
                   NoItinerary, "vcvtt", ".f16.f64\t$Sd, $Dm",
                   []>, Requires<[HasFPARMv8, HasDPVFP]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
}

def : Pat<(fp_to_f16 SPR:$a),
          (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;

def : Pat<(fp_to_f16 (f64 DPR:$a)),
          (i32 (COPY_TO_REGCLASS (VCVTBDH DPR:$a), GPR))>;

def : Pat<(f16_to_fp GPR:$a),
          (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;

def : Pat<(f64 (f16_to_fp GPR:$a)),
          (VCVTBHD (COPY_TO_REGCLASS GPR:$a, SPR))>;

multiclass vcvt_inst<string opc, bits<2> rm,
                     SDPatternOperator node = null_frag> {
  let PostEncoderMethod = "", DecoderNamespace = "VFPV8" in {
    def SS : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
                    (outs SPR:$Sd), (ins SPR:$Sm),
                    NoItinerary, !strconcat("vcvt", opc, ".s32.f32\t$Sd, $Sm"),
                    []>,
                    Requires<[HasFPARMv8]> {
      let Inst{17-16} = rm;
    }

    def US : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0,
                    (outs SPR:$Sd), (ins SPR:$Sm),
                    NoItinerary, !strconcat("vcvt", opc, ".u32.f32\t$Sd, $Sm"),
                    []>,
                    Requires<[HasFPARMv8]> {
      let Inst{17-16} = rm;
    }

    def SD : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
                    (outs SPR:$Sd), (ins DPR:$Dm),
                    NoItinerary, !strconcat("vcvt", opc, ".s32.f64\t$Sd, $Dm"),
                    []>,
                    Requires<[HasFPARMv8, HasDPVFP]> {
      bits<5> Dm;

      let Inst{17-16} = rm;

      // Encode instruction operands
      let Inst{3-0} = Dm{3-0};
      let Inst{5}   = Dm{4};
      let Inst{8} = 1;
    }

    def UD : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0,
                    (outs SPR:$Sd), (ins DPR:$Dm),
                    NoItinerary, !strconcat("vcvt", opc, ".u32.f64\t$Sd, $Dm"),
                    []>,
                    Requires<[HasFPARMv8, HasDPVFP]> {
      bits<5> Dm;

      let Inst{17-16} = rm;

      // Encode instruction operands
      let Inst{3-0}  = Dm{3-0};
      let Inst{5}    = Dm{4};
      let Inst{8} = 1;
    }
  }

  let Predicates = [HasFPARMv8] in {
    def : Pat<(i32 (fp_to_sint (node SPR:$a))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"SS") SPR:$a),
                GPR)>;
    def : Pat<(i32 (fp_to_uint (node SPR:$a))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"US") SPR:$a),
                GPR)>;
  }
  let Predicates = [HasFPARMv8, HasDPVFP] in {
    def : Pat<(i32 (fp_to_sint (node (f64 DPR:$a)))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"SD") DPR:$a),
                GPR)>;
    def : Pat<(i32 (fp_to_uint (node (f64 DPR:$a)))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"UD") DPR:$a),
                GPR)>;
  }
}

defm VCVTA : vcvt_inst<"a", 0b00, frnd>;
defm VCVTN : vcvt_inst<"n", 0b01>;
defm VCVTP : vcvt_inst<"p", 0b10, fceil>;
defm VCVTM : vcvt_inst<"m", 0b11, ffloor>;

def VNEGD  : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpUNA64, "vneg", ".f64\t$Dd, $Dm",
                  [(set DPR:$Dd, (fneg (f64 DPR:$Dm)))]>;

def VNEGS  : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   IIC_fpUNA32, "vneg", ".f32\t$Sd, $Sm",
                   [(set SPR:$Sd, (fneg SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

multiclass vrint_inst_zrx<string opc, bit op, bit op2, SDPatternOperator node> {
  def S : ASuI<0b11101, 0b11, 0b0110, 0b11, 0,
               (outs SPR:$Sd), (ins SPR:$Sm),
               NoItinerary, !strconcat("vrint", opc), ".f32\t$Sd, $Sm",
               [(set (f32 SPR:$Sd), (node (f32 SPR:$Sm)))]>,
               Requires<[HasFPARMv8]> {
    let Inst{7} = op2;
    let Inst{16} = op;
  }
  def D : ADuI<0b11101, 0b11, 0b0110, 0b11, 0,
                (outs DPR:$Dd), (ins DPR:$Dm),
                NoItinerary, !strconcat("vrint", opc), ".f64\t$Dd, $Dm",
                [(set (f64 DPR:$Dd), (node (f64 DPR:$Dm)))]>,
                Requires<[HasFPARMv8, HasDPVFP]> {
    let Inst{7} = op2;
    let Inst{16} = op;
  }

  def : InstAlias<!strconcat("vrint", opc, "$p.f32.f32\t$Sd, $Sm"),
                  (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm, pred:$p)>,
        Requires<[HasFPARMv8]>;
  def : InstAlias<!strconcat("vrint", opc, "$p.f64.f64\t$Dd, $Dm"),
                  (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm, pred:$p)>,
        Requires<[HasFPARMv8,HasDPVFP]>;
}

defm VRINTZ : vrint_inst_zrx<"z", 0, 1, ftrunc>;
defm VRINTR : vrint_inst_zrx<"r", 0, 0, fnearbyint>;
defm VRINTX : vrint_inst_zrx<"x", 1, 0, frint>;

multiclass vrint_inst_anpm<string opc, bits<2> rm,
                           SDPatternOperator node = null_frag> {
  let PostEncoderMethod = "", DecoderNamespace = "VFPV8" in {
    def S : ASuInp<0b11101, 0b11, 0b1000, 0b01, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   NoItinerary, !strconcat("vrint", opc, ".f32\t$Sd, $Sm"),
                   [(set (f32 SPR:$Sd), (node (f32 SPR:$Sm)))]>,
                   Requires<[HasFPARMv8]> {
      let Inst{17-16} = rm;
    }
    def D : ADuInp<0b11101, 0b11, 0b1000, 0b01, 0,
                   (outs DPR:$Dd), (ins DPR:$Dm),
                   NoItinerary, !strconcat("vrint", opc, ".f64\t$Dd, $Dm"),
                   [(set (f64 DPR:$Dd), (node (f64 DPR:$Dm)))]>,
                   Requires<[HasFPARMv8, HasDPVFP]> {
      let Inst{17-16} = rm;
    }
  }

  def : InstAlias<!strconcat("vrint", opc, ".f32.f32\t$Sd, $Sm"),
                  (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm)>,
        Requires<[HasFPARMv8]>;
  def : InstAlias<!strconcat("vrint", opc, ".f64.f64\t$Dd, $Dm"),
                  (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm)>,
        Requires<[HasFPARMv8,HasDPVFP]>;
}

defm VRINTA : vrint_inst_anpm<"a", 0b00, frnd>;
defm VRINTN : vrint_inst_anpm<"n", 0b01>;
defm VRINTP : vrint_inst_anpm<"p", 0b10, fceil>;
defm VRINTM : vrint_inst_anpm<"m", 0b11, ffloor>;

def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpSQRT64, "vsqrt", ".f64\t$Dd, $Dm",
                  [(set DPR:$Dd, (fsqrt (f64 DPR:$Dm)))]>;

def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0,
                  (outs SPR:$Sd), (ins SPR:$Sm),
                  IIC_fpSQRT32, "vsqrt", ".f32\t$Sd, $Sm",
                  [(set SPR:$Sd, (fsqrt SPR:$Sm))]>;

let hasSideEffects = 0 in {
def VMOVD  : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpUNA64, "vmov", ".f64\t$Dd, $Dm", []>;

def VMOVS  : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
                  (outs SPR:$Sd), (ins SPR:$Sm),
                  IIC_fpUNA32, "vmov", ".f32\t$Sd, $Sm", []>;
} // hasSideEffects

//===----------------------------------------------------------------------===//
// FP <-> GPR Copies.  Int <-> FP Conversions.
//

def VMOVRS : AVConv2I<0b11100001, 0b1010,
                      (outs GPR:$Rt), (ins SPR:$Sn),
                      IIC_fpMOVSI, "vmov", "\t$Rt, $Sn",
                      [(set GPR:$Rt, (bitconvert SPR:$Sn))]> {
  // Instruction operands.
  bits<4> Rt;
  bits<5> Sn;

  // Encode instruction operands.
  let Inst{19-16} = Sn{4-1};
  let Inst{7}     = Sn{0};
  let Inst{15-12} = Rt;

  let Inst{6-5}   = 0b00;
  let Inst{3-0}   = 0b0000;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

// Bitcast i32 -> f32.  NEON prefers to use VMOVDRR.
def VMOVSR : AVConv4I<0b11100000, 0b1010,
                      (outs SPR:$Sn), (ins GPR:$Rt),
                      IIC_fpMOVIS, "vmov", "\t$Sn, $Rt",
                      [(set SPR:$Sn, (bitconvert GPR:$Rt))]>,
             Requires<[HasVFP2, UseVMOVSR]> {
  // Instruction operands.
  bits<5> Sn;
  bits<4> Rt;

  // Encode instruction operands.
  let Inst{19-16} = Sn{4-1};
  let Inst{7}     = Sn{0};
  let Inst{15-12} = Rt;

  let Inst{6-5}   = 0b00;
  let Inst{3-0}   = 0b0000;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

let hasSideEffects = 0 in {
def VMOVRRD  : AVConv3I<0b11000101, 0b1011,
                        (outs GPR:$Rt, GPR:$Rt2), (ins DPR:$Dm),
                        IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $Dm",
                 [/* FIXME: Can't write pattern for multiple result instr*/]> {
  // Instruction operands.
  bits<5> Dm;
  bits<4> Rt;
  bits<4> Rt2;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Rt;
  let Inst{19-16} = Rt2;

  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;

  // This instruction is equivalent to
  // $Rt = EXTRACT_SUBREG $Dm, ssub_0
  // $Rt2 = EXTRACT_SUBREG $Dm, ssub_1
  let isExtractSubreg = 1;
}

def VMOVRRS  : AVConv3I<0b11000101, 0b1010,
                      (outs GPR:$Rt, GPR:$Rt2), (ins SPR:$src1, SPR:$src2),
                 IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $src1, $src2",
                 [/* For disassembly only; pattern left blank */]> {
  bits<5> src1;
  bits<4> Rt;
  bits<4> Rt2;

  // Encode instruction operands.
  let Inst{3-0}   = src1{4-1};
  let Inst{5}     = src1{0};
  let Inst{15-12} = Rt;
  let Inst{19-16} = Rt2;

  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
  let DecoderMethod = "DecodeVMOVRRS";
}
} // hasSideEffects

// FMDHR: GPR -> SPR
// FMDLR: GPR -> SPR

def VMOVDRR : AVConv5I<0b11000100, 0b1011,
                      (outs DPR:$Dm), (ins GPR:$Rt, GPR:$Rt2),
                      IIC_fpMOVID, "vmov", "\t$Dm, $Rt, $Rt2",
                      [(set DPR:$Dm, (arm_fmdrr GPR:$Rt, GPR:$Rt2))]> {
  // Instruction operands.
  bits<5> Dm;
  bits<4> Rt;
  bits<4> Rt2;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Rt;
  let Inst{19-16} = Rt2;

  let Inst{7-6}   = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;

  // This instruction is equivalent to
  // $Dm = REG_SEQUENCE $Rt, ssub_0, $Rt2, ssub_1
  let isRegSequence = 1;
}

let hasSideEffects = 0 in
def VMOVSRR : AVConv5I<0b11000100, 0b1010,
                     (outs SPR:$dst1, SPR:$dst2), (ins GPR:$src1, GPR:$src2),
                IIC_fpMOVID, "vmov", "\t$dst1, $dst2, $src1, $src2",
                [/* For disassembly only; pattern left blank */]> {
  // Instruction operands.
  bits<5> dst1;
  bits<4> src1;
  bits<4> src2;

  // Encode instruction operands.
  let Inst{3-0}   = dst1{4-1};
  let Inst{5}     = dst1{0};
  let Inst{15-12} = src1;
  let Inst{19-16} = src2;

  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;

  let DecoderMethod = "DecodeVMOVSRR";
}

// FMRDH: SPR -> GPR
// FMRDL: SPR -> GPR
// FMRRS: SPR -> GPR
// FMRX:  SPR system reg -> GPR
// FMSRR: GPR -> SPR
// FMXR:  GPR -> VFP system reg


// Int -> FP:

class AVConv1IDs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                        bits<4> opcod4, dag oops, dag iops,
                        InstrItinClass itin, string opc, string asm,
                        list<dag> pattern>
  : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
             pattern> {
  // Instruction operands.
  bits<5> Dd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Dd{3-0};
  let Inst{22}    = Dd{4};

  let Predicates = [HasVFP2, HasDPVFP];
}

class AVConv1InSs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                         bits<4> opcod4, dag oops, dag iops,InstrItinClass itin,
                         string opc, string asm, list<dag> pattern>
  : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
              pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};
}

def VSITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
                               (outs DPR:$Dd), (ins SPR:$Sm),
                               IIC_fpCVTID, "vcvt", ".f64.s32\t$Dd, $Sm",
                               []> {
  let Inst{7} = 1; // s32
}

let Predicates=[HasVFP2, HasDPVFP] in {
  def : VFPPat<(f64 (sint_to_fp GPR:$a)),
               (VSITOD (COPY_TO_REGCLASS GPR:$a, SPR))>;

  def : VFPPat<(f64 (sint_to_fp (i32 (load addrmode5:$a)))),
               (VSITOD (VLDRS addrmode5:$a))>;
}

def VSITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
                                (outs SPR:$Sd),(ins SPR:$Sm),
                                IIC_fpCVTIS, "vcvt", ".f32.s32\t$Sd, $Sm",
                                []> {
  let Inst{7} = 1; // s32

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : VFPNoNEONPat<(f32 (sint_to_fp GPR:$a)),
                   (VSITOS (COPY_TO_REGCLASS GPR:$a, SPR))>;

def : VFPNoNEONPat<(f32 (sint_to_fp (i32 (load addrmode5:$a)))),
                   (VSITOS (VLDRS addrmode5:$a))>;

def VUITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
                               (outs DPR:$Dd), (ins SPR:$Sm),
                               IIC_fpCVTID, "vcvt", ".f64.u32\t$Dd, $Sm",
                               []> {
  let Inst{7} = 0; // u32
}

let Predicates=[HasVFP2, HasDPVFP] in {
  def : VFPPat<(f64 (uint_to_fp GPR:$a)),
               (VUITOD (COPY_TO_REGCLASS GPR:$a, SPR))>;

  def : VFPPat<(f64 (uint_to_fp (i32 (load addrmode5:$a)))),
               (VUITOD (VLDRS addrmode5:$a))>;
}

def VUITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
                                (outs SPR:$Sd), (ins SPR:$Sm),
                                IIC_fpCVTIS, "vcvt", ".f32.u32\t$Sd, $Sm",
                                []> {
  let Inst{7} = 0; // u32

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : VFPNoNEONPat<(f32 (uint_to_fp GPR:$a)),
                   (VUITOS (COPY_TO_REGCLASS GPR:$a, SPR))>;

def : VFPNoNEONPat<(f32 (uint_to_fp (i32 (load addrmode5:$a)))),
                   (VUITOS (VLDRS addrmode5:$a))>;

// FP -> Int:

class AVConv1IsD_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                        bits<4> opcod4, dag oops, dag iops,
                        InstrItinClass itin, string opc, string asm,
                        list<dag> pattern>
  : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
             pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let Predicates = [HasVFP2, HasDPVFP];
}

class AVConv1InsS_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                         bits<4> opcod4, dag oops, dag iops,
                         InstrItinClass itin, string opc, string asm,
                         list<dag> pattern>
  : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
              pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};
}

// Always set Z bit in the instruction, i.e. "round towards zero" variants.
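// In other words, these implement the C-style truncating conversion used by
// the fp_to_sint/fp_to_uint patterns below, independent of the rounding mode
// currently selected in FPSCR.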
def VTOSIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvt", ".s32.f64\t$Sd, $Dm",
                                []> {
  let Inst{7} = 1; // Z bit
}

let Predicates=[HasVFP2, HasDPVFP] in {
  def : VFPPat<(i32 (fp_to_sint (f64 DPR:$a))),
               (COPY_TO_REGCLASS (VTOSIZD DPR:$a), GPR)>;

  def : VFPPat<(store (i32 (fp_to_sint (f64 DPR:$a))), addrmode5:$ptr),
               (VSTRS (VTOSIZD DPR:$a), addrmode5:$ptr)>;
}

def VTOSIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvt", ".s32.f32\t$Sd, $Sm",
                                 []> {
  let Inst{7} = 1; // Z bit

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : VFPNoNEONPat<(i32 (fp_to_sint SPR:$a)),
                   (COPY_TO_REGCLASS (VTOSIZS SPR:$a), GPR)>;

def : VFPNoNEONPat<(store (i32 (fp_to_sint (f32 SPR:$a))), addrmode5:$ptr),
                   (VSTRS (VTOSIZS SPR:$a), addrmode5:$ptr)>;

def VTOUIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
                               (outs SPR:$Sd), (ins DPR:$Dm),
                               IIC_fpCVTDI, "vcvt", ".u32.f64\t$Sd, $Dm",
                               []> {
  let Inst{7} = 1; // Z bit
}

let Predicates=[HasVFP2, HasDPVFP] in {
  def : VFPPat<(i32 (fp_to_uint (f64 DPR:$a))),
               (COPY_TO_REGCLASS (VTOUIZD DPR:$a), GPR)>;

  def : VFPPat<(store (i32 (fp_to_uint (f64 DPR:$a))), addrmode5:$ptr),
               (VSTRS (VTOUIZD DPR:$a), addrmode5:$ptr)>;
}

def VTOUIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvt", ".u32.f32\t$Sd, $Sm",
                                 []> {
  let Inst{7} = 1; // Z bit

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : VFPNoNEONPat<(i32 (fp_to_uint SPR:$a)),
                   (COPY_TO_REGCLASS (VTOUIZS SPR:$a), GPR)>;

def : VFPNoNEONPat<(store (i32 (fp_to_uint (f32 SPR:$a))), addrmode5:$ptr),
                  (VSTRS (VTOUIZS SPR:$a), addrmode5:$ptr)>;

// And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
let Uses = [FPSCR] in {
// FIXME: Verify encoding after integrated assembler is working.
def VTOSIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvtr", ".s32.f64\t$Sd, $Dm",
                                [(set SPR:$Sd, (int_arm_vcvtr (f64 DPR:$Dm)))]>{
  let Inst{7} = 0; // Z bit
}

def VTOSIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvtr", ".s32.f32\t$Sd, $Sm",
                                 [(set SPR:$Sd, (int_arm_vcvtr SPR:$Sm))]> {
  let Inst{7} = 0; // Z bit
}

def VTOUIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvtr", ".u32.f64\t$Sd, $Dm",
                                [(set SPR:$Sd, (int_arm_vcvtru(f64 DPR:$Dm)))]>{
  let Inst{7} = 0; // Z bit
}

def VTOUIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvtr", ".u32.f32\t$Sd, $Sm",
                                 [(set SPR:$Sd, (int_arm_vcvtru SPR:$Sm))]> {
  let Inst{7} = 0; // Z bit
}
}

// Convert between floating-point and fixed-point
// Data type for fixed-point naming convention:
//   S16 (U=0, sx=0) -> SH
//   U16 (U=1, sx=0) -> UH
//   S32 (U=0, sx=1) -> SL
//   U32 (U=1, sx=1) -> UL
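// e.g. VTOSHS below is "vcvt.s16.f32" (FP -> S16 fixed-point) and VSHTOS is
// "vcvt.f32.s16" (S16 fixed-point -> FP).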
1200
1201let Constraints = "$a = $dst" in {
1202
1203// FP to Fixed-Point:
1204
1205// Single Precision register
1206class AVConv1XInsS_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
1207                          bit op5, dag oops, dag iops, InstrItinClass itin,
1208                          string opc, string asm, list<dag> pattern>
1209  : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern>,
1210  Sched<[WriteCvtFP]> {
1211  bits<5> dst;
1212  // if dp_operation then UInt(D:Vd) else UInt(Vd:D);
1213  let Inst{22} = dst{0};
1214  let Inst{15-12} = dst{4-1};
1215}
1216
1217// Double Precision register
1218class AVConv1XInsD_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
1219                          bit op5, dag oops, dag iops, InstrItinClass itin,
1220                          string opc, string asm, list<dag> pattern>
1221  : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern>,
1222    Sched<[WriteCvtFP]> {
1223  bits<5> dst;
1224  // if dp_operation then UInt(D:Vd) else UInt(Vd:D);
1225  let Inst{22} = dst{4};
1226  let Inst{15-12} = dst{3-0};
1227
1228  let Predicates = [HasVFP2, HasDPVFP];
1229}
1230
1231def VTOSHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 0,
1232                       (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1233                 IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits", []> {
1234  // Some single precision VFP instructions may be executed on both NEON and
1235  // VFP pipelines on A8.
1236  let D = VFPNeonA8Domain;
1237}
1238
1239def VTOUHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 0,
1240                       (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1241                 IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits", []> {
1242  // Some single precision VFP instructions may be executed on both NEON and
1243  // VFP pipelines on A8.
1244  let D = VFPNeonA8Domain;
1245}
1246
1247def VTOSLS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 1,
1248                       (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1249                 IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits", []> {
1250  // Some single precision VFP instructions may be executed on both NEON and
1251  // VFP pipelines on A8.
1252  let D = VFPNeonA8Domain;
1253}
1254
1255def VTOULS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 1,
1256                       (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1257                 IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits", []> {
1258  // Some single precision VFP instructions may be executed on both NEON and
1259  // VFP pipelines on A8.
1260  let D = VFPNeonA8Domain;
1261}
1262
1263def VTOSHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 0,
1264                       (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
1265                 IIC_fpCVTDI, "vcvt", ".s16.f64\t$dst, $a, $fbits", []>;
1266
1267def VTOUHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 0,
1268                       (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
1269                 IIC_fpCVTDI, "vcvt", ".u16.f64\t$dst, $a, $fbits", []>;
1270
1271def VTOSLD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 1,
1272                       (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
1273                 IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a, $fbits", []>;
1274
1275def VTOULD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 1,
1276                       (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
1277                 IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits", []>;
1278
1279// Fixed-Point to FP:
1280
1281def VSHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 0,
1282                       (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1283                 IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits", []> {
1284  // Some single precision VFP instructions may be executed on both NEON and
1285  // VFP pipelines on A8.
1286  let D = VFPNeonA8Domain;
1287}
1288
1289def VUHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 0,
1290                       (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1291                 IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits", []> {
1292  // Some single precision VFP instructions may be executed on both NEON and
1293  // VFP pipelines on A8.
1294  let D = VFPNeonA8Domain;
1295}
1296
1297def VSLTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 1,
1298                       (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1299                 IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits", []> {
1300  // Some single precision VFP instructions may be executed on both NEON and
1301  // VFP pipelines on A8.
1302  let D = VFPNeonA8Domain;
1303}
1304
1305def VULTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 1,
1306                       (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1307                 IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits", []> {
1308  // Some single precision VFP instructions may be executed on both NEON and
1309  // VFP pipelines on A8.
1310  let D = VFPNeonA8Domain;
1311}
1312
1313def VSHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 0,
1314                       (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
1315                 IIC_fpCVTID, "vcvt", ".f64.s16\t$dst, $a, $fbits", []>;
1316
1317def VUHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 0,
1318                       (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
1319                 IIC_fpCVTID, "vcvt", ".f64.u16\t$dst, $a, $fbits", []>;
1320
1321def VSLTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 1,
1322                       (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
1323                 IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a, $fbits", []>;
1324
1325def VULTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 1,
1326                       (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
1327                 IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits", []>;
1328
1329} // End of 'let Constraints = "$a = $dst" in'
1330
1331//===----------------------------------------------------------------------===//
1332// FP Multiply-Accumulate Operations.
1333//
1334
1335def VMLAD : ADbI<0b11100, 0b00, 0, 0,
1336                 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
1337                 IIC_fpMAC64, "vmla", ".f64\t$Dd, $Dn, $Dm",
1338                 [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
1339                                          (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;

def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vmla", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
                                           SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
          (VMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
          (VMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;

def VMLSD : ADbI<0b11100, 0b00, 1, 0,
                 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                 IIC_fpMAC64, "vmls", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
                                          (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;

def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vmls", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
          (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
          (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;

def VNMLAD : ADbI<0b11100, 0b01, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                  IIC_fpMAC64, "vnmla", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
                                          (f64 DPR:$Ddin)))]>,
                RegConstraint<"$Ddin = $Dd">,
                Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;

def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vnmla", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
                RegConstraint<"$Sdin = $Sd">,
                Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
          (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
          (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;

def VNMLSD : ADbI<0b11100, 0b01, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                  IIC_fpMAC64, "vnmls", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
                                           (f64 DPR:$Ddin)))]>,
               RegConstraint<"$Ddin = $Dd">,
               Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;

def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vnmls", ".f32\t$Sd, $Sn, $Sm",
             [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
                RegConstraint<"$Sdin = $Sd">,
                Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
          (VNMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
          (VNMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;

//===----------------------------------------------------------------------===//
// Fused FP Multiply-Accumulate Operations.
//
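// The fused forms below compute the multiply and the accumulate with a single
// rounding step, whereas the VMLA/VMLS family above rounds the intermediate
// product separately; the UseFusedMAC/DontUseFusedMAC predicates keep the two
// pattern sets mutually exclusive. Illustrative comparison (not a definition):
//   vfma.f64 d0, d1, d2   @ d0 = round(d0 + d1*d2)         - one rounding
//   vmla.f64 d0, d1, d2   @ d0 = round(d0 + round(d1*d2))  - two roundings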
def VFMAD : ADbI<0b11101, 0b10, 0, 0,
                 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                 IIC_fpFMAC64, "vfma", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
                                          (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;

def VFMAS : ASbIn<0b11101, 0b10, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpFMAC32, "vfma", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
                                           SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines.
}

def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
          (VFMAD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
          (VFMAS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;

// Match @llvm.fma.* intrinsics
// (fma x, y, z) -> (vfma z, x, y)
def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, DPR:$Ddin)),
          (VFMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, SPR:$Sdin)),
          (VFMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;
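// Illustrative only (not part of the definitions): with the patterns above, an
// IR-level fused multiply-add such as
//   %r = call double @llvm.fma.f64(double %n, double %m, double %acc)
// selects to a single instruction of the form
//   vfma.f64 d0, d1, d2   @ d0 = d0 + d1 * d2
// with the accumulator tied to the destination register ($Ddin = $Dd).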

def VFMSD : ADbI<0b11101, 0b10, 1, 0,
                 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                 IIC_fpFMAC64, "vfms", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
                                          (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;

def VFMSS : ASbIn<0b11101, 0b10, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpFMAC32, "vfms", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines.
}

def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
          (VFMSD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
          (VFMSS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;

// Match @llvm.fma.* intrinsics
// (fma (fneg x), y, z) -> (vfms z, x, y)
def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin)),
          (VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin)),
          (VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;
// (fma x, (fneg y), z) -> (vfms z, x, y)
def : Pat<(f64 (fma DPR:$Dn, (fneg DPR:$Dm), DPR:$Ddin)),
          (VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(f32 (fma SPR:$Sn, (fneg SPR:$Sm), SPR:$Sdin)),
          (VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;

def VFNMAD : ADbI<0b11101, 0b01, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                  IIC_fpFMAC64, "vfnma", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
                                          (f64 DPR:$Ddin)))]>,
                RegConstraint<"$Ddin = $Dd">,
                Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;

def VFNMAS : ASbI<0b11101, 0b01, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpFMAC32, "vfnma", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
                RegConstraint<"$Sdin = $Sd">,
                Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines.
}

def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
          (VFNMAD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
          (VFNMAS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;

// Match @llvm.fma.* intrinsics
// (fneg (fma x, y, z)) -> (vfnma z, x, y)
def : Pat<(fneg (fma (f64 DPR:$Dn), (f64 DPR:$Dm), (f64 DPR:$Ddin))),
          (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(fneg (fma (f32 SPR:$Sn), (f32 SPR:$Sm), (f32 SPR:$Sdin))),
          (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;
// (fma (fneg x), y, (fneg z)) -> (vfnma z, x, y)
def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, (fneg DPR:$Ddin))),
          (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, (fneg SPR:$Sdin))),
          (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;

def VFNMSD : ADbI<0b11101, 0b01, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                  IIC_fpFMAC64, "vfnms", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
                                           (f64 DPR:$Ddin)))]>,
               RegConstraint<"$Ddin = $Dd">,
               Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;

def VFNMSS : ASbI<0b11101, 0b01, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpFMAC32, "vfnms", ".f32\t$Sd, $Sn, $Sm",
             [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
                RegConstraint<"$Sdin = $Sd">,
                  Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines.
}

def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
          (VFNMSD DPR:$dstin, DPR:$a, DPR:$b)>,
          Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
          (VFNMSS SPR:$dstin, SPR:$a, SPR:$b)>,
          Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;

// Match @llvm.fma.* intrinsics

// (fma x, y, (fneg z)) -> (vfnms z, x, y)
def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, (fneg DPR:$Ddin))),
          (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, (fneg SPR:$Sdin))),
          (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;
// (fneg (fma (fneg x), y, z)) -> (vfnms z, x, y)
def : Pat<(fneg (f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin))),
          (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(fneg (f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin))),
          (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;
// (fneg (fma x, (fneg y), z)) -> (vfnms z, x, y)
def : Pat<(fneg (f64 (fma DPR:$Dn, (fneg DPR:$Dm), DPR:$Ddin))),
          (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(fneg (f32 (fma SPR:$Sn, (fneg SPR:$Sm), SPR:$Sdin))),
          (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;

//===----------------------------------------------------------------------===//
// FP Conditional moves.
//

let hasSideEffects = 0 in {
def VMOVDcc  : PseudoInst<(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm, cmovpred:$p),
                    IIC_fpUNA64,
                    [(set (f64 DPR:$Dd),
                          (ARMcmov DPR:$Dn, DPR:$Dm, cmovpred:$p))]>,
               RegConstraint<"$Dn = $Dd">, Requires<[HasVFP2,HasDPVFP]>;

def VMOVScc  : PseudoInst<(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm, cmovpred:$p),
                    IIC_fpUNA32,
                    [(set (f32 SPR:$Sd),
                          (ARMcmov SPR:$Sn, SPR:$Sm, cmovpred:$p))]>,
               RegConstraint<"$Sn = $Sd">, Requires<[HasVFP2]>;
} // hasSideEffects
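// Illustrative only: these pseudos are what a floating-point select, e.g.
//   %r = select i1 %c, double %a, double %b
// becomes once it has been lowered to an ARMcmov node; after instruction
// selection they are expected to expand into ordinary predicated VMOV copies.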

//===----------------------------------------------------------------------===//
// Move from VFP System Register to ARM core register.
//

class MovFromVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
                 list<dag> pattern>:
  VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> {

  // Instruction operand.
  bits<4> Rt;

  let Inst{27-20} = 0b11101111;
  let Inst{19-16} = opc19_16;
  let Inst{15-12} = Rt;
  let Inst{11-8}  = 0b1010;
  let Inst{7}     = 0;
  let Inst{6-5}   = 0b00;
  let Inst{4}     = 1;
  let Inst{3-0}   = 0b0000;
}

// APSR is the application level alias of CPSR. FMSTAT below copies the FPSCR
// N, Z, C, V flags into APSR.
let Defs = [CPSR], Uses = [FPSCR_NZCV], Rt = 0b1111 /* apsr_nzcv */ in
def FMSTAT : MovFromVFP<0b0001 /* fpscr */, (outs), (ins),
                        "vmrs", "\tAPSR_nzcv, fpscr", [(arm_fmstat)]>;
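// Illustrative use (not a definition): a typical floating-point
// compare-and-branch sequence is
//   vcmp.f32  s0, s1
//   vmrs      APSR_nzcv, fpscr    @ FMSTAT: copy FPSCR flags into APSR
//   bgt       .Ltaken             @ hypothetical label, shown for context
// so ordinary conditional instructions can test the result of a VFP compare.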

// Application level FPSCR -> GPR
let hasSideEffects = 1, Uses = [FPSCR] in
def VMRS : MovFromVFP<0b0001 /* fpscr */, (outs GPR:$Rt), (ins),
                      "vmrs", "\t$Rt, fpscr",
                      [(set GPR:$Rt, (int_arm_get_fpscr))]>;
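// For reference (assuming the usual int_arm_* to llvm.arm.* naming), the
// IR-level read looks like
//   %fpscr = call i32 @llvm.arm.get.fpscr()
// which the pattern above selects to "vmrs <Rt>, fpscr".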

// System level FPEXC, FPSID -> GPR
let Uses = [FPSCR] in {
  def VMRS_FPEXC : MovFromVFP<0b1000 /* fpexc */, (outs GPR:$Rt), (ins),
                              "vmrs", "\t$Rt, fpexc", []>;
  def VMRS_FPSID : MovFromVFP<0b0000 /* fpsid */, (outs GPR:$Rt), (ins),
                              "vmrs", "\t$Rt, fpsid", []>;
  def VMRS_MVFR0 : MovFromVFP<0b0111 /* mvfr0 */, (outs GPR:$Rt), (ins),
                              "vmrs", "\t$Rt, mvfr0", []>;
  def VMRS_MVFR1 : MovFromVFP<0b0110 /* mvfr1 */, (outs GPR:$Rt), (ins),
                              "vmrs", "\t$Rt, mvfr1", []>;
  def VMRS_MVFR2 : MovFromVFP<0b0101 /* mvfr2 */, (outs GPR:$Rt), (ins),
                              "vmrs", "\t$Rt, mvfr2", []>, Requires<[HasFPARMv8]>;
  def VMRS_FPINST : MovFromVFP<0b1001 /* fpinst */, (outs GPR:$Rt), (ins),
                              "vmrs", "\t$Rt, fpinst", []>;
  def VMRS_FPINST2 : MovFromVFP<0b1010 /* fpinst2 */, (outs GPR:$Rt), (ins),
                                "vmrs", "\t$Rt, fpinst2", []>;
}

//===----------------------------------------------------------------------===//
// Move from ARM core register to VFP System Register.
//

class MovToVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
               list<dag> pattern>:
  VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> {

  // Instruction operand.
  bits<4> src;

  // Encode instruction operand.
  let Inst{15-12} = src;

  let Inst{27-20} = 0b11101110;
  let Inst{19-16} = opc19_16;
  let Inst{11-8}  = 0b1010;
  let Inst{7}     = 0;
  let Inst{4}     = 1;
}

let Defs = [FPSCR] in {
  // Application level GPR -> FPSCR
  def VMSR : MovToVFP<0b0001 /* fpscr */, (outs), (ins GPR:$src),
                      "vmsr", "\tfpscr, $src", [(int_arm_set_fpscr GPR:$src)]>;
  // System level GPR -> FPEXC
  def VMSR_FPEXC : MovToVFP<0b1000 /* fpexc */, (outs), (ins GPR:$src),
                      "vmsr", "\tfpexc, $src", []>;
  // System level GPR -> FPSID
  def VMSR_FPSID : MovToVFP<0b0000 /* fpsid */, (outs), (ins GPR:$src),
                      "vmsr", "\tfpsid, $src", []>;

  def VMSR_FPINST : MovToVFP<0b1001 /* fpinst */, (outs), (ins GPR:$src),
                              "vmsr", "\tfpinst, $src", []>;
  def VMSR_FPINST2 : MovToVFP<0b1010 /* fpinst2 */, (outs), (ins GPR:$src),
                                "vmsr", "\tfpinst2, $src", []>;
}
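// Illustrative only (assuming the matching llvm.arm.set.fpscr intrinsic):
//   call void @llvm.arm.set.fpscr(i32 %val)
// selects to "vmsr fpscr, <Rt>" via the VMSR pattern above. The system-level
// forms (fpexc, fpsid, fpinst, fpinst2) carry no patterns, so they are
// reachable only from assembly, not from codegen.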

//===----------------------------------------------------------------------===//
// Misc.
//

// Materialize FP immediates. VFP3 only.
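// The 8-bit immediate encodes values of the form +/- (m/16) * 2^n with
// 16 <= m <= 31 and -3 <= n <= 4; for example, 1.0 encodes as 0x70.
// Constants outside this set are not matched by vfp_f32imm/vfp_f64imm and are
// materialized some other way (typically via a constant pool load).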
let isReMaterializable = 1 in {
def FCONSTD : VFPAI<(outs DPR:$Dd), (ins vfp_f64imm:$imm),
                    VFPMiscFrm, IIC_fpUNA64,
                    "vmov", ".f64\t$Dd, $imm",
                    [(set DPR:$Dd, vfp_f64imm:$imm)]>,
              Requires<[HasVFP3,HasDPVFP]> {
  bits<5> Dd;
  bits<8> imm;

  let Inst{27-23} = 0b11101;
  let Inst{22}    = Dd{4};
  let Inst{21-20} = 0b11;
  let Inst{19-16} = imm{7-4};
  let Inst{15-12} = Dd{3-0};
  let Inst{11-9}  = 0b101;
  let Inst{8}     = 1;          // Double precision.
  let Inst{7-4}   = 0b0000;
  let Inst{3-0}   = imm{3-0};
}

def FCONSTS : VFPAI<(outs SPR:$Sd), (ins vfp_f32imm:$imm),
                     VFPMiscFrm, IIC_fpUNA32,
                     "vmov", ".f32\t$Sd, $imm",
                     [(set SPR:$Sd, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> {
  bits<5> Sd;
  bits<8> imm;

  let Inst{27-23} = 0b11101;
  let Inst{22}    = Sd{0};
  let Inst{21-20} = 0b11;
  let Inst{19-16} = imm{7-4};
  let Inst{15-12} = Sd{4-1};
  let Inst{11-9}  = 0b101;
  let Inst{8}     = 0;          // Single precision.
  let Inst{7-4}   = 0b0000;
  let Inst{3-0}   = imm{3-0};
}
}

//===----------------------------------------------------------------------===//
// Assembler aliases.
//
// A few mnemonic aliases for pre-unified (pre-UAL) assembly syntax. We don't
// guarantee to support them all, but supporting at least the basics keeps the
// assembler friendly to older code; an example follows the list below.
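// For example, the pre-unified form
//   flds s0, [r0]
// is accepted and assembles the same as the unified
//   vldr s0, [r0]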
def : VFP2MnemonicAlias<"flds", "vldr">;
def : VFP2MnemonicAlias<"fldd", "vldr">;
def : VFP2MnemonicAlias<"fmrs", "vmov">;
def : VFP2MnemonicAlias<"fmsr", "vmov">;
def : VFP2MnemonicAlias<"fsqrts", "vsqrt">;
def : VFP2MnemonicAlias<"fsqrtd", "vsqrt">;
def : VFP2MnemonicAlias<"fadds", "vadd.f32">;
def : VFP2MnemonicAlias<"faddd", "vadd.f64">;
def : VFP2MnemonicAlias<"fmrdd", "vmov">;
def : VFP2MnemonicAlias<"fmrds", "vmov">;
def : VFP2MnemonicAlias<"fmrrd", "vmov">;
def : VFP2MnemonicAlias<"fmdrr", "vmov">;
def : VFP2MnemonicAlias<"fmuls", "vmul.f32">;
def : VFP2MnemonicAlias<"fmuld", "vmul.f64">;
def : VFP2MnemonicAlias<"fnegs", "vneg.f32">;
def : VFP2MnemonicAlias<"fnegd", "vneg.f64">;
def : VFP2MnemonicAlias<"ftosizd", "vcvt.s32.f64">;
def : VFP2MnemonicAlias<"ftosid", "vcvtr.s32.f64">;
def : VFP2MnemonicAlias<"ftosizs", "vcvt.s32.f32">;
def : VFP2MnemonicAlias<"ftosis", "vcvtr.s32.f32">;
def : VFP2MnemonicAlias<"ftouizd", "vcvt.u32.f64">;
def : VFP2MnemonicAlias<"ftouid", "vcvtr.u32.f64">;
def : VFP2MnemonicAlias<"ftouizs", "vcvt.u32.f32">;
def : VFP2MnemonicAlias<"ftouis", "vcvtr.u32.f32">;
def : VFP2MnemonicAlias<"fsitod", "vcvt.f64.s32">;
def : VFP2MnemonicAlias<"fsitos", "vcvt.f32.s32">;
def : VFP2MnemonicAlias<"fuitod", "vcvt.f64.u32">;
def : VFP2MnemonicAlias<"fuitos", "vcvt.f32.u32">;
def : VFP2MnemonicAlias<"fsts", "vstr">;
def : VFP2MnemonicAlias<"fstd", "vstr">;
def : VFP2MnemonicAlias<"fmacd", "vmla.f64">;
def : VFP2MnemonicAlias<"fmacs", "vmla.f32">;
def : VFP2MnemonicAlias<"fcpys", "vmov.f32">;
def : VFP2MnemonicAlias<"fcpyd", "vmov.f64">;
def : VFP2MnemonicAlias<"fcmps", "vcmp.f32">;
def : VFP2MnemonicAlias<"fcmpd", "vcmp.f64">;
def : VFP2MnemonicAlias<"fdivs", "vdiv.f32">;
def : VFP2MnemonicAlias<"fdivd", "vdiv.f64">;
def : VFP2MnemonicAlias<"fmrx", "vmrs">;
def : VFP2MnemonicAlias<"fmxr", "vmsr">;

// Be friendly and accept the old form of zero-compare
def : VFP2DPInstAlias<"fcmpzd${p} $val", (VCMPZD DPR:$val, pred:$p)>;
def : VFP2InstAlias<"fcmpzs${p} $val", (VCMPZS SPR:$val, pred:$p)>;


def : VFP2InstAlias<"fmstat${p}", (FMSTAT pred:$p)>;
def : VFP2InstAlias<"fadds${p} $Sd, $Sn, $Sm",
                    (VADDS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>;
def : VFP2DPInstAlias<"faddd${p} $Dd, $Dn, $Dm",
                      (VADDD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;
def : VFP2InstAlias<"fsubs${p} $Sd, $Sn, $Sm",
                    (VSUBS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>;
def : VFP2DPInstAlias<"fsubd${p} $Dd, $Dn, $Dm",
                      (VSUBD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;

// No need for the size suffix on VSQRT. It's implied by the register classes.
def : VFP2InstAlias<"vsqrt${p} $Sd, $Sm", (VSQRTS SPR:$Sd, SPR:$Sm, pred:$p)>;
def : VFP2DPInstAlias<"vsqrt${p} $Dd, $Dm", (VSQRTD DPR:$Dd, DPR:$Dm, pred:$p)>;

// VLDR/VSTR accept an optional type suffix.
def : VFP2InstAlias<"vldr${p}.32 $Sd, $addr",
                    (VLDRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
def : VFP2InstAlias<"vstr${p}.32 $Sd, $addr",
                    (VSTRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
def : VFP2InstAlias<"vldr${p}.64 $Dd, $addr",
                    (VLDRD DPR:$Dd, addrmode5:$addr, pred:$p)>;
def : VFP2InstAlias<"vstr${p}.64 $Dd, $addr",
                    (VSTRD DPR:$Dd, addrmode5:$addr, pred:$p)>;

// VMOV can accept an optional data type suffix of 32 bits or narrower.
def : VFP2InstAlias<"vmov${p}.8 $Rt, $Sn",
                    (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
def : VFP2InstAlias<"vmov${p}.16 $Rt, $Sn",
                    (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
def : VFP2InstAlias<"vmov${p}.32 $Rt, $Sn",
                    (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
def : VFP2InstAlias<"vmov${p}.8 $Sn, $Rt",
                    (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
def : VFP2InstAlias<"vmov${p}.16 $Sn, $Rt",
                    (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
def : VFP2InstAlias<"vmov${p}.32 $Sn, $Rt",
                    (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;

def : VFP2InstAlias<"vmov${p}.f64 $Rt, $Rt2, $Dn",
                    (VMOVRRD GPR:$Rt, GPR:$Rt2, DPR:$Dn, pred:$p)>;
def : VFP2InstAlias<"vmov${p}.f64 $Dn, $Rt, $Rt2",
                    (VMOVDRR DPR:$Dn, GPR:$Rt, GPR:$Rt2, pred:$p)>;

// VMOVS doesn't need the .f32 to disambiguate from the NEON encoding the way
// VMOVD does.
def : VFP2InstAlias<"vmov${p} $Sd, $Sm",
                    (VMOVS SPR:$Sd, SPR:$Sm, pred:$p)>;

// FCONSTD/FCONSTS alias for vmov.f64/vmov.f32
// These aliases provide added functionality over vmov.f instructions by
// allowing users to write assembly containing encoded floating point constants
// (e.g. #0x70 vs #1.0).  Without these aliases there is no way for the
// assembler to accept encoded fp constants (but the equivalent fp-literal is
// accepted directly by the vmov.f32/vmov.f64 forms).
def : VFP3InstAlias<"fconstd${p} $Dd, $val",
                    (FCONSTD DPR:$Dd, vfp_f64imm:$val, pred:$p)>;
def : VFP3InstAlias<"fconsts${p} $Sd, $val",
                    (FCONSTS SPR:$Sd, vfp_f32imm:$val, pred:$p)>;
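// For example (using the encoding noted above), these two are equivalent:
//   fconsts    s0, #0x70     @ pre-unified form, takes the encoded imm8
//   vmov.f32   s0, #1.0      @ unified form, takes the fp literal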