1; RUN: llc < %s -asm-verbose=false -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
2
define float @test_f32(float* %A, float* %B, float* %C) nounwind {
;CHECK-LABEL: test_f32:
;CHECK: fmadd
;CHECK-NOT: fmadd
; Scalar f32 fmuladd must lower to exactly one fused scalar fmadd.
  %a = load float, float* %A
  %b = load float, float* %B
  %acc = load float, float* %C
  %fma = call float @llvm.fmuladd.f32(float %a, float %b, float %acc)
  ret float %fma
}
13
define <2 x float> @test_v2f32(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) nounwind {
;CHECK-LABEL: test_v2f32:
;CHECK: fmla.2s
;CHECK-NOT: fmla.2s
; 64-bit vector (v2f32) fmuladd must lower to a single fmla.2s.
  %a = load <2 x float>, <2 x float>* %A
  %b = load <2 x float>, <2 x float>* %B
  %acc = load <2 x float>, <2 x float>* %C
  %fma = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %acc)
  ret <2 x float> %fma
}
24
define <4 x float> @test_v4f32(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
;CHECK-LABEL: test_v4f32:
;CHECK: fmla.4s
;CHECK-NOT: fmla.4s
; 128-bit vector (v4f32) fmuladd must lower to a single fmla.4s.
  %a = load <4 x float>, <4 x float>* %A
  %b = load <4 x float>, <4 x float>* %B
  %acc = load <4 x float>, <4 x float>* %C
  %fma = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %acc)
  ret <4 x float> %fma
}
35
define <8 x float> @test_v8f32(<8 x float>* %A, <8 x float>* %B, <8 x float>* %C) nounwind {
;CHECK-LABEL: test_v8f32:
;CHECK: fmla.4s
;CHECK: fmla.4s
;CHECK-NOT: fmla.4s
; v8f32 is not legal on AArch64; it must split into exactly two fmla.4s ops.
  %a = load <8 x float>, <8 x float>* %A
  %b = load <8 x float>, <8 x float>* %B
  %acc = load <8 x float>, <8 x float>* %C
  %fma = call <8 x float> @llvm.fmuladd.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %acc)
  ret <8 x float> %fma
}
47
define double @test_f64(double* %A, double* %B, double* %C) nounwind {
;CHECK-LABEL: test_f64:
;CHECK: fmadd
;CHECK-NOT: fmadd
; Scalar f64 fmuladd must lower to exactly one fused scalar fmadd.
  %a = load double, double* %A
  %b = load double, double* %B
  %acc = load double, double* %C
  %fma = call double @llvm.fmuladd.f64(double %a, double %b, double %acc)
  ret double %fma
}
58
define <2 x double> @test_v2f64(<2 x double>* %A, <2 x double>* %B, <2 x double>* %C) nounwind {
;CHECK-LABEL: test_v2f64:
;CHECK: fmla.2d
;CHECK-NOT: fmla.2d
; 128-bit vector (v2f64) fmuladd must lower to a single fmla.2d.
  %a = load <2 x double>, <2 x double>* %A
  %b = load <2 x double>, <2 x double>* %B
  %acc = load <2 x double>, <2 x double>* %C
  %fma = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %acc)
  ret <2 x double> %fma
}
69
define <4 x double> @test_v4f64(<4 x double>* %A, <4 x double>* %B, <4 x double>* %C) nounwind {
;CHECK-LABEL: test_v4f64:
;CHECK: fmla.2d
;CHECK: fmla.2d
;CHECK-NOT: fmla.2d
; v4f64 is not legal on AArch64; it must split into exactly two fmla.2d ops.
  %a = load <4 x double>, <4 x double>* %A
  %b = load <4 x double>, <4 x double>* %B
  %acc = load <4 x double>, <4 x double>* %C
  %fma = call <4 x double> @llvm.fmuladd.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %acc)
  ret <4 x double> %fma
}
81
; Declarations of the llvm.fmuladd intrinsic overloads exercised above.
; readnone/nounwind: pure computations with no side effects or unwinding.
declare float @llvm.fmuladd.f32(float, float, float) nounwind readnone
declare <2 x float> @llvm.fmuladd.v2f32(<2 x float>, <2 x float>, <2 x float>) nounwind readnone
declare <4 x float> @llvm.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
declare <8 x float> @llvm.fmuladd.v8f32(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
declare double @llvm.fmuladd.f64(double, double, double) nounwind readnone
declare <2 x double> @llvm.fmuladd.v2f64(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
declare <4 x double> @llvm.fmuladd.v4f64(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
89