1; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o - | FileCheck %s -check-prefix=VFP2
2; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s -check-prefix=NEON
3; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s -check-prefix=A8
4; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 %s -o - | FileCheck %s -check-prefix=A9
5; RUN: llc -mtriple=arm-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard %s -o - | FileCheck %s -check-prefix=HARD
6; RUN: llc -mtriple=arm-linux-gnueabi -mcpu=cortex-m4 -float-abi=hard %s -o - | FileCheck %s -check-prefix=VMLA
7; RUN: llc -mtriple=arm-linux-gnueabi -mcpu=cortex-m33 -float-abi=hard %s -o - | FileCheck %s -check-prefix=VMLA
8
; Basic fmul+fadd (acc + a*b): VFP2 and NEON fuse this into a single
; vmla.f32, while Cortex-A8 (and the M-profile cores checked via the
; VMLA prefix) keep the multiply and add separate.
define float @t1(float %acc, float %a, float %b) {
entry:
; VFP2-LABEL: t1:
; VFP2: vmla.f32

; NEON-LABEL: t1:
; NEON: vmla.f32

; A8-LABEL: t1:
; A8: vmul.f32
; A8: vadd.f32

; VMLA-LABEL: t1:
; VMLA:       vmul.f32
; VMLA-NEXT:  vadd.f32

  %0 = fmul float %a, %b
  %1 = fadd float %acc, %0
	ret float %1
}
29
; Same computation as @t1, but with the minsize/optsize attributes (#0,
; defined at the bottom of the file): when optimizing for size, a single
; fused vmla.f32 is expected even on the M-profile cores.
define float @vmla_minsize(float %acc, float %a, float %b) #0 {
entry:
; VMLA-LABEL: vmla_minsize:
; VMLA:       vmla.f32  s0, s1, s2
; VMLA-NEXT:  bx  lr

  %0 = fmul float %a, %b
  %1 = fadd float %acc, %0
	ret float %1
}
40
; Double-precision variant of @t1: vmla.f64 on VFP2/NEON, separate
; vmul.f64 + vadd.f64 on Cortex-A8.
define double @t2(double %acc, double %a, double %b) {
entry:
; VFP2-LABEL: t2:
; VFP2: vmla.f64

; NEON-LABEL: t2:
; NEON: vmla.f64

; A8-LABEL: t2:
; A8: vmul.f64
; A8: vadd.f64
  %0 = fmul double %a, %b
  %1 = fadd double %acc, %0
	ret double %1
}
56
; Same as @t1 but with the fadd operands commuted (a*b + acc instead of
; acc + a*b) — the fused vmla.f32 should still be formed on VFP2/NEON.
define float @t3(float %acc, float %a, float %b) {
entry:
; VFP2-LABEL: t3:
; VFP2: vmla.f32

; NEON-LABEL: t3:
; NEON: vmla.f32

; A8-LABEL: t3:
; A8: vmul.f32
; A8: vadd.f32
  %0 = fmul float %a, %b
  %1 = fadd float %0, %acc
	ret float %1
}
72
; It's possible to make use of fp vmla / vmls on Cortex-A9.
; rdar://8659675
; Two independent multiply-accumulates into separate accumulators
; (acc1 + a*b and acc2 + a*c); results are stored through %P1/%P2
; so the function returns void.
define void @t4(float %acc1, float %a, float %b, float %acc2, float %c, float* %P1, float* %P2) {
entry:
; A8-LABEL: t4:
; A8: vmul.f32
; A8: vmul.f32
; A8: vadd.f32
; A8: vadd.f32

; Two vmla with no RAW hazard
; A9-LABEL: t4:
; A9: vmla.f32
; A9: vmla.f32

; HARD-LABEL: t4:
; HARD: vmla.f32 s0, s1, s2
; HARD: vmla.f32 s3, s1, s4
  %0 = fmul float %a, %b
  %1 = fadd float %acc1, %0
  %2 = fmul float %a, %c
  %3 = fadd float %acc2, %2
  store float %1, float* %P1
  store float %3, float* %P2
  ret void
}
99
; Chained multiply-accumulate: (e + a*b) + c*d. The second fadd reads
; the result of the first (a RAW dependency), so on Cortex-A9 only the
; first pair is fused into vmla.f32; the second stays vmul + vadd, as
; pinned by the A9 and HARD check lines below.
define float @t5(float %a, float %b, float %c, float %d, float %e) {
entry:
; A8-LABEL: t5:
; A8: vmul.f32
; A8: vmul.f32
; A8: vadd.f32
; A8: vadd.f32

; A9-LABEL: t5:
; A9: vmla.f32
; A9: vmul.f32
; A9: vadd.f32

; HARD-LABEL: t5:
; HARD: vmla.f32 s4, s0, s1
; HARD: vmul.f32 s0, s2, s3
; HARD: vadd.f32 s0, s4, s0
  %0 = fmul float %a, %b
  %1 = fadd float %e, %0
  %2 = fmul float %c, %d
  %3 = fadd float %1, %2
  ret float %3
}
123
124attributes #0 = { minsize nounwind optsize }
125