; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s

; Test the 'y' inline-asm operand modifier: a VFP single-precision (S)
; register operand is printed as its containing D register with a lane
; index (e.g. d1[0]), as required by the NEON vmul.f32 scalar forms below.
; NOTE(review): no CHECK lines here — this function only verifies that llc
; accepts and emits the modifier without crashing; confirm that is intended.
define i32 @foo(float %scale, float %scale2) nounwind {
entry:
  %scale.addr = alloca float, align 4
  %scale2.addr = alloca float, align 4
  store float %scale, float* %scale.addr, align 4
  store float %scale2, float* %scale2.addr, align 4
  %tmp = load float* %scale.addr, align 4
  %tmp1 = load float* %scale2.addr, align 4
  call void asm sideeffect "vmul.f32    q0, q0, ${0:y} \0A\09vmul.f32    q1, q1, ${0:y} \0A\09vmul.f32    q1, q0, ${1:y} \0A\09", "w,w,~{q0},~{q1}"(float %tmp, float %tmp1) nounwind
  ret i32 0
}
14
; Test the 'B' inline-asm operand modifier: prints the bitwise inverse of
; an immediate operand, so input 0 must be emitted as ~0 == -1.
define void @f0() nounwind {
entry:
; CHECK: f0
; CHECK: .word -1
  call void asm sideeffect ".word ${0:B} \0A\09", "i"(i32 0) nounwind
  ret void
}
22
; Test the 'L' inline-asm operand modifier: prints the low 16 bits of an
; immediate operand, so input -1 must be emitted as 0xFFFF == 65535.
define void @f1() nounwind {
entry:
; CHECK: f1
; CHECK: .word 65535
  call void asm sideeffect ".word ${0:L} \0A\09", "i"(i32 -1) nounwind
  ret void
}
30
@f2_ptr = internal global i32* @f2_var, align 4
@f2_var = external global i32

; Test the 'm' inline-asm operand modifier on a memory operand: the operand
; is printed as a bare base register (no brackets), which the asm template
; then wraps itself — the CHECK expects "ldr r0, [rN]".
define void @f2() nounwind {
entry:
; CHECK: f2
; CHECK: ldr r0, [r{{[0-9]+}}]
  call void asm sideeffect "ldr r0, [${0:m}]\0A\09", "*m,~{r0}"(i32** @f2_ptr) nounwind
  ret void
}
41
@f3_ptr = internal global i64* @f3_var, align 4
@f3_var = external global i64
@f3_var2 = external global i64

; Test the 'M' inline-asm operand modifier on 64-bit (register-pair)
; operands: the pair is printed as a register list "{rN, rM}" suitable for
; ldm/stm, alongside the 'm' memory-operand modifier from f2.
; Operand map for the first asm: $0 = *m output, $1/$2 = tied i64 in/out
; ("=r"/"1"), $3 = i64 input register pair.
define void @f3() nounwind {
entry:
; CHECK: f3
; CHECK: stm {{lr|r[0-9]+}}, {[[REG1:(r[0-9]+)]], r{{[0-9]+}}}
; CHECK: adds {{lr|r[0-9]+}}, [[REG1]]
; CHECK: ldm {{lr|r[0-9]+}}, {r{{[0-9]+}}, r{{[0-9]+}}}
  %tmp = load i64* @f3_var, align 4
  %tmp1 = load i64* @f3_var2, align 4
  %0 = call i64 asm sideeffect "stm ${0:m}, ${1:M}\0A\09adds $3, $1\0A\09", "=*m,=r,1,r"(i64** @f3_ptr, i64 %tmp, i64 %tmp1) nounwind
  store i64 %0, i64* @f3_var, align 4
  %1 = call i64 asm sideeffect "ldm ${1:m}, ${0:M}\0A\09", "=r,*m"(i64** @f3_ptr) nounwind
  store i64 %1, i64* @f3_var, align 4
  ret void
}
60