; RUN: llc -march=mipsel -mcpu=mips32 -mattr=+fp64,+msa,-nooddspreg \
; RUN:     -no-integrated-as < %s | FileCheck %s -check-prefix=ALL \
; RUN:     -check-prefix=ODDSPREG
; RUN: llc -march=mipsel -mcpu=mips32 -mattr=+fp64,+msa,+nooddspreg \
; RUN:     -no-integrated-as < %s | FileCheck %s -check-prefix=ALL \
; RUN:     -check-prefix=NOODDSPREG
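;
; This test pins floats to odd-numbered single-precision registers with inline
; asm and mixes them with MSA vector operations. The ODDSPREG run expects the
; allocator to use those odd registers directly; the NOODDSPREG run expects
; extra copies (and, in msa_extract_1, a spill) to keep values out of them.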

@v4f32 = global <4 x float> zeroinitializer

define void @msa_insert_0(float %a) {
entry:
  ; Force the float into an odd-numbered register using named registers and
  ; load the vector.
  %b = call float asm sideeffect "mov.s $0, $1", "={$f13},{$f12}" (float %a)
  %0 = load volatile <4 x float>, <4 x float>* @v4f32

  ; Clobber all except $f12/$w12 and $f13
  ;
  ; The intention is that if odd single precision registers are permitted, the
  ; allocator will choose $f12/$w12 for the vector and $f13 for the float to
  ; avoid the spill/reload.
  ;
  ; On the other hand, if odd single precision registers are not permitted, it
  ; must copy $f13 to an even-numbered register before inserting into the
  ; vector.
  call void asm sideeffect "teqi $$zero, 1", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
  %1 = insertelement <4 x float> %0, float %b, i32 0
  store <4 x float> %1, <4 x float>* @v4f32
  ret void
}

; ALL-LABEL:  msa_insert_0:
; ALL:            mov.s $f13, $f12
; ALL:            lw $[[R0:[0-9]+]], %got(v4f32)(
; ALL:            ld.w $w[[W0:[0-9]+]], 0($[[R0]])
; NOODDSPREG:     mov.s $f[[F0:[0-9]+]], $f13
; NOODDSPREG:     insve.w $w[[W0]][0], $w[[F0]][0]
; ODDSPREG:       insve.w $w[[W0]][0], $w13[0]
; ALL:            teqi $zero, 1
; ALL-NOT: sdc1
; ALL-NOT: ldc1
; ALL:            st.w $w[[W0]], 0($[[R0]])
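
; Note: insve.w reads lane 0 of its source operand, and each MSA register $wN
; overlaps FPU register $fN, which is why the ODDSPREG run can insert straight
; from $w13 while the NOODDSPREG run needs the extra mov.s into an even
; register first. The same applies to msa_insert_1 below.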

define void @msa_insert_1(float %a) {
entry:
  ; Force the float into an odd-numbered register using named registers and
  ; load the vector.
  %b = call float asm sideeffect "mov.s $0, $1", "={$f13},{$f12}" (float %a)
  %0 = load volatile <4 x float>, <4 x float>* @v4f32

  ; Clobber all except $f12/$w12 and $f13
  ;
  ; The intention is that if odd single precision registers are permitted, the
  ; allocator will choose $f12/$w12 for the vector and $f13 for the float to
  ; avoid the spill/reload.
  ;
  ; On the other hand, if odd single precision registers are not permitted, it
  ; must copy $f13 to an even-numbered register before inserting into the
  ; vector.
  call void asm sideeffect "teqi $$zero, 1", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
  %1 = insertelement <4 x float> %0, float %b, i32 1
  store <4 x float> %1, <4 x float>* @v4f32
  ret void
}

; ALL-LABEL:  msa_insert_1:
; ALL:            mov.s $f13, $f12
; ALL:            lw $[[R0:[0-9]+]], %got(v4f32)(
; ALL:            ld.w $w[[W0:[0-9]+]], 0($[[R0]])
; NOODDSPREG:     mov.s $f[[F0:[0-9]+]], $f13
; NOODDSPREG:     insve.w $w[[W0]][1], $w[[F0]][0]
; ODDSPREG:       insve.w $w[[W0]][1], $w13[0]
; ALL:            teqi $zero, 1
; ALL-NOT: sdc1
; ALL-NOT: ldc1
; ALL:            st.w $w[[W0]], 0($[[R0]])

define float @msa_extract_0() {
entry:
  %0 = load volatile <4 x float>, <4 x float>* @v4f32
  %1 = call <4 x float> asm sideeffect "move.v $0, $1", "={$w13},{$w12}" (<4 x float> %0)

  ; Clobber all except $f12 and $f13
  ;
  ; The intention is that if odd single precision registers are permitted, the
  ; allocator will choose $f13/$w13 for the vector since that saves on moves.
  ;
  ; On the other hand, if odd single precision registers are not permitted, it
  ; must move it to $f12/$w12.
  call void asm sideeffect "teqi $$zero, 1", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()

  %2 = extractelement <4 x float> %1, i32 0
  ret float %2
}

; ALL-LABEL:  msa_extract_0:
; ALL:            lw $[[R0:[0-9]+]], %got(v4f32)(
; ALL:            ld.w $w12, 0($[[R0]])
; ALL:            move.v $w[[W0:13]], $w12
; NOODDSPREG:     move.v $w[[W0:12]], $w13
; ALL:            teqi $zero, 1
; ALL-NOT: st.w
; ALL-NOT: ld.w
; ALL:            mov.s $f0, $f[[W0]]
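
; Note: under ODDSPREG the vector is already in $w13, so only the final mov.s
; into the return register $f0 is needed; under NOODDSPREG the extra move.v
; carries it across to the even register $w12 first.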

define float @msa_extract_1() {
entry:
  %0 = load volatile <4 x float>, <4 x float>* @v4f32
  %1 = call <4 x float> asm sideeffect "move.v $0, $1", "={$w13},{$w12}" (<4 x float> %0)

  ; Clobber all except $f13
  ;
  ; The intention is that if odd single precision registers are permitted, the
  ; allocator will choose $f13/$w13 for the vector since that saves on moves.
  ;
  ; On the other hand, if odd single precision registers are not permitted, it
  ; must be spilled.
  call void asm sideeffect "teqi $$zero, 1", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f12},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()

  %2 = extractelement <4 x float> %1, i32 1
  ret float %2
}

; ALL-LABEL:  msa_extract_1:
; ALL:            lw $[[R0:[0-9]+]], %got(v4f32)(
; ALL:            ld.w $w12, 0($[[R0]])
; ALL:            splati.w $w[[W0:[0-9]+]], $w13[1]
; NOODDSPREG:     st.w $w[[W0]], 0($sp)
; ODDSPREG-NOT: st.w
; ODDSPREG-NOT: ld.w
; ALL:            teqi $zero, 1
; ODDSPREG-NOT: st.w
; ODDSPREG-NOT: ld.w
; NOODDSPREG:     ld.w $w0, 0($sp)
; ODDSPREG:       mov.s $f0, $f[[W0]]
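
; Note: splati.w brings element 1 down to lane 0 so it can be read through the
; overlapping $f register. With everything except $f13 clobbered, NOODDSPREG
; has no legal register to hold the value across the asm and must go through
; the stack (st.w/ld.w), while ODDSPREG keeps it in $w13 and avoids the spill.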