; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names < %s -mcpu=e500 \
; RUN:   -mtriple=powerpc-unknown-linux-gnu -mattr=spe | FileCheck %s \
; RUN:   -check-prefix=SPE
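;
; Check that the constrained fptosi/fptoui/sitofp/uitofp intrinsics for f32
; and f64 with i32 and i64 are lowered under the SPE ABI: 32-bit integer
; conversions use SPE instructions (efdctsiz, efdctuiz, efsctsiz, efsctuiz,
; efdcfsi, efdcfui, efscfsi, efscfui), while 64-bit integer conversions are
; lowered to compiler-rt libcalls (__fixdfdi, __fixunsdfdi, __fixsfdi,
; __fixunssfdi, __floatdidf, __floatundidf, __floatdisf, __floatundisf).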

declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)

declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)
declare i64 @llvm.experimental.constrained.fptosi.i64.f32(float, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f32(float, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)

declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)

declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)
declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)

define i32 @d_to_i32(double %m) #0 {
; SPE-LABEL: d_to_i32:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    evmergelo r3, r3, r4
; SPE-NEXT:    efdctsiz r3, r3
; SPE-NEXT:    blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.strict") #0
  ret i32 %conv
}

define i64 @d_to_i64(double %m) #0 {
; SPE-LABEL: d_to_i64:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stw r0, 4(r1)
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    evmergelo r4, r3, r4
; SPE-NEXT:    evmergehi r3, r4, r4
; SPE-NEXT:    # kill: def $r4 killed $r4 killed $s4
; SPE-NEXT:    # kill: def $r3 killed $r3 killed $s3
; SPE-NEXT:    bl __fixdfdi
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.strict") #0
  ret i64 %conv
}

define i64 @d_to_u64(double %m) #0 {
; SPE-LABEL: d_to_u64:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stw r0, 4(r1)
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    evmergelo r4, r3, r4
; SPE-NEXT:    evmergehi r3, r4, r4
; SPE-NEXT:    # kill: def $r4 killed $r4 killed $s4
; SPE-NEXT:    # kill: def $r3 killed $r3 killed $s3
; SPE-NEXT:    bl __fixunsdfdi
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.strict") #0
  ret i64 %conv
}

define zeroext i32 @d_to_u32(double %m) #0 {
; SPE-LABEL: d_to_u32:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    evmergelo r3, r3, r4
; SPE-NEXT:    efdctuiz r3, r3
; SPE-NEXT:    blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.strict") #0
  ret i32 %conv
}

define signext i32 @f_to_i32(float %m) #0 {
; SPE-LABEL: f_to_i32:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    efsctsiz r3, r3
; SPE-NEXT:    blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.strict") #0
  ret i32 %conv
}

define i64 @f_to_i64(float %m) #0 {
; SPE-LABEL: f_to_i64:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stw r0, 4(r1)
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    bl __fixsfdi
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.strict") #0
  ret i64 %conv
}

define i64 @f_to_u64(float %m) #0 {
; SPE-LABEL: f_to_u64:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stw r0, 4(r1)
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    bl __fixunssfdi
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.strict") #0
  ret i64 %conv
}

define zeroext i32 @f_to_u32(float %m) #0 {
; SPE-LABEL: f_to_u32:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    efsctuiz r3, r3
; SPE-NEXT:    blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.strict") #0
  ret i32 %conv
}

define double @i32_to_d(i32 signext %m) #0 {
; SPE-LABEL: i32_to_d:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    efdcfsi r4, r3
; SPE-NEXT:    evmergehi r3, r4, r4
; SPE-NEXT:    # kill: def $r4 killed $r4 killed $s4
; SPE-NEXT:    # kill: def $r3 killed $r3 killed $s3
; SPE-NEXT:    blr
entry:
  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret double %conv
}

define double @i64_to_d(i64 %m) #0 {
; SPE-LABEL: i64_to_d:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stw r0, 4(r1)
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    bl __floatdidf
; SPE-NEXT:    evmergelo r4, r3, r4
; SPE-NEXT:    evmergehi r3, r4, r4
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    # kill: def $r3 killed $r3 killed $s3
; SPE-NEXT:    # kill: def $r4 killed $r4 killed $s4
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret double %conv
}

define double @u32_to_d(i32 zeroext %m) #0 {
; SPE-LABEL: u32_to_d:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    efdcfui r4, r3
; SPE-NEXT:    evmergehi r3, r4, r4
; SPE-NEXT:    # kill: def $r4 killed $r4 killed $s4
; SPE-NEXT:    # kill: def $r3 killed $r3 killed $s3
; SPE-NEXT:    blr
entry:
  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret double %conv
}

define double @u64_to_d(i64 %m) #0 {
; SPE-LABEL: u64_to_d:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stw r0, 4(r1)
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    bl __floatundidf
; SPE-NEXT:    evmergelo r4, r3, r4
; SPE-NEXT:    evmergehi r3, r4, r4
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    # kill: def $r3 killed $r3 killed $s3
; SPE-NEXT:    # kill: def $r4 killed $r4 killed $s4
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret double %conv
}

define float @i32_to_f(i32 signext %m) #0 {
; SPE-LABEL: i32_to_f:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    efscfsi r3, r3
; SPE-NEXT:    blr
entry:
  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret float %conv
}

define float @i64_to_f(i64 %m) #0 {
; SPE-LABEL: i64_to_f:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stw r0, 4(r1)
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    bl __floatdisf
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret float %conv
}

define float @u32_to_f(i32 zeroext %m) #0 {
; SPE-LABEL: u32_to_f:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    efscfui r3, r3
; SPE-NEXT:    blr
entry:
  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret float %conv
}

define float @u64_to_f(i64 %m) #0 {
; SPE-LABEL: u64_to_f:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stw r0, 4(r1)
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    bl __floatundisf
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret float %conv
}

attributes #0 = { strictfp }