1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
3; RUN:   < %s -mtriple=powerpc64-unknown-linux -mcpu=pwr8 | FileCheck %s
4; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
5; RUN:   < %s -mtriple=powerpc64le-unknown-linux -mcpu=pwr9 | FileCheck %s
6; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
7; RUN:   < %s -mtriple=powerpc64le-unknown-linux -mcpu=pwr8 -mattr=-vsx | \
8; RUN:   FileCheck %s -check-prefix=NOVSX
9; RUN: llc -mtriple=powerpc64le-unknown-linux -mcpu=pwr9 < %s -simplify-mir \
10; RUN:   -stop-after=machine-cp | FileCheck %s -check-prefix=MIR
11
12declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
13declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
14declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
15declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
16
17declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)
18declare i64 @llvm.experimental.constrained.fptosi.i64.f32(float, metadata)
19declare i64 @llvm.experimental.constrained.fptoui.i64.f32(float, metadata)
20declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)
21
22declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
23declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
24declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
25declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)
26
27declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)
28declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
29declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
30declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)
31
; Strict fptosi f64 -> i32: with VSX this is xscvdpsxws plus a direct move
; (mffprwz); without VSX the result round-trips through the stack (stfiwx/lwz).
define i32 @d_to_i32(double %m) #0 {
; CHECK-LABEL: d_to_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpsxws f0, f1
; CHECK-NEXT:    mffprwz r3, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: d_to_i32:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctiwz f0, f1
; NOVSX-NEXT:    addi r3, r1, -4
; NOVSX-NEXT:    stfiwx f0, 0, r3
; NOVSX-NEXT:    lwz r3, -4(r1)
; NOVSX-NEXT:    blr
entry:
  ; fpexcept.strict: the conversion must stay ordered and not be folded away.
  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.strict") #0
  ret i32 %conv
}
50
; Strict fptosi f64 -> i64: VSX uses xscvdpsxds + mffprd; the no-VSX path uses
; fctidz with a 64-bit stack round-trip (stfd/ld).
define i64 @d_to_i64(double %m) #0 {
; CHECK-LABEL: d_to_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpsxds f0, f1
; CHECK-NEXT:    mffprd r3, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: d_to_i64:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctidz f0, f1
; NOVSX-NEXT:    stfd f0, -8(r1)
; NOVSX-NEXT:    ld r3, -8(r1)
; NOVSX-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.strict") #0
  ret i64 %conv
}
68
; Strict fptoui f64 -> u64: unsigned variant of d_to_i64 (xscvdpuxds with VSX,
; fctiduz plus stfd/ld without).
define i64 @d_to_u64(double %m) #0 {
; CHECK-LABEL: d_to_u64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpuxds f0, f1
; CHECK-NEXT:    mffprd r3, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: d_to_u64:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctiduz f0, f1
; NOVSX-NEXT:    stfd f0, -8(r1)
; NOVSX-NEXT:    ld r3, -8(r1)
; NOVSX-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.strict") #0
  ret i64 %conv
}
86
; Strict fptoui f64 -> u32 with a zeroext return: the VSX path adds clrldi to
; satisfy the zero-extension ABI requirement; no-VSX uses fctiwuz + stfiwx/lwz.
define zeroext i32 @d_to_u32(double %m) #0 {
; CHECK-LABEL: d_to_u32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpuxws f0, f1
; CHECK-NEXT:    mffprwz r3, f0
; CHECK-NEXT:    clrldi r3, r3, 32
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: d_to_u32:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctiwuz f0, f1
; NOVSX-NEXT:    addi r3, r1, -4
; NOVSX-NEXT:    stfiwx f0, 0, r3
; NOVSX-NEXT:    lwz r3, -4(r1)
; NOVSX-NEXT:    blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.strict") #0
  ret i32 %conv
}
106
; Strict fptosi f32 -> i32 with a signext return: VSX adds extsw to sign-extend
; the result; the no-VSX path gets the extension for free via lwa.
define signext i32 @f_to_i32(float %m) #0 {
; CHECK-LABEL: f_to_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpsxws f0, f1
; CHECK-NEXT:    mffprwz r3, f0
; CHECK-NEXT:    extsw r3, r3
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: f_to_i32:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctiwz f0, f1
; NOVSX-NEXT:    addi r3, r1, -4
; NOVSX-NEXT:    stfiwx f0, 0, r3
; NOVSX-NEXT:    lwa r3, -4(r1)
; NOVSX-NEXT:    blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.strict") #0
  ret i32 %conv
}
126
; Strict fptosi f32 -> i64: emits the same sequence as the f64 source case
; (xscvdpsxds / fctidz), since the single is held in double format.
define i64 @f_to_i64(float %m) #0 {
; CHECK-LABEL: f_to_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpsxds f0, f1
; CHECK-NEXT:    mffprd r3, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: f_to_i64:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctidz f0, f1
; NOVSX-NEXT:    stfd f0, -8(r1)
; NOVSX-NEXT:    ld r3, -8(r1)
; NOVSX-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.strict") #0
  ret i64 %conv
}
144
; Strict fptoui f32 -> u64: unsigned variant of f_to_i64 (xscvdpuxds with VSX,
; fctiduz plus stfd/ld without).
define i64 @f_to_u64(float %m) #0 {
; CHECK-LABEL: f_to_u64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpuxds f0, f1
; CHECK-NEXT:    mffprd r3, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: f_to_u64:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctiduz f0, f1
; NOVSX-NEXT:    stfd f0, -8(r1)
; NOVSX-NEXT:    ld r3, -8(r1)
; NOVSX-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.strict") #0
  ret i64 %conv
}
162
; Strict fptoui f32 -> u32 with a zeroext return: mirrors d_to_u32 (clrldi for
; the zero-extension with VSX; fctiwuz + stfiwx/lwz without).
define zeroext i32 @f_to_u32(float %m) #0 {
; CHECK-LABEL: f_to_u32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xscvdpuxws f0, f1
; CHECK-NEXT:    mffprwz r3, f0
; CHECK-NEXT:    clrldi r3, r3, 32
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: f_to_u32:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    fctiwuz f0, f1
; NOVSX-NEXT:    addi r3, r1, -4
; NOVSX-NEXT:    stfiwx f0, 0, r3
; NOVSX-NEXT:    lwz r3, -4(r1)
; NOVSX-NEXT:    blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.strict") #0
  ret i32 %conv
}
182
; Strict sitofp i32 -> f64: VSX moves the GPR into a VSR sign-extended
; (mtfprwa) then converts with xscvsxddp; no-VSX stores and reloads with the
; sign-extending lfiwax before fcfid.
define double @i32_to_d(i32 signext %m) #0 {
; CHECK-LABEL: i32_to_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtfprwa f0, r3
; CHECK-NEXT:    xscvsxddp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: i32_to_d:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    addi r4, r1, -4
; NOVSX-NEXT:    stw r3, -4(r1)
; NOVSX-NEXT:    lfiwax f0, 0, r4
; NOVSX-NEXT:    fcfid f1, f0
; NOVSX-NEXT:    blr
entry:
  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret double %conv
}
201
; Strict sitofp i64 -> f64: direct move (mtfprd) + xscvsxddp with VSX;
; std/lfd round-trip followed by fcfid without.
define double @i64_to_d(i64 %m) #0 {
; CHECK-LABEL: i64_to_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtfprd f0, r3
; CHECK-NEXT:    xscvsxddp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: i64_to_d:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    std r3, -8(r1)
; NOVSX-NEXT:    lfd f0, -8(r1)
; NOVSX-NEXT:    fcfid f1, f0
; NOVSX-NEXT:    blr
entry:
  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret double %conv
}
219
; Strict uitofp i32 -> f64: unsigned variant of i32_to_d — zero-extending move
; (mtfprwz) + xscvuxddp with VSX; lfiwzx + fcfidu without.
define double @u32_to_d(i32 zeroext %m) #0 {
; CHECK-LABEL: u32_to_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtfprwz f0, r3
; CHECK-NEXT:    xscvuxddp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: u32_to_d:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    addi r4, r1, -4
; NOVSX-NEXT:    stw r3, -4(r1)
; NOVSX-NEXT:    lfiwzx f0, 0, r4
; NOVSX-NEXT:    fcfidu f1, f0
; NOVSX-NEXT:    blr
entry:
  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret double %conv
}
238
; Strict uitofp i64 -> f64: mtfprd + xscvuxddp with VSX; std/lfd + fcfidu
; without.
define double @u64_to_d(i64 %m) #0 {
; CHECK-LABEL: u64_to_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtfprd f0, r3
; CHECK-NEXT:    xscvuxddp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: u64_to_d:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    std r3, -8(r1)
; NOVSX-NEXT:    lfd f0, -8(r1)
; NOVSX-NEXT:    fcfidu f1, f0
; NOVSX-NEXT:    blr
entry:
  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret double %conv
}
256
; Strict sitofp i32 -> f32: same shape as i32_to_d but with the
; single-precision converters (xscvsxdsp / fcfids).
define float @i32_to_f(i32 signext %m) #0 {
; CHECK-LABEL: i32_to_f:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtfprwa f0, r3
; CHECK-NEXT:    xscvsxdsp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: i32_to_f:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    addi r4, r1, -4
; NOVSX-NEXT:    stw r3, -4(r1)
; NOVSX-NEXT:    lfiwax f0, 0, r4
; NOVSX-NEXT:    fcfids f1, f0
; NOVSX-NEXT:    blr
entry:
  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret float %conv
}
275
; Strict sitofp i64 -> f32: mtfprd + xscvsxdsp with VSX; std/lfd + fcfids
; without.
define float @i64_to_f(i64 %m) #0 {
; CHECK-LABEL: i64_to_f:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtfprd f0, r3
; CHECK-NEXT:    xscvsxdsp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: i64_to_f:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    std r3, -8(r1)
; NOVSX-NEXT:    lfd f0, -8(r1)
; NOVSX-NEXT:    fcfids f1, f0
; NOVSX-NEXT:    blr
entry:
  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret float %conv
}
293
; Strict uitofp i32 -> f32: zero-extending move (mtfprwz) + xscvuxdsp with VSX;
; lfiwzx + fcfidus without.
define float @u32_to_f(i32 zeroext %m) #0 {
; CHECK-LABEL: u32_to_f:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtfprwz f0, r3
; CHECK-NEXT:    xscvuxdsp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: u32_to_f:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    addi r4, r1, -4
; NOVSX-NEXT:    stw r3, -4(r1)
; NOVSX-NEXT:    lfiwzx f0, 0, r4
; NOVSX-NEXT:    fcfidus f1, f0
; NOVSX-NEXT:    blr
entry:
  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret float %conv
}
312
; Strict uitofp i64 -> f32: mtfprd + xscvuxdsp with VSX; std/lfd + fcfidus
; without.
define float @u64_to_f(i64 %m) #0 {
; CHECK-LABEL: u64_to_f:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    mtfprd f0, r3
; CHECK-NEXT:    xscvuxdsp f1, f0
; CHECK-NEXT:    blr
;
; NOVSX-LABEL: u64_to_f:
; NOVSX:       # %bb.0: # %entry
; NOVSX-NEXT:    std r3, -8(r1)
; NOVSX-NEXT:    lfd f0, -8(r1)
; NOVSX-NEXT:    fcfidus f1, f0
; NOVSX-NEXT:    blr
entry:
  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret float %conv
}
330
; With fpexcept.ignore, the f64->int conversion MIs must carry the nofpexcept
; flag (checked on MIR after machine-cp). The volatile stores keep all four
; conversions alive and ordered.
define void @fptoint_nofpexcept_f64(double %m, i32* %addr1, i64* %addr2) {
; MIR-LABEL: name: fptoint_nofpexcept_f64
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPSXWS
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPUXWS
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPSXDS
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPUXDS
entry:
  %conv1 = tail call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.ignore") #0
  %conv2 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.ignore") #0
  %conv3 = tail call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.ignore") #0
  %conv4 = tail call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.ignore") #0
  store volatile i32 %conv1, i32* %addr1, align 4
  store volatile i32 %conv2, i32* %addr1, align 4
  store volatile i64 %conv3, i64* %addr2, align 8
  store volatile i64 %conv4, i64* %addr2, align 8
  ret void
}
348
; Same nofpexcept MIR-flag check as fptoint_nofpexcept_f64, but for an f32
; source (the same XSCVDP* conversions are expected).
define void @fptoint_nofpexcept_f32(float %m, i32* %addr1, i64* %addr2) {
; MIR-LABEL: name: fptoint_nofpexcept_f32
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPSXWS
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPUXWS
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPSXDS
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPUXDS
entry:
  %conv1 = tail call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.ignore") #0
  %conv2 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.ignore") #0
  %conv3 = tail call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.ignore") #0
  %conv4 = tail call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.ignore") #0
  store volatile i32 %conv1, i32* %addr1, align 4
  store volatile i32 %conv2, i32* %addr1, align 4
  store volatile i64 %conv3, i64* %addr2, align 8
  store volatile i64 %conv4, i64* %addr2, align 8
  ret void
}
366
; With fpexcept.ignore, the i32->fp conversion MIs must carry the nofpexcept
; flag on MIR (XSCVSXD*/XSCVUXD* single- and double-precision forms).
define void @inttofp_nofpexcept_i32(i32 %m, float* %addr1, double* %addr2) {
; MIR-LABEL: name: inttofp_nofpexcept_i32
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDSP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDSP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDDP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDDP
entry:
  %conv1 = tail call float  @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
  %conv2 = tail call float  @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
  %conv3 = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
  %conv4 = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
  store volatile float  %conv1, float*  %addr1, align 4
  store volatile float  %conv2, float*  %addr1, align 4
  store volatile double %conv3, double* %addr2, align 8
  store volatile double %conv4, double* %addr2, align 8
  ret void
}
384
; Same nofpexcept MIR-flag check as inttofp_nofpexcept_i32, but for an i64
; source.
define void @inttofp_nofpexcept_i64(i64 %m, float* %addr1, double* %addr2) {
; MIR-LABEL: name: inttofp_nofpexcept_i64
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDSP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDSP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDDP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDDP
entry:
  %conv1 = tail call float  @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
  %conv2 = tail call float  @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
  %conv3 = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
  %conv4 = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
  store volatile float  %conv1, float*  %addr1, align 4
  store volatile float  %conv2, float*  %addr1, align 4
  store volatile double %conv3, double* %addr2, align 8
  store volatile double %conv4, double* %addr2, align 8
  ret void
}
402
; Vector case: the nofpexcept flag must also appear on the vector conversion
; (XVCVSXDDP) produced for a constrained sitofp with fpexcept.ignore.
define <2 x double> @inttofp_nofpexcept_vec(<2 x i16> %m) {
; MIR-LABEL: name: inttofp_nofpexcept_vec
; MIR: renamable $v{{[0-9]+}} = nofpexcept XVCVSXDDP
entry:
  %conv = tail call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16> %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
  ret <2 x double> %conv
}
410
411declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16>, metadata, metadata)
412
413attributes #0 = { strictfp }
414