; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV64I
; RUN: llc -mtriple=riscv32 -verify-machineinstrs -mattr=+f \
; RUN:   -target-abi ilp32f < %s | FileCheck %s -check-prefix=RV32IF
; RUN: llc -mtriple=riscv32 -verify-machineinstrs -mattr=+f -mattr=+d \
; RUN:   -target-abi ilp32d < %s | FileCheck %s -check-prefix=RV32IFD
; RUN: llc -mtriple=riscv64 -verify-machineinstrs -mattr=+f -mattr=+d \
; RUN:   -target-abi lp64d < %s | FileCheck %s -check-prefix=RV64IFD
; RUN: llc -mtriple=riscv32 -verify-machineinstrs -mattr=+f \
; RUN:   -mattr=+experimental-zfh -target-abi ilp32f < %s \
; RUN:   | FileCheck %s -check-prefix=RV32IFZFH
; RUN: llc -mtriple=riscv32 -verify-machineinstrs -mattr=+f -mattr=+d \
; RUN:   -mattr=+experimental-zfh -target-abi ilp32d < %s \
; RUN:   | FileCheck %s -check-prefix=RV32IFDZFH
; RUN: llc -mtriple=riscv64 -verify-machineinstrs -mattr=+f -mattr=+d \
; RUN:   -mattr=+experimental-zfh -target-abi lp64d < %s \
; RUN:   | FileCheck %s -check-prefix=RV64IFDZFH

; Test fcopysign scenarios where the sign argument is cast to the type of the
; magnitude argument. Those casts can be folded away by the DAGCombiner.
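;
; As a rough sketch (approximate notation, not the exact DAGCombiner code), the
; IR pattern below, e.g.
;   %c = fpext float %b to double
;   %t = call double @llvm.copysign.f64(double %a, double %c)
; is combined in the SelectionDAG roughly as:
;   (fcopysign X, (fp_extend Y)) -> (fcopysign X, Y)
;   (fcopysign X, (fp_round Y))  -> (fcopysign X, Y)   ; when safe to drop
; The mixed-type fcopysign is then lowered per configuration, as the checks
; below show for each RISC-V target/ABI combination.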

declare double @llvm.copysign.f64(double, double)
declare float @llvm.copysign.f32(float, float)
declare half @llvm.copysign.f16(half, half)

define double @fold_promote_d_s(double %a, float %b) nounwind {
; RV32I-LABEL: fold_promote_d_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lui a3, 524288
; RV32I-NEXT:    and a2, a2, a3
; RV32I-NEXT:    addi a3, a3, -1
; RV32I-NEXT:    and a1, a1, a3
; RV32I-NEXT:    or a1, a1, a2
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fold_promote_d_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi a2, zero, -1
; RV64I-NEXT:    slli a2, a2, 63
; RV64I-NEXT:    addi a2, a2, -1
; RV64I-NEXT:    and a0, a0, a2
; RV64I-NEXT:    addi a2, zero, 1
; RV64I-NEXT:    slli a2, a2, 31
; RV64I-NEXT:    and a1, a1, a2
; RV64I-NEXT:    slli a1, a1, 32
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    ret
;
; RV32IF-LABEL: fold_promote_d_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fmv.x.w a2, fa0
; RV32IF-NEXT:    lui a3, 524288
; RV32IF-NEXT:    and a2, a2, a3
; RV32IF-NEXT:    addi a3, a3, -1
; RV32IF-NEXT:    and a1, a1, a3
; RV32IF-NEXT:    or a1, a1, a2
; RV32IF-NEXT:    ret
;
; RV32IFD-LABEL: fold_promote_d_s:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.d.s ft0, fa1
; RV32IFD-NEXT:    fsgnj.d fa0, fa0, ft0
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fold_promote_d_s:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.d.s ft0, fa1
; RV64IFD-NEXT:    fsgnj.d fa0, fa0, ft0
; RV64IFD-NEXT:    ret
;
; RV32IFZFH-LABEL: fold_promote_d_s:
; RV32IFZFH:       # %bb.0:
; RV32IFZFH-NEXT:    fmv.x.w a2, fa0
; RV32IFZFH-NEXT:    lui a3, 524288
; RV32IFZFH-NEXT:    and a2, a2, a3
; RV32IFZFH-NEXT:    addi a3, a3, -1
; RV32IFZFH-NEXT:    and a1, a1, a3
; RV32IFZFH-NEXT:    or a1, a1, a2
; RV32IFZFH-NEXT:    ret
;
; RV32IFDZFH-LABEL: fold_promote_d_s:
; RV32IFDZFH:       # %bb.0:
; RV32IFDZFH-NEXT:    fcvt.d.s ft0, fa1
; RV32IFDZFH-NEXT:    fsgnj.d fa0, fa0, ft0
; RV32IFDZFH-NEXT:    ret
;
; RV64IFDZFH-LABEL: fold_promote_d_s:
; RV64IFDZFH:       # %bb.0:
; RV64IFDZFH-NEXT:    fcvt.d.s ft0, fa1
; RV64IFDZFH-NEXT:    fsgnj.d fa0, fa0, ft0
; RV64IFDZFH-NEXT:    ret
  %c = fpext float %b to double
  %t = call double @llvm.copysign.f64(double %a, double %c)
  ret double %t
}

define double @fold_promote_d_h(double %a, half %b) nounwind {
; RV32I-LABEL: fold_promote_d_h:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lui a3, 524288
; RV32I-NEXT:    addi a3, a3, -1
; RV32I-NEXT:    and a1, a1, a3
; RV32I-NEXT:    lui a3, 8
; RV32I-NEXT:    and a2, a2, a3
; RV32I-NEXT:    slli a2, a2, 16
; RV32I-NEXT:    or a1, a1, a2
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fold_promote_d_h:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi a2, zero, -1
; RV64I-NEXT:    slli a2, a2, 63
; RV64I-NEXT:    addi a2, a2, -1
; RV64I-NEXT:    and a0, a0, a2
; RV64I-NEXT:    lui a2, 8
; RV64I-NEXT:    and a1, a1, a2
; RV64I-NEXT:    slli a1, a1, 48
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    ret
;
; RV32IF-LABEL: fold_promote_d_h:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fmv.x.w a2, fa0
; RV32IF-NEXT:    lui a3, 524288
; RV32IF-NEXT:    and a2, a2, a3
; RV32IF-NEXT:    addi a3, a3, -1
; RV32IF-NEXT:    and a1, a1, a3
; RV32IF-NEXT:    or a1, a1, a2
; RV32IF-NEXT:    ret
;
; RV32IFD-LABEL: fold_promote_d_h:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.d.s ft0, fa1
; RV32IFD-NEXT:    fsgnj.d fa0, fa0, ft0
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fold_promote_d_h:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.d.s ft0, fa1
; RV64IFD-NEXT:    fsgnj.d fa0, fa0, ft0
; RV64IFD-NEXT:    ret
;
; RV32IFZFH-LABEL: fold_promote_d_h:
; RV32IFZFH:       # %bb.0:
; RV32IFZFH-NEXT:    fmv.x.h a2, fa0
; RV32IFZFH-NEXT:    lui a3, 524288
; RV32IFZFH-NEXT:    addi a3, a3, -1
; RV32IFZFH-NEXT:    and a1, a1, a3
; RV32IFZFH-NEXT:    lui a3, 8
; RV32IFZFH-NEXT:    and a2, a2, a3
; RV32IFZFH-NEXT:    slli a2, a2, 16
; RV32IFZFH-NEXT:    or a1, a1, a2
; RV32IFZFH-NEXT:    ret
;
; RV32IFDZFH-LABEL: fold_promote_d_h:
; RV32IFDZFH:       # %bb.0:
; RV32IFDZFH-NEXT:    fcvt.d.h ft0, fa1
; RV32IFDZFH-NEXT:    fsgnj.d fa0, fa0, ft0
; RV32IFDZFH-NEXT:    ret
;
; RV64IFDZFH-LABEL: fold_promote_d_h:
; RV64IFDZFH:       # %bb.0:
; RV64IFDZFH-NEXT:    fcvt.d.h ft0, fa1
; RV64IFDZFH-NEXT:    fsgnj.d fa0, fa0, ft0
; RV64IFDZFH-NEXT:    ret
  %c = fpext half %b to double
  %t = call double @llvm.copysign.f64(double %a, double %c)
  ret double %t
}

define float @fold_promote_f_h(float %a, half %b) nounwind {
; RV32I-LABEL: fold_promote_f_h:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lui a2, 524288
; RV32I-NEXT:    addi a2, a2, -1
; RV32I-NEXT:    and a0, a0, a2
; RV32I-NEXT:    lui a2, 8
; RV32I-NEXT:    and a1, a1, a2
; RV32I-NEXT:    slli a1, a1, 16
; RV32I-NEXT:    or a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fold_promote_f_h:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a2, 524288
; RV64I-NEXT:    addiw a2, a2, -1
; RV64I-NEXT:    and a0, a0, a2
; RV64I-NEXT:    addi a2, zero, 1
; RV64I-NEXT:    slli a2, a2, 33
; RV64I-NEXT:    addi a2, a2, -1
; RV64I-NEXT:    slli a2, a2, 15
; RV64I-NEXT:    and a1, a1, a2
; RV64I-NEXT:    slli a1, a1, 16
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    ret
;
; RV32IF-LABEL: fold_promote_f_h:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fsgnj.s fa0, fa0, fa1
; RV32IF-NEXT:    ret
;
; RV32IFD-LABEL: fold_promote_f_h:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fsgnj.s fa0, fa0, fa1
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fold_promote_f_h:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fsgnj.s fa0, fa0, fa1
; RV64IFD-NEXT:    ret
;
; RV32IFZFH-LABEL: fold_promote_f_h:
; RV32IFZFH:       # %bb.0:
; RV32IFZFH-NEXT:    fcvt.s.h ft0, fa1
; RV32IFZFH-NEXT:    fsgnj.s fa0, fa0, ft0
; RV32IFZFH-NEXT:    ret
;
; RV32IFDZFH-LABEL: fold_promote_f_h:
; RV32IFDZFH:       # %bb.0:
; RV32IFDZFH-NEXT:    fcvt.s.h ft0, fa1
; RV32IFDZFH-NEXT:    fsgnj.s fa0, fa0, ft0
; RV32IFDZFH-NEXT:    ret
;
; RV64IFDZFH-LABEL: fold_promote_f_h:
; RV64IFDZFH:       # %bb.0:
; RV64IFDZFH-NEXT:    fcvt.s.h ft0, fa1
; RV64IFDZFH-NEXT:    fsgnj.s fa0, fa0, ft0
; RV64IFDZFH-NEXT:    ret
  %c = fpext half %b to float
  %t = call float @llvm.copysign.f32(float %a, float %c)
  ret float %t
}

define float @fold_demote_s_d(float %a, double %b) nounwind {
; RV32I-LABEL: fold_demote_s_d:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lui a1, 524288
; RV32I-NEXT:    and a2, a2, a1
; RV32I-NEXT:    addi a1, a1, -1
; RV32I-NEXT:    and a0, a0, a1
; RV32I-NEXT:    or a0, a0, a2
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fold_demote_s_d:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a2, 524288
; RV64I-NEXT:    addiw a2, a2, -1
; RV64I-NEXT:    and a0, a0, a2
; RV64I-NEXT:    addi a2, zero, -1
; RV64I-NEXT:    slli a2, a2, 63
; RV64I-NEXT:    and a1, a1, a2
; RV64I-NEXT:    srli a1, a1, 32
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    ret
;
; RV32IF-LABEL: fold_demote_s_d:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fmv.w.x ft0, a1
; RV32IF-NEXT:    fsgnj.s fa0, fa0, ft0
; RV32IF-NEXT:    ret
;
; RV32IFD-LABEL: fold_demote_s_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.s.d ft0, fa1
; RV32IFD-NEXT:    fsgnj.s fa0, fa0, ft0
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fold_demote_s_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.s.d ft0, fa1
; RV64IFD-NEXT:    fsgnj.s fa0, fa0, ft0
; RV64IFD-NEXT:    ret
;
; RV32IFZFH-LABEL: fold_demote_s_d:
; RV32IFZFH:       # %bb.0:
; RV32IFZFH-NEXT:    fmv.w.x ft0, a1
; RV32IFZFH-NEXT:    fsgnj.s fa0, fa0, ft0
; RV32IFZFH-NEXT:    ret
;
; RV32IFDZFH-LABEL: fold_demote_s_d:
; RV32IFDZFH:       # %bb.0:
; RV32IFDZFH-NEXT:    fcvt.s.d ft0, fa1
; RV32IFDZFH-NEXT:    fsgnj.s fa0, fa0, ft0
; RV32IFDZFH-NEXT:    ret
;
; RV64IFDZFH-LABEL: fold_demote_s_d:
; RV64IFDZFH:       # %bb.0:
; RV64IFDZFH-NEXT:    fcvt.s.d ft0, fa1
; RV64IFDZFH-NEXT:    fsgnj.s fa0, fa0, ft0
; RV64IFDZFH-NEXT:    ret
  %c = fptrunc double %b to float
  %t = call float @llvm.copysign.f32(float %a, float %c)
  ret float %t
}

define half @fold_demote_h_s(half %a, float %b) nounwind {
; RV32I-LABEL: fold_demote_h_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp)
; RV32I-NEXT:    sw s0, 8(sp)
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    lui a1, 16
; RV32I-NEXT:    addi a1, a1, -1
; RV32I-NEXT:    and a0, a0, a1
; RV32I-NEXT:    call __gnu_h2f_ieee
; RV32I-NEXT:    lui a1, 524288
; RV32I-NEXT:    and a2, s0, a1
; RV32I-NEXT:    addi a1, a1, -1
; RV32I-NEXT:    and a0, a0, a1
; RV32I-NEXT:    or a0, a0, a2
; RV32I-NEXT:    call __gnu_f2h_ieee
; RV32I-NEXT:    lw s0, 8(sp)
; RV32I-NEXT:    lw ra, 12(sp)
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fold_demote_h_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp)
; RV64I-NEXT:    sd s0, 0(sp)
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    lui a1, 16
; RV64I-NEXT:    addiw a1, a1, -1
; RV64I-NEXT:    and a0, a0, a1
; RV64I-NEXT:    call __gnu_h2f_ieee
; RV64I-NEXT:    lui a1, 524288
; RV64I-NEXT:    and a2, s0, a1
; RV64I-NEXT:    addiw a1, a1, -1
; RV64I-NEXT:    and a0, a0, a1
; RV64I-NEXT:    or a0, a0, a2
; RV64I-NEXT:    call __gnu_f2h_ieee
; RV64I-NEXT:    ld s0, 0(sp)
; RV64I-NEXT:    ld ra, 8(sp)
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV32IF-LABEL: fold_demote_h_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp)
; RV32IF-NEXT:    fsw fs0, 8(sp)
; RV32IF-NEXT:    fmv.s fs0, fa1
; RV32IF-NEXT:    call __gnu_f2h_ieee
; RV32IF-NEXT:    call __gnu_h2f_ieee
; RV32IF-NEXT:    fsgnj.s fa0, fa0, fs0
; RV32IF-NEXT:    flw fs0, 8(sp)
; RV32IF-NEXT:    lw ra, 12(sp)
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV32IFD-LABEL: fold_demote_h_s:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp)
; RV32IFD-NEXT:    fsd fs0, 0(sp)
; RV32IFD-NEXT:    fmv.s fs0, fa1
; RV32IFD-NEXT:    call __gnu_f2h_ieee
; RV32IFD-NEXT:    call __gnu_h2f_ieee
; RV32IFD-NEXT:    fsgnj.s fa0, fa0, fs0
; RV32IFD-NEXT:    fld fs0, 0(sp)
; RV32IFD-NEXT:    lw ra, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fold_demote_h_s:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp)
; RV64IFD-NEXT:    fsd fs0, 0(sp)
; RV64IFD-NEXT:    fmv.s fs0, fa1
; RV64IFD-NEXT:    call __gnu_f2h_ieee
; RV64IFD-NEXT:    call __gnu_h2f_ieee
; RV64IFD-NEXT:    fsgnj.s fa0, fa0, fs0
; RV64IFD-NEXT:    fld fs0, 0(sp)
; RV64IFD-NEXT:    ld ra, 8(sp)
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IFZFH-LABEL: fold_demote_h_s:
; RV32IFZFH:       # %bb.0:
; RV32IFZFH-NEXT:    fcvt.h.s ft0, fa1
; RV32IFZFH-NEXT:    fsgnj.h fa0, fa0, ft0
; RV32IFZFH-NEXT:    ret
;
; RV32IFDZFH-LABEL: fold_demote_h_s:
; RV32IFDZFH:       # %bb.0:
; RV32IFDZFH-NEXT:    fcvt.h.s ft0, fa1
; RV32IFDZFH-NEXT:    fsgnj.h fa0, fa0, ft0
; RV32IFDZFH-NEXT:    ret
;
; RV64IFDZFH-LABEL: fold_demote_h_s:
; RV64IFDZFH:       # %bb.0:
; RV64IFDZFH-NEXT:    fcvt.h.s ft0, fa1
; RV64IFDZFH-NEXT:    fsgnj.h fa0, fa0, ft0
; RV64IFDZFH-NEXT:    ret
  %c = fptrunc float %b to half
  %t = call half @llvm.copysign.f16(half %a, half %c)
  ret half %t
}

define half @fold_demote_h_d(half %a, double %b) nounwind {
; RV32I-LABEL: fold_demote_h_d:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp)
; RV32I-NEXT:    sw s0, 8(sp)
; RV32I-NEXT:    mv s0, a2
; RV32I-NEXT:    lui a1, 16
; RV32I-NEXT:    addi a1, a1, -1
; RV32I-NEXT:    and a0, a0, a1
; RV32I-NEXT:    call __gnu_h2f_ieee
; RV32I-NEXT:    lui a1, 524288
; RV32I-NEXT:    and a2, s0, a1
; RV32I-NEXT:    addi a1, a1, -1
; RV32I-NEXT:    and a0, a0, a1
; RV32I-NEXT:    or a0, a0, a2
; RV32I-NEXT:    call __gnu_f2h_ieee
; RV32I-NEXT:    lw s0, 8(sp)
; RV32I-NEXT:    lw ra, 12(sp)
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fold_demote_h_d:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp)
; RV64I-NEXT:    sd s0, 0(sp)
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    lui a1, 16
; RV64I-NEXT:    addiw a1, a1, -1
; RV64I-NEXT:    and a0, a0, a1
; RV64I-NEXT:    call __gnu_h2f_ieee
; RV64I-NEXT:    lui a1, 524288
; RV64I-NEXT:    addiw a1, a1, -1
; RV64I-NEXT:    and a0, a0, a1
; RV64I-NEXT:    addi a1, zero, -1
; RV64I-NEXT:    slli a1, a1, 63
; RV64I-NEXT:    and a1, s0, a1
; RV64I-NEXT:    srli a1, a1, 32
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    call __gnu_f2h_ieee
; RV64I-NEXT:    ld s0, 0(sp)
; RV64I-NEXT:    ld ra, 8(sp)
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV32IF-LABEL: fold_demote_h_d:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp)
; RV32IF-NEXT:    sw s0, 8(sp)
; RV32IF-NEXT:    mv s0, a1
; RV32IF-NEXT:    call __gnu_f2h_ieee
; RV32IF-NEXT:    call __gnu_h2f_ieee
; RV32IF-NEXT:    fmv.w.x ft0, s0
; RV32IF-NEXT:    fsgnj.s fa0, fa0, ft0
; RV32IF-NEXT:    lw s0, 8(sp)
; RV32IF-NEXT:    lw ra, 12(sp)
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV32IFD-LABEL: fold_demote_h_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp)
; RV32IFD-NEXT:    fsd fs0, 0(sp)
; RV32IFD-NEXT:    fmv.d fs0, fa1
; RV32IFD-NEXT:    call __gnu_f2h_ieee
; RV32IFD-NEXT:    call __gnu_h2f_ieee
; RV32IFD-NEXT:    fcvt.s.d ft0, fs0
; RV32IFD-NEXT:    fsgnj.s fa0, fa0, ft0
; RV32IFD-NEXT:    fld fs0, 0(sp)
; RV32IFD-NEXT:    lw ra, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fold_demote_h_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp)
; RV64IFD-NEXT:    fsd fs0, 0(sp)
; RV64IFD-NEXT:    fmv.d fs0, fa1
; RV64IFD-NEXT:    call __gnu_f2h_ieee
; RV64IFD-NEXT:    call __gnu_h2f_ieee
; RV64IFD-NEXT:    fcvt.s.d ft0, fs0
; RV64IFD-NEXT:    fsgnj.s fa0, fa0, ft0
; RV64IFD-NEXT:    fld fs0, 0(sp)
; RV64IFD-NEXT:    ld ra, 8(sp)
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IFZFH-LABEL: fold_demote_h_d:
; RV32IFZFH:       # %bb.0:
; RV32IFZFH-NEXT:    srli a0, a1, 16
; RV32IFZFH-NEXT:    fmv.h.x ft0, a0
; RV32IFZFH-NEXT:    fsgnj.h fa0, fa0, ft0
; RV32IFZFH-NEXT:    ret
;
; RV32IFDZFH-LABEL: fold_demote_h_d:
; RV32IFDZFH:       # %bb.0:
; RV32IFDZFH-NEXT:    fcvt.h.d ft0, fa1
; RV32IFDZFH-NEXT:    fsgnj.h fa0, fa0, ft0
; RV32IFDZFH-NEXT:    ret
;
; RV64IFDZFH-LABEL: fold_demote_h_d:
; RV64IFDZFH:       # %bb.0:
; RV64IFDZFH-NEXT:    fcvt.h.d ft0, fa1
; RV64IFDZFH-NEXT:    fsgnj.h fa0, fa0, ft0
; RV64IFDZFH-NEXT:    ret
  %c = fptrunc double %b to half
  %t = call half @llvm.copysign.f16(half %a, half %c)
  ret half %t
}