; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple armv8a-none-none-eabihf -mattr=fullfp16 -asm-verbose=false < %s | FileCheck %s

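; With the full FP16 extension (+fullfp16), basic f16 arithmetic selects the
; native half-precision instructions (vadd.f16, vsub.f16, vmul.f16, vdiv.f16)
; instead of being promoted to f32.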
define void @test_fadd(half* %p, half* %q) {
; CHECK-LABEL: test_fadd:
; CHECK:         vldr.16 s0, [r1]
; CHECK-NEXT:    vldr.16 s2, [r0]
; CHECK-NEXT:    vadd.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %b = load half, half* %q, align 2
  %r = fadd half %a, %b
  store half %r, half* %p
  ret void
}

define void @test_fsub(half* %p, half* %q) {
; CHECK-LABEL: test_fsub:
; CHECK:         vldr.16 s0, [r1]
; CHECK-NEXT:    vldr.16 s2, [r0]
; CHECK-NEXT:    vsub.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %b = load half, half* %q, align 2
  %r = fsub half %a, %b
  store half %r, half* %p
  ret void
}

define void @test_fmul(half* %p, half* %q) {
; CHECK-LABEL: test_fmul:
; CHECK:         vldr.16 s0, [r1]
; CHECK-NEXT:    vldr.16 s2, [r0]
; CHECK-NEXT:    vmul.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %b = load half, half* %q, align 2
  %r = fmul half %a, %b
  store half %r, half* %p
  ret void
}

define void @test_fdiv(half* %p, half* %q) {
; CHECK-LABEL: test_fdiv:
; CHECK:         vldr.16 s0, [r1]
; CHECK-NEXT:    vldr.16 s2, [r0]
; CHECK-NEXT:    vdiv.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %b = load half, half* %q, align 2
  %r = fdiv half %a, %b
  store half %r, half* %p
  ret void
}

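; There is no f16 frem instruction: the operands are extended to f32 with
; vcvtb.f32.f16, the operation is lowered to a libcall to fmodf, and the
; result is truncated back to f16.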
define arm_aapcs_vfpcc void @test_frem(half* %p, half* %q) {
; CHECK-LABEL: test_frem:
; CHECK:         .save {r4, lr}
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    vldr.16 s0, [r0]
; CHECK-NEXT:    vldr.16 s2, [r1]
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
; CHECK-NEXT:    vcvtb.f32.f16 s1, s2
; CHECK-NEXT:    bl fmodf
; CHECK-NEXT:    vcvtb.f16.f32 s0, s0
; CHECK-NEXT:    vstr.16 s0, [r4]
; CHECK-NEXT:    pop {r4, pc}
  %a = load half, half* %p, align 2
  %b = load half, half* %q, align 2
  %r = frem half %a, %b
  store half %r, half* %p
  ret void
}

define void @test_load_store(half* %p, half* %q) {
; CHECK-LABEL: test_load_store:
; CHECK:         vldr.16 s0, [r0]
; CHECK-NEXT:    vstr.16 s0, [r1]
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  store half %a, half* %q
  ret void
}

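; f16 <-> i32 conversions use the half-precision vcvt forms directly
; (vcvt.s32.f16, vcvt.u32.f16, vcvt.f16.s32, vcvt.f16.u32).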
define i32 @test_fptosi_i32(half* %p) {
; CHECK-LABEL: test_fptosi_i32:
; CHECK:         vldr.16 s0, [r0]
; CHECK-NEXT:    vcvt.s32.f16 s0, s0
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %r = fptosi half %a to i32
  ret i32 %r
}

; FIXME: fptosi from half to i64 is not yet handled; enable this test once it is.
;define i64 @test_fptosi_i64(half* %p) {
;  %a = load half, half* %p, align 2
;  %r = fptosi half %a to i64
;  ret i64 %r
;}

define i32 @test_fptoui_i32(half* %p) {
; CHECK-LABEL: test_fptoui_i32:
; CHECK:         vldr.16 s0, [r0]
; CHECK-NEXT:    vcvt.u32.f16 s0, s0
; CHECK-NEXT:    vmov r0, s0
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %r = fptoui half %a to i32
  ret i32 %r
}

; FIXME: fptoui from half to i64 is not yet handled; enable this test once it is.
;define i64 @test_fptoui_i64(half* %p) {
;  %a = load half, half* %p, align 2
;  %r = fptoui half %a to i64
;  ret i64 %r
;}

define void @test_sitofp_i32(i32 %a, half* %p) {
; CHECK-LABEL: test_sitofp_i32:
; CHECK:         vmov s0, r0
; CHECK-NEXT:    vcvt.f16.s32 s0, s0
; CHECK-NEXT:    vstr.16 s0, [r1]
; CHECK-NEXT:    bx lr
  %r = sitofp i32 %a to half
  store half %r, half* %p
  ret void
}

define void @test_uitofp_i32(i32 %a, half* %p) {
; CHECK-LABEL: test_uitofp_i32:
; CHECK:         vmov s0, r0
; CHECK-NEXT:    vcvt.f16.u32 s0, s0
; CHECK-NEXT:    vstr.16 s0, [r1]
; CHECK-NEXT:    bx lr
  %r = uitofp i32 %a to half
  store half %r, half* %p
  ret void
}

; FIXME: sitofp from i64 to half is not yet handled; enable this test once it is.
;define void @test_sitofp_i64(i64 %a, half* %p) {
;  %r = sitofp i64 %a to half
;  store half %r, half* %p
;  ret void
;}

; FIXME: uitofp from i64 to half is not yet handled; enable this test once it is.
;define void @test_uitofp_i64(i64 %a, half* %p) {
;  %r = uitofp i64 %a to half
;  store half %r, half* %p
;  ret void
;}

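; fptrunc and fpext between f16 and f32/f64 lower to the vcvtb conversion
; instructions.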
define void @test_fptrunc_float(float %f, half* %p) {
; CHECK-LABEL: test_fptrunc_float:
; CHECK:         vcvtb.f16.f32 s0, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = fptrunc float %f to half
  store half %a, half* %p
  ret void
}

define void @test_fptrunc_double(double %d, half* %p) {
; CHECK-LABEL: test_fptrunc_double:
; CHECK:         vcvtb.f16.f64 s0, d0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = fptrunc double %d to half
  store half %a, half* %p
  ret void
}

define float @test_fpextend_float(half* %p) {
; CHECK-LABEL: test_fpextend_float:
; CHECK:         vldr.16 s0, [r0]
; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %r = fpext half %a to float
  ret float %r
}

define double @test_fpextend_double(half* %p) {
; CHECK-LABEL: test_fpextend_double:
; CHECK:         vldr.16 s0, [r0]
; CHECK-NEXT:    vcvtb.f64.f16 d0, s0
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %r = fpext half %a to double
  ret double %r
}

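; Bitcasts between half and i16 need no FP registers at all; they fold into
; plain integer loads and stores (ldrh/strh).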
define i16 @test_bitcast_halftoi16(half* %p) {
; CHECK-LABEL: test_bitcast_halftoi16:
; CHECK:         ldrh r0, [r0]
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %r = bitcast half %a to i16
  ret i16 %r
}

define void @test_bitcast_i16tohalf(i16 %a, half* %p) {
; CHECK-LABEL: test_bitcast_i16tohalf:
; CHECK:         strh r0, [r1]
; CHECK-NEXT:    bx lr
  %r = bitcast i16 %a to half
  store half %r, half* %p
  ret void
}

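; Unlike the libm-backed intrinsics further down, sqrt has a native
; half-precision instruction (vsqrt.f16).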
define void @test_sqrt(half* %p) {
; CHECK-LABEL: test_sqrt:
; CHECK:         vldr.16 s0, [r0]
; CHECK-NEXT:    vsqrt.f16 s0, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %r = call half @llvm.sqrt.f16(half %a)
  store half %r, half* %p
  ret void
}

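; The intrinsics below (powi, sin, cos, pow, exp, exp2, log, log10, log2) have
; neither an f16 instruction nor an f16 library routine, so each argument is
; extended to f32, the single-precision library function is called, and the
; result is truncated back to f16.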
define void @test_fpowi(half* %p, i32 %b) {
; CHECK-LABEL: test_fpowi:
; CHECK:         .save {r4, lr}
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    vldr.16 s0, [r0]
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
; CHECK-NEXT:    bl __powisf2
; CHECK-NEXT:    vcvtb.f16.f32 s0, s0
; CHECK-NEXT:    vstr.16 s0, [r4]
; CHECK-NEXT:    pop {r4, pc}
  %a = load half, half* %p, align 2
  %r = call half @llvm.powi.f16(half %a, i32 %b)
  store half %r, half* %p
  ret void
}

define void @test_sin(half* %p) {
; CHECK-LABEL: test_sin:
; CHECK:         .save {r4, lr}
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    vldr.16 s0, [r0]
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
; CHECK-NEXT:    bl sinf
; CHECK-NEXT:    vcvtb.f16.f32 s0, s0
; CHECK-NEXT:    vstr.16 s0, [r4]
; CHECK-NEXT:    pop {r4, pc}
  %a = load half, half* %p, align 2
  %r = call half @llvm.sin.f16(half %a)
  store half %r, half* %p
  ret void
}

define void @test_cos(half* %p) {
; CHECK-LABEL: test_cos:
; CHECK:         .save {r4, lr}
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    vldr.16 s0, [r0]
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
; CHECK-NEXT:    bl cosf
; CHECK-NEXT:    vcvtb.f16.f32 s0, s0
; CHECK-NEXT:    vstr.16 s0, [r4]
; CHECK-NEXT:    pop {r4, pc}
  %a = load half, half* %p, align 2
  %r = call half @llvm.cos.f16(half %a)
  store half %r, half* %p
  ret void
}

define void @test_pow(half* %p, half* %q) {
; CHECK-LABEL: test_pow:
; CHECK:         .save {r4, lr}
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    vldr.16 s0, [r0]
; CHECK-NEXT:    vldr.16 s2, [r1]
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
; CHECK-NEXT:    vcvtb.f32.f16 s1, s2
; CHECK-NEXT:    bl powf
; CHECK-NEXT:    vcvtb.f16.f32 s0, s0
; CHECK-NEXT:    vstr.16 s0, [r4]
; CHECK-NEXT:    pop {r4, pc}
  %a = load half, half* %p, align 2
  %b = load half, half* %q, align 2
  %r = call half @llvm.pow.f16(half %a, half %b)
  store half %r, half* %p
  ret void
}

define void @test_exp(half* %p) {
; CHECK-LABEL: test_exp:
; CHECK:         .save {r4, lr}
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    vldr.16 s0, [r0]
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
; CHECK-NEXT:    bl expf
; CHECK-NEXT:    vcvtb.f16.f32 s0, s0
; CHECK-NEXT:    vstr.16 s0, [r4]
; CHECK-NEXT:    pop {r4, pc}
  %a = load half, half* %p, align 2
  %r = call half @llvm.exp.f16(half %a)
  store half %r, half* %p
  ret void
}

define void @test_exp2(half* %p) {
; CHECK-LABEL: test_exp2:
; CHECK:         .save {r4, lr}
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    vldr.16 s0, [r0]
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
; CHECK-NEXT:    bl exp2f
; CHECK-NEXT:    vcvtb.f16.f32 s0, s0
; CHECK-NEXT:    vstr.16 s0, [r4]
; CHECK-NEXT:    pop {r4, pc}
  %a = load half, half* %p, align 2
  %r = call half @llvm.exp2.f16(half %a)
  store half %r, half* %p
  ret void
}

define void @test_log(half* %p) {
; CHECK-LABEL: test_log:
; CHECK:         .save {r4, lr}
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    vldr.16 s0, [r0]
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
; CHECK-NEXT:    bl logf
; CHECK-NEXT:    vcvtb.f16.f32 s0, s0
; CHECK-NEXT:    vstr.16 s0, [r4]
; CHECK-NEXT:    pop {r4, pc}
  %a = load half, half* %p, align 2
  %r = call half @llvm.log.f16(half %a)
  store half %r, half* %p
  ret void
}

define void @test_log10(half* %p) {
; CHECK-LABEL: test_log10:
; CHECK:         .save {r4, lr}
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    vldr.16 s0, [r0]
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
; CHECK-NEXT:    bl log10f
; CHECK-NEXT:    vcvtb.f16.f32 s0, s0
; CHECK-NEXT:    vstr.16 s0, [r4]
; CHECK-NEXT:    pop {r4, pc}
  %a = load half, half* %p, align 2
  %r = call half @llvm.log10.f16(half %a)
  store half %r, half* %p
  ret void
}

define void @test_log2(half* %p) {
; CHECK-LABEL: test_log2:
; CHECK:         .save {r4, lr}
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    vldr.16 s0, [r0]
; CHECK-NEXT:    mov r4, r0
; CHECK-NEXT:    vcvtb.f32.f16 s0, s0
; CHECK-NEXT:    bl log2f
; CHECK-NEXT:    vcvtb.f16.f32 s0, s0
; CHECK-NEXT:    vstr.16 s0, [r4]
; CHECK-NEXT:    pop {r4, pc}
  %a = load half, half* %p, align 2
  %r = call half @llvm.log2.f16(half %a)
  store half %r, half* %p
  ret void
}

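; llvm.fma.f16 maps onto the fused multiply-add instruction vfma.f16.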
define void @test_fma(half* %p, half* %q, half* %r) {
; CHECK-LABEL: test_fma:
; CHECK:         vldr.16 s0, [r1]
; CHECK-NEXT:    vldr.16 s2, [r0]
; CHECK-NEXT:    vldr.16 s4, [r2]
; CHECK-NEXT:    vfma.f16 s4, s2, s0
; CHECK-NEXT:    vstr.16 s4, [r0]
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %b = load half, half* %q, align 2
  %c = load half, half* %r, align 2
  %v = call half @llvm.fma.f16(half %a, half %b, half %c)
  store half %v, half* %p
  ret void
}

define void @test_fabs(half* %p) {
; CHECK-LABEL: test_fabs:
; CHECK:         vldr.16 s0, [r0]
; CHECK-NEXT:    vabs.f16 s0, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %r = call half @llvm.fabs.f16(half %a)
  store half %r, half* %p
  ret void
}

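; minnum/maxnum map onto the IEEE 754-2008 minNum/maxNum instructions
; vminnm.f16/vmaxnm.f16.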
define void @test_minnum(half* %p, half* %q) {
; CHECK-LABEL: test_minnum:
; CHECK:         vldr.16 s0, [r1]
; CHECK-NEXT:    vldr.16 s2, [r0]
; CHECK-NEXT:    vminnm.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %b = load half, half* %q, align 2
  %r = call half @llvm.minnum.f16(half %a, half %b)
  store half %r, half* %p
  ret void
}

define void @test_maxnum(half* %p, half* %q) {
; CHECK-LABEL: test_maxnum:
; CHECK:         vldr.16 s0, [r1]
; CHECK-NEXT:    vldr.16 s2, [r0]
; CHECK-NEXT:    vmaxnm.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %b = load half, half* %q, align 2
  %r = call half @llvm.maxnum.f16(half %a, half %b)
  store half %r, half* %p
  ret void
}

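; Select-based min/max patterns lower to vcmp/vmrs plus a conditional vsel
; rather than vminnm/vmaxnm, since the unordered fcmp plus select has
; different NaN semantics.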
define void @test_minimum(half* %p) {
; CHECK-LABEL: test_minimum:
; CHECK:         vldr.16 s2, [r0]
; CHECK-NEXT:    vmov.f16 s0, #1.000000e+00
; CHECK-NEXT:    vcmp.f16 s2, s0
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %c = fcmp ult half %a, 1.0
  %r = select i1 %c, half %a, half 1.0
  store half %r, half* %p
  ret void
}

define void @test_maximum(half* %p) {
; CHECK-LABEL: test_maximum:
; CHECK:         vldr.16 s2, [r0]
; CHECK-NEXT:    vmov.f16 s0, #1.000000e+00
; CHECK-NEXT:    vcmp.f16 s0, s2
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %c = fcmp ugt half %a, 1.0
  %r = select i1 %c, half %a, half 1.0
  store half %r, half* %p
  ret void
}

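; copysign has no f16 instruction: the sign bit of %q's value is read back
; through a stack slot (ldrb of the high byte), and the result is selected
; between vabs and vneg of %p's value with vseleq.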
define void @test_copysign(half* %p, half* %q) {
; CHECK-LABEL: test_copysign:
; CHECK:         .pad #4
; CHECK-NEXT:    sub sp, sp, #4
; CHECK-NEXT:    vldr.16 s0, [r1]
; CHECK-NEXT:    vstr.16 s0, [sp]
; CHECK-NEXT:    vldr.16 s0, [r0]
; CHECK-NEXT:    ldrb r1, [sp, #1]
; CHECK-NEXT:    ands r1, r1, #128
; CHECK-NEXT:    vabs.f16 s0, s0
; CHECK-NEXT:    movwne r1, #1
; CHECK-NEXT:    vneg.f16 s2, s0
; CHECK-NEXT:    cmp r1, #0
; CHECK-NEXT:    vseleq.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    add sp, sp, #4
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %b = load half, half* %q, align 2
  %r = call half @llvm.copysign.f16(half %a, half %b)
  store half %r, half* %p
  ret void
}

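; The rounding intrinsics map onto the directed vrint forms: vrintm (floor),
; vrintp (ceil), vrintz (trunc), vrintx (rint), vrintr (nearbyint) and
; vrinta (round).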
define void @test_floor(half* %p) {
; CHECK-LABEL: test_floor:
; CHECK:         vldr.16 s0, [r0]
; CHECK-NEXT:    vrintm.f16 s0, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %r = call half @llvm.floor.f16(half %a)
  store half %r, half* %p
  ret void
}

define void @test_ceil(half* %p) {
; CHECK-LABEL: test_ceil:
; CHECK:         vldr.16 s0, [r0]
; CHECK-NEXT:    vrintp.f16 s0, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %r = call half @llvm.ceil.f16(half %a)
  store half %r, half* %p
  ret void
}

define void @test_trunc(half* %p) {
; CHECK-LABEL: test_trunc:
; CHECK:         vldr.16 s0, [r0]
; CHECK-NEXT:    vrintz.f16 s0, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %r = call half @llvm.trunc.f16(half %a)
  store half %r, half* %p
  ret void
}

define void @test_rint(half* %p) {
; CHECK-LABEL: test_rint:
; CHECK:         vldr.16 s0, [r0]
; CHECK-NEXT:    vrintx.f16 s0, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %r = call half @llvm.rint.f16(half %a)
  store half %r, half* %p
  ret void
}

define void @test_nearbyint(half* %p) {
; CHECK-LABEL: test_nearbyint:
; CHECK:         vldr.16 s0, [r0]
; CHECK-NEXT:    vrintr.f16 s0, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %r = call half @llvm.nearbyint.f16(half %a)
  store half %r, half* %p
  ret void
}

define void @test_round(half* %p) {
; CHECK-LABEL: test_round:
; CHECK:         vldr.16 s0, [r0]
; CHECK-NEXT:    vrinta.f16 s0, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %r = call half @llvm.round.f16(half %a)
  store half %r, half* %p
  ret void
}

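; llvm.fmuladd.f16 is contracted to the same vfma.f16 sequence as
; llvm.fma.f16.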
define void @test_fmuladd(half* %p, half* %q, half* %r) {
; CHECK-LABEL: test_fmuladd:
; CHECK:         vldr.16 s0, [r1]
; CHECK-NEXT:    vldr.16 s2, [r0]
; CHECK-NEXT:    vldr.16 s4, [r2]
; CHECK-NEXT:    vfma.f16 s4, s2, s0
; CHECK-NEXT:    vstr.16 s4, [r0]
; CHECK-NEXT:    bx lr
  %a = load half, half* %p, align 2
  %b = load half, half* %q, align 2
  %c = load half, half* %r, align 2
  %v = call half @llvm.fmuladd.f16(half %a, half %b, half %c)
  store half %v, half* %p
  ret void
}

declare half @llvm.sqrt.f16(half %a)
declare half @llvm.powi.f16(half %a, i32 %b)
declare half @llvm.sin.f16(half %a)
declare half @llvm.cos.f16(half %a)
declare half @llvm.pow.f16(half %a, half %b)
declare half @llvm.exp.f16(half %a)
declare half @llvm.exp2.f16(half %a)
declare half @llvm.log.f16(half %a)
declare half @llvm.log10.f16(half %a)
declare half @llvm.log2.f16(half %a)
declare half @llvm.fma.f16(half %a, half %b, half %c)
declare half @llvm.fabs.f16(half %a)
declare half @llvm.minnum.f16(half %a, half %b)
declare half @llvm.maxnum.f16(half %a, half %b)
declare half @llvm.copysign.f16(half %a, half %b)
declare half @llvm.floor.f16(half %a)
declare half @llvm.ceil.f16(half %a)
declare half @llvm.trunc.f16(half %a)
declare half @llvm.rint.f16(half %a)
declare half @llvm.nearbyint.f16(half %a)
declare half @llvm.round.f16(half %a)
declare half @llvm.fmuladd.f16(half %a, half %b, half %c)
