1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=SSE-X86
3; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=SSE-X64
4; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=AVX-X86,AVX1-X86
5; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=AVX-X64,AVX1-X64
6; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=AVX-X86,AVX512-X86
7; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=AVX-X64,AVX512-X64
8; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-sse -O3 | FileCheck %s --check-prefixes=X87
9
10declare float @llvm.experimental.constrained.sitofp.f32.i1(i1, metadata, metadata)
11declare float @llvm.experimental.constrained.sitofp.f32.i8(i8, metadata, metadata)
12declare float @llvm.experimental.constrained.sitofp.f32.i16(i16, metadata, metadata)
13declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
14declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)
15declare float @llvm.experimental.constrained.uitofp.f32.i1(i1, metadata, metadata)
16declare float @llvm.experimental.constrained.uitofp.f32.i8(i8, metadata, metadata)
17declare float @llvm.experimental.constrained.uitofp.f32.i16(i16, metadata, metadata)
18declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
19declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)
20
21declare double @llvm.experimental.constrained.sitofp.f64.i1(i1, metadata, metadata)
22declare double @llvm.experimental.constrained.sitofp.f64.i8(i8, metadata, metadata)
23declare double @llvm.experimental.constrained.sitofp.f64.i16(i16, metadata, metadata)
24declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
25declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
26declare double @llvm.experimental.constrained.uitofp.f64.i1(i1, metadata, metadata)
27declare double @llvm.experimental.constrained.uitofp.f64.i8(i8, metadata, metadata)
28declare double @llvm.experimental.constrained.uitofp.f64.i16(i16, metadata, metadata)
29declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
30declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)
31
define float @sitofp_i1tof32(i1 %x) #0 {
; SSE-X86-LABEL: sitofp_i1tof32:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %eax
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; SSE-X86-NEXT:    andb $1, %al
; SSE-X86-NEXT:    negb %al
; SSE-X86-NEXT:    movsbl %al, %eax
; SSE-X86-NEXT:    cvtsi2ss %eax, %xmm0
; SSE-X86-NEXT:    movss %xmm0, (%esp)
; SSE-X86-NEXT:    flds (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    popl %eax
; SSE-X86-NEXT:    .cfi_def_cfa_offset 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: sitofp_i1tof32:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    andb $1, %dil
; SSE-X64-NEXT:    negb %dil
; SSE-X64-NEXT:    movsbl %dil, %eax
; SSE-X64-NEXT:    cvtsi2ss %eax, %xmm0
; SSE-X64-NEXT:    retq
;
; AVX-X86-LABEL: sitofp_i1tof32:
; AVX-X86:       # %bb.0:
; AVX-X86-NEXT:    pushl %eax
; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX-X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; AVX-X86-NEXT:    andb $1, %al
; AVX-X86-NEXT:    negb %al
; AVX-X86-NEXT:    movsbl %al, %eax
; AVX-X86-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
; AVX-X86-NEXT:    vmovss %xmm0, (%esp)
; AVX-X86-NEXT:    flds (%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    popl %eax
; AVX-X86-NEXT:    .cfi_def_cfa_offset 4
; AVX-X86-NEXT:    retl
;
; AVX-X64-LABEL: sitofp_i1tof32:
; AVX-X64:       # %bb.0:
; AVX-X64-NEXT:    andb $1, %dil
; AVX-X64-NEXT:    negb %dil
; AVX-X64-NEXT:    movsbl %dil, %eax
; AVX-X64-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
; AVX-X64-NEXT:    retq
;
; X87-LABEL: sitofp_i1tof32:
; X87:       # %bb.0:
; X87-NEXT:    pushl %eax
; X87-NEXT:    .cfi_def_cfa_offset 8
; X87-NEXT:    movb {{[0-9]+}}(%esp), %al
; X87-NEXT:    andb $1, %al
; X87-NEXT:    negb %al
; X87-NEXT:    movsbl %al, %eax
; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
; X87-NEXT:    filds {{[0-9]+}}(%esp)
; X87-NEXT:    wait
; X87-NEXT:    popl %eax
; X87-NEXT:    .cfi_def_cfa_offset 4
; X87-NEXT:    retl
  ; Strict signed i1 -> f32: the checks above show the i1 is masked to one
  ; bit, negated, and sign-extended (and/neg/movsbl) before the scalar
  ; int-to-float conversion, honoring the dynamic rounding mode and strict
  ; FP-exception semantics requested by the metadata operands.
  %result = call float @llvm.experimental.constrained.sitofp.f32.i1(i1 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret float %result
}
100
define float @sitofp_i8tof32(i8 %x) #0 {
; SSE-X86-LABEL: sitofp_i8tof32:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %eax
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
; SSE-X86-NEXT:    cvtsi2ss %eax, %xmm0
; SSE-X86-NEXT:    movss %xmm0, (%esp)
; SSE-X86-NEXT:    flds (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    popl %eax
; SSE-X86-NEXT:    .cfi_def_cfa_offset 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: sitofp_i8tof32:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    movsbl %dil, %eax
; SSE-X64-NEXT:    cvtsi2ss %eax, %xmm0
; SSE-X64-NEXT:    retq
;
; AVX-X86-LABEL: sitofp_i8tof32:
; AVX-X86:       # %bb.0:
; AVX-X86-NEXT:    pushl %eax
; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX-X86-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
; AVX-X86-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
; AVX-X86-NEXT:    vmovss %xmm0, (%esp)
; AVX-X86-NEXT:    flds (%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    popl %eax
; AVX-X86-NEXT:    .cfi_def_cfa_offset 4
; AVX-X86-NEXT:    retl
;
; AVX-X64-LABEL: sitofp_i8tof32:
; AVX-X64:       # %bb.0:
; AVX-X64-NEXT:    movsbl %dil, %eax
; AVX-X64-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
; AVX-X64-NEXT:    retq
;
; X87-LABEL: sitofp_i8tof32:
; X87:       # %bb.0:
; X87-NEXT:    pushl %eax
; X87-NEXT:    .cfi_def_cfa_offset 8
; X87-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
; X87-NEXT:    filds {{[0-9]+}}(%esp)
; X87-NEXT:    wait
; X87-NEXT:    popl %eax
; X87-NEXT:    .cfi_def_cfa_offset 4
; X87-NEXT:    retl
  ; Strict signed i8 -> f32: lowered as a sign-extending byte load/move
  ; (movsbl) followed by a 32-bit scalar int-to-float conversion on every
  ; target; on X87 the widened value is converted via filds.
  %result = call float @llvm.experimental.constrained.sitofp.f32.i8(i8 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret float %result
}
156
define float @sitofp_i16tof32(i16 %x) #0 {
; SSE-X86-LABEL: sitofp_i16tof32:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %eax
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    movswl {{[0-9]+}}(%esp), %eax
; SSE-X86-NEXT:    cvtsi2ss %eax, %xmm0
; SSE-X86-NEXT:    movss %xmm0, (%esp)
; SSE-X86-NEXT:    flds (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    popl %eax
; SSE-X86-NEXT:    .cfi_def_cfa_offset 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: sitofp_i16tof32:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    movswl %di, %eax
; SSE-X64-NEXT:    cvtsi2ss %eax, %xmm0
; SSE-X64-NEXT:    retq
;
; AVX-X86-LABEL: sitofp_i16tof32:
; AVX-X86:       # %bb.0:
; AVX-X86-NEXT:    pushl %eax
; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX-X86-NEXT:    movswl {{[0-9]+}}(%esp), %eax
; AVX-X86-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
; AVX-X86-NEXT:    vmovss %xmm0, (%esp)
; AVX-X86-NEXT:    flds (%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    popl %eax
; AVX-X86-NEXT:    .cfi_def_cfa_offset 4
; AVX-X86-NEXT:    retl
;
; AVX-X64-LABEL: sitofp_i16tof32:
; AVX-X64:       # %bb.0:
; AVX-X64-NEXT:    movswl %di, %eax
; AVX-X64-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
; AVX-X64-NEXT:    retq
;
; X87-LABEL: sitofp_i16tof32:
; X87:       # %bb.0:
; X87-NEXT:    pushl %eax
; X87-NEXT:    .cfi_def_cfa_offset 8
; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
; X87-NEXT:    filds {{[0-9]+}}(%esp)
; X87-NEXT:    wait
; X87-NEXT:    popl %eax
; X87-NEXT:    .cfi_def_cfa_offset 4
; X87-NEXT:    retl
  ; Strict signed i16 -> f32: SSE/AVX sign-extend to 32 bits (movswl) and use
  ; a scalar convert; X87 can convert a 16-bit integer directly, so it simply
  ; spills the halfword and uses filds.
  %result = call float @llvm.experimental.constrained.sitofp.f32.i16(i16 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret float %result
}
212
define float @sitofp_i32tof32(i32 %x) #0 {
; SSE-X86-LABEL: sitofp_i32tof32:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %eax
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    cvtsi2ssl {{[0-9]+}}(%esp), %xmm0
; SSE-X86-NEXT:    movss %xmm0, (%esp)
; SSE-X86-NEXT:    flds (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    popl %eax
; SSE-X86-NEXT:    .cfi_def_cfa_offset 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: sitofp_i32tof32:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    cvtsi2ss %edi, %xmm0
; SSE-X64-NEXT:    retq
;
; AVX-X86-LABEL: sitofp_i32tof32:
; AVX-X86:       # %bb.0:
; AVX-X86-NEXT:    pushl %eax
; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX-X86-NEXT:    vcvtsi2ssl {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-X86-NEXT:    vmovss %xmm0, (%esp)
; AVX-X86-NEXT:    flds (%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    popl %eax
; AVX-X86-NEXT:    .cfi_def_cfa_offset 4
; AVX-X86-NEXT:    retl
;
; AVX-X64-LABEL: sitofp_i32tof32:
; AVX-X64:       # %bb.0:
; AVX-X64-NEXT:    vcvtsi2ss %edi, %xmm0, %xmm0
; AVX-X64-NEXT:    retq
;
; X87-LABEL: sitofp_i32tof32:
; X87:       # %bb.0:
; X87-NEXT:    pushl %eax
; X87-NEXT:    .cfi_def_cfa_offset 8
; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X87-NEXT:    movl %eax, (%esp)
; X87-NEXT:    fildl (%esp)
; X87-NEXT:    wait
; X87-NEXT:    popl %eax
; X87-NEXT:    .cfi_def_cfa_offset 4
; X87-NEXT:    retl
  ; Strict signed i32 -> f32: the natural case — a single (v)cvtsi2ss on
  ; SSE/AVX (directly from memory on 32-bit targets) or fildl on X87; i686
  ; configurations still round-trip through the stack to return in st(0).
  %result = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret float %result
}
264
define float @sitofp_i64tof32(i64 %x) #0 {
; SSE-X86-LABEL: sitofp_i64tof32:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %eax
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    fildll {{[0-9]+}}(%esp)
; SSE-X86-NEXT:    fstps (%esp)
; SSE-X86-NEXT:    flds (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    popl %eax
; SSE-X86-NEXT:    .cfi_def_cfa_offset 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: sitofp_i64tof32:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    cvtsi2ss %rdi, %xmm0
; SSE-X64-NEXT:    retq
;
; AVX-X86-LABEL: sitofp_i64tof32:
; AVX-X86:       # %bb.0:
; AVX-X86-NEXT:    pushl %eax
; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX-X86-NEXT:    fildll {{[0-9]+}}(%esp)
; AVX-X86-NEXT:    fstps (%esp)
; AVX-X86-NEXT:    flds (%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    popl %eax
; AVX-X86-NEXT:    .cfi_def_cfa_offset 4
; AVX-X86-NEXT:    retl
;
; X87-LABEL: sitofp_i64tof32:
; X87:       # %bb.0:
; X87-NEXT:    fildll {{[0-9]+}}(%esp)
; X87-NEXT:    wait
; X87-NEXT:    retl
  ; Strict signed i64 -> f32: 64-bit targets use the 64-bit scalar convert
  ; (cvtsi2ss %rdi); 32-bit targets fall back to the x87 fildll, with the
  ; SSE/AVX configurations storing/reloading as float to force the f32
  ; rounding step under the strict-FP constraints.
  %result = call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret float %result
}
310
define float @uitofp_i1tof32(i1 %x) #0 {
; SSE-X86-LABEL: uitofp_i1tof32:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %eax
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; SSE-X86-NEXT:    andb $1, %al
; SSE-X86-NEXT:    movzbl %al, %eax
; SSE-X86-NEXT:    cvtsi2ss %eax, %xmm0
; SSE-X86-NEXT:    movss %xmm0, (%esp)
; SSE-X86-NEXT:    flds (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    popl %eax
; SSE-X86-NEXT:    .cfi_def_cfa_offset 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: uitofp_i1tof32:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    andl $1, %edi
; SSE-X64-NEXT:    cvtsi2ss %edi, %xmm0
; SSE-X64-NEXT:    retq
;
; AVX-X86-LABEL: uitofp_i1tof32:
; AVX-X86:       # %bb.0:
; AVX-X86-NEXT:    pushl %eax
; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX-X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; AVX-X86-NEXT:    andb $1, %al
; AVX-X86-NEXT:    movzbl %al, %eax
; AVX-X86-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
; AVX-X86-NEXT:    vmovss %xmm0, (%esp)
; AVX-X86-NEXT:    flds (%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    popl %eax
; AVX-X86-NEXT:    .cfi_def_cfa_offset 4
; AVX-X86-NEXT:    retl
;
; AVX-X64-LABEL: uitofp_i1tof32:
; AVX-X64:       # %bb.0:
; AVX-X64-NEXT:    andl $1, %edi
; AVX-X64-NEXT:    vcvtsi2ss %edi, %xmm0, %xmm0
; AVX-X64-NEXT:    retq
;
; X87-LABEL: uitofp_i1tof32:
; X87:       # %bb.0:
; X87-NEXT:    pushl %eax
; X87-NEXT:    .cfi_def_cfa_offset 8
; X87-NEXT:    movb {{[0-9]+}}(%esp), %al
; X87-NEXT:    andb $1, %al
; X87-NEXT:    movzbl %al, %eax
; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
; X87-NEXT:    filds {{[0-9]+}}(%esp)
; X87-NEXT:    wait
; X87-NEXT:    popl %eax
; X87-NEXT:    .cfi_def_cfa_offset 4
; X87-NEXT:    retl
  ; Strict unsigned i1 -> f32: unlike the signed i1 case there is no negb —
  ; the bit is masked and zero-extended (and/movzbl) to 0 or 1, then
  ; converted with the signed scalar convert, which is safe for these values.
  %result = call float @llvm.experimental.constrained.uitofp.f32.i1(i1 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret float %result
}
372
define float @uitofp_i8tof32(i8 %x) #0 {
; SSE-X86-LABEL: uitofp_i8tof32:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %eax
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; SSE-X86-NEXT:    cvtsi2ss %eax, %xmm0
; SSE-X86-NEXT:    movss %xmm0, (%esp)
; SSE-X86-NEXT:    flds (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    popl %eax
; SSE-X86-NEXT:    .cfi_def_cfa_offset 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: uitofp_i8tof32:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    movzbl %dil, %eax
; SSE-X64-NEXT:    cvtsi2ss %eax, %xmm0
; SSE-X64-NEXT:    retq
;
; AVX-X86-LABEL: uitofp_i8tof32:
; AVX-X86:       # %bb.0:
; AVX-X86-NEXT:    pushl %eax
; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX-X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; AVX-X86-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
; AVX-X86-NEXT:    vmovss %xmm0, (%esp)
; AVX-X86-NEXT:    flds (%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    popl %eax
; AVX-X86-NEXT:    .cfi_def_cfa_offset 4
; AVX-X86-NEXT:    retl
;
; AVX-X64-LABEL: uitofp_i8tof32:
; AVX-X64:       # %bb.0:
; AVX-X64-NEXT:    movzbl %dil, %eax
; AVX-X64-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
; AVX-X64-NEXT:    retq
;
; X87-LABEL: uitofp_i8tof32:
; X87:       # %bb.0:
; X87-NEXT:    pushl %eax
; X87-NEXT:    .cfi_def_cfa_offset 8
; X87-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
; X87-NEXT:    filds {{[0-9]+}}(%esp)
; X87-NEXT:    wait
; X87-NEXT:    popl %eax
; X87-NEXT:    .cfi_def_cfa_offset 4
; X87-NEXT:    retl
  ; Strict unsigned i8 -> f32: zero-extend (movzbl, vs. movsbl in the signed
  ; variant) then use the signed 32-bit scalar convert, which is exact for
  ; all u8 values.
  %result = call float @llvm.experimental.constrained.uitofp.f32.i8(i8 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret float %result
}
428
define float @uitofp_i16tof32(i16 %x) #0 {
; SSE-X86-LABEL: uitofp_i16tof32:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %eax
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; SSE-X86-NEXT:    cvtsi2ss %eax, %xmm0
; SSE-X86-NEXT:    movss %xmm0, (%esp)
; SSE-X86-NEXT:    flds (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    popl %eax
; SSE-X86-NEXT:    .cfi_def_cfa_offset 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: uitofp_i16tof32:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    movzwl %di, %eax
; SSE-X64-NEXT:    cvtsi2ss %eax, %xmm0
; SSE-X64-NEXT:    retq
;
; AVX-X86-LABEL: uitofp_i16tof32:
; AVX-X86:       # %bb.0:
; AVX-X86-NEXT:    pushl %eax
; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX-X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; AVX-X86-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
; AVX-X86-NEXT:    vmovss %xmm0, (%esp)
; AVX-X86-NEXT:    flds (%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    popl %eax
; AVX-X86-NEXT:    .cfi_def_cfa_offset 4
; AVX-X86-NEXT:    retl
;
; AVX-X64-LABEL: uitofp_i16tof32:
; AVX-X64:       # %bb.0:
; AVX-X64-NEXT:    movzwl %di, %eax
; AVX-X64-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
; AVX-X64-NEXT:    retq
;
; X87-LABEL: uitofp_i16tof32:
; X87:       # %bb.0:
; X87-NEXT:    pushl %eax
; X87-NEXT:    .cfi_def_cfa_offset 8
; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; X87-NEXT:    movl %eax, (%esp)
; X87-NEXT:    fildl (%esp)
; X87-NEXT:    wait
; X87-NEXT:    popl %eax
; X87-NEXT:    .cfi_def_cfa_offset 4
; X87-NEXT:    retl
  ; Strict unsigned i16 -> f32: zero-extend to 32 bits (movzwl) then convert
  ; as signed. Note X87 must widen to a 32-bit slot and use fildl here
  ; (filds would reinterpret values >= 0x8000 as negative).
  %result = call float @llvm.experimental.constrained.uitofp.f32.i16(i16 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret float %result
}
484
define float @uitofp_i32tof32(i32 %x) #0 {
; SSE-X86-LABEL: uitofp_i32tof32:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %eax
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-X86-NEXT:    orpd {{\.LCPI.*}}, %xmm0
; SSE-X86-NEXT:    subsd {{\.LCPI.*}}, %xmm0
; SSE-X86-NEXT:    cvtsd2ss %xmm0, %xmm0
; SSE-X86-NEXT:    movss %xmm0, (%esp)
; SSE-X86-NEXT:    flds (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    popl %eax
; SSE-X86-NEXT:    .cfi_def_cfa_offset 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: uitofp_i32tof32:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    movl %edi, %eax
; SSE-X64-NEXT:    cvtsi2ss %rax, %xmm0
; SSE-X64-NEXT:    retq
;
; AVX1-X86-LABEL: uitofp_i32tof32:
; AVX1-X86:       # %bb.0:
; AVX1-X86-NEXT:    pushl %eax
; AVX1-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX1-X86-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX1-X86-NEXT:    vorpd {{\.LCPI.*}}, %xmm0, %xmm0
; AVX1-X86-NEXT:    vsubsd {{\.LCPI.*}}, %xmm0, %xmm0
; AVX1-X86-NEXT:    vcvtsd2ss %xmm0, %xmm0, %xmm0
; AVX1-X86-NEXT:    vmovss %xmm0, (%esp)
; AVX1-X86-NEXT:    flds (%esp)
; AVX1-X86-NEXT:    wait
; AVX1-X86-NEXT:    popl %eax
; AVX1-X86-NEXT:    .cfi_def_cfa_offset 4
; AVX1-X86-NEXT:    retl
;
; AVX1-X64-LABEL: uitofp_i32tof32:
; AVX1-X64:       # %bb.0:
; AVX1-X64-NEXT:    movl %edi, %eax
; AVX1-X64-NEXT:    vcvtsi2ss %rax, %xmm0, %xmm0
; AVX1-X64-NEXT:    retq
;
; AVX512-X86-LABEL: uitofp_i32tof32:
; AVX512-X86:       # %bb.0:
; AVX512-X86-NEXT:    pushl %eax
; AVX512-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX512-X86-NEXT:    vcvtusi2ssl {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX512-X86-NEXT:    vmovss %xmm0, (%esp)
; AVX512-X86-NEXT:    flds (%esp)
; AVX512-X86-NEXT:    wait
; AVX512-X86-NEXT:    popl %eax
; AVX512-X86-NEXT:    .cfi_def_cfa_offset 4
; AVX512-X86-NEXT:    retl
;
; AVX512-X64-LABEL: uitofp_i32tof32:
; AVX512-X64:       # %bb.0:
; AVX512-X64-NEXT:    vcvtusi2ss %edi, %xmm0, %xmm0
; AVX512-X64-NEXT:    retq
;
; X87-LABEL: uitofp_i32tof32:
; X87:       # %bb.0:
; X87-NEXT:    pushl %ebp
; X87-NEXT:    .cfi_def_cfa_offset 8
; X87-NEXT:    .cfi_offset %ebp, -8
; X87-NEXT:    movl %esp, %ebp
; X87-NEXT:    .cfi_def_cfa_register %ebp
; X87-NEXT:    andl $-8, %esp
; X87-NEXT:    subl $8, %esp
; X87-NEXT:    movl 8(%ebp), %eax
; X87-NEXT:    movl %eax, (%esp)
; X87-NEXT:    movl $0, {{[0-9]+}}(%esp)
; X87-NEXT:    fildll (%esp)
; X87-NEXT:    wait
; X87-NEXT:    movl %ebp, %esp
; X87-NEXT:    popl %ebp
; X87-NEXT:    .cfi_def_cfa %esp, 4
; X87-NEXT:    retl
  ; Strict unsigned i32 -> f32: each configuration uses a different trick.
  ; 64-bit SSE/AVX1 zero-extend to 64 bits and use the signed 64-bit convert;
  ; 32-bit SSE/AVX1 use the classic or/sub double-bias constant-pool sequence
  ; then round to f32; AVX512 has a native vcvtusi2ss; X87 widens to a
  ; zero-padded 64-bit slot and uses fildll.
  %result = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret float %result
}
568
define float @uitofp_i64tof32(i64 %x) #0 {
; SSE-X86-LABEL: uitofp_i64tof32:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    .cfi_offset %ebp, -8
; SSE-X86-NEXT:    movl %esp, %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_register %ebp
; SSE-X86-NEXT:    andl $-8, %esp
; SSE-X86-NEXT:    subl $16, %esp
; SSE-X86-NEXT:    movl 12(%ebp), %eax
; SSE-X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-X86-NEXT:    movlps %xmm0, {{[0-9]+}}(%esp)
; SSE-X86-NEXT:    shrl $31, %eax
; SSE-X86-NEXT:    fildll {{[0-9]+}}(%esp)
; SSE-X86-NEXT:    fadds {{\.LCPI.*}}(,%eax,4)
; SSE-X86-NEXT:    fstps {{[0-9]+}}(%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-X86-NEXT:    movss %xmm0, (%esp)
; SSE-X86-NEXT:    flds (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    movl %ebp, %esp
; SSE-X86-NEXT:    popl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa %esp, 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: uitofp_i64tof32:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    movq %rdi, %rax
; SSE-X64-NEXT:    shrq %rax
; SSE-X64-NEXT:    movl %edi, %ecx
; SSE-X64-NEXT:    andl $1, %ecx
; SSE-X64-NEXT:    orq %rax, %rcx
; SSE-X64-NEXT:    testq %rdi, %rdi
; SSE-X64-NEXT:    cmovnsq %rdi, %rcx
; SSE-X64-NEXT:    cvtsi2ss %rcx, %xmm0
; SSE-X64-NEXT:    jns .LBB9_2
; SSE-X64-NEXT:  # %bb.1:
; SSE-X64-NEXT:    addss %xmm0, %xmm0
; SSE-X64-NEXT:  .LBB9_2:
; SSE-X64-NEXT:    retq
;
; AVX-X86-LABEL: uitofp_i64tof32:
; AVX-X86:       # %bb.0:
; AVX-X86-NEXT:    pushl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX-X86-NEXT:    .cfi_offset %ebp, -8
; AVX-X86-NEXT:    movl %esp, %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_register %ebp
; AVX-X86-NEXT:    andl $-8, %esp
; AVX-X86-NEXT:    subl $16, %esp
; AVX-X86-NEXT:    movl 12(%ebp), %eax
; AVX-X86-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-X86-NEXT:    vmovlps %xmm0, {{[0-9]+}}(%esp)
; AVX-X86-NEXT:    shrl $31, %eax
; AVX-X86-NEXT:    fildll {{[0-9]+}}(%esp)
; AVX-X86-NEXT:    fadds {{\.LCPI.*}}(,%eax,4)
; AVX-X86-NEXT:    fstps {{[0-9]+}}(%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-X86-NEXT:    vmovss %xmm0, (%esp)
; AVX-X86-NEXT:    flds (%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    movl %ebp, %esp
; AVX-X86-NEXT:    popl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa %esp, 4
; AVX-X86-NEXT:    retl
;
; AVX1-X64-LABEL: uitofp_i64tof32:
; AVX1-X64:       # %bb.0:
; AVX1-X64-NEXT:    movq %rdi, %rax
; AVX1-X64-NEXT:    shrq %rax
; AVX1-X64-NEXT:    movl %edi, %ecx
; AVX1-X64-NEXT:    andl $1, %ecx
; AVX1-X64-NEXT:    orq %rax, %rcx
; AVX1-X64-NEXT:    testq %rdi, %rdi
; AVX1-X64-NEXT:    cmovnsq %rdi, %rcx
; AVX1-X64-NEXT:    vcvtsi2ss %rcx, %xmm0, %xmm0
; AVX1-X64-NEXT:    jns .LBB9_2
; AVX1-X64-NEXT:  # %bb.1:
; AVX1-X64-NEXT:    vaddss %xmm0, %xmm0, %xmm0
; AVX1-X64-NEXT:  .LBB9_2:
; AVX1-X64-NEXT:    retq
;
; AVX512-X64-LABEL: uitofp_i64tof32:
; AVX512-X64:       # %bb.0:
; AVX512-X64-NEXT:    vcvtusi2ss %rdi, %xmm0, %xmm0
; AVX512-X64-NEXT:    retq
;
; X87-LABEL: uitofp_i64tof32:
; X87:       # %bb.0:
; X87-NEXT:    pushl %ebp
; X87-NEXT:    .cfi_def_cfa_offset 8
; X87-NEXT:    .cfi_offset %ebp, -8
; X87-NEXT:    movl %esp, %ebp
; X87-NEXT:    .cfi_def_cfa_register %ebp
; X87-NEXT:    andl $-8, %esp
; X87-NEXT:    subl $16, %esp
; X87-NEXT:    movl 8(%ebp), %eax
; X87-NEXT:    movl 12(%ebp), %ecx
; X87-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; X87-NEXT:    movl %eax, {{[0-9]+}}(%esp)
; X87-NEXT:    shrl $31, %ecx
; X87-NEXT:    fildll {{[0-9]+}}(%esp)
; X87-NEXT:    fadds {{\.LCPI.*}}(,%ecx,4)
; X87-NEXT:    fstps {{[0-9]+}}(%esp)
; X87-NEXT:    flds {{[0-9]+}}(%esp)
; X87-NEXT:    wait
; X87-NEXT:    movl %ebp, %esp
; X87-NEXT:    popl %ebp
; X87-NEXT:    .cfi_def_cfa %esp, 4
; X87-NEXT:    retl
  ; Strict unsigned i64 -> f32, the hardest case. SSE/AVX1 x64 use the
  ; shift/or round-to-odd halving trick (convert value/2 with the low bit
  ; folded in, then double) when the sign bit is set; AVX512 x64 has native
  ; vcvtusi2ss; 32-bit targets do a signed fildll and conditionally add a
  ; constant-pool bias (2^64 as float, indexed by the original sign bit).
  %result = call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret float %result
}
687
define double @sitofp_i8tof64(i8 %x) #0 {
; SSE-X86-LABEL: sitofp_i8tof64:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    .cfi_offset %ebp, -8
; SSE-X86-NEXT:    movl %esp, %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_register %ebp
; SSE-X86-NEXT:    andl $-8, %esp
; SSE-X86-NEXT:    subl $8, %esp
; SSE-X86-NEXT:    movsbl 8(%ebp), %eax
; SSE-X86-NEXT:    cvtsi2sd %eax, %xmm0
; SSE-X86-NEXT:    movsd %xmm0, (%esp)
; SSE-X86-NEXT:    fldl (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    movl %ebp, %esp
; SSE-X86-NEXT:    popl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa %esp, 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: sitofp_i8tof64:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    movsbl %dil, %eax
; SSE-X64-NEXT:    cvtsi2sd %eax, %xmm0
; SSE-X64-NEXT:    retq
;
; AVX-X86-LABEL: sitofp_i8tof64:
; AVX-X86:       # %bb.0:
; AVX-X86-NEXT:    pushl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX-X86-NEXT:    .cfi_offset %ebp, -8
; AVX-X86-NEXT:    movl %esp, %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_register %ebp
; AVX-X86-NEXT:    andl $-8, %esp
; AVX-X86-NEXT:    subl $8, %esp
; AVX-X86-NEXT:    movsbl 8(%ebp), %eax
; AVX-X86-NEXT:    vcvtsi2sd %eax, %xmm0, %xmm0
; AVX-X86-NEXT:    vmovsd %xmm0, (%esp)
; AVX-X86-NEXT:    fldl (%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    movl %ebp, %esp
; AVX-X86-NEXT:    popl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa %esp, 4
; AVX-X86-NEXT:    retl
;
; AVX-X64-LABEL: sitofp_i8tof64:
; AVX-X64:       # %bb.0:
; AVX-X64-NEXT:    movsbl %dil, %eax
; AVX-X64-NEXT:    vcvtsi2sd %eax, %xmm0, %xmm0
; AVX-X64-NEXT:    retq
;
; X87-LABEL: sitofp_i8tof64:
; X87:       # %bb.0:
; X87-NEXT:    pushl %eax
; X87-NEXT:    .cfi_def_cfa_offset 8
; X87-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
; X87-NEXT:    filds {{[0-9]+}}(%esp)
; X87-NEXT:    wait
; X87-NEXT:    popl %eax
; X87-NEXT:    .cfi_def_cfa_offset 4
; X87-NEXT:    retl
  ; Strict signed i8 -> f64: same movsbl widening as the f32 variant but with
  ; the double-precision scalar convert; i686 SSE/AVX keep an 8-byte-aligned
  ; slot to spill the double before returning it in st(0).
  %result = call double @llvm.experimental.constrained.sitofp.f64.i8(i8 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret double %result
}
755
define double @sitofp_i16tof64(i16 %x) #0 {
; SSE-X86-LABEL: sitofp_i16tof64:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    .cfi_offset %ebp, -8
; SSE-X86-NEXT:    movl %esp, %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_register %ebp
; SSE-X86-NEXT:    andl $-8, %esp
; SSE-X86-NEXT:    subl $8, %esp
; SSE-X86-NEXT:    movswl 8(%ebp), %eax
; SSE-X86-NEXT:    cvtsi2sd %eax, %xmm0
; SSE-X86-NEXT:    movsd %xmm0, (%esp)
; SSE-X86-NEXT:    fldl (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    movl %ebp, %esp
; SSE-X86-NEXT:    popl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa %esp, 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: sitofp_i16tof64:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    movswl %di, %eax
; SSE-X64-NEXT:    cvtsi2sd %eax, %xmm0
; SSE-X64-NEXT:    retq
;
; AVX-X86-LABEL: sitofp_i16tof64:
; AVX-X86:       # %bb.0:
; AVX-X86-NEXT:    pushl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX-X86-NEXT:    .cfi_offset %ebp, -8
; AVX-X86-NEXT:    movl %esp, %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_register %ebp
; AVX-X86-NEXT:    andl $-8, %esp
; AVX-X86-NEXT:    subl $8, %esp
; AVX-X86-NEXT:    movswl 8(%ebp), %eax
; AVX-X86-NEXT:    vcvtsi2sd %eax, %xmm0, %xmm0
; AVX-X86-NEXT:    vmovsd %xmm0, (%esp)
; AVX-X86-NEXT:    fldl (%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    movl %ebp, %esp
; AVX-X86-NEXT:    popl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa %esp, 4
; AVX-X86-NEXT:    retl
;
; AVX-X64-LABEL: sitofp_i16tof64:
; AVX-X64:       # %bb.0:
; AVX-X64-NEXT:    movswl %di, %eax
; AVX-X64-NEXT:    vcvtsi2sd %eax, %xmm0, %xmm0
; AVX-X64-NEXT:    retq
;
; X87-LABEL: sitofp_i16tof64:
; X87:       # %bb.0:
; X87-NEXT:    pushl %eax
; X87-NEXT:    .cfi_def_cfa_offset 8
; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
; X87-NEXT:    filds {{[0-9]+}}(%esp)
; X87-NEXT:    wait
; X87-NEXT:    popl %eax
; X87-NEXT:    .cfi_def_cfa_offset 4
; X87-NEXT:    retl
  ; Strict signed i16 -> f64: movswl widening plus the scalar double convert
  ; on SSE/AVX; X87 converts the 16-bit value directly with filds.
  %result = call double @llvm.experimental.constrained.sitofp.f64.i16(i16 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret double %result
}
823
define double @sitofp_i32tof64(i32 %x) #0 {
; SSE-X86-LABEL: sitofp_i32tof64:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    .cfi_offset %ebp, -8
; SSE-X86-NEXT:    movl %esp, %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_register %ebp
; SSE-X86-NEXT:    andl $-8, %esp
; SSE-X86-NEXT:    subl $8, %esp
; SSE-X86-NEXT:    cvtsi2sdl 8(%ebp), %xmm0
; SSE-X86-NEXT:    movsd %xmm0, (%esp)
; SSE-X86-NEXT:    fldl (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    movl %ebp, %esp
; SSE-X86-NEXT:    popl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa %esp, 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: sitofp_i32tof64:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    cvtsi2sd %edi, %xmm0
; SSE-X64-NEXT:    retq
;
; AVX-X86-LABEL: sitofp_i32tof64:
; AVX-X86:       # %bb.0:
; AVX-X86-NEXT:    pushl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX-X86-NEXT:    .cfi_offset %ebp, -8
; AVX-X86-NEXT:    movl %esp, %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_register %ebp
; AVX-X86-NEXT:    andl $-8, %esp
; AVX-X86-NEXT:    subl $8, %esp
; AVX-X86-NEXT:    vcvtsi2sdl 8(%ebp), %xmm0, %xmm0
; AVX-X86-NEXT:    vmovsd %xmm0, (%esp)
; AVX-X86-NEXT:    fldl (%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    movl %ebp, %esp
; AVX-X86-NEXT:    popl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa %esp, 4
; AVX-X86-NEXT:    retl
;
; AVX-X64-LABEL: sitofp_i32tof64:
; AVX-X64:       # %bb.0:
; AVX-X64-NEXT:    vcvtsi2sd %edi, %xmm0, %xmm0
; AVX-X64-NEXT:    retq
;
; X87-LABEL: sitofp_i32tof64:
; X87:       # %bb.0:
; X87-NEXT:    pushl %eax
; X87-NEXT:    .cfi_def_cfa_offset 8
; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X87-NEXT:    movl %eax, (%esp)
; X87-NEXT:    fildl (%esp)
; X87-NEXT:    wait
; X87-NEXT:    popl %eax
; X87-NEXT:    .cfi_def_cfa_offset 4
; X87-NEXT:    retl
  ; Strict signed i32 -> f64: a single (v)cvtsi2sd (from memory on i686, from
  ; %edi on x64) or fildl on X87; the i686 SSE/AVX paths spill the double to
  ; an aligned slot to return it in st(0).
  %result = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret double %result
}
887
define double @sitofp_i64tof64(i64 %x) #0 {
; SSE-X86-LABEL: sitofp_i64tof64:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    .cfi_offset %ebp, -8
; SSE-X86-NEXT:    movl %esp, %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_register %ebp
; SSE-X86-NEXT:    andl $-8, %esp
; SSE-X86-NEXT:    subl $8, %esp
; SSE-X86-NEXT:    fildll 8(%ebp)
; SSE-X86-NEXT:    fstpl (%esp)
; SSE-X86-NEXT:    fldl (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    movl %ebp, %esp
; SSE-X86-NEXT:    popl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa %esp, 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: sitofp_i64tof64:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    cvtsi2sd %rdi, %xmm0
; SSE-X64-NEXT:    retq
;
; AVX-X86-LABEL: sitofp_i64tof64:
; AVX-X86:       # %bb.0:
; AVX-X86-NEXT:    pushl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX-X86-NEXT:    .cfi_offset %ebp, -8
; AVX-X86-NEXT:    movl %esp, %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_register %ebp
; AVX-X86-NEXT:    andl $-8, %esp
; AVX-X86-NEXT:    subl $8, %esp
; AVX-X86-NEXT:    fildll 8(%ebp)
; AVX-X86-NEXT:    fstpl (%esp)
; AVX-X86-NEXT:    fldl (%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    movl %ebp, %esp
; AVX-X86-NEXT:    popl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa %esp, 4
; AVX-X86-NEXT:    retl
;
; AVX-X64-LABEL: sitofp_i64tof64:
; AVX-X64:       # %bb.0:
; AVX-X64-NEXT:    vcvtsi2sd %rdi, %xmm0, %xmm0
; AVX-X64-NEXT:    retq
;
; X87-LABEL: sitofp_i64tof64:
; X87:       # %bb.0:
; X87-NEXT:    fildll {{[0-9]+}}(%esp)
; X87-NEXT:    wait
; X87-NEXT:    retl
  ; Strict signed i64 -> f64: 64-bit cvtsi2sd/vcvtsi2sd on x86-64; the
  ; 32-bit SSE/AVX paths have no 64-bit GPR convert, so they use x87
  ; fildll (note fstpl+fldl to round the f80 result to f64); the pure
  ; x87 path loads the argument straight off the stack with fildll.
  %result = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret double %result
}
945
define double @uitofp_i1tof64(i1 %x) #0 {
; SSE-X86-LABEL: uitofp_i1tof64:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    .cfi_offset %ebp, -8
; SSE-X86-NEXT:    movl %esp, %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_register %ebp
; SSE-X86-NEXT:    andl $-8, %esp
; SSE-X86-NEXT:    subl $8, %esp
; SSE-X86-NEXT:    movb 8(%ebp), %al
; SSE-X86-NEXT:    andb $1, %al
; SSE-X86-NEXT:    movzbl %al, %eax
; SSE-X86-NEXT:    cvtsi2sd %eax, %xmm0
; SSE-X86-NEXT:    movsd %xmm0, (%esp)
; SSE-X86-NEXT:    fldl (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    movl %ebp, %esp
; SSE-X86-NEXT:    popl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa %esp, 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: uitofp_i1tof64:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    andl $1, %edi
; SSE-X64-NEXT:    cvtsi2sd %edi, %xmm0
; SSE-X64-NEXT:    retq
;
; AVX-X86-LABEL: uitofp_i1tof64:
; AVX-X86:       # %bb.0:
; AVX-X86-NEXT:    pushl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX-X86-NEXT:    .cfi_offset %ebp, -8
; AVX-X86-NEXT:    movl %esp, %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_register %ebp
; AVX-X86-NEXT:    andl $-8, %esp
; AVX-X86-NEXT:    subl $8, %esp
; AVX-X86-NEXT:    movb 8(%ebp), %al
; AVX-X86-NEXT:    andb $1, %al
; AVX-X86-NEXT:    movzbl %al, %eax
; AVX-X86-NEXT:    vcvtsi2sd %eax, %xmm0, %xmm0
; AVX-X86-NEXT:    vmovsd %xmm0, (%esp)
; AVX-X86-NEXT:    fldl (%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    movl %ebp, %esp
; AVX-X86-NEXT:    popl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa %esp, 4
; AVX-X86-NEXT:    retl
;
; AVX-X64-LABEL: uitofp_i1tof64:
; AVX-X64:       # %bb.0:
; AVX-X64-NEXT:    andl $1, %edi
; AVX-X64-NEXT:    vcvtsi2sd %edi, %xmm0, %xmm0
; AVX-X64-NEXT:    retq
;
; X87-LABEL: uitofp_i1tof64:
; X87:       # %bb.0:
; X87-NEXT:    pushl %eax
; X87-NEXT:    .cfi_def_cfa_offset 8
; X87-NEXT:    movb {{[0-9]+}}(%esp), %al
; X87-NEXT:    andb $1, %al
; X87-NEXT:    movzbl %al, %eax
; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
; X87-NEXT:    filds {{[0-9]+}}(%esp)
; X87-NEXT:    wait
; X87-NEXT:    popl %eax
; X87-NEXT:    .cfi_def_cfa_offset 4
; X87-NEXT:    retl
  ; Strict unsigned i1 -> f64: the bit is masked with and $1 and then
  ; converted via the *signed* convert (safe because 0/1 fits in i32);
  ; the x87 path spills it as a 16-bit word and uses filds.
  %result = call double @llvm.experimental.constrained.uitofp.f64.i1(i1 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret double %result
}
1019
define double @uitofp_i8tof64(i8 %x) #0 {
; SSE-X86-LABEL: uitofp_i8tof64:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    .cfi_offset %ebp, -8
; SSE-X86-NEXT:    movl %esp, %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_register %ebp
; SSE-X86-NEXT:    andl $-8, %esp
; SSE-X86-NEXT:    subl $8, %esp
; SSE-X86-NEXT:    movzbl 8(%ebp), %eax
; SSE-X86-NEXT:    cvtsi2sd %eax, %xmm0
; SSE-X86-NEXT:    movsd %xmm0, (%esp)
; SSE-X86-NEXT:    fldl (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    movl %ebp, %esp
; SSE-X86-NEXT:    popl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa %esp, 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: uitofp_i8tof64:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    movzbl %dil, %eax
; SSE-X64-NEXT:    cvtsi2sd %eax, %xmm0
; SSE-X64-NEXT:    retq
;
; AVX-X86-LABEL: uitofp_i8tof64:
; AVX-X86:       # %bb.0:
; AVX-X86-NEXT:    pushl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX-X86-NEXT:    .cfi_offset %ebp, -8
; AVX-X86-NEXT:    movl %esp, %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_register %ebp
; AVX-X86-NEXT:    andl $-8, %esp
; AVX-X86-NEXT:    subl $8, %esp
; AVX-X86-NEXT:    movzbl 8(%ebp), %eax
; AVX-X86-NEXT:    vcvtsi2sd %eax, %xmm0, %xmm0
; AVX-X86-NEXT:    vmovsd %xmm0, (%esp)
; AVX-X86-NEXT:    fldl (%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    movl %ebp, %esp
; AVX-X86-NEXT:    popl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa %esp, 4
; AVX-X86-NEXT:    retl
;
; AVX-X64-LABEL: uitofp_i8tof64:
; AVX-X64:       # %bb.0:
; AVX-X64-NEXT:    movzbl %dil, %eax
; AVX-X64-NEXT:    vcvtsi2sd %eax, %xmm0, %xmm0
; AVX-X64-NEXT:    retq
;
; X87-LABEL: uitofp_i8tof64:
; X87:       # %bb.0:
; X87-NEXT:    pushl %eax
; X87-NEXT:    .cfi_def_cfa_offset 8
; X87-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
; X87-NEXT:    filds {{[0-9]+}}(%esp)
; X87-NEXT:    wait
; X87-NEXT:    popl %eax
; X87-NEXT:    .cfi_def_cfa_offset 4
; X87-NEXT:    retl
  ; Strict unsigned i8 -> f64: zero-extend with movzbl, then use the
  ; signed convert (a u8 always fits in i32); x87 goes through a 16-bit
  ; spill slot and filds.
  %result = call double @llvm.experimental.constrained.uitofp.f64.i8(i8 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret double %result
}
1087
define double @uitofp_i16tof64(i16 %x) #0 {
; SSE-X86-LABEL: uitofp_i16tof64:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    .cfi_offset %ebp, -8
; SSE-X86-NEXT:    movl %esp, %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_register %ebp
; SSE-X86-NEXT:    andl $-8, %esp
; SSE-X86-NEXT:    subl $8, %esp
; SSE-X86-NEXT:    movzwl 8(%ebp), %eax
; SSE-X86-NEXT:    cvtsi2sd %eax, %xmm0
; SSE-X86-NEXT:    movsd %xmm0, (%esp)
; SSE-X86-NEXT:    fldl (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    movl %ebp, %esp
; SSE-X86-NEXT:    popl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa %esp, 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: uitofp_i16tof64:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    movzwl %di, %eax
; SSE-X64-NEXT:    cvtsi2sd %eax, %xmm0
; SSE-X64-NEXT:    retq
;
; AVX-X86-LABEL: uitofp_i16tof64:
; AVX-X86:       # %bb.0:
; AVX-X86-NEXT:    pushl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX-X86-NEXT:    .cfi_offset %ebp, -8
; AVX-X86-NEXT:    movl %esp, %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_register %ebp
; AVX-X86-NEXT:    andl $-8, %esp
; AVX-X86-NEXT:    subl $8, %esp
; AVX-X86-NEXT:    movzwl 8(%ebp), %eax
; AVX-X86-NEXT:    vcvtsi2sd %eax, %xmm0, %xmm0
; AVX-X86-NEXT:    vmovsd %xmm0, (%esp)
; AVX-X86-NEXT:    fldl (%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    movl %ebp, %esp
; AVX-X86-NEXT:    popl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa %esp, 4
; AVX-X86-NEXT:    retl
;
; AVX-X64-LABEL: uitofp_i16tof64:
; AVX-X64:       # %bb.0:
; AVX-X64-NEXT:    movzwl %di, %eax
; AVX-X64-NEXT:    vcvtsi2sd %eax, %xmm0, %xmm0
; AVX-X64-NEXT:    retq
;
; X87-LABEL: uitofp_i16tof64:
; X87:       # %bb.0:
; X87-NEXT:    pushl %eax
; X87-NEXT:    .cfi_def_cfa_offset 8
; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; X87-NEXT:    movl %eax, (%esp)
; X87-NEXT:    fildl (%esp)
; X87-NEXT:    wait
; X87-NEXT:    popl %eax
; X87-NEXT:    .cfi_def_cfa_offset 4
; X87-NEXT:    retl
  ; Strict unsigned i16 -> f64: zero-extend with movzwl, then signed
  ; convert (a u16 always fits in i32); x87 widens to a 32-bit slot and
  ; uses fildl, since filds would reinterpret the high bit as a sign.
  %result = call double @llvm.experimental.constrained.uitofp.f64.i16(i16 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret double %result
}
1155
define double @uitofp_i32tof64(i32 %x) #0 {
; SSE-X86-LABEL: uitofp_i32tof64:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    .cfi_offset %ebp, -8
; SSE-X86-NEXT:    movl %esp, %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_register %ebp
; SSE-X86-NEXT:    andl $-8, %esp
; SSE-X86-NEXT:    subl $8, %esp
; SSE-X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-X86-NEXT:    orpd {{\.LCPI.*}}, %xmm0
; SSE-X86-NEXT:    subsd {{\.LCPI.*}}, %xmm0
; SSE-X86-NEXT:    movsd %xmm0, (%esp)
; SSE-X86-NEXT:    fldl (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    movl %ebp, %esp
; SSE-X86-NEXT:    popl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa %esp, 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: uitofp_i32tof64:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    movl %edi, %eax
; SSE-X64-NEXT:    cvtsi2sd %rax, %xmm0
; SSE-X64-NEXT:    retq
;
; AVX1-X86-LABEL: uitofp_i32tof64:
; AVX1-X86:       # %bb.0:
; AVX1-X86-NEXT:    pushl %ebp
; AVX1-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX1-X86-NEXT:    .cfi_offset %ebp, -8
; AVX1-X86-NEXT:    movl %esp, %ebp
; AVX1-X86-NEXT:    .cfi_def_cfa_register %ebp
; AVX1-X86-NEXT:    andl $-8, %esp
; AVX1-X86-NEXT:    subl $8, %esp
; AVX1-X86-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX1-X86-NEXT:    vorpd {{\.LCPI.*}}, %xmm0, %xmm0
; AVX1-X86-NEXT:    vsubsd {{\.LCPI.*}}, %xmm0, %xmm0
; AVX1-X86-NEXT:    vmovsd %xmm0, (%esp)
; AVX1-X86-NEXT:    fldl (%esp)
; AVX1-X86-NEXT:    wait
; AVX1-X86-NEXT:    movl %ebp, %esp
; AVX1-X86-NEXT:    popl %ebp
; AVX1-X86-NEXT:    .cfi_def_cfa %esp, 4
; AVX1-X86-NEXT:    retl
;
; AVX1-X64-LABEL: uitofp_i32tof64:
; AVX1-X64:       # %bb.0:
; AVX1-X64-NEXT:    movl %edi, %eax
; AVX1-X64-NEXT:    vcvtsi2sd %rax, %xmm0, %xmm0
; AVX1-X64-NEXT:    retq
;
; AVX512-X86-LABEL: uitofp_i32tof64:
; AVX512-X86:       # %bb.0:
; AVX512-X86-NEXT:    pushl %ebp
; AVX512-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX512-X86-NEXT:    .cfi_offset %ebp, -8
; AVX512-X86-NEXT:    movl %esp, %ebp
; AVX512-X86-NEXT:    .cfi_def_cfa_register %ebp
; AVX512-X86-NEXT:    andl $-8, %esp
; AVX512-X86-NEXT:    subl $8, %esp
; AVX512-X86-NEXT:    vcvtusi2sdl 8(%ebp), %xmm0, %xmm0
; AVX512-X86-NEXT:    vmovsd %xmm0, (%esp)
; AVX512-X86-NEXT:    fldl (%esp)
; AVX512-X86-NEXT:    wait
; AVX512-X86-NEXT:    movl %ebp, %esp
; AVX512-X86-NEXT:    popl %ebp
; AVX512-X86-NEXT:    .cfi_def_cfa %esp, 4
; AVX512-X86-NEXT:    retl
;
; AVX512-X64-LABEL: uitofp_i32tof64:
; AVX512-X64:       # %bb.0:
; AVX512-X64-NEXT:    vcvtusi2sd %edi, %xmm0, %xmm0
; AVX512-X64-NEXT:    retq
;
; X87-LABEL: uitofp_i32tof64:
; X87:       # %bb.0:
; X87-NEXT:    pushl %ebp
; X87-NEXT:    .cfi_def_cfa_offset 8
; X87-NEXT:    .cfi_offset %ebp, -8
; X87-NEXT:    movl %esp, %ebp
; X87-NEXT:    .cfi_def_cfa_register %ebp
; X87-NEXT:    andl $-8, %esp
; X87-NEXT:    subl $8, %esp
; X87-NEXT:    movl 8(%ebp), %eax
; X87-NEXT:    movl %eax, (%esp)
; X87-NEXT:    movl $0, {{[0-9]+}}(%esp)
; X87-NEXT:    fildll (%esp)
; X87-NEXT:    wait
; X87-NEXT:    movl %ebp, %esp
; X87-NEXT:    popl %ebp
; X87-NEXT:    .cfi_def_cfa %esp, 4
; X87-NEXT:    retl
  ; Strict unsigned i32 -> f64. Four distinct lowerings are pinned:
  ;  - 32-bit SSE/AVX1: the or/sub constant-pool trick (or in an
  ;    exponent pattern, then subtract the bias) — no u32 convert exists.
  ;  - x86-64 SSE/AVX1: zero-extend to 64 bits (movl is implicitly
  ;    zero-extending) and use the signed 64-bit convert.
  ;  - AVX512VL: native vcvtusi2sd.
  ;  - x87: widen to an i64 slot (upper half explicitly zeroed) and
  ;    fildll, which handles the full u32 range as a signed i64.
  %result = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret double %result
}
1255
define double @uitofp_i64tof64(i64 %x) #0 {
; SSE-X86-LABEL: uitofp_i64tof64:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    .cfi_offset %ebp, -8
; SSE-X86-NEXT:    movl %esp, %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_register %ebp
; SSE-X86-NEXT:    andl $-8, %esp
; SSE-X86-NEXT:    subl $24, %esp
; SSE-X86-NEXT:    movl 12(%ebp), %eax
; SSE-X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-X86-NEXT:    movlps %xmm0, {{[0-9]+}}(%esp)
; SSE-X86-NEXT:    shrl $31, %eax
; SSE-X86-NEXT:    fildll {{[0-9]+}}(%esp)
; SSE-X86-NEXT:    fadds {{\.LCPI.*}}(,%eax,4)
; SSE-X86-NEXT:    fstpl {{[0-9]+}}(%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-X86-NEXT:    movsd %xmm0, (%esp)
; SSE-X86-NEXT:    fldl (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    movl %ebp, %esp
; SSE-X86-NEXT:    popl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa %esp, 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: uitofp_i64tof64:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    movq %rdi, %rax
; SSE-X64-NEXT:    shrq %rax
; SSE-X64-NEXT:    movl %edi, %ecx
; SSE-X64-NEXT:    andl $1, %ecx
; SSE-X64-NEXT:    orq %rax, %rcx
; SSE-X64-NEXT:    testq %rdi, %rdi
; SSE-X64-NEXT:    cmovnsq %rdi, %rcx
; SSE-X64-NEXT:    cvtsi2sd %rcx, %xmm0
; SSE-X64-NEXT:    jns .LBB18_2
; SSE-X64-NEXT:  # %bb.1:
; SSE-X64-NEXT:    addsd %xmm0, %xmm0
; SSE-X64-NEXT:  .LBB18_2:
; SSE-X64-NEXT:    retq
;
; AVX-X86-LABEL: uitofp_i64tof64:
; AVX-X86:       # %bb.0:
; AVX-X86-NEXT:    pushl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX-X86-NEXT:    .cfi_offset %ebp, -8
; AVX-X86-NEXT:    movl %esp, %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_register %ebp
; AVX-X86-NEXT:    andl $-8, %esp
; AVX-X86-NEXT:    subl $24, %esp
; AVX-X86-NEXT:    movl 12(%ebp), %eax
; AVX-X86-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-X86-NEXT:    vmovlps %xmm0, {{[0-9]+}}(%esp)
; AVX-X86-NEXT:    shrl $31, %eax
; AVX-X86-NEXT:    fildll {{[0-9]+}}(%esp)
; AVX-X86-NEXT:    fadds {{\.LCPI.*}}(,%eax,4)
; AVX-X86-NEXT:    fstpl {{[0-9]+}}(%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-X86-NEXT:    vmovsd %xmm0, (%esp)
; AVX-X86-NEXT:    fldl (%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    movl %ebp, %esp
; AVX-X86-NEXT:    popl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa %esp, 4
; AVX-X86-NEXT:    retl
;
; AVX1-X64-LABEL: uitofp_i64tof64:
; AVX1-X64:       # %bb.0:
; AVX1-X64-NEXT:    movq %rdi, %rax
; AVX1-X64-NEXT:    shrq %rax
; AVX1-X64-NEXT:    movl %edi, %ecx
; AVX1-X64-NEXT:    andl $1, %ecx
; AVX1-X64-NEXT:    orq %rax, %rcx
; AVX1-X64-NEXT:    testq %rdi, %rdi
; AVX1-X64-NEXT:    cmovnsq %rdi, %rcx
; AVX1-X64-NEXT:    vcvtsi2sd %rcx, %xmm0, %xmm0
; AVX1-X64-NEXT:    jns .LBB18_2
; AVX1-X64-NEXT:  # %bb.1:
; AVX1-X64-NEXT:    vaddsd %xmm0, %xmm0, %xmm0
; AVX1-X64-NEXT:  .LBB18_2:
; AVX1-X64-NEXT:    retq
;
; AVX512-X64-LABEL: uitofp_i64tof64:
; AVX512-X64:       # %bb.0:
; AVX512-X64-NEXT:    vcvtusi2sd %rdi, %xmm0, %xmm0
; AVX512-X64-NEXT:    retq
;
; X87-LABEL: uitofp_i64tof64:
; X87:       # %bb.0:
; X87-NEXT:    pushl %ebp
; X87-NEXT:    .cfi_def_cfa_offset 8
; X87-NEXT:    .cfi_offset %ebp, -8
; X87-NEXT:    movl %esp, %ebp
; X87-NEXT:    .cfi_def_cfa_register %ebp
; X87-NEXT:    andl $-8, %esp
; X87-NEXT:    subl $16, %esp
; X87-NEXT:    movl 8(%ebp), %eax
; X87-NEXT:    movl 12(%ebp), %ecx
; X87-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; X87-NEXT:    movl %eax, (%esp)
; X87-NEXT:    shrl $31, %ecx
; X87-NEXT:    fildll (%esp)
; X87-NEXT:    fadds {{\.LCPI.*}}(,%ecx,4)
; X87-NEXT:    fstpl {{[0-9]+}}(%esp)
; X87-NEXT:    fldl {{[0-9]+}}(%esp)
; X87-NEXT:    wait
; X87-NEXT:    movl %ebp, %esp
; X87-NEXT:    popl %ebp
; X87-NEXT:    .cfi_def_cfa %esp, 4
; X87-NEXT:    retl
  ; Strict unsigned i64 -> f64. Pins three lowerings:
  ;  - x86-64 SSE/AVX1: if the top bit is set, halve with a shift while
  ;    or-ing the low bit back in (keeps the rounding correct), do a
  ;    signed convert, then double with addsd; non-negative inputs take
  ;    the cmovnsq/jns fast path and convert directly.
  ;  - 32-bit (SSE/AVX/x87): signed fildll, then fadds of a constant
  ;    indexed by the sign bit (shrl $31) — adds 0 or a correction for
  ;    values with bit 63 set — and round to f64 via fstpl.
  ;  - AVX512-X64: native vcvtusi2sd.
  %result = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret double %result
}
1374
1375attributes #0 = { strictfp }
1376