; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=i686 -mattr=cmov | FileCheck %s --check-prefix=X86

; Declarations of the saturating signed fixed-point division intrinsics
; exercised by the tests below (scalar widths i4/i15/i16/i18/i64 and v4i32).
declare  i4  @llvm.sdiv.fix.sat.i4   (i4,  i4,  i32)
declare  i15 @llvm.sdiv.fix.sat.i15  (i15, i15, i32)
declare  i16 @llvm.sdiv.fix.sat.i16  (i16, i16, i32)
declare  i18 @llvm.sdiv.fix.sat.i18  (i18, i18, i32)
declare  i64 @llvm.sdiv.fix.sat.i64  (i64, i64, i32)
declare  <4 x i32> @llvm.sdiv.fix.sat.v4i32(<4 x i32>, <4 x i32>, i32)
; i16 saturating fixed-point sdiv with scale 7 (legalized to a 32-bit idivl).
define i16 @func(i16 %x, i16 %y) nounwind {
;
; X64-LABEL: func:
; X64:       # %bb.0:
; X64-NEXT:    movswl %si, %esi
; X64-NEXT:    movswl %di, %ecx
; X64-NEXT:    shll $8, %ecx
; X64-NEXT:    movl %ecx, %eax
; X64-NEXT:    cltd
; X64-NEXT:    idivl %esi
; X64-NEXT:    # kill: def $eax killed $eax def $rax
; X64-NEXT:    leal -1(%rax), %edi
; X64-NEXT:    testl %esi, %esi
; X64-NEXT:    sets %sil
; X64-NEXT:    testl %ecx, %ecx
; X64-NEXT:    sets %cl
; X64-NEXT:    xorb %sil, %cl
; X64-NEXT:    testl %edx, %edx
; X64-NEXT:    setne %dl
; X64-NEXT:    testb %cl, %dl
; X64-NEXT:    cmovel %eax, %edi
; X64-NEXT:    cmpl $65535, %edi # imm = 0xFFFF
; X64-NEXT:    movl $65535, %ecx # imm = 0xFFFF
; X64-NEXT:    cmovll %edi, %ecx
; X64-NEXT:    cmpl $-65536, %ecx # imm = 0xFFFF0000
; X64-NEXT:    movl $-65536, %eax # imm = 0xFFFF0000
; X64-NEXT:    cmovgl %ecx, %eax
; X64-NEXT:    shrl %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
;
; X86-LABEL: func:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movswl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movswl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    shll $8, %ecx
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    cltd
; X86-NEXT:    idivl %esi
; X86-NEXT:    leal -1(%eax), %edi
; X86-NEXT:    testl %esi, %esi
; X86-NEXT:    sets %bl
; X86-NEXT:    testl %ecx, %ecx
; X86-NEXT:    sets %cl
; X86-NEXT:    xorb %bl, %cl
; X86-NEXT:    testl %edx, %edx
; X86-NEXT:    setne %dl
; X86-NEXT:    testb %cl, %dl
; X86-NEXT:    cmovel %eax, %edi
; X86-NEXT:    cmpl $65535, %edi # imm = 0xFFFF
; X86-NEXT:    movl $65535, %ecx # imm = 0xFFFF
; X86-NEXT:    cmovll %edi, %ecx
; X86-NEXT:    cmpl $-65536, %ecx # imm = 0xFFFF0000
; X86-NEXT:    movl $-65536, %eax # imm = 0xFFFF0000
; X86-NEXT:    cmovgl %ecx, %eax
; X86-NEXT:    shrl %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    retl
  %tmp = call i16 @llvm.sdiv.fix.sat.i16(i16 %x, i16 %y, i32 7)
  ret i16 %tmp
}
79
; i15 saturating fixed-point sdiv at the maximum scale for the type (14),
; with i8 operands sign-extended to i15 and the result sign-extended to i16.
define i16 @func2(i8 %x, i8 %y) nounwind {
;
; X64-LABEL: func2:
; X64:       # %bb.0:
; X64-NEXT:    movsbl %dil, %eax
; X64-NEXT:    movsbl %sil, %ecx
; X64-NEXT:    movswl %cx, %esi
; X64-NEXT:    movswl %ax, %ecx
; X64-NEXT:    shll $14, %ecx
; X64-NEXT:    movl %ecx, %eax
; X64-NEXT:    cltd
; X64-NEXT:    idivl %esi
; X64-NEXT:    # kill: def $eax killed $eax def $rax
; X64-NEXT:    leal -1(%rax), %edi
; X64-NEXT:    testl %esi, %esi
; X64-NEXT:    sets %sil
; X64-NEXT:    testl %ecx, %ecx
; X64-NEXT:    sets %cl
; X64-NEXT:    xorb %sil, %cl
; X64-NEXT:    testl %edx, %edx
; X64-NEXT:    setne %dl
; X64-NEXT:    testb %cl, %dl
; X64-NEXT:    cmovel %eax, %edi
; X64-NEXT:    cmpl $16383, %edi # imm = 0x3FFF
; X64-NEXT:    movl $16383, %ecx # imm = 0x3FFF
; X64-NEXT:    cmovll %edi, %ecx
; X64-NEXT:    cmpl $-16384, %ecx # imm = 0xC000
; X64-NEXT:    movl $-16384, %eax # imm = 0xC000
; X64-NEXT:    cmovgl %ecx, %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
;
; X86-LABEL: func2:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movsbl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movsbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    shll $14, %ecx
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    cltd
; X86-NEXT:    idivl %esi
; X86-NEXT:    leal -1(%eax), %edi
; X86-NEXT:    testl %esi, %esi
; X86-NEXT:    sets %bl
; X86-NEXT:    testl %ecx, %ecx
; X86-NEXT:    sets %cl
; X86-NEXT:    xorb %bl, %cl
; X86-NEXT:    testl %edx, %edx
; X86-NEXT:    setne %dl
; X86-NEXT:    testb %cl, %dl
; X86-NEXT:    cmovel %eax, %edi
; X86-NEXT:    cmpl $16383, %edi # imm = 0x3FFF
; X86-NEXT:    movl $16383, %ecx # imm = 0x3FFF
; X86-NEXT:    cmovll %edi, %ecx
; X86-NEXT:    cmpl $-16384, %ecx # imm = 0xC000
; X86-NEXT:    movl $-16384, %eax # imm = 0xC000
; X86-NEXT:    cmovgl %ecx, %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    retl
  %x2 = sext i8 %x to i15
  %y2 = sext i8 %y to i15
  %tmp = call i15 @llvm.sdiv.fix.sat.i15(i15 %x2, i15 %y2, i32 14)
  %tmp2 = sext i15 %tmp to i16
  ret i16 %tmp2
}
150
; i15 saturating fixed-point sdiv with scale 4 and a pre-shifted (shl 7)
; divisor; legalized to a 16-bit idivw here.
define i16 @func3(i15 %x, i8 %y) nounwind {
;
; X64-LABEL: func3:
; X64:       # %bb.0:
; X64-NEXT:    shll $8, %esi
; X64-NEXT:    movswl %si, %ecx
; X64-NEXT:    addl %edi, %edi
; X64-NEXT:    shrl $4, %ecx
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    cwtd
; X64-NEXT:    idivw %cx
; X64-NEXT:    # kill: def $ax killed $ax def $rax
; X64-NEXT:    leal -1(%rax), %esi
; X64-NEXT:    testw %di, %di
; X64-NEXT:    sets %dil
; X64-NEXT:    testw %cx, %cx
; X64-NEXT:    sets %cl
; X64-NEXT:    xorb %dil, %cl
; X64-NEXT:    testw %dx, %dx
; X64-NEXT:    setne %dl
; X64-NEXT:    testb %cl, %dl
; X64-NEXT:    cmovel %eax, %esi
; X64-NEXT:    movswl %si, %eax
; X64-NEXT:    cmpl $16383, %eax # imm = 0x3FFF
; X64-NEXT:    movl $16383, %ecx # imm = 0x3FFF
; X64-NEXT:    cmovll %esi, %ecx
; X64-NEXT:    movswl %cx, %eax
; X64-NEXT:    cmpl $-16384, %eax # imm = 0xC000
; X64-NEXT:    movl $49152, %eax # imm = 0xC000
; X64-NEXT:    cmovgl %ecx, %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
;
; X86-LABEL: func3:
; X86:       # %bb.0:
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    shll $8, %eax
; X86-NEXT:    movswl %ax, %esi
; X86-NEXT:    addl %ecx, %ecx
; X86-NEXT:    shrl $4, %esi
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    cwtd
; X86-NEXT:    idivw %si
; X86-NEXT:    # kill: def $ax killed $ax def $eax
; X86-NEXT:    leal -1(%eax), %edi
; X86-NEXT:    testw %cx, %cx
; X86-NEXT:    sets %cl
; X86-NEXT:    testw %si, %si
; X86-NEXT:    sets %ch
; X86-NEXT:    xorb %cl, %ch
; X86-NEXT:    testw %dx, %dx
; X86-NEXT:    setne %cl
; X86-NEXT:    testb %ch, %cl
; X86-NEXT:    cmovel %eax, %edi
; X86-NEXT:    movswl %di, %eax
; X86-NEXT:    cmpl $16383, %eax # imm = 0x3FFF
; X86-NEXT:    movl $16383, %ecx # imm = 0x3FFF
; X86-NEXT:    cmovll %edi, %ecx
; X86-NEXT:    movswl %cx, %eax
; X86-NEXT:    cmpl $-16384, %eax # imm = 0xC000
; X86-NEXT:    movl $49152, %eax # imm = 0xC000
; X86-NEXT:    cmovgl %ecx, %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    retl
  %y2 = sext i8 %y to i15
  %y3 = shl i15 %y2, 7
  %tmp = call i15 @llvm.sdiv.fix.sat.i15(i15 %x, i15 %y3, i32 4)
  %tmp2 = sext i15 %tmp to i16
  ret i16 %tmp2
}
226
; Sub-byte case: i4 saturating fixed-point sdiv with scale 2 (promoted to
; an 8-bit idivb).
define i4 @func4(i4 %x, i4 %y) nounwind {
;
; X64-LABEL: func4:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbx
; X64-NEXT:    shlb $4, %sil
; X64-NEXT:    sarb $4, %sil
; X64-NEXT:    shlb $4, %dil
; X64-NEXT:    sarb $4, %dil
; X64-NEXT:    shlb $2, %dil
; X64-NEXT:    movsbl %dil, %ecx
; X64-NEXT:    movl %ecx, %eax
; X64-NEXT:    idivb %sil
; X64-NEXT:    movsbl %ah, %ebx
; X64-NEXT:    movzbl %al, %eax
; X64-NEXT:    leal -1(%rax), %edi
; X64-NEXT:    movzbl %dil, %edi
; X64-NEXT:    testb %sil, %sil
; X64-NEXT:    sets %dl
; X64-NEXT:    testb %cl, %cl
; X64-NEXT:    sets %cl
; X64-NEXT:    xorb %dl, %cl
; X64-NEXT:    testb %bl, %bl
; X64-NEXT:    setne %dl
; X64-NEXT:    testb %cl, %dl
; X64-NEXT:    cmovel %eax, %edi
; X64-NEXT:    cmpb $7, %dil
; X64-NEXT:    movl $7, %ecx
; X64-NEXT:    cmovll %edi, %ecx
; X64-NEXT:    cmpb $-8, %cl
; X64-NEXT:    movl $248, %eax
; X64-NEXT:    cmovgl %ecx, %eax
; X64-NEXT:    # kill: def $al killed $al killed $eax
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; X86-LABEL: func4:
; X86:       # %bb.0:
; X86-NEXT:    pushl %esi
; X86-NEXT:    movb {{[0-9]+}}(%esp), %dl
; X86-NEXT:    shlb $4, %dl
; X86-NEXT:    sarb $4, %dl
; X86-NEXT:    movb {{[0-9]+}}(%esp), %dh
; X86-NEXT:    shlb $4, %dh
; X86-NEXT:    sarb $4, %dh
; X86-NEXT:    shlb $2, %dh
; X86-NEXT:    movsbl %dh, %eax
; X86-NEXT:    idivb %dl
; X86-NEXT:    movsbl %ah, %ecx
; X86-NEXT:    movzbl %al, %esi
; X86-NEXT:    decb %al
; X86-NEXT:    movzbl %al, %eax
; X86-NEXT:    testb %dl, %dl
; X86-NEXT:    sets %dl
; X86-NEXT:    testb %dh, %dh
; X86-NEXT:    sets %dh
; X86-NEXT:    xorb %dl, %dh
; X86-NEXT:    testb %cl, %cl
; X86-NEXT:    setne %cl
; X86-NEXT:    testb %dh, %cl
; X86-NEXT:    cmovel %esi, %eax
; X86-NEXT:    cmpb $7, %al
; X86-NEXT:    movl $7, %ecx
; X86-NEXT:    cmovll %eax, %ecx
; X86-NEXT:    cmpb $-8, %cl
; X86-NEXT:    movl $248, %eax
; X86-NEXT:    cmovgl %ecx, %eax
; X86-NEXT:    # kill: def $al killed $al killed $eax
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
  %tmp = call i4 @llvm.sdiv.fix.sat.i4(i4 %x, i4 %y, i32 2)
  ret i4 %tmp
}
300
; i64 saturating fixed-point sdiv with scale 31; expanded through 128-bit
; libcalls (__divti3 for the quotient, __modti3 for the inexactness check).
define i64 @func5(i64 %x, i64 %y) nounwind {
;
; X64-LABEL: func5:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbp
; X64-NEXT:    pushq %r15
; X64-NEXT:    pushq %r14
; X64-NEXT:    pushq %r13
; X64-NEXT:    pushq %r12
; X64-NEXT:    pushq %rbx
; X64-NEXT:    subq $24, %rsp
; X64-NEXT:    movq %rsi, %rdx
; X64-NEXT:    movq %rsi, (%rsp) # 8-byte Spill
; X64-NEXT:    movq %rdi, %r15
; X64-NEXT:    leaq (%rdi,%rdi), %rax
; X64-NEXT:    movq %rdi, %r12
; X64-NEXT:    sarq $63, %r12
; X64-NEXT:    shldq $31, %rax, %r12
; X64-NEXT:    shlq $32, %r15
; X64-NEXT:    movq %rsi, %r13
; X64-NEXT:    sarq $63, %r13
; X64-NEXT:    movq %r15, %rdi
; X64-NEXT:    movq %r12, %rsi
; X64-NEXT:    movq %r13, %rcx
; X64-NEXT:    callq __divti3
; X64-NEXT:    movq %rax, %rbx
; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT:    movq %rdx, %rbp
; X64-NEXT:    movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT:    subq $1, %rbx
; X64-NEXT:    sbbq $0, %rbp
; X64-NEXT:    testq %r12, %r12
; X64-NEXT:    sets %al
; X64-NEXT:    testq %r13, %r13
; X64-NEXT:    sets %r14b
; X64-NEXT:    xorb %al, %r14b
; X64-NEXT:    movq %r15, %rdi
; X64-NEXT:    movq %r12, %rsi
; X64-NEXT:    movq (%rsp), %rdx # 8-byte Reload
; X64-NEXT:    movq %r13, %rcx
; X64-NEXT:    callq __modti3
; X64-NEXT:    orq %rax, %rdx
; X64-NEXT:    setne %al
; X64-NEXT:    testb %r14b, %al
; X64-NEXT:    cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
; X64-NEXT:    cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
; X64-NEXT:    cmpq $-1, %rbx
; X64-NEXT:    movq $-1, %rax
; X64-NEXT:    movq $-1, %rcx
; X64-NEXT:    cmovbq %rbx, %rcx
; X64-NEXT:    xorl %edx, %edx
; X64-NEXT:    testq %rbp, %rbp
; X64-NEXT:    cmovnsq %rax, %rbx
; X64-NEXT:    cmoveq %rcx, %rbx
; X64-NEXT:    cmovnsq %rdx, %rbp
; X64-NEXT:    testq %rbx, %rbx
; X64-NEXT:    movl $0, %ecx
; X64-NEXT:    cmovaq %rbx, %rcx
; X64-NEXT:    testq %rbp, %rbp
; X64-NEXT:    cmovnsq %rbp, %rax
; X64-NEXT:    cmovsq %rdx, %rbx
; X64-NEXT:    cmpq $-1, %rbp
; X64-NEXT:    cmoveq %rcx, %rbx
; X64-NEXT:    shrdq $1, %rax, %rbx
; X64-NEXT:    movq %rbx, %rax
; X64-NEXT:    addq $24, %rsp
; X64-NEXT:    popq %rbx
; X64-NEXT:    popq %r12
; X64-NEXT:    popq %r13
; X64-NEXT:    popq %r14
; X64-NEXT:    popq %r15
; X64-NEXT:    popq %rbp
; X64-NEXT:    retq
;
; X86-LABEL: func5:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $88, %esp
; X86-NEXT:    movl 8(%ebp), %ecx
; X86-NEXT:    movl 12(%ebp), %eax
; X86-NEXT:    movl 20(%ebp), %ebx
; X86-NEXT:    sarl $31, %ebx
; X86-NEXT:    movl %eax, %edi
; X86-NEXT:    sarl $31, %edi
; X86-NEXT:    movl %edi, %edx
; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    shldl $31, %eax, %edx
; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    shldl $31, %ecx, %eax
; X86-NEXT:    movl %eax, %esi
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    shll $31, %ecx
; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl 20(%ebp)
; X86-NEXT:    pushl 16(%ebp)
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %edx
; X86-NEXT:    pushl %esi
; X86-NEXT:    pushl %ecx
; X86-NEXT:    pushl %eax
; X86-NEXT:    calll __divti3
; X86-NEXT:    addl $32, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    subl $1, %esi
; X86-NEXT:    sbbl $0, %edi
; X86-NEXT:    sbbl $0, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    sbbl $0, %ebx
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT:    testl %edx, %edx
; X86-NEXT:    sets %al
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT:    testl %ecx, %ecx
; X86-NEXT:    sets %ah
; X86-NEXT:    xorb %al, %ah
; X86-NEXT:    movb %ah, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-NEXT:    pushl %edx
; X86-NEXT:    pushl %edx
; X86-NEXT:    pushl 20(%ebp)
; X86-NEXT:    pushl 16(%ebp)
; X86-NEXT:    pushl %ecx
; X86-NEXT:    pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-NEXT:    pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-NEXT:    pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-NEXT:    pushl %eax
; X86-NEXT:    calll __modti3
; X86-NEXT:    addl $32, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    orl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    orl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    orl %eax, %ecx
; X86-NEXT:    setne %al
; X86-NEXT:    testb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT:    cmovel {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    cmovel {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT:    cmovel {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-NEXT:    cmovel {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X86-NEXT:    testl %ebx, %ebx
; X86-NEXT:    movl $0, %ecx
; X86-NEXT:    cmovsl %ebx, %ecx
; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl $-1, %ecx
; X86-NEXT:    cmovsl %esi, %ecx
; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl $2147483647, %ecx # imm = 0x7FFFFFFF
; X86-NEXT:    cmovsl %edi, %ecx
; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl %ebx, %edx
; X86-NEXT:    sarl $31, %edx
; X86-NEXT:    andl %eax, %edx
; X86-NEXT:    testl %ebx, %ebx
; X86-NEXT:    cmovel %ebx, %edx
; X86-NEXT:    cmpl $-1, %esi
; X86-NEXT:    movl $-1, %eax
; X86-NEXT:    cmovbl %esi, %eax
; X86-NEXT:    cmpl $2147483647, %edi # imm = 0x7FFFFFFF
; X86-NEXT:    movl $-1, %ecx
; X86-NEXT:    cmovael %ecx, %esi
; X86-NEXT:    cmovel %eax, %esi
; X86-NEXT:    movl $2147483647, %eax # imm = 0x7FFFFFFF
; X86-NEXT:    cmovael %eax, %edi
; X86-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X86-NEXT:    cmovnel {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT:    cmovnel {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-NEXT:    testl %esi, %esi
; X86-NEXT:    movl $0, %eax
; X86-NEXT:    cmoval %esi, %eax
; X86-NEXT:    cmpl $-2147483648, %edi # imm = 0x80000000
; X86-NEXT:    movl $0, %ecx
; X86-NEXT:    cmoval %esi, %ecx
; X86-NEXT:    cmovel %eax, %ecx
; X86-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
; X86-NEXT:    cmoval %edi, %eax
; X86-NEXT:    cmpl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-NEXT:    movl $-2147483648, %ebx # imm = 0x80000000
; X86-NEXT:    cmovsl %ebx, %edi
; X86-NEXT:    movl $0, %ebx
; X86-NEXT:    cmovsl %ebx, %esi
; X86-NEXT:    andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
; X86-NEXT:    cmpl $-1, %edx
; X86-NEXT:    cmovel %ecx, %esi
; X86-NEXT:    cmovel %eax, %edi
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    movl %edi, %edx
; X86-NEXT:    leal -12(%ebp), %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
  %tmp = call i64 @llvm.sdiv.fix.sat.i64(i64 %x, i64 %y, i32 31)
  ret i64 %tmp
}
514
; Odd-width result: i18 saturating fixed-point sdiv with scale 7, operands
; sign-extended from i16.
define i18 @func6(i16 %x, i16 %y) nounwind {
;
; X64-LABEL: func6:
; X64:       # %bb.0:
; X64-NEXT:    movswl %di, %ecx
; X64-NEXT:    movswl %si, %esi
; X64-NEXT:    shll $7, %ecx
; X64-NEXT:    movl %ecx, %eax
; X64-NEXT:    cltd
; X64-NEXT:    idivl %esi
; X64-NEXT:    # kill: def $eax killed $eax def $rax
; X64-NEXT:    leal -1(%rax), %edi
; X64-NEXT:    testl %esi, %esi
; X64-NEXT:    sets %sil
; X64-NEXT:    testl %ecx, %ecx
; X64-NEXT:    sets %cl
; X64-NEXT:    xorb %sil, %cl
; X64-NEXT:    testl %edx, %edx
; X64-NEXT:    setne %dl
; X64-NEXT:    testb %cl, %dl
; X64-NEXT:    cmovel %eax, %edi
; X64-NEXT:    cmpl $131071, %edi # imm = 0x1FFFF
; X64-NEXT:    movl $131071, %ecx # imm = 0x1FFFF
; X64-NEXT:    cmovll %edi, %ecx
; X64-NEXT:    cmpl $-131072, %ecx # imm = 0xFFFE0000
; X64-NEXT:    movl $-131072, %eax # imm = 0xFFFE0000
; X64-NEXT:    cmovgl %ecx, %eax
; X64-NEXT:    retq
;
; X86-LABEL: func6:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movswl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movswl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    shll $7, %ecx
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    cltd
; X86-NEXT:    idivl %esi
; X86-NEXT:    leal -1(%eax), %edi
; X86-NEXT:    testl %esi, %esi
; X86-NEXT:    sets %bl
; X86-NEXT:    testl %ecx, %ecx
; X86-NEXT:    sets %cl
; X86-NEXT:    xorb %bl, %cl
; X86-NEXT:    testl %edx, %edx
; X86-NEXT:    setne %dl
; X86-NEXT:    testb %cl, %dl
; X86-NEXT:    cmovel %eax, %edi
; X86-NEXT:    cmpl $131071, %edi # imm = 0x1FFFF
; X86-NEXT:    movl $131071, %ecx # imm = 0x1FFFF
; X86-NEXT:    cmovll %edi, %ecx
; X86-NEXT:    cmpl $-131072, %ecx # imm = 0xFFFE0000
; X86-NEXT:    movl $-131072, %eax # imm = 0xFFFE0000
; X86-NEXT:    cmovgl %ecx, %eax
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    retl
  %x2 = sext i16 %x to i18
  %y2 = sext i16 %y to i18
  %tmp = call i18 @llvm.sdiv.fix.sat.i18(i18 %x2, i18 %y2, i32 7)
  ret i18 %tmp
}
580
581define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
582;
583; X64-LABEL: vec:
584; X64:       # %bb.0:
585; X64-NEXT:    pushq %rbp
586; X64-NEXT:    pushq %r15
587; X64-NEXT:    pushq %r14
588; X64-NEXT:    pushq %r13
589; X64-NEXT:    pushq %r12
590; X64-NEXT:    pushq %rbx
591; X64-NEXT:    subq $104, %rsp
592; X64-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
593; X64-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
594; X64-NEXT:    pxor %xmm2, %xmm2
595; X64-NEXT:    pcmpgtd %xmm0, %xmm2
596; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
597; X64-NEXT:    paddq %xmm0, %xmm0
598; X64-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
599; X64-NEXT:    movq %xmm0, %rbx
600; X64-NEXT:    movq %rbx, %rbp
601; X64-NEXT:    sarq $63, %rbp
602; X64-NEXT:    shldq $31, %rbx, %rbp
603; X64-NEXT:    pxor %xmm0, %xmm0
604; X64-NEXT:    pcmpgtd %xmm1, %xmm0
605; X64-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
606; X64-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
607; X64-NEXT:    movq %xmm1, %rdx
608; X64-NEXT:    movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
609; X64-NEXT:    movq %rdx, %r15
610; X64-NEXT:    sarq $63, %r15
611; X64-NEXT:    movq %rbx, %r12
612; X64-NEXT:    shlq $31, %r12
613; X64-NEXT:    movq %r12, %rdi
614; X64-NEXT:    movq %rbp, %rsi
615; X64-NEXT:    movq %r15, %rcx
616; X64-NEXT:    callq __divti3
617; X64-NEXT:    movq %rax, %r13
618; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
619; X64-NEXT:    movq %rdx, %r14
620; X64-NEXT:    movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
621; X64-NEXT:    subq $1, %r13
622; X64-NEXT:    sbbq $0, %r14
623; X64-NEXT:    shrq $63, %rbx
624; X64-NEXT:    xorl %r15d, %ebx
625; X64-NEXT:    movq %r12, %rdi
626; X64-NEXT:    movq %rbp, %rsi
627; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
628; X64-NEXT:    movq %r15, %rcx
629; X64-NEXT:    callq __modti3
630; X64-NEXT:    orq %rax, %rdx
631; X64-NEXT:    setne %al
632; X64-NEXT:    testb %bl, %al
633; X64-NEXT:    cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
634; X64-NEXT:    cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
635; X64-NEXT:    movl $4294967295, %edx # imm = 0xFFFFFFFF
636; X64-NEXT:    cmpq %rdx, %r13
637; X64-NEXT:    movl $4294967295, %eax # imm = 0xFFFFFFFF
638; X64-NEXT:    cmovbq %r13, %rax
639; X64-NEXT:    xorl %ecx, %ecx
640; X64-NEXT:    testq %r14, %r14
641; X64-NEXT:    cmovnsq %rdx, %r13
642; X64-NEXT:    cmoveq %rax, %r13
643; X64-NEXT:    cmovnsq %rcx, %r14
644; X64-NEXT:    movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
645; X64-NEXT:    cmpq %rcx, %r13
646; X64-NEXT:    movq %rcx, %rax
647; X64-NEXT:    cmovaq %r13, %rax
648; X64-NEXT:    testq %r14, %r14
649; X64-NEXT:    cmovsq %rcx, %r13
650; X64-NEXT:    cmpq $-1, %r14
651; X64-NEXT:    cmoveq %rax, %r13
652; X64-NEXT:    movq %r13, %xmm0
653; X64-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
654; X64-NEXT:    pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
655; X64-NEXT:    # xmm0 = mem[2,3,2,3]
656; X64-NEXT:    movq %xmm0, %rbx
657; X64-NEXT:    movq %rbx, %r13
658; X64-NEXT:    sarq $63, %r13
659; X64-NEXT:    shldq $31, %rbx, %r13
660; X64-NEXT:    pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
661; X64-NEXT:    # xmm0 = mem[2,3,2,3]
662; X64-NEXT:    movq %xmm0, %rdx
663; X64-NEXT:    movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
664; X64-NEXT:    movq %rdx, %rbp
665; X64-NEXT:    sarq $63, %rbp
666; X64-NEXT:    movq %rbx, %r15
667; X64-NEXT:    shlq $31, %r15
668; X64-NEXT:    movq %r15, %rdi
669; X64-NEXT:    movq %r13, %rsi
670; X64-NEXT:    movq %rbp, %rcx
671; X64-NEXT:    callq __divti3
672; X64-NEXT:    movq %rax, %r12
673; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
674; X64-NEXT:    movq %rdx, %r14
675; X64-NEXT:    movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
676; X64-NEXT:    subq $1, %r12
677; X64-NEXT:    sbbq $0, %r14
678; X64-NEXT:    shrq $63, %rbx
679; X64-NEXT:    xorl %ebp, %ebx
680; X64-NEXT:    movq %r15, %rdi
681; X64-NEXT:    movq %r13, %rsi
682; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
683; X64-NEXT:    movq %rbp, %rcx
684; X64-NEXT:    callq __modti3
685; X64-NEXT:    orq %rax, %rdx
686; X64-NEXT:    setne %al
687; X64-NEXT:    testb %bl, %al
688; X64-NEXT:    cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
689; X64-NEXT:    cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
690; X64-NEXT:    movl $4294967295, %ecx # imm = 0xFFFFFFFF
691; X64-NEXT:    cmpq %rcx, %r12
692; X64-NEXT:    movl $4294967295, %eax # imm = 0xFFFFFFFF
693; X64-NEXT:    cmovbq %r12, %rax
694; X64-NEXT:    testq %r14, %r14
695; X64-NEXT:    cmovnsq %rcx, %r12
696; X64-NEXT:    cmoveq %rax, %r12
697; X64-NEXT:    movl $0, %eax
698; X64-NEXT:    cmovnsq %rax, %r14
699; X64-NEXT:    movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
700; X64-NEXT:    cmpq %rcx, %r12
701; X64-NEXT:    movq %rcx, %rax
702; X64-NEXT:    cmovaq %r12, %rax
703; X64-NEXT:    testq %r14, %r14
704; X64-NEXT:    cmovsq %rcx, %r12
705; X64-NEXT:    cmpq $-1, %r14
706; X64-NEXT:    cmoveq %rax, %r12
707; X64-NEXT:    movq %r12, %xmm0
708; X64-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
709; X64-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
710; X64-NEXT:    psrlq $1, %xmm1
711; X64-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
712; X64-NEXT:    pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
713; X64-NEXT:    # xmm1 = mem[2,3,2,3]
714; X64-NEXT:    pxor %xmm0, %xmm0
715; X64-NEXT:    pcmpgtd %xmm1, %xmm0
716; X64-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
717; X64-NEXT:    paddq %xmm1, %xmm1
718; X64-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
719; X64-NEXT:    movq %xmm1, %rbx
720; X64-NEXT:    movq %rbx, %r12
721; X64-NEXT:    sarq $63, %r12
722; X64-NEXT:    shldq $31, %rbx, %r12
723; X64-NEXT:    pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
724; X64-NEXT:    # xmm1 = mem[2,3,2,3]
725; X64-NEXT:    pxor %xmm0, %xmm0
726; X64-NEXT:    pcmpgtd %xmm1, %xmm0
727; X64-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
728; X64-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
729; X64-NEXT:    movq %xmm1, %rdx
730; X64-NEXT:    movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
731; X64-NEXT:    movq %rdx, %rbp
732; X64-NEXT:    sarq $63, %rbp
733; X64-NEXT:    movq %rbx, %r15
734; X64-NEXT:    shlq $31, %r15
735; X64-NEXT:    movq %r15, %rdi
736; X64-NEXT:    movq %r12, %rsi
737; X64-NEXT:    movq %rbp, %rcx
738; X64-NEXT:    callq __divti3
739; X64-NEXT:    movq %rax, %r13
740; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
741; X64-NEXT:    movq %rdx, %r14
742; X64-NEXT:    movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
743; X64-NEXT:    subq $1, %r13
744; X64-NEXT:    sbbq $0, %r14
745; X64-NEXT:    shrq $63, %rbx
746; X64-NEXT:    xorl %ebp, %ebx
747; X64-NEXT:    movq %r15, %rdi
748; X64-NEXT:    movq %r12, %rsi
749; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
750; X64-NEXT:    movq %rbp, %rcx
751; X64-NEXT:    callq __modti3
752; X64-NEXT:    orq %rax, %rdx
753; X64-NEXT:    setne %al
754; X64-NEXT:    testb %bl, %al
755; X64-NEXT:    cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
756; X64-NEXT:    cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
757; X64-NEXT:    movl $4294967295, %ecx # imm = 0xFFFFFFFF
758; X64-NEXT:    cmpq %rcx, %r13
759; X64-NEXT:    movl $4294967295, %eax # imm = 0xFFFFFFFF
760; X64-NEXT:    cmovbq %r13, %rax
761; X64-NEXT:    testq %r14, %r14
762; X64-NEXT:    cmovnsq %rcx, %r13
763; X64-NEXT:    cmoveq %rax, %r13
764; X64-NEXT:    movl $0, %eax
765; X64-NEXT:    cmovnsq %rax, %r14
766; X64-NEXT:    movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
767; X64-NEXT:    cmpq %rcx, %r13
768; X64-NEXT:    movq %rcx, %rax
769; X64-NEXT:    cmovaq %r13, %rax
770; X64-NEXT:    testq %r14, %r14
771; X64-NEXT:    cmovsq %rcx, %r13
772; X64-NEXT:    cmpq $-1, %r14
773; X64-NEXT:    cmoveq %rax, %r13
774; X64-NEXT:    movq %r13, %xmm0
775; X64-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
776; X64-NEXT:    pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
777; X64-NEXT:    # xmm0 = mem[2,3,2,3]
778; X64-NEXT:    movq %xmm0, %rbx
779; X64-NEXT:    movq %rbx, %r13
780; X64-NEXT:    sarq $63, %r13
781; X64-NEXT:    shldq $31, %rbx, %r13
782; X64-NEXT:    pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
783; X64-NEXT:    # xmm0 = mem[2,3,2,3]
784; X64-NEXT:    movq %xmm0, %rdx
785; X64-NEXT:    movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
786; X64-NEXT:    movq %rdx, %rbp
787; X64-NEXT:    sarq $63, %rbp
788; X64-NEXT:    movq %rbx, %r15
789; X64-NEXT:    shlq $31, %r15
790; X64-NEXT:    movq %r15, %rdi
791; X64-NEXT:    movq %r13, %rsi
792; X64-NEXT:    movq %rbp, %rcx
793; X64-NEXT:    callq __divti3
794; X64-NEXT:    movq %rax, %r12
795; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
796; X64-NEXT:    movq %rdx, %r14
797; X64-NEXT:    movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
798; X64-NEXT:    subq $1, %r12
799; X64-NEXT:    sbbq $0, %r14
800; X64-NEXT:    shrq $63, %rbx
801; X64-NEXT:    xorl %ebp, %ebx
802; X64-NEXT:    movq %r15, %rdi
803; X64-NEXT:    movq %r13, %rsi
804; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
805; X64-NEXT:    movq %rbp, %rcx
806; X64-NEXT:    callq __modti3
807; X64-NEXT:    orq %rax, %rdx
808; X64-NEXT:    setne %al
809; X64-NEXT:    testb %bl, %al
810; X64-NEXT:    cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
811; X64-NEXT:    cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
812; X64-NEXT:    movl $4294967295, %ecx # imm = 0xFFFFFFFF
813; X64-NEXT:    cmpq %rcx, %r12
814; X64-NEXT:    movl $4294967295, %eax # imm = 0xFFFFFFFF
815; X64-NEXT:    cmovbq %r12, %rax
816; X64-NEXT:    testq %r14, %r14
817; X64-NEXT:    cmovnsq %rcx, %r12
818; X64-NEXT:    cmoveq %rax, %r12
819; X64-NEXT:    movl $0, %eax
820; X64-NEXT:    cmovnsq %rax, %r14
821; X64-NEXT:    movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
822; X64-NEXT:    cmpq %rcx, %r12
823; X64-NEXT:    movq %rcx, %rax
824; X64-NEXT:    cmovaq %r12, %rax
825; X64-NEXT:    testq %r14, %r14
826; X64-NEXT:    cmovsq %rcx, %r12
827; X64-NEXT:    cmpq $-1, %r14
828; X64-NEXT:    cmoveq %rax, %r12
829; X64-NEXT:    movq %r12, %xmm0
830; X64-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
831; X64-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
832; X64-NEXT:    psrlq $1, %xmm1
833; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
834; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
835; X64-NEXT:    addq $104, %rsp
836; X64-NEXT:    popq %rbx
837; X64-NEXT:    popq %r12
838; X64-NEXT:    popq %r13
839; X64-NEXT:    popq %r14
840; X64-NEXT:    popq %r15
841; X64-NEXT:    popq %rbp
842; X64-NEXT:    retq
843;
844; X86-LABEL: vec:
845; X86:       # %bb.0:
846; X86-NEXT:    pushl %ebp
847; X86-NEXT:    movl %esp, %ebp
848; X86-NEXT:    pushl %ebx
849; X86-NEXT:    pushl %edi
850; X86-NEXT:    pushl %esi
851; X86-NEXT:    andl $-8, %esp
852; X86-NEXT:    subl $256, %esp # imm = 0x100
853; X86-NEXT:    movl 24(%ebp), %edx
854; X86-NEXT:    movl 40(%ebp), %edi
855; X86-NEXT:    leal {{[0-9]+}}(%esp), %ebx
856; X86-NEXT:    movl %edi, %eax
857; X86-NEXT:    sarl $31, %eax
858; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
859; X86-NEXT:    movl %edx, %ecx
860; X86-NEXT:    sarl $31, %ecx
861; X86-NEXT:    addl %edx, %edx
862; X86-NEXT:    adcl %ecx, %ecx
863; X86-NEXT:    andl $1, %ecx
864; X86-NEXT:    negl %ecx
865; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
866; X86-NEXT:    movl %ecx, %esi
867; X86-NEXT:    shldl $31, %edx, %esi
868; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
869; X86-NEXT:    shll $31, %edx
870; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
871; X86-NEXT:    pushl %eax
872; X86-NEXT:    pushl %eax
873; X86-NEXT:    pushl %eax
874; X86-NEXT:    pushl %edi
875; X86-NEXT:    pushl %ecx
876; X86-NEXT:    pushl %ecx
877; X86-NEXT:    pushl %esi
878; X86-NEXT:    pushl %edx
879; X86-NEXT:    pushl %ebx
880; X86-NEXT:    calll __modti3
881; X86-NEXT:    addl $32, %esp
882; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
883; X86-NEXT:    movl 36(%ebp), %edx
884; X86-NEXT:    movl %edx, %edi
885; X86-NEXT:    sarl $31, %edi
886; X86-NEXT:    movl 20(%ebp), %ecx
887; X86-NEXT:    movl %ecx, %esi
888; X86-NEXT:    sarl $31, %esi
889; X86-NEXT:    addl %ecx, %ecx
890; X86-NEXT:    adcl %esi, %esi
891; X86-NEXT:    andl $1, %esi
892; X86-NEXT:    negl %esi
893; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
894; X86-NEXT:    movl %esi, %ebx
895; X86-NEXT:    shldl $31, %ecx, %ebx
896; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
897; X86-NEXT:    shll $31, %ecx
898; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
899; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
900; X86-NEXT:    pushl %edi
901; X86-NEXT:    pushl %edi
902; X86-NEXT:    pushl %edi
903; X86-NEXT:    pushl %edx
904; X86-NEXT:    pushl %esi
905; X86-NEXT:    pushl %esi
906; X86-NEXT:    pushl %ebx
907; X86-NEXT:    pushl %ecx
908; X86-NEXT:    pushl %eax
909; X86-NEXT:    calll __modti3
910; X86-NEXT:    addl $32, %esp
911; X86-NEXT:    leal {{[0-9]+}}(%esp), %edi
912; X86-NEXT:    movl 28(%ebp), %ebx
913; X86-NEXT:    movl %ebx, %edx
914; X86-NEXT:    sarl $31, %edx
915; X86-NEXT:    movl 12(%ebp), %esi
916; X86-NEXT:    movl %esi, %ecx
917; X86-NEXT:    sarl $31, %ecx
918; X86-NEXT:    addl %esi, %esi
919; X86-NEXT:    adcl %ecx, %ecx
920; X86-NEXT:    andl $1, %ecx
921; X86-NEXT:    negl %ecx
922; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
923; X86-NEXT:    movl %ecx, %eax
924; X86-NEXT:    shldl $31, %esi, %eax
925; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
926; X86-NEXT:    shll $31, %esi
927; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
928; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
929; X86-NEXT:    pushl %edx
930; X86-NEXT:    pushl %edx
931; X86-NEXT:    pushl %edx
932; X86-NEXT:    pushl %ebx
933; X86-NEXT:    pushl %ecx
934; X86-NEXT:    pushl %ecx
935; X86-NEXT:    pushl %eax
936; X86-NEXT:    pushl %esi
937; X86-NEXT:    pushl %edi
938; X86-NEXT:    calll __divti3
939; X86-NEXT:    addl $32, %esp
940; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
941; X86-NEXT:    movl 32(%ebp), %edx
942; X86-NEXT:    movl %edx, %esi
943; X86-NEXT:    sarl $31, %esi
944; X86-NEXT:    movl 16(%ebp), %ecx
945; X86-NEXT:    movl %ecx, %ebx
946; X86-NEXT:    sarl $31, %ebx
947; X86-NEXT:    addl %ecx, %ecx
948; X86-NEXT:    adcl %ebx, %ebx
949; X86-NEXT:    andl $1, %ebx
950; X86-NEXT:    negl %ebx
951; X86-NEXT:    movl %ebx, %edi
952; X86-NEXT:    shldl $31, %ecx, %edi
953; X86-NEXT:    shll $31, %ecx
954; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
955; X86-NEXT:    pushl %esi
956; X86-NEXT:    pushl %esi
957; X86-NEXT:    pushl %esi
958; X86-NEXT:    pushl %edx
959; X86-NEXT:    pushl %ebx
960; X86-NEXT:    pushl %ebx
961; X86-NEXT:    pushl %edi
962; X86-NEXT:    pushl %ecx
963; X86-NEXT:    pushl %eax
964; X86-NEXT:    calll __modti3
965; X86-NEXT:    addl $32, %esp
966; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
967; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
968; X86-NEXT:    pushl %esi
969; X86-NEXT:    pushl %esi
970; X86-NEXT:    pushl %esi
971; X86-NEXT:    pushl 32(%ebp)
972; X86-NEXT:    pushl %ebx
973; X86-NEXT:    pushl %ebx
974; X86-NEXT:    pushl %edi
975; X86-NEXT:    pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
976; X86-NEXT:    pushl %eax
977; X86-NEXT:    calll __divti3
978; X86-NEXT:    addl $32, %esp
979; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
980; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
981; X86-NEXT:    pushl %ecx
982; X86-NEXT:    pushl %ecx
983; X86-NEXT:    pushl %ecx
984; X86-NEXT:    pushl 40(%ebp)
985; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
986; X86-NEXT:    pushl %ecx
987; X86-NEXT:    pushl %ecx
988; X86-NEXT:    pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
989; X86-NEXT:    pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
990; X86-NEXT:    pushl %eax
991; X86-NEXT:    calll __divti3
992; X86-NEXT:    addl $32, %esp
993; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
994; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
995; X86-NEXT:    pushl %ecx
996; X86-NEXT:    pushl %ecx
997; X86-NEXT:    pushl %ecx
998; X86-NEXT:    pushl 36(%ebp)
999; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
1000; X86-NEXT:    pushl %ecx
1001; X86-NEXT:    pushl %ecx
1002; X86-NEXT:    movl %ecx, %edi
1003; X86-NEXT:    pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
1004; X86-NEXT:    pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
1005; X86-NEXT:    pushl %eax
1006; X86-NEXT:    calll __divti3
1007; X86-NEXT:    addl $32, %esp
1008; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
1009; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1010; X86-NEXT:    subl $1, %eax
1011; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1012; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
1013; X86-NEXT:    movl %esi, %ecx
1014; X86-NEXT:    sbbl $0, %ecx
1015; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
1016; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1017; X86-NEXT:    sbbl $0, %eax
1018; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1019; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
1020; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1021; X86-NEXT:    sbbl $0, %eax
1022; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1023; X86-NEXT:    testl %edi, %edi
1024; X86-NEXT:    sets %al
1025; X86-NEXT:    cmpl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
1026; X86-NEXT:    sets %ah
1027; X86-NEXT:    xorb %al, %ah
1028; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
1029; X86-NEXT:    orl {{[0-9]+}}(%esp), %edi
1030; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
1031; X86-NEXT:    orl {{[0-9]+}}(%esp), %edx
1032; X86-NEXT:    orl %edi, %edx
1033; X86-NEXT:    setne %al
1034; X86-NEXT:    testb %ah, %al
1035; X86-NEXT:    cmovel %esi, %ecx
1036; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1037; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
1038; X86-NEXT:    cmovel {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
1039; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1040; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
1041; X86-NEXT:    cmovel {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
1042; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1043; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
1044; X86-NEXT:    cmovel {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
1045; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1046; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
1047; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1048; X86-NEXT:    subl $1, %eax
1049; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1050; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
1051; X86-NEXT:    movl %edx, %ecx
1052; X86-NEXT:    sbbl $0, %ecx
1053; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
1054; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1055; X86-NEXT:    sbbl $0, %eax
1056; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1057; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
1058; X86-NEXT:    movl %edi, %eax
1059; X86-NEXT:    sbbl $0, %eax
1060; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1061; X86-NEXT:    testl %ebx, %ebx
1062; X86-NEXT:    sets %bl
1063; X86-NEXT:    cmpl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
1064; X86-NEXT:    sets %bh
1065; X86-NEXT:    xorb %bl, %bh
1066; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
1067; X86-NEXT:    orl {{[0-9]+}}(%esp), %esi
1068; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
1069; X86-NEXT:    orl {{[0-9]+}}(%esp), %eax
1070; X86-NEXT:    orl %esi, %eax
1071; X86-NEXT:    setne %al
1072; X86-NEXT:    testb %bh, %al
1073; X86-NEXT:    cmovel %edx, %ecx
1074; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1075; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
1076; X86-NEXT:    cmovel {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
1077; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1078; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
1079; X86-NEXT:    cmovel {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
1080; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1081; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
1082; X86-NEXT:    cmovel %edi, %eax
1083; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1084; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
1085; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1086; X86-NEXT:    subl $1, %eax
1087; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1088; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
1089; X86-NEXT:    movl %edi, %eax
1090; X86-NEXT:    sbbl $0, %eax
1091; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1092; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
1093; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1094; X86-NEXT:    sbbl $0, %eax
1095; X86-NEXT:    movl %eax, %esi
1096; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
1097; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1098; X86-NEXT:    sbbl $0, %eax
1099; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1100; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
1101; X86-NEXT:    testl %ecx, %ecx
1102; X86-NEXT:    sets %al
1103; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
1104; X86-NEXT:    testl %edx, %edx
1105; X86-NEXT:    sets %bl
1106; X86-NEXT:    xorb %al, %bl
1107; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
1108; X86-NEXT:    pushl %edx
1109; X86-NEXT:    pushl %edx
1110; X86-NEXT:    pushl %edx
1111; X86-NEXT:    pushl 28(%ebp)
1112; X86-NEXT:    pushl %ecx
1113; X86-NEXT:    pushl %ecx
1114; X86-NEXT:    pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
1115; X86-NEXT:    pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
1116; X86-NEXT:    pushl %eax
1117; X86-NEXT:    calll __modti3
1118; X86-NEXT:    addl $32, %esp
1119; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
1120; X86-NEXT:    orl {{[0-9]+}}(%esp), %eax
1121; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
1122; X86-NEXT:    orl {{[0-9]+}}(%esp), %ecx
1123; X86-NEXT:    orl %eax, %ecx
1124; X86-NEXT:    setne %al
1125; X86-NEXT:    testb %bl, %al
1126; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
1127; X86-NEXT:    cmovel %edi, %eax
1128; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1129; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
1130; X86-NEXT:    cmovel {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
1131; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1132; X86-NEXT:    cmovel {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
1133; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1134; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
1135; X86-NEXT:    cmovel {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
1136; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1137; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
1138; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1139; X86-NEXT:    subl $1, %edx
1140; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
1141; X86-NEXT:    movl %esi, %eax
1142; X86-NEXT:    sbbl $0, %eax
1143; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1144; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
1145; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1146; X86-NEXT:    sbbl $0, %eax
1147; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1148; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
1149; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1150; X86-NEXT:    sbbl $0, %ecx
1151; X86-NEXT:    cmpl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
1152; X86-NEXT:    sets %bl
1153; X86-NEXT:    cmpl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
1154; X86-NEXT:    sets %bh
1155; X86-NEXT:    xorb %bl, %bh
1156; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
1157; X86-NEXT:    orl {{[0-9]+}}(%esp), %eax
1158; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
1159; X86-NEXT:    orl {{[0-9]+}}(%esp), %edi
1160; X86-NEXT:    orl %eax, %edi
1161; X86-NEXT:    setne %al
1162; X86-NEXT:    testb %bh, %al
1163; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
1164; X86-NEXT:    cmovel %esi, %eax
1165; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1166; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
1167; X86-NEXT:    cmovel {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
1168; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1169; X86-NEXT:    cmovel {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
1170; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1171; X86-NEXT:    cmovel {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
1172; X86-NEXT:    testl %ecx, %ecx
1173; X86-NEXT:    movl $0, %eax
1174; X86-NEXT:    cmovsl %ecx, %eax
1175; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1176; X86-NEXT:    movl $-1, %eax
1177; X86-NEXT:    cmovsl %edx, %eax
1178; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1179; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
1180; X86-NEXT:    movl %eax, %edx
1181; X86-NEXT:    sarl $31, %edx
1182; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1183; X86-NEXT:    movl %edx, %esi
1184; X86-NEXT:    andl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
1185; X86-NEXT:    testl %eax, %eax
1186; X86-NEXT:    cmovel %eax, %esi
1187; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1188; X86-NEXT:    movl $0, %edx
1189; X86-NEXT:    cmovsl %eax, %edx
1190; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1191; X86-NEXT:    movl $-1, %eax
1192; X86-NEXT:    cmovsl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
1193; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1194; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
1195; X86-NEXT:    movl %eax, %ebx
1196; X86-NEXT:    sarl $31, %ebx
1197; X86-NEXT:    movl %ebx, %edx
1198; X86-NEXT:    andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
1199; X86-NEXT:    testl %eax, %eax
1200; X86-NEXT:    cmovel %eax, %edx
1201; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1202; X86-NEXT:    movl $0, %edx
1203; X86-NEXT:    cmovsl %eax, %edx
1204; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1205; X86-NEXT:    movl $-1, %eax
1206; X86-NEXT:    cmovsl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
1207; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1208; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
1209; X86-NEXT:    movl %eax, %edi
1210; X86-NEXT:    sarl $31, %edi
1211; X86-NEXT:    movl %edi, %edx
1212; X86-NEXT:    andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
1213; X86-NEXT:    testl %eax, %eax
1214; X86-NEXT:    cmovel %eax, %edx
1215; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1216; X86-NEXT:    movl $0, %esi
1217; X86-NEXT:    cmovsl %eax, %esi
1218; X86-NEXT:    movl $-1, %eax
1219; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
1220; X86-NEXT:    cmovsl %edx, %eax
1221; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1222; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1223; X86-NEXT:    movl %ecx, %eax
1224; X86-NEXT:    sarl $31, %eax
1225; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1226; X86-NEXT:    andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
1227; X86-NEXT:    testl %ecx, %ecx
1228; X86-NEXT:    cmovel %ecx, %eax
1229; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1230; X86-NEXT:    movl %edx, %ecx
1231; X86-NEXT:    cmpl $-1, %edx
1232; X86-NEXT:    movl $-1, %eax
1233; X86-NEXT:    cmovael %eax, %ecx
1234; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
1235; X86-NEXT:    cmpl $1, %edx
1236; X86-NEXT:    movl $0, %eax
1237; X86-NEXT:    sbbl %eax, %eax
1238; X86-NEXT:    notl %eax
1239; X86-NEXT:    orl %ecx, %eax
1240; X86-NEXT:    testl %edx, %edx
1241; X86-NEXT:    movl $0, %ecx
1242; X86-NEXT:    cmovbl %edx, %ecx
1243; X86-NEXT:    andl %edx, %edi
1244; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
1245; X86-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
1246; X86-NEXT:    cmovel %ecx, %edi
1247; X86-NEXT:    cmovnel {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
1248; X86-NEXT:    testl %eax, %eax
1249; X86-NEXT:    movl $0, %ecx
1250; X86-NEXT:    cmoval %eax, %ecx
1251; X86-NEXT:    cmpl $-1, %edi
1252; X86-NEXT:    movl $0, %edx
1253; X86-NEXT:    cmovnel %edx, %ecx
1254; X86-NEXT:    testl %esi, %esi
1255; X86-NEXT:    movl $-1, %edx
1256; X86-NEXT:    cmovsl %edx, %edi
1257; X86-NEXT:    movl $0, %edx
1258; X86-NEXT:    cmovsl %edx, %eax
1259; X86-NEXT:    andl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
1260; X86-NEXT:    cmpl $-1, %esi
1261; X86-NEXT:    cmovel %ecx, %eax
1262; X86-NEXT:    cmovnel %edi, %esi
1263; X86-NEXT:    shldl $31, %eax, %esi
1264; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1265; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
1266; X86-NEXT:    cmpl $-1, %eax
1267; X86-NEXT:    movl $-1, %ecx
1268; X86-NEXT:    cmovael %ecx, %eax
1269; X86-NEXT:    movl %eax, %ecx
1270; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
1271; X86-NEXT:    cmpl $1, %esi
1272; X86-NEXT:    movl $0, %eax
1273; X86-NEXT:    sbbl %eax, %eax
1274; X86-NEXT:    notl %eax
1275; X86-NEXT:    orl %ecx, %eax
1276; X86-NEXT:    testl %esi, %esi
1277; X86-NEXT:    movl $0, %ecx
1278; X86-NEXT:    cmovbl %esi, %ecx
1279; X86-NEXT:    andl %esi, %ebx
1280; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
1281; X86-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
1282; X86-NEXT:    cmovel %ecx, %ebx
1283; X86-NEXT:    cmovnel {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
1284; X86-NEXT:    testl %eax, %eax
1285; X86-NEXT:    movl $0, %ecx
1286; X86-NEXT:    cmoval %eax, %ecx
1287; X86-NEXT:    cmpl $-1, %ebx
1288; X86-NEXT:    movl $0, %edi
1289; X86-NEXT:    cmovnel %edi, %ecx
1290; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
1291; X86-NEXT:    testl %esi, %esi
1292; X86-NEXT:    movl $-1, %edx
1293; X86-NEXT:    cmovsl %edx, %ebx
1294; X86-NEXT:    cmovsl %edi, %eax
1295; X86-NEXT:    andl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
1296; X86-NEXT:    cmpl $-1, %esi
1297; X86-NEXT:    cmovel %ecx, %eax
1298; X86-NEXT:    cmovnel %ebx, %esi
1299; X86-NEXT:    shldl $31, %eax, %esi
1300; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1301; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
1302; X86-NEXT:    cmpl $-1, %eax
1303; X86-NEXT:    cmovael %edx, %eax
1304; X86-NEXT:    movl $-1, %ebx
1305; X86-NEXT:    movl %eax, %ecx
1306; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
1307; X86-NEXT:    cmpl $1, %edx
1308; X86-NEXT:    movl $0, %eax
1309; X86-NEXT:    sbbl %eax, %eax
1310; X86-NEXT:    notl %eax
1311; X86-NEXT:    orl %ecx, %eax
1312; X86-NEXT:    testl %edx, %edx
1313; X86-NEXT:    movl $0, %ecx
1314; X86-NEXT:    cmovbl %edx, %ecx
1315; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
1316; X86-NEXT:    andl %edx, %edi
1317; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
1318; X86-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
1319; X86-NEXT:    cmovel %ecx, %edi
1320; X86-NEXT:    cmovnel {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
1321; X86-NEXT:    testl %eax, %eax
1322; X86-NEXT:    movl $0, %ecx
1323; X86-NEXT:    cmoval %eax, %ecx
1324; X86-NEXT:    cmpl $-1, %edi
1325; X86-NEXT:    movl $0, %edx
1326; X86-NEXT:    cmovnel %edx, %ecx
1327; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
1328; X86-NEXT:    testl %esi, %esi
1329; X86-NEXT:    cmovsl %ebx, %edi
1330; X86-NEXT:    cmovsl %edx, %eax
1331; X86-NEXT:    andl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
1332; X86-NEXT:    cmpl $-1, %esi
1333; X86-NEXT:    cmovel %ecx, %eax
1334; X86-NEXT:    cmovnel %edi, %esi
1335; X86-NEXT:    shldl $31, %eax, %esi
1336; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1337; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
1338; X86-NEXT:    cmpl $-1, %eax
1339; X86-NEXT:    cmovael %ebx, %eax
1340; X86-NEXT:    movl $-1, %esi
1341; X86-NEXT:    movl %eax, %ecx
1342; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
1343; X86-NEXT:    cmpl $1, %edx
1344; X86-NEXT:    movl $0, %eax
1345; X86-NEXT:    sbbl %eax, %eax
1346; X86-NEXT:    notl %eax
1347; X86-NEXT:    orl %ecx, %eax
1348; X86-NEXT:    testl %edx, %edx
1349; X86-NEXT:    movl $0, %ecx
1350; X86-NEXT:    cmovbl %edx, %ecx
1351; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
1352; X86-NEXT:    andl %edx, %ebx
1353; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
1354; X86-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
1355; X86-NEXT:    cmovel %ecx, %ebx
1356; X86-NEXT:    cmovnel {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
1357; X86-NEXT:    testl %eax, %eax
1358; X86-NEXT:    movl $0, %ecx
1359; X86-NEXT:    cmoval %eax, %ecx
1360; X86-NEXT:    cmpl $-1, %ebx
1361; X86-NEXT:    movl $0, %edi
1362; X86-NEXT:    cmovnel %edi, %ecx
1363; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
1364; X86-NEXT:    testl %edx, %edx
1365; X86-NEXT:    cmovsl %esi, %ebx
1366; X86-NEXT:    movl %ebx, %esi
1367; X86-NEXT:    cmovsl %edi, %eax
1368; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
1369; X86-NEXT:    andl %edx, %ebx
1370; X86-NEXT:    cmpl $-1, %ebx
1371; X86-NEXT:    cmovel %ecx, %eax
1372; X86-NEXT:    cmovnel %esi, %ebx
1373; X86-NEXT:    shldl $31, %eax, %ebx
1374; X86-NEXT:    movl 8(%ebp), %eax
1375; X86-NEXT:    movl %ebx, 12(%eax)
1376; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
1377; X86-NEXT:    movl %ecx, 8(%eax)
1378; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
1379; X86-NEXT:    movl %ecx, 4(%eax)
1380; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
1381; X86-NEXT:    movl %ecx, (%eax)
1382; X86-NEXT:    leal -12(%ebp), %esp
1383; X86-NEXT:    popl %esi
1384; X86-NEXT:    popl %edi
1385; X86-NEXT:    popl %ebx
1386; X86-NEXT:    popl %ebp
1387; X86-NEXT:    retl $4
1388  %tmp = call <4 x i32> @llvm.sdiv.fix.sat.v4i32(<4 x i32> %x, <4 x i32> %y, i32 31)
1389  ret <4 x i32> %tmp
1390}
1391