; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE42
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+xop | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+xop | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512BW

;
; Equal
;
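; There is no packed i64 equality compare before SSE4.1, so the SSE2 lowering
; compares the 32-bit halves with pcmpeqd and merges them with pshufd+pand;
; SSE4.1+/AVX use pcmpeqq and XOP uses vpcomeqq.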

define <2 x i64> @eq_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: eq_v2i64:
; SSE2:       # BB#0:
; SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
; SSE2-NEXT:    pand %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: eq_v2i64:
; SSE41:       # BB#0:
; SSE41-NEXT:    pcmpeqq %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; SSE42-LABEL: eq_v2i64:
; SSE42:       # BB#0:
; SSE42-NEXT:    pcmpeqq %xmm1, %xmm0
; SSE42-NEXT:    retq
;
; AVX-LABEL: eq_v2i64:
; AVX:       # BB#0:
; AVX-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: eq_v2i64:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomeqq %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
  %1 = icmp eq <2 x i64> %a, %b
  %2 = sext <2 x i1> %1 to <2 x i64>
  ret <2 x i64> %2
}

define <4 x i32> @eq_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: eq_v4i32:
; SSE:       # BB#0:
; SSE-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: eq_v4i32:
; AVX:       # BB#0:
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: eq_v4i32:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomeqd %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
  %1 = icmp eq <4 x i32> %a, %b
  %2 = sext <4 x i1> %1 to <4 x i32>
  ret <4 x i32> %2
}

define <8 x i16> @eq_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: eq_v8i16:
; SSE:       # BB#0:
; SSE-NEXT:    pcmpeqw %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: eq_v8i16:
; AVX:       # BB#0:
; AVX-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: eq_v8i16:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomeqw %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
  %1 = icmp eq <8 x i16> %a, %b
  %2 = sext <8 x i1> %1 to <8 x i16>
  ret <8 x i16> %2
}

define <16 x i8> @eq_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: eq_v16i8:
; SSE:       # BB#0:
; SSE-NEXT:    pcmpeqb %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: eq_v16i8:
; AVX:       # BB#0:
; AVX-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: eq_v16i8:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomeqb %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
  %1 = icmp eq <16 x i8> %a, %b
  %2 = sext <16 x i1> %1 to <16 x i8>
  ret <16 x i8> %2
}

;
; Not Equal
;
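; x86 has no packed 'not equal' compare, so the lowering compares for equality
; and inverts the result by xoring with all-ones (pcmpeqd of a register with
; itself); only XOP has a direct vpcomneq.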

define <2 x i64> @ne_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: ne_v2i64:
; SSE2:       # BB#0:
; SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
; SSE2-NEXT:    pand %xmm1, %xmm0
; SSE2-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE2-NEXT:    pxor %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: ne_v2i64:
; SSE41:       # BB#0:
; SSE41-NEXT:    pcmpeqq %xmm1, %xmm0
; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE41-NEXT:    pxor %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; SSE42-LABEL: ne_v2i64:
; SSE42:       # BB#0:
; SSE42-NEXT:    pcmpeqq %xmm1, %xmm0
; SSE42-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE42-NEXT:    pxor %xmm1, %xmm0
; SSE42-NEXT:    retq
;
; AVX-LABEL: ne_v2i64:
; AVX:       # BB#0:
; AVX-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: ne_v2i64:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomneqq %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
  %1 = icmp ne <2 x i64> %a, %b
  %2 = sext <2 x i1> %1 to <2 x i64>
  ret <2 x i64> %2
}

define <4 x i32> @ne_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: ne_v4i32:
; SSE:       # BB#0:
; SSE-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE-NEXT:    pxor %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: ne_v4i32:
; AVX:       # BB#0:
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: ne_v4i32:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomneqd %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
  %1 = icmp ne <4 x i32> %a, %b
  %2 = sext <4 x i1> %1 to <4 x i32>
  ret <4 x i32> %2
}

define <8 x i16> @ne_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: ne_v8i16:
; SSE:       # BB#0:
; SSE-NEXT:    pcmpeqw %xmm1, %xmm0
; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE-NEXT:    pxor %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: ne_v8i16:
; AVX:       # BB#0:
; AVX-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: ne_v8i16:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomneqw %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
  %1 = icmp ne <8 x i16> %a, %b
  %2 = sext <8 x i1> %1 to <8 x i16>
  ret <8 x i16> %2
}

define <16 x i8> @ne_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: ne_v16i8:
; SSE:       # BB#0:
; SSE-NEXT:    pcmpeqb %xmm1, %xmm0
; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE-NEXT:    pxor %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: ne_v16i8:
; AVX:       # BB#0:
; AVX-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: ne_v16i8:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomneqb %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
  %1 = icmp ne <16 x i8> %a, %b
  %2 = sext <16 x i1> %1 to <16 x i8>
  ret <16 x i8> %2
}

;
; Greater Than Or Equal
;
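; Unsigned 'a >= b' is lowered as 'umax(a, b) == a' where pmaxu* exists
; (pmaxub on SSE2, pmaxuw/pmaxud on SSE4.1+); the SSE2 i16 case checks
; 'b -sat a == 0' with psubusw, and the i64/SSE2-i32 cases flip the sign bits,
; use a signed pcmpgt and invert the result. XOP has a direct vpcomgeu.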

define <2 x i64> @ge_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: ge_v2i64:
; SSE2:       # BB#0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    movdqa %xmm1, %xmm2
; SSE2-NEXT:    pcmpgtd %xmm0, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm0, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE2-NEXT:    pand %xmm3, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-NEXT:    por %xmm0, %xmm1
; SSE2-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE2-NEXT:    pxor %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: ge_v2i64:
; SSE41:       # BB#0:
; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT:    pxor %xmm2, %xmm0
; SSE41-NEXT:    pxor %xmm2, %xmm1
; SSE41-NEXT:    movdqa %xmm1, %xmm2
; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE41-NEXT:    pand %xmm3, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE41-NEXT:    por %xmm0, %xmm1
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE41-NEXT:    pxor %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; SSE42-LABEL: ge_v2i64:
; SSE42:       # BB#0:
; SSE42-NEXT:    movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT:    pxor %xmm2, %xmm0
; SSE42-NEXT:    pxor %xmm1, %xmm2
; SSE42-NEXT:    pcmpgtq %xmm0, %xmm2
; SSE42-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE42-NEXT:    pxor %xmm2, %xmm0
; SSE42-NEXT:    retq
;
; AVX-LABEL: ge_v2i64:
; AVX:       # BB#0:
; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpxor %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: ge_v2i64:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomgeuq %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
  %1 = icmp uge <2 x i64> %a, %b
  %2 = sext <2 x i1> %1 to <2 x i64>
  ret <2 x i64> %2
}

define <4 x i32> @ge_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: ge_v4i32:
; SSE2:       # BB#0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    pxor %xmm1, %xmm2
; SSE2-NEXT:    pcmpgtd %xmm0, %xmm2
; SSE2-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: ge_v4i32:
; SSE41:       # BB#0:
; SSE41-NEXT:    pmaxud %xmm0, %xmm1
; SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; SSE42-LABEL: ge_v4i32:
; SSE42:       # BB#0:
; SSE42-NEXT:    pmaxud %xmm0, %xmm1
; SSE42-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE42-NEXT:    retq
;
; AVX-LABEL: ge_v4i32:
; AVX:       # BB#0:
; AVX-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: ge_v4i32:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomgeud %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
  %1 = icmp uge <4 x i32> %a, %b
  %2 = sext <4 x i1> %1 to <4 x i32>
  ret <4 x i32> %2
}

define <8 x i16> @ge_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-LABEL: ge_v8i16:
; SSE2:       # BB#0:
; SSE2-NEXT:    psubusw %xmm0, %xmm1
; SSE2-NEXT:    pxor %xmm0, %xmm0
; SSE2-NEXT:    pcmpeqw %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: ge_v8i16:
; SSE41:       # BB#0:
; SSE41-NEXT:    pmaxuw %xmm0, %xmm1
; SSE41-NEXT:    pcmpeqw %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; SSE42-LABEL: ge_v8i16:
; SSE42:       # BB#0:
; SSE42-NEXT:    pmaxuw %xmm0, %xmm1
; SSE42-NEXT:    pcmpeqw %xmm1, %xmm0
; SSE42-NEXT:    retq
;
; AVX-LABEL: ge_v8i16:
; AVX:       # BB#0:
; AVX-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm1
; AVX-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: ge_v8i16:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomgeuw %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
  %1 = icmp uge <8 x i16> %a, %b
  %2 = sext <8 x i1> %1 to <8 x i16>
  ret <8 x i16> %2
}

define <16 x i8> @ge_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: ge_v16i8:
; SSE:       # BB#0:
; SSE-NEXT:    pmaxub %xmm0, %xmm1
; SSE-NEXT:    pcmpeqb %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: ge_v16i8:
; AVX:       # BB#0:
; AVX-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
; AVX-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: ge_v16i8:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomgeub %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
  %1 = icmp uge <16 x i8> %a, %b
  %2 = sext <16 x i1> %1 to <16 x i8>
  ret <16 x i8> %2
}

;
; Greater Than
;
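; There is no unsigned pcmpgt, so both operands get their sign bit flipped with
; pxor (0x80... per element) and a signed pcmpgt is used; XOP has a direct
; vpcomgtu.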

define <2 x i64> @gt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: gt_v2i64:
; SSE2:       # BB#0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE2-NEXT:    pand %xmm3, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE2-NEXT:    por %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: gt_v2i64:
; SSE41:       # BB#0:
; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT:    pxor %xmm2, %xmm1
; SSE41-NEXT:    pxor %xmm2, %xmm0
; SSE41-NEXT:    movdqa %xmm0, %xmm2
; SSE41-NEXT:    pcmpgtd %xmm1, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT:    pand %xmm3, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE41-NEXT:    por %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; SSE42-LABEL: gt_v2i64:
; SSE42:       # BB#0:
; SSE42-NEXT:    movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT:    pxor %xmm2, %xmm1
; SSE42-NEXT:    pxor %xmm2, %xmm0
; SSE42-NEXT:    pcmpgtq %xmm1, %xmm0
; SSE42-NEXT:    retq
;
; AVX-LABEL: gt_v2i64:
; AVX:       # BB#0:
; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX-NEXT:    vpxor %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: gt_v2i64:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomgtuq %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
  %1 = icmp ugt <2 x i64> %a, %b
  %2 = sext <2 x i1> %1 to <2 x i64>
  ret <2 x i64> %2
}

define <4 x i32> @gt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: gt_v4i32:
; SSE:       # BB#0:
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE-NEXT:    pxor %xmm2, %xmm1
; SSE-NEXT:    pxor %xmm2, %xmm0
; SSE-NEXT:    pcmpgtd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: gt_v4i32:
; AVX1:       # BB#0:
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: gt_v4i32:
; AVX2:       # BB#0:
; AVX2-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm2
; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; XOP-LABEL: gt_v4i32:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomgtud %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
;
; AVX512-LABEL: gt_v4i32:
; AVX512:       # BB#0:
; AVX512-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm2
; AVX512-NEXT:    vpxor %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %1 = icmp ugt <4 x i32> %a, %b
  %2 = sext <4 x i1> %1 to <4 x i32>
  ret <4 x i32> %2
}

define <8 x i16> @gt_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: gt_v8i16:
; SSE:       # BB#0:
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE-NEXT:    pxor %xmm2, %xmm1
; SSE-NEXT:    pxor %xmm2, %xmm0
; SSE-NEXT:    pcmpgtw %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: gt_v8i16:
; AVX:       # BB#0:
; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; AVX-NEXT:    vpxor %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: gt_v8i16:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomgtuw %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
  %1 = icmp ugt <8 x i16> %a, %b
  %2 = sext <8 x i1> %1 to <8 x i16>
  ret <8 x i16> %2
}

define <16 x i8> @gt_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: gt_v16i8:
; SSE:       # BB#0:
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; SSE-NEXT:    pxor %xmm2, %xmm1
; SSE-NEXT:    pxor %xmm2, %xmm0
; SSE-NEXT:    pcmpgtb %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: gt_v16i8:
; AVX:       # BB#0:
; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX-NEXT:    vpxor %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: gt_v16i8:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomgtub %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
  %1 = icmp ugt <16 x i8> %a, %b
  %2 = sext <16 x i1> %1 to <16 x i8>
  ret <16 x i8> %2
}

;
; Less Than Or Equal
;
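; Unsigned 'a <= b' is lowered as 'umin(a, b) == a' where pminu* exists
; (pminub on SSE2, pminuw/pminud on SSE4.1+); the SSE2 i16 case checks
; 'a -sat b == 0' with psubusw, and the i64/SSE2-i32 cases flip the sign bits,
; use a signed pcmpgt and invert the result. XOP has a direct vpcomleu.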

define <2 x i64> @le_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: le_v2i64:
; SSE2:       # BB#0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT:    pand %xmm3, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-NEXT:    por %xmm0, %xmm1
; SSE2-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE2-NEXT:    pxor %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: le_v2i64:
; SSE41:       # BB#0:
; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT:    pxor %xmm2, %xmm1
; SSE41-NEXT:    pxor %xmm2, %xmm0
; SSE41-NEXT:    movdqa %xmm0, %xmm2
; SSE41-NEXT:    pcmpgtd %xmm1, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE41-NEXT:    pand %xmm3, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE41-NEXT:    por %xmm0, %xmm1
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE41-NEXT:    pxor %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; SSE42-LABEL: le_v2i64:
; SSE42:       # BB#0:
; SSE42-NEXT:    movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT:    pxor %xmm2, %xmm1
; SSE42-NEXT:    pxor %xmm2, %xmm0
; SSE42-NEXT:    pcmpgtq %xmm1, %xmm0
; SSE42-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE42-NEXT:    pxor %xmm1, %xmm0
; SSE42-NEXT:    retq
;
; AVX-LABEL: le_v2i64:
; AVX:       # BB#0:
; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX-NEXT:    vpxor %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: le_v2i64:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomleuq %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
  %1 = icmp ule <2 x i64> %a, %b
  %2 = sext <2 x i1> %1 to <2 x i64>
  ret <2 x i64> %2
}

define <4 x i32> @le_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: le_v4i32:
; SSE2:       # BB#0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    pcmpgtd %xmm1, %xmm0
; SSE2-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE2-NEXT:    pxor %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: le_v4i32:
; SSE41:       # BB#0:
; SSE41-NEXT:    pminud %xmm0, %xmm1
; SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; SSE42-LABEL: le_v4i32:
; SSE42:       # BB#0:
; SSE42-NEXT:    pminud %xmm0, %xmm1
; SSE42-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE42-NEXT:    retq
;
; AVX-LABEL: le_v4i32:
; AVX:       # BB#0:
; AVX-NEXT:    vpminud %xmm1, %xmm0, %xmm1
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: le_v4i32:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomleud %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
  %1 = icmp ule <4 x i32> %a, %b
  %2 = sext <4 x i1> %1 to <4 x i32>
  ret <4 x i32> %2
}

define <8 x i16> @le_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-LABEL: le_v8i16:
; SSE2:       # BB#0:
; SSE2-NEXT:    psubusw %xmm1, %xmm0
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    pcmpeqw %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: le_v8i16:
; SSE41:       # BB#0:
; SSE41-NEXT:    pminuw %xmm0, %xmm1
; SSE41-NEXT:    pcmpeqw %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; SSE42-LABEL: le_v8i16:
; SSE42:       # BB#0:
; SSE42-NEXT:    pminuw %xmm0, %xmm1
; SSE42-NEXT:    pcmpeqw %xmm1, %xmm0
; SSE42-NEXT:    retq
;
; AVX-LABEL: le_v8i16:
; AVX:       # BB#0:
; AVX-NEXT:    vpminuw %xmm1, %xmm0, %xmm1
; AVX-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: le_v8i16:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomleuw %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
  %1 = icmp ule <8 x i16> %a, %b
  %2 = sext <8 x i1> %1 to <8 x i16>
  ret <8 x i16> %2
}

define <16 x i8> @le_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: le_v16i8:
; SSE:       # BB#0:
; SSE-NEXT:    pminub %xmm0, %xmm1
; SSE-NEXT:    pcmpeqb %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: le_v16i8:
; AVX:       # BB#0:
; AVX-NEXT:    vpminub %xmm1, %xmm0, %xmm1
; AVX-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: le_v16i8:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomleub %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
  %1 = icmp ule <16 x i8> %a, %b
  %2 = sext <16 x i1> %1 to <16 x i8>
  ret <16 x i8> %2
}

;
; Less Than
;
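; Unsigned 'a < b' flips the sign bits of both operands and evaluates the
; signed compare with the operands swapped (b > a); XOP has a direct vpcomltu.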

define <2 x i64> @lt_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: lt_v2i64:
; SSE2:       # BB#0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    movdqa %xmm1, %xmm2
; SSE2-NEXT:    pcmpgtd %xmm0, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm0, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pand %xmm3, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE2-NEXT:    por %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: lt_v2i64:
; SSE41:       # BB#0:
; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE41-NEXT:    pxor %xmm2, %xmm0
; SSE41-NEXT:    pxor %xmm2, %xmm1
; SSE41-NEXT:    movdqa %xmm1, %xmm2
; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE41-NEXT:    pand %xmm3, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE41-NEXT:    por %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; SSE42-LABEL: lt_v2i64:
; SSE42:       # BB#0:
; SSE42-NEXT:    movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT:    pxor %xmm2, %xmm0
; SSE42-NEXT:    pxor %xmm1, %xmm2
; SSE42-NEXT:    pcmpgtq %xmm0, %xmm2
; SSE42-NEXT:    movdqa %xmm2, %xmm0
; SSE42-NEXT:    retq
;
; AVX-LABEL: lt_v2i64:
; AVX:       # BB#0:
; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpxor %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: lt_v2i64:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomltuq %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
  %1 = icmp ult <2 x i64> %a, %b
  %2 = sext <2 x i1> %1 to <2 x i64>
  ret <2 x i64> %2
}

define <4 x i32> @lt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE-LABEL: lt_v4i32:
; SSE:       # BB#0:
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE-NEXT:    pxor %xmm2, %xmm0
; SSE-NEXT:    pxor %xmm1, %xmm2
; SSE-NEXT:    pcmpgtd %xmm0, %xmm2
; SSE-NEXT:    movdqa %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: lt_v4i32:
; AVX1:       # BB#0:
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: lt_v4i32:
; AVX2:       # BB#0:
; AVX2-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm2
; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm0
; AVX2-NEXT:    retq
;
; XOP-LABEL: lt_v4i32:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomltud %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
;
; AVX512-LABEL: lt_v4i32:
; AVX512:       # BB#0:
; AVX512-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm2
; AVX512-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpxor %xmm2, %xmm1, %xmm1
; AVX512-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    retq
  %1 = icmp ult <4 x i32> %a, %b
  %2 = sext <4 x i1> %1 to <4 x i32>
  ret <4 x i32> %2
}

define <8 x i16> @lt_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE-LABEL: lt_v8i16:
; SSE:       # BB#0:
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE-NEXT:    pxor %xmm2, %xmm0
; SSE-NEXT:    pxor %xmm1, %xmm2
; SSE-NEXT:    pcmpgtw %xmm0, %xmm2
; SSE-NEXT:    movdqa %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: lt_v8i16:
; AVX:       # BB#0:
; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpxor %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpcmpgtw %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: lt_v8i16:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomltuw %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
  %1 = icmp ult <8 x i16> %a, %b
  %2 = sext <8 x i1> %1 to <8 x i16>
  ret <8 x i16> %2
}

define <16 x i8> @lt_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE-LABEL: lt_v16i8:
; SSE:       # BB#0:
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; SSE-NEXT:    pxor %xmm2, %xmm0
; SSE-NEXT:    pxor %xmm1, %xmm2
; SSE-NEXT:    pcmpgtb %xmm0, %xmm2
; SSE-NEXT:    movdqa %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: lt_v16i8:
; AVX:       # BB#0:
; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpxor %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
;
; XOP-LABEL: lt_v16i8:
; XOP:       # BB#0:
; XOP-NEXT:    vpcomltub %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
  %1 = icmp ult <16 x i8> %a, %b
  %2 = sext <16 x i1> %1 to <16 x i8>
  ret <16 x i8> %2
}