; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE42
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1

; If we have SSE/AVX intrinsics in the code, we miss obvious combines
; unless we do them late on X86-specific nodes.

declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>)

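; The icmp sgt + select below computes smax(%a, %b), the same value produced by
; the pmaxsd intrinsic, so the equality compare should fold to an all-ones vector.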
define <4 x i32> @PR27924_cmpeq(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: PR27924_cmpeq:
; SSE:       # BB#0:
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: PR27924_cmpeq:
; AVX:       # BB#0:
; AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
;
  %cmp = icmp sgt <4 x i32> %a, %b
  %max = select <4 x i1> %cmp, <4 x i32> %a, <4 x i32> %b
  %sse_max = tail call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %a, <4 x i32> %b)
  %truth = icmp eq <4 x i32> %max, %sse_max
  %ret = sext <4 x i1> %truth to <4 x i32>
  ret <4 x i32> %ret
}

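; Same max computed both ways, so the signed greater-than compare can never be
; true and should fold to an all-zeros vector.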
define <4 x i32> @PR27924_cmpgt(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: PR27924_cmpgt:
; SSE:       # BB#0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: PR27924_cmpgt:
; AVX:       # BB#0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
;
  %cmp = icmp sgt <4 x i32> %a, %b
  %max = select <4 x i1> %cmp, <4 x i32> %a, <4 x i32> %b
  %sse_max = tail call <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32> %a, <4 x i32> %b)
  %untruth = icmp sgt <4 x i32> %max, %sse_max
  %ret = sext <4 x i1> %untruth to <4 x i32>
  ret <4 x i32> %ret
}