; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X64

; truncate v2i64 to v2i32

define void @convert_v2i64_to_v2i32(<2 x i32>* %dst.addr, <2 x i64> %src) nounwind {
; X86-LABEL: convert_v2i64_to_v2i32:
; X86:       # BB#0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    paddd .LCPI0_0, %xmm0
; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-NEXT:    movq %xmm0, (%eax)
; X86-NEXT:    retl
;
; X64-LABEL: convert_v2i64_to_v2i32:
; X64:       # BB#0: # %entry
; X64-NEXT:    paddd {{.*}}(%rip), %xmm0
; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-NEXT:    movq %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %val = trunc <2 x i64> %src to <2 x i32>
  %add = add <2 x i32> %val, < i32 1, i32 1 >
  store <2 x i32> %add, <2 x i32>* %dst.addr
  ret void
}

; truncate v3i32 to v3i8

define void @convert_v3i32_to_v3i8(<3 x i8>* %dst.addr, <3 x i32>* %src.addr) nounwind {
; X86-LABEL: convert_v3i32_to_v3i8:
; X86:       # BB#0: # %entry
; X86-NEXT:    pushl %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movdqa (%ecx), %xmm0
; X86-NEXT:    paddd .LCPI1_0, %xmm0
; X86-NEXT:    pextrb $8, %xmm0, 2(%eax)
; X86-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; X86-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; X86-NEXT:    movd %xmm0, %ecx
; X86-NEXT:    movw %cx, (%eax)
; X86-NEXT:    popl %eax
; X86-NEXT:    retl
;
; X64-LABEL: convert_v3i32_to_v3i8:
; X64:       # BB#0: # %entry
; X64-NEXT:    movdqa (%rsi), %xmm0
; X64-NEXT:    paddd {{.*}}(%rip), %xmm0
; X64-NEXT:    pextrb $8, %xmm0, 2(%rdi)
; X64-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; X64-NEXT:    pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; X64-NEXT:    movd %xmm0, %eax
; X64-NEXT:    movw %ax, (%rdi)
; X64-NEXT:    retq
entry:
  %load = load <3 x i32>, <3 x i32>* %src.addr
  %val = trunc <3 x i32> %load to <3 x i8>
  %add = add <3 x i8> %val, < i8 1, i8 1, i8 1 >
  store <3 x i8> %add, <3 x i8>* %dst.addr
  ret void
}

; truncate v5i16 to v5i8

define void @convert_v5i16_to_v5i8(<5 x i8>* %dst.addr, <5 x i16>* %src.addr) nounwind {
; X86-LABEL: convert_v5i16_to_v5i8:
; X86:       # BB#0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 8(%ebp), %eax
; X86-NEXT:    movl 12(%ebp), %ecx
; X86-NEXT:    movdqa (%ecx), %xmm0
; X86-NEXT:    paddw .LCPI2_0, %xmm0
; X86-NEXT:    pextrb $8, %xmm0, 4(%eax)
; X86-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; X86-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; X86-NEXT:    movd %xmm0, (%eax)
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: convert_v5i16_to_v5i8:
; X64:       # BB#0: # %entry
; X64-NEXT:    movdqa (%rsi), %xmm0
; X64-NEXT:    paddw {{.*}}(%rip), %xmm0
; X64-NEXT:    pextrb $8, %xmm0, 4(%rdi)
; X64-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; X64-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; X64-NEXT:    movd %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %load = load <5 x i16>, <5 x i16>* %src.addr
  %val = trunc <5 x i16> %load to <5 x i8>
  %add = add <5 x i8> %val, < i8 1, i8 1, i8 1, i8 1, i8 1 >
  store <5 x i8> %add, <5 x i8>* %dst.addr
  ret void
}