; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X64

; Widening shuffle of v3float and then an add.  The <3 x float> store must be
; split into partial/scalar stores (extractps/movss/movlps in the checks below)
; rather than emitted as a full 16-byte vector store.
define void @shuf(<3 x float>* %dst.addr, <3 x float> %src1,<3 x float> %src2) nounwind {
; X86-LABEL: shuf:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    addps %xmm1, %xmm0
; X86-NEXT:    extractps $2, %xmm0, 8(%eax)
; X86-NEXT:    extractps $1, %xmm0, 4(%eax)
; X86-NEXT:    movss %xmm0, (%eax)
; X86-NEXT:    retl
;
; X64-LABEL: shuf:
; X64:       # %bb.0: # %entry
; X64-NEXT:    addps %xmm1, %xmm0
; X64-NEXT:    extractps $2, %xmm0, 8(%rdi)
; X64-NEXT:    movlps %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  ; Identity mask <0,1,2> selects only %src1, so no shuffle instruction is
  ; expected in the output — just the addps and the split store.
  %x = shufflevector <3 x float> %src1, <3 x float> %src2, <3 x i32> < i32 0, i32 1, i32 2>
  %val = fadd <3 x float> %x, %src2
  store <3 x float> %val, <3 x float>* %dst.addr
  ret void
}


; Widening shuffle of v3float with a different mask and then an add.
define void @shuf2(<3 x float>* %dst.addr, <3 x float> %src1,<3 x float> %src2) nounwind {
; X86-LABEL: shuf2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; X86-NEXT:    addps %xmm1, %xmm0
; X86-NEXT:    extractps $2, %xmm0, 8(%eax)
; X86-NEXT:    extractps $1, %xmm0, 4(%eax)
; X86-NEXT:    movss %xmm0, (%eax)
; X86-NEXT:    retl
;
; X64-LABEL: shuf2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; X64-NEXT:    addps %xmm1, %xmm0
; X64-NEXT:    extractps $2, %xmm0, 8(%rdi)
; X64-NEXT:    movlps %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  ; Mask <0,4,2>: result lane 1 comes from the second source (%src2 element 0),
  ; so a real two-input shuffle (blendps above) is required before the add.
  %x = shufflevector <3 x float> %src1, <3 x float> %src2, <3 x i32> < i32 0, i32 4, i32 2>
  %val = fadd <3 x float> %x, %src2
  store <3 x float> %val, <3 x float>* %dst.addr
  ret void
}

; Example of when widening a v3float operation causes the DAG to replace a node
; with the operation that we are currently widening, i.e. when replacing
; opA with opB, the DAG will produce new operations with opA.
define void @shuf3(<4 x float> %tmp10, <4 x float> %vecinit15, <4 x float>* %dst) nounwind {
; X86-LABEL: shuf3:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X86-NEXT:    movaps %xmm1, (%eax)
; X86-NEXT:    retl
;
; X64-LABEL: shuf3:
; X64:       # %bb.0: # %entry
; X64-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X64-NEXT:    movaps %xmm1, (%rdi)
; X64-NEXT:    retq
entry:
  ; Chain of v4f32 <-> v3f32 shuffles; the checks show the whole chain folds
  ; down to a single splat of %vecinit15's low element.
  %shuffle.i.i.i12 = shufflevector <4 x float> %tmp10, <4 x float> %vecinit15, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  %tmp25.i.i = shufflevector <4 x float> %shuffle.i.i.i12, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
  %tmp1.i.i = shufflevector <3 x float> %tmp25.i.i, <3 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %tmp3.i13 = shufflevector <4 x float> %tmp1.i.i, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2> ; <<3 x float>>
  %tmp6.i14 = shufflevector <3 x float> %tmp3.i13, <3 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %tmp97.i = shufflevector <4 x float> %tmp6.i14, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
  %tmp2.i18 = shufflevector <3 x float> %tmp97.i, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 2>
  %t5 = bitcast <4 x float> %tmp2.i18 to <4 x i32>
  ; %shr.i.i19 / %and.i.i20 have no uses; no shift/and appears in the checked
  ; assembly, so they must be eliminated during selection.
  %shr.i.i19 = lshr <4 x i32> %t5, <i32 19, i32 19, i32 19, i32 19>
  %and.i.i20 = and <4 x i32> %shr.i.i19, <i32 4080, i32 4080, i32 4080, i32 4080>
  %shuffle.i.i.i21 = shufflevector <4 x float> %tmp2.i18, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 2, i32 3>
  store <4 x float> %shuffle.i.i.i21, <4 x float>* %dst
  ret void
}

; PR10421: make sure we correctly handle extreme widening with CONCAT_VECTORS
define <8 x i8> @shuf4(<4 x i8> %a, <4 x i8> %b) nounwind readnone {
; X86-LABEL: shuf4:
; X86:       # %bb.0:
; X86-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-NEXT:    retl
;
; X64-LABEL: shuf4:
; X64:       # %bb.0:
; X64-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-NEXT:    retq
  ; Mask <0..7> simply concatenates %a and %b (a CONCAT_VECTORS node); the
  ; widened lowering is a single unpcklps.
  %vshuf = shufflevector <4 x i8> %a, <4 x i8> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i8> %vshuf
}

; PR11389: another CONCAT_VECTORS case
define void @shuf5(<8 x i8>* %p) nounwind {
; X86-LABEL: shuf5:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT:    movsd %xmm0, (%eax)
; X86-NEXT:    retl
;
; X64-LABEL: shuf5:
; X64:       # %bb.0:
; X64-NEXT:    movabsq $2387225703656530209, %rax # imm = 0x2121212121212121
; X64-NEXT:    movq %rax, (%rdi)
; X64-NEXT:    retq
  ; Shuffle of a constant vector: every defined lane is element 1 (i8 33 ==
  ; 0x21), so per the X64 checks the store folds to the immediate
  ; 0x2121212121212121.
  %v = shufflevector <2 x i8> <i8 4, i8 33>, <2 x i8> undef, <8 x i32> <i32 1, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  store <8 x i8> %v, <8 x i8>* %p, align 8
  ret void
}