; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=knl | FileCheck %s --check-prefix=ALL --check-prefix=KNL
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=skx | FileCheck %s --check-prefix=ALL --check-prefix=SKX
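; Each function below loads <8 x i8>, zero-extends it, adds a splat of 4, and
; truncates the result back to <8 x i8> (the xor with zeroinitializer is a
; no-op). Since only the low byte of each lane survives, the high bits of the
; extension are irrelevant and the zext can be treated as an any-extend.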


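; v8i64: the splatted constant folds as an embedded broadcast ({1to8}) and
; vpmovqb does the truncating store, so KNL and SKX share one sequence (ALL).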
define void @any_extend_load_v8i64(<8 x i8> * %ptr) {
; ALL-LABEL: any_extend_load_v8i64:
; ALL:       # BB#0:
; ALL-NEXT:    vpmovzxbq (%rdi), %zmm0
; ALL-NEXT:    vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; ALL-NEXT:    vpmovqb %zmm0, (%rdi)
; ALL-NEXT:    retq
  %wide.load = load <8 x i8>, <8 x i8>* %ptr, align 1
  %1 = zext <8 x i8> %wide.load to <8 x i64>
  %2 = add nuw nsw <8 x i64> %1, <i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 4>
  %3 = xor <8 x i64> %2, zeroinitializer
  %4 = trunc <8 x i64> %3 to <8 x i8>
  store <8 x i8> %4, <8 x i8>* %ptr, align 1
  ret void
}

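; v8i32: KNL lacks AVX-512VL, so it broadcasts the constant into a ymm
; register and truncates through vpmovdw on the full zmm plus a vpshufb,
; while SKX folds the broadcast and stores directly with vpmovdb.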
define void @any_extend_load_v8i32(<8 x i8> * %ptr) {
; KNL-LABEL: any_extend_load_v8i32:
; KNL:       # BB#0:
; KNL-NEXT:    vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; KNL-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm1
; KNL-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; KNL-NEXT:    vpmovdw %zmm0, %ymm0
; KNL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; KNL-NEXT:    vmovq %xmm0, (%rdi)
; KNL-NEXT:    retq
;
; SKX-LABEL: any_extend_load_v8i32:
; SKX:       # BB#0:
; SKX-NEXT:    vpmovzxbd (%rdi), %ymm0
; SKX-NEXT:    vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0
; SKX-NEXT:    vpmovdb %ymm0, (%rdi)
; SKX-NEXT:    retq
  %wide.load = load <8 x i8>, <8 x i8>* %ptr, align 1
  %1 = zext <8 x i8> %wide.load to <8 x i32>
  %2 = add nuw nsw <8 x i32> %1, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
  %3 = xor <8 x i32> %2, zeroinitializer
  %4 = trunc <8 x i32> %3 to <8 x i8>
  store <8 x i8> %4, <8 x i8>* %ptr, align 1
  ret void
}

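; v8i16: KNL has no AVX-512BW/VL, so the i16 add is done bytewise (vpaddb;
; safe because only the low byte of each lane is kept) and the bytes are
; repacked with vpshufb, while SKX uses vpaddw and a vpmovwb truncating store.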
define void @any_extend_load_v8i16(<8 x i8> * %ptr) {
; KNL-LABEL: any_extend_load_v8i16:
; KNL:       # BB#0:
; KNL-NEXT:    vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; KNL-NEXT:    vpaddb {{.*}}(%rip), %xmm0, %xmm0
; KNL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; KNL-NEXT:    vmovq %xmm0, (%rdi)
; KNL-NEXT:    retq
;
; SKX-LABEL: any_extend_load_v8i16:
; SKX:       # BB#0:
; SKX-NEXT:    vpmovzxbw (%rdi), %xmm0
; SKX-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm0
; SKX-NEXT:    vpmovwb %xmm0, (%rdi)
; SKX-NEXT:    retq
  %wide.load = load <8 x i8>, <8 x i8>* %ptr, align 1
  %1 = zext <8 x i8> %wide.load to <8 x i16>
  %2 = add nuw nsw <8 x i16> %1, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
  %3 = xor <8 x i16> %2, zeroinitializer
  %4 = trunc <8 x i16> %3 to <8 x i8>
  store <8 x i8> %4, <8 x i8>* %ptr, align 1
  ret void
}
