1; RUN: llc < %s -o - -mcpu=generic -march=x86-64 -mattr=+sse4.2 | FileCheck %s
2
; Tests widening of loads and stores of illegal (non-power-of-two) vector
; types, based on pr5626.
5
%i32vec3 = type <3 x i32>
; The 16-byte-aligned <3 x i32> loads may be widened to full 16-byte movdqa
; reads, but the 12-byte store must not write past the end of %ret: the CHECK
; lines expect it to be split into a pextrd of element 2 (offset 8) followed
; by a movq of elements 0-1.
define void @add3i32(%i32vec3*  sret %ret, %i32vec3* %ap, %i32vec3* %bp)  {
; CHECK-LABEL: add3i32:
; CHECK:         movdqa  (%{{.*}}), %[[R0:xmm[0-9]+]]
; CHECK-NEXT:    paddd   (%{{.*}}), %[[R0]]
; CHECK-NEXT:    pextrd  $2, %[[R0]], 8(%{{.*}})
; CHECK-NEXT:    movq    %[[R0]], (%{{.*}})
	%a = load %i32vec3, %i32vec3* %ap, align 16
	%b = load %i32vec3, %i32vec3* %bp, align 16
	%x = add %i32vec3 %a, %b
	store %i32vec3 %x, %i32vec3* %ret, align 16
	ret void
}
19
; Same operation as @add3i32 but with only 8-byte alignment: a widened 16-byte
; load would be unsafe, so each <3 x i32> input is expected to be assembled
; from a movq (elements 0-1) plus a pinsrd of element 2, and the store is again
; split into pextrd + movq.
define void @add3i32_2(%i32vec3*  sret %ret, %i32vec3* %ap, %i32vec3* %bp)  {
; CHECK-LABEL: add3i32_2:
; CHECK:         movq    (%{{.*}}), %[[R0:xmm[0-9]+]]
; CHECK-NEXT:    pinsrd  $2, 8(%{{.*}}), %[[R0]]
; CHECK-NEXT:    movq    (%{{.*}}), %[[R1:xmm[0-9]+]]
; CHECK-NEXT:    pinsrd  $2, 8(%{{.*}}), %[[R1]]
; CHECK-NEXT:    paddd   %[[R0]], %[[R1]]
; CHECK-NEXT:    pextrd  $2, %[[R1]], 8(%{{.*}})
; CHECK-NEXT:    movq    %[[R1]], (%{{.*}})
	%a = load %i32vec3, %i32vec3* %ap, align 8
	%b = load %i32vec3, %i32vec3* %bp, align 8
	%x = add %i32vec3 %a, %b
	store %i32vec3 %x, %i32vec3* %ret, align 8
	ret void
}
35
%i32vec7 = type <7 x i32>
; <7 x i32> (28 bytes) is split across two xmm registers for the add; the store
; is expected to write exactly 28 bytes: a 4-byte pextrd (element 6) plus an
; 8-byte movq (elements 4-5) from the high half, and a full 16-byte movdqa for
; elements 0-3.
define void @add7i32(%i32vec7*  sret %ret, %i32vec7* %ap, %i32vec7* %bp)  {
; CHECK-LABEL: add7i32:
; CHECK:         movdqa  (%{{.*}}), %[[R0:xmm[0-9]+]]
; CHECK-NEXT:    movdqa  16(%{{.*}}), %[[R1:xmm[0-9]+]]
; CHECK-NEXT:    paddd   (%{{.*}}), %[[R0]]
; CHECK-NEXT:    paddd   16(%{{.*}}), %[[R1]]
; CHECK-NEXT:    pextrd  $2, %[[R1]], 24(%{{.*}})
; CHECK-NEXT:    movq    %[[R1]], 16(%{{.*}})
; CHECK-NEXT:    movdqa  %[[R0]], (%{{.*}})
	%a = load %i32vec7, %i32vec7* %ap, align 16
	%b = load %i32vec7, %i32vec7* %bp, align 16
	%x = add %i32vec7 %a, %b
	store %i32vec7 %x, %i32vec7* %ret, align 16
	ret void
}
52
%i32vec12 = type <12 x i32>
; <12 x i32> (48 bytes) is an exact multiple of the xmm width, so no store
; splitting is needed: three 16-byte movdqa loads, adds, and stores.
define void @add12i32(%i32vec12*  sret %ret, %i32vec12* %ap, %i32vec12* %bp)  {
; CHECK-LABEL: add12i32:
; CHECK:         movdqa  (%{{.*}}), %[[R0:xmm[0-9]+]]
; CHECK-NEXT:    movdqa  16(%{{.*}}), %[[R1:xmm[0-9]+]]
; CHECK-NEXT:    movdqa  32(%{{.*}}), %[[R2:xmm[0-9]+]]
; CHECK-NEXT:    paddd   (%{{.*}}), %[[R0]]
; CHECK-NEXT:    paddd   16(%{{.*}}), %[[R1]]
; CHECK-NEXT:    paddd   32(%{{.*}}), %[[R2]]
; CHECK-NEXT:    movdqa  %[[R2]], 32(%{{.*}})
; CHECK-NEXT:    movdqa  %[[R1]], 16(%{{.*}})
; CHECK-NEXT:    movdqa  %[[R0]], (%{{.*}})
	%a = load %i32vec12, %i32vec12* %ap, align 16
	%b = load %i32vec12, %i32vec12* %bp, align 16
	%x = add %i32vec12 %a, %b
	store %i32vec12 %x, %i32vec12* %ret, align 16
	ret void
}
71
72
%i16vec3 = type <3 x i16>
; <3 x i16> is promoted to i32 lanes via pmovzxwd and added with paddd; the
; 6-byte store is expected as a pextrw of the third element (offset 4) plus,
; after re-packing the low two elements with pshufb/pmovzxdq, a 4-byte movd.
define void @add3i16(%i16vec3* nocapture sret %ret, %i16vec3* %ap, %i16vec3* %bp) nounwind {
; CHECK-LABEL: add3i16:
; CHECK:         pmovzxwd (%{{.*}}), %[[R0:xmm[0-9]+]]
; CHECK-NEXT:    pmovzxwd (%{{.*}}), %[[R1:xmm[0-9]+]]
; CHECK-NEXT:    paddd    %[[R0]], %[[R1]]
; CHECK-NEXT:    pextrw   $4, %[[R1]], 4(%{{.*}})
; CHECK-NEXT:    pshufb   {{.*}}, %[[R1]]
; CHECK-NEXT:    pmovzxdq %[[R1]], %[[R0]]
; CHECK-NEXT:    movd     %[[R0]], (%{{.*}})
	%a = load %i16vec3, %i16vec3* %ap, align 16
	%b = load %i16vec3, %i16vec3* %bp, align 16
	%x = add %i16vec3 %a, %b
	store %i16vec3 %x, %i16vec3* %ret, align 16
	ret void
}
89
%i16vec4 = type <4 x i16>
; <4 x i16> is exactly 8 bytes, so it fits a single movq load/store pair with
; one paddw in between — no element extraction needed.
define void @add4i16(%i16vec4* nocapture sret %ret, %i16vec4* %ap, %i16vec4* %bp) nounwind {
; CHECK-LABEL: add4i16:
; CHECK:         movq    (%{{.*}}), %[[R0:xmm[0-9]+]]
; CHECK-NEXT:    movq    (%{{.*}}), %[[R1:xmm[0-9]+]]
; CHECK-NEXT:    paddw   %[[R0]], %[[R1]]
; CHECK-NEXT:    movq    %[[R1]], (%{{.*}})
	%a = load %i16vec4, %i16vec4* %ap, align 16
	%b = load %i16vec4, %i16vec4* %bp, align 16
	%x = add %i16vec4 %a, %b
	store %i16vec4 %x, %i16vec4* %ret, align 16
	ret void
}
103
%i16vec12 = type <12 x i16>
; <12 x i16> (24 bytes) spans two xmm registers; the store is expected to be
; split into an 8-byte movq for the high four elements and a 16-byte movdqa
; for the low eight.
define void @add12i16(%i16vec12* nocapture sret %ret, %i16vec12* %ap, %i16vec12* %bp) nounwind {
; CHECK-LABEL: add12i16:
; CHECK:         movdqa  (%{{.*}}), %[[R0:xmm[0-9]+]]
; CHECK-NEXT:    movdqa  16(%{{.*}}), %[[R1:xmm[0-9]+]]
; CHECK-NEXT:    paddw   (%{{.*}}), %[[R0]]
; CHECK-NEXT:    paddw   16(%{{.*}}), %[[R1]]
; CHECK-NEXT:    movq    %[[R1]], 16(%{{.*}})
; CHECK-NEXT:    movdqa  %[[R0]], (%{{.*}})
	%a = load %i16vec12, %i16vec12* %ap, align 16
	%b = load %i16vec12, %i16vec12* %bp, align 16
	%x = add %i16vec12 %a, %b
	store %i16vec12 %x, %i16vec12* %ret, align 16
	ret void
}
119
%i16vec18 = type <18 x i16>
; <18 x i16> (36 bytes) needs three xmm registers; the last two elements
; (4 bytes) are expected to be stored with a movd, the rest with full movdqa
; stores.
define void @add18i16(%i16vec18* nocapture sret %ret, %i16vec18* %ap, %i16vec18* %bp) nounwind {
; CHECK-LABEL: add18i16:
; CHECK:         movdqa  (%{{.*}}), %[[R0:xmm[0-9]+]]
; CHECK-NEXT:    movdqa  16(%{{.*}}), %[[R1:xmm[0-9]+]]
; CHECK-NEXT:    movdqa  32(%{{.*}}), %[[R2:xmm[0-9]+]]
; CHECK-NEXT:    paddw   (%{{.*}}), %[[R0]]
; CHECK-NEXT:    paddw   16(%{{.*}}), %[[R1]]
; CHECK-NEXT:    paddw   32(%{{.*}}), %[[R2]]
; CHECK-NEXT:    movd    %[[R2]], 32(%{{.*}})
; CHECK-NEXT:    movdqa  %[[R1]], 16(%{{.*}})
; CHECK-NEXT:    movdqa  %[[R0]], (%{{.*}})
	%a = load %i16vec18, %i16vec18* %ap, align 16
	%b = load %i16vec18, %i16vec18* %bp, align 16
	%x = add %i16vec18 %a, %b
	store %i16vec18 %x, %i16vec18* %ret, align 16
	ret void
}
138
139
%i8vec3 = type <3 x i8>
; <3 x i8> is promoted to i32 lanes via pmovzxbd and added with paddd; the
; 3-byte store is expected as a pextrb of the third byte (offset 2) plus, after
; re-packing the low two bytes through pshufb/pmovzxwq and a movd to a GPR, a
; 2-byte movw for bytes 0-1.
define void @add3i8(%i8vec3* nocapture sret %ret, %i8vec3* %ap, %i8vec3* %bp) nounwind {
; CHECK-LABEL: add3i8:
; CHECK:         pmovzxbd (%{{.*}}), %[[R0:xmm[0-9]+]]
; CHECK-NEXT:    pmovzxbd (%{{.*}}), %[[R1:xmm[0-9]+]]
; CHECK-NEXT:    paddd    %[[R0]], %[[R1]]
; CHECK-NEXT:    pextrb   $8, %[[R1]], 2(%{{.*}})
; CHECK-NEXT:    pshufb   {{.*}}, %[[R1]]
; CHECK-NEXT:    pmovzxwq %[[R1]], %[[R0]]
; CHECK-NEXT:    movd     %[[R0]], %e[[R2:[abcd]]]x
; CHECK-NEXT:    movw     %[[R2]]x, (%{{.*}})
	%a = load %i8vec3, %i8vec3* %ap, align 16
	%b = load %i8vec3, %i8vec3* %bp, align 16
	%x = add %i8vec3 %a, %b
	store %i8vec3 %x, %i8vec3* %ret, align 16
	ret void
}
157
%i8vec31 = type <31 x i8>
; <31 x i8> exercises maximal store splitting: 31 = 16 + 8 + 4 + 2 + 1, so the
; CHECK lines expect a movdqa (bytes 0-15), movq (16-23), pextrd (24-27),
; pextrw (28-29) and pextrb (byte 30) — nothing may write past byte 30.
define void @add31i8(%i8vec31* nocapture sret %ret, %i8vec31* %ap, %i8vec31* %bp) nounwind {
; CHECK-LABEL: add31i8:
; CHECK:         movdqa  (%{{.*}}), %[[R0:xmm[0-9]+]]
; CHECK-NEXT:    movdqa  16(%{{.*}}), %[[R1:xmm[0-9]+]]
; CHECK-NEXT:    paddb   (%{{.*}}), %[[R0]]
; CHECK-NEXT:    paddb   16(%{{.*}}), %[[R1]]
; CHECK-NEXT:    pextrb  $14, %[[R1]], 30(%{{.*}})
; CHECK-NEXT:    pextrw  $6, %[[R1]], 28(%{{.*}})
; CHECK-NEXT:    pextrd  $2, %[[R1]], 24(%{{.*}})
; CHECK-NEXT:    movq    %[[R1]], 16(%{{.*}})
; CHECK-NEXT:    movdqa  %[[R0]], (%{{.*}})
	%a = load %i8vec31, %i8vec31* %ap, align 16
	%b = load %i8vec31, %i8vec31* %bp, align 16
	%x = add %i8vec31 %a, %b
	store %i8vec31 %x, %i8vec31* %ret, align 16
	ret void
}
176
177
%i8vec3pack = type { <3 x i8>, i8 }
; Stores the constants <-98,-98,-98> to %X and <1,1,1> to %rot, reloads both,
; computes a per-element lshr, and stores the 3-byte result to %result. The
; CHECK lines pin down both the 3-byte constant stores (movw + movb of the
; third byte) and the shift lowering: each loaded byte is zero-extended to a
; dword (pmovzxbd) and shifted with a scalar shrl (count of 1, from the
; all-ones shift vector), then the lanes are reassembled with pinsrd before
; the 3-byte store sequence.
; NOTE(review): lane 3 is moved with pextrd/pinsrd but not shifted — it is the
; padding lane beyond the <3 x i8> payload, so its value does not matter.
define void @rot(%i8vec3pack* nocapture sret %result, %i8vec3pack* %X, %i8vec3pack* %rot) nounwind {
; CHECK-LABEL: rot:
; CHECK:         movdqa  {{.*}}, %[[CONSTANT0:xmm[0-9]+]]
; CHECK-NEXT:    movdqa  {{.*}}, %[[SHUFFLE_MASK:xmm[0-9]+]]
; CHECK-NEXT:    pshufb  %[[SHUFFLE_MASK]], %[[CONSTANT0]]
; CHECK-NEXT:    pmovzxwq %[[CONSTANT0]], %[[CONSTANT0]]
; CHECK-NEXT:    movd    %[[CONSTANT0]], %e[[R0:[abcd]]]x
; CHECK-NEXT:    movw    %[[R0]]x, (%[[PTR0:.*]])
; CHECK-NEXT:    movb    $-98, 2(%[[PTR0]])
; CHECK-NEXT:    movdqa  {{.*}}, %[[CONSTANT1:xmm[0-9]+]]
; CHECK-NEXT:    pshufb  %[[SHUFFLE_MASK]], %[[CONSTANT1]]
; CHECK-NEXT:    pmovzxwq %[[CONSTANT1]], %[[CONSTANT1]]
; CHECK-NEXT:    movd    %[[CONSTANT1]], %e[[R1:[abcd]]]x
; CHECK-NEXT:    movw    %[[R1]]x, (%[[PTR1:.*]])
; CHECK-NEXT:    movb    $1, 2(%[[PTR1]])
; CHECK-NEXT:    movl    (%[[PTR0]]), [[TMP1:%e[abcd]+x]]
; CHECK-NEXT:    movl    [[TMP1]], [[TMP2:.*]]
; CHECK-NEXT:    pmovzxbd [[TMP2]], %[[X0:xmm[0-9]+]]
; CHECK-NEXT:    pextrd  $1, %[[X0]], %e[[R0:[abcd]]]x
; CHECK-NEXT:    shrl    %e[[R0]]x
; CHECK-NEXT:    movd    %[[X0]], %e[[R1:[abcd]]]x
; CHECK-NEXT:    shrl    %e[[R1]]x
; CHECK-NEXT:    movd    %e[[R1]]x, %[[X1:xmm[0-9]+]]
; CHECK-NEXT:    pinsrd  $1, %e[[R0]]x, %[[X1]]
; CHECK-NEXT:    pextrd  $2, %[[X0]], %e[[R0:[abcd]]]x
; CHECK-NEXT:    shrl    %e[[R0]]x
; CHECK-NEXT:    pinsrd  $2, %e[[R0]]x, %[[X1]]
; CHECK-NEXT:    pextrd  $3, %[[X0]], %e[[R0:[abcd]]]x
; CHECK-NEXT:    pinsrd  $3, %e[[R0]]x, %[[X1]]
; CHECK-NEXT:    pextrb  $8, %[[X1]], 2(%{{.*}})
; CHECK-NEXT:    pshufb  %[[SHUFFLE_MASK]], %[[X1]]
; CHECK-NEXT:    pmovzxwq %[[X1]], %[[X3:xmm[0-9]+]]
; CHECK-NEXT:    movd    %[[X3]], %e[[R0:[abcd]]]x
; CHECK-NEXT:    movw    %[[R0]]x, (%{{.*}})

entry:
  ; Write constant vectors through bitcasts of the struct pointers.
  %storetmp = bitcast %i8vec3pack* %X to <3 x i8>*
  store <3 x i8> <i8 -98, i8 -98, i8 -98>, <3 x i8>* %storetmp
  %storetmp1 = bitcast %i8vec3pack* %rot to <3 x i8>*
  store <3 x i8> <i8 1, i8 1, i8 1>, <3 x i8>* %storetmp1
  ; Reload as whole structs and extract the <3 x i8> payloads.
  %tmp = load %i8vec3pack, %i8vec3pack* %X
  %extractVec = extractvalue %i8vec3pack %tmp, 0
  %tmp2 = load %i8vec3pack, %i8vec3pack* %rot
  %extractVec3 = extractvalue %i8vec3pack %tmp2, 0
  ; Per-element logical shift right: <-98,..> lshr <1,..>.
  %shr = lshr <3 x i8> %extractVec, %extractVec3
  %storetmp4 = bitcast %i8vec3pack* %result to <3 x i8>*
  store <3 x i8> %shr, <3 x i8>* %storetmp4
  ret void
}
228
229