; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx,slow-unaligned-mem-32 | FileCheck %s
; RUN: llc -O0 < %s -mtriple=x86_64-unknown-unknown -mattr=avx,slow-unaligned-mem-32 | FileCheck %s -check-prefix=CHECK_O0

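; The three 32-byte loads and stores below are all 32-byte aligned, so even
; with slow-unaligned-mem-32 they should stay single ymm accesses, and the
; loaded values must survive the call to @dummy via 32-byte spill slots.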
define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>* nocapture %i) nounwind {
; CHECK-LABEL: test_256_load:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %r15
; CHECK-NEXT:    pushq %r14
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    subq $96, %rsp
; CHECK-NEXT:    movq %rdx, %r14
; CHECK-NEXT:    movq %rsi, %r15
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    vmovaps (%rdi), %ymm0
; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovaps (%rsi), %ymm1
; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovaps (%rdx), %ymm2
; CHECK-NEXT:    vmovups %ymm2, (%rsp) # 32-byte Spill
; CHECK-NEXT:    callq dummy
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vmovaps %ymm0, (%rbx)
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vmovaps %ymm0, (%r15)
; CHECK-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vmovaps %ymm0, (%r14)
; CHECK-NEXT:    addq $96, %rsp
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    popq %r14
; CHECK-NEXT:    popq %r15
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
;
; CHECK_O0-LABEL: test_256_load:
; CHECK_O0:       # %bb.0: # %entry
; CHECK_O0-NEXT:    subq $184, %rsp
; CHECK_O0-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK_O0-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK_O0-NEXT:    movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK_O0-NEXT:    vmovapd (%rdi), %ymm0
; CHECK_O0-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
; CHECK_O0-NEXT:    vmovaps (%rsi), %ymm1
; CHECK_O0-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK_O0-NEXT:    vmovdqa (%rdx), %ymm2
; CHECK_O0-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK_O0-NEXT:    callq dummy
; CHECK_O0-NEXT:    vmovups (%rsp), %ymm2 # 32-byte Reload
; CHECK_O0-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; CHECK_O0-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; CHECK_O0-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
; CHECK_O0-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK_O0-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; CHECK_O0-NEXT:    vmovapd %ymm2, (%rdi)
; CHECK_O0-NEXT:    vmovaps %ymm1, (%rsi)
; CHECK_O0-NEXT:    vmovdqa %ymm0, (%rdx)
; CHECK_O0-NEXT:    addq $184, %rsp
; CHECK_O0-NEXT:    vzeroupper
; CHECK_O0-NEXT:    retq
entry:
  %0 = bitcast double* %d to <4 x double>*
  %tmp1.i = load <4 x double>, <4 x double>* %0, align 32
  %1 = bitcast float* %f to <8 x float>*
  %tmp1.i17 = load <8 x float>, <8 x float>* %1, align 32
  %tmp1.i16 = load <4 x i64>, <4 x i64>* %i, align 32
  tail call void @dummy(<4 x double> %tmp1.i, <8 x float> %tmp1.i17, <4 x i64> %tmp1.i16) nounwind
  store <4 x double> %tmp1.i, <4 x double>* %0, align 32
  store <8 x float> %tmp1.i17, <8 x float>* %1, align 32
  store <4 x i64> %tmp1.i16, <4 x i64>* %i, align 32
  ret void
}

declare void @dummy(<4 x double>, <8 x float>, <4 x i64>)

;;
;; The two tests below check that a load + scalar_to_vector + ins_subvec + zext
;; chain folds into a single vmovss, vmovsd, or vinsertps from memory (see the
;; hypothetical vinsertps sketch after mov01).

define <8 x float> @mov00(<8 x float> %v, float * %ptr) nounwind {
; CHECK-LABEL: mov00:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    retq
;
; CHECK_O0-LABEL: mov00:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK_O0-NEXT:    # kill: def $ymm0 killed $xmm0
; CHECK_O0-NEXT:    retq
  %val = load float, float* %ptr
  %i0 = insertelement <8 x float> zeroinitializer, float %val, i32 0
  ret <8 x float> %i0
}

define <4 x double> @mov01(<4 x double> %v, double * %ptr) nounwind {
; CHECK-LABEL: mov01:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    retq
;
; CHECK_O0-LABEL: mov01:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK_O0-NEXT:    # kill: def $ymm0 killed $xmm0
; CHECK_O0-NEXT:    retq
  %val = load double, double* %ptr
  %i0 = insertelement <4 x double> zeroinitializer, double %val, i32 0
  ret <4 x double> %i0
}
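
; A hypothetical third case, not part of the autogenerated test, sketching the
; vinsertps form mentioned in the comment above: inserting the loaded scalar
; into a non-zero lane of a zero vector should likewise fold into a single
; vinsertps from memory. The function name and lane index are assumptions.
; define <8 x float> @mov02(<8 x float> %v, float* %ptr) nounwind {
;   %val = load float, float* %ptr
;   %i1 = insertelement <8 x float> zeroinitializer, float %val, i32 1
;   ret <8 x float> %i1
; }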

define void @storev16i16(<16 x i16> %a) nounwind {
; CHECK-LABEL: storev16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovaps %ymm0, (%rax)
;
; CHECK_O0-LABEL: storev16i16:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    # implicit-def: $rax
; CHECK_O0-NEXT:    vmovdqa %ymm0, (%rax)
  store <16 x i16> %a, <16 x i16>* undef, align 32
  unreachable
}

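; With only 4-byte alignment, this 32-byte store must be split into two
; 16-byte accesses at -O2 because of slow-unaligned-mem-32; at -O0 it is
; left as a single unaligned vmovdqu.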
define void @storev16i16_01(<16 x i16> %a) nounwind {
; CHECK-LABEL: storev16i16_01:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vextractf128 $1, %ymm0, (%rax)
; CHECK-NEXT:    vmovups %xmm0, (%rax)
;
; CHECK_O0-LABEL: storev16i16_01:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    # implicit-def: $rax
; CHECK_O0-NEXT:    vmovdqu %ymm0, (%rax)
  store <16 x i16> %a, <16 x i16>* undef, align 4
  unreachable
}

define void @storev32i8(<32 x i8> %a) nounwind {
; CHECK-LABEL: storev32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovaps %ymm0, (%rax)
;
; CHECK_O0-LABEL: storev32i8:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    # implicit-def: $rax
; CHECK_O0-NEXT:    vmovdqa %ymm0, (%rax)
  store <32 x i8> %a, <32 x i8>* undef, align 32
  unreachable
}

define void @storev32i8_01(<32 x i8> %a) nounwind {
; CHECK-LABEL: storev32i8_01:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vextractf128 $1, %ymm0, (%rax)
; CHECK-NEXT:    vmovups %xmm0, (%rax)
;
; CHECK_O0-LABEL: storev32i8_01:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    # implicit-def: $rax
; CHECK_O0-NEXT:    vmovdqu %ymm0, (%rax)
  store <32 x i8> %a, <32 x i8>* undef, align 4
  unreachable
}

; It is faster to issue two 16-byte stores when the data is already in xmm
; registers, e.g. right after an integer operation.
define void @double_save(<4 x i32> %A, <4 x i32> %B, <8 x i32>* %P) nounwind ssp {
; CHECK-LABEL: double_save:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovaps %xmm1, 16(%rdi)
; CHECK-NEXT:    vmovaps %xmm0, (%rdi)
; CHECK-NEXT:    retq
;
; CHECK_O0-LABEL: double_save:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    vmovaps %xmm0, %xmm2
; CHECK_O0-NEXT:    # implicit-def: $ymm0
; CHECK_O0-NEXT:    vmovaps %xmm2, %xmm0
; CHECK_O0-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK_O0-NEXT:    vmovdqu %ymm0, (%rdi)
; CHECK_O0-NEXT:    vzeroupper
; CHECK_O0-NEXT:    retq
  %Z = shufflevector <4 x i32>%A, <4 x i32>%B, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  store <8 x i32> %Z, <8 x i32>* %P, align 16
  ret void
}

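; A volatile store must not be split, so even with slow-unaligned-mem-32 the
; 16-byte-aligned 32-byte store below stays a single unaligned ymm access.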
define void @double_save_volatile(<4 x i32> %A, <4 x i32> %B, <8 x i32>* %P) nounwind {
; CHECK-LABEL: double_save_volatile:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT:    vmovups %ymm0, (%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
;
; CHECK_O0-LABEL: double_save_volatile:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    vmovaps %xmm0, %xmm2
; CHECK_O0-NEXT:    # implicit-def: $ymm0
; CHECK_O0-NEXT:    vmovaps %xmm2, %xmm0
; CHECK_O0-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK_O0-NEXT:    vmovdqu %ymm0, (%rdi)
; CHECK_O0-NEXT:    vzeroupper
; CHECK_O0-NEXT:    retq
  %Z = shufflevector <4 x i32>%A, <4 x i32>%B, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  store volatile <8 x i32> %Z, <8 x i32>* %P, align 16
  ret void
}

declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>) nounwind

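; The branch conditions below are undef; the interesting part is that the
; masked store in %cif_mixed_test_all still selects vmaskmovps.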
define void @f_f() nounwind {
; CHECK-LABEL: f_f:
; CHECK:       # %bb.0: # %allocas
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    testb %al, %al
; CHECK-NEXT:    jne .LBB9_2
; CHECK-NEXT:  # %bb.1: # %cif_mask_all
; CHECK-NEXT:  .LBB9_2: # %cif_mask_mixed
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    testb %al, %al
; CHECK-NEXT:    jne .LBB9_4
; CHECK-NEXT:  # %bb.3: # %cif_mixed_test_all
; CHECK-NEXT:    vmovaps {{.*#+}} xmm0 = [4294967295,0,0,0]
; CHECK-NEXT:    vmaskmovps %ymm0, %ymm0, (%rax)
; CHECK-NEXT:  .LBB9_4: # %cif_mixed_test_any_check
;
; CHECK_O0-LABEL: f_f:
; CHECK_O0:       # %bb.0: # %allocas
; CHECK_O0-NEXT:    # implicit-def: $al
; CHECK_O0-NEXT:    testb $1, %al
; CHECK_O0-NEXT:    jne .LBB9_1
; CHECK_O0-NEXT:    jmp .LBB9_2
; CHECK_O0-NEXT:  .LBB9_1: # %cif_mask_all
; CHECK_O0-NEXT:  .LBB9_2: # %cif_mask_mixed
; CHECK_O0-NEXT:    # implicit-def: $al
; CHECK_O0-NEXT:    testb $1, %al
; CHECK_O0-NEXT:    jne .LBB9_3
; CHECK_O0-NEXT:    jmp .LBB9_4
; CHECK_O0-NEXT:  .LBB9_3: # %cif_mixed_test_all
; CHECK_O0-NEXT:    vmovdqa {{.*#+}} xmm0 = [4294967295,0,0,0]
; CHECK_O0-NEXT:    vmovdqa %xmm0, %xmm0
; CHECK_O0-NEXT:    # kill: def $ymm0 killed $xmm0
; CHECK_O0-NEXT:    # implicit-def: $rax
; CHECK_O0-NEXT:    # implicit-def: $ymm1
; CHECK_O0-NEXT:    vmaskmovps %ymm1, %ymm0, (%rax)
; CHECK_O0-NEXT:  .LBB9_4: # %cif_mixed_test_any_check
allocas:
  br i1 undef, label %cif_mask_all, label %cif_mask_mixed

cif_mask_all:
  unreachable

cif_mask_mixed:
  br i1 undef, label %cif_mixed_test_all, label %cif_mixed_test_any_check

cif_mixed_test_all:
  call void @llvm.x86.avx.maskstore.ps.256(i8* undef, <8 x i32> <i32 -1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, <8 x float> undef) nounwind
  unreachable

cif_mixed_test_any_check:
  unreachable
}

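; An align-1 <8 x i32> load/store pair: at -O2 both the load and the store are
; split into two 16-byte unaligned accesses; at -O0 the reassembled ymm value
; is stored with a single unaligned vmovdqu.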
define void @add8i32(<8 x i32>* %ret, <8 x i32>* %bp) nounwind {
; CHECK-LABEL: add8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovups (%rsi), %xmm0
; CHECK-NEXT:    vmovups 16(%rsi), %xmm1
; CHECK-NEXT:    vmovups %xmm1, 16(%rdi)
; CHECK-NEXT:    vmovups %xmm0, (%rdi)
; CHECK-NEXT:    retq
;
; CHECK_O0-LABEL: add8i32:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    vmovdqu (%rsi), %xmm2
; CHECK_O0-NEXT:    vmovdqu 16(%rsi), %xmm1
; CHECK_O0-NEXT:    # implicit-def: $ymm0
; CHECK_O0-NEXT:    vmovaps %xmm2, %xmm0
; CHECK_O0-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK_O0-NEXT:    vmovdqu %ymm0, (%rdi)
; CHECK_O0-NEXT:    vzeroupper
; CHECK_O0-NEXT:    retq
  %b = load <8 x i32>, <8 x i32>* %bp, align 1
  %x = add <8 x i32> zeroinitializer, %b
  store <8 x i32> %x, <8 x i32>* %ret, align 1
  ret void
}

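; With 64-byte alignment the <4 x i64> load and store stay single 32-byte
; aligned ymm accesses at both optimization levels.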
define void @add4i64a64(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
; CHECK-LABEL: add4i64a64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovaps (%rsi), %ymm0
; CHECK-NEXT:    vmovaps %ymm0, (%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
;
; CHECK_O0-LABEL: add4i64a64:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    vmovaps (%rsi), %ymm0
; CHECK_O0-NEXT:    vmovdqa %ymm0, (%rdi)
; CHECK_O0-NEXT:    vzeroupper
; CHECK_O0-NEXT:    retq
  %b = load <4 x i64>, <4 x i64>* %bp, align 64
  %x = add <4 x i64> zeroinitializer, %b
  store <4 x i64> %x, <4 x i64>* %ret, align 64
  ret void
}

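; With only 16-byte alignment the 32-byte accesses are split into 16-byte
; halves at -O2; at -O0 the halves are recombined and stored with a single
; unaligned vmovdqu.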
define void @add4i64a16(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
; CHECK-LABEL: add4i64a16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovaps (%rsi), %xmm0
; CHECK-NEXT:    vmovaps 16(%rsi), %xmm1
; CHECK-NEXT:    vmovaps %xmm1, 16(%rdi)
; CHECK-NEXT:    vmovaps %xmm0, (%rdi)
; CHECK-NEXT:    retq
;
; CHECK_O0-LABEL: add4i64a16:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    vmovdqa (%rsi), %xmm2
; CHECK_O0-NEXT:    vmovdqa 16(%rsi), %xmm1
; CHECK_O0-NEXT:    # implicit-def: $ymm0
; CHECK_O0-NEXT:    vmovaps %xmm2, %xmm0
; CHECK_O0-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK_O0-NEXT:    vmovdqu %ymm0, (%rdi)
; CHECK_O0-NEXT:    vzeroupper
; CHECK_O0-NEXT:    retq
  %b = load <4 x i64>, <4 x i64>* %bp, align 16
  %x = add <4 x i64> zeroinitializer, %b
  store <4 x i64> %x, <4 x i64>* %ret, align 16
  ret void
}

; This used to crash. v2i128 may not be a "simple" (MVT) type, but we can
; still split it; this example gets split further during legalization.

define void @PR43916(<2 x i128> %y, <2 x i128>* %z) {
; CHECK-LABEL: PR43916:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movq %rcx, 24(%r8)
; CHECK-NEXT:    movq %rdx, 16(%r8)
; CHECK-NEXT:    movq %rsi, 8(%r8)
; CHECK-NEXT:    movq %rdi, (%r8)
; CHECK-NEXT:    retq
;
; CHECK_O0-LABEL: PR43916:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    movq %rdi, (%r8)
; CHECK_O0-NEXT:    movq %rsi, 8(%r8)
; CHECK_O0-NEXT:    movq %rdx, 16(%r8)
; CHECK_O0-NEXT:    movq %rcx, 24(%r8)
; CHECK_O0-NEXT:    retq
  store <2 x i128> %y, <2 x i128>* %z, align 16
  ret void
}