; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
target datalayout = "E-p:64:64:64-p1:32:32:32-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"

; Instcombine should be able to prove vector alignment in the presence of a
; few mild address computation tricks: the base pointer is masked down to a
; 16-byte boundary, and every index feeding the address is kept provably even.

define void @test0(i8* %b, i64 %n, i64 %u, i64 %y) nounwind  {
; CHECK-LABEL: @test0(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[C:%.*]] = ptrtoint i8* [[B:%.*]] to i64
; CHECK-NEXT:    [[D:%.*]] = and i64 [[C]], -16
; CHECK-NEXT:    [[E:%.*]] = inttoptr i64 [[D]] to double*
; CHECK-NEXT:    [[V:%.*]] = shl i64 [[U:%.*]], 1
; CHECK-NEXT:    [[Z:%.*]] = and i64 [[Y:%.*]], -2
; CHECK-NEXT:    [[T1421:%.*]] = icmp eq i64 [[N:%.*]], 0
; CHECK-NEXT:    br i1 [[T1421]], label [[RETURN:%.*]], label [[BB:%.*]]
; CHECK:       bb:
; CHECK-NEXT:    [[I:%.*]] = phi i64 [ [[INDVAR_NEXT:%.*]], [[BB]] ], [ 20, [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[J:%.*]] = mul i64 [[I]], [[V]]
; CHECK-NEXT:    [[H:%.*]] = add i64 [[J]], [[Z]]
; CHECK-NEXT:    [[T8:%.*]] = getelementptr double, double* [[E]], i64 [[H]]
; CHECK-NEXT:    [[P:%.*]] = bitcast double* [[T8]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> zeroinitializer, <2 x double>* [[P]], align 16
; CHECK-NEXT:    [[INDVAR_NEXT]] = add i64 [[I]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
; CHECK:       return:
; CHECK-NEXT:    ret void
;
entry:
  %c = ptrtoint i8* %b to i64
  %d = and i64 %c, -16                             ; base is 16-byte aligned
  %e = inttoptr i64 %d to double*
  %v = mul i64 %u, 2                               ; %v is even
  %z = and i64 %y, -2                              ; %z is even
  %t1421 = icmp eq i64 %n, 0
  br i1 %t1421, label %return, label %bb

bb:
  %i = phi i64 [ %indvar.next, %bb ], [ 20, %entry ]
  %j = mul i64 %i, %v                              ; even factor %v keeps %j even
  %h = add i64 %j, %z                              ; even plus even is even
  %t8 = getelementptr double, double* %e, i64 %h   ; byte offset is 8 * even = 16 * k
  %p = bitcast double* %t8 to <2 x double>*
  store <2 x double><double 0.0, double 0.0>, <2 x double>* %p, align 8
  %indvar.next = add i64 %i, 1
  %exitcond = icmp eq i64 %indvar.next, %n
  br i1 %exitcond, label %return, label %bb

return:
  ret void
}
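
; A minimal standalone sketch of the same reasoning (illustrative only;
; @sketch0 is hypothetical and carries no CHECK lines): once the base is
; masked to a 16-byte boundary and the index is known even, the store's
; byte offset is a multiple of 16, so instcombine may raise `align 8` to
; `align 16`.
;
;   define void @sketch0(i8* %b, i64 %y) {
;     %c = ptrtoint i8* %b to i64
;     %d = and i64 %c, -16            ; 16-byte-aligned base
;     %e = inttoptr i64 %d to double*
;     %z = and i64 %y, -2             ; even index: byte offset is 16 * k
;     %t = getelementptr double, double* %e, i64 %z
;     %p = bitcast double* %t to <2 x double>*
;     store <2 x double> zeroinitializer, <2 x double>* %p, align 8
;     ret void
;   }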

; When we see an unaligned load from an insufficiently aligned global or
; alloca, increase the alignment of the load, turning it into an aligned load.

@GLOBAL = internal global [4 x i32] zeroinitializer

define <16 x i8> @test1(<2 x i64> %x) {
; CHECK-LABEL: @test1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([4 x i32]* @GLOBAL to <16 x i8>*), align 16
; CHECK-NEXT:    ret <16 x i8> [[TMP]]
;
entry:
  %tmp = load <16 x i8>, <16 x i8>* bitcast ([4 x i32]* @GLOBAL to <16 x i8>*), align 1
  ret <16 x i8> %tmp
}
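
; The comment above also covers allocas; a sketch of that case (illustrative
; only, @sketch_alloca is hypothetical and has no CHECK lines): instcombine
; can raise the alloca's alignment so the vector load becomes aligned.
;
;   define <16 x i8> @sketch_alloca() {
;     %a = alloca [4 x i32]           ; naturally 4-byte aligned
;     %p = bitcast [4 x i32]* %a to <16 x i8>*
;     %v = load <16 x i8>, <16 x i8>* %p, align 1
;     ret <16 x i8> %v
;   }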

@GLOBAL_as1 = internal addrspace(1) global [4 x i32] zeroinitializer

define <16 x i8> @test1_as1(<2 x i64> %x) {
; CHECK-LABEL: @test1_as1(
; CHECK-NEXT:    [[TMP:%.*]] = load <16 x i8>, <16 x i8> addrspace(1)* bitcast ([4 x i32] addrspace(1)* @GLOBAL_as1 to <16 x i8> addrspace(1)*), align 16
; CHECK-NEXT:    ret <16 x i8> [[TMP]]
;
  %tmp = load <16 x i8>, <16 x i8> addrspace(1)* bitcast ([4 x i32] addrspace(1)* @GLOBAL_as1 to <16 x i8> addrspace(1)*), align 1
  ret <16 x i8> %tmp
}
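
; The same bump in a non-default address space: per the datalayout above,
; addrspace(1) uses 32-bit pointers (p1:32:32:32), so this exercises the
; alignment computation with a different pointer width.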

@GLOBAL_as1_gep = internal addrspace(1) global [8 x i32] zeroinitializer

define <16 x i8> @test1_as1_gep(<2 x i64> %x) {
; CHECK-LABEL: @test1_as1_gep(
; CHECK-NEXT:    [[TMP:%.*]] = load <16 x i8>, <16 x i8> addrspace(1)* bitcast (i32 addrspace(1)* getelementptr inbounds ([8 x i32], [8 x i32] addrspace(1)* @GLOBAL_as1_gep, i32 0, i32 4) to <16 x i8> addrspace(1)*), align 16
; CHECK-NEXT:    ret <16 x i8> [[TMP]]
;
  %tmp = load <16 x i8>, <16 x i8> addrspace(1)* bitcast (i32 addrspace(1)* getelementptr ([8 x i32], [8 x i32] addrspace(1)* @GLOBAL_as1_gep, i16 0, i16 4) to <16 x i8> addrspace(1)*), align 1
  ret <16 x i8> %tmp
}
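
; The loaded address above is element 4 of [8 x i32], i.e. 4 * 4 = 16 bytes
; past the start of @GLOBAL_as1_gep; once the global's alignment is raised
; to 16, an address offset by a multiple of 16 is itself 16-byte aligned.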

; When a load or store lacks an explicit alignment, add one.

define double @test2(double* %p, double %n) nounwind {
; CHECK-LABEL: @test2(
; CHECK-NEXT:    [[T:%.*]] = load double, double* [[P:%.*]], align 8
; CHECK-NEXT:    store double [[N:%.*]], double* [[P]], align 8
; CHECK-NEXT:    ret double [[T]]
;
  %t = load double, double* %p
  store double %n, double* %p
  ret double %t
}
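
; The alignment added above is the ABI alignment for double from the
; datalayout (f64:64:64, i.e. 8 bytes).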

declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind

declare void @use(i8*)

%struct.s = type { i32, i32, i32, i32 }

define void @test3(%struct.s* sret(%struct.s) %a4) {
; Check that the alignment is bumped up to the alignment of the sret type.
; CHECK-LABEL: @test3(
; CHECK-NEXT:    [[A4_CAST:%.*]] = bitcast %struct.s* [[A4:%.*]] to i8*
; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* nonnull align 4 dereferenceable(16) [[A4_CAST]], i8 0, i64 16, i1 false)
; CHECK-NEXT:    call void @use(i8* [[A4_CAST]])
; CHECK-NEXT:    ret void
;
  %a4.cast = bitcast %struct.s* %a4 to i8*
  call void @llvm.memset.p0i8.i64(i8* %a4.cast, i8 0, i64 16, i1 false)
  call void @use(i8* %a4.cast)
  ret void
}
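
; %struct.s is four i32s (16 bytes, ABI alignment 4 per i32:32:32 in the
; datalayout), so the memset's destination gains `align 4`; instcombine can
; also infer `nonnull` and `dereferenceable(16)` from the sret argument and
; the 16-byte write, as the CHECK line shows.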