; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -instcombine -S < %s | FileCheck %s

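; The loaded type matches the vector element type, so the bitcast is folded into an inbounds GEP to element 0.
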
define float @matching_scalar(<4 x float>* dereferenceable(16) %p) {
; CHECK-LABEL: @matching_scalar(
; CHECK-NEXT:    [[BC:%.*]] = getelementptr inbounds <4 x float>, <4 x float>* [[P:%.*]], i64 0, i64 0
; CHECK-NEXT:    [[R:%.*]] = load float, float* [[BC]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %bc = bitcast <4 x float>* %p to float*
  %r = load float, float* %bc, align 16
  ret float %r
}

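; The loaded type (i32) does not match the vector element type, so the bitcast is kept.
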
define i32 @nonmatching_scalar(<4 x float>* dereferenceable(16) %p) {
; CHECK-LABEL: @nonmatching_scalar(
; CHECK-NEXT:    [[BC:%.*]] = bitcast <4 x float>* [[P:%.*]] to i32*
; CHECK-NEXT:    [[R:%.*]] = load i32, i32* [[BC]], align 16
; CHECK-NEXT:    ret i32 [[R]]
;
  %bc = bitcast <4 x float>* %p to i32*
  %r = load i32, i32* %bc, align 16
  ret i32 %r
}

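; The loaded type is wider than the vector element type, so the bitcast is kept.
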
define i64 @larger_scalar(<4 x float>* dereferenceable(16) %p) {
; CHECK-LABEL: @larger_scalar(
; CHECK-NEXT:    [[BC:%.*]] = bitcast <4 x float>* [[P:%.*]] to i64*
; CHECK-NEXT:    [[R:%.*]] = load i64, i64* [[BC]], align 16
; CHECK-NEXT:    ret i64 [[R]]
;
  %bc = bitcast <4 x float>* %p to i64*
  %r = load i64, i64* %bc, align 16
  ret i64 %r
}

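; The loaded type is narrower than the vector element type, so the bitcast is kept.
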
define i8 @smaller_scalar(<4 x float>* dereferenceable(16) %p) {
; CHECK-LABEL: @smaller_scalar(
; CHECK-NEXT:    [[BC:%.*]] = bitcast <4 x float>* [[P:%.*]] to i8*
; CHECK-NEXT:    [[R:%.*]] = load i8, i8* [[BC]], align 16
; CHECK-NEXT:    ret i8 [[R]]
;
  %bc = bitcast <4 x float>* %p to i8*
  %r = load i8, i8* %bc, align 16
  ret i8 %r
}

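; Same as above, but with an alignment smaller than the vector's; the bitcast is still kept.
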
define i8 @smaller_scalar_less_aligned(<4 x float>* dereferenceable(16) %p) {
; CHECK-LABEL: @smaller_scalar_less_aligned(
; CHECK-NEXT:    [[BC:%.*]] = bitcast <4 x float>* [[P:%.*]] to i8*
; CHECK-NEXT:    [[R:%.*]] = load i8, i8* [[BC]], align 4
; CHECK-NEXT:    ret i8 [[R]]
;
  %bc = bitcast <4 x float>* %p to i8*
  %r = load i8, i8* %bc, align 4
  ret i8 %r
}

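; A dereferenceable size smaller than the vector does not block the fold to an inbounds GEP.
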
define float @matching_scalar_small_deref(<4 x float>* dereferenceable(15) %p) {
; CHECK-LABEL: @matching_scalar_small_deref(
; CHECK-NEXT:    [[BC:%.*]] = getelementptr inbounds <4 x float>, <4 x float>* [[P:%.*]], i64 0, i64 0
; CHECK-NEXT:    [[R:%.*]] = load float, float* [[BC]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %bc = bitcast <4 x float>* %p to float*
  %r = load float, float* %bc, align 16
  ret float %r
}

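; Even dereferenceable(1) is enough for the fold to an inbounds GEP.
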
define float @matching_scalar_smallest_deref(<4 x float>* dereferenceable(1) %p) {
; CHECK-LABEL: @matching_scalar_smallest_deref(
; CHECK-NEXT:    [[BC:%.*]] = getelementptr inbounds <4 x float>, <4 x float>* [[P:%.*]], i64 0, i64 0
; CHECK-NEXT:    [[R:%.*]] = load float, float* [[BC]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %bc = bitcast <4 x float>* %p to float*
  %r = load float, float* %bc, align 16
  ret float %r
}

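; With dereferenceable_or_null in the default address space, the GEP is still inbounds; contrast with the non-default address space case below.
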
define float @matching_scalar_smallest_deref_or_null(<4 x float>* dereferenceable_or_null(1) %p) {
; CHECK-LABEL: @matching_scalar_smallest_deref_or_null(
; CHECK-NEXT:    [[BC:%.*]] = getelementptr inbounds <4 x float>, <4 x float>* [[P:%.*]], i64 0, i64 0
; CHECK-NEXT:    [[R:%.*]] = load float, float* [[BC]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %bc = bitcast <4 x float>* %p to float*
  %r = load float, float* %bc, align 16
  ret float %r
}

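; A dereferenceable pointer in a non-default address space still gets an inbounds GEP.
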
define float @matching_scalar_smallest_deref_addrspace(<4 x float> addrspace(4)* dereferenceable(1) %p) {
; CHECK-LABEL: @matching_scalar_smallest_deref_addrspace(
; CHECK-NEXT:    [[BC:%.*]] = getelementptr inbounds <4 x float>, <4 x float> addrspace(4)* [[P:%.*]], i64 0, i64 0
; CHECK-NEXT:    [[R:%.*]] = load float, float addrspace(4)* [[BC]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %bc = bitcast <4 x float> addrspace(4)* %p to float addrspace(4)*
  %r = load float, float addrspace(4)* %bc, align 16
  ret float %r
}

; A null pointer can't be assumed inbounds in a non-default address space.

define float @matching_scalar_smallest_deref_or_null_addrspace(<4 x float> addrspace(4)* dereferenceable_or_null(1) %p) {
; CHECK-LABEL: @matching_scalar_smallest_deref_or_null_addrspace(
; CHECK-NEXT:    [[BC:%.*]] = getelementptr <4 x float>, <4 x float> addrspace(4)* [[P:%.*]], i64 0, i64 0
; CHECK-NEXT:    [[R:%.*]] = load float, float addrspace(4)* [[BC]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %bc = bitcast <4 x float> addrspace(4)* %p to float addrspace(4)*
  %r = load float, float addrspace(4)* %bc, align 16
  ret float %r
}

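; The fold also applies to a volatile load; the volatile flag is preserved.
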
define float @matching_scalar_volatile(<4 x float>* dereferenceable(16) %p) {
; CHECK-LABEL: @matching_scalar_volatile(
; CHECK-NEXT:    [[BC:%.*]] = getelementptr inbounds <4 x float>, <4 x float>* [[P:%.*]], i64 0, i64 0
; CHECK-NEXT:    [[R:%.*]] = load volatile float, float* [[BC]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %bc = bitcast <4 x float>* %p to float*
  %r = load volatile float, float* %bc, align 16
  ret float %r
}

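; The source is not a vector pointer, so the bitcast is kept.
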
define float @nonvector(double* dereferenceable(16) %p) {
; CHECK-LABEL: @nonvector(
; CHECK-NEXT:    [[BC:%.*]] = bitcast double* [[P:%.*]] to float*
; CHECK-NEXT:    [[R:%.*]] = load float, float* [[BC]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %bc = bitcast double* %p to float*
  %r = load float, float* %bc, align 16
  ret float %r
}