1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
3; RUN:   | FileCheck %s -check-prefix=RV32I
4
5; Check indexed and unindexed, sext, zext and anyext loads
6
define i32 @lb(i8 *%a) nounwind {
; RV32I-LABEL: lb:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lb a1, 1(a0)
; RV32I-NEXT:    lb a0, 0(a0)
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:    ret
  ; sextload i8: the +1 element (byte) offset should fold into the lb immediate.
  %1 = getelementptr i8, i8* %a, i32 1
  %2 = load i8, i8* %1
  %3 = sext i8 %2 to i32
  ; the unused load will produce an anyext for selection
  %4 = load volatile i8, i8* %a
  ret i32 %3
}
21
define i32 @lh(i16 *%a) nounwind {
; RV32I-LABEL: lh:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lh a1, 4(a0)
; RV32I-NEXT:    lh a0, 0(a0)
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:    ret
  ; sextload i16: element offset 2 scales to byte offset 4 in the lh immediate.
  %1 = getelementptr i16, i16* %a, i32 2
  %2 = load i16, i16* %1
  %3 = sext i16 %2 to i32
  ; the unused load will produce an anyext for selection
  %4 = load volatile i16, i16* %a
  ret i32 %3
}
36
define i32 @lw(i32 *%a) nounwind {
; RV32I-LABEL: lw:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lw a1, 12(a0)
; RV32I-NEXT:    lw a0, 0(a0)
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:    ret
  ; Full-width i32 load: no extension needed. Element offset 3 scales to byte
  ; offset 12 in the lw immediate; the volatile load keeps the second lw alive.
  %1 = getelementptr i32, i32* %a, i32 3
  %2 = load i32, i32* %1
  %3 = load volatile i32, i32* %a
  ret i32 %2
}
49
define i32 @lbu(i8 *%a) nounwind {
; RV32I-LABEL: lbu:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lbu a1, 4(a0)
; RV32I-NEXT:    lbu a0, 0(a0)
; RV32I-NEXT:    add a0, a1, a0
; RV32I-NEXT:    ret
  ; zextload i8 with and without an offset; both zero-extended results are
  ; added so neither load is dead.
  %1 = getelementptr i8, i8* %a, i32 4
  %2 = load i8, i8* %1
  %3 = zext i8 %2 to i32
  %4 = load volatile i8, i8* %a
  %5 = zext i8 %4 to i32
  %6 = add i32 %3, %5
  ret i32 %6
}
65
define i32 @lhu(i16 *%a) nounwind {
; RV32I-LABEL: lhu:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lhu a1, 10(a0)
; RV32I-NEXT:    lhu a0, 0(a0)
; RV32I-NEXT:    add a0, a1, a0
; RV32I-NEXT:    ret
  ; zextload i16: element offset 5 scales to byte offset 10 in the lhu
  ; immediate; the add keeps both loads live.
  %1 = getelementptr i16, i16* %a, i32 5
  %2 = load i16, i16* %1
  %3 = zext i16 %2 to i32
  %4 = load volatile i16, i16* %a
  %5 = zext i16 %4 to i32
  %6 = add i32 %3, %5
  ret i32 %6
}
81
82; Check indexed and unindexed stores
83
define void @sb(i8 *%a, i8 %b) nounwind {
; RV32I-LABEL: sb:
; RV32I:       # %bb.0:
; RV32I-NEXT:    sb a1, 0(a0)
; RV32I-NEXT:    sb a1, 6(a0)
; RV32I-NEXT:    ret
  ; truncstore i8 at offset 0, then at element (byte) offset 6 folded into
  ; the sb immediate.
  store i8 %b, i8* %a
  %1 = getelementptr i8, i8* %a, i32 6
  store i8 %b, i8* %1
  ret void
}
95
define void @sh(i16 *%a, i16 %b) nounwind {
; RV32I-LABEL: sh:
; RV32I:       # %bb.0:
; RV32I-NEXT:    sh a1, 0(a0)
; RV32I-NEXT:    sh a1, 14(a0)
; RV32I-NEXT:    ret
  ; truncstore i16: element offset 7 scales to byte offset 14 in the sh
  ; immediate.
  store i16 %b, i16* %a
  %1 = getelementptr i16, i16* %a, i32 7
  store i16 %b, i16* %1
  ret void
}
107
define void @sw(i32 *%a, i32 %b) nounwind {
; RV32I-LABEL: sw:
; RV32I:       # %bb.0:
; RV32I-NEXT:    sw a1, 0(a0)
; RV32I-NEXT:    sw a1, 32(a0)
; RV32I-NEXT:    ret
  ; Full-width i32 store: element offset 8 scales to byte offset 32 in the sw
  ; immediate.
  store i32 %b, i32* %a
  %1 = getelementptr i32, i32* %a, i32 8
  store i32 %b, i32* %1
  ret void
}
119
120; Check load and store to an i1 location
define i32 @load_sext_zext_anyext_i1(i1 *%a) nounwind {
; RV32I-LABEL: load_sext_zext_anyext_i1:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lbu a1, 1(a0)
; RV32I-NEXT:    lbu a2, 2(a0)
; RV32I-NEXT:    lb a0, 0(a0)
; RV32I-NEXT:    sub a0, a2, a1
; RV32I-NEXT:    ret
  ; All three i1 extension flavors in one function. Note the generated code
  ; computes zext - bit instead of zext + sext, since sext i1 x == -zext i1 x.
  ; sextload i1
  %1 = getelementptr i1, i1* %a, i32 1
  %2 = load i1, i1* %1
  %3 = sext i1 %2 to i32
  ; zextload i1
  %4 = getelementptr i1, i1* %a, i32 2
  %5 = load i1, i1* %4
  %6 = zext i1 %5 to i32
  %7 = add i32 %3, %6
  ; extload i1 (anyext). Produced as the load is unused.
  %8 = load volatile i1, i1* %a
  ret i32 %7
}
142
define i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind {
; RV32I-LABEL: load_sext_zext_anyext_i1_i16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lbu a1, 1(a0)
; RV32I-NEXT:    lbu a2, 2(a0)
; RV32I-NEXT:    lb a0, 0(a0)
; RV32I-NEXT:    sub a0, a2, a1
; RV32I-NEXT:    ret
  ; Same as load_sext_zext_anyext_i1 but extending to i16 rather than i32;
  ; the generated code is identical since i16 lives in a full GPR on RV32.
  ; sextload i1
  %1 = getelementptr i1, i1* %a, i32 1
  %2 = load i1, i1* %1
  %3 = sext i1 %2 to i16
  ; zextload i1
  %4 = getelementptr i1, i1* %a, i32 2
  %5 = load i1, i1* %4
  %6 = zext i1 %5 to i16
  %7 = add i16 %3, %6
  ; extload i1 (anyext). Produced as the load is unused.
  %8 = load volatile i1, i1* %a
  ret i16 %7
}
164
; Check load and store to a global; @G is materialized with %hi/%lo pairs in
; lw_sw_global below.
@G = global i32 0
167
define i32 @lw_sw_global(i32 %a) nounwind {
; RV32I-LABEL: lw_sw_global:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lui a2, %hi(G)
; RV32I-NEXT:    lw a1, %lo(G)(a2)
; RV32I-NEXT:    sw a0, %lo(G)(a2)
; RV32I-NEXT:    addi a2, a2, %lo(G)
; RV32I-NEXT:    lw a3, 36(a2)
; RV32I-NEXT:    sw a0, 36(a2)
; RV32I-NEXT:    ret
; RV32I-NEXT:    mv a0, a1
  ; Direct accesses to @G fold %lo(G) into the lw/sw immediates; the offset
  ; access (element 9 -> byte offset 36) goes through a materialized address.
  ; Loads are volatile so they are not folded away or reordered.
  %1 = load volatile i32, i32* @G
  store i32 %a, i32* @G
  %2 = getelementptr i32, i32* @G, i32 9
  %3 = load volatile i32, i32* %2
  store i32 %a, i32* %2
  ret i32 %1
}
186
187; Ensure that 1 is added to the high 20 bits if bit 11 of the low part is 1
define i32 @lw_sw_constant(i32 %a) nounwind {
; RV32I-LABEL: lw_sw_constant:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lui a2, 912092
; RV32I-NEXT:    lw a1, -273(a2)
; RV32I-NEXT:    sw a0, -273(a2)
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:    ret
  ; 3735928559 = 0xdeadbeef: bit 11 of the low 12 bits is set, so the lui
  ; immediate is rounded up (912092 << 12 = 0xdeadc000) and a negative
  ; offset (-273) brings the address back to 0xdeadbeef.
  %1 = inttoptr i32 3735928559 to i32*
  %2 = load volatile i32, i32* %1
  store i32 %a, i32* %1
  ret i32 %2
}
201