; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64

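; The narrow shl+and is performed on a wider type so the byte load can be folded.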
define i64 @test1(i8* %data) {
; X86-LABEL: test1:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movzbl (%eax), %eax
; X86-NEXT:    shll $2, %eax
; X86-NEXT:    andl $60, %eax
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    retl
;
; X64-LABEL: test1:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movl (%rdi), %eax
; X64-NEXT:    shll $2, %eax
; X64-NEXT:    andl $60, %eax
; X64-NEXT:    retq
entry:
  %bf.load = load i8, i8* %data, align 4
  %bf.clear = shl i8 %bf.load, 2
  %0 = and i8 %bf.clear, 60
  %mul = zext i8 %0 to i64
  ret i64 %mul
}

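; The same pattern used as a GEP index: the shift becomes the LEA scale and the mask is applied before scaling.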
define i8* @test2(i8* %data) {
; X86-LABEL: test2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movzbl (%eax), %ecx
; X86-NEXT:    andl $15, %ecx
; X86-NEXT:    leal (%eax,%ecx,4), %eax
; X86-NEXT:    retl
;
; X64-LABEL: test2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movl (%rdi), %eax
; X64-NEXT:    andl $15, %eax
; X64-NEXT:    leaq (%rdi,%rax,4), %rax
; X64-NEXT:    retq
entry:
  %bf.load = load i8, i8* %data, align 4
  %bf.clear = shl i8 %bf.load, 2
  %0 = and i8 %bf.clear, 60
  %mul = zext i8 %0 to i64
  %add.ptr = getelementptr inbounds i8, i8* %data, i64 %mul
  ret i8* %add.ptr
}

; If the shift op is SHL, the logic op can only be AND.
define i64 @test3(i8* %data) {
; X86-LABEL: test3:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movb (%eax), %al
; X86-NEXT:    shlb $2, %al
; X86-NEXT:    xorb $60, %al
; X86-NEXT:    movzbl %al, %eax
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    retl
;
; X64-LABEL: test3:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movb (%rdi), %al
; X64-NEXT:    shlb $2, %al
; X64-NEXT:    xorb $60, %al
; X64-NEXT:    movzbl %al, %eax
; X64-NEXT:    retq
entry:
  %bf.load = load i8, i8* %data, align 4
  %bf.clear = shl i8 %bf.load, 2
  %0 = xor i8 %bf.clear, 60
  %mul = zext i8 %0 to i64
  ret i64 %mul
}

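; If the shift op is LSHR, the logic op can be AND, XOR or OR (test4-test6).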
define i64 @test4(i8* %data) {
; X86-LABEL: test4:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movzbl (%eax), %eax
; X86-NEXT:    shrl $2, %eax
; X86-NEXT:    andl $-4, %eax
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    retl
;
; X64-LABEL: test4:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movl (%rdi), %eax
; X64-NEXT:    shrq $2, %rax
; X64-NEXT:    andl $60, %eax
; X64-NEXT:    retq
entry:
  %bf.load = load i8, i8* %data, align 4
  %bf.clear = lshr i8 %bf.load, 2
  %0 = and i8 %bf.clear, 60
  %1 = zext i8 %0 to i64
  ret i64 %1
}

define i64 @test5(i8* %data) {
; X86-LABEL: test5:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movzbl (%eax), %eax
; X86-NEXT:    shrl $2, %eax
; X86-NEXT:    xorl $60, %eax
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    retl
;
; X64-LABEL: test5:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movzbl (%rdi), %eax
; X64-NEXT:    shrq $2, %rax
; X64-NEXT:    xorq $60, %rax
; X64-NEXT:    retq
entry:
  %bf.load = load i8, i8* %data, align 4
  %bf.clear = lshr i8 %bf.load, 2
  %0 = xor i8 %bf.clear, 60
  %1 = zext i8 %0 to i64
  ret i64 %1
}

define i64 @test6(i8* %data) {
; X86-LABEL: test6:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movzbl (%eax), %eax
; X86-NEXT:    shrl $2, %eax
; X86-NEXT:    orl $60, %eax
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    retl
;
; X64-LABEL: test6:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movzbl (%rdi), %eax
; X64-NEXT:    shrq $2, %rax
; X64-NEXT:    orq $60, %rax
; X64-NEXT:    retq
entry:
  %bf.load = load i8, i8* %data, align 4
  %bf.clear = lshr i8 %bf.load, 2
  %0 = or i8 %bf.clear, 60
  %1 = zext i8 %0 to i64
  ret i64 %1
}

; Load is folded with sext.
define i64 @test8(i8* %data) {
; X86-LABEL: test8:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movsbl (%eax), %eax
; X86-NEXT:    movzwl %ax, %eax
; X86-NEXT:    shrl $2, %eax
; X86-NEXT:    orl $60, %eax
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    retl
;
; X64-LABEL: test8:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movsbl (%rdi), %eax
; X64-NEXT:    movzwl %ax, %eax
; X64-NEXT:    shrl $2, %eax
; X64-NEXT:    orl $60, %eax
; X64-NEXT:    retq
entry:
  %bf.load = load i8, i8* %data, align 4
  %ext = sext i8 %bf.load to i16
  %bf.clear = lshr i16 %ext, 2
  %0 = or i16 %bf.clear, 60
  %1 = zext i16 %0 to i64
  ret i64 %1
}