; RUN: llc < %s -march=mips -mcpu=mips2 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP32,M2,NOT-R2-R6
; RUN: llc < %s -march=mips -mcpu=mips32 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP32,NOT-R2-R6,32R1-R5
; RUN: llc < %s -march=mips -mcpu=mips32r2 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP32,32R1-R5,R2-R6
; RUN: llc < %s -march=mips -mcpu=mips32r3 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP32,32R1-R5,R2-R6
; RUN: llc < %s -march=mips -mcpu=mips32r5 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP32,32R1-R5,R2-R6
; RUN: llc < %s -march=mips -mcpu=mips32r6 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP32,32R6,R2-R6
; RUN: llc < %s -march=mips64 -mcpu=mips3 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP64,M3,NOT-R2-R6
; RUN: llc < %s -march=mips64 -mcpu=mips4 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP64,GP64-NOT-R6,NOT-R2-R6
; RUN: llc < %s -march=mips64 -mcpu=mips64 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP64,GP64-NOT-R6,NOT-R2-R6
; RUN: llc < %s -march=mips64 -mcpu=mips64r2 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP64,GP64-NOT-R6,R2-R6
; RUN: llc < %s -march=mips64 -mcpu=mips64r3 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP64,GP64-NOT-R6,R2-R6
; RUN: llc < %s -march=mips64 -mcpu=mips64r5 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP64,GP64-NOT-R6,R2-R6
; RUN: llc < %s -march=mips64 -mcpu=mips64r6 -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,GP64,64R6,R2-R6
; RUN: llc < %s -march=mips -mcpu=mips32r3 -mattr=+micromips -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,MM,MMR3
; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips -relocation-model=pic | FileCheck %s \
; RUN:    -check-prefixes=ALL,MM,MMR6

define signext i1 @shl_i1(i1 signext %a, i1 signext %b) {
entry:
; ALL-LABEL: shl_i1:

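  ; A shift of an i1 by any nonzero amount is poison, so the only defined
  ; case is a shift by zero and codegen simply returns %a.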
  ; ALL:        move    $2, $4

  %r = shl i1 %a, %b
  ret i1 %r
}

define signext i8 @shl_i8(i8 signext %a, i8 signext %b) {
entry:
; ALL-LABEL: shl_i8:

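  ; The shift amount is masked to the type's width; pre-R2 cores sign-extend
  ; the result with an sll/sra pair, while R2 and later (and microMIPS) have seb.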
  ; NOT-R2-R6:  andi    $[[T0:[0-9]+]], $5, 255
  ; NOT-R2-R6:  sllv    $[[T1:[0-9]+]], $4, $[[T0]]
  ; NOT-R2-R6:  sll     $[[T2:[0-9]+]], $[[T1]], 24
  ; NOT-R2-R6:  sra     $2, $[[T2]], 24

  ; R2-R6:      andi    $[[T0:[0-9]+]], $5, 255
  ; R2-R6:      sllv    $[[T1:[0-9]+]], $4, $[[T0]]
  ; R2-R6:      seb     $2, $[[T1]]

  ; MM:         andi16  $[[T0:[0-9]+]], $5, 255
  ; MM:         sllv    $[[T1:[0-9]+]], $4, $[[T0]]
  ; MM:         seb     $2, $[[T1]]

  %r = shl i8 %a, %b
  ret i8 %r
}

define signext i16 @shl_i16(i16 signext %a, i16 signext %b) {
entry:
; ALL-LABEL: shl_i16:

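  ; Same pattern as shl_i8, but with a 16-bit mask and seh on R2 and later.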
  ; NOT-R2-R6:  andi    $[[T0:[0-9]+]], $5, 65535
  ; NOT-R2-R6:  sllv    $[[T1:[0-9]+]], $4, $[[T0]]
  ; NOT-R2-R6:  sll     $[[T2:[0-9]+]], $[[T1]], 16
  ; NOT-R2-R6:  sra     $2, $[[T2]], 16

  ; R2-R6:      andi    $[[T0:[0-9]+]], $5, 65535
  ; R2-R6:      sllv    $[[T1:[0-9]+]], $4, $[[T0]]
  ; R2-R6:      seh     $2, $[[T1]]

  ; MM:         andi16  $[[T0:[0-9]+]], $5, 65535
  ; MM:         sllv    $[[T1:[0-9]+]], $4, $[[T0]]
  ; MM:         seh     $2, $[[T1]]

  %r = shl i16 %a, %b
  ret i16 %r
}

define signext i32 @shl_i32(i32 signext %a, i32 signext %b) {
entry:
; ALL-LABEL: shl_i32:

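  ; A native 32-bit shift is a single sllv on every target.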
  ; ALL:        sllv    $2, $4, $5

  %r = shl i32 %a, %b
  ret i32 %r
}

define signext i64 @shl_i64(i64 signext %a, i64 signext %b) {
entry:
; ALL-LABEL: shl_i64:

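  ; MIPS II has no conditional moves, so the two halves of the 64-bit result
  ; are selected with branches: for s < 32, hi = (a.hi << s) | (a.lo >> 1 >> ~s);
  ; otherwise hi = a.lo << (s & 31) and lo = 0.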
  ; M2:         sllv      $[[T0:[0-9]+]], $5, $7
  ; M2:         andi      $[[T1:[0-9]+]], $7, 32
  ; M2:         bnez      $[[T1]], $[[BB0:BB[0-9_]+]]
  ; M2:         move      $2, $[[T0]]
  ; M2:         sllv      $[[T2:[0-9]+]], $4, $7
  ; M2:         not       $[[T3:[0-9]+]], $7
  ; M2:         srl       $[[T4:[0-9]+]], $5, 1
  ; M2:         srlv      $[[T5:[0-9]+]], $[[T4]], $[[T3]]
  ; M2:         or        $2, $[[T2]], $[[T5]]
  ; M2:         $[[BB0]]:
  ; M2:         bnez      $[[T1]], $[[BB1:BB[0-9_]+]]
  ; M2:         addiu     $3, $zero, 0
  ; M2:         move      $3, $[[T0]]
  ; M2:         $[[BB1]]:
  ; M2:         jr        $ra
  ; M2:         nop

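  ; MIPS32 has movn, so the s >= 32 case is handled with conditional moves
  ; instead of branches.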
  ; 32R1-R5:    sllv      $[[T0:[0-9]+]], $4, $7
  ; 32R1-R5:    not       $[[T1:[0-9]+]], $7
  ; 32R1-R5:    srl       $[[T2:[0-9]+]], $5, 1
  ; 32R1-R5:    srlv      $[[T3:[0-9]+]], $[[T2]], $[[T1]]
  ; 32R1-R5:    or        $2, $[[T0]], $[[T3]]
  ; 32R1-R5:    sllv      $[[T4:[0-9]+]], $5, $7
  ; 32R1-R5:    andi      $[[T5:[0-9]+]], $7, 32
  ; 32R1-R5:    movn      $2, $[[T4]], $[[T5]]
  ; 32R1-R5:    jr        $ra
  ; 32R1-R5:    movn      $3, $zero, $[[T5]]

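  ; R6 removed movn; the halves are composed with seleqz/selnez and an or.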
  ; 32R6:       sllv      $[[T0:[0-9]+]], $4, $7
  ; 32R6:       not       $[[T1:[0-9]+]], $7
  ; 32R6:       srl       $[[T2:[0-9]+]], $5, 1
  ; 32R6:       srlv      $[[T3:[0-9]+]], $[[T2]], $[[T1]]
  ; 32R6:       or        $[[T4:[0-9]+]], $[[T0]], $[[T3]]
  ; 32R6:       andi      $[[T5:[0-9]+]], $7, 32
  ; 32R6:       seleqz    $[[T6:[0-9]+]], $[[T4]], $[[T5]]
  ; 32R6:       sllv      $[[T7:[0-9]+]], $5, $7
  ; 32R6:       selnez    $[[T8:[0-9]+]], $[[T7]], $[[T5]]
  ; 32R6:       or        $2, $[[T8]], $[[T6]]
  ; 32R6:       jr        $ra
  ; 32R6:       seleqz    $3, $[[T7]], $[[T5]]

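  ; 64-bit targets shift i64 natively with a single dsllv.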
  ; GP64:       dsllv     $2, $4, $5

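  ; microMIPS uses the 16-bit encodings (srl16, not16, or16, andi16) where possible.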
  ; MMR3:       sllv      $[[T0:[0-9]+]], $4, $7
  ; MMR3:       srl16     $[[T1:[0-9]+]], $5, 1
  ; MMR3:       not16     $[[T2:[0-9]+]], $7
  ; MMR3:       srlv      $[[T3:[0-9]+]], $[[T1]], $[[T2]]
  ; MMR3:       or16      $[[T4:[0-9]+]], $[[T0]]
  ; MMR3:       sllv      $[[T5:[0-9]+]], $5, $7
  ; MMR3:       andi16    $[[T6:[0-9]+]], $7, 32
  ; MMR3:       movn      $[[T7:[0-9]+]], $[[T5]], $[[T6]]
  ; MMR3:       lui       $[[T8:[0-9]+]], 0
  ; MMR3:       movn      $3, $[[T8]], $[[T6]]

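  ; microMIPS R6 combines the 16-bit encodings with the R6 selects.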
  ; MMR6:       sllv      $[[T0:[0-9]+]], $4, $7
  ; MMR6:       srl16     $[[T1:[0-9]+]], $5, 1
  ; MMR6:       not16     $[[T2:[0-9]+]], $7
  ; MMR6:       srlv      $[[T3:[0-9]+]], $[[T1]], $[[T2]]
  ; MMR6:       or16      $[[T4:[0-9]+]], $[[T0]]
  ; MMR6:       andi16    $[[T5:[0-9]+]], $7, 32
  ; MMR6:       seleqz    $[[T6:[0-9]+]], $[[T4]], $[[T5]]
  ; MMR6:       sllv      $[[T7:[0-9]+]], $5, $7
  ; MMR6:       selnez    $[[T8:[0-9]+]], $[[T7]], $[[T5]]
  ; MMR6:       or        $2, $[[T8]], $[[T6]]
  ; MMR6:       seleqz    $3, $[[T7]], $[[T5]]

  %r = shl i64 %a, %b
  ret i64 %r
}

define signext i128 @shl_i128(i128 signext %a, i128 signext %b) {
entry:
; ALL-LABEL: shl_i128:

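  ; 32-bit targets expand a 128-bit shift to a call to the __ashlti3 libcall.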
  ; GP32:           lw        $25, %call16(__ashlti3)($gp)

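  ; MIPS III predates conditional moves, so this mirrors the branch-based M2
  ; lowering of shl_i64, using doubleword instructions on the two i64 halves.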
  ; M3:             sll       $[[T0:[0-9]+]], $7, 0
  ; M3:             dsllv     $[[T1:[0-9]+]], $5, $7
  ; M3:             andi      $[[T2:[0-9]+]], $[[T0]], 64
  ; M3:             bnez      $[[T2]], $[[BB0:BB[0-9_]+]]
  ; M3:             move      $2, $[[T1]]
  ; M3:             dsllv     $[[T4:[0-9]+]], $4, $7
  ; M3:             dsrl      $[[T5:[0-9]+]], $5, 1
  ; M3:             not       $[[T6:[0-9]+]], $[[T0]]
  ; M3:             dsrlv     $[[T7:[0-9]+]], $[[T5]], $[[T6]]
  ; M3:             or        $2, $[[T4]], $[[T7]]
  ; M3:             $[[BB0]]:
  ; M3:             bnez      $[[T2]], $[[BB1:BB[0-9_]+]]
  ; M3:             daddiu    $3, $zero, 0
  ; M3:             move      $3, $[[T1]]
  ; M3:             $[[BB1]]:
  ; M3:             jr        $ra
  ; M3:             nop

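  ; MIPS IV through R5 have movn, so the s >= 64 case is selected without branches.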
  ; GP64-NOT-R6:    dsllv     $[[T0:[0-9]+]], $4, $7
  ; GP64-NOT-R6:    dsrl      $[[T1:[0-9]+]], $5, 1
  ; GP64-NOT-R6:    sll       $[[T2:[0-9]+]], $7, 0
  ; GP64-NOT-R6:    not       $[[T3:[0-9]+]], $[[T2]]
  ; GP64-NOT-R6:    dsrlv     $[[T4:[0-9]+]], $[[T1]], $[[T3]]
  ; GP64-NOT-R6:    or        $2, $[[T0]], $[[T4]]
  ; GP64-NOT-R6:    dsllv     $3, $5, $7
  ; GP64-NOT-R6:    andi      $[[T5:[0-9]+]], $[[T2]], 64
  ; GP64-NOT-R6:    movn      $2, $3, $[[T5]]
  ; GP64-NOT-R6:    jr        $ra
  ; GP64-NOT-R6:    movn      $3, $zero, $[[T5]]

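  ; R6 again uses seleqz/selnez; the extra sll ..., 0 re-extends the 32-bit
  ; andi result before it is used as a select condition.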
  ; 64R6:           dsllv     $[[T0:[0-9]+]], $4, $7
  ; 64R6:           dsrl      $[[T1:[0-9]+]], $5, 1
  ; 64R6:           sll       $[[T2:[0-9]+]], $7, 0
  ; 64R6:           not       $[[T3:[0-9]+]], $[[T2]]
  ; 64R6:           dsrlv     $[[T4:[0-9]+]], $[[T1]], $[[T3]]
  ; 64R6:           or        $[[T5:[0-9]+]], $[[T0]], $[[T4]]
  ; 64R6:           andi      $[[T6:[0-9]+]], $[[T2]], 64
  ; 64R6:           sll       $[[T7:[0-9]+]], $[[T6]], 0
  ; 64R6:           seleqz    $[[T8:[0-9]+]], $[[T5]], $[[T7]]
  ; 64R6:           dsllv     $[[T9:[0-9]+]], $5, $7
  ; 64R6:           selnez    $[[T10:[0-9]+]], $[[T9]], $[[T7]]
  ; 64R6:           or        $2, $[[T10]], $[[T8]]
  ; 64R6:           jr        $ra
  ; 64R6:           seleqz    $3, $[[T9]], $[[T7]]

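  ; microMIPS also calls __ashlti3, with $25 loaded relative to $2 rather than $gp.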
  ; MM:             lw        $25, %call16(__ashlti3)($2)

  %r = shl i128 %a, %b
  ret i128 %r
}