1; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
2; RUN:    -check-prefix=ALL -check-prefix=GP32 \
3; RUN:    -check-prefix=M2
4; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
5; RUN:    -check-prefix=ALL -check-prefix=GP32 \
6; RUN:    -check-prefix=32R1-R5
7; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
8; RUN:    -check-prefix=ALL -check-prefix=GP32 \
9; RUN:    -check-prefix=32R1-R5
10; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \
11; RUN:    -check-prefix=ALL -check-prefix=GP32 \
12; RUN:    -check-prefix=32R1-R5
13; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \
14; RUN:    -check-prefix=ALL -check-prefix=GP32 \
15; RUN:    -check-prefix=32R1-R5
16; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
17; RUN:    -check-prefix=ALL -check-prefix=GP32 \
18; RUN:    -check-prefix=32R6
19; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
20; RUN:    -check-prefix=ALL -check-prefix=GP64 \
21; RUN:    -check-prefix=M3
22; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
23; RUN:    -check-prefix=ALL -check-prefix=GP64 \
24; RUN:    -check-prefix=GP64-NOT-R6
25; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
26; RUN:    -check-prefix=ALL -check-prefix=GP64 \
27; RUN:    -check-prefix=GP64-NOT-R6
28; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
29; RUN:    -check-prefix=ALL -check-prefix=GP64 \
30; RUN:    -check-prefix=GP64-NOT-R6
31; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
32; RUN:    -check-prefix=ALL -check-prefix=GP64 \
33; RUN:    -check-prefix=GP64-NOT-R6
34; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
35; RUN:    -check-prefix=ALL -check-prefix=GP64 \
36; RUN:    -check-prefix=GP64-NOT-R6
37; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
38; RUN:    -check-prefix=ALL -check-prefix=GP64 \
39; RUN:    -check-prefix=64R6
40
; An i1 shift amount can only be 0, so the ashr folds away entirely and the
; result is just a copy of %a into the return register.
define signext i1 @ashr_i1(i1 signext %a, i1 signext %b) {
entry:
; ALL-LABEL: ashr_i1:

  ; ALL:        move    $2, $4

  %r = ashr i1 %a, %b
  ret i1 %r
}
50
; i8 is promoted to i32 for the shift; the shift amount is masked to the
; original type's bit width first (andi ..., 255), then shifted with srav.
define signext i8 @ashr_i8(i8 signext %a, i8 signext %b) {
entry:
; ALL-LABEL: ashr_i8:

  ; FIXME: The andi instruction is redundant.
  ; ALL:        andi    $[[T0:[0-9]+]], $5, 255
  ; ALL:        srav    $2, $4, $[[T0]]

  %r = ashr i8 %a, %b
  ret i8 %r
}
62
; i16 is promoted to i32 for the shift; the shift amount is masked to the
; original type's bit width first (andi ..., 65535), then shifted with srav.
define signext i16 @ashr_i16(i16 signext %a, i16 signext %b) {
entry:
; ALL-LABEL: ashr_i16:

  ; FIXME: The andi instruction is redundant.
  ; ALL:        andi    $[[T0:[0-9]+]], $5, 65535
  ; ALL:        srav    $2, $4, $[[T0]]

  %r = ashr i16 %a, %b
  ret i16 %r
}
74
; i32 matches the native GPR width on all tested targets, so a single srav
; suffices with no masking of the shift amount.
define signext i32 @ashr_i32(i32 signext %a, i32 signext %b) {
entry:
; ALL-LABEL: ashr_i32:

  ; ALL:        srav    $2, $4, $5

  %r = ashr i32 %a, %b
  ret i32 %r
}
</deprecated>
84
; On 32-bit targets an i64 ashr is expanded: bit 5 of the shift amount
; (andi ..., 32) selects between the "shift < 32" combine path and the
; "shift >= 32" sign-fill path. 64-bit targets use a single dsrav.
define signext i64 @ashr_i64(i64 signext %a, i64 signext %b) {
entry:
; ALL-LABEL: ashr_i64:

  ; M2:         srav      $[[T0:[0-9]+]], $4, $7
  ; M2:         andi      $[[T1:[0-9]+]], $7, 32
  ; M2:         bnez      $[[T1]], $[[BB0:BB[0-9_]+]]
  ; M2:         move      $3, $[[T0]]
  ; M2:         srlv      $[[T2:[0-9]+]], $5, $7
  ; M2:         not       $[[T3:[0-9]+]], $7
  ; M2:         sll       $[[T4:[0-9]+]], $4, 1
  ; M2:         sllv      $[[T5:[0-9]+]], $[[T4]], $[[T3]]
  ; The low word is the srlv result combined with the sllv result ([[T5]]),
  ; not the inverted shift amount ([[T3]]).
  ; M2:         or        $3, $[[T5]], $[[T2]]
  ; M2:         $[[BB0]]:
  ; M2:         beqz      $[[T1]], $[[BB1:BB[0-9_]+]]
  ; M2:         nop
  ; M2:         sra       $2, $4, 31
  ; M2:         $[[BB1]]:
  ; M2:         jr        $ra
  ; M2:         nop

  ; 32R1-R5:    srlv      $[[T0:[0-9]+]], $5, $7
  ; 32R1-R5:    not       $[[T1:[0-9]+]], $7
  ; 32R1-R5:    sll       $[[T2:[0-9]+]], $4, 1
  ; 32R1-R5:    sllv      $[[T3:[0-9]+]], $[[T2]], $[[T1]]
  ; 32R1-R5:    or        $3, $[[T3]], $[[T0]]
  ; 32R1-R5:    srav      $[[T4:[0-9]+]], $4, $7
  ; 32R1-R5:    andi      $[[T5:[0-9]+]], $7, 32
  ; 32R1-R5:    movn      $3, $[[T4]], $[[T5]]
  ; 32R1-R5:    sra       $4, $4, 31
  ; 32R1-R5:    jr        $ra
  ; 32R1-R5:    movn      $2, $4, $[[T5]]

  ; 32R6:       srav      $[[T0:[0-9]+]], $4, $7
  ; 32R6:       andi      $[[T1:[0-9]+]], $7, 32
  ; 32R6:       seleqz    $[[T2:[0-9]+]], $[[T0]], $[[T1]]
  ; 32R6:       sra       $[[T3:[0-9]+]], $4, 31
  ; 32R6:       selnez    $[[T4:[0-9]+]], $[[T3]], $[[T1]]
  ; 32R6:       or        $[[T5:[0-9]+]], $[[T4]], $[[T2]]
  ; 32R6:       srlv      $[[T6:[0-9]+]], $5, $7
  ; 32R6:       not       $[[T7:[0-9]+]], $7
  ; 32R6:       sll       $[[T8:[0-9]+]], $4, 1
  ; 32R6:       sllv      $[[T9:[0-9]+]], $[[T8]], $[[T7]]
  ; 32R6:       or        $[[T10:[0-9]+]], $[[T9]], $[[T6]]
  ; 32R6:       seleqz    $[[T11:[0-9]+]], $[[T10]], $[[T1]]
  ; 32R6:       selnez    $[[T12:[0-9]+]], $[[T0]], $[[T1]]
  ; 32R6:       jr        $ra
  ; The low word selects the selnez-masked srav result ([[T12]]), not the
  ; raw srav result ([[T0]]) — cf. the identical 64R6 pattern in ashr_i128.
  ; 32R6:       or        $3, $[[T12]], $[[T11]]

  ; FIXME: The sll instruction below is redundant.
  ; GP64:       sll       $[[T0:[0-9]+]], $5, 0
  ; GP64:       dsrav     $2, $4, $[[T0]]

  %r = ashr i64 %a, %b
  ret i64 %r
}
141
; On 64-bit targets an i128 ashr is expanded across two 64-bit registers:
; bit 6 of the shift amount (andi ..., 64) selects between the "shift < 64"
; combine path and the "shift >= 64" path, and the high word is sign-filled
; with dsra ..., 63 (the 64-bit register width minus one, not 31).
; 32-bit targets call the __ashrti3 library routine instead.
define signext i128 @ashr_i128(i128 signext %a, i128 signext %b) {
entry:
; ALL-LABEL: ashr_i128:

  ; GP32:           lw        $25, %call16(__ashrti3)($gp)

  ; M3:             sll       $[[T0:[0-9]+]], $7, 0
  ; M3:             dsrav     $[[T1:[0-9]+]], $4, $[[T0]]
  ; M3:             andi      $[[T2:[0-9]+]], $[[T0]], 64
  ; M3:             bnez      $[[T3:[0-9]+]], $[[BB0:BB[0-9_]+]]
  ; M3:             move      $3, $[[T1]]
  ; M3:             dsrlv     $[[T4:[0-9]+]], $5, $[[T0]]
  ; M3:             dsll      $[[T5:[0-9]+]], $4, 1
  ; M3:             not       $[[T6:[0-9]+]], $[[T0]]
  ; M3:             dsllv     $[[T7:[0-9]+]], $[[T5]], $[[T6]]
  ; M3:             or        $3, $[[T7]], $[[T4]]
  ; M3:             $[[BB0]]:
  ; M3:             beqz      $[[T3]], $[[BB1:BB[0-9_]+]]
  ; M3:             nop
  ; M3:             dsra      $2, $4, 63
  ; M3:             $[[BB1]]:
  ; M3:             jr        $ra
  ; M3:             nop

  ; GP64-NOT-R6:    sll       $[[T0:[0-9]+]], $7, 0
  ; GP64-NOT-R6:    dsrlv     $[[T1:[0-9]+]], $5, $[[T0]]
  ; GP64-NOT-R6:    dsll      $[[T2:[0-9]+]], $4, 1
  ; GP64-NOT-R6:    not       $[[T3:[0-9]+]], $[[T0]]
  ; GP64-NOT-R6:    dsllv     $[[T4:[0-9]+]], $[[T2]], $[[T3]]
  ; GP64-NOT-R6:    or        $3, $[[T4]], $[[T1]]
  ; GP64-NOT-R6:    dsrav     $2, $4, $[[T0]]
  ; GP64-NOT-R6:    andi      $[[T5:[0-9]+]], $[[T0]], 64

  ; GP64-NOT-R6:    movn      $3, $2, $[[T5]]
  ; GP64-NOT-R6:    dsra      $[[T6:[0-9]+]], $4, 63
  ; GP64-NOT-R6:    jr        $ra
  ; GP64-NOT-R6:    movn      $2, $[[T6]], $[[T5]]

  ; 64R6:           sll       $[[T0:[0-9]+]], $7, 0
  ; 64R6:           dsrav     $[[T1:[0-9]+]], $4, $[[T0]]
  ; 64R6:           andi      $[[T2:[0-9]+]], $[[T0]], 64
  ; 64R6:           sll       $[[T3:[0-9]+]], $[[T2]], 0
  ; 64R6:           seleqz    $[[T4:[0-9]+]], $[[T1]], $[[T3]]
  ; 64R6:           dsra      $[[T5:[0-9]+]], $4, 63
  ; 64R6:           selnez    $[[T6:[0-9]+]], $[[T5]], $[[T3]]
  ; 64R6:           or        $2, $[[T6]], $[[T4]]
  ; 64R6:           dsrlv     $[[T7:[0-9]+]], $5, $[[T0]]
  ; 64R6:           dsll      $[[T8:[0-9]+]], $4, 1
  ; 64R6:           not       $[[T9:[0-9]+]], $[[T0]]
  ; 64R6:           dsllv     $[[T10:[0-9]+]], $[[T8]], $[[T9]]
  ; 64R6:           or        $[[T11:[0-9]+]], $[[T10]], $[[T7]]
  ; 64R6:           seleqz    $[[T12:[0-9]+]], $[[T11]], $[[T3]]
  ; 64R6:           selnez    $[[T13:[0-9]+]], $[[T1]], $[[T3]]
  ; 64R6:           jr        $ra
  ; 64R6:           or        $3, $[[T13]], $[[T12]]

  %r = ashr i128 %a, %b
  ret i128 %r
}
201