; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP32
; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP32
; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP32
; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP32
; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP32
; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP32
; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP64
; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP64
; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP64
; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP64
; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP64
; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP64
; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
; RUN:    -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP64

define signext i1 @sub_i1(i1 signext %a, i1 signext %b) {
entry:
; ALL-LABEL: sub_i1:

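  ; There is no 1-bit subtract; the result is computed with subu and then
  ; sign-extended from bit 0 with an sll/sra pair by 31.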
  ; ALL:            subu    $[[T0:[0-9]+]], $4, $5
  ; ALL:            sll     $[[T0]], $[[T0]], 31
  ; ALL:            sra     $2, $[[T0]], 31

  %r = sub i1 %a, %b
  ret i1 %r
}

define signext i8 @sub_i8(i8 signext %a, i8 signext %b) {
entry:
; ALL-LABEL: sub_i8:

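  ; Pre-R2 cores sign-extend the i8 result with an sll/sra pair by 24;
  ; MIPS32r2/MIPS64r2 and later use the seb instruction instead.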
  ; NOT-R2-R6:      subu    $[[T0:[0-9]+]], $4, $5
  ; NOT-R2-R6:      sll     $[[T0]], $[[T0]], 24
  ; NOT-R2-R6:      sra     $2, $[[T0]], 24

  ; R2-R6:          subu    $[[T0:[0-9]+]], $4, $5
  ; R2-R6:          seb     $2, $[[T0]]

  %r = sub i8 %a, %b
  ret i8 %r
}

define signext i16 @sub_i16(i16 signext %a, i16 signext %b) {
entry:
; ALL-LABEL: sub_i16:

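  ; Pre-R2 cores sign-extend the i16 result with an sll/sra pair by 16;
  ; MIPS32r2/MIPS64r2 and later use the seh instruction instead.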
  ; NOT-R2-R6:      subu    $[[T0:[0-9]+]], $4, $5
  ; NOT-R2-R6:      sll     $[[T0]], $[[T0]], 16
  ; NOT-R2-R6:      sra     $2, $[[T0]], 16

  ; R2-R6:          subu    $[[T0:[0-9]+]], $4, $5
  ; R2-R6:          seh     $2, $[[T0]]

  %r = sub i16 %a, %b
  ret i16 %r
}

define signext i32 @sub_i32(i32 signext %a, i32 signext %b) {
entry:
; ALL-LABEL: sub_i32:

  ; ALL:            subu    $2, $4, $5

  %r = sub i32 %a, %b
  ret i32 %r
}

define signext i64 @sub_i64(i64 signext %a, i64 signext %b) {
entry:
; ALL-LABEL: sub_i64:

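  ; On 32-bit targets the i64 subtraction is split into two 32-bit halves:
  ; subtract the low words, compute the borrow with sltu, and fold it into
  ; the high-word subtraction. 64-bit targets use a single dsubu.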
  ; GP32:           subu    $3, $5, $7
  ; GP32:           sltu    $[[T0:[0-9]+]], $5, $7
  ; GP32:           addu    $[[T1:[0-9]+]], $[[T0]], $6
  ; GP32:           subu    $2, $4, $[[T1]]

  ; GP64:           dsubu   $2, $4, $5

  %r = sub i64 %a, %b
  ret i64 %r
}

define signext i128 @sub_i128(i128 signext %a, i128 signext %b) {
entry:
; ALL-LABEL: sub_i128:

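  ; On 32-bit targets the i128 subtraction is done word by word, with the
  ; second operand's words loaded from the stack and the borrows computed
  ; with sltu. 64-bit targets split it into two 64-bit halves, handling the
  ; borrow with sltu/daddu.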
  ; GP32:       lw        $[[T0:[0-9]+]], 20($sp)
  ; GP32:       sltu      $[[T1:[0-9]+]], $5, $[[T0]]
  ; GP32:       lw        $[[T2:[0-9]+]], 16($sp)
  ; GP32:       addu      $[[T3:[0-9]+]], $[[T1]], $[[T2]]
  ; GP32:       lw        $[[T4:[0-9]+]], 24($sp)
  ; GP32:       lw        $[[T5:[0-9]+]], 28($sp)
  ; GP32:       subu      $[[T6:[0-9]+]], $7, $[[T5]]
  ; GP32:       subu      $2, $4, $[[T3]]
  ; GP32:       sltu      $[[T8:[0-9]+]], $6, $[[T4]]
  ; GP32:       addu      $[[T9:[0-9]+]], $[[T8]], $[[T0]]
  ; GP32:       subu      $3, $5, $[[T9]]
  ; GP32:       sltu      $[[T10:[0-9]+]], $7, $[[T5]]
  ; GP32:       addu      $[[T11:[0-9]+]], $[[T10]], $[[T4]]
  ; GP32:       subu      $4, $6, $[[T11]]
  ; GP32:       move      $5, $[[T6]]

  ; GP64:       dsubu     $3, $5, $7
  ; GP64:       sltu      $[[T0:[0-9]+]], $5, $7
  ; GP64:       daddu     $[[T1:[0-9]+]], $[[T0]], $6
  ; GP64:       dsubu     $2, $4, $[[T1]]

  %r = sub i128 %a, %b
  ret i128 %r
}