; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=CHECK,RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=CHECK,RV32IM %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=CHECK,RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=CHECK,RV64IM %s

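; The cases below check that a constant srem is lowered to a __modsi3/__moddi3
; libcall on RV32I/RV64I, and to a multiply-by-magic-constant sequence when the
; M extension provides mulh. A rough C sketch of the RV32IM lowering of x % 95
; (hypothetical variable names; the standard signed magic-number scheme):
;   int32_t t = (int32_t)(((int64_t)x * (int32_t)0xAC769185) >> 32) + x;
;   int32_t q = (t >> 6) + ((uint32_t)t >> 31); // shift, then add sign bit
;   int32_t r = x - q * 95;                     // r == x % 95
; where 0xAC769185 == ceil(2^38 / 95); it is negative as an i32, which is why
; the mulh result needs the '+ x' fixup.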
define i32 @fold_srem_positive_odd(i32 %x) nounwind {
; RV32I-LABEL: fold_srem_positive_odd:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp)
; RV32I-NEXT:    addi a1, zero, 95
; RV32I-NEXT:    call __modsi3
; RV32I-NEXT:    lw ra, 12(sp)
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: fold_srem_positive_odd:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    lui a1, 706409
; RV32IM-NEXT:    addi a1, a1, 389
; RV32IM-NEXT:    mulh a1, a0, a1
; RV32IM-NEXT:    add a1, a1, a0
; RV32IM-NEXT:    srli a2, a1, 31
; RV32IM-NEXT:    srai a1, a1, 6
; RV32IM-NEXT:    add a1, a1, a2
; RV32IM-NEXT:    addi a2, zero, 95
; RV32IM-NEXT:    mul a1, a1, a2
; RV32IM-NEXT:    sub a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: fold_srem_positive_odd:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp)
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    addi a1, zero, 95
; RV64I-NEXT:    call __moddi3
; RV64I-NEXT:    ld ra, 8(sp)
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: fold_srem_positive_odd:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sext.w a0, a0
; RV64IM-NEXT:    lui a1, 1045903
; RV64IM-NEXT:    addiw a1, a1, -733
; RV64IM-NEXT:    slli a1, a1, 15
; RV64IM-NEXT:    addi a1, a1, 1035
; RV64IM-NEXT:    slli a1, a1, 12
; RV64IM-NEXT:    addi a1, a1, -905
; RV64IM-NEXT:    slli a1, a1, 12
; RV64IM-NEXT:    addi a1, a1, -1767
; RV64IM-NEXT:    mulh a1, a0, a1
; RV64IM-NEXT:    add a1, a1, a0
; RV64IM-NEXT:    srli a2, a1, 63
; RV64IM-NEXT:    srai a1, a1, 6
; RV64IM-NEXT:    add a1, a1, a2
; RV64IM-NEXT:    addi a2, zero, 95
; RV64IM-NEXT:    mul a1, a1, a2
; RV64IM-NEXT:    sub a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = srem i32 %x, 95
  ret i32 %1
}


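; For the even divisor 1060 the magic constant is 0x3DD38FF1 (lui 253241 +
; addi -15), i.e. ceil(2^40 / 1060). It is positive as an i32, so unlike the
; 95 case no add of the dividend is needed after the mulh; only the arithmetic
; shift by 8 and the sign-bit correction remain before the final mul/sub.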
define i32 @fold_srem_positive_even(i32 %x) nounwind {
; RV32I-LABEL: fold_srem_positive_even:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp)
; RV32I-NEXT:    addi a1, zero, 1060
; RV32I-NEXT:    call __modsi3
; RV32I-NEXT:    lw ra, 12(sp)
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: fold_srem_positive_even:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    lui a1, 253241
; RV32IM-NEXT:    addi a1, a1, -15
; RV32IM-NEXT:    mulh a1, a0, a1
; RV32IM-NEXT:    srli a2, a1, 31
; RV32IM-NEXT:    srai a1, a1, 8
; RV32IM-NEXT:    add a1, a1, a2
; RV32IM-NEXT:    addi a2, zero, 1060
; RV32IM-NEXT:    mul a1, a1, a2
; RV32IM-NEXT:    sub a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: fold_srem_positive_even:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp)
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    addi a1, zero, 1060
; RV64I-NEXT:    call __moddi3
; RV64I-NEXT:    ld ra, 8(sp)
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: fold_srem_positive_even:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sext.w a0, a0
; RV64IM-NEXT:    lui a1, 506482
; RV64IM-NEXT:    addiw a1, a1, -31
; RV64IM-NEXT:    slli a1, a1, 13
; RV64IM-NEXT:    addi a1, a1, 711
; RV64IM-NEXT:    slli a1, a1, 19
; RV64IM-NEXT:    addi a1, a1, 1979
; RV64IM-NEXT:    mulh a1, a0, a1
; RV64IM-NEXT:    srli a2, a1, 63
; RV64IM-NEXT:    srai a1, a1, 9
; RV64IM-NEXT:    add a1, a1, a2
; RV64IM-NEXT:    addi a2, zero, 1060
; RV64IM-NEXT:    mul a1, a1, a2
; RV64IM-NEXT:    sub a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = srem i32 %x, 1060
  ret i32 %1
}


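; Negative divisors use the same quotient-by-mulh scheme. RV32IM negates the
; 32-bit magic for 723 (0xA55AFFA5 == -ceil(2^40 / 723)); RV64IM builds a
; positive 64-bit magic and instead subtracts the dividend after the mulh.
; Either way the remainder is formed as x - q * -723, so it keeps x's sign.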
define i32 @fold_srem_negative_odd(i32 %x) nounwind {
; RV32I-LABEL: fold_srem_negative_odd:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp)
; RV32I-NEXT:    addi a1, zero, -723
; RV32I-NEXT:    call __modsi3
; RV32I-NEXT:    lw ra, 12(sp)
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: fold_srem_negative_odd:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    lui a1, 677296
; RV32IM-NEXT:    addi a1, a1, -91
; RV32IM-NEXT:    mulh a1, a0, a1
; RV32IM-NEXT:    srli a2, a1, 31
; RV32IM-NEXT:    srai a1, a1, 8
; RV32IM-NEXT:    add a1, a1, a2
; RV32IM-NEXT:    addi a2, zero, -723
; RV32IM-NEXT:    mul a1, a1, a2
; RV32IM-NEXT:    sub a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: fold_srem_negative_odd:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp)
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    addi a1, zero, -723
; RV64I-NEXT:    call __moddi3
; RV64I-NEXT:    ld ra, 8(sp)
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: fold_srem_negative_odd:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sext.w a0, a0
; RV64IM-NEXT:    lui a1, 4781
; RV64IM-NEXT:    addiw a1, a1, 2045
; RV64IM-NEXT:    slli a1, a1, 13
; RV64IM-NEXT:    addi a1, a1, 1371
; RV64IM-NEXT:    slli a1, a1, 13
; RV64IM-NEXT:    addi a1, a1, -11
; RV64IM-NEXT:    slli a1, a1, 12
; RV64IM-NEXT:    addi a1, a1, -1355
; RV64IM-NEXT:    mulh a1, a0, a1
; RV64IM-NEXT:    sub a1, a1, a0
; RV64IM-NEXT:    srli a2, a1, 63
; RV64IM-NEXT:    srai a1, a1, 9
; RV64IM-NEXT:    add a1, a1, a2
; RV64IM-NEXT:    addi a2, zero, -723
; RV64IM-NEXT:    mul a1, a1, a2
; RV64IM-NEXT:    sub a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = srem i32 %x, -723
  ret i32 %1
}


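; Same pattern for an even negative divisor; the final multiply materializes
; the full constant -22981 (lui 1048570 + addi 1595 == 0xFFFFA63B) before the
; closing subtract.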
define i32 @fold_srem_negative_even(i32 %x) nounwind {
; RV32I-LABEL: fold_srem_negative_even:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp)
; RV32I-NEXT:    lui a1, 1048570
; RV32I-NEXT:    addi a1, a1, 1595
; RV32I-NEXT:    call __modsi3
; RV32I-NEXT:    lw ra, 12(sp)
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: fold_srem_negative_even:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    lui a1, 1036895
; RV32IM-NEXT:    addi a1, a1, 999
; RV32IM-NEXT:    mulh a1, a0, a1
; RV32IM-NEXT:    srli a2, a1, 31
; RV32IM-NEXT:    srai a1, a1, 8
; RV32IM-NEXT:    add a1, a1, a2
; RV32IM-NEXT:    lui a2, 1048570
; RV32IM-NEXT:    addi a2, a2, 1595
; RV32IM-NEXT:    mul a1, a1, a2
; RV32IM-NEXT:    sub a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: fold_srem_negative_even:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp)
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    lui a1, 1048570
; RV64I-NEXT:    addiw a1, a1, 1595
; RV64I-NEXT:    call __moddi3
; RV64I-NEXT:    ld ra, 8(sp)
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: fold_srem_negative_even:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sext.w a0, a0
; RV64IM-NEXT:    lui a1, 1036895
; RV64IM-NEXT:    addiw a1, a1, 999
; RV64IM-NEXT:    slli a1, a1, 12
; RV64IM-NEXT:    addi a1, a1, 11
; RV64IM-NEXT:    slli a1, a1, 12
; RV64IM-NEXT:    addi a1, a1, -523
; RV64IM-NEXT:    slli a1, a1, 12
; RV64IM-NEXT:    addi a1, a1, -481
; RV64IM-NEXT:    mulh a1, a0, a1
; RV64IM-NEXT:    srli a2, a1, 63
; RV64IM-NEXT:    srai a1, a1, 12
; RV64IM-NEXT:    add a1, a1, a2
; RV64IM-NEXT:    lui a2, 1048570
; RV64IM-NEXT:    addiw a2, a2, 1595
; RV64IM-NEXT:    mul a1, a1, a2
; RV64IM-NEXT:    sub a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = srem i32 %x, -22981
  ret i32 %1
}


; Don't fold if we can combine srem with sdiv.
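; The srem and sdiv share one quotient: q is computed once with the magic
; multiply, the remainder is x - q * 95, and the sum reuses q directly,
; roughly: q = x / 95; r = x - q * 95; return r + q;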
define i32 @combine_srem_sdiv(i32 %x) nounwind {
; RV32I-LABEL: combine_srem_sdiv:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp)
; RV32I-NEXT:    sw s0, 8(sp)
; RV32I-NEXT:    sw s1, 4(sp)
; RV32I-NEXT:    mv s0, a0
; RV32I-NEXT:    addi a1, zero, 95
; RV32I-NEXT:    call __modsi3
; RV32I-NEXT:    mv s1, a0
; RV32I-NEXT:    addi a1, zero, 95
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __divsi3
; RV32I-NEXT:    add a0, s1, a0
; RV32I-NEXT:    lw s1, 4(sp)
; RV32I-NEXT:    lw s0, 8(sp)
; RV32I-NEXT:    lw ra, 12(sp)
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: combine_srem_sdiv:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    lui a1, 706409
; RV32IM-NEXT:    addi a1, a1, 389
; RV32IM-NEXT:    mulh a1, a0, a1
; RV32IM-NEXT:    add a1, a1, a0
; RV32IM-NEXT:    srli a2, a1, 31
; RV32IM-NEXT:    srai a1, a1, 6
; RV32IM-NEXT:    add a1, a1, a2
; RV32IM-NEXT:    addi a2, zero, 95
; RV32IM-NEXT:    mul a2, a1, a2
; RV32IM-NEXT:    sub a0, a0, a2
; RV32IM-NEXT:    add a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: combine_srem_sdiv:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp)
; RV64I-NEXT:    sd s0, 16(sp)
; RV64I-NEXT:    sd s1, 8(sp)
; RV64I-NEXT:    sext.w s0, a0
; RV64I-NEXT:    addi a1, zero, 95
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __moddi3
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    addi a1, zero, 95
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __divdi3
; RV64I-NEXT:    addw a0, s1, a0
; RV64I-NEXT:    ld s1, 8(sp)
; RV64I-NEXT:    ld s0, 16(sp)
; RV64I-NEXT:    ld ra, 24(sp)
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: combine_srem_sdiv:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sext.w a1, a0
; RV64IM-NEXT:    lui a2, 1045903
; RV64IM-NEXT:    addiw a2, a2, -733
; RV64IM-NEXT:    slli a2, a2, 15
; RV64IM-NEXT:    addi a2, a2, 1035
; RV64IM-NEXT:    slli a2, a2, 12
; RV64IM-NEXT:    addi a2, a2, -905
; RV64IM-NEXT:    slli a2, a2, 12
; RV64IM-NEXT:    addi a2, a2, -1767
; RV64IM-NEXT:    mulh a2, a1, a2
; RV64IM-NEXT:    add a1, a2, a1
; RV64IM-NEXT:    srli a2, a1, 63
; RV64IM-NEXT:    srai a1, a1, 6
; RV64IM-NEXT:    add a1, a1, a2
; RV64IM-NEXT:    addi a2, zero, 95
; RV64IM-NEXT:    mul a2, a1, a2
; RV64IM-NEXT:    sub a0, a0, a2
; RV64IM-NEXT:    addw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = srem i32 %x, 95
  %2 = sdiv i32 %x, 95
  %3 = add i32 %1, %2
  ret i32 %3
}

; Don't fold for divisors that are a power of two.
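; A C sketch of the power-of-two lowering checked below (for x % 64):
;   int32_t t = x + ((uint32_t)(x >> 31) >> 26); // bias negative x by 63
;   return x - (t & -64);                        // t & -64 == 64 * (x / 64)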
define i32 @dont_fold_srem_power_of_two(i32 %x) nounwind {
; RV32I-LABEL: dont_fold_srem_power_of_two:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    srli a1, a1, 26
; RV32I-NEXT:    add a1, a0, a1
; RV32I-NEXT:    andi a1, a1, -64
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: dont_fold_srem_power_of_two:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    srai a1, a0, 31
; RV32IM-NEXT:    srli a1, a1, 26
; RV32IM-NEXT:    add a1, a0, a1
; RV32IM-NEXT:    andi a1, a1, -64
; RV32IM-NEXT:    sub a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: dont_fold_srem_power_of_two:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sext.w a1, a0
; RV64I-NEXT:    srli a1, a1, 57
; RV64I-NEXT:    andi a1, a1, 63
; RV64I-NEXT:    add a1, a0, a1
; RV64I-NEXT:    addi a2, zero, 1
; RV64I-NEXT:    slli a2, a2, 32
; RV64I-NEXT:    addi a2, a2, -64
; RV64I-NEXT:    and a1, a1, a2
; RV64I-NEXT:    subw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: dont_fold_srem_power_of_two:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sext.w a1, a0
; RV64IM-NEXT:    srli a1, a1, 57
; RV64IM-NEXT:    andi a1, a1, 63
; RV64IM-NEXT:    add a1, a0, a1
; RV64IM-NEXT:    addi a2, zero, 1
; RV64IM-NEXT:    slli a2, a2, 32
; RV64IM-NEXT:    addi a2, a2, -64
; RV64IM-NEXT:    and a1, a1, a2
; RV64IM-NEXT:    subw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = srem i32 %x, 64
  ret i32 %1
}

; Don't fold if the divisor is one.
define i32 @dont_fold_srem_one(i32 %x) nounwind {
; CHECK-LABEL: dont_fold_srem_one:
; CHECK:       # %bb.0:
; CHECK-NEXT:    mv a0, zero
; CHECK-NEXT:    ret
  %1 = srem i32 %x, 1
  ret i32 %1
}

; Don't fold if the divisor is 2^31.
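; 2147483648 wraps to -2^31 (i32 INT_MIN), so x srem -2^31 is x itself unless
; x == INT_MIN, where it is 0. A branchless C sketch of the sequence below:
;   int32_t t = (x + ((uint32_t)(x >> 31) >> 1)) & 0x80000000; // t != 0 iff x == INT_MIN
;   return x + t;                                 // wrapping add maps INT_MIN to 0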
define i32 @dont_fold_srem_i32_smax(i32 %x) nounwind {
; RV32I-LABEL: dont_fold_srem_i32_smax:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    srli a1, a1, 1
; RV32I-NEXT:    add a1, a0, a1
; RV32I-NEXT:    lui a2, 524288
; RV32I-NEXT:    and a1, a1, a2
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: dont_fold_srem_i32_smax:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    srai a1, a0, 31
; RV32IM-NEXT:    srli a1, a1, 1
; RV32IM-NEXT:    add a1, a0, a1
; RV32IM-NEXT:    lui a2, 524288
; RV32IM-NEXT:    and a1, a1, a2
; RV32IM-NEXT:    add a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: dont_fold_srem_i32_smax:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sext.w a1, a0
; RV64I-NEXT:    srli a1, a1, 32
; RV64I-NEXT:    lui a2, 524288
; RV64I-NEXT:    addiw a2, a2, -1
; RV64I-NEXT:    and a1, a1, a2
; RV64I-NEXT:    add a1, a0, a1
; RV64I-NEXT:    addi a2, zero, 1
; RV64I-NEXT:    slli a2, a2, 31
; RV64I-NEXT:    and a1, a1, a2
; RV64I-NEXT:    addw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: dont_fold_srem_i32_smax:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sext.w a1, a0
; RV64IM-NEXT:    srli a1, a1, 32
; RV64IM-NEXT:    lui a2, 524288
; RV64IM-NEXT:    addiw a2, a2, -1
; RV64IM-NEXT:    and a1, a1, a2
; RV64IM-NEXT:    add a1, a0, a1
; RV64IM-NEXT:    addi a2, zero, 1
; RV64IM-NEXT:    slli a2, a2, 31
; RV64IM-NEXT:    and a1, a1, a2
; RV64IM-NEXT:    addw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = srem i32 %x, 2147483648
  ret i32 %1
}

; Don't fold i64 srem.
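; On RV32 an i64 srem stays a __moddi3 libcall: even with M, the fold would
; want a 64x64->128-bit high multiply. RV64IM does fold it, building the
; 64-bit magic for 98 with a lui/slli/addi chain as in the i32 cases above.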
define i64 @dont_fold_srem_i64(i64 %x) nounwind {
; RV32I-LABEL: dont_fold_srem_i64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp)
; RV32I-NEXT:    addi a2, zero, 98
; RV32I-NEXT:    mv a3, zero
; RV32I-NEXT:    call __moddi3
; RV32I-NEXT:    lw ra, 12(sp)
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: dont_fold_srem_i64:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw ra, 12(sp)
; RV32IM-NEXT:    addi a2, zero, 98
; RV32IM-NEXT:    mv a3, zero
; RV32IM-NEXT:    call __moddi3
; RV32IM-NEXT:    lw ra, 12(sp)
; RV32IM-NEXT:    addi sp, sp, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: dont_fold_srem_i64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp)
; RV64I-NEXT:    addi a1, zero, 98
; RV64I-NEXT:    call __moddi3
; RV64I-NEXT:    ld ra, 8(sp)
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: dont_fold_srem_i64:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    lui a1, 2675
; RV64IM-NEXT:    addiw a1, a1, -251
; RV64IM-NEXT:    slli a1, a1, 13
; RV64IM-NEXT:    addi a1, a1, 1839
; RV64IM-NEXT:    slli a1, a1, 13
; RV64IM-NEXT:    addi a1, a1, 167
; RV64IM-NEXT:    slli a1, a1, 13
; RV64IM-NEXT:    addi a1, a1, 1505
; RV64IM-NEXT:    mulh a1, a0, a1
; RV64IM-NEXT:    srli a2, a1, 63
; RV64IM-NEXT:    srai a1, a1, 5
; RV64IM-NEXT:    add a1, a1, a2
; RV64IM-NEXT:    addi a2, zero, 98
; RV64IM-NEXT:    mul a1, a1, a2
; RV64IM-NEXT:    sub a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = srem i64 %x, 98
  ret i64 %1
}