1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
3; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefix=X64-LINUX
4; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s --check-prefix=X64-WIN32
5
6declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32)
7declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32)
8
; The immediate can be encoded in a smaller way if the
; instruction is a sub instead of an add.
; 32-bit: 'addl $128' needs an imm32, but 'subl $-128' fits an imm8; x86-64
; instead folds the add into a flag-free leal.
define i32 @test1(i32 inreg %a) nounwind {
; X86-LABEL: test1:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $-128, %eax
; X86-NEXT:    retl
;
; X64-LINUX-LABEL: test1:
; X64-LINUX:       # %bb.0: # %entry
; X64-LINUX-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-LINUX-NEXT:    leal 128(%rdi), %eax
; X64-LINUX-NEXT:    retq
;
; X64-WIN32-LABEL: test1:
; X64-WIN32:       # %bb.0: # %entry
; X64-WIN32-NEXT:    # kill: def $ecx killed $ecx def $rcx
; X64-WIN32-NEXT:    leal 128(%rcx), %eax
; X64-WIN32-NEXT:    retq
entry:
  %b = add i32 %a, 128
  ret i32 %b
}
32
; Memory-operand variant of test1: after loading the value, all three targets
; use the shorter subl $-128 (imm8) encoding for the add of 128.
define i32 @test1b(i32* %p) nounwind {
; X86-LABEL: test1b:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl (%eax), %eax
; X86-NEXT:    subl $-128, %eax
; X86-NEXT:    retl
;
; X64-LINUX-LABEL: test1b:
; X64-LINUX:       # %bb.0: # %entry
; X64-LINUX-NEXT:    movl (%rdi), %eax
; X64-LINUX-NEXT:    subl $-128, %eax
; X64-LINUX-NEXT:    retq
;
; X64-WIN32-LABEL: test1b:
; X64-WIN32:       # %bb.0: # %entry
; X64-WIN32-NEXT:    movl (%rcx), %eax
; X64-WIN32-NEXT:    subl $-128, %eax
; X64-WIN32-NEXT:    retq
entry:
  %a = load i32, i32* %p
  %b = add i32 %a, 128
  ret i32 %b
}
57
; i64 add of 2147483648: +2^31 does not fit a sign-extended imm32, but -2^31
; does, so x86-64 uses subq $-2147483648; i686 splits into addl/adcl halves.
define i64 @test2(i64 inreg %a) nounwind {
; X86-LABEL: test2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    addl $-2147483648, %eax # imm = 0x80000000
; X86-NEXT:    adcl $0, %edx
; X86-NEXT:    retl
;
; X64-LINUX-LABEL: test2:
; X64-LINUX:       # %bb.0: # %entry
; X64-LINUX-NEXT:    movq %rdi, %rax
; X64-LINUX-NEXT:    subq $-2147483648, %rax # imm = 0x80000000
; X64-LINUX-NEXT:    retq
;
; X64-WIN32-LABEL: test2:
; X64-WIN32:       # %bb.0: # %entry
; X64-WIN32-NEXT:    movq %rcx, %rax
; X64-WIN32-NEXT:    subq $-2147483648, %rax # imm = 0x80000000
; X64-WIN32-NEXT:    retq
entry:
  %b = add i64 %a, 2147483648
  ret i64 %b
}
; i64 add of 128 used only as a value: x86-64 folds it into a flag-free leaq;
; i686 uses an addl/adcl pair across the two 32-bit halves.
define i64 @test3(i64 inreg %a) nounwind {
; X86-LABEL: test3:
; X86:       # %bb.0: # %entry
; X86-NEXT:    addl $128, %eax
; X86-NEXT:    adcl $0, %edx
; X86-NEXT:    retl
;
; X64-LINUX-LABEL: test3:
; X64-LINUX:       # %bb.0: # %entry
; X64-LINUX-NEXT:    leaq 128(%rdi), %rax
; X64-LINUX-NEXT:    retq
;
; X64-WIN32-LABEL: test3:
; X64-WIN32:       # %bb.0: # %entry
; X64-WIN32-NEXT:    leaq 128(%rcx), %rax
; X64-WIN32-NEXT:    retq
entry:
  %b = add i64 %a, 128
  ret i64 %b
}
100
; Memory-operand variant of test3: after the load, x86-64 uses the imm8
; subq $-128 encoding rather than addq $128 (which would need an imm32).
define i64 @test3b(i64* %p) nounwind {
; X86-LABEL: test3b:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl 4(%ecx), %edx
; X86-NEXT:    movl $128, %eax
; X86-NEXT:    addl (%ecx), %eax
; X86-NEXT:    adcl $0, %edx
; X86-NEXT:    retl
;
; X64-LINUX-LABEL: test3b:
; X64-LINUX:       # %bb.0: # %entry
; X64-LINUX-NEXT:    movq (%rdi), %rax
; X64-LINUX-NEXT:    subq $-128, %rax
; X64-LINUX-NEXT:    retq
;
; X64-WIN32-LABEL: test3b:
; X64-WIN32:       # %bb.0: # %entry
; X64-WIN32-NEXT:    movq (%rcx), %rax
; X64-WIN32-NEXT:    subq $-128, %rax
; X64-WIN32-NEXT:    retq
entry:
  %a = load i64, i64* %p
  %b = add i64 %a, 128
  ret i64 %b
}
127
; sadd.with.overflow driving a branch: the addl's overflow flag is consumed
; directly by jo, with no separate seto/test needed.
define i1 @test4(i32 %v1, i32 %v2, i32* %X) nounwind {
; X86-LABEL: test4:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    jo .LBB5_2
; X86-NEXT:  # %bb.1: # %normal
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl $0, (%eax)
; X86-NEXT:  .LBB5_2: # %overflow
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    retl
;
; X64-LINUX-LABEL: test4:
; X64-LINUX:       # %bb.0: # %entry
; X64-LINUX-NEXT:    addl %esi, %edi
; X64-LINUX-NEXT:    jo .LBB5_2
; X64-LINUX-NEXT:  # %bb.1: # %normal
; X64-LINUX-NEXT:    movl $0, (%rdx)
; X64-LINUX-NEXT:  .LBB5_2: # %overflow
; X64-LINUX-NEXT:    xorl %eax, %eax
; X64-LINUX-NEXT:    retq
;
; X64-WIN32-LABEL: test4:
; X64-WIN32:       # %bb.0: # %entry
; X64-WIN32-NEXT:    addl %edx, %ecx
; X64-WIN32-NEXT:    jo .LBB5_2
; X64-WIN32-NEXT:  # %bb.1: # %normal
; X64-WIN32-NEXT:    movl $0, (%r8)
; X64-WIN32-NEXT:  .LBB5_2: # %overflow
; X64-WIN32-NEXT:    xorl %eax, %eax
; X64-WIN32-NEXT:    retq
entry:
  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
  %sum = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  br i1 %obit, label %overflow, label %normal

normal:
  store i32 0, i32* %X
  br label %overflow

overflow:
  ret i1 false
}
173
; uadd.with.overflow driving a branch: the addl's carry flag is consumed
; directly by jb (unsigned), mirroring test4's jo for the signed case.
define i1 @test5(i32 %v1, i32 %v2, i32* %X) nounwind {
; X86-LABEL: test5:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    jb .LBB6_2
; X86-NEXT:  # %bb.1: # %normal
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl $0, (%eax)
; X86-NEXT:  .LBB6_2: # %carry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    retl
;
; X64-LINUX-LABEL: test5:
; X64-LINUX:       # %bb.0: # %entry
; X64-LINUX-NEXT:    addl %esi, %edi
; X64-LINUX-NEXT:    jb .LBB6_2
; X64-LINUX-NEXT:  # %bb.1: # %normal
; X64-LINUX-NEXT:    movl $0, (%rdx)
; X64-LINUX-NEXT:  .LBB6_2: # %carry
; X64-LINUX-NEXT:    xorl %eax, %eax
; X64-LINUX-NEXT:    retq
;
; X64-WIN32-LABEL: test5:
; X64-WIN32:       # %bb.0: # %entry
; X64-WIN32-NEXT:    addl %edx, %ecx
; X64-WIN32-NEXT:    jb .LBB6_2
; X64-WIN32-NEXT:  # %bb.1: # %normal
; X64-WIN32-NEXT:    movl $0, (%r8)
; X64-WIN32-NEXT:  .LBB6_2: # %carry
; X64-WIN32-NEXT:    xorl %eax, %eax
; X64-WIN32-NEXT:    retq
entry:
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
  %sum = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  br i1 %obit, label %carry, label %normal

normal:
  store i32 0, i32* %X
  br label %carry

carry:
  ret i1 false
}
219
; A + (zext(B) << 32): x86-64 shifts B into the high half and combines with a
; leaq; i686 only needs an addl on the high word since the low half is A's.
define i64 @test6(i64 %A, i32 %B) nounwind {
; X86-LABEL: test6:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    addl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    retl
;
; X64-LINUX-LABEL: test6:
; X64-LINUX:       # %bb.0: # %entry
; X64-LINUX-NEXT:    # kill: def $esi killed $esi def $rsi
; X64-LINUX-NEXT:    shlq $32, %rsi
; X64-LINUX-NEXT:    leaq (%rsi,%rdi), %rax
; X64-LINUX-NEXT:    retq
;
; X64-WIN32-LABEL: test6:
; X64-WIN32:       # %bb.0: # %entry
; X64-WIN32-NEXT:    # kill: def $edx killed $edx def $rdx
; X64-WIN32-NEXT:    shlq $32, %rdx
; X64-WIN32-NEXT:    leaq (%rdx,%rcx), %rax
; X64-WIN32-NEXT:    retq
entry:
  %tmp12 = zext i32 %B to i64
  %tmp3 = shl i64 %tmp12, 32
  %tmp5 = add i64 %tmp3, %A
  ret i64 %tmp5
}
247
; uadd.with.overflow returned as an aggregate: sum in eax, carry bit
; materialized into dl with setb.
define {i32, i1} @test7(i32 %v1, i32 %v2) nounwind {
; X86-LABEL: test7:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    setb %dl
; X86-NEXT:    retl
;
; X64-LINUX-LABEL: test7:
; X64-LINUX:       # %bb.0: # %entry
; X64-LINUX-NEXT:    movl %edi, %eax
; X64-LINUX-NEXT:    addl %esi, %eax
; X64-LINUX-NEXT:    setb %dl
; X64-LINUX-NEXT:    retq
;
; X64-WIN32-LABEL: test7:
; X64-WIN32:       # %bb.0: # %entry
; X64-WIN32-NEXT:    movl %ecx, %eax
; X64-WIN32-NEXT:    addl %edx, %eax
; X64-WIN32-NEXT:    setb %dl
; X64-WIN32-NEXT:    retq
entry:
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
  ret {i32, i1} %t
}
273
; PR5443
; The i65-widened add with a high-bit mask is recognized as an i64 add with
; carry-out: a plain addq (addl/adcl on i686) followed by setb for the carry.
define {i64, i1} @test8(i64 %left, i64 %right) nounwind {
; X86-LABEL: test8:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    adcl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    setb %cl
; X86-NEXT:    retl
;
; X64-LINUX-LABEL: test8:
; X64-LINUX:       # %bb.0: # %entry
; X64-LINUX-NEXT:    movq %rdi, %rax
; X64-LINUX-NEXT:    addq %rsi, %rax
; X64-LINUX-NEXT:    setb %dl
; X64-LINUX-NEXT:    retq
;
; X64-WIN32-LABEL: test8:
; X64-WIN32:       # %bb.0: # %entry
; X64-WIN32-NEXT:    movq %rcx, %rax
; X64-WIN32-NEXT:    addq %rdx, %rax
; X64-WIN32-NEXT:    setb %dl
; X64-WIN32-NEXT:    retq
entry:
  %extleft = zext i64 %left to i65
  %extright = zext i64 %right to i65
  %sum = add i65 %extleft, %extright
  %res.0 = trunc i65 %sum to i64
  %overflow = and i65 %sum, -18446744073709551616
  %res.1 = icmp ne i65 %overflow, 0
  %final0 = insertvalue {i64, i1} undef, i64 %res.0, 0
  %final1 = insertvalue {i64, i1} %final0, i1 %res.1, 1
  ret {i64, i1} %final1
}
309
; y + sext(x == 10) is lowered branchlessly: sete into a zeroed register,
; then a subl (the sign-extended i1 is 0 or -1, so add becomes sub of 0/1).
define i32 @test9(i32 %x, i32 %y) nounwind readnone {
; X86-LABEL: test9:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    xorl %ecx, %ecx
; X86-NEXT:    cmpl $10, {{[0-9]+}}(%esp)
; X86-NEXT:    sete %cl
; X86-NEXT:    subl %ecx, %eax
; X86-NEXT:    retl
;
; X64-LINUX-LABEL: test9:
; X64-LINUX:       # %bb.0: # %entry
; X64-LINUX-NEXT:    movl %esi, %eax
; X64-LINUX-NEXT:    xorl %ecx, %ecx
; X64-LINUX-NEXT:    cmpl $10, %edi
; X64-LINUX-NEXT:    sete %cl
; X64-LINUX-NEXT:    subl %ecx, %eax
; X64-LINUX-NEXT:    retq
;
; X64-WIN32-LABEL: test9:
; X64-WIN32:       # %bb.0: # %entry
; X64-WIN32-NEXT:    movl %edx, %eax
; X64-WIN32-NEXT:    xorl %edx, %edx
; X64-WIN32-NEXT:    cmpl $10, %ecx
; X64-WIN32-NEXT:    sete %dl
; X64-WIN32-NEXT:    subl %edx, %eax
; X64-WIN32-NEXT:    retq
entry:
  %cmp = icmp eq i32 %x, 10
  %sub = sext i1 %cmp to i32
  %cond = add i32 %sub, %y
  ret i32 %cond
}
343
; Only the overflow bit of sadd(x, 1) is used: an incl sets OF and seto
; reads it; the sum itself is discarded.
define i1 @test10(i32 %x) nounwind {
; X86-LABEL: test10:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    incl %eax
; X86-NEXT:    seto %al
; X86-NEXT:    retl
;
; X64-LINUX-LABEL: test10:
; X64-LINUX:       # %bb.0: # %entry
; X64-LINUX-NEXT:    incl %edi
; X64-LINUX-NEXT:    seto %al
; X64-LINUX-NEXT:    retq
;
; X64-WIN32-LABEL: test10:
; X64-WIN32:       # %bb.0: # %entry
; X64-WIN32-NEXT:    incl %ecx
; X64-WIN32-NEXT:    seto %al
; X64-WIN32-NEXT:    retq
entry:
  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %x, i32 1)
  %obit = extractvalue {i32, i1} %t, 1
  ret i1 %obit
}
368
; i32 read-modify-write add of 128: load+add+store folds into a single
; memory-destination subl $-128 (imm8) on all three targets.
define void @test11(i32* inreg %a) nounwind {
; X86-LABEL: test11:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $-128, (%eax)
; X86-NEXT:    retl
;
; X64-LINUX-LABEL: test11:
; X64-LINUX:       # %bb.0: # %entry
; X64-LINUX-NEXT:    subl $-128, (%rdi)
; X64-LINUX-NEXT:    retq
;
; X64-WIN32-LABEL: test11:
; X64-WIN32:       # %bb.0: # %entry
; X64-WIN32-NEXT:    subl $-128, (%rcx)
; X64-WIN32-NEXT:    retq
entry:
  %aa = load i32, i32* %a
  %b = add i32 %aa, 128
  store i32 %b, i32* %a
  ret void
}
390
; i64 RMW add of 2^31: x86-64 uses a memory-destination subq $-2147483648
; (sign-extended imm32); i686 uses an addl/adcl pair on the two halves.
define void @test12(i64* inreg %a) nounwind {
; X86-LABEL: test12:
; X86:       # %bb.0: # %entry
; X86-NEXT:    addl $-2147483648, (%eax) # imm = 0x80000000
; X86-NEXT:    adcl $0, 4(%eax)
; X86-NEXT:    retl
;
; X64-LINUX-LABEL: test12:
; X64-LINUX:       # %bb.0: # %entry
; X64-LINUX-NEXT:    subq $-2147483648, (%rdi) # imm = 0x80000000
; X64-LINUX-NEXT:    retq
;
; X64-WIN32-LABEL: test12:
; X64-WIN32:       # %bb.0: # %entry
; X64-WIN32-NEXT:    subq $-2147483648, (%rcx) # imm = 0x80000000
; X64-WIN32-NEXT:    retq
entry:
  %aa = load i64, i64* %a
  %b = add i64 %aa, 2147483648
  store i64 %b, i64* %a
  ret void
}
413
; i64 RMW add of 128: x86-64 uses the imm8 memory-destination subq $-128;
; i686 uses addl/adcl on the two halves.
define void @test13(i64* inreg %a) nounwind {
; X86-LABEL: test13:
; X86:       # %bb.0: # %entry
; X86-NEXT:    addl $128, (%eax)
; X86-NEXT:    adcl $0, 4(%eax)
; X86-NEXT:    retl
;
; X64-LINUX-LABEL: test13:
; X64-LINUX:       # %bb.0: # %entry
; X64-LINUX-NEXT:    subq $-128, (%rdi)
; X64-LINUX-NEXT:    retq
;
; X64-WIN32-LABEL: test13:
; X64-WIN32:       # %bb.0: # %entry
; X64-WIN32-NEXT:    subq $-128, (%rcx)
; X64-WIN32-NEXT:    retq
entry:
  %aa = load i64, i64* %a
  %b = add i64 %aa, 128
  store i64 %b, i64* %a
  ret void
}
436
; ~a + 1 == -a: folded to negl on x86-64; i686 materializes 0 - a with
; xorl + subl.
define i32 @inc_not(i32 %a) {
; X86-LABEL: inc_not:
; X86:       # %bb.0:
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    subl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    retl
;
; X64-LINUX-LABEL: inc_not:
; X64-LINUX:       # %bb.0:
; X64-LINUX-NEXT:    movl %edi, %eax
; X64-LINUX-NEXT:    negl %eax
; X64-LINUX-NEXT:    retq
;
; X64-WIN32-LABEL: inc_not:
; X64-WIN32:       # %bb.0:
; X64-WIN32-NEXT:    movl %ecx, %eax
; X64-WIN32-NEXT:    negl %eax
; X64-WIN32-NEXT:    retq
  %nota = xor i32 %a, -1
  %r = add i32 %nota, 1
  ret i32 %r
}
459
; Vector form of ~a + 1 == -a: a single psubd from a zeroed register on
; x86-64; i686 scalarizes into four subl's and stores the lanes through %eax.
define <4 x i32> @inc_not_vec(<4 x i32> %a) nounwind {
; X86-LABEL: inc_not_vec:
; X86:       # %bb.0:
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    xorl %ecx, %ecx
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    subl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    xorl %esi, %esi
; X86-NEXT:    subl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    xorl %edi, %edi
; X86-NEXT:    subl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    subl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl %ecx, 12(%eax)
; X86-NEXT:    movl %edi, 8(%eax)
; X86-NEXT:    movl %esi, 4(%eax)
; X86-NEXT:    movl %edx, (%eax)
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    retl $4
;
; X64-LINUX-LABEL: inc_not_vec:
; X64-LINUX:       # %bb.0:
; X64-LINUX-NEXT:    pxor %xmm1, %xmm1
; X64-LINUX-NEXT:    psubd %xmm0, %xmm1
; X64-LINUX-NEXT:    movdqa %xmm1, %xmm0
; X64-LINUX-NEXT:    retq
;
; X64-WIN32-LABEL: inc_not_vec:
; X64-WIN32:       # %bb.0:
; X64-WIN32-NEXT:    pxor %xmm0, %xmm0
; X64-WIN32-NEXT:    psubd (%rcx), %xmm0
; X64-WIN32-NEXT:    retq
  %nota = xor <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
  %r = add <4 x i32> %nota, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %r
}
498
; uaddo(~a, 1): the sum folds to a negate; the overflow bit is captured from
; the flags with setae and stored straight to memory.
define void @uaddo1_not(i32 %a, i32* %p0, i1* %p1) {
; X86-LABEL: uaddo1_not:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    subl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %edx, (%ecx)
; X86-NEXT:    setae (%eax)
; X86-NEXT:    retl
;
; X64-LINUX-LABEL: uaddo1_not:
; X64-LINUX:       # %bb.0:
; X64-LINUX-NEXT:    negl %edi
; X64-LINUX-NEXT:    movl %edi, (%rsi)
; X64-LINUX-NEXT:    setae (%rdx)
; X64-LINUX-NEXT:    retq
;
; X64-WIN32-LABEL: uaddo1_not:
; X64-WIN32:       # %bb.0:
; X64-WIN32-NEXT:    negl %ecx
; X64-WIN32-NEXT:    movl %ecx, (%rdx)
; X64-WIN32-NEXT:    setae (%r8)
; X64-WIN32-NEXT:    retq
  %nota = xor i32 %a, -1
  %uaddo = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %nota, i32 1)
  %r0 = extractvalue {i32, i1} %uaddo, 0
  %r1 = extractvalue {i32, i1} %uaddo, 1
  store i32 %r0, i32* %p0
  store i1 %r1, i1* %p1
  ret void
}
531
; ~a + b + 1 == b - a: the whole expression folds into a single subl.
define i32 @add_to_sub(i32 %a, i32 %b) {
; X86-LABEL: add_to_sub:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    subl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    retl
;
; X64-LINUX-LABEL: add_to_sub:
; X64-LINUX:       # %bb.0:
; X64-LINUX-NEXT:    movl %esi, %eax
; X64-LINUX-NEXT:    subl %edi, %eax
; X64-LINUX-NEXT:    retq
;
; X64-WIN32-LABEL: add_to_sub:
; X64-WIN32:       # %bb.0:
; X64-WIN32-NEXT:    movl %edx, %eax
; X64-WIN32-NEXT:    subl %ecx, %eax
; X64-WIN32-NEXT:    retq
  %nota = xor i32 %a, -1
  %add = add i32 %nota, %b
  %r = add i32 %add, 1
  ret i32 %r
}
555
556declare dso_local void @bar_i32(i32)
557declare dso_local void @bar_i64(i64)
558
; Make sure we can use sub -128 for add 128 when the flags are used.
; subl $-128 sets ZF exactly as addl $128 would, so the je can consume it.
define void @add_i32_128_flag(i32 %x) {
; X86-LABEL: add_i32_128_flag:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    subl $-128, %eax
; X86-NEXT:    je .LBB19_2
; X86-NEXT:  # %bb.1: # %if.then
; X86-NEXT:    pushl %eax
; X86-NEXT:    .cfi_adjust_cfa_offset 4
; X86-NEXT:    calll bar_i32
; X86-NEXT:    addl $4, %esp
; X86-NEXT:    .cfi_adjust_cfa_offset -4
; X86-NEXT:  .LBB19_2: # %if.end
; X86-NEXT:    retl
;
; X64-LINUX-LABEL: add_i32_128_flag:
; X64-LINUX:       # %bb.0: # %entry
; X64-LINUX-NEXT:    subl $-128, %edi
; X64-LINUX-NEXT:    je .LBB19_1
; X64-LINUX-NEXT:  # %bb.2: # %if.then
; X64-LINUX-NEXT:    jmp bar_i32 # TAILCALL
; X64-LINUX-NEXT:  .LBB19_1: # %if.end
; X64-LINUX-NEXT:    retq
;
; X64-WIN32-LABEL: add_i32_128_flag:
; X64-WIN32:       # %bb.0: # %entry
; X64-WIN32-NEXT:    subl $-128, %ecx
; X64-WIN32-NEXT:    je .LBB19_1
; X64-WIN32-NEXT:  # %bb.2: # %if.then
; X64-WIN32-NEXT:    jmp bar_i32 # TAILCALL
; X64-WIN32-NEXT:  .LBB19_1: # %if.end
; X64-WIN32-NEXT:    retq
entry:
  %add = add i32 %x, 128
  %tobool = icmp eq i32 %add, 0
  br i1 %tobool, label %if.end, label %if.then

if.then:
  tail call void @bar_i32(i32 %add)
  br label %if.end

if.end:
  ret void
}
604
; Make sure we can use sub -128 for add 128 when the flags are used.
; x86-64 branches on ZF from subq $-128; i686 must test the full 64-bit
; result by or-ing the two halves before the je.
define void @add_i64_128_flag(i64 %x) {
; X86-LABEL: add_i64_128_flag:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl $128, %eax
; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    adcl $0, %ecx
; X86-NEXT:    movl %eax, %edx
; X86-NEXT:    orl %ecx, %edx
; X86-NEXT:    je .LBB20_2
; X86-NEXT:  # %bb.1: # %if.then
; X86-NEXT:    pushl %ecx
; X86-NEXT:    .cfi_adjust_cfa_offset 4
; X86-NEXT:    pushl %eax
; X86-NEXT:    .cfi_adjust_cfa_offset 4
; X86-NEXT:    calll bar_i64
; X86-NEXT:    addl $8, %esp
; X86-NEXT:    .cfi_adjust_cfa_offset -8
; X86-NEXT:  .LBB20_2: # %if.end
; X86-NEXT:    retl
;
; X64-LINUX-LABEL: add_i64_128_flag:
; X64-LINUX:       # %bb.0: # %entry
; X64-LINUX-NEXT:    subq $-128, %rdi
; X64-LINUX-NEXT:    je .LBB20_1
; X64-LINUX-NEXT:  # %bb.2: # %if.then
; X64-LINUX-NEXT:    jmp bar_i64 # TAILCALL
; X64-LINUX-NEXT:  .LBB20_1: # %if.end
; X64-LINUX-NEXT:    retq
;
; X64-WIN32-LABEL: add_i64_128_flag:
; X64-WIN32:       # %bb.0: # %entry
; X64-WIN32-NEXT:    subq $-128, %rcx
; X64-WIN32-NEXT:    je .LBB20_1
; X64-WIN32-NEXT:  # %bb.2: # %if.then
; X64-WIN32-NEXT:    jmp bar_i64 # TAILCALL
; X64-WIN32-NEXT:  .LBB20_1: # %if.end
; X64-WIN32-NEXT:    retq
entry:
  %add = add i64 %x, 128
  %tobool = icmp eq i64 %add, 0
  br i1 %tobool, label %if.end, label %if.then

if.then:
  tail call void @bar_i64(i64 %add)
  br label %if.end

if.end:
  ret void
}
656
; Make sure we can use sub -2147483648 for add 2147483648 when the flags are used.
; x86-64 branches on ZF from subq $-2147483648; i686 tests the full 64-bit
; result by or-ing the two halves before the je.
define void @add_i64_2147483648_flag(i64 %x) {
; X86-LABEL: add_i64_2147483648_flag:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    adcl $0, %ecx
; X86-NEXT:    movl %eax, %edx
; X86-NEXT:    orl %ecx, %edx
; X86-NEXT:    je .LBB21_2
; X86-NEXT:  # %bb.1: # %if.then
; X86-NEXT:    pushl %ecx
; X86-NEXT:    .cfi_adjust_cfa_offset 4
; X86-NEXT:    pushl %eax
; X86-NEXT:    .cfi_adjust_cfa_offset 4
; X86-NEXT:    calll bar_i64
; X86-NEXT:    addl $8, %esp
; X86-NEXT:    .cfi_adjust_cfa_offset -8
; X86-NEXT:  .LBB21_2: # %if.end
; X86-NEXT:    retl
;
; X64-LINUX-LABEL: add_i64_2147483648_flag:
; X64-LINUX:       # %bb.0: # %entry
; X64-LINUX-NEXT:    subq $-2147483648, %rdi # imm = 0x80000000
; X64-LINUX-NEXT:    je .LBB21_1
; X64-LINUX-NEXT:  # %bb.2: # %if.then
; X64-LINUX-NEXT:    jmp bar_i64 # TAILCALL
; X64-LINUX-NEXT:  .LBB21_1: # %if.end
; X64-LINUX-NEXT:    retq
;
; X64-WIN32-LABEL: add_i64_2147483648_flag:
; X64-WIN32:       # %bb.0: # %entry
; X64-WIN32-NEXT:    subq $-2147483648, %rcx # imm = 0x80000000
; X64-WIN32-NEXT:    je .LBB21_1
; X64-WIN32-NEXT:  # %bb.2: # %if.then
; X64-WIN32-NEXT:    jmp bar_i64 # TAILCALL
; X64-WIN32-NEXT:  .LBB21_1: # %if.end
; X64-WIN32-NEXT:    retq
entry:
  %add = add i64 %x, 2147483648
  %tobool = icmp eq i64 %add, 0
  br i1 %tobool, label %if.end, label %if.then

if.then:
  tail call void @bar_i64(i64 %add)
  br label %if.end

if.end:
  ret void
}
708