; RUN: llc < %s -mtriple=x86_64-unknown-unknown -verify-machineinstrs | FileCheck %s
; RUN: llc < %s -mtriple=i686-unknown-unknown -verify-machineinstrs | FileCheck %s

; Make sure that flags are properly preserved despite atomic optimizations.

define i32 @atomic_and_flags_1(i8* %p, i32 %a, i32 %b) {
; CHECK-LABEL: atomic_and_flags_1:

  ; Generate flags value, and use it.
  ; CHECK:      cmpl
  ; CHECK-NEXT: jne
  %cmp = icmp eq i32 %a, %b
  br i1 %cmp, label %L1, label %L2

L1:
  ; The load/add/store sequence below gets folded into a single RMW instruction.
  ; CHECK: incb
  %1 = load atomic i8, i8* %p seq_cst, align 1
  %2 = add i8 %1, 1 ; This forces the INC instruction to be generated.
  store atomic i8 %2, i8* %p release, align 1

  ; Use the comparison result again. We need to rematerialize the comparison
  ; somehow. This test checks that cmpl gets emitted again, but any
  ; rematerialization would work (the optimizer used to clobber the flags with
  ; the add).
  ; CHECK-NEXT: cmpl
  ; CHECK-NEXT: jne
  br i1 %cmp, label %L3, label %L4

L2:
  ret i32 2

L3:
  ret i32 3

L4:
  ret i32 4
}

; Same as above, but using 2 as the immediate to avoid the INC instruction.
define i32 @atomic_and_flags_2(i8* %p, i32 %a, i32 %b) {
; CHECK-LABEL: atomic_and_flags_2:
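  ; Generate the flags value and use it, as in atomic_and_flags_1.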
  ; CHECK:      cmpl
  ; CHECK-NEXT: jne
  %cmp = icmp eq i32 %a, %b
  br i1 %cmp, label %L1, label %L2
L1:
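  ; The load/add/store sequence below gets folded into a memory-operand addb.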
  ; CHECK: addb
  %1 = load atomic i8, i8* %p seq_cst, align 1
  %2 = add i8 %1, 2
  store atomic i8 %2, i8* %p release, align 1
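  ; As in atomic_and_flags_1, the comparison must be rematerialized after the
  ; flag-clobbering add.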
  ; CHECK-NEXT: cmpl
  ; CHECK-NEXT: jne
  br i1 %cmp, label %L3, label %L4
L2:
  ret i32 2
L3:
  ret i32 3
L4:
  ret i32 4
}