; RUN: llc < %s -O0 -march=x86 -mcpu=corei7 -verify-machineinstrs | FileCheck %s --check-prefix X32

@sc64 = external global i64
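
; 32-bit x86 has no 8-byte atomic instruction other than CMPXCHG8B, so every
; 64-bit atomic operation below is expected to lower to a compare-and-swap
; loop around lock cmpxchg8b at -O0.

; add: the new 64-bit value is computed with an addl/adcl pair, then installed
; by the cmpxchg8b loop.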
define void @atomic_fetch_add64() nounwind {
; X32-LABEL:   atomic_fetch_add64:
entry:
  %t1 = atomicrmw add  i64* @sc64, i64 1 acquire
; X32:       addl
; X32:       adcl
; X32:       lock
; X32:       cmpxchg8b
  %t2 = atomicrmw add  i64* @sc64, i64 3 acquire
; X32:       addl
; X32:       adcl
; X32:       lock
; X32:       cmpxchg8b
  %t3 = atomicrmw add  i64* @sc64, i64 5 acquire
; X32:       addl
; X32:       adcl
; X32:       lock
; X32:       cmpxchg8b
  %t4 = atomicrmw add  i64* @sc64, i64 %t3 acquire
; X32:       addl
; X32:       adcl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}
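
; sub: subtracting a constant is folded into adding its negation (addl/adcl of
; $-c); a register operand uses subl/sbbl instead.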
define void @atomic_fetch_sub64() nounwind {
; X32-LABEL:   atomic_fetch_sub64:
  %t1 = atomicrmw sub  i64* @sc64, i64 1 acquire
; X32:       addl $-1
; X32:       adcl $-1
; X32:       lock
; X32:       cmpxchg8b
  %t2 = atomicrmw sub  i64* @sc64, i64 3 acquire
; X32:       addl $-3
; X32:       adcl $-1
; X32:       lock
; X32:       cmpxchg8b
  %t3 = atomicrmw sub  i64* @sc64, i64 5 acquire
; X32:       addl $-5
; X32:       adcl $-1
; X32:       lock
; X32:       cmpxchg8b
  %t4 = atomicrmw sub  i64* @sc64, i64 %t3 acquire
; X32:       subl
; X32:       sbbl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}
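
; and: the high half of the mask 3 is zero, so the high word is simply cleared
; and only one andl is expected (hence the X32-NOT check); 4294967297
; (0x100000001) has bit 0 set in both halves and needs two andl.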
define void @atomic_fetch_and64() nounwind {
; X32-LABEL:   atomic_fetch_and64:
  %t1 = atomicrmw and  i64* @sc64, i64 3 acquire
; X32:       andl $3
; X32-NOT:   andl
; X32:       lock
; X32:       cmpxchg8b
  %t2 = atomicrmw and  i64* @sc64, i64 4294967297 acquire
; X32:       andl $1
; X32:       andl $1
; X32:       lock
; X32:       cmpxchg8b
  %t3 = atomicrmw and  i64* @sc64, i64 %t2 acquire
; X32:       andl
; X32:       andl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}
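
; or: or with the zero high half of the constant 3 is an identity and is
; elided, so only one orl is expected; 0x100000001 needs one orl per half.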
define void @atomic_fetch_or64() nounwind {
; X32-LABEL:   atomic_fetch_or64:
  %t1 = atomicrmw or   i64* @sc64, i64 3 acquire
; X32:       orl $3
; X32-NOT:   orl
; X32:       lock
; X32:       cmpxchg8b
  %t2 = atomicrmw or   i64* @sc64, i64 4294967297 acquire
; X32:       orl $1
; X32:       orl $1
; X32:       lock
; X32:       cmpxchg8b
  %t3 = atomicrmw or   i64* @sc64, i64 %t2 acquire
; X32:       orl
; X32:       orl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}
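
; xor: same elision as and/or; the xor with the zero high half of 3 is dropped.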
define void @atomic_fetch_xor64() nounwind {
; X32-LABEL:   atomic_fetch_xor64:
  %t1 = atomicrmw xor  i64* @sc64, i64 3 acquire
; X32:       xorl
; X32-NOT:   xorl
; X32:       lock
; X32:       cmpxchg8b
  %t2 = atomicrmw xor  i64* @sc64, i64 4294967297 acquire
; X32:       xorl $1
; X32:       xorl $1
; X32:       lock
; X32:       cmpxchg8b
  %t3 = atomicrmw xor  i64* @sc64, i64 %t2 acquire
; X32:       xorl
; X32:       xorl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}
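
; nand is not a machine operation: it is expected to lower to andl on each
; half followed by notl on each half.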
define void @atomic_fetch_nand64(i64 %x) nounwind {
; X32-LABEL:   atomic_fetch_nand64:
  %t1 = atomicrmw nand i64* @sc64, i64 %x acquire
; X32:       andl
; X32:       andl
; X32:       notl
; X32:       notl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}
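
; max/min/umax/umin: the 64-bit comparison is done as a subl/sbbl wide
; compare, and the winning value is selected with two cmov (available on
; corei7).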
define void @atomic_fetch_max64(i64 %x) nounwind {
; X32-LABEL:   atomic_fetch_max64:
  %t1 = atomicrmw max  i64* @sc64, i64 %x acquire
; X32:       subl
; X32:       subl
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}
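
; min: same subl/sbbl + cmov pattern, with the opposite selection.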
define void @atomic_fetch_min64(i64 %x) nounwind {
; X32-LABEL:   atomic_fetch_min64:
  %t1 = atomicrmw min  i64* @sc64, i64 %x acquire
; X32:       subl
; X32:       subl
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}
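
; umax: unsigned variant of the same compare-and-cmov sequence.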
define void @atomic_fetch_umax64(i64 %x) nounwind {
; X32-LABEL:   atomic_fetch_umax64:
  %t1 = atomicrmw umax i64* @sc64, i64 %x acquire
; X32:       subl
; X32:       subl
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}
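
; umin: unsigned variant, opposite selection.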
define void @atomic_fetch_umin64(i64 %x) nounwind {
; X32-LABEL:   atomic_fetch_umin64:
  %t1 = atomicrmw umin i64* @sc64, i64 %x acquire
; X32:       subl
; X32:       subl
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}
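
; cmpxchg maps directly onto lock cmpxchg8b.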
define void @atomic_fetch_cmpxchg64() nounwind {
; X32-LABEL:   atomic_fetch_cmpxchg64:
  %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire acquire
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}
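
; even a plain 64-bit atomic store cannot be done with a single i386 integer
; store, so it is implemented here with a cmpxchg8b loop.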
define void @atomic_fetch_store64(i64 %x) nounwind {
; X32-LABEL:   atomic_fetch_store64:
  store atomic i64 %x, i64* @sc64 release, align 8
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}
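
; xchg of a 64-bit value likewise goes through a cmpxchg8b loop.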
define void @atomic_fetch_swap64(i64 %x) nounwind {
; X32-LABEL:   atomic_fetch_swap64:
  %t1 = atomicrmw xchg i64* @sc64, i64 %x acquire
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}