; RUN: llc < %s -march=sparcv9 -verify-machineinstrs | FileCheck %s

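; Acquire loads are each expected to be followed by a membar, and the release
; store to be preceded by one.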
; CHECK-LABEL: test_atomic_i32
; CHECK:       ld [%o0]
; CHECK:       membar
; CHECK:       ld [%o1]
; CHECK:       membar
; CHECK:       membar
; CHECK:       st {{.+}}, [%o2]
define i32 @test_atomic_i32(i32* %ptr1, i32* %ptr2, i32* %ptr3) {
entry:
  %0 = load atomic i32, i32* %ptr1 acquire, align 8
  %1 = load atomic i32, i32* %ptr2 acquire, align 8
  %2 = add i32 %0, %1
  store atomic i32 %2, i32* %ptr3 release, align 8
  ret i32 %2
}

; CHECK-LABEL: test_atomic_i64
; CHECK:       ldx [%o0]
; CHECK:       membar
; CHECK:       ldx [%o1]
; CHECK:       membar
; CHECK:       membar
; CHECK:       stx {{.+}}, [%o2]
define i64 @test_atomic_i64(i64* %ptr1, i64* %ptr2, i64* %ptr3) {
entry:
  %0 = load atomic i64, i64* %ptr1 acquire, align 8
  %1 = load atomic i64, i64* %ptr2 acquire, align 8
  %2 = add i64 %0, %1
  store atomic i64 %2, i64* %ptr3 release, align 8
  ret i64 %2
}

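; cmpxchg is expected to lower to cas (i32) / casx (i64), with the swap value
; 123 materialized into a register and the expected value arriving in %o0.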
; CHECK-LABEL: test_cmpxchg_i32
; CHECK:       mov 123, [[R:%[gilo][0-7]]]
; CHECK:       cas [%o1], %o0, [[R]]

define i32 @test_cmpxchg_i32(i32 %a, i32* %ptr) {
entry:
  %pair = cmpxchg i32* %ptr, i32 %a, i32 123 monotonic monotonic
  %b = extractvalue { i32, i1 } %pair, 0
  ret i32 %b
}

; CHECK-LABEL: test_cmpxchg_i64
; CHECK:       mov 123, [[R:%[gilo][0-7]]]
; CHECK:       casx [%o1], %o0, [[R]]

define i64 @test_cmpxchg_i64(i64 %a, i64* %ptr) {
entry:
  %pair = cmpxchg i64* %ptr, i64 %a, i64 123 monotonic monotonic
  %b = extractvalue { i64, i1 } %pair, 0
  ret i64 %b
}

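; i32 xchg is expected to map directly onto the swap instruction.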
; CHECK-LABEL: test_swap_i32
; CHECK:       mov 42, [[R:%[gilo][0-7]]]
; CHECK:       swap [%o1], [[R]]

define i32 @test_swap_i32(i32 %a, i32* %ptr) {
entry:
  %b = atomicrmw xchg i32* %ptr, i32 42 monotonic
  ret i32 %b
}

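; There is no 64-bit form of swap, so i64 xchg is expected to be expanded to a
; casx loop instead.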
; CHECK-LABEL: test_swap_i64
; CHECK:       casx [%o1],

define i64 @test_swap_i64(i64 %a, i64* %ptr) {
entry:
  %b = atomicrmw xchg i64* %ptr, i64 42 monotonic
  ret i64 %b
}

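; seq_cst atomicrmw operations are expected to be bracketed by membars and
; implemented with a cas/casx loop.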
; CHECK-LABEL: test_load_add_32
; CHECK: membar
; CHECK: add [[V:%[gilo][0-7]]], %o1, [[U:%[gilo][0-7]]]
; CHECK: cas [%o0], [[V]], [[U]]
; CHECK: membar
define zeroext i32 @test_load_add_32(i32* %p, i32 zeroext %v) {
entry:
  %0 = atomicrmw add i32* %p, i32 %v seq_cst
  ret i32 %0
}

; CHECK-LABEL: test_load_sub_64
; CHECK: membar
; CHECK: sub
; CHECK: casx [%o0]
; CHECK: membar
define zeroext i64 @test_load_sub_64(i64* %p, i64 zeroext %v) {
entry:
  %0 = atomicrmw sub i64* %p, i64 %v seq_cst
  ret i64 %0
}

; CHECK-LABEL: test_load_xor_32
; CHECK: membar
; CHECK: xor
; CHECK: cas [%o0]
; CHECK: membar
define zeroext i32 @test_load_xor_32(i32* %p, i32 zeroext %v) {
entry:
  %0 = atomicrmw xor i32* %p, i32 %v seq_cst
  ret i32 %0
}

; CHECK-LABEL: test_load_and_32
; CHECK: membar
; CHECK: and
; CHECK-NOT: xor
; CHECK: cas [%o0]
; CHECK: membar
define zeroext i32 @test_load_and_32(i32* %p, i32 zeroext %v) {
entry:
  %0 = atomicrmw and i32* %p, i32 %v seq_cst
  ret i32 %0
}

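; nand is expected to expand to an and followed by an xor that inverts the
; result, which is why plain and above checks that no xor is emitted.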
; CHECK-LABEL: test_load_nand_32
; CHECK: membar
; CHECK: and
; CHECK: xor
; CHECK: cas [%o0]
; CHECK: membar
define zeroext i32 @test_load_nand_32(i32* %p, i32 zeroext %v) {
entry:
  %0 = atomicrmw nand i32* %p, i32 %v seq_cst
  ret i32 %0
}

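; The min/max variants are expected to add a compare and a conditional move
; that select the value fed into the cas/casx loop.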
; CHECK-LABEL: test_load_max_64
; CHECK: membar
; CHECK: cmp
; CHECK: movg %xcc
; CHECK: casx [%o0]
; CHECK: membar
define zeroext i64 @test_load_max_64(i64* %p, i64 zeroext %v) {
entry:
  %0 = atomicrmw max i64* %p, i64 %v seq_cst
  ret i64 %0
}

; CHECK-LABEL: test_load_umin_32
; CHECK: membar
; CHECK: cmp
; CHECK: movleu %icc
; CHECK: cas [%o0]
; CHECK: membar
define zeroext i32 @test_load_umin_32(i32* %p, i32 zeroext %v) {
entry:
  %0 = atomicrmw umin i32* %p, i32 %v seq_cst
  ret i32 %0
}