// Protocol Buffers - Google's data interchange format
// Copyright 2012 Google Inc.  All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation; use atomicops.h instead.

#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_

#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

namespace google {
namespace protobuf {
namespace internal {

// Atomically execute:
//   result = *ptr;
//   if (*ptr == old_value)
//     *ptr = new_value;
//   return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always returns the old value of "*ptr".
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev, tmp;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %0, %5\n"  // prev = *ptr
                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
                       "move %2, %4\n"  // tmp = new_value
                       "sc %2, %1\n"  // *ptr = tmp (with atomic check)
                       "beqz %2, 1b\n"  // start again on atomic error
                       "nop\n"  // delay slot nop
                       "2:\n"
                       ".set pop\n"
                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
                       : "r" (old_value), "r" (new_value), "m" (*ptr)
                       : "memory");
  return prev;
}
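
// Usage sketch (illustrative addition, not part of the upstream API): a
// minimal try-lock built on the compare-and-swap above. The name
// Example_TryLock and the 0 == free / 1 == held encoding are assumptions
// made for this example; callers that need ordering guarantees should use
// the Acquire/Release CAS variants defined later in this file.
inline bool Example_TryLock(volatile Atomic32* lock_word) {
  // The CAS succeeds exactly when the lock was observed free (old value 0).
  return NoBarrier_CompareAndSwap(lock_word, 0, 1) == 0;
}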

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 temp, old;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %1, %4\n"  // old = *ptr
                       "move %0, %3\n"  // temp = new_value
                       "sc %0, %2\n"  // *ptr = temp (with atomic check)
                       "beqz %0, 1b\n"  // start again on atomic error
                       "nop\n"  // delay slot nop
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                       : "r" (new_value), "m" (*ptr)
                       : "memory");

  return old;
}
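
// Usage sketch (illustrative addition): a test-and-set spin loop built on
// the exchange above. Example_SpinAcquire is a hypothetical name; a real
// lock acquire would also need acquire ordering after the loop exits (e.g.
// a MemoryBarrier() call, defined below), which is omitted here.
inline void Example_SpinAcquire(volatile Atomic32* lock_word) {
  // Keep swapping in 1 until the previous value was 0 (lock was free).
  while (NoBarrier_AtomicExchange(lock_word, 1) != 0) {
  }
}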

// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 temp, temp2;

  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %0, %4\n"  // temp = *ptr
                       "addu %1, %0, %3\n"  // temp2 = temp + increment
                       "sc %1, %2\n"  // *ptr = temp2 (with atomic check)
                       "beqz %1, 1b\n"  // start again on atomic error
                       "addu %1, %0, %3\n"  // delay slot: recompute temp2,
                                            // since sc clobbered it
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
                       : "Ir" (increment), "m" (*ptr)
                       : "memory");
  // temp2 now holds the final value.
  return temp2;
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  ATOMICOPS_COMPILER_BARRIER();
  Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
  ATOMICOPS_COMPILER_BARRIER();
  return res;
}
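
// Usage sketch (illustrative addition): a simple reference count on top of
// the increments above. Example_Ref/Example_Unref are hypothetical names;
// choosing the Barrier variant for the decrement is an assumption so the
// compiler cannot reorder accesses across the final release.
inline void Example_Ref(volatile Atomic32* refcount) {
  NoBarrier_AtomicIncrement(refcount, 1);
}

inline bool Example_Unref(volatile Atomic32* refcount) {
  // Returns true when the caller dropped the last reference.
  return Barrier_AtomicIncrement(refcount, -1) == 0;
}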

// "Acquire" operations ensure that no later memory access can be reordered
// ahead of the operation. "Release" operations ensure that no previous memory
// access can be reordered after the operation. "Barrier" operations have both
// "Acquire" and "Release" semantics. A MemoryBarrier() has "Barrier"
// semantics, but does no memory access.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ATOMICOPS_COMPILER_BARRIER();
  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  ATOMICOPS_COMPILER_BARRIER();
  return res;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ATOMICOPS_COMPILER_BARRIER();
  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  ATOMICOPS_COMPILER_BARRIER();
  return res;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

// Issues a full hardware memory barrier ("sync" on MIPS).
inline void MemoryBarrier() {
  __asm__ __volatile__("sync" : : : "memory");
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
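
// Usage sketch (illustrative addition): the message-passing pattern that the
// Release/Acquire store and load above exist for. Example_Publish,
// Example_Consume, and the flag/payload layout are assumptions made for this
// example.
inline void Example_Publish(volatile Atomic32* payload,
                            volatile Atomic32* flag) {
  *payload = 42;           // plain write of the data
  Release_Store(flag, 1);  // barrier, then flag store: the payload write is
                           // visible to any thread that acquires the flag
}

inline Atomic32 Example_Consume(volatile const Atomic32* payload,
                                volatile const Atomic32* flag) {
  while (Acquire_Load(flag) == 0) {  // flag load, then barrier
  }
  return *payload;  // guaranteed to observe the published value (42)
}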

#if defined(__LP64__)
// 64-bit versions of the atomic ops.

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev, tmp;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "lld %0, %5\n"  // prev = *ptr
                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
                       "move %2, %4\n"  // tmp = new_value
                       "scd %2, %1\n"  // *ptr = tmp (with atomic check)
                       "beqz %2, 1b\n"  // start again on atomic error
                       "nop\n"  // delay slot nop
                       "2:\n"
                       ".set pop\n"
                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
                       : "r" (old_value), "r" (new_value), "m" (*ptr)
                       : "memory");
  return prev;
}

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 temp, old;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "lld %1, %4\n"  // old = *ptr
                       "move %0, %3\n"  // temp = new_value
                       "scd %0, %2\n"  // *ptr = temp (with atomic check)
                       "beqz %0, 1b\n"  // start again on atomic error
                       "nop\n"  // delay slot nop
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                       : "r" (new_value), "m" (*ptr)
                       : "memory");

  return old;
}

// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 temp, temp2;

  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "lld %0, %4\n"  // temp = *ptr
                       "daddu %1, %0, %3\n"  // temp2 = temp + increment
                       "scd %1, %2\n"  // *ptr = temp2 (with atomic check)
                       "beqz %1, 1b\n"  // start again on atomic error
                       "daddu %1, %0, %3\n"  // delay slot: recompute temp2,
                                             // since scd clobbered it
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
                       : "Ir" (increment), "m" (*ptr)
                       : "memory");
  // temp2 now holds the final value.
  return temp2;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  MemoryBarrier();
  Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();
  return res;
}

// See the comments on the 32-bit Acquire/Release operations above; the same
// semantics apply to the 64-bit versions below.
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return res;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  MemoryBarrier();
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}
#endif  // defined(__LP64__)

}  // namespace internal
}  // namespace protobuf
}  // namespace google

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_