1 //
2 // Copyright 2021 The ANGLE Project Authors. All rights reserved.
3 // Use of this source code is governed by a BSD-style license that can be
4 // found in the LICENSE file.
5 //
6 // Spinlock.h:
7 //   Spinlock is a lock that loops actively until it gets the resource.
8 //   Only use it when the lock will be granted in reasonably short time.
9 
10 #ifndef COMMON_SPINLOCK_H_
11 #define COMMON_SPINLOCK_H_
12 
13 #include <atomic>
14 
// TODO(jplate) Add pause for ARM, http://anglebug.com/6067
16 #if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
17 extern "C" void _mm_pause();
18 #    pragma intrinsic(_mm_pause)
19 #    define ANGLE_SMT_PAUSE() _mm_pause()
20 #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
21 #    define ANGLE_SMT_PAUSE() __asm__ __volatile__("pause;")
22 #else
23 #    define ANGLE_SMT_PAUSE() static_cast<void>(0)
24 #endif
25 
26 namespace angle
27 {
28 
// A busy-wait mutual-exclusion primitive built on a single atomic flag.
// Provides lock() / try_lock() / unlock() with the standard Lockable
// signatures, so it can be used with std::lock_guard and similar RAII
// wrappers.  Waiters spin instead of blocking; only use it when the
// critical section is short.
class Spinlock
{
  public:
    // Constructs the spinlock in the unlocked state.
    Spinlock() noexcept;

    // Attempts to take the lock without spinning; returns true on success.
    bool try_lock() noexcept;
    // Spins until the lock is acquired.
    void lock() noexcept;
    // Releases the lock; must only be called by the current holder.
    void unlock() noexcept;

  private:
    // true while the lock is held, false when it is free.
    std::atomic_bool mLock;
};
41 
Spinlock()42 inline Spinlock::Spinlock() noexcept : mLock(false) {}
43 
try_lock()44 inline bool Spinlock::try_lock() noexcept
45 {
46     // Relaxed check first to prevent unnecessary cache misses.
47     return !mLock.load(std::memory_order_relaxed) &&
48            !mLock.exchange(true, std::memory_order_acquire);
49 }
50 
lock()51 inline void Spinlock::lock() noexcept
52 {
53     while (mLock.exchange(true, std::memory_order_acquire))
54     {
55         // Relaxed wait to prevent unnecessary cache misses.
56         while (mLock.load(std::memory_order_relaxed))
57         {
58             // Optimization for simultaneous multithreading.
59             ANGLE_SMT_PAUSE();
60         }
61     }
62 }
63 
// Releases the lock.  The release store pairs with the acquire exchange in
// lock()/try_lock(), making writes done inside the critical section visible
// to the next thread that acquires the lock.
inline void Spinlock::unlock() noexcept
{
    mLock.store(false, std::memory_order_release);
}
68 
69 }  // namespace angle
70 
71 #endif  // COMMON_SPINLOCK_H_
72