Lines Matching refs:mo (references to the memory-order argument mo in the TSan atomic runtime interface)

59 static bool IsLoadOrder(morder mo) {  in IsLoadOrder()  argument
60 return mo == mo_relaxed || mo == mo_consume in IsLoadOrder()
61 || mo == mo_acquire || mo == mo_seq_cst; in IsLoadOrder()
64 static bool IsStoreOrder(morder mo) { in IsStoreOrder() argument
65 return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst; in IsStoreOrder()
68 static bool IsReleaseOrder(morder mo) { in IsReleaseOrder() argument
69 return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst; in IsReleaseOrder()
72 static bool IsAcquireOrder(morder mo) { in IsAcquireOrder() argument
73 return mo == mo_consume || mo == mo_acquire in IsAcquireOrder()
74 || mo == mo_acq_rel || mo == mo_seq_cst; in IsAcquireOrder()
77 static bool IsAcqRelOrder(morder mo) { in IsAcqRelOrder() argument
78 return mo == mo_acq_rel || mo == mo_seq_cst; in IsAcqRelOrder()
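
The five predicates above classify a memory order by where it may legally appear and by the synchronization it implies. A small self-contained sketch, using a stand-in enum assumed to mirror the C11 orders (the real morder definition is not part of this listing), restates the same logic and pins down the key relationships with compile-time checks:

    // Stand-in for the runtime's morder enum (assumed to mirror the C11 orders).
    enum morder {
      mo_relaxed, mo_consume, mo_acquire, mo_release, mo_acq_rel, mo_seq_cst
    };

    constexpr bool IsLoadOrder(morder mo) {
      return mo == mo_relaxed || mo == mo_consume ||
             mo == mo_acquire || mo == mo_seq_cst;
    }
    constexpr bool IsStoreOrder(morder mo) {
      return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
    }
    constexpr bool IsReleaseOrder(morder mo) {
      return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
    }
    constexpr bool IsAcquireOrder(morder mo) {
      return mo == mo_consume || mo == mo_acquire ||
             mo == mo_acq_rel || mo == mo_seq_cst;
    }
    constexpr bool IsAcqRelOrder(morder mo) {
      return mo == mo_acq_rel || mo == mo_seq_cst;
    }

    // seq_cst is valid everywhere and implies both acquire and release.
    static_assert(IsLoadOrder(mo_seq_cst) && IsStoreOrder(mo_seq_cst) &&
                  IsAcqRelOrder(mo_seq_cst), "seq_cst is the strongest order");
    // acq_rel is meaningful only for read-modify-write operations, so it is
    // neither a plain load order nor a plain store order.
    static_assert(!IsLoadOrder(mo_acq_rel) && !IsStoreOrder(mo_acq_rel) &&
                  IsAcqRelOrder(mo_acq_rel), "acq_rel is RMW-only");
    // consume/acquire carry acquire semantics but no release semantics.
    static_assert(IsAcquireOrder(mo_consume) && !IsReleaseOrder(mo_acquire),
                  "consume and acquire do not release");

In particular, acq_rel being excluded from both IsLoadOrder and IsStoreOrder is what restricts it to the RMW and CAS paths below.
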
220 static memory_order to_mo(morder mo) { in to_mo() argument
221 switch (mo) { in to_mo()
234 static T NoTsanAtomicLoad(const volatile T *a, morder mo) { in NoTsanAtomicLoad() argument
235 return atomic_load(to_atomic(a), to_mo(mo)); in NoTsanAtomicLoad()
239 static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) { in NoTsanAtomicLoad() argument
247 morder mo) { in AtomicLoad() argument
248 CHECK(IsLoadOrder(mo)); in AtomicLoad()
251 if (!IsAcquireOrder(mo)) { in AtomicLoad()
253 return NoTsanAtomicLoad(a, mo); in AtomicLoad()
257 T v = NoTsanAtomicLoad(a, mo); in AtomicLoad()
264 static void NoTsanAtomicStore(volatile T *a, T v, morder mo) { in NoTsanAtomicStore() argument
265 atomic_store(to_atomic(a), v, to_mo(mo)); in NoTsanAtomicStore()
269 static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) { in NoTsanAtomicStore() argument
277 morder mo) { in AtomicStore() argument
278 CHECK(IsStoreOrder(mo)); in AtomicStore()
284 if (!IsReleaseOrder(mo)) { in AtomicStore()
285 NoTsanAtomicStore(a, v, mo); in AtomicStore()
294 NoTsanAtomicStore(a, v, mo); in AtomicStore()
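
AtomicLoad and AtomicStore share one shape: validate the order with the predicates above, and if it carries no acquire or release semantics, fall straight through to the uninstrumented NoTsan variant; only acquire-class loads and release-class stores touch the sync-variable clocks. The sketch below shows that shape only. to_std mirrors the to_mo helper; AcquireClock and ReleaseClock are illustrative placeholder names, not the runtime's real helpers.

    #include <atomic>

    enum morder { mo_relaxed, mo_consume, mo_acquire, mo_release,
                  mo_acq_rel, mo_seq_cst };

    // Mirrors to_mo(): map the runtime's morder onto the C++ memory orders.
    inline std::memory_order to_std(morder mo) {
      switch (mo) {
        case mo_relaxed: return std::memory_order_relaxed;
        case mo_consume: return std::memory_order_consume;
        case mo_acquire: return std::memory_order_acquire;
        case mo_release: return std::memory_order_release;
        case mo_acq_rel: return std::memory_order_acq_rel;
        default:         return std::memory_order_seq_cst;
      }
    }

    // Placeholders for the real vector-clock machinery (illustrative names).
    inline void AcquireClock(const volatile void *) {}
    inline void ReleaseClock(volatile void *) {}

    template <typename T>
    T SketchAtomicLoad(const volatile std::atomic<T> *a, morder mo) {
      // The real code first does CHECK(IsLoadOrder(mo)).
      bool acquires = mo == mo_consume || mo == mo_acquire || mo == mo_seq_cst;
      if (!acquires)
        return a->load(to_std(mo));  // fast path: no sync-variable lookup, no clock work
      T v = a->load(to_std(mo));     // acquire-class: also pull in the clock
      AcquireClock(a);               //   released by the last writer
      return v;
    }

    template <typename T>
    void SketchAtomicStore(volatile std::atomic<T> *a, T v, morder mo) {
      // The real code first does CHECK(IsStoreOrder(mo)).
      bool releases = mo == mo_release || mo == mo_seq_cst;
      if (!releases) {               // relaxed store: nothing to publish
        a->store(v, to_std(mo));
        return;
      }
      ReleaseClock(a);               // publish this thread's clock first
      a->store(v, to_std(mo));
    }
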
299 static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { in AtomicRMW() argument
302 if (mo != mo_relaxed) { in AtomicRMW()
307 if (IsAcqRelOrder(mo)) in AtomicRMW()
309 else if (IsReleaseOrder(mo)) in AtomicRMW()
311 else if (IsAcquireOrder(mo)) in AtomicRMW()
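
AtomicRMW centralizes the ordering decision for every read-modify-write: nothing is synchronized for mo_relaxed, and otherwise the first matching predicate wins, so acq_rel and seq_cst both acquire and release, release only releases, and consume or acquire only acquire. The runnable sketch below folds that dispatch into one helper and prints the resulting table; RmwSync and RmwSyncFor are names invented for the illustration.

    #include <cstdio>

    enum morder { mo_relaxed, mo_consume, mo_acquire, mo_release,
                  mo_acq_rel, mo_seq_cst };

    // The dispatch inside AtomicRMW, folded into one helper: which clock
    // operations does a read-modify-write with order mo perform?
    struct RmwSync { bool acquire, release; };

    RmwSync RmwSyncFor(morder mo) {
      if (mo == mo_relaxed)                     return {false, false};
      if (mo == mo_acq_rel || mo == mo_seq_cst) return {true,  true };  // IsAcqRelOrder
      if (mo == mo_release)                     return {false, true };  // IsReleaseOrder
      return {true, false};                     // mo_consume / mo_acquire
    }

    int main() {
      const char *names[] = {"relaxed", "consume", "acquire",
                             "release", "acq_rel", "seq_cst"};
      for (int i = mo_relaxed; i <= mo_seq_cst; i++) {
        RmwSync s = RmwSyncFor(static_cast<morder>(i));
        std::printf("%-7s  acquires=%d  releases=%d\n",
                    names[i], (int)s.acquire, (int)s.release);
      }
      return 0;
    }

The order of the else-if chain in the original matters only in that acq_rel and seq_cst must be tested before the weaker predicates, which also match them.
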
321 static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) { in NoTsanAtomicExchange() argument
326 static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) { in NoTsanAtomicFetchAdd() argument
331 static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) { in NoTsanAtomicFetchSub() argument
336 static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) { in NoTsanAtomicFetchAnd() argument
341 static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) { in NoTsanAtomicFetchOr() argument
346 static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) { in NoTsanAtomicFetchXor() argument
351 static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) { in NoTsanAtomicFetchNand() argument
357 morder mo) { in AtomicExchange() argument
358 return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo); in AtomicExchange()
363 morder mo) { in AtomicFetchAdd() argument
364 return AtomicRMW<T, func_add>(thr, pc, a, v, mo); in AtomicFetchAdd()
369 morder mo) { in AtomicFetchSub() argument
370 return AtomicRMW<T, func_sub>(thr, pc, a, v, mo); in AtomicFetchSub()
375 morder mo) { in AtomicFetchAnd() argument
376 return AtomicRMW<T, func_and>(thr, pc, a, v, mo); in AtomicFetchAnd()
381 morder mo) { in AtomicFetchOr() argument
382 return AtomicRMW<T, func_or>(thr, pc, a, v, mo); in AtomicFetchOr()
387 morder mo) { in AtomicFetchXor() argument
388 return AtomicRMW<T, func_xor>(thr, pc, a, v, mo); in AtomicFetchXor()
393 morder mo) { in AtomicFetchNand() argument
394 return AtomicRMW<T, func_nand>(thr, pc, a, v, mo); in AtomicFetchNand()
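
The NoTsan fetch operations and the AtomicExchange/AtomicFetch* wrappers above all reduce to one pattern: each public flavor forwards to the single AtomicRMW template, parameterized by a functor (func_xchg, func_add, func_sub, and so on) that performs the raw operation and returns the previous value. A minimal sketch of the same pattern, using std::atomic as a stand-in for the real func_* helpers, which are defined elsewhere in the file:

    #include <atomic>
    #include <cstdint>

    typedef int32_t a32;  // stand-in for the runtime's a32 typedef

    // Stand-ins for the func_* helpers: perform the raw operation and return
    // the previous value (std::atomic is used here purely for the sketch).
    inline a32 func_add(volatile std::atomic<a32> *a, a32 v) {
      return a->fetch_add(v);
    }
    inline a32 func_xchg(volatile std::atomic<a32> *a, a32 v) {
      return a->exchange(v);
    }

    // The shared core, in the spirit of AtomicRMW<T, F>: the order checks and
    // clock work are written once here; each flavor supplies only a functor.
    template <a32 (*F)(volatile std::atomic<a32> *, a32)>
    a32 SketchAtomicRMW(volatile std::atomic<a32> *a, a32 v) {
      // ... order validation and acquire/release of the sync clock elided ...
      return F(a, v);
    }

    // Thin wrappers, mirroring AtomicFetchAdd and AtomicExchange above.
    inline a32 SketchFetchAdd(volatile std::atomic<a32> *a, a32 v) {
      return SketchAtomicRMW<func_add>(a, v);
    }
    inline a32 SketchExchange(volatile std::atomic<a32> *a, a32 v) {
      return SketchAtomicRMW<func_xchg>(a, v);
    }
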
398 static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) { in NoTsanAtomicCAS() argument
399 return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo)); in NoTsanAtomicCAS()
404 morder mo, morder fmo) { in NoTsanAtomicCAS() argument
415 static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) { in NoTsanAtomicCAS() argument
416 NoTsanAtomicCAS(a, &c, v, mo, fmo); in NoTsanAtomicCAS()
422 volatile T *a, T *c, T v, morder mo, morder fmo) { in AtomicCAS() argument
426 bool write_lock = mo != mo_acquire && mo != mo_consume; in AtomicCAS()
427 if (mo != mo_relaxed) { in AtomicCAS()
432 if (IsAcqRelOrder(mo)) in AtomicCAS()
434 else if (IsReleaseOrder(mo)) in AtomicCAS()
436 else if (IsAcquireOrder(mo)) in AtomicCAS()
455 volatile T *a, T c, T v, morder mo, morder fmo) { in AtomicCAS() argument
456 AtomicCAS(thr, pc, a, &c, v, mo, fmo); in AtomicCAS()
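
The compare-and-swap path adds two wrinkles. First, a failure order fmo is accepted throughout, but the uninstrumented fallback at line 399 forwards only the success order. Second, AtomicCAS locks the sync variable for writing only when the order can release: write_lock is false for pure acquire or consume CAS. The value-returning overload is just a thin wrapper over the pointer-based one, as sketched below with std::atomic standing in for the real operation:

    #include <atomic>
    #include <cstdint>

    typedef int32_t a32;  // stand-in for the runtime's a32 typedef

    // Pointer flavor, mirroring NoTsanAtomicCAS(a, c, v, mo, fmo): only the
    // success order would be forwarded; the failure order is dropped.
    inline bool SketchCAS(volatile std::atomic<a32> *a, a32 *c, a32 v) {
      return a->compare_exchange_strong(*c, v);
    }

    // Value flavor, mirroring the overload that takes c by value: run the
    // pointer flavor on a local copy and return the value observed in *a,
    // which equals the expected value exactly when the exchange succeeded.
    inline a32 SketchCASVal(volatile std::atomic<a32> *a, a32 c, a32 v) {
      SketchCAS(a, &c, v);
      return c;
    }
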
461 static void NoTsanAtomicFence(morder mo) { in NoTsanAtomicFence() argument
465 static void AtomicFence(ThreadState *thr, uptr pc, morder mo) { in AtomicFence() argument
479 mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
483 AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
484 ScopedAtomic sa(thr, callpc, a, mo, __func__); \
491 morder mo, const char *func) in ScopedAtomic() argument
494 DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo); in ScopedAtomic()
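
Every public entry point expands the SCOPED_ATOMIC macro, and the matched fragments show three of its steps: the force_seq_cst_atomics runtime flag may upgrade the requested order to seq_cst, AtomicStatInc bumps the per-operation counters, and a ScopedAtomic guard brackets the call (logging it via DPrintf). A sketch of the order-upgrade step alone, with the flag read replaced by a plain boolean because the real flags() accessor is not part of this listing:

    enum morder { mo_relaxed, mo_consume, mo_acquire, mo_release,
                  mo_acq_rel, mo_seq_cst };

    // Stand-in for flags()->force_seq_cst_atomics: when set, every atomic
    // operation is treated as sequentially consistent regardless of the order
    // the program asked for.
    bool force_seq_cst_atomics = false;

    inline morder EffectiveOrder(morder mo) {
      return force_seq_cst_atomics ? mo_seq_cst : mo;
    }

The reassigned mo is what the rest of the macro uses, so with the flag set the program effectively runs with all-seq_cst atomics, which is handy when checking whether a failure depends on a weaker order.
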
504 static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) { in AtomicStatInc() argument
512 StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed in AtomicStatInc()
513 : mo == mo_consume ? StatAtomicConsume in AtomicStatInc()
514 : mo == mo_acquire ? StatAtomicAcquire in AtomicStatInc()
515 : mo == mo_release ? StatAtomicRelease in AtomicStatInc()
516 : mo == mo_acq_rel ? StatAtomicAcq_Rel in AtomicStatInc()
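
AtomicStatInc also buckets each operation by its memory order. The chained conditional is cut off in the listing after the acq_rel case and presumably ends with a sequentially-consistent counter. Rewritten as a switch purely for readability, with the final case marked as an assumption:

    // Stand-ins for the runtime's counter identifiers (StatType lives elsewhere).
    enum StatType { StatAtomicRelaxed, StatAtomicConsume, StatAtomicAcquire,
                    StatAtomicRelease, StatAtomicAcq_Rel, StatAtomicSeq_Cst };
    enum morder   { mo_relaxed, mo_consume, mo_acquire, mo_release,
                    mo_acq_rel, mo_seq_cst };

    StatType OrderStat(morder mo) {
      switch (mo) {
        case mo_relaxed: return StatAtomicRelaxed;
        case mo_consume: return StatAtomicConsume;
        case mo_acquire: return StatAtomicAcquire;
        case mo_release: return StatAtomicRelease;
        case mo_acq_rel: return StatAtomicAcq_Rel;
        default:         return StatAtomicSeq_Cst;  // assumed: the listing cuts off here
      }
    }
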
522 a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) { in __tsan_atomic8_load() argument
523 SCOPED_ATOMIC(Load, a, mo); in __tsan_atomic8_load()
527 a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) { in __tsan_atomic16_load() argument
528 SCOPED_ATOMIC(Load, a, mo); in __tsan_atomic16_load()
532 a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) { in __tsan_atomic32_load() argument
533 SCOPED_ATOMIC(Load, a, mo); in __tsan_atomic32_load()
537 a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) { in __tsan_atomic64_load() argument
538 SCOPED_ATOMIC(Load, a, mo); in __tsan_atomic64_load()
543 a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) { in __tsan_atomic128_load() argument
544 SCOPED_ATOMIC(Load, a, mo); in __tsan_atomic128_load()
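
These are the externally visible load entry points, one per access width from 8 to 128 bits, each expanding the same SCOPED_ATOMIC body. User code does not normally call them; the compiler rewrites atomic loads into these calls when building with -fsanitize=thread. For illustration, a direct call, with the typedefs and the numeric value of the acquire order assumed to match the public tsan_interface_atomic.h declarations:

    #include <cstdint>

    // Assumed to mirror the public header: a32 is the 32-bit atomic cell type
    // and the orders are numbered relaxed = 0, consume = 1, acquire = 2, ...
    typedef int32_t a32;
    typedef int     morder;
    const morder mo_acquire = 2;

    extern "C" a32 __tsan_atomic32_load(const volatile a32 *a, morder mo);

    // Reads a flag with acquire semantics through the runtime. Links only when
    // the TSan runtime is present, i.e. when building with -fsanitize=thread
    // (in which case the compiler emits such calls itself).
    a32 read_flag(const volatile a32 *flag) {
      return __tsan_atomic32_load(flag, mo_acquire);
    }
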
549 void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) { in __tsan_atomic8_store() argument
550 SCOPED_ATOMIC(Store, a, v, mo); in __tsan_atomic8_store()
554 void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) { in __tsan_atomic16_store() argument
555 SCOPED_ATOMIC(Store, a, v, mo); in __tsan_atomic16_store()
559 void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) { in __tsan_atomic32_store() argument
560 SCOPED_ATOMIC(Store, a, v, mo); in __tsan_atomic32_store()
564 void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) { in __tsan_atomic64_store() argument
565 SCOPED_ATOMIC(Store, a, v, mo); in __tsan_atomic64_store()
570 void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) { in __tsan_atomic128_store() argument
571 SCOPED_ATOMIC(Store, a, v, mo); in __tsan_atomic128_store()
576 a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) { in __tsan_atomic8_exchange() argument
577 SCOPED_ATOMIC(Exchange, a, v, mo); in __tsan_atomic8_exchange()
581 a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) { in __tsan_atomic16_exchange() argument
582 SCOPED_ATOMIC(Exchange, a, v, mo); in __tsan_atomic16_exchange()
586 a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) { in __tsan_atomic32_exchange() argument
587 SCOPED_ATOMIC(Exchange, a, v, mo); in __tsan_atomic32_exchange()
591 a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) { in __tsan_atomic64_exchange() argument
592 SCOPED_ATOMIC(Exchange, a, v, mo); in __tsan_atomic64_exchange()
597 a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) { in __tsan_atomic128_exchange() argument
598 SCOPED_ATOMIC(Exchange, a, v, mo); in __tsan_atomic128_exchange()
603 a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) { in __tsan_atomic8_fetch_add() argument
604 SCOPED_ATOMIC(FetchAdd, a, v, mo); in __tsan_atomic8_fetch_add()
608 a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) { in __tsan_atomic16_fetch_add() argument
609 SCOPED_ATOMIC(FetchAdd, a, v, mo); in __tsan_atomic16_fetch_add()
613 a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) { in __tsan_atomic32_fetch_add() argument
614 SCOPED_ATOMIC(FetchAdd, a, v, mo); in __tsan_atomic32_fetch_add()
618 a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) { in __tsan_atomic64_fetch_add() argument
619 SCOPED_ATOMIC(FetchAdd, a, v, mo); in __tsan_atomic64_fetch_add()
624 a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) { in __tsan_atomic128_fetch_add() argument
625 SCOPED_ATOMIC(FetchAdd, a, v, mo); in __tsan_atomic128_fetch_add()
630 a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) { in __tsan_atomic8_fetch_sub() argument
631 SCOPED_ATOMIC(FetchSub, a, v, mo); in __tsan_atomic8_fetch_sub()
635 a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) { in __tsan_atomic16_fetch_sub() argument
636 SCOPED_ATOMIC(FetchSub, a, v, mo); in __tsan_atomic16_fetch_sub()
640 a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) { in __tsan_atomic32_fetch_sub() argument
641 SCOPED_ATOMIC(FetchSub, a, v, mo); in __tsan_atomic32_fetch_sub()
645 a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) { in __tsan_atomic64_fetch_sub() argument
646 SCOPED_ATOMIC(FetchSub, a, v, mo); in __tsan_atomic64_fetch_sub()
651 a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) { in __tsan_atomic128_fetch_sub() argument
652 SCOPED_ATOMIC(FetchSub, a, v, mo); in __tsan_atomic128_fetch_sub()
657 a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) { in __tsan_atomic8_fetch_and() argument
658 SCOPED_ATOMIC(FetchAnd, a, v, mo); in __tsan_atomic8_fetch_and()
662 a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) { in __tsan_atomic16_fetch_and() argument
663 SCOPED_ATOMIC(FetchAnd, a, v, mo); in __tsan_atomic16_fetch_and()
667 a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) { in __tsan_atomic32_fetch_and() argument
668 SCOPED_ATOMIC(FetchAnd, a, v, mo); in __tsan_atomic32_fetch_and()
672 a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) { in __tsan_atomic64_fetch_and() argument
673 SCOPED_ATOMIC(FetchAnd, a, v, mo); in __tsan_atomic64_fetch_and()
678 a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) { in __tsan_atomic128_fetch_and() argument
679 SCOPED_ATOMIC(FetchAnd, a, v, mo); in __tsan_atomic128_fetch_and()
684 a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) { in __tsan_atomic8_fetch_or() argument
685 SCOPED_ATOMIC(FetchOr, a, v, mo); in __tsan_atomic8_fetch_or()
689 a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) { in __tsan_atomic16_fetch_or() argument
690 SCOPED_ATOMIC(FetchOr, a, v, mo); in __tsan_atomic16_fetch_or()
694 a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) { in __tsan_atomic32_fetch_or() argument
695 SCOPED_ATOMIC(FetchOr, a, v, mo); in __tsan_atomic32_fetch_or()
699 a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) { in __tsan_atomic64_fetch_or() argument
700 SCOPED_ATOMIC(FetchOr, a, v, mo); in __tsan_atomic64_fetch_or()
705 a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) { in __tsan_atomic128_fetch_or() argument
706 SCOPED_ATOMIC(FetchOr, a, v, mo); in __tsan_atomic128_fetch_or()
711 a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) { in __tsan_atomic8_fetch_xor() argument
712 SCOPED_ATOMIC(FetchXor, a, v, mo); in __tsan_atomic8_fetch_xor()
716 a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) { in __tsan_atomic16_fetch_xor() argument
717 SCOPED_ATOMIC(FetchXor, a, v, mo); in __tsan_atomic16_fetch_xor()
721 a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) { in __tsan_atomic32_fetch_xor() argument
722 SCOPED_ATOMIC(FetchXor, a, v, mo); in __tsan_atomic32_fetch_xor()
726 a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) { in __tsan_atomic64_fetch_xor() argument
727 SCOPED_ATOMIC(FetchXor, a, v, mo); in __tsan_atomic64_fetch_xor()
732 a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) { in __tsan_atomic128_fetch_xor() argument
733 SCOPED_ATOMIC(FetchXor, a, v, mo); in __tsan_atomic128_fetch_xor()
738 a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) { in __tsan_atomic8_fetch_nand() argument
739 SCOPED_ATOMIC(FetchNand, a, v, mo); in __tsan_atomic8_fetch_nand()
743 a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) { in __tsan_atomic16_fetch_nand() argument
744 SCOPED_ATOMIC(FetchNand, a, v, mo); in __tsan_atomic16_fetch_nand()
748 a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) { in __tsan_atomic32_fetch_nand() argument
749 SCOPED_ATOMIC(FetchNand, a, v, mo); in __tsan_atomic32_fetch_nand()
753 a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) { in __tsan_atomic64_fetch_nand() argument
754 SCOPED_ATOMIC(FetchNand, a, v, mo); in __tsan_atomic64_fetch_nand()
759 a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) { in __tsan_atomic128_fetch_nand() argument
760 SCOPED_ATOMIC(FetchNand, a, v, mo); in __tsan_atomic128_fetch_nand()
766 morder mo, morder fmo) { in __tsan_atomic8_compare_exchange_strong() argument
767 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); in __tsan_atomic8_compare_exchange_strong()
772 morder mo, morder fmo) { in __tsan_atomic16_compare_exchange_strong() argument
773 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); in __tsan_atomic16_compare_exchange_strong()
778 morder mo, morder fmo) { in __tsan_atomic32_compare_exchange_strong() argument
779 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); in __tsan_atomic32_compare_exchange_strong()
784 morder mo, morder fmo) { in __tsan_atomic64_compare_exchange_strong() argument
785 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); in __tsan_atomic64_compare_exchange_strong()
791 morder mo, morder fmo) { in __tsan_atomic128_compare_exchange_strong() argument
792 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); in __tsan_atomic128_compare_exchange_strong()
798 morder mo, morder fmo) { in __tsan_atomic8_compare_exchange_weak() argument
799 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); in __tsan_atomic8_compare_exchange_weak()
804 morder mo, morder fmo) { in __tsan_atomic16_compare_exchange_weak() argument
805 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); in __tsan_atomic16_compare_exchange_weak()
810 morder mo, morder fmo) { in __tsan_atomic32_compare_exchange_weak() argument
811 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); in __tsan_atomic32_compare_exchange_weak()
816 morder mo, morder fmo) { in __tsan_atomic64_compare_exchange_weak() argument
817 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); in __tsan_atomic64_compare_exchange_weak()
823 morder mo, morder fmo) { in __tsan_atomic128_compare_exchange_weak() argument
824 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); in __tsan_atomic128_compare_exchange_weak()
830 morder mo, morder fmo) { in __tsan_atomic8_compare_exchange_val() argument
831 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); in __tsan_atomic8_compare_exchange_val()
836 morder mo, morder fmo) { in __tsan_atomic16_compare_exchange_val() argument
837 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); in __tsan_atomic16_compare_exchange_val()
842 morder mo, morder fmo) { in __tsan_atomic32_compare_exchange_val() argument
843 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); in __tsan_atomic32_compare_exchange_val()
848 morder mo, morder fmo) { in __tsan_atomic64_compare_exchange_val() argument
849 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); in __tsan_atomic64_compare_exchange_val()
855 morder mo, morder fmo) { in __tsan_atomic128_compare_exchange_val() argument
856 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo); in __tsan_atomic128_compare_exchange_val()
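
The compare-exchange entry points come in three flavors per width: _strong and _weak take the expected value by pointer and report success, while _val takes it by value and returns what was observed in the target. All of them expand to the same SCOPED_ATOMIC(CAS, ...) body. A hedged usage sketch; the declarations and numeric order values below are assumptions meant to match the public header rather than copies of it:

    #include <cstdint>

    typedef int32_t a32;
    typedef int     morder;
    const morder mo_acquire = 2;  // assumed numbering, as above
    const morder mo_acq_rel = 4;

    // Assumed declarations: _strong reports success, _val returns the value
    // observed in *a.
    extern "C" int __tsan_atomic32_compare_exchange_strong(
        volatile a32 *a, a32 *c, a32 v, morder mo, morder fmo);
    extern "C" a32 __tsan_atomic32_compare_exchange_val(
        volatile a32 *a, a32 c, a32 v, morder mo, morder fmo);

    // Try to take a simple lock word: 0 -> 1, acq_rel on success, acquire on
    // failure. Links only against the TSan runtime.
    bool try_lock(volatile a32 *word) {
      a32 expected = 0;
      return __tsan_atomic32_compare_exchange_strong(word, &expected, 1,
                                                     mo_acq_rel, mo_acquire) != 0;
    }
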
861 void __tsan_atomic_thread_fence(morder mo) { in __tsan_atomic_thread_fence() argument
863 SCOPED_ATOMIC(Fence, mo); in __tsan_atomic_thread_fence()
867 void __tsan_atomic_signal_fence(morder mo) { in __tsan_atomic_signal_fence() argument
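
The listing ends with the two fence entry points. A thread fence goes through the same SCOPED_ATOMIC machinery as every other operation; a signal fence only has to order the current thread against its own signal handlers, so the runtime has little to model for it. For completeness, the source-level constructs that the instrumentation lowers to these calls when building with -fsanitize=thread:

    #include <atomic>

    // Under -fsanitize=thread the instrumentation lowers these standard fences
    // to __tsan_atomic_thread_fence / __tsan_atomic_signal_fence calls.
    void publish_with_fence() {
      std::atomic_thread_fence(std::memory_order_release);
      std::atomic_signal_fence(std::memory_order_seq_cst);
    }
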