| // -*- C++ -*- header. |
| |
| // Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc. |
| // |
| // This file is part of the GNU ISO C++ Library. This library is free |
| // software; you can redistribute it and/or modify it under the |
| // terms of the GNU General Public License as published by the |
| // Free Software Foundation; either version 3, or (at your option) |
| // any later version. |
| |
| // This library is distributed in the hope that it will be useful, |
| // but WITHOUT ANY WARRANTY; without even the implied warranty of |
| // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| // GNU General Public License for more details. |
| |
| // Under Section 7 of GPL version 3, you are granted additional |
| // permissions described in the GCC Runtime Library Exception, version |
| // 3.1, as published by the Free Software Foundation. |
| |
| // You should have received a copy of the GNU General Public License and |
| // a copy of the GCC Runtime Library Exception along with this program; |
| // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see |
| // <http://www.gnu.org/licenses/>. |
| |
| /** @file atomic |
| * This is a Standard C++ Library header. |
| */ |
| |
| // Based on "C++ Atomic Types and Operations" by Hans Boehm and Lawrence Crowl. |
| // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2427.html |
| |
| #ifndef _GLIBCXX_ATOMIC |
| #define _GLIBCXX_ATOMIC 1 |
| |
| #pragma GCC system_header |
| |
| #ifndef __GXX_EXPERIMENTAL_CXX0X__ |
| # include <bits/c++0x_warning.h> |
| #endif |
| |
| #include <bits/atomic_base.h> |
| #include <cstddef> |
| |
| _GLIBCXX_BEGIN_NAMESPACE(std) |
| |
| /** |
| * @addtogroup atomics |
| * @{ |
| */ |
| |
| /// kill_dependency |
| template<typename _Tp> |
| inline _Tp |
| kill_dependency(_Tp __y) |
| { |
| _Tp ret(__y); |
| return ret; |
| } |
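| |
| // A minimal usage sketch of kill_dependency (illustrative only; __value, |
| // __a, __p and __n are hypothetical names). A value read with |
| // memory_order_consume carries a dependency; kill_dependency returns a |
| // copy that does not: |
| // |
| // int __value = 42; |
| // std::atomic<int*> __a(&__value); |
| // int* __p = __a.load(memory_order_consume); |
| // int __n = std::kill_dependency(*__p); // dependency chain ends at __n |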
| |
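| // Map a success memory order to the implied failure order used by the |
| // single-order compare_exchange_* overloads below: release weakens to |
| // relaxed and acq_rel weakens to acquire, since a failed exchange |
| // performs no store; any other order is used unchanged. |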
| inline memory_order |
| __calculate_memory_order(memory_order __m) |
| { |
| const bool __cond1 = __m == memory_order_release; |
| const bool __cond2 = __m == memory_order_acq_rel; |
| memory_order __mo1(__cond1 ? memory_order_relaxed : __m); |
| memory_order __mo2(__cond2 ? memory_order_acquire : __mo1); |
| return __mo2; |
| } |
| |
| // |
| // Three nested namespaces for atomic implementation details. |
| // |
| // The nested namespace inlined into std:: is determined by the value |
| // of the _GLIBCXX_ATOMIC_PROPERTY macro and the resulting |
| // ATOMIC_*_LOCK_FREE macros. See file atomic_base.h. |
| // |
| // 0 == __atomic0 == Never lock-free |
| // 1 == __atomic1 == Best available, sometimes lock-free |
| // 2 == __atomic2 == Always lock-free |
| #include <bits/atomic_0.h> |
| #include <bits/atomic_2.h> |
| |
| /// atomic |
| /// 29.4.3, Generic atomic type, primary class template. |
| template<typename _Tp> |
| struct atomic |
| { |
| private: |
| _Tp _M_i; |
| |
| public: |
| atomic() = default; |
| ~atomic() = default; |
| atomic(const atomic&) = delete; |
| atomic& operator=(const atomic&) volatile = delete; |
| |
| atomic(_Tp __i) : _M_i(__i) { } |
| |
| operator _Tp() const; |
| |
| _Tp |
| operator=(_Tp __i) { store(__i); return __i; } |
| |
| bool |
| is_lock_free() const volatile; |
| |
| void |
| store(_Tp, memory_order = memory_order_seq_cst) volatile; |
| |
| _Tp |
| load(memory_order = memory_order_seq_cst) const volatile; |
| |
| _Tp |
| exchange(_Tp __i, memory_order = memory_order_seq_cst) volatile; |
| |
| bool |
| compare_exchange_weak(_Tp&, _Tp, memory_order, memory_order) volatile; |
| |
| bool |
| compare_exchange_strong(_Tp&, _Tp, memory_order, memory_order) volatile; |
| |
| bool |
| compare_exchange_weak(_Tp&, _Tp, |
| memory_order = memory_order_seq_cst) volatile; |
| |
| bool |
| compare_exchange_strong(_Tp&, _Tp, |
| memory_order = memory_order_seq_cst) volatile; |
| }; |
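| |
| // An illustrative sketch of the interface above (names and values are |
| // hypothetical; for a type such as int the operations are provided by |
| // the explicit specializations further below): |
| // |
| // std::atomic<int> __a(0); |
| // __a.store(1); // memory_order_seq_cst by default |
| // int __expected = 1; |
| // bool __ok = __a.compare_exchange_strong(__expected, 2); |
| // int __cur = __a.load(); // 2 if the exchange succeeded |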
| |
| |
| /// Partial specialization for pointer types. |
| template<typename _Tp> |
| struct atomic<_Tp*> : atomic_address |
| { |
| atomic() = default; |
| ~atomic() = default; |
| atomic(const atomic&) = delete; |
| atomic& operator=(const atomic&) volatile = delete; |
| |
| atomic(_Tp* __v) : atomic_address(__v) { } |
| |
| void |
| store(_Tp* __v, memory_order __m = memory_order_seq_cst) |
| { atomic_address::store(__v, __m); } |
| |
| _Tp* |
| load(memory_order __m = memory_order_seq_cst) const |
| { return static_cast<_Tp*>(atomic_address::load(__m)); } |
| |
| _Tp* |
| exchange(_Tp* __v, memory_order __m = memory_order_seq_cst) |
| { return static_cast<_Tp*>(atomic_address::exchange(__v, __m)); } |
| |
| bool |
| compare_exchange_weak(_Tp*&, _Tp*, memory_order, memory_order); |
| |
| bool |
| compare_exchange_strong(_Tp*&, _Tp*, memory_order, memory_order); |
| |
| bool |
| compare_exchange_weak(_Tp*&, _Tp*, memory_order = memory_order_seq_cst); |
| |
| bool |
| compare_exchange_strong(_Tp*&, _Tp*, memory_order = memory_order_seq_cst); |
| |
| _Tp* |
| fetch_add(ptrdiff_t, memory_order = memory_order_seq_cst); |
| |
| _Tp* |
| fetch_sub(ptrdiff_t, memory_order = memory_order_seq_cst); |
| |
| operator _Tp*() const |
| { return load(); } |
| |
| _Tp* |
| operator=(_Tp* __v) |
| { |
| store(__v); |
| return __v; |
| } |
| |
| _Tp* |
| operator++(int) { return fetch_add(1); } |
| |
| _Tp* |
| operator--(int) { return fetch_sub(1); } |
| |
| _Tp* |
| operator++() { return fetch_add(1) + 1; } |
| |
| _Tp* |
| operator--() { return fetch_sub(1) - 1; } |
| |
| _Tp* |
| operator+=(ptrdiff_t __d) |
| { return fetch_add(__d) + __d; } |
| |
| _Tp* |
| operator-=(ptrdiff_t __d) |
| { return fetch_sub(__d) - __d; } |
| }; |
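| |
| // An illustrative sketch (hypothetical names): arithmetic on atomic<_Tp*> |
| // is scaled by sizeof(_Tp), mirroring built-in pointer arithmetic: |
| // |
| // int __buf[4] = { 0, 1, 2, 3 }; |
| // std::atomic<int*> __p(__buf); |
| // int* __q = __p.fetch_add(2); // returns __buf; __p now holds __buf + 2 |
| // ++__p; // __p now holds __buf + 3 |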
| |
| |
| /// Explicit specialization for void* |
| template<> |
| struct atomic<void*> : public atomic_address |
| { |
| typedef void* __integral_type; |
| typedef atomic_address __base_type; |
| |
| atomic() = default; |
| ~atomic() = default; |
| atomic(const atomic&) = delete; |
| atomic& operator=(const atomic&) volatile = delete; |
| |
| atomic(__integral_type __i) : __base_type(__i) { } |
| |
| using __base_type::operator __integral_type; |
| using __base_type::operator=; |
| }; |
| |
| /// Explicit specialization for bool. |
| template<> |
| struct atomic<bool> : public atomic_bool |
| { |
| typedef bool __integral_type; |
| typedef atomic_bool __base_type; |
| |
| atomic() = default; |
| ~atomic() = default; |
| atomic(const atomic&) = delete; |
| atomic& operator=(const atomic&) volatile = delete; |
| |
| atomic(__integral_type __i) : __base_type(__i) { } |
| |
| using __base_type::operator __integral_type; |
| using __base_type::operator=; |
| }; |
| |
| /// Explicit specialization for char. |
| template<> |
| struct atomic<char> : public atomic_char |
| { |
| typedef char __integral_type; |
| typedef atomic_char __base_type; |
| |
| atomic() = default; |
| ~atomic() = default; |
| atomic(const atomic&) = delete; |
| atomic& operator=(const atomic&) volatile = delete; |
| |
| atomic(__integral_type __i) : __base_type(__i) { } |
| |
| using __base_type::operator __integral_type; |
| using __base_type::operator=; |
| }; |
| |
| /// Explicit specialization for signed char. |
| template<> |
| struct atomic<signed char> : public atomic_schar |
| { |
| typedef signed char __integral_type; |
| typedef atomic_schar __base_type; |
| |
| atomic() = default; |
| ~atomic() = default; |
| atomic(const atomic&) = delete; |
| atomic& operator=(const atomic&) volatile = delete; |
| |
| atomic(__integral_type __i) : __base_type(__i) { } |
| |
| using __base_type::operator __integral_type; |
| using __base_type::operator=; |
| }; |
| |
| /// Explicit specialization for unsigned char. |
| template<> |
| struct atomic<unsigned char> : public atomic_uchar |
| { |
| typedef unsigned char __integral_type; |
| typedef atomic_uchar __base_type; |
| |
| atomic() = default; |
| ~atomic() = default; |
| atomic(const atomic&) = delete; |
| atomic& operator=(const atomic&) volatile = delete; |
| |
| atomic(__integral_type __i) : __base_type(__i) { } |
| |
| using __base_type::operator __integral_type; |
| using __base_type::operator=; |
| }; |
| |
| /// Explicit specialization for short. |
| template<> |
| struct atomic<short> : public atomic_short |
| { |
| typedef short __integral_type; |
| typedef atomic_short __base_type; |
| |
| atomic() = default; |
| ~atomic() = default; |
| atomic(const atomic&) = delete; |
| atomic& operator=(const atomic&) volatile = delete; |
| |
| atomic(__integral_type __i) : __base_type(__i) { } |
| |
| using __base_type::operator __integral_type; |
| using __base_type::operator=; |
| }; |
| |
| /// Explicit specialization for unsigned short. |
| template<> |
| struct atomic<unsigned short> : public atomic_ushort |
| { |
| typedef unsigned short __integral_type; |
| typedef atomic_ushort __base_type; |
| |
| atomic() = default; |
| ~atomic() = default; |
| atomic(const atomic&) = delete; |
| atomic& operator=(const atomic&) volatile = delete; |
| |
| atomic(__integral_type __i) : __base_type(__i) { } |
| |
| using __base_type::operator __integral_type; |
| using __base_type::operator=; |
| }; |
| |
| /// Explicit specialization for int. |
| template<> |
| struct atomic<int> : atomic_int |
| { |
| typedef int __integral_type; |
| typedef atomic_int __base_type; |
| |
| atomic() = default; |
| ~atomic() = default; |
| atomic(const atomic&) = delete; |
| atomic& operator=(const atomic&) volatile = delete; |
| |
| atomic(__integral_type __i) : __base_type(__i) { } |
| |
| using __base_type::operator __integral_type; |
| using __base_type::operator=; |
| }; |
| |
| /// Explicit specialization for unsigned int. |
| template<> |
| struct atomic<unsigned int> : public atomic_uint |
| { |
| typedef unsigned int __integral_type; |
| typedef atomic_uint __base_type; |
| |
| atomic() = default; |
| ~atomic() = default; |
| atomic(const atomic&) = delete; |
| atomic& operator=(const atomic&) volatile = delete; |
| |
| atomic(__integral_type __i) : __base_type(__i) { } |
| |
| using __base_type::operator __integral_type; |
| using __base_type::operator=; |
| }; |
| |
| /// Explicit specialization for long. |
| template<> |
| struct atomic<long> : public atomic_long |
| { |
| typedef long __integral_type; |
| typedef atomic_long __base_type; |
| |
| atomic() = default; |
| ~atomic() = default; |
| atomic(const atomic&) = delete; |
| atomic& operator=(const atomic&) volatile = delete; |
| |
| atomic(__integral_type __i) : __base_type(__i) { } |
| |
| using __base_type::operator __integral_type; |
| using __base_type::operator=; |
| }; |
| |
| /// Explicit specialization for unsigned long. |
| template<> |
| struct atomic<unsigned long> : public atomic_ulong |
| { |
| typedef unsigned long __integral_type; |
| typedef atomic_ulong __base_type; |
| |
| atomic() = default; |
| ~atomic() = default; |
| atomic(const atomic&) = delete; |
| atomic& operator=(const atomic&) volatile = delete; |
| |
| atomic(__integral_type __i) : __base_type(__i) { } |
| |
| using __base_type::operator __integral_type; |
| using __base_type::operator=; |
| }; |
| |
| /// Explicit specialization for long long. |
| template<> |
| struct atomic<long long> : public atomic_llong |
| { |
| typedef long long __integral_type; |
| typedef atomic_llong __base_type; |
| |
| atomic() = default; |
| ~atomic() = default; |
| atomic(const atomic&) = delete; |
| atomic& operator=(const atomic&) volatile = delete; |
| |
| atomic(__integral_type __i) : __base_type(__i) { } |
| |
| using __base_type::operator __integral_type; |
| using __base_type::operator=; |
| }; |
| |
| /// Explicit specialization for unsigned long long. |
| template<> |
| struct atomic<unsigned long long> : public atomic_ullong |
| { |
| typedef unsigned long long __integral_type; |
| typedef atomic_ullong __base_type; |
| |
| atomic() = default; |
| ~atomic() = default; |
| atomic(const atomic&) = delete; |
| atomic& operator=(const atomic&) volatile = delete; |
| |
| atomic(__integral_type __i) : __base_type(__i) { } |
| |
| using __base_type::operator __integral_type; |
| using __base_type::operator=; |
| }; |
| |
| /// Explicit specialization for wchar_t. |
| template<> |
| struct atomic<wchar_t> : public atomic_wchar_t |
| { |
| typedef wchar_t __integral_type; |
| typedef atomic_wchar_t __base_type; |
| |
| atomic() = default; |
| ~atomic() = default; |
| atomic(const atomic&) = delete; |
| atomic& operator=(const atomic&) volatile = delete; |
| |
| atomic(__integral_type __i) : __base_type(__i) { } |
| |
| using __base_type::operator __integral_type; |
| using __base_type::operator=; |
| }; |
| |
| /// Explicit specialization for char16_t. |
| template<> |
| struct atomic<char16_t> : public atomic_char16_t |
| { |
| typedef char16_t __integral_type; |
| typedef atomic_char16_t __base_type; |
| |
| atomic() = default; |
| ~atomic() = default; |
| atomic(const atomic&) = delete; |
| atomic& operator=(const atomic&) volatile = delete; |
| |
| atomic(__integral_type __i) : __base_type(__i) { } |
| |
| using __base_type::operator __integral_type; |
| using __base_type::operator=; |
| }; |
| |
| /// Explicit specialization for char32_t. |
| template<> |
| struct atomic<char32_t> : public atomic_char32_t |
| { |
| typedef char32_t __integral_type; |
| typedef atomic_char32_t __base_type; |
| |
| atomic() = default; |
| ~atomic() = default; |
| atomic(const atomic&) = delete; |
| atomic& operator=(const atomic&) volatile = delete; |
| |
| atomic(__integral_type __i) : __base_type(__i) { } |
| |
| using __base_type::operator __integral_type; |
| using __base_type::operator=; |
| }; |
| |
| template<typename _Tp> |
| bool |
| atomic<_Tp*>::compare_exchange_weak(_Tp*& __r, _Tp* __v, memory_order __m1, |
| memory_order __m2) |
| { |
| void** __vr = reinterpret_cast<void**>(&__r); |
| void* __vv = static_cast<void*>(__v); |
| return atomic_address::compare_exchange_weak(*__vr, __vv, __m1, __m2); |
| } |
| |
| template<typename _Tp> |
| bool |
| atomic<_Tp*>::compare_exchange_strong(_Tp*& __r, _Tp* __v, |
| memory_order __m1, |
| memory_order __m2) |
| { |
| void** __vr = reinterpret_cast<void**>(&__r); |
| void* __vv = static_cast<void*>(__v); |
| return atomic_address::compare_exchange_strong(*__vr, __vv, __m1, __m2); |
| } |
| |
| template<typename _Tp> |
| bool |
| atomic<_Tp*>::compare_exchange_weak(_Tp*& __r, _Tp* __v, |
| memory_order __m) |
| { |
| return compare_exchange_weak(__r, __v, __m, |
| __calculate_memory_order(__m)); |
| } |
| |
| template<typename _Tp> |
| bool |
| atomic<_Tp*>::compare_exchange_strong(_Tp*& __r, _Tp* __v, |
| memory_order __m) |
| { |
| return compare_exchange_strong(__r, __v, __m, |
| __calculate_memory_order(__m)); |
| } |
| |
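| // The offsets below are scaled by sizeof(_Tp) before being passed to the |
| // underlying atomic_address operations, so fetch_add(1) on an atomic<_Tp*> |
| // advances the stored pointer by one complete object, not one byte. |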
| template<typename _Tp> |
| _Tp* |
| atomic<_Tp*>::fetch_add(ptrdiff_t __d, memory_order __m) |
| { |
| void* __p = atomic_fetch_add_explicit(this, sizeof(_Tp) * __d, __m); |
| return static_cast<_Tp*>(__p); |
| } |
| |
| template<typename _Tp> |
| _Tp* |
| atomic<_Tp*>::fetch_sub(ptrdiff_t __d, memory_order __m) |
| { |
| void* __p = atomic_fetch_sub_explicit(this, sizeof(_Tp) * __d, __m); |
| return static_cast<_Tp*>(__p); |
| } |
| |
| // Convenience function definitions, atomic_flag. |
| inline bool |
| atomic_flag_test_and_set_explicit(atomic_flag* __a, memory_order __m) |
| { return __a->test_and_set(__m); } |
| |
| inline void |
| atomic_flag_clear_explicit(atomic_flag* __a, memory_order __m) |
| { __a->clear(__m); } |
| |
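| // A minimal spin-lock sketch built from the two functions above |
| // (illustrative only; __lock and __critical_section are hypothetical): |
| // |
| // std::atomic_flag __lock = ATOMIC_FLAG_INIT; |
| // |
| // void __critical_section() |
| // { |
| // while (atomic_flag_test_and_set_explicit(&__lock, memory_order_acquire)) |
| // ; // spin until the flag is clear |
| // // ... exclusive work ... |
| // atomic_flag_clear_explicit(&__lock, memory_order_release); |
| // } |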
| |
| // Convenience function definitions, atomic_address. |
| inline bool |
| atomic_is_lock_free(const atomic_address* __a) |
| { return __a->is_lock_free(); } |
| |
| inline void |
| atomic_store(atomic_address* __a, void* __v) |
| { __a->store(__v); } |
| |
| inline void |
| atomic_store_explicit(atomic_address* __a, void* __v, memory_order __m) |
| { __a->store(__v, __m); } |
| |
| inline void* |
| atomic_load(const atomic_address* __a) |
| { return __a->load(); } |
| |
| inline void* |
| atomic_load_explicit(const atomic_address* __a, memory_order __m) |
| { return __a->load(__m); } |
| |
| inline void* |
| atomic_exchange(atomic_address* __a, void* __v) |
| { return __a->exchange(__v); } |
| |
| inline void* |
| atomic_exchange_explicit(atomic_address* __a, void* __v, memory_order __m) |
| { return __a->exchange(__v, __m); } |
| |
| inline bool |
| atomic_compare_exchange_weak(atomic_address* __a, void** __v1, void* __v2) |
| { |
| return __a->compare_exchange_weak(*__v1, __v2, memory_order_seq_cst, |
| memory_order_seq_cst); |
| } |
| |
| inline bool |
| atomic_compare_exchange_strong(atomic_address* __a, |
| void** __v1, void* __v2) |
| { |
| return __a->compare_exchange_strong(*__v1, __v2, memory_order_seq_cst, |
| memory_order_seq_cst); |
| } |
| |
| inline bool |
| atomic_compare_exchange_weak_explicit(atomic_address* __a, |
| void** __v1, void* __v2, |
| memory_order __m1, memory_order __m2) |
| { return __a->compare_exchange_weak(*__v1, __v2, __m1, __m2); } |
| |
| inline bool |
| atomic_compare_exchange_strong_explicit(atomic_address* __a, |
| void** __v1, void* __v2, |
| memory_order __m1, memory_order __m2) |
| { return __a->compare_exchange_strong(*__v1, __v2, __m1, __m2); } |
| |
| inline void* |
| atomic_fetch_add_explicit(atomic_address* __a, ptrdiff_t __d, |
| memory_order __m) |
| { return __a->fetch_add(__d, __m); } |
| |
| inline void* |
| atomic_fetch_add(atomic_address* __a, ptrdiff_t __d) |
| { return __a->fetch_add(__d); } |
| |
| inline void* |
| atomic_fetch_sub_explicit(atomic_address* __a, ptrdiff_t __d, |
| memory_order __m) |
| { return __a->fetch_sub(__d, __m); } |
| |
| inline void* |
| atomic_fetch_sub(atomic_address* __a, ptrdiff_t __d) |
| { return __a->fetch_sub(__d); } |
| |
| |
| // Convenience function definitions, atomic_bool. |
| inline bool |
| atomic_is_lock_free(const atomic_bool* __a) |
| { return __a->is_lock_free(); } |
| |
| inline void |
| atomic_store(atomic_bool* __a, bool __i) |
| { __a->store(__i); } |
| |
| inline void |
| atomic_store_explicit(atomic_bool* __a, bool __i, memory_order __m) |
| { __a->store(__i, __m); } |
| |
| inline bool |
| atomic_load(const atomic_bool* __a) |
| { return __a->load(); } |
| |
| inline bool |
| atomic_load_explicit(const atomic_bool* __a, memory_order __m) |
| { return __a->load(__m); } |
| |
| inline bool |
| atomic_exchange(atomic_bool* __a, bool __i) |
| { return __a->exchange(__i); } |
| |
| inline bool |
| atomic_exchange_explicit(atomic_bool* __a, bool __i, memory_order __m) |
| { return __a->exchange(__i, __m); } |
| |
| inline bool |
| atomic_compare_exchange_weak(atomic_bool* __a, bool* __i1, bool __i2) |
| { |
| return __a->compare_exchange_weak(*__i1, __i2, memory_order_seq_cst, |
| memory_order_seq_cst); |
| } |
| |
| inline bool |
| atomic_compare_exchange_strong(atomic_bool* __a, bool* __i1, bool __i2) |
| { |
| return __a->compare_exchange_strong(*__i1, __i2, memory_order_seq_cst, |
| memory_order_seq_cst); |
| } |
| |
| inline bool |
| atomic_compare_exchange_weak_explicit(atomic_bool* __a, bool* __i1, |
| bool __i2, memory_order __m1, |
| memory_order __m2) |
| { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); } |
| |
| inline bool |
| atomic_compare_exchange_strong_explicit(atomic_bool* __a, |
| bool* __i1, bool __i2, |
| memory_order __m1, memory_order __m2) |
| { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); } |
| |
| |
| |
| // Free-standing functions. The template argument should be constrained |
| // to integral types, as specified in the standard. |
| template<typename _ITp> |
| inline void |
| atomic_store_explicit(__atomic_base<_ITp>* __a, _ITp __i, memory_order __m) |
| { __a->store(__i, __m); } |
| |
| template<typename _ITp> |
| inline _ITp |
| atomic_load_explicit(const __atomic_base<_ITp>* __a, memory_order __m) |
| { return __a->load(__m); } |
| |
| template<typename _ITp> |
| inline _ITp |
| atomic_exchange_explicit(__atomic_base<_ITp>* __a, _ITp __i, |
| memory_order __m) |
| { return __a->exchange(__i, __m); } |
| |
| template<typename _ITp> |
| inline bool |
| atomic_compare_exchange_weak_explicit(__atomic_base<_ITp>* __a, |
| _ITp* __i1, _ITp __i2, |
| memory_order __m1, memory_order __m2) |
| { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); } |
| |
| template<typename _ITp> |
| inline bool |
| atomic_compare_exchange_strong_explicit(__atomic_base<_ITp>* __a, |
| _ITp* __i1, _ITp __i2, |
| memory_order __m1, |
| memory_order __m2) |
| { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); } |
| |
| template<typename _ITp> |
| inline _ITp |
| atomic_fetch_add_explicit(__atomic_base<_ITp>* __a, _ITp __i, |
| memory_order __m) |
| { return __a->fetch_add(__i, __m); } |
| |
| template<typename _ITp> |
| inline _ITp |
| atomic_fetch_sub_explicit(__atomic_base<_ITp>* __a, _ITp __i, |
| memory_order __m) |
| { return __a->fetch_sub(__i, __m); } |
| |
| template<typename _ITp> |
| inline _ITp |
| atomic_fetch_and_explicit(__atomic_base<_ITp>* __a, _ITp __i, |
| memory_order __m) |
| { return __a->fetch_and(__i, __m); } |
| |
| template<typename _ITp> |
| inline _ITp |
| atomic_fetch_or_explicit(__atomic_base<_ITp>* __a, _ITp __i, |
| memory_order __m) |
| { return __a->fetch_or(__i, __m); } |
| |
| template<typename _ITp> |
| inline _ITp |
| atomic_fetch_xor_explicit(__atomic_base<_ITp>* __a, _ITp __i, |
| memory_order __m) |
| { return __a->fetch_xor(__i, __m); } |
| |
| template<typename _ITp> |
| inline bool |
| atomic_is_lock_free(const __atomic_base<_ITp>* __a) |
| { return __a->is_lock_free(); } |
| |
| template<typename _ITp> |
| inline void |
| atomic_store(__atomic_base<_ITp>* __a, _ITp __i) |
| { atomic_store_explicit(__a, __i, memory_order_seq_cst); } |
| |
| template<typename _ITp> |
| inline _ITp |
| atomic_load(const __atomic_base<_ITp>* __a) |
| { return atomic_load_explicit(__a, memory_order_seq_cst); } |
| |
| template<typename _ITp> |
| inline _ITp |
| atomic_exchange(__atomic_base<_ITp>* __a, _ITp __i) |
| { return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); } |
| |
| template<typename _ITp> |
| inline bool |
| atomic_compare_exchange_weak(__atomic_base<_ITp>* __a, |
| _ITp* __i1, _ITp __i2) |
| { |
| return atomic_compare_exchange_weak_explicit(__a, __i1, __i2, |
| memory_order_seq_cst, |
| memory_order_seq_cst); |
| } |
| |
| template<typename _ITp> |
| inline bool |
| atomic_compare_exchange_strong(__atomic_base<_ITp>* __a, |
| _ITp* __i1, _ITp __i2) |
| { |
| return atomic_compare_exchange_strong_explicit(__a, __i1, __i2, |
| memory_order_seq_cst, |
| memory_order_seq_cst); |
| } |
| |
| template<typename _ITp> |
| inline _ITp |
| atomic_fetch_add(__atomic_base<_ITp>* __a, _ITp __i) |
| { return atomic_fetch_add_explicit(__a, __i, memory_order_seq_cst); } |
| |
| template<typename _ITp> |
| inline _ITp |
| atomic_fetch_sub(__atomic_base<_ITp>* __a, _ITp __i) |
| { return atomic_fetch_sub_explicit(__a, __i, memory_order_seq_cst); } |
| |
| template<typename _ITp> |
| inline _ITp |
| atomic_fetch_and(__atomic_base<_ITp>* __a, _ITp __i) |
| { return atomic_fetch_and_explicit(__a, __i, memory_order_seq_cst); } |
| |
| template<typename _ITp> |
| inline _ITp |
| atomic_fetch_or(__atomic_base<_ITp>* __a, _ITp __i) |
| { return atomic_fetch_or_explicit(__a, __i, memory_order_seq_cst); } |
| |
| template<typename _ITp> |
| inline _ITp |
| atomic_fetch_xor(__atomic_base<_ITp>* __a, _ITp __i) |
| { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); } |
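| |
| // A minimal usage sketch of the free-standing interface above |
| // (illustrative only; __counter and __n are hypothetical names): |
| // |
| // std::atomic_int __counter(0); |
| // atomic_fetch_add(&__counter, 1); // memory_order_seq_cst |
| // atomic_fetch_add_explicit(&__counter, 1, memory_order_relaxed); |
| // int __n = atomic_load(&__counter); |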
| |
| // @} group atomics |
| |
| _GLIBCXX_END_NAMESPACE |
| |
| #endif |