first commit
This commit is contained in:
@@ -0,0 +1,205 @@
|
||||
#ifndef BOOST_ATOMIC_ATOMIC_HPP
|
||||
#define BOOST_ATOMIC_ATOMIC_HPP
|
||||
|
||||
// Copyright (c) 2011 Helge Bahmann
|
||||
//
|
||||
// Distributed under the Boost Software License, Version 1.0.
|
||||
// See accompanying file LICENSE_1_0.txt or copy at
|
||||
// http://www.boost.org/LICENSE_1_0.txt)
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/cstdint.hpp>
|
||||
|
||||
#include <boost/memory_order.hpp>
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/platform.hpp>
|
||||
#include <boost/atomic/detail/type-classification.hpp>
|
||||
#include <boost/type_traits/is_signed.hpp>
|
||||
|
||||
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
|
||||
#ifndef BOOST_ATOMIC_CHAR_LOCK_FREE
|
||||
#define BOOST_ATOMIC_CHAR_LOCK_FREE 0
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_CHAR16_T_LOCK_FREE
|
||||
#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 0
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_CHAR32_T_LOCK_FREE
|
||||
#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 0
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_WCHAR_T_LOCK_FREE
|
||||
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 0
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_SHORT_LOCK_FREE
|
||||
#define BOOST_ATOMIC_SHORT_LOCK_FREE 0
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_INT_LOCK_FREE
|
||||
#define BOOST_ATOMIC_INT_LOCK_FREE 0
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_LONG_LOCK_FREE
|
||||
#define BOOST_ATOMIC_LONG_LOCK_FREE 0
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_LLONG_LOCK_FREE
|
||||
#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_POINTER_LOCK_FREE
|
||||
#define BOOST_ATOMIC_POINTER_LOCK_FREE 0
|
||||
#endif
|
||||
|
||||
#define BOOST_ATOMIC_ADDRESS_LOCK_FREE BOOST_ATOMIC_POINTER_LOCK_FREE
|
||||
|
||||
#ifndef BOOST_ATOMIC_BOOL_LOCK_FREE
|
||||
#define BOOST_ATOMIC_BOOL_LOCK_FREE 0
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_THREAD_FENCE
|
||||
#define BOOST_ATOMIC_THREAD_FENCE 0
|
||||
// Fallback thread fence, used when the platform backend does not supply
// BOOST_ATOMIC_THREAD_FENCE. It is a no-op: the lock-pool based fallback
// operations are already fully serialized, so no extra fence is needed.
inline void atomic_thread_fence(memory_order)
{
}
|
||||
#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_SIGNAL_FENCE
|
||||
#define BOOST_ATOMIC_SIGNAL_FENCE 0
|
||||
// Fallback signal fence, used when the platform backend does not supply
// BOOST_ATOMIC_SIGNAL_FENCE. Delegates to atomic_thread_fence, which is
// at least as strong as the required signal-fence semantics.
inline void atomic_signal_fence(memory_order order)
{
    atomic_thread_fence(order);
}
|
||||
#endif
|
||||
|
||||
template<typename T>
|
||||
class atomic :
|
||||
public atomics::detail::base_atomic<T, typename atomics::detail::classify<T>::type, atomics::detail::storage_size_of<T>::value, boost::is_signed<T>::value >
|
||||
{
|
||||
private:
|
||||
typedef T value_type;
|
||||
typedef atomics::detail::base_atomic<T, typename atomics::detail::classify<T>::type, atomics::detail::storage_size_of<T>::value, boost::is_signed<T>::value > super;
|
||||
public:
|
||||
atomic(void) : super() {}
|
||||
explicit atomic(const value_type & v) : super(v) {}
|
||||
|
||||
atomic & operator=(value_type v) volatile
|
||||
{
|
||||
super::operator=(v);
|
||||
return *const_cast<atomic *>(this);
|
||||
}
|
||||
private:
|
||||
atomic(const atomic &) /* =delete */ ;
|
||||
atomic & operator=(const atomic &) /* =delete */ ;
|
||||
};
|
||||
|
||||
// Convenience typedefs mirroring the C++11 <atomic> names.

// Character and fixed-width 8-bit integer types.
typedef atomic<char> atomic_char;
typedef atomic<unsigned char> atomic_uchar;
typedef atomic<signed char> atomic_schar;
typedef atomic<uint8_t> atomic_uint8_t;
typedef atomic<int8_t> atomic_int8_t;
// 16-bit integer types.
typedef atomic<unsigned short> atomic_ushort;
typedef atomic<short> atomic_short;
typedef atomic<uint16_t> atomic_uint16_t;
typedef atomic<int16_t> atomic_int16_t;
// 32-bit integer types.
typedef atomic<unsigned int> atomic_uint;
typedef atomic<int> atomic_int;
typedef atomic<uint32_t> atomic_uint32_t;
typedef atomic<int32_t> atomic_int32_t;
// long / 64-bit integer types.
typedef atomic<unsigned long> atomic_ulong;
typedef atomic<long> atomic_long;
typedef atomic<uint64_t> atomic_uint64_t;
typedef atomic<int64_t> atomic_int64_t;
#ifdef BOOST_HAS_LONG_LONG
typedef atomic<boost::ulong_long_type> atomic_ullong;
typedef atomic<boost::long_long_type> atomic_llong;
#endif
// Pointer, bool, and wide/Unicode character types.
typedef atomic<void*> atomic_address;
typedef atomic<bool> atomic_bool;
typedef atomic<wchar_t> atomic_wchar_t;
#if !defined(BOOST_NO_CXX11_CHAR16_T)
typedef atomic<char16_t> atomic_char16_t;
#endif
#if !defined(BOOST_NO_CXX11_CHAR32_T)
typedef atomic<char32_t> atomic_char32_t;
#endif

// "least" and "fast" width-variant integer types from <boost/cstdint.hpp>.
typedef atomic<int_least8_t> atomic_int_least8_t;
typedef atomic<uint_least8_t> atomic_uint_least8_t;
typedef atomic<int_least16_t> atomic_int_least16_t;
typedef atomic<uint_least16_t> atomic_uint_least16_t;
typedef atomic<int_least32_t> atomic_int_least32_t;
typedef atomic<uint_least32_t> atomic_uint_least32_t;
typedef atomic<int_least64_t> atomic_int_least64_t;
typedef atomic<uint_least64_t> atomic_uint_least64_t;
typedef atomic<int_fast8_t> atomic_int_fast8_t;
typedef atomic<uint_fast8_t> atomic_uint_fast8_t;
typedef atomic<int_fast16_t> atomic_int_fast16_t;
typedef atomic<uint_fast16_t> atomic_uint_fast16_t;
typedef atomic<int_fast32_t> atomic_int_fast32_t;
typedef atomic<uint_fast32_t> atomic_uint_fast32_t;
typedef atomic<int_fast64_t> atomic_int_fast64_t;
typedef atomic<uint_fast64_t> atomic_uint_fast64_t;
typedef atomic<intmax_t> atomic_intmax_t;
typedef atomic<uintmax_t> atomic_uintmax_t;

// Size and pointer-difference types.
typedef atomic<std::size_t> atomic_size_t;
typedef atomic<std::ptrdiff_t> atomic_ptrdiff_t;

// PGI seems to not support intptr_t/uintptr_t properly. BOOST_HAS_STDINT_H is not defined for this compiler by Boost.Config.
#if !defined(__PGIC__)

// Platforms known to provide intptr_t/uintptr_t in their headers.
#if (defined(BOOST_WINDOWS) && !defined(_WIN32_WCE)) \
    || (defined(_XOPEN_UNIX) && (_XOPEN_UNIX+0 > 0)) \
    || defined(__CYGWIN__) \
    || defined(macintosh) || defined(__APPLE__) || defined(__APPLE_CC__) \
    || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
typedef atomic<intptr_t> atomic_intptr_t;
typedef atomic<uintptr_t> atomic_uintptr_t;
#elif defined(__GNUC__) || defined(__clang__)
// Fall back to the compiler-provided builtin pointer-sized types.
#if defined(__INTPTR_TYPE__)
typedef atomic< __INTPTR_TYPE__ > atomic_intptr_t;
#endif
#if defined(__UINTPTR_TYPE__)
typedef atomic< __UINTPTR_TYPE__ > atomic_uintptr_t;
#endif
#endif

#endif
|
||||
|
||||
#ifndef BOOST_ATOMIC_FLAG_LOCK_FREE
|
||||
#define BOOST_ATOMIC_FLAG_LOCK_FREE 0
|
||||
class atomic_flag
|
||||
{
|
||||
public:
|
||||
atomic_flag(void) : v_(false) {}
|
||||
|
||||
bool
|
||||
test_and_set(memory_order order = memory_order_seq_cst)
|
||||
{
|
||||
return v_.exchange(true, order);
|
||||
}
|
||||
|
||||
void
|
||||
clear(memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
v_.store(false, order);
|
||||
}
|
||||
private:
|
||||
atomic_flag(const atomic_flag &) /* = delete */ ;
|
||||
atomic_flag & operator=(const atomic_flag &) /* = delete */ ;
|
||||
atomic<bool> v_;
|
||||
};
|
||||
#endif
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -0,0 +1,519 @@
|
||||
#ifndef BOOST_ATOMIC_DETAIL_BASE_HPP
|
||||
#define BOOST_ATOMIC_DETAIL_BASE_HPP
|
||||
|
||||
// Copyright (c) 2009 Helge Bahmann
|
||||
//
|
||||
// Distributed under the Boost Software License, Version 1.0.
|
||||
// See accompanying file LICENSE_1_0.txt or copy at
|
||||
// http://www.boost.org/LICENSE_1_0.txt)
|
||||
|
||||
// Base class definition and fallback implementation.
|
||||
// To be overridden (through partial specialization) by
|
||||
// platform implementations.
|
||||
|
||||
#include <string.h>
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/cstdint.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/lockpool.hpp>
|
||||
|
||||
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
// Operations common to every base_atomic specialization: implicit
// conversion to value_type, assignment, and the 3-argument forms of
// compare_exchange_{strong,weak} that derive the failure order from the
// success order via calculate_failure_order.
#define BOOST_ATOMIC_DECLARE_BASE_OPERATORS \
    operator value_type(void) volatile const \
    { \
        return load(memory_order_seq_cst); \
    } \
    \
    this_type & \
    operator=(value_type v) volatile \
    { \
        store(v, memory_order_seq_cst); \
        return *const_cast<this_type *>(this); \
    } \
    \
    bool \
    compare_exchange_strong( \
        value_type & expected, \
        value_type desired, \
        memory_order order = memory_order_seq_cst) volatile \
    { \
        return compare_exchange_strong(expected, desired, order, calculate_failure_order(order)); \
    } \
    \
    bool \
    compare_exchange_weak( \
        value_type & expected, \
        value_type desired, \
        memory_order order = memory_order_seq_cst) volatile \
    { \
        return compare_exchange_weak(expected, desired, order, calculate_failure_order(order)); \
    } \
    \

// Increment/decrement and += / -= operators, implemented in terms of the
// specialization's fetch_add/fetch_sub. Post-forms return the old value,
// pre-forms and compound assignment return the new value.
#define BOOST_ATOMIC_DECLARE_ADDITIVE_OPERATORS \
    value_type \
    operator++(int) volatile \
    { \
        return fetch_add(1); \
    } \
    \
    value_type \
    operator++(void) volatile \
    { \
        return fetch_add(1) + 1; \
    } \
    \
    value_type \
    operator--(int) volatile \
    { \
        return fetch_sub(1); \
    } \
    \
    value_type \
    operator--(void) volatile \
    { \
        return fetch_sub(1) - 1; \
    } \
    \
    value_type \
    operator+=(difference_type v) volatile \
    { \
        return fetch_add(v) + v; \
    } \
    \
    value_type \
    operator-=(difference_type v) volatile \
    { \
        return fetch_sub(v) - v; \
    } \

// Bitwise compound-assignment operators, implemented in terms of
// fetch_and/fetch_or/fetch_xor; each returns the new value.
#define BOOST_ATOMIC_DECLARE_BIT_OPERATORS \
    value_type \
    operator&=(difference_type v) volatile \
    { \
        return fetch_and(v) & v; \
    } \
    \
    value_type \
    operator|=(difference_type v) volatile \
    { \
        return fetch_or(v) | v; \
    } \
    \
    value_type \
    operator^=(difference_type v) volatile \
    { \
        return fetch_xor(v) ^ v; \
    } \

// Operator set for pointer specializations (no bitwise operators).
#define BOOST_ATOMIC_DECLARE_POINTER_OPERATORS \
    BOOST_ATOMIC_DECLARE_BASE_OPERATORS \
    BOOST_ATOMIC_DECLARE_ADDITIVE_OPERATORS \

// Operator set for integral specializations (full set).
#define BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS \
    BOOST_ATOMIC_DECLARE_BASE_OPERATORS \
    BOOST_ATOMIC_DECLARE_ADDITIVE_OPERATORS \
    BOOST_ATOMIC_DECLARE_BIT_OPERATORS \
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
// Derives the memory order to use on CAS failure from the requested
// success order: a failed compare-exchange performs no store, so any
// release component of the order is dropped.
inline memory_order
calculate_failure_order(memory_order order)
{
    if (order == memory_order_acq_rel)
        return memory_order_acquire;
    if (order == memory_order_release)
        return memory_order_relaxed;
    return order;
}
|
||||
|
||||
// Generic lock-based fallback for arbitrary T: every operation is
// serialized through a spinlock taken from the global lock pool (keyed
// on the storage address), and values are moved with memcpy/memcmp on
// the object representation.
// NOTE(review): assumes T is trivially copyable so that bytewise
// copy/compare is meaningful — not enforced here; confirm at call sites.
template<typename T, typename C, unsigned int Size, bool Sign>
class base_atomic {
private:
    typedef base_atomic this_type;
    typedef T value_type;
    // Scoped lock drawn from the lock pool for the storage address.
    typedef lockpool::scoped_lock guard_type;
public:
    base_atomic(void) {}

    // Non-atomic initialization: copies the byte representation of v.
    explicit base_atomic(const value_type & v)
    {
        memcpy(&v_, &v, sizeof(value_type));
    }

    // All memory_order parameters below are ignored: holding the pool
    // lock already makes every operation sequentially consistent.
    void
    store(value_type const& v, memory_order /*order*/ = memory_order_seq_cst) volatile
    {
        // const_cast strips the volatile qualifier picked up through the
        // volatile-qualified member function.
        guard_type guard(const_cast<char *>(v_));

        memcpy(const_cast<char *>(v_), &v, sizeof(value_type));
    }

    value_type
    load(memory_order /*order*/ = memory_order_seq_cst) volatile const
    {
        guard_type guard(const_cast<const char *>(v_));

        value_type v;
        memcpy(&v, const_cast<const char *>(v_), sizeof(value_type));
        return v;
    }

    // Bytewise CAS: compares object representations with memcmp; on
    // mismatch, copies the current value back into 'expected' and
    // returns false.
    bool
    compare_exchange_strong(
        value_type & expected,
        value_type const& desired,
        memory_order /*success_order*/,
        memory_order /*failure_order*/) volatile
    {
        guard_type guard(const_cast<char *>(v_));

        if (memcmp(const_cast<char *>(v_), &expected, sizeof(value_type)) == 0) {
            memcpy(const_cast<char *>(v_), &desired, sizeof(value_type));
            return true;
        } else {
            memcpy(&expected, const_cast<char *>(v_), sizeof(value_type));
            return false;
        }
    }

    // Under the lock a weak CAS cannot fail spuriously, so it simply
    // forwards to the strong form.
    bool
    compare_exchange_weak(
        value_type & expected,
        value_type const& desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    // Swaps in v and returns the previous value, all under the lock.
    value_type
    exchange(value_type const& v, memory_order /*order*/=memory_order_seq_cst) volatile
    {
        guard_type guard(const_cast<char *>(v_));

        value_type tmp;
        memcpy(&tmp, const_cast<char *>(v_), sizeof(value_type));

        memcpy(const_cast<char *>(v_), &v, sizeof(value_type));
        return tmp;
    }

    // Always false: this fallback takes a lock on every operation.
    bool
    is_lock_free(void) const volatile
    {
        return false;
    }

    BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;

    // Raw byte buffer holding the object representation of the value.
    char v_[sizeof(value_type)];
};
|
||||
|
||||
// Lock-based specialization for integral types (classified as 'int').
// The value is stored directly; every operation is serialized through
// the lock pool, so all memory_order arguments are ignored.
template<typename T, unsigned int Size, bool Sign>
class base_atomic<T, int, Size, Sign> {
private:
    typedef base_atomic this_type;
    typedef T value_type;
    typedef T difference_type;
    typedef lockpool::scoped_lock guard_type;
public:
    // Initialization is not atomic.
    explicit base_atomic(value_type v) : v_(v) {}
    base_atomic(void) {}

    void
    store(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
    {
        guard_type guard(const_cast<value_type *>(&v_));

        v_ = v;
    }

    value_type
    load(memory_order /*order*/ = memory_order_seq_cst) const volatile
    {
        guard_type guard(const_cast<value_type *>(&v_));

        value_type v = const_cast<const volatile value_type &>(v_);
        return v;
    }

    // Swaps in v and returns the previous value.
    value_type
    exchange(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
    {
        guard_type guard(const_cast<value_type *>(&v_));

        value_type old = v_;
        v_ = v;
        return old;
    }

    // On failure, 'expected' is updated with the current value.
    bool
    compare_exchange_strong(value_type & expected, value_type desired,
        memory_order /*success_order*/,
        memory_order /*failure_order*/) volatile
    {
        guard_type guard(const_cast<value_type *>(&v_));

        if (v_ == expected) {
            v_ = desired;
            return true;
        } else {
            expected = v_;
            return false;
        }
    }

    // Cannot fail spuriously under the lock; forwards to the strong form.
    bool
    compare_exchange_weak(value_type & expected, value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    // The fetch_* operations below are read-modify-write under the lock
    // and return the previous value.
    value_type
    fetch_add(difference_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
    {
        guard_type guard(const_cast<value_type *>(&v_));

        value_type old = v_;
        v_ += v;
        return old;
    }

    value_type
    fetch_sub(difference_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
    {
        guard_type guard(const_cast<value_type *>(&v_));

        value_type old = v_;
        v_ -= v;
        return old;
    }

    value_type
    fetch_and(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
    {
        guard_type guard(const_cast<value_type *>(&v_));

        value_type old = v_;
        v_ &= v;
        return old;
    }

    value_type
    fetch_or(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
    {
        guard_type guard(const_cast<value_type *>(&v_));

        value_type old = v_;
        v_ |= v;
        return old;
    }

    value_type
    fetch_xor(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
    {
        guard_type guard(const_cast<value_type *>(&v_));

        value_type old = v_;
        v_ ^= v;
        return old;
    }

    // Always false: this specialization locks on every operation.
    bool
    is_lock_free(void) const volatile
    {
        return false;
    }

    BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};
|
||||
|
||||
// Lock-based specialization for object pointers T*. Arithmetic is in
// units of T (difference_type is ptrdiff_t); every operation is
// serialized through the lock pool, so memory_order arguments are
// ignored.
template<typename T, unsigned int Size, bool Sign>
class base_atomic<T *, void *, Size, Sign> {
private:
    typedef base_atomic this_type;
    typedef T * value_type;
    typedef ptrdiff_t difference_type;
    typedef lockpool::scoped_lock guard_type;
public:
    // Initialization is not atomic.
    explicit base_atomic(value_type v) : v_(v) {}
    base_atomic(void) {}

    void
    store(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
    {
        guard_type guard(const_cast<value_type *>(&v_));
        v_ = v;
    }

    value_type
    load(memory_order /*order*/ = memory_order_seq_cst) const volatile
    {
        guard_type guard(const_cast<value_type *>(&v_));

        value_type v = const_cast<const volatile value_type &>(v_);
        return v;
    }

    // Swaps in v and returns the previous pointer.
    value_type
    exchange(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
    {
        guard_type guard(const_cast<value_type *>(&v_));

        value_type old = v_;
        v_ = v;
        return old;
    }

    // On failure, 'expected' is updated with the current pointer.
    bool
    compare_exchange_strong(value_type & expected, value_type desired,
        memory_order /*success_order*/,
        memory_order /*failure_order*/) volatile
    {
        guard_type guard(const_cast<value_type *>(&v_));

        if (v_ == expected) {
            v_ = desired;
            return true;
        } else {
            expected = v_;
            return false;
        }
    }

    // Cannot fail spuriously under the lock; forwards to the strong form.
    bool
    compare_exchange_weak(value_type & expected, value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    // Pointer arithmetic fetch operations; return the previous value.
    value_type fetch_add(difference_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
    {
        guard_type guard(const_cast<value_type *>(&v_));

        value_type old = v_;
        v_ += v;
        return old;
    }

    value_type fetch_sub(difference_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
    {
        guard_type guard(const_cast<value_type *>(&v_));

        value_type old = v_;
        v_ -= v;
        return old;
    }

    // Always false: this specialization locks on every operation.
    bool
    is_lock_free(void) const volatile
    {
        return false;
    }

    BOOST_ATOMIC_DECLARE_POINTER_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};
|
||||
|
||||
// Lock-based specialization for void*: no pointer arithmetic is
// possible, so only load/store/exchange/CAS are provided. Operations
// are serialized through the lock pool; memory_order arguments are
// ignored.
template<unsigned int Size, bool Sign>
class base_atomic<void *, void *, Size, Sign> {
private:
    typedef base_atomic this_type;
    typedef void * value_type;
    typedef lockpool::scoped_lock guard_type;
public:
    // Initialization is not atomic.
    explicit base_atomic(value_type v) : v_(v) {}
    base_atomic(void) {}

    void
    store(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
    {
        guard_type guard(const_cast<value_type *>(&v_));
        v_ = v;
    }

    value_type
    load(memory_order /*order*/ = memory_order_seq_cst) const volatile
    {
        guard_type guard(const_cast<value_type *>(&v_));

        value_type v = const_cast<const volatile value_type &>(v_);
        return v;
    }

    // Swaps in v and returns the previous pointer.
    value_type
    exchange(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
    {
        guard_type guard(const_cast<value_type *>(&v_));

        value_type old = v_;
        v_ = v;
        return old;
    }

    // On failure, 'expected' is updated with the current pointer.
    bool
    compare_exchange_strong(value_type & expected, value_type desired,
        memory_order /*success_order*/,
        memory_order /*failure_order*/) volatile
    {
        guard_type guard(const_cast<value_type *>(&v_));

        if (v_ == expected) {
            v_ = desired;
            return true;
        } else {
            expected = v_;
            return false;
        }
    }

    // Cannot fail spuriously under the lock; forwards to the strong form.
    bool
    compare_exchange_weak(value_type & expected, value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    // Always false: this specialization locks on every operation.
    bool
    is_lock_free(void) const volatile
    {
        return false;
    }

    BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -0,0 +1,872 @@
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CAS32STRONG_HPP
|
||||
#define BOOST_ATOMIC_DETAIL_CAS32STRONG_HPP
|
||||
|
||||
// Distributed under the Boost Software License, Version 1.0.
|
||||
// See accompanying file LICENSE_1_0.txt or copy at
|
||||
// http://www.boost.org/LICENSE_1_0.txt)
|
||||
//
|
||||
// Copyright (c) 2011 Helge Bahmann
|
||||
|
||||
// Build 8-, 16- and 32-bit atomic operations from
|
||||
// a platform_cmpxchg32_strong primitive.
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/cstdint.hpp>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/base.hpp>
|
||||
|
||||
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
/* integral types */
|
||||
|
||||
// 8-bit integral types built on the platform's strong 32-bit CAS: the
// value is widened into a 32-bit storage word so that
// platform_cmpxchg32_strong can operate on it. Ordering is provided by
// the platform_fence_* hooks supplied by the platform backend.
template<typename T, bool Sign>
class base_atomic<T, int, 1, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef T difference_type;
    // 32-bit word actually held in memory; value_type round-trips
    // through integral conversion on every access.
    typedef uint32_t storage_type;
public:
    // Initialization is not atomic.
    explicit base_atomic(value_type v) : v_(v) {}
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before_store(order);
        const_cast<volatile storage_type &>(v_) = v;
        platform_fence_after_store(order);
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile storage_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    // Implemented as a CAS retry loop.
    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
        return original;
    }

    // The underlying 32-bit CAS is strong, so weak == strong here.
    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    // Widens expected/desired to the storage word, performs the platform
    // CAS, and on failure converts the observed word back into 'expected'.
    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        platform_fence_before(success_order);

        storage_type expected_s = (storage_type) expected;
        storage_type desired_s = (storage_type) desired;

        bool success = platform_cmpxchg32_strong(expected_s, desired_s, &v_);

        if (success) {
            platform_fence_after(success_order);
        } else {
            platform_fence_after(failure_order);
            expected = (value_type) expected_s;
        }

        return success;
    }

    // The fetch_* operations below are CAS retry loops returning the
    // previous value.
    value_type
    fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed));
        return original;
    }

    // Always true: built directly on the platform CAS primitive.
    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    storage_type v_;
};
|
||||
|
||||
// 16-bit integral types built on the platform's strong 32-bit CAS:
// identical in structure to the 1-byte specialization, with the value
// widened into a 32-bit storage word for platform_cmpxchg32_strong.
template<typename T, bool Sign>
class base_atomic<T, int, 2, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef T difference_type;
    // 32-bit word actually held in memory.
    typedef uint32_t storage_type;
public:
    // Initialization is not atomic.
    explicit base_atomic(value_type v) : v_(v) {}
    base_atomic(void) {}

    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before_store(order);
        const_cast<volatile storage_type &>(v_) = v;
        platform_fence_after_store(order);
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile storage_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    // Implemented as a CAS retry loop.
    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
        return original;
    }

    // The underlying 32-bit CAS is strong, so weak == strong here.
    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    // Widens expected/desired to the storage word, performs the platform
    // CAS, and on failure converts the observed word back into 'expected'.
    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        platform_fence_before(success_order);

        storage_type expected_s = (storage_type) expected;
        storage_type desired_s = (storage_type) desired;

        bool success = platform_cmpxchg32_strong(expected_s, desired_s, &v_);

        if (success) {
            platform_fence_after(success_order);
        } else {
            platform_fence_after(failure_order);
            expected = (value_type) expected_s;
        }

        return success;
    }

    // The fetch_* operations below are CAS retry loops returning the
    // previous value.
    value_type
    fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed));
        return original;
    }

    // Always true: built directly on the platform CAS primitive.
    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    storage_type v_;
};
|
||||
|
||||
/* Specialization for integral types that are exactly 4 bytes wide: the value
   is stored in place (no widening to a separate storage word) and every RMW
   operation is built on the platform's strong 32-bit CAS. */
template<typename T, bool Sign>
class base_atomic<T, int, 4, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef T difference_type;
public:
    explicit base_atomic(value_type v) : v_(v) {}
    /* Like std::atomic, default construction leaves the value uninitialized. */
    base_atomic(void) {}

    /* Plain volatile store bracketed by the platform fences that implement
       the requested memory ordering. */
    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before_store(order);
        const_cast<volatile value_type &>(v_) = v;
        platform_fence_after_store(order);
    }

    /* Plain volatile load followed by the ordering fence. */
    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile value_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    /* Emulated exchange: CAS in a loop until the swap takes; returns the
       value observed immediately before the successful swap. */
    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
        return original;
    }

    /* The underlying primitive is a strong CAS, which is a valid
       implementation of the weak form, so simply forward. */
    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        platform_fence_before(success_order);

        /* NOTE(review): assumes platform_cmpxchg32_strong updates `expected`
           with the observed value on failure (mirrors the weak primitive's
           behavior in cas32weak.hpp) — confirm against each platform impl. */
        bool success = platform_cmpxchg32_strong(expected, desired, &v_);

        if (success) {
            platform_fence_after(success_order);
        } else {
            platform_fence_after(failure_order);
        }

        return success;
    }

    /* All fetch_* operations are CAS loops: re-read, recompute, retry until
       the word is unchanged between the read and the CAS. */
    value_type
    fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed));
        return original;
    }

    /* This specialization is only selected when the platform CAS exists,
       so it is always lock-free. */
    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};
|
||||
|
||||
/* pointer types */
|
||||
|
||||
/* Specialization for plain void* on 32-bit targets: the pointer is stored
   directly and updated through the platform's strong 32-bit CAS. */
template<bool Sign>
class base_atomic<void *, void *, 4, Sign> {
    typedef base_atomic this_type;
    typedef void * value_type;
    typedef ptrdiff_t difference_type;
public:
    explicit base_atomic(value_type v) : v_(v) {}
    /* Default construction leaves the pointer uninitialized (std::atomic semantics). */
    base_atomic(void) {}

    /* Volatile store bracketed by the ordering fences. */
    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before_store(order);
        const_cast<volatile value_type &>(v_) = v;
        platform_fence_after_store(order);
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile value_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    /* Emulated exchange via a CAS loop. */
    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
        return original;
    }

    /* Strong CAS is a valid weak CAS — forward. */
    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        platform_fence_before(success_order);

        bool success = platform_cmpxchg32_strong(expected, desired, &v_);

        if (success) {
            platform_fence_after(success_order);
        } else {
            platform_fence_after(failure_order);
        }

        return success;
    }

    /* NOTE(review): `original + v` performs arithmetic on void*, which is a
       compiler extension (e.g. GCC treats it as char* arithmetic) — confirm
       all supported compilers accept this. */
    value_type
    fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
        return original;
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};
|
||||
|
||||
/* Specialization for typed pointers T* on 32-bit targets; fetch_add/fetch_sub
   scale by sizeof(T) via ordinary pointer arithmetic. */
template<typename T, bool Sign>
class base_atomic<T *, void *, 4, Sign> {
    typedef base_atomic this_type;
    typedef T * value_type;
    typedef ptrdiff_t difference_type;
public:
    explicit base_atomic(value_type v) : v_(v) {}
    /* Default construction leaves the pointer uninitialized (std::atomic semantics). */
    base_atomic(void) {}

    /* Volatile store bracketed by the ordering fences. */
    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before_store(order);
        const_cast<volatile value_type &>(v_) = v;
        platform_fence_after_store(order);
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile value_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    /* Emulated exchange via a CAS loop. */
    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
        return original;
    }

    /* Strong CAS is a valid weak CAS — forward. */
    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        platform_fence_before(success_order);

        bool success = platform_cmpxchg32_strong(expected, desired, &v_);

        if (success) {
            platform_fence_after(success_order);
        } else {
            platform_fence_after(failure_order);
        }

        return success;
    }

    /* Pointer arithmetic here advances by whole elements (scaled by sizeof(T)). */
    value_type
    fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
        return original;
    }

    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_POINTER_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};
|
||||
|
||||
/* generic types */
|
||||
|
||||
template<typename T, bool Sign>
|
||||
class base_atomic<T, void, 1, Sign> {
|
||||
typedef base_atomic this_type;
|
||||
typedef T value_type;
|
||||
typedef uint32_t storage_type;
|
||||
public:
|
||||
explicit base_atomic(value_type const& v)
|
||||
{
|
||||
memcpy(&v_, &v, sizeof(value_type));
|
||||
}
|
||||
base_atomic(void) {}
|
||||
|
||||
void
|
||||
store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
storage_type tmp = 0;
|
||||
memcpy(&tmp, &v, sizeof(value_type));
|
||||
platform_fence_before_store(order);
|
||||
const_cast<volatile storage_type &>(v_) = tmp;
|
||||
platform_fence_after_store(order);
|
||||
}
|
||||
|
||||
value_type
|
||||
load(memory_order order = memory_order_seq_cst) const volatile
|
||||
{
|
||||
storage_type tmp = const_cast<const volatile storage_type &>(v_);
|
||||
platform_fence_after_load(order);
|
||||
|
||||
value_type v;
|
||||
memcpy(&v, &tmp, sizeof(value_type));
|
||||
return v;
|
||||
}
|
||||
|
||||
value_type
|
||||
exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
value_type original = load(memory_order_relaxed);
|
||||
do {
|
||||
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
|
||||
return original;
|
||||
}
|
||||
|
||||
bool
|
||||
compare_exchange_weak(
|
||||
value_type & expected,
|
||||
value_type const& desired,
|
||||
memory_order success_order,
|
||||
memory_order failure_order) volatile
|
||||
{
|
||||
return compare_exchange_strong(expected, desired, success_order, failure_order);
|
||||
}
|
||||
|
||||
bool
|
||||
compare_exchange_strong(
|
||||
value_type & expected,
|
||||
value_type const& desired,
|
||||
memory_order success_order,
|
||||
memory_order failure_order) volatile
|
||||
{
|
||||
storage_type expected_s = 0, desired_s = 0;
|
||||
memcpy(&expected_s, &expected, sizeof(value_type));
|
||||
memcpy(&desired_s, &desired, sizeof(value_type));
|
||||
|
||||
platform_fence_before(success_order);
|
||||
bool success = platform_cmpxchg32_strong(expected_s, desired_s, &v_);
|
||||
|
||||
if (success) {
|
||||
platform_fence_after(success_order);
|
||||
} else {
|
||||
platform_fence_after(failure_order);
|
||||
memcpy(&expected, &expected_s, sizeof(value_type));
|
||||
}
|
||||
|
||||
return success;
|
||||
}
|
||||
|
||||
bool
|
||||
is_lock_free(void) const volatile
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
|
||||
private:
|
||||
base_atomic(const base_atomic &) /* = delete */ ;
|
||||
void operator=(const base_atomic &) /* = delete */ ;
|
||||
storage_type v_;
|
||||
};
|
||||
|
||||
template<typename T, bool Sign>
|
||||
class base_atomic<T, void, 2, Sign> {
|
||||
typedef base_atomic this_type;
|
||||
typedef T value_type;
|
||||
typedef uint32_t storage_type;
|
||||
public:
|
||||
explicit base_atomic(value_type const& v)
|
||||
{
|
||||
memcpy(&v_, &v, sizeof(value_type));
|
||||
}
|
||||
base_atomic(void) {}
|
||||
|
||||
void
|
||||
store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
storage_type tmp = 0;
|
||||
memcpy(&tmp, &v, sizeof(value_type));
|
||||
platform_fence_before_store(order);
|
||||
const_cast<volatile storage_type &>(v_) = tmp;
|
||||
platform_fence_after_store(order);
|
||||
}
|
||||
|
||||
value_type
|
||||
load(memory_order order = memory_order_seq_cst) const volatile
|
||||
{
|
||||
storage_type tmp = const_cast<const volatile storage_type &>(v_);
|
||||
platform_fence_after_load(order);
|
||||
|
||||
value_type v;
|
||||
memcpy(&v, &tmp, sizeof(value_type));
|
||||
return v;
|
||||
}
|
||||
|
||||
value_type
|
||||
exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
value_type original = load(memory_order_relaxed);
|
||||
do {
|
||||
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
|
||||
return original;
|
||||
}
|
||||
|
||||
bool
|
||||
compare_exchange_weak(
|
||||
value_type & expected,
|
||||
value_type const& desired,
|
||||
memory_order success_order,
|
||||
memory_order failure_order) volatile
|
||||
{
|
||||
return compare_exchange_strong(expected, desired, success_order, failure_order);
|
||||
}
|
||||
|
||||
bool
|
||||
compare_exchange_strong(
|
||||
value_type & expected,
|
||||
value_type const& desired,
|
||||
memory_order success_order,
|
||||
memory_order failure_order) volatile
|
||||
{
|
||||
|
||||
storage_type expected_s = 0, desired_s = 0;
|
||||
memcpy(&expected_s, &expected, sizeof(value_type));
|
||||
memcpy(&desired_s, &desired, sizeof(value_type));
|
||||
|
||||
platform_fence_before(success_order);
|
||||
bool success = platform_cmpxchg32_strong(expected_s, desired_s, &v_);
|
||||
|
||||
if (success) {
|
||||
platform_fence_after(success_order);
|
||||
} else {
|
||||
platform_fence_after(failure_order);
|
||||
memcpy(&expected, &expected_s, sizeof(value_type));
|
||||
}
|
||||
|
||||
return success;
|
||||
}
|
||||
|
||||
bool
|
||||
is_lock_free(void) const volatile
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
|
||||
private:
|
||||
base_atomic(const base_atomic &) /* = delete */ ;
|
||||
void operator=(const base_atomic &) /* = delete */ ;
|
||||
storage_type v_;
|
||||
};
|
||||
|
||||
/* Generic fallback for arbitrary 4-byte types: the object representation is
   copied into a 32-bit storage word (memcpy avoids aliasing/alignment issues
   for non-integral T) and updated via the platform's strong 32-bit CAS. */
template<typename T, bool Sign>
class base_atomic<T, void, 4, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef uint32_t storage_type;
public:
    /* v_(0) guarantees a fully-defined word even if value_type has
       padding bits smaller than the 4-byte storage. */
    explicit base_atomic(value_type const& v) : v_(0)
    {
        memcpy(&v_, &v, sizeof(value_type));
    }
    /* Default construction leaves the value uninitialized (std::atomic semantics). */
    base_atomic(void) {}

    /* Repack into a zeroed temporary word, then store with ordering fences. */
    void
    store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
    {
        storage_type tmp = 0;
        memcpy(&tmp, &v, sizeof(value_type));
        platform_fence_before_store(order);
        const_cast<volatile storage_type &>(v_) = tmp;
        platform_fence_after_store(order);
    }

    /* Load the word, then copy its bytes back out as a value_type. */
    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        storage_type tmp = const_cast<const volatile storage_type &>(v_);
        platform_fence_after_load(order);

        value_type v;
        memcpy(&v, &tmp, sizeof(value_type));
        return v;
    }

    /* Emulated exchange via a CAS loop; returns the pre-swap value. */
    value_type
    exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
        return original;
    }

    /* Strong CAS is a valid weak CAS — forward. */
    bool
    compare_exchange_weak(
        value_type & expected,
        value_type const& desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }

    bool
    compare_exchange_strong(
        value_type & expected,
        value_type const& desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        /* Repack both operands; on failure the observed representation
           is copied back into `expected`. */
        storage_type expected_s = 0, desired_s = 0;
        memcpy(&expected_s, &expected, sizeof(value_type));
        memcpy(&desired_s, &desired, sizeof(value_type));

        platform_fence_before(success_order);
        bool success = platform_cmpxchg32_strong(expected_s, desired_s, &v_);

        if (success) {
            platform_fence_after(success_order);
        } else {
            platform_fence_after(failure_order);
            memcpy(&expected, &expected_s, sizeof(value_type));
        }

        return success;
    }

    /* This specialization is only selected when the platform CAS exists. */
    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    storage_type v_;
};
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -0,0 +1,916 @@
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CAS32WEAK_HPP
|
||||
#define BOOST_ATOMIC_DETAIL_CAS32WEAK_HPP
|
||||
|
||||
// Distributed under the Boost Software License, Version 1.0.
|
||||
// See accompanying file LICENSE_1_0.txt or copy at
|
||||
// http://www.boost.org/LICENSE_1_0.txt)
|
||||
//
|
||||
// Copyright (c) 2011 Helge Bahmann
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/cstdint.hpp>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/base.hpp>
|
||||
|
||||
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
/* integral types */
|
||||
|
||||
/* cas32weak variant for 1-byte integral types: the value is widened to a
   32-bit storage word by integral conversion, and all RMW operations are
   built on the platform's *weak* 32-bit CAS (may fail spuriously). */
template<typename T, bool Sign>
class base_atomic<T, int, 1, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef T difference_type;
    typedef uint32_t storage_type;
public:
    /* Integral conversion to storage_type; consistent with store() below. */
    explicit base_atomic(value_type v) : v_(v) {}
    /* Default construction leaves the value uninitialized (std::atomic semantics). */
    base_atomic(void) {}

    /* Volatile store of the converted word, bracketed by ordering fences. */
    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before_store(order);
        const_cast<volatile storage_type &>(v_) = v;
        platform_fence_after_store(order);
    }

    /* Volatile load, converted back to value_type. */
    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile storage_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    /* Emulated exchange via a CAS loop; returns the pre-swap value. */
    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
        return original;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        platform_fence_before(success_order);

        /* Widen via the same integral conversion used by store(), so the
           full-word comparison sees consistent representations. */
        storage_type expected_s = (storage_type) expected;
        storage_type desired_s = (storage_type) desired;

        /* Weak primitive: may fail spuriously; on failure expected_s holds
           the value observed in memory. */
        bool success = platform_cmpxchg32(expected_s, desired_s, &v_);

        if (success) {
            platform_fence_after(success_order);
        } else {
            platform_fence_after(failure_order);
            expected = (value_type) expected_s;
        }

        return success;
    }

    /* Build a strong CAS from the weak primitive: retry while the failure
       is spurious (observed value still equals `expected`); a genuine
       mismatch is reported as failure with `expected` updated. */
    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        for(;;) {
            value_type tmp = expected;
            if (compare_exchange_weak(tmp, desired, success_order, failure_order))
                return true;
            if (tmp != expected) {
                expected = tmp;
                return false;
            }
        }
    }

    /* All fetch_* operations are CAS loops over the weak primitive. */
    value_type
    fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed));
        return original;
    }

    /* Only selected when the platform weak CAS exists — always lock-free. */
    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    storage_type v_;
};
|
||||
|
||||
/* cas32weak variant for 2-byte integral types: identical strategy to the
   1-byte specialization — widen to a 32-bit storage word by integral
   conversion and build everything on the platform's weak 32-bit CAS. */
template<typename T, bool Sign>
class base_atomic<T, int, 2, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef T difference_type;
    typedef uint32_t storage_type;
public:
    /* Integral conversion to storage_type; consistent with store() below. */
    explicit base_atomic(value_type v) : v_(v) {}
    /* Default construction leaves the value uninitialized (std::atomic semantics). */
    base_atomic(void) {}

    /* Volatile store of the converted word, bracketed by ordering fences. */
    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before_store(order);
        const_cast<volatile storage_type &>(v_) = v;
        platform_fence_after_store(order);
    }

    /* Volatile load, converted back to value_type. */
    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile storage_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    /* Emulated exchange via a CAS loop; returns the pre-swap value. */
    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
        return original;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        platform_fence_before(success_order);

        /* Widen via the same integral conversion used by store(). */
        storage_type expected_s = (storage_type) expected;
        storage_type desired_s = (storage_type) desired;

        /* Weak primitive: may fail spuriously; on failure expected_s holds
           the value observed in memory. */
        bool success = platform_cmpxchg32(expected_s, desired_s, &v_);

        if (success) {
            platform_fence_after(success_order);
        } else {
            platform_fence_after(failure_order);
            expected = (value_type) expected_s;
        }

        return success;
    }

    /* Strong CAS from the weak primitive: retry spurious failures, report
       genuine mismatches with `expected` updated. */
    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        for(;;) {
            value_type tmp = expected;
            if (compare_exchange_weak(tmp, desired, success_order, failure_order))
                return true;
            if (tmp != expected) {
                expected = tmp;
                return false;
            }
        }
    }

    /* All fetch_* operations are CAS loops over the weak primitive. */
    value_type
    fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed));
        return original;
    }

    /* Only selected when the platform weak CAS exists — always lock-free. */
    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    storage_type v_;
};
|
||||
|
||||
/* cas32weak variant for 4-byte integral types: the value is stored in place
   (no widening) and all RMW operations are built on the platform's weak
   32-bit CAS, which may fail spuriously. */
template<typename T, bool Sign>
class base_atomic<T, int, 4, Sign> {
    typedef base_atomic this_type;
    typedef T value_type;
    typedef T difference_type;
public:
    explicit base_atomic(value_type v) : v_(v) {}
    /* Default construction leaves the value uninitialized (std::atomic semantics). */
    base_atomic(void) {}

    /* Volatile store bracketed by ordering fences. */
    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before_store(order);
        const_cast<volatile value_type &>(v_) = v;
        platform_fence_after_store(order);
    }

    /* Volatile load followed by the ordering fence. */
    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile value_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    /* Emulated exchange via a CAS loop; returns the pre-swap value. */
    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
        return original;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        platform_fence_before(success_order);

        /* Weak primitive operates on the value directly; NOTE(review):
           assumes platform_cmpxchg32 updates `expected` in place on
           failure (the 1/2-byte variants rely on the same behavior via
           expected_s) — confirm against each platform implementation. */
        bool success = platform_cmpxchg32(expected, desired, &v_);

        if (success) {
            platform_fence_after(success_order);
        } else {
            platform_fence_after(failure_order);
        }

        return success;
    }

    /* Strong CAS from the weak primitive: retry spurious failures, report
       genuine mismatches with `expected` updated. */
    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        for(;;) {
            value_type tmp = expected;
            if (compare_exchange_weak(tmp, desired, success_order, failure_order))
                return true;
            if (tmp != expected) {
                expected = tmp;
                return false;
            }
        }
    }

    /* All fetch_* operations are CAS loops over the weak primitive. */
    value_type
    fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed));
        return original;
    }

    /* Only selected when the platform weak CAS exists — always lock-free. */
    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};
|
||||
|
||||
/* pointer types */
|
||||
|
||||
/* cas32weak variant for plain void* on 32-bit targets: stored directly and
   updated through the platform's weak 32-bit CAS. */
template<bool Sign>
class base_atomic<void *, void *, 4, Sign> {
    typedef base_atomic this_type;
    typedef void * value_type;
    typedef ptrdiff_t difference_type;
public:
    explicit base_atomic(value_type v) : v_(v) {}
    /* Default construction leaves the pointer uninitialized (std::atomic semantics). */
    base_atomic(void) {}

    /* Volatile store bracketed by ordering fences. */
    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before_store(order);
        const_cast<volatile value_type &>(v_) = v;
        platform_fence_after_store(order);
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile value_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    /* Emulated exchange via a CAS loop; returns the pre-swap value. */
    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
        return original;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        platform_fence_before(success_order);

        /* Weak primitive — may fail spuriously. */
        bool success = platform_cmpxchg32(expected, desired, &v_);

        if (success) {
            platform_fence_after(success_order);
        } else {
            platform_fence_after(failure_order);
        }

        return success;
    }

    /* Strong CAS from the weak primitive: retry spurious failures, report
       genuine mismatches with `expected` updated. */
    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        for(;;) {
            value_type tmp = expected;
            if (compare_exchange_weak(tmp, desired, success_order, failure_order))
                return true;
            if (tmp != expected) {
                expected = tmp;
                return false;
            }
        }
    }

    /* Only selected when the platform weak CAS exists — always lock-free. */
    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_BASE_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};
|
||||
|
||||
/* cas32weak variant for typed pointers T* on 32-bit targets; fetch_add and
   fetch_sub advance by whole elements via ordinary pointer arithmetic. */
template<typename T, bool Sign>
class base_atomic<T *, void *, 4, Sign> {
    typedef base_atomic this_type;
    typedef T * value_type;
    typedef ptrdiff_t difference_type;
public:
    explicit base_atomic(value_type v) : v_(v) {}
    /* Default construction leaves the pointer uninitialized (std::atomic semantics). */
    base_atomic(void) {}

    /* Volatile store bracketed by ordering fences. */
    void
    store(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        platform_fence_before_store(order);
        const_cast<volatile value_type &>(v_) = v;
        platform_fence_after_store(order);
    }

    value_type
    load(memory_order order = memory_order_seq_cst) const volatile
    {
        value_type v = const_cast<const volatile value_type &>(v_);
        platform_fence_after_load(order);
        return v;
    }

    /* Emulated exchange via a CAS loop; returns the pre-swap value. */
    value_type
    exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
        return original;
    }

    bool
    compare_exchange_weak(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        platform_fence_before(success_order);

        /* Weak primitive — may fail spuriously. */
        bool success = platform_cmpxchg32(expected, desired, &v_);

        if (success) {
            platform_fence_after(success_order);
        } else {
            platform_fence_after(failure_order);
        }

        return success;
    }

    /* Strong CAS from the weak primitive: retry spurious failures, report
       genuine mismatches with `expected` updated. */
    bool
    compare_exchange_strong(
        value_type & expected,
        value_type desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        for(;;) {
            value_type tmp = expected;
            if (compare_exchange_weak(tmp, desired, success_order, failure_order))
                return true;
            if (tmp != expected) {
                expected = tmp;
                return false;
            }
        }
    }

    /* Element-scaled pointer arithmetic, emulated with a CAS loop. */
    value_type
    fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
        return original;
    }

    value_type
    fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile
    {
        value_type original = load(memory_order_relaxed);
        do {
        } while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
        return original;
    }

    /* Only selected when the platform weak CAS exists — always lock-free. */
    bool
    is_lock_free(void) const volatile
    {
        return true;
    }

    BOOST_ATOMIC_DECLARE_POINTER_OPERATORS
private:
    base_atomic(const base_atomic &) /* = delete */ ;
    void operator=(const base_atomic &) /* = delete */ ;
    value_type v_;
};
|
||||
|
||||
/* generic types */
|
||||
|
||||
template<typename T, bool Sign>
|
||||
class base_atomic<T, void, 1, Sign> {
|
||||
typedef base_atomic this_type;
|
||||
typedef T value_type;
|
||||
typedef uint32_t storage_type;
|
||||
public:
|
||||
explicit base_atomic(value_type const& v) : v_(0)
|
||||
{
|
||||
memcpy(&v_, &v, sizeof(value_type));
|
||||
}
|
||||
base_atomic(void) {}
|
||||
|
||||
void
|
||||
store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
storage_type tmp = 0;
|
||||
memcpy(&tmp, &v, sizeof(value_type));
|
||||
platform_fence_before_store(order);
|
||||
const_cast<volatile storage_type &>(v_) = tmp;
|
||||
platform_fence_after_store(order);
|
||||
}
|
||||
|
||||
value_type
|
||||
load(memory_order order = memory_order_seq_cst) const volatile
|
||||
{
|
||||
storage_type tmp = const_cast<const volatile storage_type &>(v_);
|
||||
platform_fence_after_load(order);
|
||||
|
||||
value_type v;
|
||||
memcpy(&v, &tmp, sizeof(value_type));
|
||||
return v;
|
||||
}
|
||||
|
||||
value_type
|
||||
exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
value_type original = load(memory_order_relaxed);
|
||||
do {
|
||||
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
|
||||
return original;
|
||||
}
|
||||
|
||||
bool
|
||||
compare_exchange_weak(
|
||||
value_type & expected,
|
||||
value_type const& desired,
|
||||
memory_order success_order,
|
||||
memory_order failure_order) volatile
|
||||
{
|
||||
storage_type expected_s = 0, desired_s = 0;
|
||||
memcpy(&expected_s, &expected, sizeof(value_type));
|
||||
memcpy(&desired_s, &desired, sizeof(value_type));
|
||||
|
||||
platform_fence_before(success_order);
|
||||
|
||||
bool success = platform_cmpxchg32(expected_s, desired_s, &v_);
|
||||
|
||||
if (success) {
|
||||
platform_fence_after(success_order);
|
||||
} else {
|
||||
platform_fence_after(failure_order);
|
||||
memcpy(&expected, &expected_s, sizeof(value_type));
|
||||
}
|
||||
|
||||
return success;
|
||||
}
|
||||
|
||||
bool
|
||||
compare_exchange_strong(
|
||||
value_type & expected,
|
||||
value_type const& desired,
|
||||
memory_order success_order,
|
||||
memory_order failure_order) volatile
|
||||
{
|
||||
for(;;) {
|
||||
value_type tmp = expected;
|
||||
if (compare_exchange_weak(tmp, desired, success_order, failure_order))
|
||||
return true;
|
||||
if (tmp != expected) {
|
||||
expected = tmp;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool
|
||||
is_lock_free(void) const volatile
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
|
||||
private:
|
||||
base_atomic(const base_atomic &) /* = delete */ ;
|
||||
void operator=(const base_atomic &) /* = delete */ ;
|
||||
storage_type v_;
|
||||
};
|
||||
|
||||
template<typename T, bool Sign>
|
||||
class base_atomic<T, void, 2, Sign> {
|
||||
typedef base_atomic this_type;
|
||||
typedef T value_type;
|
||||
typedef uint32_t storage_type;
|
||||
public:
|
||||
explicit base_atomic(value_type const& v) : v_(0)
|
||||
{
|
||||
memcpy(&v_, &v, sizeof(value_type));
|
||||
}
|
||||
base_atomic(void) {}
|
||||
|
||||
void
|
||||
store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
storage_type tmp = 0;
|
||||
memcpy(&tmp, &v, sizeof(value_type));
|
||||
platform_fence_before_store(order);
|
||||
const_cast<volatile storage_type &>(v_) = tmp;
|
||||
platform_fence_after_store(order);
|
||||
}
|
||||
|
||||
value_type
|
||||
load(memory_order order = memory_order_seq_cst) const volatile
|
||||
{
|
||||
storage_type tmp = const_cast<const volatile storage_type &>(v_);
|
||||
platform_fence_after_load(order);
|
||||
|
||||
value_type v;
|
||||
memcpy(&v, &tmp, sizeof(value_type));
|
||||
return v;
|
||||
}
|
||||
|
||||
value_type
|
||||
exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
value_type original = load(memory_order_relaxed);
|
||||
do {
|
||||
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
|
||||
return original;
|
||||
}
|
||||
|
||||
bool
|
||||
compare_exchange_weak(
|
||||
value_type & expected,
|
||||
value_type const& desired,
|
||||
memory_order success_order,
|
||||
memory_order failure_order) volatile
|
||||
{
|
||||
storage_type expected_s = 0, desired_s = 0;
|
||||
memcpy(&expected_s, &expected, sizeof(value_type));
|
||||
memcpy(&desired_s, &desired, sizeof(value_type));
|
||||
|
||||
platform_fence_before(success_order);
|
||||
|
||||
bool success = platform_cmpxchg32(expected_s, desired_s, &v_);
|
||||
|
||||
if (success) {
|
||||
platform_fence_after(success_order);
|
||||
} else {
|
||||
platform_fence_after(failure_order);
|
||||
memcpy(&expected, &expected_s, sizeof(value_type));
|
||||
}
|
||||
|
||||
return success;
|
||||
}
|
||||
|
||||
bool
|
||||
compare_exchange_strong(
|
||||
value_type & expected,
|
||||
value_type const& desired,
|
||||
memory_order success_order,
|
||||
memory_order failure_order) volatile
|
||||
{
|
||||
for(;;) {
|
||||
value_type tmp = expected;
|
||||
if (compare_exchange_weak(tmp, desired, success_order, failure_order))
|
||||
return true;
|
||||
if (tmp != expected) {
|
||||
expected = tmp;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool
|
||||
is_lock_free(void) const volatile
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
|
||||
private:
|
||||
base_atomic(const base_atomic &) /* = delete */ ;
|
||||
void operator=(const base_atomic &) /* = delete */ ;
|
||||
storage_type v_;
|
||||
};
|
||||
|
||||
template<typename T, bool Sign>
|
||||
class base_atomic<T, void, 4, Sign> {
|
||||
typedef base_atomic this_type;
|
||||
typedef T value_type;
|
||||
typedef uint32_t storage_type;
|
||||
public:
|
||||
explicit base_atomic(value_type const& v) : v_(0)
|
||||
{
|
||||
memcpy(&v_, &v, sizeof(value_type));
|
||||
}
|
||||
base_atomic(void) {}
|
||||
|
||||
void
|
||||
store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
storage_type tmp = 0;
|
||||
memcpy(&tmp, &v, sizeof(value_type));
|
||||
platform_fence_before_store(order);
|
||||
const_cast<volatile storage_type &>(v_) = tmp;
|
||||
platform_fence_after_store(order);
|
||||
}
|
||||
|
||||
value_type
|
||||
load(memory_order order = memory_order_seq_cst) const volatile
|
||||
{
|
||||
storage_type tmp = const_cast<const volatile storage_type &>(v_);
|
||||
platform_fence_after_load(order);
|
||||
|
||||
value_type v;
|
||||
memcpy(&v, &tmp, sizeof(value_type));
|
||||
return v;
|
||||
}
|
||||
|
||||
value_type
|
||||
exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
value_type original = load(memory_order_relaxed);
|
||||
do {
|
||||
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
|
||||
return original;
|
||||
}
|
||||
|
||||
bool
|
||||
compare_exchange_weak(
|
||||
value_type & expected,
|
||||
value_type const& desired,
|
||||
memory_order success_order,
|
||||
memory_order failure_order) volatile
|
||||
{
|
||||
storage_type expected_s = 0, desired_s = 0;
|
||||
memcpy(&expected_s, &expected, sizeof(value_type));
|
||||
memcpy(&desired_s, &desired, sizeof(value_type));
|
||||
|
||||
platform_fence_before(success_order);
|
||||
|
||||
bool success = platform_cmpxchg32(expected_s, desired_s, &v_);
|
||||
|
||||
if (success) {
|
||||
platform_fence_after(success_order);
|
||||
} else {
|
||||
platform_fence_after(failure_order);
|
||||
memcpy(&expected, &expected_s, sizeof(value_type));
|
||||
}
|
||||
|
||||
return success;
|
||||
}
|
||||
|
||||
bool
|
||||
compare_exchange_strong(
|
||||
value_type & expected,
|
||||
value_type const& desired,
|
||||
memory_order success_order,
|
||||
memory_order failure_order) volatile
|
||||
{
|
||||
for(;;) {
|
||||
value_type tmp = expected;
|
||||
if (compare_exchange_weak(tmp, desired, success_order, failure_order))
|
||||
return true;
|
||||
if (tmp != expected) {
|
||||
expected = tmp;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool
|
||||
is_lock_free(void) const volatile
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
|
||||
private:
|
||||
base_atomic(const base_atomic &) /* = delete */ ;
|
||||
void operator=(const base_atomic &) /* = delete */ ;
|
||||
storage_type v_;
|
||||
};
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -0,0 +1,438 @@
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CAS64STRONG_HPP
|
||||
#define BOOST_ATOMIC_DETAIL_CAS64STRONG_HPP
|
||||
|
||||
// Distributed under the Boost Software License, Version 1.0.
|
||||
// See accompanying file LICENSE_1_0.txt or copy at
|
||||
// http://www.boost.org/LICENSE_1_0.txt)
|
||||
//
|
||||
// Copyright (c) 2011 Helge Bahmann
|
||||
|
||||
// Build 64-bit atomic operation from platform_cmpxchg64_strong
|
||||
// primitive. It is assumed that 64-bit loads/stores are not
|
||||
// atomic, so they are funnelled through cmpxchg as well.
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/cstdint.hpp>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/base.hpp>
|
||||
|
||||
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
/* integral types */
|
||||
|
||||
template<typename T, bool Sign>
|
||||
class base_atomic<T, int, 8, Sign> {
|
||||
typedef base_atomic this_type;
|
||||
typedef T value_type;
|
||||
typedef T difference_type;
|
||||
public:
|
||||
explicit base_atomic(value_type v) : v_(v) {}
|
||||
base_atomic(void) {}
|
||||
|
||||
void
|
||||
store(value_type v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
platform_fence_before_store(order);
|
||||
platform_store64(v, &v_);
|
||||
platform_fence_after_store(order);
|
||||
}
|
||||
|
||||
value_type
|
||||
load(memory_order order = memory_order_seq_cst) const volatile
|
||||
{
|
||||
value_type v = platform_load64(&v_);
|
||||
platform_fence_after_load(order);
|
||||
return v;
|
||||
}
|
||||
|
||||
value_type
|
||||
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
value_type original = load(memory_order_relaxed);
|
||||
do {
|
||||
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
|
||||
return original;
|
||||
}
|
||||
|
||||
bool
|
||||
compare_exchange_weak(
|
||||
value_type & expected,
|
||||
value_type desired,
|
||||
memory_order success_order,
|
||||
memory_order failure_order) volatile
|
||||
{
|
||||
return compare_exchange_strong(expected, desired, success_order, failure_order);
|
||||
}
|
||||
|
||||
bool
|
||||
compare_exchange_strong(
|
||||
value_type & expected,
|
||||
value_type desired,
|
||||
memory_order success_order,
|
||||
memory_order failure_order) volatile
|
||||
{
|
||||
platform_fence_before(success_order);
|
||||
|
||||
bool success = platform_cmpxchg64_strong(expected, desired, &v_);
|
||||
|
||||
if (success) {
|
||||
platform_fence_after(success_order);
|
||||
} else {
|
||||
platform_fence_after(failure_order);
|
||||
}
|
||||
|
||||
return success;
|
||||
}
|
||||
|
||||
value_type
|
||||
fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
value_type original = load(memory_order_relaxed);
|
||||
do {
|
||||
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
|
||||
return original;
|
||||
}
|
||||
|
||||
value_type
|
||||
fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
value_type original = load(memory_order_relaxed);
|
||||
do {
|
||||
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
|
||||
return original;
|
||||
}
|
||||
|
||||
value_type
|
||||
fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
value_type original = load(memory_order_relaxed);
|
||||
do {
|
||||
} while (!compare_exchange_weak(original, original & v, order, memory_order_relaxed));
|
||||
return original;
|
||||
}
|
||||
|
||||
value_type
|
||||
fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
value_type original = load(memory_order_relaxed);
|
||||
do {
|
||||
} while (!compare_exchange_weak(original, original | v, order, memory_order_relaxed));
|
||||
return original;
|
||||
}
|
||||
|
||||
value_type
|
||||
fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
value_type original = load(memory_order_relaxed);
|
||||
do {
|
||||
} while (!compare_exchange_weak(original, original ^ v, order, memory_order_relaxed));
|
||||
return original;
|
||||
}
|
||||
|
||||
bool
|
||||
is_lock_free(void) const volatile
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
|
||||
private:
|
||||
base_atomic(const base_atomic &) /* = delete */ ;
|
||||
void operator=(const base_atomic &) /* = delete */ ;
|
||||
value_type v_;
|
||||
};
|
||||
|
||||
/* pointer types */
|
||||
|
||||
template<bool Sign>
|
||||
class base_atomic<void *, void *, 8, Sign> {
|
||||
typedef base_atomic this_type;
|
||||
typedef void * value_type;
|
||||
typedef ptrdiff_t difference_type;
|
||||
public:
|
||||
explicit base_atomic(value_type v) : v_(v) {}
|
||||
base_atomic(void) {}
|
||||
|
||||
void
|
||||
store(value_type v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
platform_fence_before_store(order);
|
||||
platform_store64(v, &v_);
|
||||
platform_fence_after_store(order);
|
||||
}
|
||||
|
||||
value_type
|
||||
load(memory_order order = memory_order_seq_cst) const volatile
|
||||
{
|
||||
value_type v = platform_load64(&v_);
|
||||
platform_fence_after_load(order);
|
||||
return v;
|
||||
}
|
||||
|
||||
value_type
|
||||
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
value_type original = load(memory_order_relaxed);
|
||||
do {
|
||||
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
|
||||
return original;
|
||||
}
|
||||
|
||||
bool
|
||||
compare_exchange_weak(
|
||||
value_type & expected,
|
||||
value_type desired,
|
||||
memory_order success_order,
|
||||
memory_order failure_order) volatile
|
||||
{
|
||||
return compare_exchange_strong(expected, desired, success_order, failure_order);
|
||||
}
|
||||
|
||||
bool
|
||||
compare_exchange_strong(
|
||||
value_type & expected,
|
||||
value_type desired,
|
||||
memory_order success_order,
|
||||
memory_order failure_order) volatile
|
||||
{
|
||||
platform_fence_before(success_order);
|
||||
|
||||
bool success = platform_cmpxchg64_strong(expected, desired, &v_);
|
||||
|
||||
if (success) {
|
||||
platform_fence_after(success_order);
|
||||
} else {
|
||||
platform_fence_after(failure_order);
|
||||
}
|
||||
|
||||
return success;
|
||||
}
|
||||
|
||||
value_type
|
||||
fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
value_type original = load(memory_order_relaxed);
|
||||
do {
|
||||
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
|
||||
return original;
|
||||
}
|
||||
|
||||
value_type
|
||||
fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
value_type original = load(memory_order_relaxed);
|
||||
do {
|
||||
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
|
||||
return original;
|
||||
}
|
||||
|
||||
bool
|
||||
is_lock_free(void) const volatile
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
|
||||
private:
|
||||
base_atomic(const base_atomic &) /* = delete */ ;
|
||||
void operator=(const base_atomic &) /* = delete */ ;
|
||||
value_type v_;
|
||||
};
|
||||
|
||||
template<typename T, bool Sign>
|
||||
class base_atomic<T *, void *, 8, Sign> {
|
||||
typedef base_atomic this_type;
|
||||
typedef T * value_type;
|
||||
typedef ptrdiff_t difference_type;
|
||||
public:
|
||||
explicit base_atomic(value_type v) : v_(v) {}
|
||||
base_atomic(void) {}
|
||||
|
||||
void
|
||||
store(value_type v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
platform_fence_before_store(order);
|
||||
platform_store64(v, &v_);
|
||||
platform_fence_after_store(order);
|
||||
}
|
||||
|
||||
value_type
|
||||
load(memory_order order = memory_order_seq_cst) const volatile
|
||||
{
|
||||
value_type v = platform_load64(&v_);
|
||||
platform_fence_after_load(order);
|
||||
return v;
|
||||
}
|
||||
|
||||
value_type
|
||||
exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
value_type original = load(memory_order_relaxed);
|
||||
do {
|
||||
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
|
||||
return original;
|
||||
}
|
||||
|
||||
bool
|
||||
compare_exchange_weak(
|
||||
value_type & expected,
|
||||
value_type desired,
|
||||
memory_order success_order,
|
||||
memory_order failure_order) volatile
|
||||
{
|
||||
return compare_exchange_strong(expected, desired, success_order, failure_order);
|
||||
}
|
||||
|
||||
bool
|
||||
compare_exchange_strong(
|
||||
value_type & expected,
|
||||
value_type desired,
|
||||
memory_order success_order,
|
||||
memory_order failure_order) volatile
|
||||
{
|
||||
platform_fence_before(success_order);
|
||||
|
||||
bool success = platform_cmpxchg64_strong(expected, desired, &v_);
|
||||
|
||||
if (success) {
|
||||
platform_fence_after(success_order);
|
||||
} else {
|
||||
platform_fence_after(failure_order);
|
||||
}
|
||||
|
||||
return success;
|
||||
}
|
||||
|
||||
value_type
|
||||
fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
value_type original = load(memory_order_relaxed);
|
||||
do {
|
||||
} while (!compare_exchange_weak(original, original + v, order, memory_order_relaxed));
|
||||
return original;
|
||||
}
|
||||
|
||||
value_type
|
||||
fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
value_type original = load(memory_order_relaxed);
|
||||
do {
|
||||
} while (!compare_exchange_weak(original, original - v, order, memory_order_relaxed));
|
||||
return original;
|
||||
}
|
||||
|
||||
bool
|
||||
is_lock_free(void) const volatile
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
BOOST_ATOMIC_DECLARE_POINTER_OPERATORS
|
||||
private:
|
||||
base_atomic(const base_atomic &) /* = delete */ ;
|
||||
void operator=(const base_atomic &) /* = delete */ ;
|
||||
value_type v_;
|
||||
};
|
||||
|
||||
/* generic types */
|
||||
|
||||
template<typename T, bool Sign>
|
||||
class base_atomic<T, void, 8, Sign> {
|
||||
typedef base_atomic this_type;
|
||||
typedef T value_type;
|
||||
typedef uint64_t storage_type;
|
||||
public:
|
||||
explicit base_atomic(value_type const& v) : v_(0)
|
||||
{
|
||||
memcpy(&v_, &v, sizeof(value_type));
|
||||
}
|
||||
base_atomic(void) {}
|
||||
|
||||
void
|
||||
store(value_type const& value, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
storage_type value_s = 0;
|
||||
memcpy(&value_s, &value, sizeof(value_s));
|
||||
platform_fence_before_store(order);
|
||||
platform_store64(value_s, &v_);
|
||||
platform_fence_after_store(order);
|
||||
}
|
||||
|
||||
value_type
|
||||
load(memory_order order = memory_order_seq_cst) const volatile
|
||||
{
|
||||
storage_type value_s = platform_load64(&v_);
|
||||
platform_fence_after_load(order);
|
||||
value_type value;
|
||||
memcpy(&value, &value_s, sizeof(value_s));
|
||||
return value;
|
||||
}
|
||||
|
||||
value_type
|
||||
exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
value_type original = load(memory_order_relaxed);
|
||||
do {
|
||||
} while (!compare_exchange_weak(original, v, order, memory_order_relaxed));
|
||||
return original;
|
||||
}
|
||||
|
||||
bool
|
||||
compare_exchange_weak(
|
||||
value_type & expected,
|
||||
value_type const& desired,
|
||||
memory_order success_order,
|
||||
memory_order failure_order) volatile
|
||||
{
|
||||
return compare_exchange_strong(expected, desired, success_order, failure_order);
|
||||
}
|
||||
|
||||
bool
|
||||
compare_exchange_strong(
|
||||
value_type & expected,
|
||||
value_type const& desired,
|
||||
memory_order success_order,
|
||||
memory_order failure_order) volatile
|
||||
{
|
||||
storage_type expected_s = 0, desired_s = 0;
|
||||
memcpy(&expected_s, &expected, sizeof(value_type));
|
||||
memcpy(&desired_s, &desired, sizeof(value_type));
|
||||
|
||||
platform_fence_before(success_order);
|
||||
bool success = platform_cmpxchg64_strong(expected_s, desired_s, &v_);
|
||||
|
||||
if (success) {
|
||||
platform_fence_after(success_order);
|
||||
} else {
|
||||
platform_fence_after(failure_order);
|
||||
memcpy(&expected, &expected_s, sizeof(value_type));
|
||||
}
|
||||
|
||||
return success;
|
||||
}
|
||||
|
||||
bool
|
||||
is_lock_free(void) const volatile
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
BOOST_ATOMIC_DECLARE_BASE_OPERATORS
|
||||
private:
|
||||
base_atomic(const base_atomic &) /* = delete */ ;
|
||||
void operator=(const base_atomic &) /* = delete */ ;
|
||||
storage_type v_;
|
||||
};
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -0,0 +1,54 @@
|
||||
#ifndef BOOST_ATOMIC_DETAIL_CONFIG_HPP
|
||||
#define BOOST_ATOMIC_DETAIL_CONFIG_HPP
|
||||
|
||||
// Copyright (c) 2012 Hartmut Kaiser
|
||||
//
|
||||
// Distributed under the Boost Software License, Version 1.0.
|
||||
// See accompanying file LICENSE_1_0.txt or copy at
|
||||
// http://www.boost.org/LICENSE_1_0.txt)
|
||||
|
||||
#include <boost/config.hpp>
|
||||
|
||||
#if (defined(_MSC_VER) && (_MSC_VER >= 1020)) || defined(__GNUC__) || defined(BOOST_CLANG) || defined(BOOST_INTEL) || defined(__COMO__) || defined(__DMC__)
|
||||
#define BOOST_ATOMIC_HAS_PRAGMA_ONCE
|
||||
#endif
|
||||
|
||||
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Set up dll import/export options
|
||||
#if (defined(BOOST_ATOMIC_DYN_LINK) || defined(BOOST_ALL_DYN_LINK)) && \
|
||||
!defined(BOOST_ATOMIC_STATIC_LINK)
|
||||
|
||||
#if defined(BOOST_ATOMIC_SOURCE)
|
||||
#define BOOST_ATOMIC_DECL BOOST_SYMBOL_EXPORT
|
||||
#define BOOST_ATOMIC_BUILD_DLL
|
||||
#else
|
||||
#define BOOST_ATOMIC_DECL BOOST_SYMBOL_IMPORT
|
||||
#endif
|
||||
|
||||
#endif // building a shared library
|
||||
|
||||
#ifndef BOOST_ATOMIC_DECL
|
||||
#define BOOST_ATOMIC_DECL
|
||||
#endif
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Auto library naming
|
||||
#if !defined(BOOST_ATOMIC_SOURCE) && !defined(BOOST_ALL_NO_LIB) && \
|
||||
!defined(BOOST_ATOMIC_NO_LIB)
|
||||
|
||||
#define BOOST_LIB_NAME boost_atomic
|
||||
|
||||
// tell the auto-link code to select a dll when required:
|
||||
#if defined(BOOST_ALL_DYN_LINK) || defined(BOOST_ATOMIC_DYN_LINK)
|
||||
#define BOOST_DYN_LINK
|
||||
#endif
|
||||
|
||||
#include <boost/config/auto_link.hpp>
|
||||
|
||||
#endif // auto-linking disabled
|
||||
|
||||
#endif
|
||||
@@ -0,0 +1,359 @@
|
||||
#ifndef BOOST_ATOMIC_DETAIL_GCC_ALPHA_HPP
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ALPHA_HPP
|
||||
|
||||
// Copyright (c) 2009 Helge Bahmann
|
||||
//
|
||||
// Distributed under the Boost Software License, Version 1.0.
|
||||
// See accompanying file LICENSE_1_0.txt or copy at
|
||||
// http://www.boost.org/LICENSE_1_0.txt)
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/base.hpp>
|
||||
#include <boost/atomic/detail/builder.hpp>
|
||||
|
||||
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
/*
|
||||
Refer to http://h71000.www7.hp.com/doc/82final/5601/5601pro_004.html
|
||||
(HP OpenVMS systems documentation) and the alpha reference manual.
|
||||
*/
|
||||
|
||||
/*
|
||||
NB: The most natural thing would be to write the increment/decrement
|
||||
operators along the following lines:
|
||||
|
||||
__asm__ __volatile__(
|
||||
"1: ldl_l %0,%1 \n"
|
||||
"addl %0,1,%0 \n"
|
||||
"stl_c %0,%1 \n"
|
||||
"beq %0,1b\n"
|
||||
: "=&b" (tmp)
|
||||
: "m" (value)
|
||||
: "cc"
|
||||
);
|
||||
|
||||
However according to the comments on the HP website and matching
|
||||
comments in the Linux kernel sources this defies branch prediction,
|
||||
as the cpu assumes that backward branches are always taken; so
|
||||
instead copy the trick from the Linux kernel, introduce a forward
|
||||
branch and back again.
|
||||
|
||||
I have, however, had a hard time measuring the difference between
|
||||
the two versions in microbenchmarks -- I am leaving it in nevertheless
|
||||
as it apparently does not hurt either.
|
||||
*/
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
/* Emit an Alpha "mb" (memory barrier) before the access for every
 * ordering that constrains prior memory operations; relaxed and
 * acquire need no leading barrier. */
inline void fence_before(memory_order order)
{
    switch(order) {
    case memory_order_relaxed:
    case memory_order_acquire:
        break;
    default:
        /* consume, release, acq_rel, seq_cst */
        __asm__ __volatile__ ("mb" ::: "memory");
    }
}
|
||||
|
||||
/* Emit an Alpha "mb" after the access for every ordering that
 * constrains subsequent memory operations: acquire, acq_rel, seq_cst.
 * Relaxed, consume and release need no trailing barrier here. */
inline void fence_after(memory_order order)
{
    switch(order) {
    case memory_order_acquire:
    case memory_order_acq_rel:
    case memory_order_seq_cst:
        __asm__ __volatile__ ("mb" ::: "memory");
        break;
    default:
        break;
    }
}
|
||||
|
||||
template<>
|
||||
inline void platform_atomic_thread_fence(memory_order order)
|
||||
{
|
||||
switch(order) {
|
||||
case memory_order_acquire:
|
||||
case memory_order_consume:
|
||||
case memory_order_release:
|
||||
case memory_order_acq_rel:
|
||||
case memory_order_seq_cst:
|
||||
__asm__ __volatile__ ("mb" ::: "memory");
|
||||
default:;
|
||||
}
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
class atomic_alpha_32 {
|
||||
public:
|
||||
typedef T integral_type;
|
||||
explicit atomic_alpha_32(T v) : i(v) {}
|
||||
atomic_alpha_32() {}
|
||||
T load(memory_order order=memory_order_seq_cst) const volatile
|
||||
{
|
||||
T v=*reinterpret_cast<volatile const int *>(&i);
|
||||
fence_after(order);
|
||||
return v;
|
||||
}
|
||||
void store(T v, memory_order order=memory_order_seq_cst) volatile
|
||||
{
|
||||
fence_before(order);
|
||||
*reinterpret_cast<volatile int *>(&i)=(int)v;
|
||||
}
|
||||
bool compare_exchange_weak(
|
||||
T &expected,
|
||||
T desired,
|
||||
memory_order success_order,
|
||||
memory_order failure_order) volatile
|
||||
{
|
||||
fence_before(success_order);
|
||||
int current, success;
|
||||
__asm__ __volatile__(
|
||||
"1: ldl_l %2, %4\n"
|
||||
"cmpeq %2, %0, %3\n"
|
||||
"mov %2, %0\n"
|
||||
"beq %3, 3f\n"
|
||||
"stl_c %1, %4\n"
|
||||
"2:\n"
|
||||
|
||||
".subsection 2\n"
|
||||
"3: mov %3, %1\n"
|
||||
"br 2b\n"
|
||||
".previous\n"
|
||||
|
||||
: "+&r" (expected), "+&r" (desired), "=&r"(current), "=&r"(success)
|
||||
: "m" (i)
|
||||
:
|
||||
);
|
||||
if (desired) fence_after(success_order);
|
||||
else fence_after(failure_order);
|
||||
return desired;
|
||||
}
|
||||
|
||||
bool is_lock_free(void) const volatile {return true;}
|
||||
protected:
|
||||
inline T fetch_add_var(T c, memory_order order) volatile
|
||||
{
|
||||
fence_before(order);
|
||||
T original, modified;
|
||||
__asm__ __volatile__(
|
||||
"1: ldl_l %0, %2\n"
|
||||
"addl %0, %3, %1\n"
|
||||
"stl_c %1, %2\n"
|
||||
"beq %1, 2f\n"
|
||||
|
||||
".subsection 2\n"
|
||||
"2: br 1b\n"
|
||||
".previous\n"
|
||||
|
||||
: "=&r" (original), "=&r" (modified)
|
||||
: "m" (i), "r" (c)
|
||||
:
|
||||
);
|
||||
fence_after(order);
|
||||
return original;
|
||||
}
|
||||
inline T fetch_inc(memory_order order) volatile
|
||||
{
|
||||
fence_before(order);
|
||||
int original, modified;
|
||||
__asm__ __volatile__(
|
||||
"1: ldl_l %0, %2\n"
|
||||
"addl %0, 1, %1\n"
|
||||
"stl_c %1, %2\n"
|
||||
"beq %1, 2f\n"
|
||||
|
||||
".subsection 2\n"
|
||||
"2: br 1b\n"
|
||||
".previous\n"
|
||||
|
||||
: "=&r" (original), "=&r" (modified)
|
||||
: "m" (i)
|
||||
:
|
||||
);
|
||||
fence_after(order);
|
||||
return original;
|
||||
}
|
||||
inline T fetch_dec(memory_order order) volatile
|
||||
{
|
||||
fence_before(order);
|
||||
int original, modified;
|
||||
__asm__ __volatile__(
|
||||
"1: ldl_l %0, %2\n"
|
||||
"subl %0, 1, %1\n"
|
||||
"stl_c %1, %2\n"
|
||||
"beq %1, 2f\n"
|
||||
|
||||
".subsection 2\n"
|
||||
"2: br 1b\n"
|
||||
".previous\n"
|
||||
|
||||
: "=&r" (original), "=&r" (modified)
|
||||
: "m" (i)
|
||||
:
|
||||
);
|
||||
fence_after(order);
|
||||
return original;
|
||||
}
|
||||
private:
|
||||
T i;
|
||||
};
|
||||
|
||||
template<typename T>
class atomic_alpha_64 {
public:
    typedef T integral_type;
    explicit atomic_alpha_64(T v) : i(v) {}
    atomic_alpha_64() {}
    // Plain 64-bit load; ordering is supplied solely by the trailing fence.
    T load(memory_order order=memory_order_seq_cst) const volatile
    {
        T v=*reinterpret_cast<volatile const T *>(&i);
        fence_after(order);
        return v;
    }
    // Plain 64-bit store, preceded by the required fence.
    void store(T v, memory_order order=memory_order_seq_cst) volatile
    {
        fence_before(order);
        *reinterpret_cast<volatile T *>(&i)=v;
    }
    // Compare-exchange via Alpha load-locked/store-conditional (ldq_l/stq_c).
    // May fail spuriously when the reservation is lost, hence "weak".
    // Operand map: %0=expected (in/out), %1=desired in / success flag out,
    // %2=value observed in memory, %3=cmpeq result, %4=the cell 'i'.
    // NOTE(review): 'current'/'success' are declared int although the cell is
    // 64-bit; they only carry transient register values, but int64_t would be
    // clearer — confirm intent.
    bool compare_exchange_weak(
        T &expected,
        T desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        fence_before(success_order);
        int current, success;
        __asm__ __volatile__(
            "1: ldq_l %2, %4\n"     // current = i (locked load)
            "cmpeq %2, %0, %3\n"    // %3 = (current == expected)
            "mov %2, %0\n"          // report the observed value back to caller
            "beq %3, 3f\n"          // mismatch -> take failure path
            "stq_c %1, %4\n"        // try the store; %1 <- 1 on success, 0 if reservation lost
            "2:\n"

            ".subsection 2\n"
            "3: mov %3, %1\n"       // failure path: %1 <- 0
            "br 2b\n"
            ".previous\n"

            : "+&r" (expected), "+&r" (desired), "=&r"(current), "=&r"(success)
            : "m" (i)
            :
        );
        // After the asm block 'desired' holds the success flag (see map above).
        if (desired) fence_after(success_order);
        else fence_after(failure_order);
        return desired;
    }

    bool is_lock_free(void) const volatile {return true;}
protected:
    // fetch-and-add of an arbitrary value via an LL/SC retry loop; returns
    // the pre-add value.  The retry branch lives in .subsection 2 so the
    // common (no-contention) path stays straight-line.
    inline T fetch_add_var(T c, memory_order order) volatile
    {
        fence_before(order);
        T original, modified;
        __asm__ __volatile__(
            "1: ldq_l %0, %2\n"     // original = i (locked load)
            "addq %0, %3, %1\n"     // modified = original + c
            "stq_c %1, %2\n"        // conditional store; %1 <- 0 on failure
            "beq %1, 2f\n"          // lost reservation -> retry

            ".subsection 2\n"
            "2: br 1b\n"
            ".previous\n"

            : "=&r" (original), "=&r" (modified)
            : "m" (i), "r" (c)
            :
        );
        fence_after(order);
        return original;
    }
    // Specialized increment (add immediate 1); returns the pre-increment value.
    inline T fetch_inc(memory_order order) volatile
    {
        fence_before(order);
        T original, modified;
        __asm__ __volatile__(
            "1: ldq_l %0, %2\n"
            "addq %0, 1, %1\n"
            "stq_c %1, %2\n"
            "beq %1, 2f\n"

            ".subsection 2\n"
            "2: br 1b\n"
            ".previous\n"

            : "=&r" (original), "=&r" (modified)
            : "m" (i)
            :
        );
        fence_after(order);
        return original;
    }
    // Specialized decrement (subtract immediate 1); returns the pre-decrement value.
    inline T fetch_dec(memory_order order) volatile
    {
        fence_before(order);
        T original, modified;
        __asm__ __volatile__(
            "1: ldq_l %0, %2\n"
            "subq %0, 1, %1\n"
            "stq_c %1, %2\n"
            "beq %1, 2f\n"

            ".subsection 2\n"
            "2: br 1b\n"
            ".previous\n"

            : "=&r" (original), "=&r" (modified)
            : "m" (i)
            :
        );
        fence_after(order);
        return original;
    }
private:
    T i;   // the atomic cell itself; always accessed through volatile casts or asm
};
|
||||
|
||||
// 4-byte integral types map directly onto the 32-bit Alpha LL/SC primitive;
// the builder templates derive the full atomic interface from the "typical"
// operations that atomic_alpha_32 provides.
template<typename T>
class platform_atomic_integral<T, 4> : public build_atomic_from_typical<build_exchange<atomic_alpha_32<T> > > {
public:
    typedef build_atomic_from_typical<build_exchange<atomic_alpha_32<T> > > super;
    explicit platform_atomic_integral(T v) : super(v) {}
    platform_atomic_integral(void) {}
};
|
||||
|
||||
// 8-byte integral types map onto the 64-bit Alpha LL/SC primitive.
template<typename T>
class platform_atomic_integral<T, 8> : public build_atomic_from_typical<build_exchange<atomic_alpha_64<T> > > {
public:
    typedef build_atomic_from_typical<build_exchange<atomic_alpha_64<T> > > super;
    explicit platform_atomic_integral(T v) : super(v) {}
    platform_atomic_integral(void) {}
};
|
||||
|
||||
// 1-byte types have no native LL/SC width on this target; they are emulated
// by embedding the value inside a 32-bit atomic cell.
template<typename T>
class platform_atomic_integral<T, 1>: public build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> {
public:
    typedef build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> super;

    explicit platform_atomic_integral(T v) : super(v) {}
    platform_atomic_integral(void) {}
};
|
||||
|
||||
// 2-byte types are likewise emulated inside a 32-bit atomic cell.
template<typename T>
class platform_atomic_integral<T, 2>: public build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> {
public:
    typedef build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> super;

    explicit platform_atomic_integral(T v) : super(v) {}
    platform_atomic_integral(void) {}
};
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -0,0 +1,250 @@
|
||||
#ifndef BOOST_ATOMIC_DETAIL_GCC_ARMV6PLUS_HPP
|
||||
#define BOOST_ATOMIC_DETAIL_GCC_ARMV6PLUS_HPP
|
||||
|
||||
// Distributed under the Boost Software License, Version 1.0.
|
||||
// See accompanying file LICENSE_1_0.txt or copy at
|
||||
// http://www.boost.org/LICENSE_1_0.txt)
|
||||
//
|
||||
// Copyright (c) 2009 Helge Bahmann
|
||||
// Copyright (c) 2009 Phil Endecott
|
||||
// ARM Code by Phil Endecott, based on other architectures.
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/cstdint.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
|
||||
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
// From the ARM Architecture Reference Manual for architecture v6:
|
||||
//
|
||||
// LDREX{<cond>} <Rd>, [<Rn>]
|
||||
// <Rd> Specifies the destination register for the memory word addressed by <Rd>
|
||||
// <Rn> Specifies the register containing the address.
|
||||
//
|
||||
// STREX{<cond>} <Rd>, <Rm>, [<Rn>]
|
||||
// <Rd> Specifies the destination register for the returned status value.
|
||||
// 0 if the operation updates memory
|
||||
// 1 if the operation fails to update memory
|
||||
// <Rm> Specifies the register containing the word to be stored to memory.
|
||||
// <Rn> Specifies the register containing the address.
|
||||
// Rd must not be the same register as Rm or Rn.
|
||||
//
|
||||
// ARM v7 is like ARM v6 plus:
|
||||
// There are half-word and byte versions of the LDREX and STREX instructions,
|
||||
// LDREXH, LDREXB, STREXH and STREXB.
|
||||
// There are also double-word versions, LDREXD and STREXD.
|
||||
// (Actually it looks like these are available from version 6k onwards.)
|
||||
// FIXME these are not yet used; should be mostly a matter of copy-and-paste.
|
||||
// I think you can supply an immediate offset to the address.
|
||||
//
|
||||
// A memory barrier is effected using a "co-processor 15" instruction,
|
||||
// though a separate assembler mnemonic is available for it in v7.
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
// "Thumb 1" is a subset of the ARM instruction set that uses a 16-bit encoding. It
|
||||
// doesn't include all instructions and in particular it doesn't include the co-processor
|
||||
// instruction used for the memory barrier or the load-locked/store-conditional
|
||||
// instructions. So, if we're compiling in "Thumb 1" mode, we need to wrap all of our
|
||||
// asm blocks with code to temporarily change to ARM mode.
|
||||
//
|
||||
// You can only change between ARM and Thumb modes when branching using the bx instruction.
|
||||
// bx takes an address specified in a register. The least significant bit of the address
|
||||
// indicates the mode, so 1 is added to indicate that the destination code is Thumb.
|
||||
// A temporary register is needed for the address and is passed as an argument to these
|
||||
// macros. It must be one of the "low" registers accessible to Thumb code, specified
|
||||
// using the "l" attribute in the asm statement.
|
||||
//
|
||||
// Architecture v7 introduces "Thumb 2", which does include (almost?) all of the ARM
|
||||
// instruction set. So in v7 we don't need to change to ARM mode; we can write "universal
|
||||
// assembler" which will assemble to Thumb 2 or ARM code as appropriate. The only thing
|
||||
// we need to do to make this "universal" assembler mode work is to insert "IT" instructions
|
||||
// to annotate the conditional instructions. These are ignored in other modes (e.g. v6),
|
||||
// so they can always be present.
|
||||
|
||||
#if defined(__thumb__) && !defined(__ARM_ARCH_7A__)
|
||||
// FIXME also other v7 variants.
|
||||
#define BOOST_ATOMIC_ARM_ASM_START(TMPREG) "adr " #TMPREG ", 1f\n" "bx " #TMPREG "\n" ".arm\n" ".align 4\n" "1: "
|
||||
#define BOOST_ATOMIC_ARM_ASM_END(TMPREG) "adr " #TMPREG ", 1f + 1\n" "bx " #TMPREG "\n" ".thumb\n" ".align 2\n" "1: "
|
||||
|
||||
#else
|
||||
// The tmpreg is wasted in this case, which is non-optimal.
|
||||
#define BOOST_ATOMIC_ARM_ASM_START(TMPREG)
|
||||
#define BOOST_ATOMIC_ARM_ASM_END(TMPREG)
|
||||
#endif
|
||||
|
||||
#if defined(__ARM_ARCH_7A__)
|
||||
// FIXME ditto.
|
||||
#define BOOST_ATOMIC_ARM_DMB "dmb\n"
|
||||
#else
|
||||
#define BOOST_ATOMIC_ARM_DMB "mcr\tp15, 0, r0, c7, c10, 5\n"
|
||||
#endif
|
||||
|
||||
// Issue a full hardware memory barrier.  The instruction comes from
// BOOST_ATOMIC_ARM_DMB ("dmb" on v7, the CP15 c7/c10/5 encoding otherwise).
// The ASM_START/END macros temporarily switch to ARM mode when compiling in
// Thumb-1 (which lacks the barrier encoding) and need a scratch "low"
// register for the bx target address — that is what 'brtmp' ("l" constraint)
// provides; it is unused when the macros expand empty.
inline void
arm_barrier(void)
{
    int brtmp;
    __asm__ __volatile__ (
        BOOST_ATOMIC_ARM_ASM_START(%0)
        BOOST_ATOMIC_ARM_DMB
        BOOST_ATOMIC_ARM_ASM_END(%0)
        : "=&l" (brtmp) :: "memory"
    );
}
|
||||
|
||||
inline void
|
||||
platform_fence_before(memory_order order)
|
||||
{
|
||||
switch(order) {
|
||||
case memory_order_release:
|
||||
case memory_order_acq_rel:
|
||||
case memory_order_seq_cst:
|
||||
arm_barrier();
|
||||
case memory_order_consume:
|
||||
default:;
|
||||
}
|
||||
}
|
||||
|
||||
inline void
|
||||
platform_fence_after(memory_order order)
|
||||
{
|
||||
switch(order) {
|
||||
case memory_order_acquire:
|
||||
case memory_order_acq_rel:
|
||||
case memory_order_seq_cst:
|
||||
arm_barrier();
|
||||
default:;
|
||||
}
|
||||
}
|
||||
|
||||
// A store needs the same leading discipline as any other atomic operation.
inline void
platform_fence_before_store(memory_order order)
{
    platform_fence_before(order);
}
|
||||
|
||||
// Only a sequentially consistent store requires a trailing hardware barrier.
inline void
platform_fence_after_store(memory_order order)
{
    if (order == memory_order_seq_cst)
        arm_barrier();
}
|
||||
|
||||
// A load needs the same trailing discipline as any other atomic operation.
inline void
platform_fence_after_load(memory_order order)
{
    platform_fence_after(order);
}
|
||||
|
||||
// One LDREX/STREX compare-exchange attempt (weak CAS): returns true if *ptr
// was equal to 'expected' and was replaced by 'desired'; on return 'expected'
// holds the value actually observed.  May fail spuriously (STREX can lose its
// exclusive reservation), so callers loop.  No fences here — callers bracket
// this with platform_fence_before/after.  The "ittt eq" makes the three
// conditional instructions valid in Thumb-2; it is ignored on pre-v7 cores.
template<typename T>
inline bool
platform_cmpxchg32(T & expected, T desired, volatile T * ptr)
{
    int success;
    int tmp;                        // scratch low register, also the bx temp for the mode-switch macros
    __asm__ (
        BOOST_ATOMIC_ARM_ASM_START(%2)
        "mov %1, #0\n"              // success = 0
        "ldrex %0, %3\n"            // expected' = *(&i)
        "teq %0, %4\n"              // flags = expected'==expected
        "ittt eq\n"
        "strexeq %2, %5, %3\n"      // if (flags.equal) *(&i) = desired, tmp = !OK
        "teqeq %2, #0\n"            // if (flags.equal) flags = tmp==0
        "moveq %1, #1\n"            // if (flags.equal) success = 1
        BOOST_ATOMIC_ARM_ASM_END(%2)
        : "=&r" (expected),  // %0
        "=&r" (success),  // %1
        "=&l" (tmp),  // %2
        "+Q" (*ptr)  // %3
        : "r" (expected),  // %4
        "r" (desired)  // %5
        : "cc"
    );
    return success;
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
#define BOOST_ATOMIC_THREAD_FENCE 2
|
||||
inline void
|
||||
atomic_thread_fence(memory_order order)
|
||||
{
|
||||
switch(order) {
|
||||
case memory_order_acquire:
|
||||
case memory_order_release:
|
||||
case memory_order_acq_rel:
|
||||
case memory_order_seq_cst:
|
||||
atomics::detail::arm_barrier();
|
||||
default:;
|
||||
}
|
||||
}
|
||||
|
||||
#define BOOST_ATOMIC_SIGNAL_FENCE 2
|
||||
// Signal fence: a compiler-only barrier.  The empty asm with a "memory"
// clobber prevents the compiler from reordering memory accesses across this
// point; no hardware instruction is emitted.
inline void
atomic_signal_fence(memory_order)
{
    __asm__ __volatile__ ("" ::: "memory");
}
|
||||
|
||||
class atomic_flag {
|
||||
private:
|
||||
atomic_flag(const atomic_flag &) /* = delete */ ;
|
||||
atomic_flag & operator=(const atomic_flag &) /* = delete */ ;
|
||||
uint32_t v_;
|
||||
public:
|
||||
atomic_flag(void) : v_(false) {}
|
||||
|
||||
void
|
||||
clear(memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
atomics::detail::platform_fence_before_store(order);
|
||||
const_cast<volatile uint32_t &>(v_) = 0;
|
||||
atomics::detail::platform_fence_after_store(order);
|
||||
}
|
||||
|
||||
bool
|
||||
test_and_set(memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
atomics::detail::platform_fence_before(order);
|
||||
uint32_t expected = v_;
|
||||
do {
|
||||
if (expected == 1)
|
||||
break;
|
||||
} while (!atomics::detail::platform_cmpxchg32(expected, (uint32_t)1, &v_));
|
||||
atomics::detail::platform_fence_after(order);
|
||||
return expected;
|
||||
}
|
||||
};
|
||||
#define BOOST_ATOMIC_FLAG_LOCK_FREE 2
|
||||
|
||||
}
|
||||
|
||||
#undef BOOST_ATOMIC_ARM_ASM_START
|
||||
#undef BOOST_ATOMIC_ARM_ASM_END
|
||||
|
||||
#include <boost/atomic/detail/base.hpp>
|
||||
|
||||
#if !defined(BOOST_ATOMIC_FORCE_FALLBACK)
|
||||
|
||||
#define BOOST_ATOMIC_CHAR_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_SHORT_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_LONG_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
|
||||
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_BOOL_LOCK_FREE 2
|
||||
|
||||
#include <boost/atomic/detail/cas32weak.hpp>
|
||||
|
||||
#endif /* !defined(BOOST_ATOMIC_FORCE_FALLBACK) */
|
||||
|
||||
#endif
|
||||
|
||||
@@ -0,0 +1,155 @@
|
||||
// Copyright (c) 2011 Helge Bahmann
|
||||
//
|
||||
// Distributed under the Boost Software License, Version 1.0.
|
||||
// See accompanying file LICENSE_1_0.txt or copy at
|
||||
// http://www.boost.org/LICENSE_1_0.txt)
|
||||
|
||||
// Use the gnu builtin __sync_val_compare_and_swap to build
|
||||
// atomic operations for 32 bit and smaller.
|
||||
|
||||
#ifndef BOOST_ATOMIC_DETAIL_GENERIC_CAS_HPP
|
||||
#define BOOST_ATOMIC_DETAIL_GENERIC_CAS_HPP
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/cstdint.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
|
||||
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
|
||||
#define BOOST_ATOMIC_THREAD_FENCE 2
|
||||
inline void
|
||||
atomic_thread_fence(memory_order order)
|
||||
{
|
||||
switch(order) {
|
||||
case memory_order_relaxed:
|
||||
break;
|
||||
case memory_order_release:
|
||||
case memory_order_consume:
|
||||
case memory_order_acquire:
|
||||
case memory_order_acq_rel:
|
||||
case memory_order_seq_cst:
|
||||
__sync_synchronize();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
// Intentionally a no-op: __sync_val_compare_and_swap is itself a full
// barrier, so no additional leading fence is required.
inline void
platform_fence_before(memory_order)
{
    /* empty, as compare_and_swap is synchronizing already */
}
|
||||
|
||||
// Intentionally a no-op: the CAS primitive is already fully fenced.
inline void
platform_fence_after(memory_order)
{
    /* empty, as compare_and_swap is synchronizing already */
}
|
||||
|
||||
inline void
|
||||
platform_fence_before_store(memory_order order)
|
||||
{
|
||||
switch(order) {
|
||||
case memory_order_relaxed:
|
||||
case memory_order_acquire:
|
||||
case memory_order_consume:
|
||||
break;
|
||||
case memory_order_release:
|
||||
case memory_order_acq_rel:
|
||||
case memory_order_seq_cst:
|
||||
__sync_synchronize();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Only a sequentially consistent store requires a trailing full barrier.
inline void
platform_fence_after_store(memory_order order)
{
    if (order == memory_order_seq_cst)
        __sync_synchronize();
}
|
||||
|
||||
inline void
|
||||
platform_fence_after_load(memory_order order)
|
||||
{
|
||||
switch(order) {
|
||||
case memory_order_relaxed:
|
||||
case memory_order_release:
|
||||
break;
|
||||
case memory_order_consume:
|
||||
case memory_order_acquire:
|
||||
case memory_order_acq_rel:
|
||||
case memory_order_seq_cst:
|
||||
__sync_synchronize();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Strong 32-bit compare-exchange built on gcc's fully-fenced
// __sync_val_compare_and_swap.  Returns true when *ptr equalled 'expected'
// and was replaced by 'desired'; in every case 'expected' is updated to the
// value actually found in memory.
template<typename T>
inline bool
platform_cmpxchg32_strong(T & expected, T desired, volatile T * ptr)
{
    const T previous = __sync_val_compare_and_swap(ptr, expected, desired);
    const bool swapped = (previous == expected);
    expected = previous;
    return swapped;
}
|
||||
|
||||
class atomic_flag {
|
||||
private:
|
||||
atomic_flag(const atomic_flag &) /* = delete */ ;
|
||||
atomic_flag & operator=(const atomic_flag &) /* = delete */ ;
|
||||
uint32_t v_;
|
||||
public:
|
||||
atomic_flag(void) : v_(false) {}
|
||||
|
||||
void
|
||||
clear(memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
atomics::detail::platform_fence_before_store(order);
|
||||
const_cast<volatile uint32_t &>(v_) = 0;
|
||||
atomics::detail::platform_fence_after_store(order);
|
||||
}
|
||||
|
||||
bool
|
||||
test_and_set(memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
atomics::detail::platform_fence_before(order);
|
||||
uint32_t expected = v_;
|
||||
do {
|
||||
if (expected == 1)
|
||||
break;
|
||||
} while (!atomics::detail::platform_cmpxchg32(expected, (uint32_t)1, &v_));
|
||||
atomics::detail::platform_fence_after(order);
|
||||
return expected;
|
||||
}
|
||||
};
|
||||
#define BOOST_ATOMIC_FLAG_LOCK_FREE 2
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#include <boost/atomic/detail/base.hpp>
|
||||
|
||||
#if !defined(BOOST_ATOMIC_FORCE_FALLBACK)
|
||||
|
||||
#define BOOST_ATOMIC_CHAR_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_SHORT_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_LONG_LOCK_FREE (sizeof(long) <= 4 ? 2 : 0)
|
||||
#define BOOST_ATOMIC_LLONG_LOCK_FREE (sizeof(long long) <= 4 ? 2 : 0)
|
||||
#define BOOST_ATOMIC_POINTER_LOCK_FREE (sizeof(void *) <= 4 ? 2 : 0)
|
||||
#define BOOST_ATOMIC_BOOL_LOCK_FREE 2
|
||||
|
||||
#include <boost/atomic/detail/cas32strong.hpp>
|
||||
|
||||
#endif /* !defined(BOOST_ATOMIC_FORCE_FALLBACK) */
|
||||
|
||||
#endif
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,199 @@
|
||||
#ifndef BOOST_ATOMIC_DETAIL_GENERIC_CAS_HPP
|
||||
#define BOOST_ATOMIC_DETAIL_GENERIC_CAS_HPP
|
||||
|
||||
// Copyright (c) 2009 Helge Bahmann
|
||||
//
|
||||
// Distributed under the Boost Software License, Version 1.0.
|
||||
// See accompanying file LICENSE_1_0.txt or copy at
|
||||
// http://www.boost.org/LICENSE_1_0.txt)
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/cstdint.hpp>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/atomic/detail/base.hpp>
|
||||
#include <boost/atomic/detail/builder.hpp>
|
||||
|
||||
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
/* fallback implementation for various compilation targets;
|
||||
this is *not* efficient, particularly because all operations
|
||||
are fully fenced (full memory barriers before and after
|
||||
each operation) */
|
||||
|
||||
#if defined(__GNUC__)
|
||||
namespace boost { namespace atomics { namespace detail {
|
||||
// 32-bit fenced CAS via gcc's __sync builtin (full barrier on both sides).
// Returns the value previously stored at *ptr; the swap happened iff that
// value equals 'expected'.
inline int32_t
fenced_compare_exchange_strong_32(volatile int32_t *ptr, int32_t expected, int32_t desired)
{
    return __sync_val_compare_and_swap_4(ptr, expected, desired);
}
|
||||
#define BOOST_ATOMIC_HAVE_CAS32 1
|
||||
|
||||
#if defined(__amd64__) || defined(__i686__)
|
||||
// 64-bit fenced CAS via gcc's __sync builtin (amd64/i686 only).
// NOTE(review): unlike the 32-bit variant above, 'ptr' is not
// volatile-qualified here — confirm whether that asymmetry is intentional.
inline int64_t
fenced_compare_exchange_strong_64(int64_t *ptr, int64_t expected, int64_t desired)
{
    return __sync_val_compare_and_swap_8(ptr, expected, desired);
}
|
||||
#define BOOST_ATOMIC_HAVE_CAS64 1
|
||||
#endif
|
||||
}}}
|
||||
|
||||
#elif defined(__ICL) || defined(_MSC_VER)
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
#include <Windows.h>
|
||||
#include <intrin.h>
|
||||
#endif
|
||||
|
||||
namespace boost { namespace atomics { namespace detail {
|
||||
inline int32_t
|
||||
fenced_compare_exchange_strong(int32_t *ptr, int32_t expected, int32_t desired)
|
||||
{
|
||||
return _InterlockedCompareExchange(reinterpret_cast<volatile long*>(ptr), desired, expected);
|
||||
}
|
||||
#define BOOST_ATOMIC_HAVE_CAS32 1
|
||||
#if defined(_WIN64)
|
||||
// 64-bit fenced CAS (only compiled under _WIN64).  Intrinsic argument order
// is (destination, exchange, comparand); returns the previous value.
inline int64_t
fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired)
{
    return _InterlockedCompareExchange64(ptr, desired, expected);
}
|
||||
#define BOOST_ATOMIC_HAVE_CAS64 1
|
||||
#endif
|
||||
}}}
|
||||
|
||||
#elif (defined(__ICC) || defined(__ECC))
|
||||
namespace boost { namespace atomics { namespace detail {
|
||||
// 32-bit fenced CAS on the Intel compilers.
// NOTE(review): the (void*) cast of 'ptr' looks suspect — the other branches
// cast to (volatile) long*; verify against the Intel intrinsic's signature.
inline int32_t
fenced_compare_exchange_strong_32(int32_t *ptr, int32_t expected, int32_t desired)
{
    return _InterlockedCompareExchange((void*)ptr, desired, expected);
}
|
||||
#define BOOST_ATOMIC_HAVE_CAS32 1
|
||||
#if defined(__x86_64)
|
||||
// 64-bit fenced CAS for ICC on x86_64.
// NOTE(review): cas64<int> is not defined anywhere in this header, so this
// branch cannot compile as written — confirm the intended intrinsic
// (presumably _InterlockedCompareExchange64) before relying on it.
inline int64_t
fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired)
{
    return cas64<int>(ptr, expected, desired);
}
|
||||
#define BOOST_ATOMIC_HAVE_CAS64 1
|
||||
#elif defined(__ECC) //IA-64 version
|
||||
// 64-bit fenced CAS on the IA-64 Intel compiler (__ECC).
// NOTE(review): as with the 32-bit ICC variant, the (void*) cast of 'ptr'
// should be checked against the intrinsic's declared parameter type.
inline int64_t
fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired)
{
    return _InterlockedCompareExchange64((void*)ptr, desired, expected);
}
|
||||
#define BOOST_ATOMIC_HAVE_CAS64 1
|
||||
#endif
|
||||
}}}
|
||||
|
||||
#elif (defined(__SUNPRO_CC) && defined(__sparc))
|
||||
#include <sys/atomic.h>
|
||||
namespace boost { namespace atomics { namespace detail {
|
||||
// 32-bit fenced CAS on Solaris/SPARC via <sys/atomic.h>; atomic_cas_32
// returns the old value at *ptr.
inline int32_t
fenced_compare_exchange_strong_32(int32_t *ptr, int32_t expected, int32_t desired)
{
    return atomic_cas_32((volatile unsigned int*)ptr, expected, desired);
}
|
||||
#define BOOST_ATOMIC_HAVE_CAS32 1
|
||||
|
||||
/* FIXME: check for 64 bit mode */
|
||||
// 64-bit fenced CAS on Solaris/SPARC.
// FIXME (carried from original): availability in 32-bit mode is unverified.
inline int64_t
fenced_compare_exchange_strong_64(int64_t *ptr, int64_t expected, int64_t desired)
{
    return atomic_cas_64((volatile unsigned long long*)ptr, expected, desired);
}
|
||||
#define BOOST_ATOMIC_HAVE_CAS64 1
|
||||
}}}
|
||||
#endif
|
||||
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
#ifdef BOOST_ATOMIC_HAVE_CAS32
|
||||
// Generic 32-bit atomic built purely on the platform's fenced strong CAS:
// load, store and exchange are all emulated with CAS loops.  Inefficient by
// design (every operation is fully fenced) but portable.
template<typename T>
class atomic_generic_cas32 {
private:
    typedef atomic_generic_cas32 this_type;
public:
    explicit atomic_generic_cas32(T v) : i((int32_t)v) {}
    atomic_generic_cas32() {}
    // Emulated load: CAS the current value with itself until it sticks,
    // which yields an atomic snapshot of the cell.
    T load(memory_order order=memory_order_seq_cst) const volatile
    {
        T expected=(T)i;
        do { } while(!const_cast<this_type *>(this)->compare_exchange_weak(expected, expected, order, memory_order_relaxed));
        return expected;
    }
    // Emulated store: exchange and discard the previous value.
    void store(T v, memory_order order=memory_order_seq_cst) volatile
    {
        exchange(v);
    }
    // Strong CAS; the order parameters are ignored because the underlying
    // primitive is always fully fenced.  'expected' is updated to the value
    // actually found.
    bool compare_exchange_strong(
        T &expected,
        T desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        T found;
        found=(T)fenced_compare_exchange_strong_32(&i, (int32_t)expected, (int32_t)desired);
        bool success=(found==expected);
        expected=found;
        return success;
    }
    // Weak CAS simply forwards to the strong one (no spurious failures).
    bool compare_exchange_weak(
        T &expected,
        T desired,
        memory_order success_order,
        memory_order failure_order) volatile
    {
        return compare_exchange_strong(expected, desired, success_order, failure_order);
    }
    // Emulated exchange: retry CAS until the swap succeeds; returns the
    // previous value.
    T exchange(T r, memory_order order=memory_order_seq_cst) volatile
    {
        T expected=(T)i;
        do { } while(!compare_exchange_weak(expected, r, order, memory_order_relaxed));
        return expected;
    }

    bool is_lock_free(void) const volatile {return true;}
    typedef T integral_type;
private:
    // mutable so that the CAS-based load() can operate through a const this.
    mutable int32_t i;
};
|
||||
|
||||
// 4-byte integral types map directly onto the generic 32-bit CAS atomic.
template<typename T>
class platform_atomic_integral<T, 4> : public build_atomic_from_exchange<atomic_generic_cas32<T> > {
public:
    typedef build_atomic_from_exchange<atomic_generic_cas32<T> > super;
    explicit platform_atomic_integral(T v) : super(v) {}
    platform_atomic_integral(void) {}
};
|
||||
|
||||
// 1-byte types are emulated by embedding the value in a 32-bit CAS atomic.
template<typename T>
class platform_atomic_integral<T, 1>: public build_atomic_from_larger_type<atomic_generic_cas32<int32_t>, T> {
public:
    typedef build_atomic_from_larger_type<atomic_generic_cas32<int32_t>, T> super;

    explicit platform_atomic_integral(T v) : super(v) {}
    platform_atomic_integral(void) {}
};
|
||||
|
||||
// 2-byte types are likewise emulated inside a 32-bit CAS atomic.
template<typename T>
class platform_atomic_integral<T, 2>: public build_atomic_from_larger_type<atomic_generic_cas32<int32_t>, T> {
public:
    typedef build_atomic_from_larger_type<atomic_generic_cas32<int32_t>, T> super;

    explicit platform_atomic_integral(T v) : super(v) {}
    platform_atomic_integral(void) {}
};
|
||||
#endif
|
||||
|
||||
} } }
|
||||
|
||||
#endif
|
||||
@@ -0,0 +1,206 @@
|
||||
#ifndef BOOST_ATOMIC_DETAIL_INTERLOCKED_HPP
|
||||
#define BOOST_ATOMIC_DETAIL_INTERLOCKED_HPP
|
||||
|
||||
// Copyright (c) 2009 Helge Bahmann
|
||||
// Copyright (c) 2012 Andrey Semashev
|
||||
//
|
||||
// Distributed under the Boost Software License, Version 1.0.
|
||||
// See accompanying file LICENSE_1_0.txt or copy at
|
||||
// http://www.boost.org/LICENSE_1_0.txt)
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
|
||||
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#if defined(_WIN32_WCE)
|
||||
|
||||
#include <boost/detail/interlocked.hpp>
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) BOOST_INTERLOCKED_COMPARE_EXCHANGE((long*)(dest), exchange, compare)
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) BOOST_INTERLOCKED_EXCHANGE((long*)(dest), newval)
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) BOOST_INTERLOCKED_EXCHANGE_ADD((long*)(dest), addend)
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) BOOST_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare)
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) BOOST_INTERLOCKED_EXCHANGE_POINTER(dest, newval)
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_INTERLOCKED_EXCHANGE_ADD((long*)(dest), byte_offset))
|
||||
|
||||
#elif defined(_MSC_VER)
|
||||
|
||||
#include <intrin.h>
|
||||
|
||||
#pragma intrinsic(_InterlockedCompareExchange)
|
||||
#pragma intrinsic(_InterlockedExchangeAdd)
|
||||
#pragma intrinsic(_InterlockedExchange)
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) _InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) _InterlockedExchangeAdd((long*)(dest), (long)(addend))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) _InterlockedExchange((long*)(dest), (long)(newval))
|
||||
|
||||
#if _MSC_VER >= 1400
|
||||
|
||||
#pragma intrinsic(_InterlockedAnd)
|
||||
#pragma intrinsic(_InterlockedOr)
|
||||
#pragma intrinsic(_InterlockedXor)
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_AND(dest, arg) _InterlockedAnd((long*)(dest), (long)(arg))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_OR(dest, arg) _InterlockedOr((long*)(dest), (long)(arg))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_XOR(dest, arg) _InterlockedXor((long*)(dest), (long)(arg))
|
||||
|
||||
#endif // _MSC_VER >= 1400
|
||||
|
||||
#if _MSC_VER >= 1600
|
||||
|
||||
// MSVC 2010 and later provide intrinsics for 8 and 16 bit integers.
|
||||
// Note that for each bit count these macros must be either all defined or all not defined.
|
||||
// Otherwise atomic<> operations will be implemented inconsistently.
|
||||
|
||||
#pragma intrinsic(_InterlockedCompareExchange8)
|
||||
#pragma intrinsic(_InterlockedExchangeAdd8)
|
||||
#pragma intrinsic(_InterlockedExchange8)
|
||||
#pragma intrinsic(_InterlockedAnd8)
|
||||
#pragma intrinsic(_InterlockedOr8)
|
||||
#pragma intrinsic(_InterlockedXor8)
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(dest, exchange, compare) _InterlockedCompareExchange8((char*)(dest), (char)(exchange), (char)(compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(dest, addend) _InterlockedExchangeAdd8((char*)(dest), (char)(addend))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(dest, newval) _InterlockedExchange8((char*)(dest), (char)(newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_AND8(dest, arg) _InterlockedAnd8((char*)(dest), (char)(arg))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_OR8(dest, arg) _InterlockedOr8((char*)(dest), (char)(arg))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_XOR8(dest, arg) _InterlockedXor8((char*)(dest), (char)(arg))
|
||||
|
||||
#pragma intrinsic(_InterlockedCompareExchange16)
|
||||
#pragma intrinsic(_InterlockedExchangeAdd16)
|
||||
#pragma intrinsic(_InterlockedExchange16)
|
||||
#pragma intrinsic(_InterlockedAnd16)
|
||||
#pragma intrinsic(_InterlockedOr16)
|
||||
#pragma intrinsic(_InterlockedXor16)
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(dest, exchange, compare) _InterlockedCompareExchange16((short*)(dest), (short)(exchange), (short)(compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(dest, addend) _InterlockedExchangeAdd16((short*)(dest), (short)(addend))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(dest, newval) _InterlockedExchange16((short*)(dest), (short)(newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_AND16(dest, arg) _InterlockedAnd16((short*)(dest), (short)(arg))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_OR16(dest, arg) _InterlockedOr16((short*)(dest), (short)(arg))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_XOR16(dest, arg) _InterlockedXor16((short*)(dest), (short)(arg))
|
||||
|
||||
#endif // _MSC_VER >= 1600
|
||||
|
||||
#if defined(_M_AMD64) || defined(_M_IA64)
|
||||
|
||||
#pragma intrinsic(_InterlockedCompareExchange64)
|
||||
#pragma intrinsic(_InterlockedExchangeAdd64)
|
||||
#pragma intrinsic(_InterlockedExchange64)
|
||||
#pragma intrinsic(_InterlockedAnd64)
|
||||
#pragma intrinsic(_InterlockedOr64)
|
||||
#pragma intrinsic(_InterlockedXor64)
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) _InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) _InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) _InterlockedExchange64((__int64*)(dest), (__int64)(newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_AND64(dest, arg) _InterlockedAnd64((__int64*)(dest), (__int64)(arg))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_OR64(dest, arg) _InterlockedOr64((__int64*)(dest), (__int64)(arg))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_XOR64(dest, arg) _InterlockedXor64((__int64*)(dest), (__int64)(arg))
|
||||
|
||||
#pragma intrinsic(_InterlockedCompareExchangePointer)
|
||||
#pragma intrinsic(_InterlockedExchangePointer)
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) _InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) _InterlockedExchangePointer((void**)(dest), (void*)(newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64((long*)(dest), byte_offset))
|
||||
|
||||
#else // defined(_M_AMD64)
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)_InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare)))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)_InterlockedExchange((long*)(dest), (long)(newval)))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD((long*)(dest), byte_offset))
|
||||
|
||||
#endif // defined(_M_AMD64)
|
||||
|
||||
#else // defined(_MSC_VER)
|
||||
|
||||
#if defined(BOOST_USE_WINDOWS_H)
|
||||
|
||||
#include <windows.h>
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) InterlockedExchange((long*)(dest), (long)(newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) InterlockedExchangeAdd((long*)(dest), (long)(addend))
|
||||
|
||||
#if defined(_WIN64)
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) InterlockedExchange64((__int64*)(dest), (__int64)(newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) InterlockedExchangePointer((void**)(dest), (void*)(newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, byte_offset))
|
||||
|
||||
#else // defined(_WIN64)
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, byte_offset))
|
||||
|
||||
#endif // defined(_WIN64)
|
||||
|
||||
#else // defined(BOOST_USE_WINDOWS_H)
|
||||
|
||||
#if defined(__MINGW64__)
|
||||
#define BOOST_ATOMIC_INTERLOCKED_IMPORT
|
||||
#else
|
||||
#define BOOST_ATOMIC_INTERLOCKED_IMPORT __declspec(dllimport)
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
extern "C" {
|
||||
|
||||
BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedCompareExchange(long volatile*, long, long);
|
||||
BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedExchange(long volatile*, long);
|
||||
BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedExchangeAdd(long volatile*, long);
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) boost::atomics::detail::InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) boost::atomics::detail::InterlockedExchange((long*)(dest), (long)(newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) boost::atomics::detail::InterlockedExchangeAdd((long*)(dest), (long)(addend))
|
||||
|
||||
#if defined(_WIN64)
|
||||
|
||||
BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedCompareExchange64(__int64 volatile*, __int64, __int64);
|
||||
BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedExchange64(__int64 volatile*, __int64);
|
||||
BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedExchangeAdd64(__int64 volatile*, __int64);
|
||||
|
||||
BOOST_ATOMIC_INTERLOCKED_IMPORT void* __stdcall InterlockedCompareExchangePointer(void* volatile *, void*, void*);
|
||||
BOOST_ATOMIC_INTERLOCKED_IMPORT void* __stdcall InterlockedExchangePointer(void* volatile *, void*);
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) boost::atomics::detail::InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) boost::atomics::detail::InterlockedExchange64((__int64*)(dest), (__int64)(newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) boost::atomics::detail::InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) boost::atomics::detail::InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) boost::atomics::detail::InterlockedExchangePointer((void**)(dest), (void*)(newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, byte_offset))
|
||||
|
||||
#else // defined(_WIN64)
|
||||
|
||||
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval))
|
||||
#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, byte_offset))
|
||||
|
||||
#endif // defined(_WIN64)
|
||||
|
||||
} // extern "C"
|
||||
|
||||
} // namespace detail
|
||||
} // namespace atomics
|
||||
} // namespace boost
|
||||
|
||||
#undef BOOST_ATOMIC_INTERLOCKED_IMPORT
|
||||
|
||||
#endif // defined(BOOST_USE_WINDOWS_H)
|
||||
|
||||
#endif // defined(_MSC_VER)
|
||||
|
||||
#endif
|
||||
@@ -0,0 +1,187 @@
|
||||
#ifndef BOOST_ATOMIC_DETAIL_LINUX_ARM_HPP
|
||||
#define BOOST_ATOMIC_DETAIL_LINUX_ARM_HPP
|
||||
|
||||
// Distributed under the Boost Software License, Version 1.0.
|
||||
// See accompanying file LICENSE_1_0.txt or copy at
|
||||
// http://www.boost.org/LICENSE_1_0.txt)
|
||||
//
|
||||
// Copyright (c) 2009, 2011 Helge Bahmann
|
||||
// Copyright (c) 2009 Phil Endecott
|
||||
// Linux-specific code by Phil Endecott
|
||||
|
||||
// Different ARM processors have different atomic instructions. In particular,
|
||||
// architecture versions before v6 (which are still in widespread use, e.g. the
|
||||
// Intel/Marvell XScale chips like the one in the NSLU2) have only atomic swap.
|
||||
// On Linux the kernel provides some support that lets us abstract away from
|
||||
// these differences: it provides emulated CAS and barrier functions at special
|
||||
// addresses that are guaranteed not to be interrupted by the kernel. Using
|
||||
// this facility is slightly slower than inline assembler would be, but much
|
||||
// faster than a system call.
|
||||
//
|
||||
// While this emulated CAS is "strong" in the sense that it does not fail
|
||||
// "spuriously" (i.e.: it never fails to perform the exchange when the value
|
||||
// found equals the value expected), it does not return the found value on
|
||||
// failure. To satisfy the atomic API, compare_exchange_{weak|strong} must
|
||||
// return the found value on failure, and we have to manually load this value
|
||||
// after the emulated CAS reports failure. This in turn introduces a race
|
||||
// between the CAS failing (due to the "wrong" value being found) and subsequently
|
||||
// loading (which might turn up the "right" value). From an application's
|
||||
// point of view this looks like "spurious failure", and therefore the
|
||||
// emulated CAS is only good enough to provide compare_exchange_weak
|
||||
// semantics.
|
||||
|
||||
#include <cstddef>
|
||||
#include <boost/cstdint.hpp>
|
||||
#include <boost/memory_order.hpp>
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
|
||||
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
inline void
arm_barrier(void)
{
    // Issue a data memory barrier via the kernel-provided helper that
    // lives at a fixed address in the ARM Linux "user helper" page.
    // The kernel guarantees this entry point exists on all ARM
    // architecture versions it supports, emulating the barrier where
    // the hardware lacks one.
    typedef void (*kernel_dmb_t)(void);
    ((kernel_dmb_t) 0xffff0fa0)();
}
|
||||
|
||||
inline void
|
||||
platform_fence_before(memory_order order)
|
||||
{
|
||||
switch(order) {
|
||||
case memory_order_release:
|
||||
case memory_order_acq_rel:
|
||||
case memory_order_seq_cst:
|
||||
arm_barrier();
|
||||
case memory_order_consume:
|
||||
default:;
|
||||
}
|
||||
}
|
||||
|
||||
inline void
|
||||
platform_fence_after(memory_order order)
|
||||
{
|
||||
switch(order) {
|
||||
case memory_order_acquire:
|
||||
case memory_order_acq_rel:
|
||||
case memory_order_seq_cst:
|
||||
arm_barrier();
|
||||
default:;
|
||||
}
|
||||
}
|
||||
|
||||
inline void
// Barrier emitted before a plain atomic store: stores need the same
// "before" fencing as any other atomic operation on this platform.
platform_fence_before_store(memory_order order)
{
    platform_fence_before(order);
}
|
||||
|
||||
inline void
|
||||
platform_fence_after_store(memory_order order)
|
||||
{
|
||||
if (order == memory_order_seq_cst)
|
||||
arm_barrier();
|
||||
}
|
||||
|
||||
inline void
// Barrier emitted after a plain atomic load: loads take the same
// "after" fencing (acquire-side) as any other atomic operation here.
platform_fence_after_load(memory_order order)
{
    platform_fence_after(order);
}
|
||||
|
||||
template<typename T>
inline bool
platform_cmpxchg32(T & expected, T desired, volatile T * ptr)
{
    // The kernel's cmpxchg helper (fixed address in the user helper
    // page) returns zero when the exchange was performed. It does not
    // report the value found on failure, so we reload it manually;
    // the resulting race makes this only compare_exchange_weak-grade
    // (see the file header comment).
    typedef T (*kernel_cmpxchg32_t)(T oldval, T newval, volatile T * ptr);
    kernel_cmpxchg32_t kernel_cmpxchg32 = (kernel_cmpxchg32_t) 0xffff0fc0;

    bool succeeded = (kernel_cmpxchg32(expected, desired, ptr) == 0);
    if (!succeeded)
        expected = *ptr;
    return succeeded;
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
#define BOOST_ATOMIC_THREAD_FENCE 2
|
||||
inline void
|
||||
atomic_thread_fence(memory_order order)
|
||||
{
|
||||
switch(order) {
|
||||
case memory_order_acquire:
|
||||
case memory_order_release:
|
||||
case memory_order_acq_rel:
|
||||
case memory_order_seq_cst:
|
||||
atomics::detail::arm_barrier();
|
||||
default:;
|
||||
}
|
||||
}
|
||||
|
||||
#define BOOST_ATOMIC_SIGNAL_FENCE 2
|
||||
inline void
// Compiler-only fence: the empty asm with a "memory" clobber stops the
// compiler from reordering memory accesses across this point without
// emitting any hardware barrier. That is sufficient for signal fences,
// which only order accesses within a single thread, so the requested
// ordering argument is ignored (this is already the strongest compiler
// barrier available).
atomic_signal_fence(memory_order)
{
    __asm__ __volatile__ ("" ::: "memory");
}
|
||||
|
||||
class atomic_flag {
|
||||
private:
|
||||
atomic_flag(const atomic_flag &) /* = delete */ ;
|
||||
atomic_flag & operator=(const atomic_flag &) /* = delete */ ;
|
||||
uint32_t v_;
|
||||
public:
|
||||
atomic_flag(void) : v_(false) {}
|
||||
|
||||
void
|
||||
clear(memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
atomics::detail::platform_fence_before_store(order);
|
||||
const_cast<volatile uint32_t &>(v_) = 0;
|
||||
atomics::detail::platform_fence_after_store(order);
|
||||
}
|
||||
|
||||
bool
|
||||
test_and_set(memory_order order = memory_order_seq_cst) volatile
|
||||
{
|
||||
atomics::detail::platform_fence_before(order);
|
||||
uint32_t expected = v_;
|
||||
do {
|
||||
if (expected == 1)
|
||||
break;
|
||||
} while (!atomics::detail::platform_cmpxchg32(expected, (uint32_t)1, &v_));
|
||||
atomics::detail::platform_fence_after(order);
|
||||
return expected;
|
||||
}
|
||||
};
|
||||
#define BOOST_ATOMIC_FLAG_LOCK_FREE 2
|
||||
|
||||
}
|
||||
|
||||
#include <boost/atomic/detail/base.hpp>
|
||||
|
||||
#if !defined(BOOST_ATOMIC_FORCE_FALLBACK)
|
||||
|
||||
#define BOOST_ATOMIC_CHAR_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_CHAR16_T_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_CHAR32_T_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_WCHAR_T_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_SHORT_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_INT_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_LONG_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
|
||||
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
|
||||
#define BOOST_ATOMIC_BOOL_LOCK_FREE 2
|
||||
|
||||
#include <boost/atomic/detail/cas32weak.hpp>
|
||||
|
||||
#endif /* !defined(BOOST_ATOMIC_FORCE_FALLBACK) */
|
||||
|
||||
#endif
|
||||
@@ -0,0 +1,96 @@
|
||||
#ifndef BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP
|
||||
#define BOOST_ATOMIC_DETAIL_LOCKPOOL_HPP
|
||||
|
||||
// Copyright (c) 2011 Helge Bahmann
|
||||
//
|
||||
// Distributed under the Boost Software License, Version 1.0.
|
||||
// See accompanying file LICENSE_1_0.txt or copy at
|
||||
// http://www.boost.org/LICENSE_1_0.txt)
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#ifndef BOOST_ATOMIC_FLAG_LOCK_FREE
|
||||
#include <boost/thread/mutex.hpp>
|
||||
#endif
|
||||
|
||||
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
#ifndef BOOST_ATOMIC_FLAG_LOCK_FREE
|
||||
|
||||
// Fallback lock pool used when no lock-free atomic_flag is available:
// non-lock-free atomic objects are protected by a mutex selected from a
// shared pool, keyed by the object's address.
class lockpool
{
public:
    typedef mutex lock_type;
    // RAII guard: acquires the pool mutex covering 'addr' on
    // construction and releases it on destruction. Non-copyable.
    class scoped_lock
    {
    private:
        lock_type& mtx_;

        scoped_lock(scoped_lock const&) /* = delete */;
        scoped_lock& operator=(scoped_lock const&) /* = delete */;

    public:
        explicit
        scoped_lock(const volatile void * addr) : mtx_(get_lock_for(addr))
        {
            mtx_.lock();
        }
        ~scoped_lock()
        {
            mtx_.unlock();
        }
    };

private:
    // Maps an object address to one of the pooled mutexes; defined in
    // the library (BOOST_ATOMIC_DECL), not in this header.
    static BOOST_ATOMIC_DECL lock_type& get_lock_for(const volatile void * addr);
};
|
||||
|
||||
#else
|
||||
|
||||
class lockpool
|
||||
{
|
||||
public:
|
||||
typedef atomic_flag lock_type;
|
||||
|
||||
class scoped_lock
|
||||
{
|
||||
private:
|
||||
atomic_flag& flag_;
|
||||
|
||||
scoped_lock(const scoped_lock &) /* = delete */;
|
||||
scoped_lock& operator=(const scoped_lock &) /* = delete */;
|
||||
|
||||
public:
|
||||
explicit
|
||||
scoped_lock(const volatile void * addr) : flag_(get_lock_for(addr))
|
||||
{
|
||||
for (; flag_.test_and_set(memory_order_acquire);)
|
||||
{
|
||||
#if defined(BOOST_ATOMIC_X86_PAUSE)
|
||||
BOOST_ATOMIC_X86_PAUSE();
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
~scoped_lock(void)
|
||||
{
|
||||
flag_.clear(memory_order_release);
|
||||
}
|
||||
};
|
||||
|
||||
private:
|
||||
static BOOST_ATOMIC_DECL lock_type& get_lock_for(const volatile void * addr);
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -0,0 +1,62 @@
|
||||
#ifndef BOOST_ATOMIC_DETAIL_PLATFORM_HPP
|
||||
#define BOOST_ATOMIC_DETAIL_PLATFORM_HPP
|
||||
|
||||
// Copyright (c) 2009 Helge Bahmann
|
||||
//
|
||||
// Distributed under the Boost Software License, Version 1.0.
|
||||
// See accompanying file LICENSE_1_0.txt or copy at
|
||||
// http://www.boost.org/LICENSE_1_0.txt)
|
||||
|
||||
// Platform selection file
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
|
||||
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
|
||||
|
||||
#include <boost/atomic/detail/gcc-x86.hpp>
|
||||
|
||||
#elif 0 && defined(__GNUC__) && defined(__alpha__) /* currently does not work correctly */
|
||||
|
||||
#include <boost/atomic/detail/base.hpp>
|
||||
#include <boost/atomic/detail/gcc-alpha.hpp>
|
||||
|
||||
#elif defined(__GNUC__) && (defined(__POWERPC__) || defined(__PPC__))
|
||||
|
||||
#include <boost/atomic/detail/gcc-ppc.hpp>
|
||||
|
||||
// This list of ARM architecture versions comes from Apple's arm/arch.h header.
|
||||
// I don't know how complete it is.
|
||||
#elif defined(__GNUC__) && (defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
|
||||
|| defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \
|
||||
|| defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_7A__))
|
||||
|
||||
#include <boost/atomic/detail/gcc-armv6plus.hpp>
|
||||
|
||||
#elif defined(__linux__) && defined(__arm__)
|
||||
|
||||
#include <boost/atomic/detail/linux-arm.hpp>
|
||||
|
||||
#elif defined(__GNUC__) && defined(__sparc_v9__)
|
||||
|
||||
#include <boost/atomic/detail/gcc-sparcv9.hpp>
|
||||
|
||||
#elif defined(BOOST_WINDOWS) || defined(_WIN32_CE)
|
||||
|
||||
#include <boost/atomic/detail/windows.hpp>
|
||||
|
||||
#elif 0 && defined(__GNUC__) /* currently does not work correctly */
|
||||
|
||||
#include <boost/atomic/detail/base.hpp>
|
||||
#include <boost/atomic/detail/gcc-cas.hpp>
|
||||
|
||||
#else
|
||||
|
||||
#include <boost/atomic/detail/base.hpp>
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
@@ -0,0 +1,45 @@
|
||||
#ifndef BOOST_ATOMIC_DETAIL_TYPE_CLASSIFICATION_HPP
|
||||
#define BOOST_ATOMIC_DETAIL_TYPE_CLASSIFICATION_HPP
|
||||
|
||||
// Copyright (c) 2011 Helge Bahmann
|
||||
//
|
||||
// Distributed under the Boost Software License, Version 1.0.
|
||||
// See accompanying file LICENSE_1_0.txt or copy at
|
||||
// http://www.boost.org/LICENSE_1_0.txt)
|
||||
|
||||
#include <boost/atomic/detail/config.hpp>
|
||||
#include <boost/type_traits/is_integral.hpp>
|
||||
|
||||
#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
|
||||
#pragma once
|
||||
#endif
|
||||
|
||||
namespace boost {
|
||||
namespace atomics {
|
||||
namespace detail {
|
||||
|
||||
// Classifies a type for dispatching to the right atomic implementation:
// integral types map to 'int', pointer types map to 'void*', and
// everything else maps to 'void' (generic/fallback handling).
template<typename T, bool IsInt = boost::is_integral<T>::value>
struct classify
{
    typedef void type;
};

// Integral types (IsInt deduced true) are handled as integers.
template<typename T>
struct classify<T, true> {typedef int type;};

// Non-integral pointer types are handled as generic pointers.
template<typename T>
struct classify<T*, false> {typedef void* type;};
|
||||
|
||||
// Maps sizeof(T) to the width of the atomic storage unit used to hold
// it. Atomic operations work on power-of-two sized units, so in-between
// sizes are rounded up to the next power of two (3 -> 4, 5..7 -> 8,
// 9..15 -> 16); sizes with no matching unit are passed through
// unchanged so callers fall back to the lock-based implementation.
// (The 9..15 -> 16 rounding generalizes the original mapping, which
// only handled sizes up to 8; results for sizes <= 8 are identical.)
template<typename T>
struct storage_size_of
{
    enum _
    {
        size = sizeof(T),
        value = (size == 3 ? 4 :
                (size >= 5 && size <= 7 ? 8 :
                (size >= 9 && size <= 15 ? 16 : size)))
    };
};
|
||||
|
||||
}}}
|
||||
|
||||
#endif
|
||||
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user