first commit

This commit is contained in:
murdle
2026-03-01 02:38:58 +02:00
commit 19250b9db4
19111 changed files with 4358159 additions and 0 deletions

View File

@@ -0,0 +1,71 @@
// Copyright (C) 2011 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_DETAIL_ATOMIC_HPP
#define BOOST_LOCKFREE_DETAIL_ATOMIC_HPP

#include <boost/config.hpp>

// At this time, few compilers completely implement the C++11 <atomic>
// header, so default to boost.atomic and selectively opt in to the
// standard header for toolchains known to provide it.
#define BOOST_LOCKFREE_NO_HDR_ATOMIC

// MSVC supports atomic<> from version 2012 onwards.
#if defined(BOOST_MSVC) && (BOOST_MSVC >= 1700)
#undef BOOST_LOCKFREE_NO_HDR_ATOMIC
#endif

// GCC supports atomic<> from version 4.8 onwards.
// NOTE(review): __GNUC_VERSION__ is a reserved identifier (leading double
// underscore); a BOOST_LOCKFREE_-prefixed helper name would be safer.
#if defined(__GNUC__)
#  if defined(__GNUC_PATCHLEVEL__)
#    define __GNUC_VERSION__ (__GNUC__ * 10000 \
                              + __GNUC_MINOR__ * 100 \
                              + __GNUC_PATCHLEVEL__)
#  else
#    define __GNUC_VERSION__ (__GNUC__ * 10000 \
                              + __GNUC_MINOR__ * 100)
#  endif
#endif

// Require both a new-enough gcc and C++11 mode before trusting <atomic>.
#if (__GNUC_VERSION__ >= 40800) && (__cplusplus >= 201103L)
#undef BOOST_LOCKFREE_NO_HDR_ATOMIC
#endif

#undef __GNUC_VERSION__

#if defined(BOOST_LOCKFREE_NO_HDR_ATOMIC)
#include <boost/atomic.hpp>
#else
#include <atomic>
#endif

namespace boost {
namespace lockfree {
namespace detail {

// Alias the selected implementation (boost::atomic or std::atomic) into
// boost::lockfree::detail so the rest of the library is implementation-
// agnostic.
#if defined(BOOST_LOCKFREE_NO_HDR_ATOMIC)
using boost::atomic;
using boost::memory_order_acquire;
using boost::memory_order_consume;
using boost::memory_order_relaxed;
using boost::memory_order_release;
#else
using std::atomic;
using std::memory_order_acquire;
using std::memory_order_consume;
using std::memory_order_relaxed;
using std::memory_order_release;
#endif

}

// Re-export at boost::lockfree scope for use by the public headers.
using detail::atomic;
using detail::memory_order_acquire;
using detail::memory_order_consume;
using detail::memory_order_relaxed;
using detail::memory_order_release;

}}

#endif /* BOOST_LOCKFREE_DETAIL_ATOMIC_HPP */

View File

@@ -0,0 +1,38 @@
// branch hints
// Copyright (C) 2007, 2008 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_BRANCH_HINTS_HPP_INCLUDED
#define BOOST_LOCKFREE_BRANCH_HINTS_HPP_INCLUDED
namespace boost {
namespace lockfree {
namespace detail {
/** \brief Branch-prediction hint: the condition is expected to be true.
 *
 *  Returns the condition unchanged; on gcc-compatible compilers it also
 *  tells the optimizer to favor the taken path when laying out code.
 */
inline bool likely(bool condition)
{
#ifdef __GNUC__
    // __builtin_expect returns its first argument unchanged.
    return __builtin_expect(condition, true);
#else
    return condition; // no hint available on this compiler
#endif
}
/** \brief Branch-prediction hint: the condition is expected to be false.
 *
 *  Returns the condition unchanged; on gcc-compatible compilers it also
 *  tells the optimizer to treat the true-branch as the cold path.
 */
inline bool unlikely(bool condition)
{
#ifdef __GNUC__
    // __builtin_expect returns its first argument unchanged.
    return __builtin_expect(condition, false);
#else
    return condition; // no hint available on this compiler
#endif
}
} /* namespace detail */
} /* namespace lockfree */
} /* namespace boost */
#endif /* BOOST_LOCKFREE_BRANCH_HINTS_HPP_INCLUDED */

View File

@@ -0,0 +1,49 @@
// boost lockfree: copy_payload helper
//
// Copyright (C) 2011 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_DETAIL_COPY_PAYLOAD_HPP_INCLUDED
#define BOOST_LOCKFREE_DETAIL_COPY_PAYLOAD_HPP_INCLUDED
#include <boost/mpl/if.hpp>
#include <boost/type_traits/is_convertible.hpp>
namespace boost {
namespace lockfree {
namespace detail {
/** Copy policy chosen when a T lvalue implicitly converts to U:
 *  plain assignment performs the conversion.
 */
struct copy_convertible
{
    template <typename T, typename U>
    static void copy(T & source, U & destination)
    {
        destination = source; // implicit conversion on assignment
    }
};
/** Copy policy chosen when T does not implicitly convert to U but U can be
 *  explicitly constructed from T: build a temporary U and assign it.
 */
struct copy_constructible_and_copyable
{
    template <typename T, typename U>
    static void copy(T & source, U & destination)
    {
        destination = U(source); // explicit conversion via temporary
    }
};
template <typename T, typename U>
void copy_payload(T & t, U & u)
{
typedef typename boost::mpl::if_<typename boost::is_convertible<T, U>::type,
copy_convertible,
copy_constructible_and_copyable
>::type copy_type;
copy_type::copy(t, u);
}
}}}
#endif /* BOOST_LOCKFREE_DETAIL_COPY_PAYLOAD_HPP_INCLUDED */

View File

@@ -0,0 +1,625 @@
// lock-free freelist
//
// Copyright (C) 2008, 2009, 2011 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_FREELIST_HPP_INCLUDED
#define BOOST_LOCKFREE_FREELIST_HPP_INCLUDED
#include <memory>
#include <stdexcept>  /* std::runtime_error, thrown by runtime_sized_freelist_storage */

#include <boost/array.hpp>
#include <boost/config.hpp>
#include <boost/cstdint.hpp>
#include <boost/noncopyable.hpp>
#include <boost/static_assert.hpp>
#include <boost/throw_exception.hpp>  /* boost::throw_exception, used by runtime_sized_freelist_storage */

#include <boost/lockfree/detail/atomic.hpp>
#include <boost/lockfree/detail/parameter.hpp>
#include <boost/lockfree/detail/tagged_ptr.hpp>
namespace boost {
namespace lockfree {
namespace detail {
/** Lock-free, stack-based freelist (Treiber stack).
 *
 *  Freed nodes are pushed onto an intrusive stack instead of being returned
 *  to the allocator; allocation pops from that stack and only falls back to
 *  Alloc when the stack is empty (and Bounded is false). Tagged pointers
 *  guard the CAS loops against the ABA problem.
 */
template <typename T,
          typename Alloc = std::allocator<T>
         >
class freelist_stack:
    Alloc
{
    // A freed T's storage is reused to hold the link to the next free node.
    struct freelist_node
    {
        tagged_ptr<freelist_node> next;
    };

    typedef tagged_ptr<freelist_node> tagged_node_ptr;

public:
    typedef tagged_ptr<T> tagged_node_handle;

    /** Construct the freelist and push n pre-allocated nodes onto it.
     *  Runs before any concurrent access, hence the <false> (unsafe) calls. */
    template <typename Allocator>
    freelist_stack (Allocator const & alloc, std::size_t n = 0):
        Alloc(alloc),
        pool_(tagged_node_ptr(NULL))
    {
        for (std::size_t i = 0; i != n; ++i) {
            T * node = Alloc::allocate(1);
#ifdef BOOST_LOCKFREE_FREELIST_INIT_RUNS_DTOR
            destruct<false>(node);
#else
            deallocate<false>(node);
#endif
        }
    }

    /** Pre-allocate count additional nodes and push them onto the freelist. */
    template <bool ThreadSafe>
    void reserve (std::size_t count)
    {
        for (std::size_t i = 0; i != count; ++i) {
            T * node = Alloc::allocate(1);
            deallocate<ThreadSafe>(node);
        }
    }

    /** Pop (or allocate) a node and default-construct a T in it.
     *  Returns NULL when Bounded and the freelist is exhausted. */
    template <bool ThreadSafe, bool Bounded>
    T * construct (void)
    {
        T * node = allocate<ThreadSafe, Bounded>();
        if (node)
            new(node) T();
        return node;
    }

    /** As above, forwarding one constructor argument. */
    template <bool ThreadSafe, bool Bounded, typename ArgumentType>
    T * construct (ArgumentType const & arg)
    {
        T * node = allocate<ThreadSafe, Bounded>();
        if (node)
            new(node) T(arg);
        return node;
    }

    /** As above, forwarding two constructor arguments. */
    template <bool ThreadSafe, bool Bounded, typename ArgumentType1, typename ArgumentType2>
    T * construct (ArgumentType1 const & arg1, ArgumentType2 const & arg2)
    {
        T * node = allocate<ThreadSafe, Bounded>();
        if (node)
            new(node) T(arg1, arg2);
        return node;
    }

    /** Destroy the T held by the handle and return its node to the freelist.
     *  NOTE(review): the parameter name shadows the tagged_ptr class template. */
    template <bool ThreadSafe>
    void destruct (tagged_node_handle tagged_ptr)
    {
        T * n = tagged_ptr.get_ptr();
        n->~T();
        deallocate<ThreadSafe>(n);
    }

    /** Destroy *n and return its node to the freelist. */
    template <bool ThreadSafe>
    void destruct (T * n)
    {
        n->~T();
        deallocate<ThreadSafe>(n);
    }

    /** Walk the pool and return every node still on the freelist to Alloc.
     *  Nodes currently handed out to users are not tracked here, so all of
     *  them must have been returned before destruction. Not thread-safe. */
    ~freelist_stack(void)
    {
        tagged_node_ptr current (pool_);

        while (current) {
            freelist_node * current_ptr = current.get_ptr();
            if (current_ptr)
                current = current_ptr->next;
            Alloc::deallocate((T*)current_ptr, 1);
        }
    }

    /** True iff the underlying atomic pool head is lock-free. */
    bool is_lock_free(void) const
    {
        return pool_.is_lock_free();
    }

    /* handle/pointer conversions — for this pointer-based freelist the
     * handle IS the pointer, so these are identity operations. */
    T * get_handle(T * pointer) const
    {
        return pointer;
    }

    T * get_handle(tagged_node_handle const & handle) const
    {
        return get_pointer(handle);
    }

    T * get_pointer(tagged_node_handle const & tptr) const
    {
        return tptr.get_ptr();
    }

    T * get_pointer(T * pointer) const
    {
        return pointer;
    }

    T * null_handle(void) const
    {
        return NULL;
    }

protected: // allow use from subclasses
    /** Pop a raw node; dispatches to the CAS loop or the single-threaded
     *  variant at compile time. */
    template <bool ThreadSafe, bool Bounded>
    T * allocate (void)
    {
        if (ThreadSafe)
            return allocate_impl<Bounded>();
        else
            return allocate_impl_unsafe<Bounded>();
    }

private:
    /** Thread-safe pop: retry CAS until the head is swung to the next node.
     *  The tag is incremented on every successful pop to defeat ABA. */
    template <bool Bounded>
    T * allocate_impl (void)
    {
        tagged_node_ptr old_pool = pool_.load(memory_order_consume);

        for(;;) {
            if (!old_pool.get_ptr()) {
                // freelist empty: grow from the allocator unless bounded
                if (!Bounded)
                    return Alloc::allocate(1);
                else
                    return 0;
            }

            freelist_node * new_pool_ptr = old_pool->next.get_ptr();
            tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag() + 1);

            if (pool_.compare_exchange_weak(old_pool, new_pool)) {
                void * ptr = old_pool.get_ptr();
                return reinterpret_cast<T*>(ptr);
            }
            // CAS failure reloads old_pool; loop and retry
        }
    }

    /** Single-threaded pop: same logic without the CAS loop; relaxed
     *  ordering is sufficient because the caller guarantees exclusivity. */
    template <bool Bounded>
    T * allocate_impl_unsafe (void)
    {
        tagged_node_ptr old_pool = pool_.load(memory_order_relaxed);

        if (!old_pool.get_ptr()) {
            if (!Bounded)
                return Alloc::allocate(1);
            else
                return 0;
        }

        freelist_node * new_pool_ptr = old_pool->next.get_ptr();
        tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag() + 1);

        pool_.store(new_pool, memory_order_relaxed);
        void * ptr = old_pool.get_ptr();
        return reinterpret_cast<T*>(ptr);
    }

protected:
    /** Push a raw node back onto the freelist. */
    template <bool ThreadSafe>
    void deallocate (T * n)
    {
        if (ThreadSafe)
            deallocate_impl(n);
        else
            deallocate_impl_unsafe(n);
    }

private:
    /** Thread-safe push: link n in front of the current head and CAS.
     *  The tag is carried over unchanged — it is only bumped on pop. */
    void deallocate_impl (T * n)
    {
        void * node = n;
        tagged_node_ptr old_pool = pool_.load(memory_order_consume);
        freelist_node * new_pool_ptr = reinterpret_cast<freelist_node*>(node);

        for(;;) {
            tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag());
            new_pool->next.set_ptr(old_pool.get_ptr());

            if (pool_.compare_exchange_weak(old_pool, new_pool))
                return;
        }
    }

    /** Single-threaded push; relaxed ordering, caller guarantees exclusivity. */
    void deallocate_impl_unsafe (T * n)
    {
        void * node = n;
        tagged_node_ptr old_pool = pool_.load(memory_order_relaxed);
        freelist_node * new_pool_ptr = reinterpret_cast<freelist_node*>(node);

        tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag());
        new_pool->next.set_ptr(old_pool.get_ptr());

        pool_.store(new_pool, memory_order_relaxed);
    }

    // head of the intrusive free-node stack
    atomic<tagged_node_ptr> pool_;
};
/** 16-bit array index paired with a 16-bit ABA tag.
 *
 *  Used by the array-based (fixed-size) freelists, where nodes are addressed
 *  by index instead of pointer. The whole pair is small enough that
 *  atomic<tagged_index> can be handled by an ordinary single-width CAS.
 */
class tagged_index
{
public:
    typedef boost::uint16_t tag_t;
    typedef boost::uint16_t index_t;

    /** uninitialized constructor */
    tagged_index(void) BOOST_NOEXCEPT //: index(0), tag(0)
    {}

    /** copy constructor */
#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS
    tagged_index(tagged_index const & rhs):
        index(rhs.index), tag(rhs.tag)
    {}
#else
    tagged_index(tagged_index const & rhs) = default;
#endif

    explicit tagged_index(index_t i, tag_t t = 0):
        index(i), tag(t)
    {}

    /** index access */
    /* @{ */
    index_t get_index() const
    {
        return index;
    }

    void set_index(index_t i)
    {
        index = i;
    }
    /* @} */

    /** tag access */
    /* @{ */
    tag_t get_tag() const
    {
        return tag;
    }

    void set_tag(tag_t t)
    {
        tag = t;
    }
    /* @} */

    // Equality compares both fields: a recycled index with a bumped tag does
    // not compare equal, which is what defeats ABA in CAS loops.
    bool operator==(tagged_index const & rhs) const
    {
        return (index == rhs.index) && (tag == rhs.tag);
    }

protected:
    index_t index;
    tag_t tag;
};
/** Node storage backed by an in-object char array sized at compile time.
 *  Satisfies the NodeStorage concept used by fixed_size_freelist
 *  (nodes() / node_count()).
 */
template <typename T,
          std::size_t size>
struct compiletime_sized_freelist_storage
{
    // array-based freelists only support a 16bit address space.
    BOOST_STATIC_ASSERT(size < 65536);

    // NOTE(review): a raw char array carries no alignment guarantee for T;
    // presumably T's alignment requirement is never stricter than that of
    // the surrounding object here — consider aligned storage to make this
    // explicit. TODO confirm.
    boost::array<char, size * sizeof(T)> data;

    // unused ... only for API purposes
    template <typename Allocator>
    compiletime_sized_freelist_storage(Allocator const & alloc, std::size_t count)
    {}

    /** First element of the storage area, viewed as an array of T. */
    T * nodes(void) const
    {
        return reinterpret_cast<T*>(const_cast<char*>(data.data()));
    }

    /** Capacity in nodes (the compile-time size). */
    std::size_t node_count(void) const
    {
        return size;
    }
};
/** Node storage allocated from Alloc at construction time.
 *
 *  Satisfies the NodeStorage concept used by fixed_size_freelist.
 *  NOTE(review): the struct is implicitly copyable, but a copy would
 *  double-free nodes_ in the destructor — confirm it is never copied.
 *  NOTE(review): std::runtime_error requires <stdexcept> and
 *  boost::throw_exception requires <boost/throw_exception.hpp>; neither is
 *  included directly by this header — verify transitive includes.
 */
template <typename T,
          typename Alloc = std::allocator<T> >
struct runtime_sized_freelist_storage:
    Alloc
{
    T * nodes_;            // owned block of count nodes
    std::size_t node_count_;

    /** Allocate storage for count nodes; count is capped at 65535 because
     *  array-based freelists address nodes with a 16-bit index. */
    template <typename Allocator>
    runtime_sized_freelist_storage(Allocator const & alloc, std::size_t count):
        Alloc(alloc), node_count_(count)
    {
        if (count > 65535)
            boost::throw_exception(std::runtime_error("boost.lockfree: freelist size is limited to a maximum of 65535 objects"));
        nodes_ = Alloc::allocate(count);
    }

    ~runtime_sized_freelist_storage(void)
    {
        Alloc::deallocate(nodes_, node_count_);
    }

    /** First element of the storage area. */
    T * nodes(void) const
    {
        return nodes_;
    }

    /** Capacity in nodes, as requested at construction. */
    std::size_t node_count(void) const
    {
        return node_count_;
    }
};
/** Fixed-size, lock-free freelist addressing its nodes by 16-bit array index.
 *
 *  Nodes live in NodeStorage (a compile-time or runtime-sized array); free
 *  nodes form an intrusive stack linked by tagged_index values written into
 *  the node storage itself. Because handles are indices rather than
 *  pointers, a single-width CAS on atomic<tagged_index> suffices — this is
 *  what makes the structure lock-free on platforms without double-width CAS.
 */
template <typename T,
          typename NodeStorage = runtime_sized_freelist_storage<T>
         >
class fixed_size_freelist:
    NodeStorage
{
    // A free node's storage is reused to hold the index of the next free node.
    struct freelist_node
    {
        tagged_index next;
    };

    typedef tagged_index::index_t index_t;

    /** Thread every storage slot onto the free stack.
     *  Runs single-threaded during construction, hence deallocate<false>. */
    void initialize(void)
    {
        T * nodes = NodeStorage::nodes();
        for (std::size_t i = 0; i != NodeStorage::node_count(); ++i) {
            tagged_index * next_index = reinterpret_cast<tagged_index*>(nodes + i);
            next_index->set_index(null_handle());

#ifdef BOOST_LOCKFREE_FREELIST_INIT_RUNS_DTOR
            destruct<false>(nodes + i);
#else
            deallocate<false>(static_cast<index_t>(i));
#endif
        }
    }

public:
    typedef tagged_index tagged_node_handle;

    /** Construct from an allocator, reserving count nodes. */
    template <typename Allocator>
    fixed_size_freelist (Allocator const & alloc, std::size_t count):
        NodeStorage(alloc, count),
        pool_(tagged_index(static_cast<index_t>(count), 0))
    {
        initialize();
    }

    fixed_size_freelist (void):
        // cast for consistency with the allocator overload; node_count() is
        // below 65536 by construction, so the narrowing is value-preserving
        pool_(tagged_index(static_cast<index_t>(NodeStorage::node_count()), 0))
    {
        initialize();
    }

    /** Pop a node and default-construct a T in it; NULL when exhausted.
     *  Bounded is accepted for interface compatibility with freelist_stack —
     *  a fixed-size pool is always bounded. */
    template <bool ThreadSafe, bool Bounded>
    T * construct (void)
    {
        index_t node_index = allocate<ThreadSafe>();
        if (node_index == null_handle())
            return NULL;

        T * node = NodeStorage::nodes() + node_index;
        new(node) T();
        return node;
    }

    /** As above, forwarding one constructor argument. */
    template <bool ThreadSafe, bool Bounded, typename ArgumentType>
    T * construct (ArgumentType const & arg)
    {
        index_t node_index = allocate<ThreadSafe>();
        if (node_index == null_handle())
            return NULL;

        T * node = NodeStorage::nodes() + node_index;
        new(node) T(arg);
        return node;
    }

    /** As above, forwarding two constructor arguments. */
    template <bool ThreadSafe, bool Bounded, typename ArgumentType1, typename ArgumentType2>
    T * construct (ArgumentType1 const & arg1, ArgumentType2 const & arg2)
    {
        index_t node_index = allocate<ThreadSafe>();
        if (node_index == null_handle())
            return NULL;

        T * node = NodeStorage::nodes() + node_index;
        new(node) T(arg1, arg2);
        return node;
    }

    /** Destroy the T designated by the handle and return its slot. */
    template <bool ThreadSafe>
    void destruct (tagged_node_handle tagged_index)
    {
        index_t index = tagged_index.get_index();
        T * n = NodeStorage::nodes() + index;
        n->~T();
        deallocate<ThreadSafe>(index);
    }

    /** Destroy *n and return its slot (index recovered by pointer arithmetic). */
    template <bool ThreadSafe>
    void destruct (T * n)
    {
        n->~T();
        deallocate<ThreadSafe>(n - NodeStorage::nodes());
    }

    /** True iff the atomic pool head is lock-free. */
    bool is_lock_free(void) const
    {
        return pool_.is_lock_free();
    }

    /** One-past-the-end index; serves as the NULL of the index space. */
    index_t null_handle(void) const
    {
        return static_cast<index_t>(NodeStorage::node_count());
    }

    /* handle/pointer conversions between T* and index_t */
    index_t get_handle(T * pointer) const
    {
        if (pointer == NULL)
            return null_handle();
        else
            return static_cast<index_t>(pointer - NodeStorage::nodes());
    }

    index_t get_handle(tagged_node_handle const & handle) const
    {
        return handle.get_index();
    }

    T * get_pointer(tagged_node_handle const & tptr) const
    {
        return get_pointer(tptr.get_index());
    }

    T * get_pointer(index_t index) const
    {
        if (index == null_handle())
            return 0;
        else
            return NodeStorage::nodes() + index;
    }

    T * get_pointer(T * ptr) const
    {
        return ptr;
    }

protected: // allow use from subclasses
    /** Pop a free slot; dispatches to the CAS loop or the single-threaded
     *  variant at compile time. Returns null_handle() when empty. */
    template <bool ThreadSafe>
    index_t allocate (void)
    {
        if (ThreadSafe)
            return allocate_impl();
        else
            return allocate_impl_unsafe();
    }

private:
    /** Thread-safe pop: CAS the head to the next free index, bumping the
     *  tag on every successful pop to defeat ABA. */
    index_t allocate_impl (void)
    {
        tagged_index old_pool = pool_.load(memory_order_consume);

        for(;;) {
            index_t index = old_pool.get_index();
            if (index == null_handle())
                return index; // freelist exhausted

            T * old_node = NodeStorage::nodes() + index;
            tagged_index * next_index = reinterpret_cast<tagged_index*>(old_node);
            tagged_index new_pool(next_index->get_index(), old_pool.get_tag() + 1);

            if (pool_.compare_exchange_weak(old_pool, new_pool))
                return old_pool.get_index();
            // CAS failure reloads old_pool; loop and retry
        }
    }

    /** Single-threaded pop: same logic without the CAS loop. */
    index_t allocate_impl_unsafe (void)
    {
        tagged_index old_pool = pool_.load(memory_order_consume);

        index_t index = old_pool.get_index();
        if (index == null_handle())
            return index;

        T * old_node = NodeStorage::nodes() + index;
        tagged_index * next_index = reinterpret_cast<tagged_index*>(old_node);
        tagged_index new_pool(next_index->get_index(), old_pool.get_tag() + 1);

        pool_.store(new_pool, memory_order_relaxed);
        return old_pool.get_index();
    }

    /** Push a slot back onto the freelist. */
    template <bool ThreadSafe>
    void deallocate (index_t index)
    {
        if (ThreadSafe)
            deallocate_impl(index);
        else
            deallocate_impl_unsafe(index);
    }

    /** Thread-safe push: link the slot in front of the head and CAS.
     *  The tag is carried over unchanged — it is only bumped on pop. */
    void deallocate_impl (index_t index)
    {
        freelist_node * new_pool_node = reinterpret_cast<freelist_node*>(NodeStorage::nodes() + index);
        tagged_index old_pool = pool_.load(memory_order_consume);

        for(;;) {
            tagged_index new_pool (index, old_pool.get_tag());
            new_pool_node->next.set_index(old_pool.get_index());

            if (pool_.compare_exchange_weak(old_pool, new_pool))
                return;
        }
    }

    /** Single-threaded push; relaxed ordering suffices because the caller
     *  guarantees exclusivity (consistent with the other *_unsafe paths —
     *  previously this store used the default seq_cst order). */
    void deallocate_impl_unsafe (index_t index)
    {
        freelist_node * new_pool_node = reinterpret_cast<freelist_node*>(NodeStorage::nodes() + index);
        tagged_index old_pool = pool_.load(memory_order_consume);

        tagged_index new_pool (index, old_pool.get_tag());
        new_pool_node->next.set_index(old_pool.get_index());

        pool_.store(new_pool, memory_order_relaxed);
    }

    // head of the intrusive free-slot stack
    atomic<tagged_index> pool_;
};
/** Metafunction choosing the freelist implementation from the policy flags:
 *  - IsCompileTimeSized              -> fixed_size_freelist over an in-object array
 *  - IsFixedSize (runtime capacity)  -> fixed_size_freelist over allocator storage
 *  - otherwise                       -> dynamically growing freelist_stack
 */
template <typename T,
          typename Alloc,
          bool IsCompileTimeSized,
          bool IsFixedSize,
          std::size_t Capacity
         >
struct select_freelist
{
    // storage backend used if a fixed-size freelist is selected
    typedef typename mpl::if_c<IsCompileTimeSized,
                               compiletime_sized_freelist_storage<T, Capacity>,
                               runtime_sized_freelist_storage<T, Alloc>
                              >::type fixed_sized_storage_type;

    typedef typename mpl::if_c<IsCompileTimeSized || IsFixedSize,
                               fixed_size_freelist<T, fixed_sized_storage_type>,
                               freelist_stack<T, Alloc>
                              >::type type;
};
/** Metafunction choosing the node handle representation:
 *  node-based containers use tagged pointers / raw T*, array-based ones use
 *  tagged 16-bit indices (see tagged_index).
 */
template <typename T, bool IsNodeBased>
struct select_tagged_handle
{
    // handle + ABA tag, as stored inside atomic<> variables
    typedef typename mpl::if_c<IsNodeBased,
                               tagged_ptr<T>,
                               tagged_index
                              >::type tagged_handle_type;

    // bare handle without the tag
    typedef typename mpl::if_c<IsNodeBased,
                               T*,
                               typename tagged_index::index_t
                              >::type handle_type;
};
} /* namespace detail */
} /* namespace lockfree */
} /* namespace boost */
#endif /* BOOST_LOCKFREE_FREELIST_HPP_INCLUDED */

View File

@@ -0,0 +1,73 @@
// boost lockfree
//
// Copyright (C) 2011 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_DETAIL_PARAMETER_HPP
#define BOOST_LOCKFREE_DETAIL_PARAMETER_HPP
#include <boost/lockfree/policies.hpp>
namespace boost {
namespace lockfree {
namespace detail {
namespace mpl = boost::mpl;
/** Metafunction: was an argument bound for tag_type in the Boost.Parameter
 *  pack bound_args?
 *  - type:  the bound argument, or mpl::void_ if absent
 *  - value: true iff an argument was supplied
 */
template <typename bound_args, typename tag_type>
struct has_arg
{
    typedef typename parameter::binding<bound_args, tag_type, mpl::void_>::type type;
    static const bool value = mpl::is_not_void_<type>::type::value;
};
/** Extract the capacity<> policy from bound_args.
 *  capacity defaults to 0 when the policy is absent; has_capacity tells
 *  callers whether a compile-time capacity was explicitly requested.
 */
template <typename bound_args>
struct extract_capacity
{
    static const bool has_capacity = has_arg<bound_args, tag::capacity>::value;

    typedef typename mpl::if_c<has_capacity,
                               typename has_arg<bound_args, tag::capacity>::type,
                               mpl::size_t< 0 >
                              >::type capacity_t;

    static const std::size_t capacity = capacity_t::value;
};
/** Extract the allocator<> policy from bound_args, defaulting to
 *  std::allocator<T>, and rebind the result so it allocates T objects.
 *  NOTE(review): relies on std::allocator being visible transitively (this
 *  header does not include <memory> itself) — verify the include chain.
 */
template <typename bound_args, typename T>
struct extract_allocator
{
    static const bool has_allocator = has_arg<bound_args, tag::allocator>::value;

    typedef typename mpl::if_c<has_allocator,
                               typename has_arg<bound_args, tag::allocator>::type,
                               std::allocator<T>
                              >::type allocator_arg;

    // rebind whatever the user supplied to allocate the internal node type T
    typedef typename allocator_arg::template rebind<T>::other type;
};
/** Extract the fixed_sized<> policy from bound_args, falling back to the
 *  data structure's own default (the default_ template parameter).
 */
template <typename bound_args, bool default_ = false>
struct extract_fixed_sized
{
    static const bool has_fixed_sized = has_arg<bound_args, tag::fixed_sized>::value;

    typedef typename mpl::if_c<has_fixed_sized,
                               typename has_arg<bound_args, tag::fixed_sized>::type,
                               mpl::bool_<default_>
                              >::type type;

    static const bool value = type::value;
};
} /* namespace detail */
} /* namespace lockfree */
} /* namespace boost */
#endif /* BOOST_LOCKFREE_DETAIL_PARAMETER_HPP */

View File

@@ -0,0 +1,56 @@
// Copyright (C) 2009 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_PREFIX_HPP_INCLUDED
#define BOOST_LOCKFREE_PREFIX_HPP_INCLUDED

/* this file defines the following macros:
   BOOST_LOCKFREE_CACHELINE_BYTES:     size of a cache line
   BOOST_LOCKFREE_PTR_COMPRESSION:     use tag/pointer compression to utilize
                                       parts of the virtual address space as
                                       tag (at least 16bit)
   BOOST_LOCKFREE_DCAS_ALIGNMENT:      symbol used for aligning structs at
                                       cache line boundaries
*/

// Assumed cache-line size; not probed at runtime.
#define BOOST_LOCKFREE_CACHELINE_BYTES 64

#ifdef _MSC_VER

#define BOOST_LOCKFREE_CACHELINE_ALIGNMENT __declspec(align(BOOST_LOCKFREE_CACHELINE_BYTES))

#if defined(_M_IX86)
// 32-bit x86: no extra alignment needed for double-width CAS operands.
#define BOOST_LOCKFREE_DCAS_ALIGNMENT
#elif defined(_M_X64) || defined(_M_IA64)
// 64-bit: prefer packing the tag into the pointer; 16-byte alignment for
// the double-width CAS fallback (presumably cmpxchg16b — see tagged_ptr).
#define BOOST_LOCKFREE_PTR_COMPRESSION 1
#define BOOST_LOCKFREE_DCAS_ALIGNMENT __declspec(align(16))
#endif

#endif /* _MSC_VER */

#ifdef __GNUC__

#define BOOST_LOCKFREE_CACHELINE_ALIGNMENT __attribute__((aligned(BOOST_LOCKFREE_CACHELINE_BYTES)))

#if defined(__i386__) || defined(__ppc__)
#define BOOST_LOCKFREE_DCAS_ALIGNMENT
#elif defined(__x86_64__)
#define BOOST_LOCKFREE_PTR_COMPRESSION 1
#define BOOST_LOCKFREE_DCAS_ALIGNMENT __attribute__((aligned(16)))
#elif defined(__alpha__)
// LATER: alpha may benefit from pointer compression. but what is the maximum size of the address space?
#define BOOST_LOCKFREE_DCAS_ALIGNMENT
#endif

#endif /* __GNUC__ */

// Fallbacks: expand to nothing on platforms not matched above.
#ifndef BOOST_LOCKFREE_DCAS_ALIGNMENT
#define BOOST_LOCKFREE_DCAS_ALIGNMENT /*BOOST_LOCKFREE_DCAS_ALIGNMENT*/
#endif

#ifndef BOOST_LOCKFREE_CACHELINE_ALIGNMENT
#define BOOST_LOCKFREE_CACHELINE_ALIGNMENT /*BOOST_LOCKFREE_CACHELINE_ALIGNMENT*/
#endif

#endif /* BOOST_LOCKFREE_PREFIX_HPP_INCLUDED */

View File

@@ -0,0 +1,21 @@
// tagged pointer, for aba prevention
//
// Copyright (C) 2008 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_TAGGED_PTR_HPP_INCLUDED
#define BOOST_LOCKFREE_TAGGED_PTR_HPP_INCLUDED

#include <boost/config.hpp>
#include <boost/lockfree/detail/prefix.hpp>

// Dispatch to one of two tagged_ptr implementations, selected by prefix.hpp:
// - tagged_ptr_dcas:           pointer and tag stored side by side; relies
//                              on a double-width CAS for lock-freedom.
// - tagged_ptr_ptrcompression: tag packed into the upper 16 bits of a
//                              48-bit virtual address (x86_64 only).
#ifndef BOOST_LOCKFREE_PTR_COMPRESSION
#include <boost/lockfree/detail/tagged_ptr_dcas.hpp>
#else
#include <boost/lockfree/detail/tagged_ptr_ptrcompression.hpp>
#endif

#endif /* BOOST_LOCKFREE_TAGGED_PTR_HPP_INCLUDED */

View File

@@ -0,0 +1,127 @@
// tagged pointer, for aba prevention
//
// Copyright (C) 2008 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_TAGGED_PTR_DCAS_HPP_INCLUDED
#define BOOST_LOCKFREE_TAGGED_PTR_DCAS_HPP_INCLUDED
#include <boost/lockfree/detail/branch_hints.hpp>
#include <cstddef> /* for std::size_t */
namespace boost {
namespace lockfree {
namespace detail {
/** Pointer plus ABA-prevention tag, stored side by side.
 *
 *  This is a plain value type: it performs no synchronization itself and is
 *  meant to be wrapped in atomic<tagged_ptr<T>>. Being two words wide, that
 *  atomic needs a double-width CAS to stay lock-free, and
 *  BOOST_LOCKFREE_DCAS_ALIGNMENT aligns instances as those instructions
 *  require.
 */
template <class T>
class BOOST_LOCKFREE_DCAS_ALIGNMENT tagged_ptr
{
public:
    typedef std::size_t tag_t;

    /** uninitialized constructor */
    tagged_ptr(void) BOOST_NOEXCEPT//: ptr(0), tag(0)
    {}

#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS
    tagged_ptr(tagged_ptr const & p):
        ptr(p.ptr), tag(p.tag)
    {}
#else
    tagged_ptr(tagged_ptr const & p) = default;
#endif

    explicit tagged_ptr(T * p, tag_t t = 0):
        ptr(p), tag(t)
    {}

    /** unsafe set operation */
    /* @{ */
#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS
    tagged_ptr & operator= (tagged_ptr const & p)
    {
        set(p.ptr, p.tag);
        return *this;
    }
#else
    tagged_ptr & operator= (tagged_ptr const & p) = default;
#endif

    void set(T * p, tag_t t)
    {
        ptr = p;
        tag = t;
    }
    /* @} */

    /** comparing semantics */
    /* @{ */
    // Both fields must match: a recycled pointer with a bumped tag does not
    // compare equal, which is what defeats the ABA problem in CAS loops.
    // NOTE(review): the two members are read non-atomically here; callers
    // presumably compare snapshot copies obtained from atomic<> loads.
    bool operator== (volatile tagged_ptr const & p) const
    {
        return (ptr == p.ptr) && (tag == p.tag);
    }

    bool operator!= (volatile tagged_ptr const & p) const
    {
        return !operator==(p);
    }
    /* @} */

    /** pointer access */
    /* @{ */
    T * get_ptr(void) const volatile
    {
        return ptr;
    }

    void set_ptr(T * p) volatile
    {
        ptr = p;
    }
    /* @} */

    /** tag access */
    /* @{ */
    tag_t get_tag() const volatile
    {
        return tag;
    }

    void set_tag(tag_t t) volatile
    {
        tag = t;
    }
    /* @} */

    /** smart pointer support */
    /* @{ */
    T & operator*() const
    {
        return *ptr;
    }

    T * operator->() const
    {
        return ptr;
    }

    operator bool(void) const
    {
        return ptr != 0;
    }
    /* @} */

protected:
    T * ptr;
    tag_t tag;
};
} /* namespace detail */
} /* namespace lockfree */
} /* namespace boost */
#endif /* BOOST_LOCKFREE_TAGGED_PTR_DCAS_HPP_INCLUDED */

View File

@@ -0,0 +1,168 @@
// tagged pointer, for aba prevention
//
// Copyright (C) 2008, 2009 Tim Blechmann, based on code by Cory Nelson
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_TAGGED_PTR_PTRCOMPRESSION_HPP_INCLUDED
#define BOOST_LOCKFREE_TAGGED_PTR_PTRCOMPRESSION_HPP_INCLUDED
#include <boost/lockfree/detail/branch_hints.hpp>
#include <cstddef> /* for std::size_t */
#include <boost/cstdint.hpp>
namespace boost {
namespace lockfree {
namespace detail {
#if defined (__x86_64__) || defined (_M_X64)
/** Pointer with an ABA tag compressed into a single 64-bit word.
 *
 *  On x86_64 only the low 48 bits of a virtual address are significant, so
 *  the upper 16 bits hold the tag. The whole tagged pointer then fits in one
 *  word and atomic<tagged_ptr> works with an ordinary single-width CAS.
 *  Like the dcas variant, this is a plain value type with no synchronization
 *  of its own.
 */
template <class T>
class tagged_ptr
{
    typedef boost::uint64_t compressed_ptr_t;

public:
    typedef boost::uint16_t tag_t;

private:
    // Type-punning union: tag[3] aliases the top 16 bits of value on the
    // little-endian x86_64 targets this code is guarded for.
    // NOTE(review): reading a union member other than the last one written
    // is compiler-specific behavior (documented for gcc/MSVC) — confirm for
    // any new toolchain.
    union cast_unit
    {
        compressed_ptr_t value;
        tag_t tag[4];
    };

    static const int tag_index = 3;
    static const compressed_ptr_t ptr_mask = 0xffffffffffffUL; //(1L<<48L)-1;

    /** Strip the tag, keeping the 48 address bits. */
    static T* extract_ptr(volatile compressed_ptr_t const & i)
    {
        return (T*)(i & ptr_mask);
    }

    /** Read the tag from the top 16 bits. */
    static tag_t extract_tag(volatile compressed_ptr_t const & i)
    {
        cast_unit cu;
        cu.value = i;
        return cu.tag[tag_index];
    }

    /** Combine a pointer and a tag into one 64-bit word. */
    static compressed_ptr_t pack_ptr(T * ptr, int tag)
    {
        cast_unit ret;
        ret.value = compressed_ptr_t(ptr);
        ret.tag[tag_index] = tag;
        return ret.value;
    }

public:
    /** uninitialized constructor */
    tagged_ptr(void) BOOST_NOEXCEPT//: ptr(0), tag(0)
    {}

    /** copy constructor */
#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS
    tagged_ptr(tagged_ptr const & p):
        ptr(p.ptr)
    {}
#else
    tagged_ptr(tagged_ptr const & p) = default;
#endif

    explicit tagged_ptr(T * p, tag_t t = 0):
        ptr(pack_ptr(p, t))
    {}

    /** unsafe set operation */
    /* @{ */
#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS
    tagged_ptr & operator= (tagged_ptr const & p)
    {
        ptr = p.ptr;
        return *this;
    }
#else
    tagged_ptr & operator= (tagged_ptr const & p) = default;
#endif

    void set(T * p, tag_t t)
    {
        ptr = pack_ptr(p, t);
    }
    /* @} */

    /** comparing semantics */
    /* @{ */
    // One word holds both pointer and tag, so a single compare covers both —
    // a recycled pointer with a bumped tag does not compare equal (ABA).
    bool operator== (volatile tagged_ptr const & p) const
    {
        return (ptr == p.ptr);
    }

    bool operator!= (volatile tagged_ptr const & p) const
    {
        return !operator==(p);
    }
    /* @} */

    /** pointer access */
    /* @{ */
    T * get_ptr() const volatile
    {
        return extract_ptr(ptr);
    }

    // read-modify-write of the packed word; not atomic by itself
    void set_ptr(T * p) volatile
    {
        tag_t tag = get_tag();
        ptr = pack_ptr(p, tag);
    }
    /* @} */

    /** tag access */
    /* @{ */
    tag_t get_tag() const volatile
    {
        return extract_tag(ptr);
    }

    // read-modify-write of the packed word; not atomic by itself
    void set_tag(tag_t t) volatile
    {
        T * p = get_ptr();
        ptr = pack_ptr(p, t);
    }
    /* @} */

    /** smart pointer support */
    /* @{ */
    T & operator*() const
    {
        return *get_ptr();
    }

    T * operator->() const
    {
        return get_ptr();
    }

    operator bool(void) const
    {
        return get_ptr() != 0;
    }
    /* @} */

protected:
    compressed_ptr_t ptr;
};
#else
#error unsupported platform
#endif
} /* namespace detail */
} /* namespace lockfree */
} /* namespace boost */
#endif /* BOOST_LOCKFREE_TAGGED_PTR_PTRCOMPRESSION_HPP_INCLUDED */

View File

@@ -0,0 +1,59 @@
// boost lockfree
//
// Copyright (C) 2011 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_POLICIES_HPP_INCLUDED
#define BOOST_LOCKFREE_POLICIES_HPP_INCLUDED
#include <boost/parameter.hpp>
#include <boost/mpl/bool.hpp>
#include <boost/mpl/size_t.hpp>
#include <boost/mpl/void.hpp>
namespace boost {
namespace lockfree {
#ifndef BOOST_DOXYGEN_INVOKED
namespace tag { struct allocator ; }
namespace tag { struct fixed_sized; }
namespace tag { struct capacity; }
#endif
/** Configures a data structure as \b fixed-sized.
*
* The internal nodes are stored inside an array and they are addressed by array indexing. This limits the possible size of the
* queue to the number of elements that can be addressed by the index type (usually 2**16-2), but on platforms that lack
* double-width compare-and-exchange instructions, this is the best way to achieve lock-freedom.
* This implies that a data structure is bounded.
* */
template <bool IsFixedSized>
struct fixed_sized:
boost::parameter::template_keyword<tag::fixed_sized, boost::mpl::bool_<IsFixedSized> >
{};
/** Sets the \b capacity of a data structure at compile-time.
*
* This implies that a data structure is bounded and fixed-sized.
* */
template <size_t Size>
struct capacity:
boost::parameter::template_keyword<tag::capacity, boost::mpl::size_t<Size> >
{};
/** Defines the \b allocator type of a data structure.
* */
template <class Alloc>
struct allocator:
boost::parameter::template_keyword<tag::allocator, Alloc>
{};
}
}
#endif /* BOOST_LOCKFREE_POLICIES_HPP_INCLUDED */

View File

@@ -0,0 +1,476 @@
// lock-free queue from
// Michael, M. M. and Scott, M. L.,
// "simple, fast and practical non-blocking and blocking concurrent queue algorithms"
//
// Copyright (C) 2008, 2009, 2010, 2011 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_FIFO_HPP_INCLUDED
#define BOOST_LOCKFREE_FIFO_HPP_INCLUDED
#include <memory> /* std::auto_ptr */
#include <boost/assert.hpp>
#include <boost/noncopyable.hpp>
#include <boost/static_assert.hpp>
#include <boost/type_traits/has_trivial_assign.hpp>
#include <boost/type_traits/has_trivial_destructor.hpp>
#include <boost/lockfree/detail/atomic.hpp>
#include <boost/lockfree/detail/copy_payload.hpp>
#include <boost/lockfree/detail/freelist.hpp>
#include <boost/lockfree/detail/parameter.hpp>
#include <boost/lockfree/detail/tagged_ptr.hpp>
namespace boost {
namespace lockfree {
namespace detail {

// Boost.Parameter signature for boost::lockfree::queue: the allocator and
// the compile-time capacity policies are both optional.
// NOTE(review): tag::fixed_sized is not listed here although queue reads it
// via extract_fixed_sized — presumably binding works without a declared
// keyword; confirm against Boost.Parameter semantics.
typedef parameter::parameters<boost::parameter::optional<tag::allocator>,
                              boost::parameter::optional<tag::capacity>
                             > queue_signature;

} /* namespace detail */
/** The queue class provides a multi-writer/multi-reader queue, pushing and popping is lock-free,
* construction/destruction has to be synchronized. It uses a freelist for memory management,
* freed nodes are pushed to the freelist and not returned to the OS before the queue is destroyed.
*
* \b Policies:
* - \ref boost::lockfree::fixed_sized, defaults to \c boost::lockfree::fixed_sized<false> \n
* Can be used to completely disable dynamic memory allocations during push in order to ensure lockfree behavior. \n
* If the data structure is configured as fixed-sized, the internal nodes are stored inside an array and they are addressed
* by array indexing. This limits the possible size of the queue to the number of elements that can be addressed by the index
* type (usually 2**16-2), but on platforms that lack double-width compare-and-exchange instructions, this is the best way
* to achieve lock-freedom.
*
* - \ref boost::lockfree::capacity, optional \n
* If this template argument is passed to the options, the size of the queue is set at compile-time.\n
* This option implies \c fixed_sized<true>
*
* - \ref boost::lockfree::allocator, defaults to \c boost::lockfree::allocator<std::allocator<void>> \n
* Specifies the allocator that is used for the internal freelist
*
* \b Requirements:
* - T must have a copy constructor
* - T must have a trivial assignment operator
* - T must have a trivial destructor
*
* */
#ifndef BOOST_DOXYGEN_INVOKED
template <typename T,
class A0 = boost::parameter::void_,
class A1 = boost::parameter::void_,
class A2 = boost::parameter::void_>
#else
template <typename T, ...Options>
#endif
class queue:
boost::noncopyable
{
private:
#ifndef BOOST_DOXYGEN_INVOKED
#ifdef BOOST_HAS_TRIVIAL_DESTRUCTOR
/* nodes are recycled through the freelist without running element destructors,
 * so T must be trivially destructible (checked where the compiler exposes the trait) */
BOOST_STATIC_ASSERT((boost::has_trivial_destructor<T>::value));
#endif
#ifdef BOOST_HAS_TRIVIAL_ASSIGN
/* payloads are copied out of live nodes by assignment, so T must be trivially assignable */
BOOST_STATIC_ASSERT((boost::has_trivial_assign<T>::value));
#endif
/* resolve the named template policies (capacity<>, fixed_sized<>, allocator<>) */
typedef typename detail::queue_signature::bind<A0, A1, A2>::type bound_args;
static const bool has_capacity = detail::extract_capacity<bound_args>::has_capacity;
static const size_t capacity = detail::extract_capacity<bound_args>::capacity;
static const bool fixed_sized = detail::extract_fixed_sized<bound_args>::value;
/* node_based: nodes are addressed by raw pointers; otherwise they live in an
 * array and are addressed by index through the freelist handle type */
static const bool node_based = !(has_capacity || fixed_sized);
static const bool compile_time_sized = has_capacity;
/* queue node: payload plus an atomic tagged link to the successor; aligned to
 * a cache line to reduce false sharing between neighbouring nodes */
struct BOOST_LOCKFREE_CACHELINE_ALIGNMENT node
{
typedef typename detail::select_tagged_handle<node, node_based>::tagged_handle_type tagged_node_handle;
typedef typename detail::select_tagged_handle<node, node_based>::handle_type handle_type;
/* construct a node holding v; the next-link is reset to null_handle while
 * bumping its tag (see comment below) rather than being value-initialized */
node(T const & v, handle_type null_handle):
data(v)//, next(tagged_node_handle(0, 0))
{
/* increment tag to avoid ABA problem */
tagged_node_handle old_next = next.load(memory_order_relaxed);
tagged_node_handle new_next (null_handle, old_next.get_tag()+1);
next.store(new_next, memory_order_release);
}
/* construct an empty node (used for the internal dummy node) */
node (handle_type null_handle):
next(tagged_node_handle(null_handle, 0))
{}
node(void)
{}
atomic<tagged_node_handle> next;
T data;
};
typedef typename detail::extract_allocator<bound_args, node>::type node_allocator;
typedef typename detail::select_freelist<node, node_allocator, compile_time_sized, fixed_sized, capacity>::type pool_t;
typedef typename pool_t::tagged_node_handle tagged_node_handle;
typedef typename detail::select_tagged_handle<node, node_based>::handle_type handle_type;
/* install the initial dummy node that both head_ and tail_ point at;
 * the queue is considered empty while head_ and tail_ refer to the same node */
void initialize(void)
{
node * n = pool.template construct<true, false>(pool.null_handle());
tagged_node_handle dummy_node(pool.get_handle(n), 0);
head_.store(dummy_node, memory_order_relaxed);
tail_.store(dummy_node, memory_order_release);
}
/* publicly visible allocator/size_type, decoupled from the internal freelist types */
struct implementation_defined
{
typedef node_allocator allocator;
typedef std::size_t size_type;
};
#endif
public:
typedef T value_type;
typedef typename implementation_defined::allocator allocator;
typedef typename implementation_defined::size_type size_type;
/**
 * \return true, if implementation is lock-free.
 *
 * \warning It only checks, if the queue head and tail nodes and the freelist can be modified in a lock-free manner.
 * On most platforms, the whole implementation is lock-free, if this is true. Using c++0x-style atomics, there is
 * no possibility to provide a completely accurate implementation, because one would need to test every internal
 * node, which is impossible if further nodes will be allocated from the operating system.
 * */
bool is_lock_free (void) const
{
return head_.is_lock_free() && tail_.is_lock_free() && pool.is_lock_free();
}
//! Construct queue
// @{
/* these constructors require a compile-time capacity<> policy (asserted below);
 * the freelist is sized to `capacity` up front */
queue(void):
head_(tagged_node_handle(0, 0)),
tail_(tagged_node_handle(0, 0)),
pool(node_allocator(), capacity)
{
BOOST_ASSERT(has_capacity);
initialize();
}
template <typename U>
explicit queue(typename node_allocator::template rebind<U>::other const & alloc):
head_(tagged_node_handle(0, 0)),
tail_(tagged_node_handle(0, 0)),
pool(alloc, capacity)
{
BOOST_STATIC_ASSERT(has_capacity);
initialize();
}
explicit queue(allocator const & alloc):
head_(tagged_node_handle(0, 0)),
tail_(tagged_node_handle(0, 0)),
pool(alloc, capacity)
{
BOOST_ASSERT(has_capacity);
initialize();
}
// @}
//! Construct queue, allocate n nodes for the freelist.
// @{
/* run-time sized construction: must NOT have a capacity<> policy; one extra
 * node (n + 1) is allocated to account for the internal dummy node created
 * by initialize() */
explicit queue(size_type n):
head_(tagged_node_handle(0, 0)),
tail_(tagged_node_handle(0, 0)),
pool(node_allocator(), n + 1)
{
BOOST_ASSERT(!has_capacity);
initialize();
}
template <typename U>
queue(size_type n, typename node_allocator::template rebind<U>::other const & alloc):
head_(tagged_node_handle(0, 0)),
tail_(tagged_node_handle(0, 0)),
pool(alloc, n + 1)
{
BOOST_STATIC_ASSERT(!has_capacity);
initialize();
}
// @}
/** \copydoc boost::lockfree::stack::reserve
 * */
void reserve(size_type n)
{
/* <true>: thread-safe freelist growth */
pool.template reserve<true>(n);
}
/** \copydoc boost::lockfree::stack::reserve_unsafe
 * */
void reserve_unsafe(size_type n)
{
/* <false>: caller guarantees exclusive access */
pool.template reserve<false>(n);
}
/** Destroys queue, free all nodes from freelist.
 * */
~queue(void)
{
/* drain all remaining elements, then release the dummy node that
 * head_ still refers to */
T dummy;
while(unsynchronized_pop(dummy))
{}
pool.template destruct<false>(head_.load(memory_order_relaxed));
}
/** Check if the queue is empty
*
* \return true, if the queue is empty, false otherwise
* \note The result is only accurate, if no other thread modifies the queue. Therefore it is rarely practical to use this
* value in program logic.
* */
bool empty(void)
{
return pool.get_handle(head_.load()) == pool.get_handle(tail_.load());
}
/** Pushes object t to the queue.
 *
 * \post object will be pushed to the queue, if internal node can be allocated
 * \returns true, if the push operation is successful.
 *
 * \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
 * from the OS. This may not be lock-free.
 * */
bool push(T const & t)
{
return do_push<false>(t);
}
/** Pushes object t to the queue.
 *
 * \post object will be pushed to the queue, if internal node can be allocated
 * \returns true, if the push operation is successful.
 *
 * \note Thread-safe and non-blocking. If internal memory pool is exhausted, operation will fail
 * \throws if memory allocator throws
 * */
bool bounded_push(T const & t)
{
return do_push<true>(t);
}
private:
#ifndef BOOST_DOXYGEN_INVOKED
/* Michael/Scott enqueue. Bounded selects whether the freelist may fall back
 * to allocating new nodes from the OS. */
template <bool Bounded>
bool do_push(T const & t)
{
using detail::likely;
node * n = pool.template construct<true, Bounded>(t, pool.null_handle());
/* NOTE(review): get_handle() is evaluated before the NULL check below --
 * confirm this is well-defined for NULL with index-based freelists */
handle_type node_handle = pool.get_handle(n);
if (n == NULL)
return false;
for (;;) {
/* snapshot tail and its successor, then re-read tail to validate that
 * the two values belong together */
tagged_node_handle tail = tail_.load(memory_order_acquire);
node * tail_node = pool.get_pointer(tail);
tagged_node_handle next = tail_node->next.load(memory_order_acquire);
node * next_ptr = pool.get_pointer(next);
tagged_node_handle tail2 = tail_.load(memory_order_acquire);
if (likely(tail == tail2)) {
if (next_ptr == 0) {
/* tail is the last node: try to link the new node behind it,
 * then swing tail forward (tag bumped to defeat ABA) */
tagged_node_handle new_tail_next(node_handle, next.get_tag() + 1);
if ( tail_node->next.compare_exchange_weak(next, new_tail_next) ) {
tagged_node_handle new_tail(node_handle, tail.get_tag() + 1);
tail_.compare_exchange_strong(tail, new_tail);
return true;
}
}
else {
/* tail lags behind: help advancing it and retry */
tagged_node_handle new_tail(pool.get_handle(next_ptr), tail.get_tag() + 1);
tail_.compare_exchange_strong(tail, new_tail);
}
}
}
}
#endif
public:
/** Pushes object t to the queue.
 *
 * \post object will be pushed to the queue, if internal node can be allocated
 * \returns true, if the push operation is successful.
 *
 * \note Not Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
 * from the OS. This may not be lock-free.
 * \throws if memory allocator throws
 * */
bool unsynchronized_push(T const & t)
{
    node * n = pool.template construct<false, false>(t, pool.null_handle());

    if (n == NULL)
        return false;

    for (;;) {
        tagged_node_handle tail = tail_.load(memory_order_relaxed);
        /* always translate tagged handles through the pool: for fixed-sized
         * queues the handle is an array index, not a pointer, so neither
         * operator-> on the handle nor constructing a tagged handle from a
         * raw node* is valid there (mirrors do_push/pop above) */
        node * tail_node = pool.get_pointer(tail);
        tagged_node_handle next = tail_node->next.load(memory_order_relaxed);
        node * next_ptr = pool.get_pointer(next);

        if (next_ptr == 0) {
            /* tail is the last node: link the new node and advance tail */
            tail_node->next.store(tagged_node_handle(pool.get_handle(n), next.get_tag() + 1), memory_order_relaxed);
            tail_.store(tagged_node_handle(pool.get_handle(n), tail.get_tag() + 1), memory_order_relaxed);
            return true;
        }
        else
            /* tail was lagging behind: move it forward and retry */
            tail_.store(tagged_node_handle(pool.get_handle(next_ptr), tail.get_tag() + 1), memory_order_relaxed);
    }
}
/** Pops object from queue.
 *
 * \post if pop operation is successful, object will be copied to ret.
 * \returns true, if the pop operation is successful, false if queue was empty.
 *
 * \note Thread-safe and non-blocking
 * */
bool pop (T & ret)
{
/* forward to the generic overload with U == T */
return pop<T>(ret);
}
/** Pops object from queue.
 *
 * \pre type U must be constructible by T and copyable, or T must be convertible to U
 * \post if pop operation is successful, object will be copied to ret.
 * \returns true, if the pop operation is successful, false if queue was empty.
 *
 * \note Thread-safe and non-blocking
 * */
template <typename U>
bool pop (U & ret)
{
using detail::likely;
/* Michael/Scott dequeue: head always points at the current dummy node,
 * the payload lives in its successor */
for (;;) {
/* snapshot head, tail and head->next; re-read head to validate the snapshot */
tagged_node_handle head = head_.load(memory_order_acquire);
node * head_ptr = pool.get_pointer(head);
tagged_node_handle tail = tail_.load(memory_order_acquire);
tagged_node_handle next = head_ptr->next.load(memory_order_acquire);
node * next_ptr = pool.get_pointer(next);
tagged_node_handle head2 = head_.load(memory_order_acquire);
if (likely(head == head2)) {
if (pool.get_handle(head) == pool.get_handle(tail)) {
if (next_ptr == 0)
return false;
/* head == tail but a successor exists: tail lags behind, help advancing it */
tagged_node_handle new_tail(pool.get_handle(next), tail.get_tag() + 1);
tail_.compare_exchange_strong(tail, new_tail);
} else {
if (next_ptr == 0)
/* this check is not part of the original algorithm as published by michael and scott
 *
 * however we reuse the tagged_ptr part for the freelist and clear the next part during node
 * allocation. we can observe a null-pointer here.
 * */
continue;
/* read the payload before swinging head: once the CAS succeeds the old
 * head node is recycled into the freelist */
detail::copy_payload(next_ptr->data, ret);
tagged_node_handle new_head(pool.get_handle(next), head.get_tag() + 1);
if (head_.compare_exchange_weak(head, new_head)) {
pool.template destruct<true>(head);
return true;
}
}
}
}
}
/** Pops object from queue.
 *
 * \post if pop operation is successful, object will be copied to ret.
 * \returns true, if the pop operation is successful, false if queue was empty.
 *
 * \note Not thread-safe, but non-blocking
 *
 * */
bool unsynchronized_pop (T & ret)
{
/* forward to the generic overload with U == T */
return unsynchronized_pop<T>(ret);
}
/** Pops object from queue.
 *
 * \pre type U must be constructible by T and copyable, or T must be convertible to U
 * \post if pop operation is successful, object will be copied to ret.
 * \returns true, if the pop operation is successful, false if queue was empty.
 *
 * \note Not thread-safe, but non-blocking
 *
 * */
template <typename U>
bool unsynchronized_pop (U & ret)
{
/* same structure as pop<U>(), but with plain loads/stores and no
 * snapshot re-validation, since the caller guarantees exclusive access */
for (;;) {
tagged_node_handle head = head_.load(memory_order_relaxed);
node * head_ptr = pool.get_pointer(head);
tagged_node_handle tail = tail_.load(memory_order_relaxed);
tagged_node_handle next = head_ptr->next.load(memory_order_relaxed);
node * next_ptr = pool.get_pointer(next);
if (pool.get_handle(head) == pool.get_handle(tail)) {
if (next_ptr == 0)
return false;
/* tail lags behind: move it forward */
tagged_node_handle new_tail(pool.get_handle(next), tail.get_tag() + 1);
tail_.store(new_tail);
} else {
if (next_ptr == 0)
/* this check is not part of the original algorithm as published by michael and scott
 *
 * however we reuse the tagged_ptr part for the freelist and clear the next part during node
 * allocation. we can observe a null-pointer here.
 * */
continue;
detail::copy_payload(next_ptr->data, ret);
tagged_node_handle new_head(pool.get_handle(next), head.get_tag() + 1);
head_.store(new_head);
pool.template destruct<false>(head);
return true;
}
}
}
private:
#ifndef BOOST_DOXYGEN_INVOKED
/* head_ (modified by consumers) and tail_ (modified by producers) are padded
 * onto separate cache lines to avoid false sharing between the two sides */
atomic<tagged_node_handle> head_;
static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(tagged_node_handle);
char padding1[padding_size];
atomic<tagged_node_handle> tail_;
char padding2[padding_size];
pool_t pool;
#endif
};
} /* namespace lockfree */
} /* namespace boost */
#endif /* BOOST_LOCKFREE_FIFO_HPP_INCLUDED */

View File

@@ -0,0 +1,650 @@
// lock-free single-producer/single-consumer ringbuffer
// this algorithm is implemented in various projects (linux kernel)
//
// Copyright (C) 2009, 2011 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_SPSC_QUEUE_HPP_INCLUDED
#define BOOST_LOCKFREE_SPSC_QUEUE_HPP_INCLUDED
#include <algorithm>
#include <boost/array.hpp>
#include <boost/assert.hpp>
#include <boost/noncopyable.hpp>
#include <boost/static_assert.hpp>
#include <boost/lockfree/detail/atomic.hpp>
#include <boost/lockfree/detail/branch_hints.hpp>
#include <boost/lockfree/detail/parameter.hpp>
#include <boost/lockfree/detail/prefix.hpp>
namespace boost {
namespace lockfree {
namespace detail {
/* named-parameter signature for spsc_queue: optional capacity<> and allocator<> policies */
typedef parameter::parameters<boost::parameter::optional<tag::capacity>,
boost::parameter::optional<tag::allocator>
> ringbuffer_signature;
/* Common implementation of the single-producer/single-consumer ringbuffer.
 *
 * Holds only the two indices; the actual storage (and its capacity) is
 * supplied by the derived classes on every call. write_index_ is written
 * only by the producer thread, read_index_ only by the consumer thread;
 * acquire/release pairs on the respective loads/stores publish the data. */
template <typename T>
class ringbuffer_base:
    boost::noncopyable
{
#ifndef BOOST_DOXYGEN_INVOKED
    typedef std::size_t size_t;
    static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(size_t);
    atomic<size_t> write_index_;
    char padding1[padding_size]; /* force read_index and write_index to different cache lines */
    atomic<size_t> read_index_;

protected:
    ringbuffer_base(void):
        write_index_(0), read_index_(0)
    {}

    /* advance an index by one, wrapping around at max_size */
    static size_t next_index(size_t arg, size_t max_size)
    {
        size_t ret = arg + 1;
        while (unlikely(ret >= max_size))
            ret -= max_size;
        return ret;
    }

    /* elements available for reading, given one consistent snapshot of both indices */
    static size_t read_available(size_t write_index, size_t read_index, size_t max_size)
    {
        if (write_index >= read_index)
            return write_index - read_index;

        size_t ret = write_index + max_size - read_index;
        return ret;
    }

    /* free slots for writing; one slot always stays unused so that a full
     * buffer can be distinguished from an empty one */
    static size_t write_available(size_t write_index, size_t read_index, size_t max_size)
    {
        size_t ret = read_index - write_index - 1;
        if (write_index >= read_index)
            ret += max_size;
        return ret;
    }

    /* single-element enqueue (producer thread only) */
    bool push(T const & t, T * buffer, size_t max_size)
    {
        size_t write_index = write_index_.load(memory_order_relaxed); // only written from push thread
        size_t next = next_index(write_index, max_size);

        if (next == read_index_.load(memory_order_acquire))
            return false; /* ringbuffer is full */

        buffer[write_index] = t;
        write_index_.store(next, memory_order_release);
        return true;
    }

    /* bulk enqueue from an array; returns the number of elements actually pushed */
    size_t push(const T * input_buffer, size_t input_count, T * internal_buffer, size_t max_size)
    {
        size_t write_index = write_index_.load(memory_order_relaxed); // only written from push thread
        const size_t read_index = read_index_.load(memory_order_acquire);
        const size_t avail = write_available(write_index, read_index, max_size);

        if (avail == 0)
            return 0;

        input_count = (std::min)(input_count, avail);

        size_t new_write_index = write_index + input_count;

        if (write_index + input_count > max_size) {
            /* copy data in two sections */
            size_t count0 = max_size - write_index;

            std::copy(input_buffer, input_buffer + count0, internal_buffer + write_index);
            std::copy(input_buffer + count0, input_buffer + input_count, internal_buffer);
            new_write_index -= max_size;
        } else {
            std::copy(input_buffer, input_buffer + input_count, internal_buffer + write_index);

            if (new_write_index == max_size)
                new_write_index = 0;
        }

        write_index_.store(new_write_index, memory_order_release);
        return input_count;
    }

    /* bulk enqueue from an iterator range; returns an iterator to the first
     * element that did not fit */
    template <typename ConstIterator>
    ConstIterator push(ConstIterator begin, ConstIterator end, T * internal_buffer, size_t max_size)
    {
        // FIXME: avoid std::distance and std::advance

        size_t write_index = write_index_.load(memory_order_relaxed); // only written from push thread
        const size_t read_index = read_index_.load(memory_order_acquire);
        const size_t avail = write_available(write_index, read_index, max_size);

        if (avail == 0)
            return begin;

        size_t input_count = std::distance(begin, end);
        input_count = (std::min)(input_count, avail);

        size_t new_write_index = write_index + input_count;

        ConstIterator last = begin;
        std::advance(last, input_count);

        if (write_index + input_count > max_size) {
            /* copy data in two sections */
            size_t count0 = max_size - write_index;

            ConstIterator midpoint = begin;
            std::advance(midpoint, count0);

            std::copy(begin, midpoint, internal_buffer + write_index);
            std::copy(midpoint, last, internal_buffer);
            new_write_index -= max_size;
        } else {
            std::copy(begin, last, internal_buffer + write_index);

            if (new_write_index == max_size)
                new_write_index = 0;
        }

        write_index_.store(new_write_index, memory_order_release);
        return last;
    }

    /* single-element dequeue (consumer thread only) */
    bool pop (T & ret, T * buffer, size_t max_size)
    {
        size_t write_index = write_index_.load(memory_order_acquire);
        size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread
        if (empty(write_index, read_index))
            return false;

        ret = buffer[read_index];
        size_t next = next_index(read_index, max_size);
        read_index_.store(next, memory_order_release);
        return true;
    }

    /* bulk dequeue into an array; returns the number of elements actually popped */
    size_t pop (T * output_buffer, size_t output_count, const T * internal_buffer, size_t max_size)
    {
        const size_t write_index = write_index_.load(memory_order_acquire);
        size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread

        const size_t avail = read_available(write_index, read_index, max_size);

        if (avail == 0)
            return 0;

        output_count = (std::min)(output_count, avail);

        size_t new_read_index = read_index + output_count;

        if (read_index + output_count > max_size) {
            /* copy data in two sections */
            size_t count0 = max_size - read_index;
            size_t count1 = output_count - count0;

            std::copy(internal_buffer + read_index, internal_buffer + max_size, output_buffer);
            std::copy(internal_buffer, internal_buffer + count1, output_buffer + count0);

            new_read_index -= max_size;
        } else {
            std::copy(internal_buffer + read_index, internal_buffer + read_index + output_count, output_buffer);
            if (new_read_index == max_size)
                new_read_index = 0;
        }

        read_index_.store(new_read_index, memory_order_release);
        return output_count;
    }

    /* dequeue everything that is available into an output iterator;
     * returns the number of elements popped */
    template <typename OutputIterator>
    size_t pop (OutputIterator it, const T * internal_buffer, size_t max_size)
    {
        const size_t write_index = write_index_.load(memory_order_acquire);
        size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread

        const size_t avail = read_available(write_index, read_index, max_size);
        if (avail == 0)
            return 0;

        size_t new_read_index = read_index + avail;

        if (read_index + avail > max_size) {
            /* copy data in two sections */
            size_t count0 = max_size - read_index;
            size_t count1 = avail - count0;

            /* bugfix: the second copy must continue after the first section.
             * std::copy returns the advanced output iterator; previously the
             * un-advanced `it` was reused, overwriting the first section */
            it = std::copy(internal_buffer + read_index, internal_buffer + max_size, it);
            std::copy(internal_buffer, internal_buffer + count1, it);

            new_read_index -= max_size;
        } else {
            std::copy(internal_buffer + read_index, internal_buffer + read_index + avail, it);
            if (new_read_index == max_size)
                new_read_index = 0;
        }

        read_index_.store(new_read_index, memory_order_release);
        return avail;
    }
#endif

public:
    /** reset the ringbuffer
     *
     * \note Not thread-safe
     * */
    void reset(void)
    {
        write_index_.store(0, memory_order_relaxed);
        read_index_.store(0, memory_order_release);
    }

    /** Check if the ringbuffer is empty
     *
     * \return true, if the ringbuffer is empty, false otherwise
     * \note Due to the concurrent nature of the ringbuffer the result may be inaccurate.
     * */
    bool empty(void)
    {
        return empty(write_index_.load(memory_order_relaxed), read_index_.load(memory_order_relaxed));
    }

    /**
     * \return true, if implementation is lock-free.
     *
     * */
    bool is_lock_free(void) const
    {
        return write_index_.is_lock_free() && read_index_.is_lock_free();
    }

private:
    /* the buffer is empty when both indices coincide (one slot is kept free,
     * so this never aliases the full state) */
    bool empty(size_t write_index, size_t read_index)
    {
        return write_index == read_index;
    }
};
/* Fixed-capacity ringbuffer: the storage is an embedded boost::array and the
 * capacity is the compile-time constant max_size. Every operation simply
 * forwards to ringbuffer_base together with a pointer to that storage. */
template <typename T, std::size_t max_size>
class compile_time_sized_ringbuffer:
    public ringbuffer_base<T>
{
    typedef std::size_t size_t;
    boost::array<T, max_size> array_;

public:
    bool push(T const & t)
    {
        T * const storage = array_.c_array();
        return ringbuffer_base<T>::push(t, storage, max_size);
    }

    bool pop(T & ret)
    {
        T * const storage = array_.c_array();
        return ringbuffer_base<T>::pop(ret, storage, max_size);
    }

    size_t push(T const * t, size_t size)
    {
        T * const storage = array_.c_array();
        return ringbuffer_base<T>::push(t, size, storage, max_size);
    }

    template <size_t size>
    size_t push(T const (&t)[size])
    {
        return push(&t[0], size);
    }

    template <typename ConstIterator>
    ConstIterator push(ConstIterator begin, ConstIterator end)
    {
        T * const storage = array_.c_array();
        return ringbuffer_base<T>::push(begin, end, storage, max_size);
    }

    size_t pop(T * ret, size_t size)
    {
        T * const storage = array_.c_array();
        return ringbuffer_base<T>::pop(ret, size, storage, max_size);
    }

    template <size_t size>
    size_t pop(T (&ret)[size])
    {
        return pop(&ret[0], size);
    }

    template <typename OutputIterator>
    size_t pop(OutputIterator it)
    {
        T * const storage = array_.c_array();
        return ringbuffer_base<T>::pop(it, storage, max_size);
    }
};
/* Run-time sized ringbuffer: storage is allocated from Alloc on construction
 * (empty-base-optimized by deriving privately from Alloc). */
template <typename T, typename Alloc>
class runtime_sized_ringbuffer:
public ringbuffer_base<T>,
private Alloc
{
typedef std::size_t size_t;
size_t max_elements_;
typedef typename Alloc::pointer pointer;
pointer array_;
public:
explicit runtime_sized_ringbuffer(size_t max_elements):
max_elements_(max_elements)
{
// TODO: we don't necessarily need to construct all elements
// NOTE(review): if constructing an element throws, the memory obtained from
// Alloc::allocate is leaked and already-constructed elements are not destroyed
// -- verify T's default construction cannot throw in supported use, or guard
array_ = Alloc::allocate(max_elements);
for (size_t i = 0; i != max_elements; ++i)
Alloc::construct(array_ + i, T());
}
template <typename U>
runtime_sized_ringbuffer(typename Alloc::template rebind<U>::other const & alloc, size_t max_elements):
Alloc(alloc), max_elements_(max_elements)
{
// TODO: we don't necessarily need to construct all elements
array_ = Alloc::allocate(max_elements);
for (size_t i = 0; i != max_elements; ++i)
Alloc::construct(array_ + i, T());
}
runtime_sized_ringbuffer(Alloc const & alloc, size_t max_elements):
Alloc(alloc), max_elements_(max_elements)
{
// TODO: we don't necessarily need to construct all elements
array_ = Alloc::allocate(max_elements);
for (size_t i = 0; i != max_elements; ++i)
Alloc::construct(array_ + i, T());
}
/* destroy all elements, then release the storage */
~runtime_sized_ringbuffer(void)
{
for (size_t i = 0; i != max_elements_; ++i)
Alloc::destroy(array_ + i);
Alloc::deallocate(array_, max_elements_);
}
/* NOTE(review): the single-element overloads pass &*array_ while the
 * array/iterator overloads pass array_ directly; the two forms are only
 * equivalent when Alloc::pointer is a plain T* -- confirm */
bool push(T const & t)
{
return ringbuffer_base<T>::push(t, &*array_, max_elements_);
}
bool pop(T & ret)
{
return ringbuffer_base<T>::pop(ret, &*array_, max_elements_);
}
size_t push(T const * t, size_t size)
{
return ringbuffer_base<T>::push(t, size, &*array_, max_elements_);
}
template <size_t size>
size_t push(T const (&t)[size])
{
return push(t, size);
}
template <typename ConstIterator>
ConstIterator push(ConstIterator begin, ConstIterator end)
{
return ringbuffer_base<T>::push(begin, end, array_, max_elements_);
}
size_t pop(T * ret, size_t size)
{
return ringbuffer_base<T>::pop(ret, size, array_, max_elements_);
}
template <size_t size>
size_t pop(T (&ret)[size])
{
return pop(ret, size);
}
template <typename OutputIterator>
size_t pop(OutputIterator it)
{
return ringbuffer_base<T>::pop(it, array_, max_elements_);
}
};
/* Compute the concrete ringbuffer type from the policy arguments: with a
 * capacity<> policy the buffer is compile-time sized, otherwise it is sized
 * at run-time using the (optional) allocator<> policy. */
template <typename T, typename A0, typename A1>
struct make_ringbuffer
{
typedef typename ringbuffer_signature::bind<A0, A1>::type bound_args;
typedef extract_capacity<bound_args> extract_capacity_t;
static const bool runtime_sized = !extract_capacity_t::has_capacity;
static const size_t capacity = extract_capacity_t::capacity;
typedef extract_allocator<bound_args, T> extract_allocator_t;
typedef typename extract_allocator_t::type allocator;
// allocator argument is only sane, for run-time sized ringbuffers
BOOST_STATIC_ASSERT((mpl::if_<mpl::bool_<!runtime_sized>,
mpl::bool_<!extract_allocator_t::has_allocator>,
mpl::true_
>::type::value));
/* select the implementation the spsc_queue facade will derive from */
typedef typename mpl::if_c<runtime_sized,
runtime_sized_ringbuffer<T, allocator>,
compile_time_sized_ringbuffer<T, capacity>
>::type ringbuffer_type;
};
} /* namespace detail */
/** The spsc_queue class provides a single-writer/single-reader fifo queue, pushing and popping is wait-free.
*
* \b Policies:
* - \c boost::lockfree::capacity<>, optional <br>
* If this template argument is passed to the options, the size of the ringbuffer is set at compile-time.
*
* - \c boost::lockfree::allocator<>, defaults to \c boost::lockfree::allocator<std::allocator<T>> <br>
* Specifies the allocator that is used to allocate the ringbuffer. This option is only valid, if the ringbuffer is configured
* to be sized at run-time
*
* \b Requirements:
* - T must have a default constructor
* - T must be copyable
* */
#ifndef BOOST_DOXYGEN_INVOKED
template <typename T,
class A0 = boost::parameter::void_,
class A1 = boost::parameter::void_>
#else
template <typename T, ...Options>
#endif
class spsc_queue:
public detail::make_ringbuffer<T, A0, A1>::ringbuffer_type
{
private:
#ifndef BOOST_DOXYGEN_INVOKED
/* the base class is either a compile-time or a run-time sized ringbuffer,
 * selected by make_ringbuffer from the policy arguments */
typedef typename detail::make_ringbuffer<T, A0, A1>::ringbuffer_type base_type;
static const bool runtime_sized = detail::make_ringbuffer<T, A0, A1>::runtime_sized;
typedef typename detail::make_ringbuffer<T, A0, A1>::allocator allocator_arg;
struct implementation_defined
{
typedef allocator_arg allocator;
typedef std::size_t size_type;
};
#endif
public:
typedef T value_type;
typedef typename implementation_defined::allocator allocator;
typedef typename implementation_defined::size_type size_type;
/** Constructs a spsc_queue
 *
 * \pre spsc_queue must be configured to be sized at compile-time
 */
// @{
spsc_queue(void)
{
BOOST_ASSERT(!runtime_sized);
}
template <typename U>
explicit spsc_queue(typename allocator::template rebind<U>::other const & alloc)
{
// just for API compatibility: we don't actually need an allocator
BOOST_STATIC_ASSERT(!runtime_sized);
}
explicit spsc_queue(allocator const & alloc)
{
// just for API compatibility: we don't actually need an allocator
BOOST_ASSERT(!runtime_sized);
}
// @}
/** Constructs a spsc_queue for element_count elements
 *
 * \pre spsc_queue must be configured to be sized at run-time
 */
// @{
explicit spsc_queue(size_type element_count):
base_type(element_count)
{
BOOST_ASSERT(runtime_sized);
}
template <typename U>
spsc_queue(size_type element_count, typename allocator::template rebind<U>::other const & alloc):
base_type(alloc, element_count)
{
BOOST_STATIC_ASSERT(runtime_sized);
}
spsc_queue(size_type element_count, allocator_arg const & alloc):
base_type(alloc, element_count)
{
BOOST_ASSERT(runtime_sized);
}
// @}
/** Pushes object t to the ringbuffer.
 *
 * \pre only one thread is allowed to push data to the spsc_queue
 * \post object will be pushed to the spsc_queue, unless it is full.
 * \return true, if the push operation is successful.
 *
 * \note Thread-safe and wait-free
 * */
bool push(T const & t)
{
return base_type::push(t);
}
/** Pops one object from ringbuffer.
 *
 * \pre only one thread is allowed to pop data to the spsc_queue
 * \post if ringbuffer is not empty, object will be copied to ret.
 * \return true, if the pop operation is successful, false if ringbuffer was empty.
 *
 * \note Thread-safe and wait-free
 */
bool pop(T & ret)
{
return base_type::pop(ret);
}
/** Pushes as many objects from the array t as there is space.
 *
 * \pre only one thread is allowed to push data to the spsc_queue
 * \return number of pushed items
 *
 * \note Thread-safe and wait-free
 */
size_type push(T const * t, size_type size)
{
return base_type::push(t, size);
}
/** Pushes as many objects from the array t as there is space available.
 *
 * \pre only one thread is allowed to push data to the spsc_queue
 * \return number of pushed items
 *
 * \note Thread-safe and wait-free
 */
template <size_type size>
size_type push(T const (&t)[size])
{
return push(t, size);
}
/** Pushes as many objects from the range [begin, end) as there is space.
 *
 * \pre only one thread is allowed to push data to the spsc_queue
 * \return iterator to the first element, which has not been pushed
 *
 * \note Thread-safe and wait-free
 */
template <typename ConstIterator>
ConstIterator push(ConstIterator begin, ConstIterator end)
{
return base_type::push(begin, end);
}
/** Pops a maximum of size objects from ringbuffer.
 *
 * \pre only one thread is allowed to pop data to the spsc_queue
 * \return number of popped items
 *
 * \note Thread-safe and wait-free
 * */
size_type pop(T * ret, size_type size)
{
return base_type::pop(ret, size);
}
/** Pops a maximum of size objects from spsc_queue.
 *
 * \pre only one thread is allowed to pop data to the spsc_queue
 * \return number of popped items
 *
 * \note Thread-safe and wait-free
 * */
template <size_type size>
size_type pop(T (&ret)[size])
{
return pop(ret, size);
}
/** Pops objects to the output iterator it
 *
 * \pre only one thread is allowed to pop data to the spsc_queue
 * \return number of popped items
 *
 * \note Thread-safe and wait-free
 * */
template <typename OutputIterator>
size_type pop(OutputIterator it)
{
return base_type::pop(it);
}
};
} /* namespace lockfree */
} /* namespace boost */
#endif /* BOOST_LOCKFREE_SPSC_QUEUE_HPP_INCLUDED */

View File

@@ -0,0 +1,512 @@
// Copyright (C) 2008, 2009, 2010, 2011 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_STACK_HPP_INCLUDED
#define BOOST_LOCKFREE_STACK_HPP_INCLUDED
#include <boost/assert.hpp>
#include <boost/checked_delete.hpp>
#include <boost/integer_traits.hpp>
#include <boost/noncopyable.hpp>
#include <boost/static_assert.hpp>
#include <boost/tuple/tuple.hpp>
#include <boost/type_traits/has_trivial_assign.hpp>
#include <boost/type_traits/has_trivial_destructor.hpp>
#include <boost/lockfree/detail/atomic.hpp>
#include <boost/lockfree/detail/copy_payload.hpp>
#include <boost/lockfree/detail/freelist.hpp>
#include <boost/lockfree/detail/parameter.hpp>
#include <boost/lockfree/detail/tagged_ptr.hpp>
namespace boost {
namespace lockfree {
namespace detail {
/* named-parameter signature for stack: optional allocator<> and capacity<> policies */
typedef parameter::parameters<boost::parameter::optional<tag::allocator>,
boost::parameter::optional<tag::capacity>
> stack_signature;
}
/** The stack class provides a multi-writer/multi-reader stack, pushing and popping is lock-free,
* construction/destruction has to be synchronized. It uses a freelist for memory management,
* freed nodes are pushed to the freelist and not returned to the OS before the stack is destroyed.
*
* \b Policies:
*
* - \c boost::lockfree::fixed_sized<>, defaults to \c boost::lockfree::fixed_sized<false> <br>
* Can be used to completely disable dynamic memory allocations during push in order to ensure lockfree behavior.<br>
* If the data structure is configured as fixed-sized, the internal nodes are stored inside an array and they are addressed
* by array indexing. This limits the possible size of the stack to the number of elements that can be addressed by the index
* type (usually 2**16-2), but on platforms that lack double-width compare-and-exchange instructions, this is the best way
* to achieve lock-freedom.
*
* - \c boost::lockfree::capacity<>, optional <br>
* If this template argument is passed to the options, the size of the stack is set at compile-time. <br>
 * This option implies \c fixed_sized<true>
*
* - \c boost::lockfree::allocator<>, defaults to \c boost::lockfree::allocator<std::allocator<void>> <br>
* Specifies the allocator that is used for the internal freelist
*
* \b Requirements:
* - T must have a copy constructor
* */
#ifndef BOOST_DOXYGEN_INVOKED
template <typename T,
class A0 = boost::parameter::void_,
class A1 = boost::parameter::void_,
class A2 = boost::parameter::void_>
#else
template <typename T, ...Options>
#endif
class stack:
boost::noncopyable
{
private:
#ifndef BOOST_DOXYGEN_INVOKED
/* nodes are recycled through the freelist without running element destructors,
 * and payloads are copied out by assignment: T must be trivially assignable
 * and trivially destructible */
BOOST_STATIC_ASSERT(boost::has_trivial_assign<T>::value);
BOOST_STATIC_ASSERT(boost::has_trivial_destructor<T>::value);
/* resolve the named template policies (allocator<>, capacity<>, fixed_sized<>) */
typedef typename detail::stack_signature::bind<A0, A1, A2>::type bound_args;
static const bool has_capacity = detail::extract_capacity<bound_args>::has_capacity;
static const size_t capacity = detail::extract_capacity<bound_args>::capacity;
static const bool fixed_sized = detail::extract_fixed_sized<bound_args>::value;
/* node_based: nodes are addressed by raw pointers; otherwise they live in an
 * array and are addressed by index through the freelist handle type */
static const bool node_based = !(has_capacity || fixed_sized);
static const bool compile_time_sized = has_capacity;
/* stack node: immutable payload plus a (non-atomic) link handle; the
 * top-of-stack pointer is the only atomic in the hot path */
struct node
{
node(T const & val):
v(val)
{}
typedef typename detail::select_tagged_handle<node, node_based>::handle_type handle_t;
handle_t next;
const T v;
};
typedef typename detail::extract_allocator<bound_args, node>::type node_allocator;
typedef typename detail::select_freelist<node, node_allocator, compile_time_sized, fixed_sized, capacity>::type pool_t;
typedef typename pool_t::tagged_node_handle tagged_node_handle;
// check compile-time capacity
BOOST_STATIC_ASSERT((mpl::if_c<has_capacity,
mpl::bool_<capacity - 1 < boost::integer_traits<boost::uint16_t>::const_max>,
mpl::true_
>::type::value));
/* publicly visible allocator/size_type, decoupled from the internal freelist types */
struct implementation_defined
{
typedef node_allocator allocator;
typedef std::size_t size_type;
};
#endif
public:
typedef T value_type;
typedef typename implementation_defined::allocator allocator;
typedef typename implementation_defined::size_type size_type;
/**
 * \return true, if implementation is lock-free.
 *
 * \warning It only checks, if the top stack node and the freelist can be modified in a lock-free manner.
 * On most platforms, the whole implementation is lock-free, if this is true. Using c++0x-style atomics,
 * there is no possibility to provide a completely accurate implementation, because one would need to test
 * every internal node, which is impossible if further nodes will be allocated from the operating system.
 *
 * */
bool is_lock_free (void) const
{
return tos.is_lock_free() && pool.is_lock_free();
}
//! Construct stack
// @{
/* these constructors require a compile-time capacity<> policy (asserted below);
 * the freelist is sized to `capacity` up front */
stack(void):
pool(node_allocator(), capacity)
{
BOOST_ASSERT(has_capacity);
initialize();
}
template <typename U>
explicit stack(typename node_allocator::template rebind<U>::other const & alloc):
pool(alloc, capacity)
{
BOOST_STATIC_ASSERT(has_capacity);
initialize();
}
explicit stack(allocator const & alloc):
pool(alloc, capacity)
{
BOOST_ASSERT(has_capacity);
initialize();
}
// @}
//! Construct stack, allocate n nodes for the freelist.
// @{
/* run-time sized construction: must NOT have a capacity<> policy */
explicit stack(size_type n):
pool(node_allocator(), n)
{
BOOST_ASSERT(!has_capacity);
initialize();
}
template <typename U>
stack(size_type n, typename node_allocator::template rebind<U>::other const & alloc):
pool(alloc, n)
{
BOOST_STATIC_ASSERT(!has_capacity);
initialize();
}
// @}
/** Allocate n nodes for freelist
 *
 * \pre only valid if no capacity<> argument given
 * \note thread-safe, may block if memory allocator blocks
 *
 * */
void reserve(size_type n)
{
BOOST_STATIC_ASSERT(!has_capacity);
pool.reserve(n);
}
/** Allocate n nodes for freelist
 *
 * \pre only valid if no capacity<> argument given
 * \note not thread-safe, may block if memory allocator blocks
 *
 * */
void reserve_unsafe(size_type n)
{
BOOST_STATIC_ASSERT(!has_capacity);
pool.reserve_unsafe(n);
}
/** Destroys stack, free all nodes from freelist.
 *
 * \note not thread-safe
 *
 * */
~stack(void)
{
/* drain all remaining elements; each pop returns its node to the
 * freelist, which is released when `pool` is destroyed */
T dummy;
while(unsynchronized_pop(dummy))
{}
}
private:
#ifndef BOOST_DOXYGEN_INVOKED
void initialize(void)
{
tos.store(tagged_node_handle(pool.null_handle(), 0));
}
    // Atomically splice a pre-linked chain [new_top_node .. end_node] onto the
    // stack: end_node->next is pointed at the current top, then a CAS swings
    // tos to new_top_node. Retries on contention.
    void link_nodes_atomic(node * new_top_node, node * end_node)
    {
        tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);
        for (;;) {
            // NOTE(review): the tag is carried over unchanged on push (only pop
            // increments it) -- presumably sufficient for ABA protection; confirm
            tagged_node_handle new_tos (pool.get_handle(new_top_node), old_tos.get_tag());
            end_node->next = pool.get_handle(old_tos);
            // on failure, compare_exchange_weak reloads old_tos and we re-link
            if (tos.compare_exchange_weak(old_tos, new_tos))
                break;
        }
    }
void link_nodes_unsafe(node * new_top_node, node * end_node)
{
tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);
tagged_node_handle new_tos (pool.get_handle(new_top_node), old_tos.get_tag());
end_node->next = pool.get_pointer(old_tos);
tos.store(new_tos, memory_order_relaxed);
}
    // Allocate and link one node per element of [begin, end) into a chain that
    // can be handed to link_nodes_atomic/link_nodes_unsafe.
    //
    // The first element becomes end_node (bottom of the chain), subsequent
    // elements are prepended, so the chain preserves push order when popped.
    // ret receives the first iterator whose element could NOT be allocated
    // (== end if everything fit).  Returns (new_top_node, end_node), or
    // (NULL, NULL) if not even the first node could be allocated.
    // Exception-safe: on throw, all nodes allocated so far are destructed.
    template <bool Threadsafe, bool Bounded, typename ConstIterator>
    tuple<node*, node*> prepare_node_list(ConstIterator begin, ConstIterator end, ConstIterator & ret)
    {
        ConstIterator it = begin;
        node * end_node = pool.template construct<Threadsafe, Bounded>(*it++);
        if (end_node == NULL) {
            // could not allocate anything: report begin as first unconsumed element
            ret = begin;
            return make_tuple<node*, node*>(NULL, NULL);
        }
        node * new_top_node = end_node;
        end_node->next = NULL;
        try {
            /* link nodes: each new node becomes the new head of the chain */
            for (; it != end; ++it) {
                node * newnode = pool.template construct<Threadsafe, Bounded>(*it);
                if (newnode == NULL)
                    break;  // pool exhausted -- stop early, push what we have
                newnode->next = new_top_node;
                new_top_node = newnode;
            }
        } catch (...) {
            // roll back: walk the partially built chain and destroy every node
            for (node * current_node = new_top_node; current_node != NULL;) {
                node * next = current_node->next;
                pool.template destruct<Threadsafe>(current_node);
                current_node = next;
            }
            throw;
        }
        ret = it;
        return make_tuple(new_top_node, end_node);
    }
#endif
public:
    /** Pushes object t to the stack.
     *
     * \post object will be pushed to the stack, if internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
     *                    from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     * */
    bool push(T const & v)
    {
        // Bounded = false: may fall back to OS allocation when the pool is empty
        return do_push<false>(v);
    }
    /** Pushes object t to the stack.
     *
     * \post object will be pushed to the stack, if internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Thread-safe and non-blocking. If internal memory pool is exhausted, the push operation will fail
     * */
    bool bounded_push(T const & v)
    {
        // Bounded = true: never allocate from the OS, fail instead
        return do_push<true>(v);
    }
#ifndef BOOST_DOXYGEN_INVOKED
private:
template <bool Bounded>
bool do_push(T const & v)
{
node * newnode = pool.template construct<true, Bounded>(v);
if (newnode == 0)
return false;
link_nodes_atomic(newnode, newnode);
return true;
}
template <bool Bounded, typename ConstIterator>
ConstIterator do_push(ConstIterator begin, ConstIterator end)
{
node * new_top_node;
node * end_node;
ConstIterator ret;
tie(new_top_node, end_node) = prepare_node_list<true, Bounded>(begin, end, ret);
if (new_top_node)
link_nodes_atomic(new_top_node, end_node);
return ret;
}
public:
#endif
    /** Pushes as many objects from the range [begin, end) as freelist node can be allocated.
     *
     * \return iterator to the first element, which has not been pushed
     *
     * \note Operation is applied atomically
     * \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
     *                    from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     */
    template <typename ConstIterator>
    ConstIterator push(ConstIterator begin, ConstIterator end)
    {
        // Bounded = false: may fall back to OS allocation when the pool is empty
        return do_push<false, ConstIterator>(begin, end);
    }
    /** Pushes as many objects from the range [begin, end) as freelist node can be allocated.
     *
     * \return iterator to the first element, which has not been pushed
     *
     * \note Operation is applied atomically
     * \note Thread-safe and non-blocking. If internal memory pool is exhausted, the push operation will fail
     * \throws if memory allocator throws
     */
    template <typename ConstIterator>
    ConstIterator bounded_push(ConstIterator begin, ConstIterator end)
    {
        // Bounded = true: never allocate from the OS, stop at pool exhaustion
        return do_push<true, ConstIterator>(begin, end);
    }
/** Pushes object t to the stack.
*
* \post object will be pushed to the stack, if internal node can be allocated
* \returns true, if the push operation is successful.
*
* \note Not thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
* from the OS. This may not be lock-free.
* \throws if memory allocator throws
* */
bool unsynchronized_push(T const & v)
{
node * newnode = pool.template construct<false, false>(v);
if (newnode == 0)
return false;
link_nodes_unsafe(newnode, newnode);
return true;
}
/** Pushes as many objects from the range [begin, end) as freelist node can be allocated.
*
* \return iterator to the first element, which has not been pushed
*
* \note Not thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
* from the OS. This may not be lock-free.
* \throws if memory allocator throws
*/
template <typename ConstIterator>
ConstIterator unsynchronized_push(ConstIterator begin, ConstIterator end)
{
node * new_top_node;
node * end_node;
ConstIterator ret;
tie(new_top_node, end_node) = prepare_node_list<false, false>(begin, end, ret);
if (new_top_node)
link_nodes_unsafe(new_top_node, end_node);
return ret;
}
    /** Pops object from stack.
     *
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if stack was empty.
     *
     * \note Thread-safe and non-blocking
     *
     * */
    bool pop(T & ret)
    {
        // forward to the converting overload with U = T
        return pop<T>(ret);
    }
    /** Pops object from stack.
     *
     * \pre type T must be convertible to U
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if stack was empty.
     *
     * \note Thread-safe and non-blocking
     *
     * */
    template <typename U>
    bool pop(U & ret)
    {
        BOOST_STATIC_ASSERT((boost::is_convertible<T, U>::value));
        tagged_node_handle old_tos = tos.load(detail::memory_order_consume);
        for (;;) {
            node * old_tos_pointer = pool.get_pointer(old_tos);
            // null top handle means the stack is empty
            if (!old_tos_pointer)
                return false;
            // new top = successor of the old top; tag is incremented so a CAS
            // against a recycled node with the same address fails (ABA guard)
            tagged_node_handle new_tos(old_tos_pointer->next, old_tos.get_tag() + 1);
            if (tos.compare_exchange_weak(old_tos, new_tos)) {
                // node is now exclusively ours: copy payload, then recycle it
                detail::copy_payload(old_tos_pointer->v, ret);
                pool.template destruct<true>(old_tos);
                return true;
            }
            // CAS failure reloaded old_tos; retry with the fresh top
        }
    }
    /** Pops object from stack.
     *
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if stack was empty.
     *
     * \note Not thread-safe, but non-blocking
     *
     * */
    bool unsynchronized_pop(T & ret)
    {
        // forward to the converting overload with U = T
        return unsynchronized_pop<T>(ret);
    }
/** Pops object from stack.
*
* \pre type T must be convertible to U
* \post if pop operation is successful, object will be copied to ret.
* \returns true, if the pop operation is successful, false if stack was empty.
*
* \note Not thread-safe, but non-blocking
*
* */
template <typename U>
bool unsynchronized_pop(U & ret)
{
BOOST_STATIC_ASSERT((boost::is_convertible<T, U>::value));
tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);
node * old_tos_pointer = pool.get_pointer(old_tos);
if (!pool.get_pointer(old_tos))
return false;
node * new_tos_ptr = pool.get_pointer(old_tos_pointer->next);
tagged_node_handle new_tos(pool.get_handle(new_tos_ptr), old_tos.get_tag() + 1);
tos.store(new_tos, memory_order_relaxed);
detail::copy_payload(old_tos_pointer->v, ret);
pool.template destruct<false>(old_tos);
return true;
}
    /**
     * \return true, if stack is empty.
     *
     * \note It only guarantees that at some point during the execution of the function the stack has been empty.
     *       It is rarely practical to use this value in program logic, because the stack can be modified by other threads.
     * */
    bool empty(void) const
    {
        // empty iff the top-of-stack handle resolves to no node
        // (load uses the default sequentially-consistent ordering)
        return pool.get_pointer(tos.load()) == NULL;
    }
private:
#ifndef BOOST_DOXYGEN_INVOKED
    // top of stack: tagged handle (pointer/index plus ABA tag), modified via CAS
    detail::atomic<tagged_node_handle> tos;
    // pad tos out to BOOST_LOCKFREE_CACHELINE_BYTES so tos and pool start on
    // different cache lines -- NOTE(review): presumably false-sharing avoidance
    static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(tagged_node_handle);
    char padding[padding_size];
    // freelist-backed node pool supplying the stack's nodes
    pool_t pool;
#endif
};
} /* namespace lockfree */
} /* namespace boost */
#endif /* BOOST_LOCKFREE_STACK_HPP_INCLUDED */