Update concurrentqueue to 1.0.2

1.0.2 resolves compiler warnings emitted by MSVC 2019 when C++17 support is enabled (9cfda6cc61)
pull/25/head
Kyle Schwarz 2020-08-10 10:20:00 -04:00
parent 154eab4b1b
commit 7b3782cbe7
6 changed files with 141 additions and 85 deletions

View File

@@ -1,8 +1,9 @@
 // Provides an efficient blocking version of moodycamel::ConcurrentQueue.
-// ©2015-2016 Cameron Desrochers. Distributed under the terms of the simplified
+// ©2015-2020 Cameron Desrochers. Distributed under the terms of the simplified
 // BSD license, available at the top of concurrentqueue.h.
+// Also dual-licensed under the Boost Software License (see LICENSE.md)
 // Uses Jeff Preshing's semaphore implementation (under the terms of its
-// separate zlib license, embedded below).
+// separate zlib license, see lightweightsemaphore.h).
 #pragma once
@@ -55,7 +56,7 @@ public:
 // includes making the memory effects of construction visible, possibly with a
 // memory barrier).
 explicit BlockingConcurrentQueue(size_t capacity = 6 * BLOCK_SIZE)
-: inner(capacity), sema(create<LightweightSemaphore>(), &BlockingConcurrentQueue::template destroy<LightweightSemaphore>)
+: inner(capacity), sema(create<LightweightSemaphore>(0, (int)Traits::MAX_SEMA_SPINS), &BlockingConcurrentQueue::template destroy<LightweightSemaphore>)
 {
 assert(reinterpret_cast<ConcurrentQueue*>((BlockingConcurrentQueue*)1) == &((BlockingConcurrentQueue*)1)->inner && "BlockingConcurrentQueue must have ConcurrentQueue as its first member");
 if (!sema) {
@@ -64,7 +65,7 @@
 }
 BlockingConcurrentQueue(size_t minCapacity, size_t maxExplicitProducers, size_t maxImplicitProducers)
-: inner(minCapacity, maxExplicitProducers, maxImplicitProducers), sema(create<LightweightSemaphore>(), &BlockingConcurrentQueue::template destroy<LightweightSemaphore>)
+: inner(minCapacity, maxExplicitProducers, maxImplicitProducers), sema(create<LightweightSemaphore>(0, (int)Traits::MAX_SEMA_SPINS), &BlockingConcurrentQueue::template destroy<LightweightSemaphore>)
 {
 assert(reinterpret_cast<ConcurrentQueue*>((BlockingConcurrentQueue*)1) == &((BlockingConcurrentQueue*)1)->inner && "BlockingConcurrentQueue must have ConcurrentQueue as its first member");
 if (!sema) {
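Note: the semaphore created here backs the queue's blocking operations, with Traits::MAX_SEMA_SPINS (new in this release, see concurrentqueue.h below) controlling how long a consumer spins before it sleeps on the OS semaphore. A minimal usage sketch of the blocking API (the queue type and wait_dequeue are real; the element type and thread layout are illustrative):

    #include "blockingconcurrentqueue.h"
    #include <thread>

    int main() {
        moodycamel::BlockingConcurrentQueue<int> q;
        std::thread producer([&] { for (int i = 0; i != 100; ++i) q.enqueue(i); });
        int item;
        for (int n = 0; n != 100; ++n)
            q.wait_dequeue(item); // spins up to MAX_SEMA_SPINS times, then blocks on the semaphore
        producer.join();
    }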
@@ -550,18 +551,11 @@ public:
 private:
-template<typename U>
-static inline U* create()
+template<typename U, typename A1, typename A2>
+static inline U* create(A1&& a1, A2&& a2)
 {
-auto p = (Traits::malloc)(sizeof(U));
-return p != nullptr ? new (p) U : nullptr;
-}
-template<typename U, typename A1>
-static inline U* create(A1&& a1)
-{
-auto p = (Traits::malloc)(sizeof(U));
-return p != nullptr ? new (p) U(std::forward<A1>(a1)) : nullptr;
+void* p = (Traits::malloc)(sizeof(U));
+return p != nullptr ? new (p) U(std::forward<A1>(a1), std::forward<A2>(a2)) : nullptr;
 }
 template<typename U>
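With the semaphore now always built from two arguments (initial count and spin limit), the zero- and one-argument create() overloads are dead code and collapse into a single two-argument factory. The underlying pattern is allocate-then-placement-new, so that a failed Traits::malloc surfaces as nullptr rather than an exception; a standalone sketch under that assumption (variadic here for generality, whereas the library pins it to exactly two arguments):

    #include <cstdlib>  // std::malloc
    #include <new>      // placement new
    #include <utility>  // std::forward

    template<typename U, typename... Args>
    U* create_with(Args&&... args) {
        void* p = std::malloc(sizeof(U));                            // may fail...
        return p ? new (p) U(std::forward<Args>(args)...) : nullptr; // ...construct only on success
    }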

View File

@@ -5,7 +5,7 @@
 // http://moodycamel.com/blog/2014/detailed-design-of-a-lock-free-queue
 // Simplified BSD license:
-// Copyright (c) 2013-2016, Cameron Desrochers.
+// Copyright (c) 2013-2020, Cameron Desrochers.
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without modification,
@@ -27,6 +27,7 @@
 // TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 // EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Also dual-licensed under the Boost Software License (see LICENSE.md)
 #pragma once
@@ -42,6 +43,13 @@
 #endif
 #endif
+#if defined(_MSC_VER) && (!defined(_HAS_CXX17) || !_HAS_CXX17)
+// VS2019 with /W4 warns about constant conditional expressions, but does not support
+// `if constexpr` unless /std:c++17 or higher, so we have no choice but to simply disable the warning
+#pragma warning(push)
+#pragma warning(disable: 4127) // conditional expression is constant
+#endif
 #if defined(__APPLE__)
 #include "TargetConditionals.h"
 #endif
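The warning being silenced is C4127, which fires when a branch condition is a compile-time constant, which is exactly what the MOODYCAMEL_NOEXCEPT_CTOR checks below expand to. On C++17 the library can use `if constexpr` instead (via MOODYCAMEL_CONSTEXPR_IF, defined elsewhere in the header); pre-C++17 MSVC gets the pragma. A reduced sketch of that dispatch, assuming this two-way macro:

    #if defined(_MSC_VER) && defined(_HAS_CXX17) && _HAS_CXX17
    #define MY_CONSTEXPR_IF if constexpr  // branch discarded at compile time, no C4127
    #else
    #define MY_CONSTEXPR_IF if            // plain if; needs the warning disabled on /W4
    #endif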
@@ -225,18 +233,43 @@ namespace moodycamel { namespace details {
 #endif
 #endif
+namespace moodycamel { namespace details {
 #ifndef MOODYCAMEL_ALIGNAS
-// VS2013 doesn't support alignas or alignof
+// VS2013 doesn't support alignas or alignof, and align() requires a constant literal
 #if defined(_MSC_VER) && _MSC_VER <= 1800
 #define MOODYCAMEL_ALIGNAS(alignment) __declspec(align(alignment))
 #define MOODYCAMEL_ALIGNOF(obj) __alignof(obj)
+#define MOODYCAMEL_ALIGNED_TYPE_LIKE(T, obj) typename details::Vs2013Aligned<std::alignment_of<obj>::value, T>::type
+template<int Align, typename T> struct Vs2013Aligned { }; // default, unsupported alignment
+template<typename T> struct Vs2013Aligned<1, T> { typedef __declspec(align(1)) T type; };
+template<typename T> struct Vs2013Aligned<2, T> { typedef __declspec(align(2)) T type; };
+template<typename T> struct Vs2013Aligned<4, T> { typedef __declspec(align(4)) T type; };
+template<typename T> struct Vs2013Aligned<8, T> { typedef __declspec(align(8)) T type; };
+template<typename T> struct Vs2013Aligned<16, T> { typedef __declspec(align(16)) T type; };
+template<typename T> struct Vs2013Aligned<32, T> { typedef __declspec(align(32)) T type; };
+template<typename T> struct Vs2013Aligned<64, T> { typedef __declspec(align(64)) T type; };
+template<typename T> struct Vs2013Aligned<128, T> { typedef __declspec(align(128)) T type; };
+template<typename T> struct Vs2013Aligned<256, T> { typedef __declspec(align(256)) T type; };
 #else
+template<typename T> struct identity { typedef T type; };
 #define MOODYCAMEL_ALIGNAS(alignment) alignas(alignment)
 #define MOODYCAMEL_ALIGNOF(obj) alignof(obj)
+#define MOODYCAMEL_ALIGNED_TYPE_LIKE(T, obj) alignas(alignof(obj)) typename details::identity<T>::type
 #endif
 #endif
+} }
+// TSAN can falsely report races in lock-free code. To enable TSAN to be used from projects that use this one,
+// we can apply per-function compile-time suppression.
+// See https://clang.llvm.org/docs/ThreadSanitizer.html#has-feature-thread-sanitizer
+#define MOODYCAMEL_NO_TSAN
+#if defined(__has_feature)
+#if __has_feature(thread_sanitizer)
+#undef MOODYCAMEL_NO_TSAN
+#define MOODYCAMEL_NO_TSAN __attribute__((no_sanitize("thread")))
+#endif // TSAN
+#endif // TSAN
 // Compiler-specific likely/unlikely hints
 namespace moodycamel { namespace details {
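The TSAN escape hatch uses Clang's __has_feature detection so the attribute only appears under -fsanitize=thread. Reduced to a standalone example (the function and the race are contrived):

    #define NO_TSAN
    #if defined(__has_feature)
    #if __has_feature(thread_sanitizer)
    #undef NO_TSAN
    #define NO_TSAN __attribute__((no_sanitize("thread")))
    #endif
    #endif

    int g;
    int NO_TSAN racy_read() { return g; } // TSAN skips instrumenting this function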
@@ -339,6 +372,12 @@ struct ConcurrentQueueDefaultTraits
 // that this limit is enforced at the block level (for performance reasons), i.e.
 // it's rounded up to the nearest block size.
 static const size_t MAX_SUBQUEUE_SIZE = details::const_numeric_max<size_t>::value;
+// The number of times to spin before sleeping when waiting on a semaphore.
+// Recommended values are on the order of 1000-10000 unless the number of
+// consumer threads exceeds the number of idle cores (in which case try 0-100).
+// Only affects instances of the BlockingConcurrentQueue.
+static const int MAX_SEMA_SPINS = 10000;
 #ifndef MCDBGQ_USE_RELACY
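Overriding the new knob follows the library's usual traits pattern: derive from ConcurrentQueueDefaultTraits and shadow the constant. A sketch (the trait name is real; the value of 100 is just the low end suggested above for oversubscribed consumers):

    struct SpinTraits : public moodycamel::ConcurrentQueueDefaultTraits {
        static const int MAX_SEMA_SPINS = 100;
    };
    moodycamel::BlockingConcurrentQueue<int, SpinTraits> q;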
@@ -1608,7 +1647,7 @@ private:
 private:
 static_assert(std::alignment_of<T>::value <= sizeof(T), "The queue does not support types with an alignment greater than their size at this time");
-MOODYCAMEL_ALIGNAS(MOODYCAMEL_ALIGNOF(T)) char elements[sizeof(T) * BLOCK_SIZE];
+MOODYCAMEL_ALIGNED_TYPE_LIKE(char[sizeof(T) * BLOCK_SIZE], T) elements;
 public:
 Block* next;
 std::atomic<size_t> elementsCompletelyDequeued;
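On compilers with alignas, MOODYCAMEL_ALIGNED_TYPE_LIKE boils down to the standard aligned-raw-storage idiom: a char buffer aligned like T, populated later with placement new. A reduced sketch of what the Block's elements member amounts to:

    #include <new>

    template<typename T, int N>
    struct RawSlots {
        alignas(alignof(T)) char buf[sizeof(T) * N]; // uninitialized, correctly aligned storage
        T* slot(int i) { return reinterpret_cast<T*>(buf) + i; }
    };
    // elements are constructed on demand: new (slots.slot(i)) T(...);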
@@ -1858,7 +1897,7 @@ private:
 ++pr_blockIndexSlotsUsed;
 }
-if (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new ((T*)nullptr) T(std::forward<U>(element)))) {
+MOODYCAMEL_CONSTEXPR_IF (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new ((T*)nullptr) T(std::forward<U>(element)))) {
 // The constructor may throw. We want the element not to appear in the queue in
 // that case (without corrupting the queue):
 MOODYCAMEL_TRY {
@@ -1884,7 +1923,7 @@ private:
 blockIndex.load(std::memory_order_relaxed)->front.store(pr_blockIndexFront, std::memory_order_release);
 pr_blockIndexFront = (pr_blockIndexFront + 1) & (pr_blockIndexSize - 1);
-if (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new ((T*)nullptr) T(std::forward<U>(element)))) {
+MOODYCAMEL_CONSTEXPR_IF (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new ((T*)nullptr) T(std::forward<U>(element)))) {
 this->tailIndex.store(newTailIndex, std::memory_order_release);
 return true;
 }
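Each of these MOODYCAMEL_CONSTEXPR_IF sites guards the same decision: if T's construction cannot throw, the try/catch rollback path is dead and can be discarded at compile time. A sketch of the shape (the queue's real rollback adjusts tail indices and block bookkeeping; this only shows the dispatch):

    #include <new>
    #include <type_traits>
    #include <utility>

    template<typename T, typename U>
    void emplace_at(T* p, U&& v) {
        if constexpr (std::is_nothrow_constructible<T, U&&>::value) {
            new (p) T(std::forward<U>(v));            // cannot throw: no rollback branch emitted
        } else {
            try { new (p) T(std::forward<U>(v)); }
            catch (...) { /* undo queue state here */ throw; }
        }
    }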
@@ -1998,7 +2037,7 @@ private:
 }
 template<AllocationMode allocMode, typename It>
-bool enqueue_bulk(It itemFirst, size_t count)
+bool MOODYCAMEL_NO_TSAN enqueue_bulk(It itemFirst, size_t count)
 {
 // First, we need to make sure we have enough room to enqueue all of the elements;
 // this means pre-allocating blocks and putting them in the block index (but only if
@@ -2100,7 +2139,7 @@ private:
 block = block->next;
 }
-if (MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new ((T*)nullptr) T(details::deref_noexcept(itemFirst)))) {
+MOODYCAMEL_CONSTEXPR_IF (MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new ((T*)nullptr) T(details::deref_noexcept(itemFirst)))) {
 blockIndex.load(std::memory_order_relaxed)->front.store((pr_blockIndexFront - 1) & (pr_blockIndexSize - 1), std::memory_order_release);
 }
 }
@@ -2115,11 +2154,11 @@ private:
 this->tailBlock = firstAllocatedBlock;
 }
 while (true) {
-auto stopIndex = (currentTailIndex & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);
+index_t stopIndex = (currentTailIndex & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);
 if (details::circular_less_than<index_t>(newTailIndex, stopIndex)) {
 stopIndex = newTailIndex;
 }
-if (MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new ((T*)nullptr) T(details::deref_noexcept(itemFirst)))) {
+MOODYCAMEL_CONSTEXPR_IF (MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new ((T*)nullptr) T(details::deref_noexcept(itemFirst)))) {
 while (currentTailIndex != stopIndex) {
 new ((*this->tailBlock)[currentTailIndex++]) T(*itemFirst++);
 }
@@ -2181,8 +2220,9 @@ private:
 this->tailBlock = this->tailBlock->next;
 }
-if (!MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new ((T*)nullptr) T(details::deref_noexcept(itemFirst))) && firstAllocatedBlock != nullptr) {
-blockIndex.load(std::memory_order_relaxed)->front.store((pr_blockIndexFront - 1) & (pr_blockIndexSize - 1), std::memory_order_release);
+MOODYCAMEL_CONSTEXPR_IF (!MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new ((T*)nullptr) T(details::deref_noexcept(itemFirst)))) {
+if (firstAllocatedBlock != nullptr)
+blockIndex.load(std::memory_order_relaxed)->front.store((pr_blockIndexFront - 1) & (pr_blockIndexSize - 1), std::memory_order_release);
 }
 this->tailIndex.store(newTailIndex, std::memory_order_release);
@@ -2226,7 +2266,7 @@ private:
 auto index = firstIndex;
 do {
 auto firstIndexInBlock = index;
-auto endIndex = (index & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);
+index_t endIndex = (index & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);
 endIndex = details::circular_less_than<index_t>(firstIndex + static_cast<index_t>(actualCount), endIndex) ? firstIndex + static_cast<index_t>(actualCount) : endIndex;
 auto block = localBlockIndex->entries[indexIndex].block;
 if (MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, details::deref_noexcept(itemFirst) = std::move((*(*block)[index])))) {
@@ -2460,8 +2500,8 @@ private:
 newBlock->owner = this;
 #endif
 newBlock->ConcurrentQueue::Block::template reset_empty<implicit_context>();
-if (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new ((T*)nullptr) T(std::forward<U>(element)))) {
+MOODYCAMEL_CONSTEXPR_IF (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new ((T*)nullptr) T(std::forward<U>(element)))) {
 // May throw, try to insert now before we publish the fact that we have this new block
 MOODYCAMEL_TRY {
 new ((*newBlock)[currentTailIndex]) T(std::forward<U>(element));
@@ -2479,7 +2519,7 @@ private:
 this->tailBlock = newBlock;
-if (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new ((T*)nullptr) T(std::forward<U>(element)))) {
+MOODYCAMEL_CONSTEXPR_IF (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new ((T*)nullptr) T(std::forward<U>(element)))) {
 this->tailIndex.store(newTailIndex, std::memory_order_release);
 return true;
 }
@@ -2563,6 +2603,10 @@ private:
 return false;
 }
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable: 4706) // assignment within conditional expression
+#endif
 template<AllocationMode allocMode, typename It>
 bool enqueue_bulk(It itemFirst, size_t count)
 {
@@ -2598,6 +2642,7 @@ private:
 auto head = this->headIndex.load(std::memory_order_relaxed);
 assert(!details::circular_less_than<index_t>(currentTailIndex, head));
 bool full = !details::circular_less_than<index_t>(head, currentTailIndex + BLOCK_SIZE) || (MAX_SUBQUEUE_SIZE != details::const_numeric_max<size_t>::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head));
 if (full || !(indexInserted = insert_block_index_entry<allocMode>(idxEntry, currentTailIndex)) || (newBlock = this->parent->ConcurrentQueue::template requisition_block<allocMode>()) == nullptr) {
 // Index allocation or block allocation failed; revert any other allocations
 // and index insertions done so far for this operation
@@ -2648,11 +2693,11 @@ private:
 this->tailBlock = firstAllocatedBlock;
 }
 while (true) {
-auto stopIndex = (currentTailIndex & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);
+index_t stopIndex = (currentTailIndex & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);
 if (details::circular_less_than<index_t>(newTailIndex, stopIndex)) {
 stopIndex = newTailIndex;
 }
-if (MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new ((T*)nullptr) T(details::deref_noexcept(itemFirst)))) {
+MOODYCAMEL_CONSTEXPR_IF (MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new ((T*)nullptr) T(details::deref_noexcept(itemFirst)))) {
 while (currentTailIndex != stopIndex) {
 new ((*this->tailBlock)[currentTailIndex++]) T(*itemFirst++);
 }
@@ -2712,6 +2757,9 @@ private:
 this->tailIndex.store(newTailIndex, std::memory_order_release);
 return true;
 }
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
 template<typename It>
 size_t dequeue_bulk(It& itemFirst, size_t max)
@@ -2743,7 +2791,7 @@ private:
 auto indexIndex = get_block_index_index_for_index(index, localBlockIndex);
 do {
 auto blockStartIndex = index;
-auto endIndex = (index & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);
+index_t endIndex = (index & ~static_cast<index_t>(BLOCK_SIZE - 1)) + static_cast<index_t>(BLOCK_SIZE);
 endIndex = details::circular_less_than<index_t>(firstIndex + static_cast<index_t>(actualCount), endIndex) ? firstIndex + static_cast<index_t>(actualCount) : endIndex;
 auto entry = localBlockIndex->index[indexIndex];
@@ -2841,7 +2889,7 @@ private:
 if (localBlockIndex == nullptr) {
 return false; // this can happen if new_block_index failed in the constructor
 }
-auto newTail = (localBlockIndex->tail.load(std::memory_order_relaxed) + 1) & (localBlockIndex->capacity - 1);
+size_t newTail = (localBlockIndex->tail.load(std::memory_order_relaxed) + 1) & (localBlockIndex->capacity - 1);
 idxEntry = localBlockIndex->index[newTail];
 if (idxEntry->key.load(std::memory_order_relaxed) == INVALID_BLOCK_BASE ||
 idxEntry->value.load(std::memory_order_relaxed) == nullptr) {
@@ -3411,7 +3459,7 @@ private:
 }
 auto newHash = new (raw) ImplicitProducerHash;
-newHash->capacity = newCapacity;
+newHash->capacity = (size_t)newCapacity;
 newHash->entries = reinterpret_cast<ImplicitProducerKVP*>(details::align_for<ImplicitProducerKVP>(raw + sizeof(ImplicitProducerHash)));
 for (size_t i = 0; i != newCapacity; ++i) {
 new (newHash->entries + i) ImplicitProducerKVP;
@@ -3525,23 +3573,26 @@ private:
 template<typename TAlign>
 static inline void* aligned_malloc(size_t size)
 {
-if (std::alignment_of<TAlign>::value <= std::alignment_of<details::max_align_t>::value)
+MOODYCAMEL_CONSTEXPR_IF (std::alignment_of<TAlign>::value <= std::alignment_of<details::max_align_t>::value)
 return (Traits::malloc)(size);
-size_t alignment = std::alignment_of<TAlign>::value;
-void* raw = (Traits::malloc)(size + alignment - 1 + sizeof(void*));
-if (!raw)
-return nullptr;
-char* ptr = details::align_for<TAlign>(reinterpret_cast<char*>(raw) + sizeof(void*));
-*(reinterpret_cast<void**>(ptr) - 1) = raw;
-return ptr;
+else {
+size_t alignment = std::alignment_of<TAlign>::value;
+void* raw = (Traits::malloc)(size + alignment - 1 + sizeof(void*));
+if (!raw)
+return nullptr;
+char* ptr = details::align_for<TAlign>(reinterpret_cast<char*>(raw) + sizeof(void*));
+*(reinterpret_cast<void**>(ptr) - 1) = raw;
+return ptr;
+}
 }
 template<typename TAlign>
 static inline void aligned_free(void* ptr)
 {
-if (std::alignment_of<TAlign>::value <= std::alignment_of<details::max_align_t>::value)
+MOODYCAMEL_CONSTEXPR_IF (std::alignment_of<TAlign>::value <= std::alignment_of<details::max_align_t>::value)
 return (Traits::free)(ptr);
-(Traits::free)(ptr ? *(reinterpret_cast<void**>(ptr) - 1) : nullptr);
+else
+(Traits::free)(ptr ? *(reinterpret_cast<void**>(ptr) - 1) : nullptr);
 }
 template<typename U>
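aligned_malloc over-allocates by the worst-case padding plus one pointer, returns an aligned address inside the block, and stashes the raw pointer just below it so aligned_free can recover it. The restructure into if/else matters because, under `if constexpr`, only one branch survives compilation. The scheme in isolation (power-of-two alignment assumed, as in the original):

    #include <cstdlib>
    #include <cstdint>

    void* my_aligned_malloc(std::size_t size, std::size_t alignment) {
        void* raw = std::malloc(size + alignment - 1 + sizeof(void*));
        if (!raw) return nullptr;
        auto addr = reinterpret_cast<std::uintptr_t>(raw) + sizeof(void*);
        addr = (addr + alignment - 1) & ~(alignment - 1); // round up to alignment
        void* ptr = reinterpret_cast<void*>(addr);
        *(reinterpret_cast<void**>(ptr) - 1) = raw;       // remember the real allocation
        return ptr;
    }

    void my_aligned_free(void* ptr) {
        if (ptr) std::free(*(reinterpret_cast<void**>(ptr) - 1));
    }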
@@ -3647,7 +3698,7 @@ ConsumerToken::ConsumerToken(ConcurrentQueue<T, Traits>& queue)
 : itemsConsumedFromCurrent(0), currentProducer(nullptr), desiredProducer(nullptr)
 {
 initialOffset = queue.nextExplicitConsumerId.fetch_add(1, std::memory_order_release);
-lastKnownGlobalOffset = -1;
+lastKnownGlobalOffset = (std::uint32_t)-1;
 }
 template<typename T, typename Traits>
@@ -3655,7 +3706,7 @@ ConsumerToken::ConsumerToken(BlockingConcurrentQueue<T, Traits>& queue)
 : itemsConsumedFromCurrent(0), currentProducer(nullptr), desiredProducer(nullptr)
 {
 initialOffset = reinterpret_cast<ConcurrentQueue<T, Traits>*>(&queue)->nextExplicitConsumerId.fetch_add(1, std::memory_order_release);
-lastKnownGlobalOffset = -1;
+lastKnownGlobalOffset = (std::uint32_t)-1;
 }
 template<typename T, typename Traits>
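lastKnownGlobalOffset is a std::uint32_t, so assigning a bare -1 draws MSVC's signed/unsigned conversion warning (C4245); the cast keeps the all-ones sentinel while stating the intent. An equivalent spelling:

    #include <cstdint>
    #include <limits>

    std::uint32_t sentinel = std::numeric_limits<std::uint32_t>::max(); // same value as (std::uint32_t)-1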
@@ -3682,6 +3733,10 @@ inline void swap(typename ConcurrentQueue<T, Traits>::ImplicitProducerKVP& a, ty
 }
+#if defined(_MSC_VER) && (!defined(_HAS_CXX17) || !_HAS_CXX17)
+#pragma warning(pop)
+#endif
 #if defined(__GNUC__)
 #pragma GCC diagnostic pop
 #endif

View File

@@ -257,14 +257,12 @@ public:
 private:
 std::atomic<ssize_t> m_count;
 details::Semaphore m_sema;
+int m_maxSpins;
 bool waitWithPartialSpinning(std::int64_t timeout_usecs = -1)
 {
 ssize_t oldCount;
-// Is there a better way to set the initial spin count?
-// If we lower it to 1000, testBenaphore becomes 15x slower on my Core i7-5930K Windows PC,
-// as threads start hitting the kernel semaphore.
-int spin = 10000;
+int spin = m_maxSpins;
 while (--spin >= 0)
 {
 oldCount = m_count.load(std::memory_order_relaxed);
@@ -276,8 +274,11 @@ private:
 if (oldCount > 0)
 return true;
 if (timeout_usecs < 0)
-return m_sema.wait();
-if (m_sema.timed_wait((std::uint64_t)timeout_usecs))
+{
+if (m_sema.wait())
+return true;
+}
+if (timeout_usecs > 0 && m_sema.timed_wait((std::uint64_t)timeout_usecs))
 return true;
 // At this point, we've timed out waiting for the semaphore, but the
 // count is still decremented indicating we may still be waiting on
@@ -298,7 +299,7 @@ private:
 {
 assert(max > 0);
 ssize_t oldCount;
-int spin = 10000;
+int spin = m_maxSpins;
 while (--spin >= 0)
 {
 oldCount = m_count.load(std::memory_order_relaxed);
@@ -313,12 +314,7 @@ private:
 oldCount = m_count.fetch_sub(1, std::memory_order_acquire);
 if (oldCount <= 0)
 {
-if (timeout_usecs < 0)
-{
-if (!m_sema.wait())
-return 0;
-}
-else if (!m_sema.timed_wait((std::uint64_t)timeout_usecs))
+if ((timeout_usecs == 0) || (timeout_usecs < 0 && !m_sema.wait()) || (timeout_usecs > 0 && !m_sema.timed_wait((std::uint64_t)timeout_usecs)))
 {
 while (true)
 {
@@ -336,9 +332,10 @@ private:
 }
 public:
-LightweightSemaphore(ssize_t initialCount = 0) : m_count(initialCount)
+LightweightSemaphore(ssize_t initialCount = 0, int maxSpins = 10000) : m_count(initialCount), m_maxSpins(maxSpins)
 {
 assert(initialCount >= 0);
+assert(maxSpins >= 0);
 }
 bool tryWait()
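The constructor change simply threads the spin budget through to the two wait paths above, both of which follow the classic two-phase pattern: spin on the atomic count for a bounded number of iterations, then hand the wait to the kernel semaphore. Stripped of timeouts and bulk acquisition, the shape is roughly (m_count/m_sema as in the class; a sketch only):

    #include <atomic>

    bool wait_two_phase(std::atomic<long>& count, details::Semaphore& sema, int maxSpins) {
        for (int spin = maxSpins; --spin >= 0;) {
            long old = count.load(std::memory_order_relaxed);
            if (old > 0 && count.compare_exchange_strong(old, old - 1, std::memory_order_acquire))
                return true; // got a token while spinning, no syscall
            std::atomic_signal_fence(std::memory_order_acquire); // keep the loop from being collapsed
        }
        if (count.fetch_sub(1, std::memory_order_acquire) > 0)
            return true;     // a token arrived just as spinning gave up
        return sema.wait();  // block in the kernel until signaled
    }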

View File

@@ -277,7 +277,7 @@ struct ThreadLocal
 auto raw = static_cast<char*>(corealgos_allocator::malloc(sizeof(InnerHash) + std::alignment_of<KeyValuePair>::value - 1 + sizeof(KeyValuePair) * newCapacity));
 if (raw == nullptr) {
 // Allocation failed
-currentHashCount.fetch_add(-1, std::memory_order_relaxed);
+currentHashCount.fetch_add((uint32_t)-1, std::memory_order_relaxed);
 resizeInProgress.clear(std::memory_order_relaxed);
 return nullptr;
 }
@@ -434,7 +434,7 @@ struct FreeList
 assert((head->freeListRefs.load(std::memory_order_relaxed) & SHOULD_BE_ON_FREELIST) == 0);
 // Decrease refcount twice, once for our ref, and once for the list's ref
-head->freeListRefs.fetch_add(-2, std::memory_order_release);
+head->freeListRefs.fetch_add(-2u, std::memory_order_release);
 return head;
 }
@@ -442,7 +442,7 @@ struct FreeList
 // increased.
 // Note that we don't need to release any memory effects, but we do need to ensure that the reference
 // count decrement happens-after the CAS on the head.
-refs = prevHead->freeListRefs.fetch_add(-1, std::memory_order_acq_rel);
+refs = prevHead->freeListRefs.fetch_add(-1u, std::memory_order_acq_rel);
 if (refs == SHOULD_BE_ON_FREELIST + 1) {
 add_knowing_refcount_is_zero(prevHead);
 }
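freeListRefs is an unsigned counter, and fetch_add(-1) converts a negative int to unsigned, which MSVC flags (C4245). Writing -1u/-2u keeps the arithmetic identical (unsigned wraparound is well-defined) while making the conversion explicit:

    #include <atomic>
    #include <cstdint>

    std::atomic<std::uint32_t> refs{3};
    // refs.fetch_add(-1); // same result, but warns under MSVC /W4
    refs.fetch_add(-1u);   // adds 0xFFFFFFFF, i.e. subtracts 1 modulo 2^32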

View File

@@ -484,7 +484,7 @@ bool run_test(uint64_t seed, int iterations, test_type& out_type, const char*& o
 count = q.try_dequeue_bulk(bulkData.begin(), bulkData.size());
 }
 for (std::size_t k = 0; k != count; ++k) {
-auto item = bulkData[k];
+item = bulkData[k];
 ASSERT_OR_FAIL_THREAD((item & 0xFFFFFF) >= 0 && (item & 0xFFFFFF) < (int)largestOpCount);
 ASSERT_OR_FAIL_THREAD((item & 0xFFFFFF) > lastItems[item >> 24]);
 lastItems[item >> 24] = item & 0xFFFFFF;
@@ -784,12 +784,12 @@ int main(int argc, char** argv)
 }
 }
-int result = 0;
+int exitCode = 0;
 test_type test;
 const char* failReason;
 if (singleSeed) {
 if (!run_test(seed, SINGLE_SEED_ITERATIONS, test, failReason)) {
-result = 1;
+exitCode = 1;
 std::ofstream fout(LOG_FILE, std::ios::app);
 fout << test_names[test] << " failed: " << failReason << std::endl;
 std::printf(" %s failed: %s\n", test_names[test], failReason);
@@ -818,7 +818,7 @@ int main(int argc, char** argv)
 std::signal(SIGSEGV, signal_handler);
 std::signal(SIGABRT, signal_handler);
-int result;
+bool result;
 try {
 result = run_test(seed, 2, test, failReason);
 }
@@ -839,7 +839,7 @@ int main(int argc, char** argv)
 std::signal(SIGABRT, SIG_DFL);
 if (!result) {
-result = 1;
+exitCode = 1;
 std::ofstream fout(LOG_FILE, std::ios::app);
 fout << "*** Failure detected!\n Seed: " << std::hex << seed << "\n Test: " << test_names[test] << "\n Reason: " << failReason << std::endl;
 std::printf("*** Failure detected!\n Seed: %08x%08x\n Test: %s\n Reason: %s\n", (uint32_t)(seed >> 32), (uint32_t)(seed), test_names[test], failReason);
@@ -863,5 +863,5 @@ int main(int argc, char** argv)
 }
 }
-return result;
+return exitCode;
 }

View File

@@ -274,6 +274,11 @@ public:
 #define SUPER_ALIGNMENT 128
 #endif
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable: 4324) // structure was padded due to alignment specifier
+#endif
 struct MOODYCAMEL_ALIGNAS(SUPER_ALIGNMENT) VeryAligned {
 static size_t errors;
@@ -305,6 +310,11 @@
 };
 size_t VeryAligned::errors = 0;
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
 class ConcurrentQueueTests : public TestClass<ConcurrentQueueTests>
 {
@@ -403,35 +413,35 @@ public:
 ASSERT_OR_FAIL(!details::circular_less_than(a, b));
 ASSERT_OR_FAIL(!details::circular_less_than(b, a));
-a = 0; b = 1 << 31;
+a = 0; b = 1u << 31;
 ASSERT_OR_FAIL(!details::circular_less_than(a, b));
 ASSERT_OR_FAIL(!details::circular_less_than(b, a));
-a = 1; b = 1 << 31;
+a = 1; b = 1u << 31;
 ASSERT_OR_FAIL(details::circular_less_than(a, b));
 ASSERT_OR_FAIL(!details::circular_less_than(b, a));
-a = 0; b = (1 << 31) + 1;
+a = 0; b = (1u << 31) + 1;
 ASSERT_OR_FAIL(!details::circular_less_than(a, b));
 ASSERT_OR_FAIL(details::circular_less_than(b, a));
-a = 100; b = (1 << 31) + 1;
+a = 100; b = (1u << 31) + 1;
 ASSERT_OR_FAIL(details::circular_less_than(a, b));
 ASSERT_OR_FAIL(!details::circular_less_than(b, a));
-a = (1 << 31) + 7; b = 5;
+a = (1u << 31) + 7; b = 5;
 ASSERT_OR_FAIL(details::circular_less_than(a, b));
 ASSERT_OR_FAIL(!details::circular_less_than(b, a));
-a = (1 << 16) + 7; b = (1 << 16) + 5;
+a = (1u << 16) + 7; b = (1 << 16) + 5;
 ASSERT_OR_FAIL(!details::circular_less_than(a, b));
 ASSERT_OR_FAIL(details::circular_less_than(b, a));
-a = 0xFFFFFFFF; b = 0;
+a = 0xFFFFFFFFu; b = 0;
 ASSERT_OR_FAIL(details::circular_less_than(a, b));
 ASSERT_OR_FAIL(!details::circular_less_than(b, a));
-a = 0xFFFFFFFF; b = 0xFFFFFF;
+a = 0xFFFFFFFFu; b = 0xFFFFFFu;
 ASSERT_OR_FAIL(details::circular_less_than(a, b));
 ASSERT_OR_FAIL(!details::circular_less_than(b, a));
 }
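The u suffixes matter because 1 << 31 shifts into the sign bit of an int, which MSVC diagnoses; the values themselves are unchanged. For reference, the function under test compares sequence indices modulo 2^32; a sketch of its logic (the real one lives in moodycamel::details and is generic over the index type):

    #include <cstdint>

    inline bool circular_lt(std::uint32_t a, std::uint32_t b) {
        // a precedes b iff b is ahead of a by 1..2^31-1 steps, wraparound included
        return static_cast<std::uint32_t>(a - b) > (1u << 31);
    }
    // e.g. circular_lt(0xFFFFFFFFu, 0) == true: 0 is one step past the largest index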
@@ -2059,7 +2069,7 @@ public:
 bool success[2] = { true, true };
 for (int i = 0; i != 2; ++i) {
 if (i == 0) {
-threads[i] = SimpleThread([&](int i) {
+threads[i] = SimpleThread([&](int) {
 // Producer
 ProducerToken tok(q);
 for (int i = 0; i != 32*1024; ++i) {
@@ -2068,7 +2078,7 @@
 }, i);
 }
 else {
-threads[i] = SimpleThread([&](int i) {
+threads[i] = SimpleThread([&](int) {
 // Consumer
 int items[5];
 int prevItem = -1;
@@ -2108,7 +2118,7 @@ public:
 bool success[2] = { true, true };
 for (int i = 0; i != 2; ++i) {
 if (i == 0) {
-threads[i] = SimpleThread([&](int i) {
+threads[i] = SimpleThread([&](int) {
 // Producer
 for (int i = 0; i != 32*1024; ++i) {
 q.enqueue(i);
@@ -2116,7 +2126,7 @@
 }, i);
 }
 else {
-threads[i] = SimpleThread([&](int i) {
+threads[i] = SimpleThread([&](int) {
 // Consumer
 int items[5];
 int prevItem = -1;
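All four lambdas take the thread-index parameter but never read it (each uses its own inner loop counter), so naming it `i` triggered unreferenced-parameter and shadowing warnings. Leaving the parameter unnamed keeps the signature SimpleThread expects while silencing both:

    auto f = [](int i) { return 0; }; // warns: 'i' unreferenced (MSVC C4100)
    auto g = [](int)   { return 0; }; // same callable signature, no warning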
@@ -4666,7 +4676,7 @@ public:
 auto item = local.get_or_create();
 item->value = (int)tid;
 for (int i = 0; i != 1024; ++i) {
-auto item = local.get_or_create();
+item = local.get_or_create();
 if (item->value != (int)tid) {
 failed[tid] = true;
 }
@@ -4840,8 +4850,8 @@ public:
 else {
 ASSERT_OR_FAIL(removed[i].load(std::memory_order_relaxed));
 }
-auto removed = hash.remove(i);
-ASSERT_OR_FAIL(removed == val);
+auto removedVal = hash.remove(i);
+ASSERT_OR_FAIL(removedVal == val);
 }
 for (int i = 0; i != MAX_ENTRIES; ++i) {
 ASSERT_OR_FAIL(hash.find(i) == nullptr);
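Both of these last fixes address MSVC's local-shadowing warning (C4456): the inner `auto item`/`auto removed` re-declared names that already exist in the enclosing scope. Reusing the outer variable or renaming, as the patch does, avoids the hidden-variable trap:

    int removed = 0;          // outer local
    {
        int removedVal = 1;   // distinct name instead of re-declaring 'removed'
        removed = removedVal; // updates the outer variable, as intended
    }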