PonyPlayer
atomicops.h
// ©2013-2016 Cameron Desrochers.
// Distributed under the simplified BSD license (see the license file that
// should have come with this header).
// Uses Jeff Preshing's semaphore implementation (under the terms of its
// separate zlib license, embedded below).

#pragma once

// Provides a portable (VC++2010+, Intel ICC 13, GCC 4.7+, and anything C++11 compliant) implementation
// of low-level memory barriers, plus a few semi-portable utility macros (for inlining and alignment).
// Also has a basic atomic type (limited to hardware-supported atomics with no memory ordering guarantees).
// Uses the AE_* prefix for macros (historical reasons), and the "moodycamel" namespace for symbols.

#include <cerrno>
#include <cassert>
#include <type_traits>
#include <cstdint>
#include <ctime>
// Platform detection
#if defined(__INTEL_COMPILER)
#define AE_ICC
#elif defined(_MSC_VER)
#define AE_VCPP
#elif defined(__GNUC__)
#define AE_GCC
#endif

#if defined(_M_IA64) || defined(__ia64__)
#define AE_ARCH_IA64
#elif defined(_WIN64) || defined(__amd64__) || defined(_M_X64) || defined(__x86_64__)
#define AE_ARCH_X64
#elif defined(_M_IX86) || defined(__i386__)
#define AE_ARCH_X86
#elif defined(_M_PPC) || defined(__powerpc__)
#define AE_ARCH_PPC
#else
#define AE_ARCH_UNKNOWN
#endif


// AE_UNUSED
#define AE_UNUSED(x) ((void)x)

// AE_NO_TSAN/AE_TSAN_ANNOTATE_*
#if defined(__has_feature)
#if __has_feature(thread_sanitizer)
#if __cplusplus >= 201703L // inline variables require C++17
namespace moodycamel { inline int ae_tsan_global; }
#define AE_TSAN_ANNOTATE_RELEASE() AnnotateHappensBefore(__FILE__, __LINE__, (void *)(&::moodycamel::ae_tsan_global))
#define AE_TSAN_ANNOTATE_ACQUIRE() AnnotateHappensAfter(__FILE__, __LINE__, (void *)(&::moodycamel::ae_tsan_global))
extern "C" void AnnotateHappensBefore(const char*, int, void*);
extern "C" void AnnotateHappensAfter(const char*, int, void*);
#else // when we can't work with tsan, attempt to disable its warnings
#define AE_NO_TSAN __attribute__((no_sanitize("thread")))
#endif
#endif
#endif
#ifndef AE_NO_TSAN
#define AE_NO_TSAN
#endif
#ifndef AE_TSAN_ANNOTATE_RELEASE
#define AE_TSAN_ANNOTATE_RELEASE()
#define AE_TSAN_ANNOTATE_ACQUIRE()
#endif


// AE_FORCEINLINE
#if defined(AE_VCPP) || defined(AE_ICC)
#define AE_FORCEINLINE __forceinline
#elif defined(AE_GCC)
//#define AE_FORCEINLINE __attribute__((always_inline))
#define AE_FORCEINLINE inline
#else
#define AE_FORCEINLINE inline
#endif


// AE_ALIGN
#if defined(AE_VCPP) || defined(AE_ICC)
#define AE_ALIGN(x) __declspec(align(x))
#elif defined(AE_GCC)
#define AE_ALIGN(x) __attribute__((aligned(x)))
#else
// Assume GCC compliant syntax...
#define AE_ALIGN(x) __attribute__((aligned(x)))
#endif
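
// A minimal usage sketch for the macros above (illustrative only; the names
// `PaddedCounter`, `fast_add`, and `example` are hypothetical, not part of
// this header):
//
//     struct AE_ALIGN(64) PaddedCounter { int count; };   // align to a cache line
//
//     AE_FORCEINLINE int fast_add(int a, int b) { return a + b; }   // request inlining
//
//     void example(int x) { AE_UNUSED(x); }   // silence unused-parameter warnings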


// Portable atomic fences implemented below:

namespace moodycamel {

enum memory_order {
    memory_order_relaxed,
    memory_order_acquire,
    memory_order_release,
    memory_order_acq_rel,
    memory_order_seq_cst,

    // memory_order_sync: Forces a full sync:
    // #LoadLoad, #LoadStore, #StoreStore, and most significantly, #StoreLoad
    memory_order_sync = memory_order_seq_cst
};

} // end namespace moodycamel

#if (defined(AE_VCPP) && (_MSC_VER < 1700 || defined(__cplusplus_cli))) || (defined(AE_ICC) && __INTEL_COMPILER < 1600)
// VS2010 and ICC13 don't support std::atomic_*_fence, implement our own fences

#include <intrin.h>

#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
#define AeFullSync _mm_mfence
#define AeLiteSync _mm_mfence
#elif defined(AE_ARCH_IA64)
#define AeFullSync __mf
#define AeLiteSync __mf
#elif defined(AE_ARCH_PPC)
#include <ppcintrinsics.h>
#define AeFullSync __sync
#define AeLiteSync __lwsync
#endif


#ifdef AE_VCPP
#pragma warning(push)
#pragma warning(disable: 4365) // Disable erroneous 'conversion from long to unsigned int, signed/unsigned mismatch' error when using `assert`
#ifdef __cplusplus_cli
#pragma managed(push, off)
#endif
#endif

namespace moodycamel {

AE_FORCEINLINE void compiler_fence(memory_order order) AE_NO_TSAN
{
    switch (order) {
        case memory_order_relaxed: break;
        case memory_order_acquire: _ReadBarrier(); break;
        case memory_order_release: _WriteBarrier(); break;
        case memory_order_acq_rel: _ReadWriteBarrier(); break;
        case memory_order_seq_cst: _ReadWriteBarrier(); break;
        default: assert(false);
    }
}

// x86/x64 have a strong memory model -- all loads and stores have
// acquire and release semantics automatically (so we only need compiler
// barriers for those).
#if defined(AE_ARCH_X86) || defined(AE_ARCH_X64)
AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN
{
    switch (order) {
        case memory_order_relaxed: break;
        case memory_order_acquire: _ReadBarrier(); break;
        case memory_order_release: _WriteBarrier(); break;
        case memory_order_acq_rel: _ReadWriteBarrier(); break;
        case memory_order_seq_cst:
            _ReadWriteBarrier();
            AeFullSync();
            _ReadWriteBarrier();
            break;
        default: assert(false);
    }
}
#else
AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN
{
    // Non-specialized arch, use heavier memory barriers everywhere just in case :-(
    switch (order) {
        case memory_order_relaxed:
            break;
        case memory_order_acquire:
            _ReadBarrier();
            AeLiteSync();
            _ReadBarrier();
            break;
        case memory_order_release:
            _WriteBarrier();
            AeLiteSync();
            _WriteBarrier();
            break;
        case memory_order_acq_rel:
            _ReadWriteBarrier();
            AeLiteSync();
            _ReadWriteBarrier();
            break;
        case memory_order_seq_cst:
            _ReadWriteBarrier();
            AeFullSync();
            _ReadWriteBarrier();
            break;
        default: assert(false);
    }
}
#endif
} // end namespace moodycamel
#else
// Use the standard library's atomic fences
#include <atomic>

namespace moodycamel {

AE_FORCEINLINE void compiler_fence(memory_order order) AE_NO_TSAN
{
    switch (order) {
        case memory_order_relaxed: break;
        case memory_order_acquire: std::atomic_signal_fence(std::memory_order_acquire); break;
        case memory_order_release: std::atomic_signal_fence(std::memory_order_release); break;
        case memory_order_acq_rel: std::atomic_signal_fence(std::memory_order_acq_rel); break;
        case memory_order_seq_cst: std::atomic_signal_fence(std::memory_order_seq_cst); break;
        default: assert(false);
    }
}

AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN
{
    switch (order) {
        case memory_order_relaxed: break;
        case memory_order_acquire: AE_TSAN_ANNOTATE_ACQUIRE(); std::atomic_thread_fence(std::memory_order_acquire); break;
        case memory_order_release: AE_TSAN_ANNOTATE_RELEASE(); std::atomic_thread_fence(std::memory_order_release); break;
        case memory_order_acq_rel: AE_TSAN_ANNOTATE_ACQUIRE(); AE_TSAN_ANNOTATE_RELEASE(); std::atomic_thread_fence(std::memory_order_acq_rel); break;
        case memory_order_seq_cst: AE_TSAN_ANNOTATE_ACQUIRE(); AE_TSAN_ANNOTATE_RELEASE(); std::atomic_thread_fence(std::memory_order_seq_cst); break;
        default: assert(false);
    }
}

} // end namespace moodycamel

#endif
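
// A minimal sketch of how the fences above pair up in a producer/consumer
// hand-off (illustrative only; `g_payload` and `g_ready` are hypothetical,
// and weak_atomic is defined further below). The release fence before the
// flag store publishes the payload; the acquire fence after the flag load
// makes it safe to read:
//
//     int g_payload;                          // plain data, written before publishing
//     moodycamel::weak_atomic<int> g_ready(0);
//
//     // Producer thread:
//     g_payload = 42;
//     moodycamel::fence(moodycamel::memory_order_release);
//     g_ready = 1;
//
//     // Consumer thread:
//     if (g_ready.load()) {
//         moodycamel::fence(moodycamel::memory_order_acquire);
//         assert(g_payload == 42);
//     }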


#if !defined(AE_VCPP) || (_MSC_VER >= 1700 && !defined(__cplusplus_cli))
#define AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
#endif

#ifdef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
#include <atomic>
#endif
#include <utility>

// WARNING: *NOT* A REPLACEMENT FOR std::atomic. READ CAREFULLY:
// Provides basic support for atomic variables -- no memory ordering guarantees are provided.
// The guarantee of atomicity is only made for types that already have atomic load and store guarantees
// at the hardware level -- on most platforms this generally means aligned pointers and integers (only).
namespace moodycamel {
template<typename T>
class weak_atomic
{
public:
    AE_NO_TSAN weak_atomic() : value() { }
#ifdef AE_VCPP
#pragma warning(push)
#pragma warning(disable: 4100) // Get rid of (erroneous) 'unreferenced formal parameter' warning
#endif
    template<typename U> AE_NO_TSAN weak_atomic(U&& x) : value(std::forward<U>(x)) { }
#ifdef __cplusplus_cli
    // Work around bug with universal reference/nullptr combination that only appears when /clr is on
    AE_NO_TSAN weak_atomic(nullptr_t) : value(nullptr) { }
#endif
    AE_NO_TSAN weak_atomic(weak_atomic const& other) : value(other.load()) { }
    AE_NO_TSAN weak_atomic(weak_atomic&& other) : value(std::move(other.load())) { }
#ifdef AE_VCPP
#pragma warning(pop)
#endif

    AE_FORCEINLINE operator T() const AE_NO_TSAN { return load(); }


#ifndef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
    template<typename U> AE_FORCEINLINE weak_atomic const& operator=(U&& x) AE_NO_TSAN { value = std::forward<U>(x); return *this; }
    AE_FORCEINLINE weak_atomic const& operator=(weak_atomic const& other) AE_NO_TSAN { value = other.value; return *this; }

    AE_FORCEINLINE T load() const AE_NO_TSAN { return value; }

    AE_FORCEINLINE T fetch_add_acquire(T increment) AE_NO_TSAN
    {
#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
        if (sizeof(T) == 4) return _InterlockedExchangeAdd((long volatile*)&value, (long)increment);
#if defined(_M_AMD64)
        else if (sizeof(T) == 8) return _InterlockedExchangeAdd64((long long volatile*)&value, (long long)increment);
#endif
#else
#error Unsupported platform
#endif
        assert(false && "T must be either a 32 or 64 bit type");
        return value;
    }

    AE_FORCEINLINE T fetch_add_release(T increment) AE_NO_TSAN
    {
#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
        if (sizeof(T) == 4) return _InterlockedExchangeAdd((long volatile*)&value, (long)increment);
#if defined(_M_AMD64)
        else if (sizeof(T) == 8) return _InterlockedExchangeAdd64((long long volatile*)&value, (long long)increment);
#endif
#else
#error Unsupported platform
#endif
        assert(false && "T must be either a 32 or 64 bit type");
        return value;
    }
#else
    template<typename U>
    AE_FORCEINLINE weak_atomic const& operator=(U&& x) AE_NO_TSAN
    {
        value.store(std::forward<U>(x), std::memory_order_relaxed);
        return *this;
    }

    AE_FORCEINLINE weak_atomic const& operator=(weak_atomic const& other) AE_NO_TSAN
    {
        value.store(other.value.load(std::memory_order_relaxed), std::memory_order_relaxed);
        return *this;
    }

    AE_FORCEINLINE T load() const AE_NO_TSAN { return value.load(std::memory_order_relaxed); }

    AE_FORCEINLINE T fetch_add_acquire(T increment) AE_NO_TSAN
    {
        return value.fetch_add(increment, std::memory_order_acquire);
    }

    AE_FORCEINLINE T fetch_add_release(T increment) AE_NO_TSAN
    {
        return value.fetch_add(increment, std::memory_order_release);
    }
#endif


private:
#ifndef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
    // No std::atomic support, but still need to circumvent compiler optimizations.
    // `volatile` will make memory access slow, but is guaranteed to be reliable.
    volatile T value;
#else
    std::atomic<T> value;
#endif
};

} // end namespace moodycamel
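
// A minimal sketch of weak_atomic in use (illustrative only; `g_head` and
// `claim_slot` are hypothetical, not part of this header). Remember the
// warning above: only the atomicity of the individual load/store/fetch-add
// is guaranteed, not any surrounding ordering:
//
//     moodycamel::weak_atomic<std::size_t> g_head(0);
//
//     std::size_t claim_slot()
//     {
//         // fetch_add_acquire returns the previous value, so each caller
//         // gets a distinct slot index even under concurrent calls.
//         return g_head.fetch_add_acquire(1);
//     }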



// Portable single-producer, single-consumer semaphore below:

#if defined(_WIN32)
// Avoid including windows.h in a header; we only need a handful of
// items, so we'll redeclare them here (this is relatively safe since
// the API generally has to remain stable between Windows versions).
// I know this is an ugly hack but it still beats polluting the global
// namespace with thousands of generic names or adding a .cpp for nothing.
extern "C" {
    struct _SECURITY_ATTRIBUTES;
    __declspec(dllimport) void* __stdcall CreateSemaphoreW(_SECURITY_ATTRIBUTES* lpSemaphoreAttributes, long lInitialCount, long lMaximumCount, const wchar_t* lpName);
    __declspec(dllimport) int __stdcall CloseHandle(void* hObject);
    __declspec(dllimport) unsigned long __stdcall WaitForSingleObject(void* hHandle, unsigned long dwMilliseconds);
    __declspec(dllimport) int __stdcall ReleaseSemaphore(void* hSemaphore, long lReleaseCount, long* lpPreviousCount);
}
#elif defined(__MACH__)
#include <mach/mach.h>
#elif defined(__unix__)
#include <semaphore.h>
#elif defined(FREERTOS)
#include <FreeRTOS.h>
#include <semphr.h>
#include <task.h>
#endif

namespace moodycamel
{
    // Code in the spsc_sema namespace below is an adaptation of Jeff Preshing's
    // portable + lightweight semaphore implementations, originally from
    // https://github.com/preshing/cpp11-on-multicore/blob/master/common/sema.h
    // LICENSE:
    // Copyright (c) 2015 Jeff Preshing
    //
    // This software is provided 'as-is', without any express or implied
    // warranty. In no event will the authors be held liable for any damages
    // arising from the use of this software.
    //
    // Permission is granted to anyone to use this software for any purpose,
    // including commercial applications, and to alter it and redistribute it
    // freely, subject to the following restrictions:
    //
    // 1. The origin of this software must not be misrepresented; you must not
    //    claim that you wrote the original software. If you use this software
    //    in a product, an acknowledgement in the product documentation would be
    //    appreciated but is not required.
    // 2. Altered source versions must be plainly marked as such, and must not be
    //    misrepresented as being the original software.
    // 3. This notice may not be removed or altered from any source distribution.
    namespace spsc_sema
    {
#if defined(_WIN32)
        class Semaphore
        {
        private:
            void* m_hSema;

            Semaphore(const Semaphore& other);
            Semaphore& operator=(const Semaphore& other);

        public:
            AE_NO_TSAN Semaphore(int initialCount = 0) : m_hSema()
            {
                assert(initialCount >= 0);
                const long maxLong = 0x7fffffff;
                m_hSema = CreateSemaphoreW(nullptr, initialCount, maxLong, nullptr);
                assert(m_hSema);
            }

            AE_NO_TSAN ~Semaphore()
            {
                CloseHandle(m_hSema);
            }

            bool wait() AE_NO_TSAN
            {
                const unsigned long infinite = 0xffffffff;
                return WaitForSingleObject(m_hSema, infinite) == 0;
            }

            bool try_wait() AE_NO_TSAN
            {
                return WaitForSingleObject(m_hSema, 0) == 0;
            }

            bool timed_wait(std::uint64_t usecs) AE_NO_TSAN
            {
                return WaitForSingleObject(m_hSema, (unsigned long)(usecs / 1000)) == 0;
            }

            void signal(int count = 1) AE_NO_TSAN
            {
                while (!ReleaseSemaphore(m_hSema, count, nullptr));
            }
        };
#elif defined(__MACH__)
        //---------------------------------------------------------
        // Semaphore (Apple iOS and OSX)
        // Can't use POSIX semaphores due to http://lists.apple.com/archives/darwin-kernel/2009/Apr/msg00010.html
        //---------------------------------------------------------
        class Semaphore
        {
        private:
            semaphore_t m_sema;

            Semaphore(const Semaphore& other);
            Semaphore& operator=(const Semaphore& other);

        public:
            AE_NO_TSAN Semaphore(int initialCount = 0) : m_sema()
            {
                assert(initialCount >= 0);
                kern_return_t rc = semaphore_create(mach_task_self(), &m_sema, SYNC_POLICY_FIFO, initialCount);
                assert(rc == KERN_SUCCESS);
                AE_UNUSED(rc);
            }

            AE_NO_TSAN ~Semaphore()
            {
                semaphore_destroy(mach_task_self(), m_sema);
            }

            bool wait() AE_NO_TSAN
            {
                return semaphore_wait(m_sema) == KERN_SUCCESS;
            }

            bool try_wait() AE_NO_TSAN
            {
                return timed_wait(0);
            }

            bool timed_wait(std::uint64_t timeout_usecs) AE_NO_TSAN
            {
                mach_timespec_t ts;
                ts.tv_sec = static_cast<unsigned int>(timeout_usecs / 1000000);
                ts.tv_nsec = static_cast<int>((timeout_usecs % 1000000) * 1000);

                // added in OSX 10.10: https://developer.apple.com/library/prerelease/mac/documentation/General/Reference/APIDiffsMacOSX10_10SeedDiff/modules/Darwin.html
                kern_return_t rc = semaphore_timedwait(m_sema, ts);
                return rc == KERN_SUCCESS;
            }

            void signal() AE_NO_TSAN
            {
                while (semaphore_signal(m_sema) != KERN_SUCCESS);
            }

            void signal(int count) AE_NO_TSAN
            {
                while (count-- > 0)
                {
                    while (semaphore_signal(m_sema) != KERN_SUCCESS);
                }
            }
        };
#elif defined(__unix__)
        //---------------------------------------------------------
        // Semaphore (POSIX, Linux)
        //---------------------------------------------------------
        class Semaphore
        {
        private:
            sem_t m_sema;

            Semaphore(const Semaphore& other);
            Semaphore& operator=(const Semaphore& other);

        public:
            AE_NO_TSAN Semaphore(int initialCount = 0) : m_sema()
            {
                assert(initialCount >= 0);
                int rc = sem_init(&m_sema, 0, static_cast<unsigned int>(initialCount));
                assert(rc == 0);
                AE_UNUSED(rc);
            }

            AE_NO_TSAN ~Semaphore()
            {
                sem_destroy(&m_sema);
            }

            bool wait() AE_NO_TSAN
            {
                // http://stackoverflow.com/questions/2013181/gdb-causes-sem-wait-to-fail-with-eintr-error
                int rc;
                do
                {
                    rc = sem_wait(&m_sema);
                }
                while (rc == -1 && errno == EINTR);
                return rc == 0;
            }

            bool try_wait() AE_NO_TSAN
            {
                int rc;
                do {
                    rc = sem_trywait(&m_sema);
                } while (rc == -1 && errno == EINTR);
                return rc == 0;
            }

            bool timed_wait(std::uint64_t usecs) AE_NO_TSAN
            {
                struct timespec ts;
                const int usecs_in_1_sec = 1000000;
                const int nsecs_in_1_sec = 1000000000;
                clock_gettime(CLOCK_REALTIME, &ts);
                ts.tv_sec += static_cast<time_t>(usecs / usecs_in_1_sec);
                ts.tv_nsec += static_cast<long>(usecs % usecs_in_1_sec) * 1000;
                // sem_timedwait bombs if you have more than 1e9 in tv_nsec
                // so we have to clean things up before passing it in
                if (ts.tv_nsec >= nsecs_in_1_sec) {
                    ts.tv_nsec -= nsecs_in_1_sec;
                    ++ts.tv_sec;
                }

                int rc;
                do {
                    rc = sem_timedwait(&m_sema, &ts);
                } while (rc == -1 && errno == EINTR);
                return rc == 0;
            }

            void signal() AE_NO_TSAN
            {
                while (sem_post(&m_sema) == -1);
            }

            void signal(int count) AE_NO_TSAN
            {
                while (count-- > 0)
                {
                    while (sem_post(&m_sema) == -1);
                }
            }
        };
#elif defined(FREERTOS)
        //---------------------------------------------------------
        // Semaphore (FreeRTOS)
        //---------------------------------------------------------
        class Semaphore
        {
        private:
            SemaphoreHandle_t m_sema;

            Semaphore(const Semaphore& other);
            Semaphore& operator=(const Semaphore& other);

        public:
            AE_NO_TSAN Semaphore(int initialCount = 0) : m_sema()
            {
                assert(initialCount >= 0);
                m_sema = xSemaphoreCreateCounting(static_cast<UBaseType_t>(~0ull), static_cast<UBaseType_t>(initialCount));
                assert(m_sema);
            }

            AE_NO_TSAN ~Semaphore()
            {
                vSemaphoreDelete(m_sema);
            }

            bool wait() AE_NO_TSAN
            {
                return xSemaphoreTake(m_sema, portMAX_DELAY) == pdTRUE;
            }

            bool try_wait() AE_NO_TSAN
            {
                // Note: In an ISR context, if this causes a task to unblock,
                // the caller won't know about it
                if (xPortIsInsideInterrupt())
                    return xSemaphoreTakeFromISR(m_sema, NULL) == pdTRUE;
                return xSemaphoreTake(m_sema, 0) == pdTRUE;
            }

            bool timed_wait(std::uint64_t usecs) AE_NO_TSAN
            {
                std::uint64_t msecs = usecs / 1000;
                TickType_t ticks = static_cast<TickType_t>(msecs / portTICK_PERIOD_MS);
                if (ticks == 0)
                    return try_wait();
                return xSemaphoreTake(m_sema, ticks) == pdTRUE;
            }

            void signal() AE_NO_TSAN
            {
                // Note: In an ISR context, if this causes a task to unblock,
                // the caller won't know about it
                BaseType_t rc;
                if (xPortIsInsideInterrupt())
                    rc = xSemaphoreGiveFromISR(m_sema, NULL);
                else
                    rc = xSemaphoreGive(m_sema);
                assert(rc == pdTRUE);
                AE_UNUSED(rc);
            }

            void signal(int count) AE_NO_TSAN
            {
                while (count-- > 0)
                    signal();
            }
        };
#else
#error Unsupported platform! (No semaphore wrapper available)
#endif

        //---------------------------------------------------------
        // LightweightSemaphore
        //---------------------------------------------------------
        class LightweightSemaphore
        {
        public:
            typedef std::make_signed<std::size_t>::type ssize_t;

        private:
            weak_atomic<ssize_t> m_count;
            Semaphore m_sema;

            bool waitWithPartialSpinning(std::int64_t timeout_usecs = -1) AE_NO_TSAN
            {
                ssize_t oldCount;
                // Is there a better way to set the initial spin count?
                // If we lower it to 1000, testBenaphore becomes 15x slower on my Core i7-5930K Windows PC,
                // as threads start hitting the kernel semaphore.
                int spin = 1024;
                while (--spin >= 0)
                {
                    if (m_count.load() > 0)
                    {
                        m_count.fetch_add_acquire(-1);
                        return true;
                    }
                    compiler_fence(memory_order_acquire); // Prevent the compiler from collapsing the loop.
                }
                oldCount = m_count.fetch_add_acquire(-1);
                if (oldCount > 0)
                    return true;
                if (timeout_usecs < 0)
                {
                    if (m_sema.wait())
                        return true;
                }
                if (timeout_usecs > 0 && m_sema.timed_wait(static_cast<uint64_t>(timeout_usecs)))
                    return true;
                // At this point, we've timed out waiting for the semaphore, but the
                // count is still decremented indicating we may still be waiting on
                // it. So we have to re-adjust the count, but only if the semaphore
                // wasn't signaled enough times for us in the meantime. If it was, we
                // need to release the semaphore too.
                while (true)
                {
                    oldCount = m_count.fetch_add_release(1);
                    if (oldCount < 0)
                        return false; // successfully restored things to the way they were
                    // Oh, the producer thread just signaled the semaphore after all. Try again:
                    oldCount = m_count.fetch_add_acquire(-1);
                    if (oldCount > 0 && m_sema.try_wait())
                        return true;
                }
            }

        public:
            AE_NO_TSAN LightweightSemaphore(ssize_t initialCount = 0) : m_count(initialCount), m_sema()
            {
                assert(initialCount >= 0);
            }

            bool tryWait() AE_NO_TSAN
            {
                if (m_count.load() > 0)
                {
                    m_count.fetch_add_acquire(-1);
                    return true;
                }
                return false;
            }

            bool wait() AE_NO_TSAN
            {
                return tryWait() || waitWithPartialSpinning();
            }

            bool wait(std::int64_t timeout_usecs) AE_NO_TSAN
            {
                return tryWait() || waitWithPartialSpinning(timeout_usecs);
            }

            void signal(ssize_t count = 1) AE_NO_TSAN
            {
                assert(count >= 0);
                ssize_t oldCount = m_count.fetch_add_release(count);
                assert(oldCount >= -1);
                if (oldCount < 0)
                {
                    m_sema.signal(1);
                }
            }

            std::size_t availableApprox() const AE_NO_TSAN
            {
                ssize_t count = m_count.load();
                return count > 0 ? static_cast<std::size_t>(count) : 0;
            }
        };
    } // end namespace spsc_sema
} // end namespace moodycamel
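
// A minimal sketch of LightweightSemaphore in an SPSC hand-off (illustrative
// only; `g_items` and the two functions are hypothetical, not part of this
// header). The consumer spins briefly in waitWithPartialSpinning() before
// falling back to the kernel semaphore, which keeps the fast path cheap when
// items arrive quickly:
//
//     moodycamel::spsc_sema::LightweightSemaphore g_items;
//
//     void on_item_produced()
//     {
//         g_items.signal();           // one more item available
//     }
//
//     bool wait_for_item()
//     {
//         return g_items.wait(10 * 1000);   // wait up to ~10ms (argument is in microseconds)
//     }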

#if defined(AE_VCPP) && (_MSC_VER < 1700 || defined(__cplusplus_cli))
#pragma warning(pop)
#ifdef __cplusplus_cli
#pragma managed(pop)
#endif
#endif