#if !defined(__TBB_machine_H) || defined(__TBB_machine_gcc_generic_H)
#error Do not #include this internal file directly; use public TBB headers instead.
#endif

#define __TBB_machine_gcc_generic_H

#include <stdint.h>
#include <unistd.h>

#define __TBB_WORDSIZE      __SIZEOF_POINTER__

#if __TBB_GCC_64BIT_ATOMIC_BUILTINS_BROKEN
    #define __TBB_64BIT_ATOMICS 0
#endif

// FPU control settings are not available for generic architectures on Android.
#if __ANDROID__ && __TBB_generic_arch
    #define __TBB_CPU_CTL_ENV_PRESENT 0
#endif

// __BYTE_ORDER__ is used per http://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html,
// but some compilers only define __BIG_ENDIAN__ or __LITTLE_ENDIAN__ instead.
#if __BIG_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__)
    #define __TBB_ENDIANNESS __TBB_ENDIAN_BIG
#elif __LITTLE_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__)
    #define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE
#elif defined(__BYTE_ORDER__)
    #define __TBB_ENDIANNESS __TBB_ENDIAN_UNSUPPORTED
#else
    #define __TBB_ENDIANNESS __TBB_ENDIAN_DETECT
#endif

#if __TBB_GCC_VERSION < 40700
// Use the legacy __sync_* builtins. This generic implementation has no
// information about the underlying hardware, so it conservatively maps every
// consistency helper to a full memory fence; a port to a specific platform
// should fork this header and relax the helpers as appropriate.

#define __TBB_acquire_consistency_helper()  __sync_synchronize()
#define __TBB_release_consistency_helper()  __sync_synchronize()
#define __TBB_full_memory_fence()           __sync_synchronize()
#define __TBB_control_consistency_helper()  __sync_synchronize()

#define __TBB_MACHINE_DEFINE_ATOMICS(S,T)                                                      \
inline T __TBB_machine_cmpswp##S( volatile void *ptr, T value, T comparand ) {                 \
    return __sync_val_compare_and_swap(reinterpret_cast<volatile T *>(ptr),comparand,value);   \
}                                                                                              \
inline T __TBB_machine_fetchadd##S( volatile void *ptr, T value ) {                            \
    return __sync_fetch_and_add(reinterpret_cast<volatile T *>(ptr),value);                    \
}

#define __TBB_USE_GENERIC_FETCH_STORE 1

#else
// Use the __atomic_* builtins available since GCC 4.7.

#define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory")
// The GCC acquire/release fence intrinsics might miss the compiler fence, so
// one is added on both sides of each intrinsic, as we do not know what
// reordering could otherwise be made.
#define __TBB_acquire_consistency_helper() __TBB_compiler_fence(); __atomic_thread_fence(__ATOMIC_ACQUIRE); __TBB_compiler_fence()
#define __TBB_release_consistency_helper() __TBB_compiler_fence(); __atomic_thread_fence(__ATOMIC_RELEASE); __TBB_compiler_fence()
#define __TBB_full_memory_fence()          __atomic_thread_fence(__ATOMIC_SEQ_CST)
#define __TBB_control_consistency_helper() __TBB_acquire_consistency_helper()

#define __TBB_MACHINE_DEFINE_ATOMICS(S,T)                                                      \
inline T __TBB_machine_cmpswp##S( volatile void *ptr, T value, T comparand ) {                 \
    (void)__atomic_compare_exchange_n(reinterpret_cast<volatile T *>(ptr), &comparand, value,  \
                                      false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);              \
    return comparand;                                                                          \
}                                                                                              \
inline T __TBB_machine_fetchadd##S( volatile void *ptr, T value ) {                            \
    return __atomic_fetch_add(reinterpret_cast<volatile T *>(ptr), value, __ATOMIC_SEQ_CST);   \
}                                                                                              \
inline T __TBB_machine_fetchstore##S( volatile void *ptr, T value ) {                          \
    return __atomic_exchange_n(reinterpret_cast<volatile T *>(ptr), value, __ATOMIC_SEQ_CST);  \
}

#endif // __TBB_GCC_VERSION < 40700

__TBB_MACHINE_DEFINE_ATOMICS(1,int8_t)
__TBB_MACHINE_DEFINE_ATOMICS(2,int16_t)
__TBB_MACHINE_DEFINE_ATOMICS(4,int32_t)
__TBB_MACHINE_DEFINE_ATOMICS(8,int64_t)

#undef __TBB_MACHINE_DEFINE_ATOMICS

typedef unsigned char __TBB_Flag;
typedef __TBB_atomic __TBB_Flag __TBB_atomic_flag;
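// A minimal usage sketch (illustrative only, compiled out; the helper name is
// not part of TBB): the cmpswp entry points instantiated above return the
// value actually observed at *ptr, so a caller detects success by comparing
// that result against its comparand.
#if 0
inline int32_t example_atomic_increment( volatile int32_t& counter ) {
    int32_t old = counter;
    for (;;) {
        int32_t observed = __TBB_machine_cmpswp4(&counter, old+1, old);
        if (observed == old) return old+1;  // CAS succeeded
        old = observed;                     // lost a race; retry with the fresh value
    }
}
#endif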
#if __TBB_GCC_VERSION < 40700
// No suitable builtins for plain loads/stores; fall back to the generic forms.
#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE            1
#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE                1
#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1

static inline void __TBB_machine_or( volatile void *ptr, uintptr_t addend ) {
    __sync_fetch_and_or(reinterpret_cast<volatile uintptr_t *>(ptr),addend);
}

static inline void __TBB_machine_and( volatile void *ptr, uintptr_t addend ) {
    __sync_fetch_and_and(reinterpret_cast<volatile uintptr_t *>(ptr),addend);
}
inline bool __TBB_machine_try_lock_byte( __TBB_atomic_flag &flag ) {
    // __sync_lock_test_and_set has acquire semantics; 0 means the lock was free.
    return __sync_lock_test_and_set(&flag,1)==0;
}
inline void __TBB_machine_unlock_byte( __TBB_atomic_flag &flag ) {
    // __sync_lock_release stores 0 with release semantics.
    __sync_lock_release(&flag);
}
#else
// __atomic_* versions of the same helpers.

static inline void __TBB_machine_or( volatile void *ptr, uintptr_t addend ) {
    __atomic_fetch_or(reinterpret_cast<volatile uintptr_t *>(ptr),addend,__ATOMIC_SEQ_CST);
}
static inline void __TBB_machine_and( volatile void *ptr, uintptr_t addend ) {
    __atomic_fetch_and(reinterpret_cast<volatile uintptr_t *>(ptr),addend,__ATOMIC_SEQ_CST);
}
inline bool __TBB_machine_try_lock_byte( __TBB_atomic_flag &flag ) {
    return !__atomic_test_and_set(&flag,__ATOMIC_ACQUIRE);
}
inline void __TBB_machine_unlock_byte( __TBB_atomic_flag &flag ) {
    __atomic_clear(&flag,__ATOMIC_RELEASE);
}
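// Sketch (illustrative, compiled out; names are hypothetical): the or/and
// helpers defined above atomically set and clear mask bits in a machine word.
#if 0
inline void example_set_bits( volatile uintptr_t& word, uintptr_t mask ) {
    __TBB_machine_or(&word, mask);    // word |= mask, atomically
}
inline void example_clear_bits( volatile uintptr_t& word, uintptr_t mask ) {
    __TBB_machine_and(&word, ~mask);  // word &= ~mask, atomically
}
#endif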
namespace tbb { namespace internal {

// Thin wrappers over the __atomic builtins, parameterized by memory order.
template <typename T, int MemOrder>
inline T __TBB_machine_atomic_load( const volatile T& location ) {
    T value = __atomic_load_n(&location, MemOrder);
    return value;
}

template <typename T, int MemOrder>
inline void __TBB_machine_atomic_store( volatile T& location, T value ) {
    __atomic_store_n(&location, value, MemOrder);
}
// TBB load/store layers expressed in terms of the wrappers above.
template <typename T, size_t S>
struct machine_load_store {
    static T load_with_acquire ( const volatile T& location ) {
        return __TBB_machine_atomic_load<T, __ATOMIC_ACQUIRE>(location);
    }
    static void store_with_release ( volatile T& location, T value ) {
        __TBB_machine_atomic_store<T, __ATOMIC_RELEASE>(location, value);
    }
};
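// Sketch (illustrative, compiled out; the type and helpers below are not part
// of TBB) of the acquire/release pairing this struct provides: a writer fills
// a payload and then publishes a flag with release semantics; a reader that
// observes the flag with acquire semantics is guaranteed to see the payload.
#if 0
struct example_message { int payload; volatile bool ready; };

inline void example_publish( example_message& m, int v ) {
    m.payload = v;  // ordered before the releasing store below
    machine_load_store<bool, sizeof(bool)>::store_with_release(m.ready, true);
}
inline bool example_try_consume( example_message& m, int& out ) {
    if ( !machine_load_store<bool, sizeof(bool)>::load_with_acquire(m.ready) )
        return false;
    out = m.payload;  // safe: ordered after the acquiring load above
    return true;
}
#endif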
template <typename T, size_t S>
struct machine_load_store_relaxed {
    static inline T load ( const volatile T& location ) {
        return __TBB_machine_atomic_load<T, __ATOMIC_RELAXED>(location);
    }
    static inline void store ( volatile T& location, T value ) {
        __TBB_machine_atomic_store<T, __ATOMIC_RELAXED>(location, value);
    }
};
template <typename T, size_t S>
struct machine_load_store_seq_cst {
    static T load ( const volatile T& location ) {
        return __TBB_machine_atomic_load<T, __ATOMIC_SEQ_CST>(location);
    }
    static void store ( volatile T& location, T value ) {
        __TBB_machine_atomic_store<T, __ATOMIC_SEQ_CST>(location, value);
    }
};

}} // namespace tbb::internal

#endif // __TBB_GCC_VERSION < 40700

#define __TBB_AtomicOR(P,V)     __TBB_machine_or(P,V)
#define __TBB_AtomicAND(P,V)    __TBB_machine_and(P,V)

#define __TBB_TryLockByte   __TBB_machine_try_lock_byte
#define __TBB_UnlockByte    __TBB_machine_unlock_byte
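// Sketch (illustrative, compiled out; names are hypothetical): the byte-lock
// entry points form a test-and-set spin lock, acquiring on __TBB_TryLockByte
// and releasing on __TBB_UnlockByte.
#if 0
inline void example_spin_lock( __TBB_atomic_flag& flag ) {
    while ( !__TBB_TryLockByte(flag) )
        ;  // spin; a production lock would back off or yield here
}
inline void example_spin_unlock( __TBB_atomic_flag& flag ) {
    __TBB_UnlockByte(flag);
}
#endif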
// __builtin_clz* count the number of leading zero bits in an unsigned value.
namespace tbb { namespace internal { namespace gcc_builtins {
    inline int clz( unsigned int x )           { return __builtin_clz(x);   }
    inline int clz( unsigned long int x )      { return __builtin_clzl(x);  }
    inline int clz( unsigned long long int x ) { return __builtin_clzll(x); }
}}} // namespace tbb::internal::gcc_builtins

// The log2 of x is the bit index of its most significant non-zero bit.
static inline intptr_t __TBB_machine_lg( uintptr_t x ) {
    return sizeof(x)*8 - tbb::internal::gcc_builtins::clz(x) - 1;
}
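// Worked example (illustrative, compiled out; would need <cassert> if built):
// sizeof(x)*8 - clz(x) - 1 yields the index of the highest set bit regardless
// of word size, e.g. for 40 == 0b101000 the highest set bit is bit 5.
#if 0
static void example_check_lg() {
    assert( __TBB_machine_lg(1)  == 0 );  // only bit 0 set
    assert( __TBB_machine_lg(40) == 5 );  // 0b101000: highest set bit is bit 5
    assert( __TBB_machine_lg(64) == 6 );  // 64 == 1u<<6
}
#endif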
#define __TBB_Log2(V)   __TBB_machine_lg(V)

#if __TBB_WORDSIZE==4
    #define __TBB_USE_GENERIC_DWORD_LOAD_STORE 1
#endif

#if __TBB_x86_32 || __TBB_x86_64
#include "gcc_itsx.h"
#endif