20 #define __TBB_task_H_include_area 34 class task_group_context;
38 #if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3) 39 #define __TBB_TASK_BASE_ACCESS public 41 #define __TBB_TASK_BASE_ACCESS private 61 namespace interface5 {
115 virtual void enqueue(
task& t,
void* reserved ) = 0;
122 #if __TBB_PREVIEW_RESUMABLE_TASKS 130 #if __TBB_TASK_ISOLATION 136 #if __TBB_TASK_GROUP_CONTEXT 171 #if __TBB_PREVIEW_CRITICAL_TASKS 202 #if __TBB_PREVIEW_CRITICAL_TASKS 207 #if __TBB_TASK_ISOLATION 211 intptr_t reserved_space_for_task_isolation_tag;
214 #if __TBB_TASK_GROUP_CONTEXT 230 #if __TBB_TASK_PRIORITY || __TBB_PREVIEW_RESUMABLE_TASKS 238 #if __TBB_TASK_PRIORITY 244 #if __TBB_PREVIEW_RESUMABLE_TASKS 248 #if __TBB_TASK_PRIORITY || __TBB_PREVIEW_RESUMABLE_TASKS 295 #if __TBB_TASK_GROUP_CONTEXT 297 #if __TBB_TASK_PRIORITY 300 #if __TBB_PREVIEW_CRITICAL_TASKS 314 #if TBB_USE_CAPTURED_EXCEPTION 318 class tbb_exception_ptr;
322 class task_scheduler_init;
323 namespace interface7 {
class task_arena; }
324 using interface7::task_arena;
353 #if TBB_USE_CAPTURED_EXCEPTION 377 #if TBB_USE_CAPTURED_EXCEPTION 446 #if __TBB_TASK_PRIORITY 457 #if __TBB_TASK_PRIORITY 495 :
my_kind(relation_with_parent)
558 #if __TBB_TASK_PRIORITY 584 template <
typename T>
637 #if __TBB_RECYCLE_TO_ENQUEUE 641 #if __TBB_PREVIEW_RESUMABLE_TASKS 656 #if __TBB_TASK_GROUP_CONTEXT 666 return *reinterpret_cast<internal::allocate_continuation_proxy*>(
this);
671 return *reinterpret_cast<internal::allocate_child_proxy*>(
this);
675 using task_base::allocate_additional_child_of;
677 #if __TBB_DEPRECATED_TASK_INTERFACE 685 using task_base::destroy;
722 p.parent = &new_parent;
723 #if __TBB_TASK_GROUP_CONTEXT 736 #if __TBB_RECYCLE_TO_ENQUEUE 739 void recycle_to_enqueue() {
751 #if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT 769 __TBB_ASSERT( k>=0,
"task's reference count underflowed" );
778 #if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT 786 using task_base::spawn;
813 #if __TBB_TASK_PRIORITY 829 #if __TBB_TASK_PRIORITY 832 #if __TBB_PREVIEW_CRITICAL_TASKS 844 #if __TBB_TASK_PRIORITY 847 inline static void enqueue(
task& t, task_arena& arena);
858 #if __TBB_TASK_GROUP_CONTEXT 864 #if __TBB_TASK_GROUP_CONTEXT 875 return (
prefix().extra_state & 0x80)!=0;
881 return (
prefix().extra_state & 0x10)!=0;
884 #if __TBB_PREVIEW_RESUMABLE_TASKS 885 typedef void* suspend_point;
889 template <
typename F>
890 static void suspend(F f);
893 static void resume(suspend_point tag);
906 #if __TBB_PREVIEW_RESUMABLE_TASKS 911 __TBB_ASSERT( ref_count_==
int(ref_count_),
"integer overflow error");
913 #if __TBB_PREVIEW_RESUMABLE_TASKS 944 #if __TBB_TASK_GROUP_CONTEXT 968 #if __TBB_TASK_PRIORITY 982 #if __TBB_TASK_GROUP_CONTEXT 992 return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(
this))[-1];
994 #if __TBB_PREVIEW_CRITICAL_TASKS 1000 #if __TBB_PREVIEW_CRITICAL_TASKS 1007 #if __TBB_PREVIEW_RESUMABLE_TASKS 1009 template <
typename F>
1010 static void suspend_callback(
void* user_callback, task::suspend_point tag) {
1012 F user_callback_copy = *static_cast<F*>(user_callback);
1013 user_callback_copy(tag);
1020 template <
typename F>
1021 inline void task::suspend(F f) {
1022 internal::internal_suspend((
void*)internal::suspend_callback<F>, &f);
1024 inline void task::resume(suspend_point tag) {
1025 internal::internal_resume(tag);
1039 template<
typename F>
1041 #if __TBB_ALLOW_MUTABLE_FUNCTORS 1053 #if __TBB_CPP11_RVALUE_REF_PRESENT 1119 t->prefix().owner->spawn( *t, *list.
next_ptr );
1126 t->prefix().owner->spawn_root_and_wait( *t, *root_list.
next_ptr );
1141 #if __TBB_TASK_GROUP_CONTEXT 1143 return &
p.allocate(bytes);
1147 p.free( *static_cast<tbb::task*>(
task) );
1152 return &
p.allocate(bytes);
1156 p.free( *static_cast<tbb::task*>(
task) );
1160 return &
p.allocate(bytes);
1164 p.free( *static_cast<tbb::task*>(
task) );
1168 return &
p.allocate(bytes);
1172 p.free( *static_cast<tbb::task*>(
task) );
1176 #undef __TBB_task_H_include_area uintptr_t my_version_and_traits
Version for run-time checks and behavioral traits of the context.
static const int priority_stride_v4
char _leading_padding[internal::NFS_MaxLineSize - 2 *sizeof(uintptr_t) - sizeof(void *) - sizeof(internal::context_list_node_t) - sizeof(__itt_caller) - sizeof(internal::cpu_ctl_env_space)]
Leading padding protecting accesses to frequently used members from false sharing.
char _trailing_padding[internal::NFS_MaxLineSize - 2 *sizeof(uintptr_t) - 2 *sizeof(void *) - sizeof(intptr_t) - sizeof(internal::string_index)]
Trailing padding protecting accesses to frequently used members from false sharing.
int depth
Obsolete. Used to be scheduling depth before TBB 2.2.
internal::string_index my_name
Description of algorithm for scheduler based instrumentation.
virtual void spawn(task &first, task *&next)=0
For internal use only.
task_group_context * my_parent
Pointer to the context of the parent cancellation group. NULL for isolated contexts.
internal::generic_scheduler * my_owner
Scheduler instance that registered this context in its thread specific list.
static const kind_type binding_required
__TBB_DEPRECATED void set_priority(priority_t)
Changes priority of the task group.
static const kind_type binding_completed
Task that does nothing. Useful for synchronization.
virtual ~scheduler()=0
Pure virtual destructor.
int decrement_ref_count()
Atomically decrements reference count and returns its new value.
Used to form groups of tasks.
state_type state() const
Current execution state.
task * parent() const
Task on whose behalf this task is working, or NULL if this is a root.
task_group_context * context
Shared context that is used to communicate asynchronous state changes.
void copy_fp_settings(const task_group_context &src)
Copies FPU control setting from another context.
bool is_cancelled() const
Returns true if the context has received cancellation request.
Task is in the ready pool, or is going to be put there, or was just taken off.
task_group_context(kind_type relation_with_parent=bound, uintptr_t t=default_traits)
Default & binding constructor.
void push_back(task &task)
Push task onto back of list.
intptr_t my_priority
Priority level of the task group (in normalized representation)
void register_with(internal::generic_scheduler *local_sched)
Registers this context with the local scheduler.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id __itt_relation __itt_id ITT_FORMAT p const wchar_t int ITT_FORMAT __itt_group_mark d int
Task to be recycled as a continuation.
static task &__TBB_EXPORTED_FUNC allocate(size_t size)
task &__TBB_EXPORTED_METHOD allocate(size_t size) const
internal::cpu_ctl_env_space my_cpu_ctl_env
Space for platform-specific FPU settings.
static void spawn_root_and_wait(task &root)
Spawn task allocated by allocate_root, wait for it to complete, and deallocate it.
task &__TBB_EXPORTED_METHOD allocate(size_t size) const
task * self
No longer used, but retained for binary layout compatibility. Always NULL.
bool is_stolen_task() const
True if task was stolen from the task pool of another thread.
bool __TBB_EXPORTED_METHOD is_group_execution_cancelled() const
Returns true if the context received cancellation request.
void __TBB_EXPORTED_METHOD free(task &) const
int ref_count() const
The internal reference count.
task * next_offloaded
Pointer to the next offloaded lower priority task.
friend bool is_critical(task &)
__TBB_DEPRECATED priority_t group_priority() const
Retrieves current priority of the task group this task belongs to.
task_group_context(internal::string_index name)
Task object is on the free list, or is going to be put there, or was just taken off.
allocate_additional_child_of_proxy(task &parent_)
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id parent
void call_itt_notify(notify_type, void *)
void spawn_and_wait_for_all(task &child)
Similar to spawn followed by wait_for_all, but more efficient.
Base class for types that should not be assigned.
virtual void spawn_root_and_wait(task &first, task *&next)=0
For internal use only.
void increment_ref_count()
Atomically increment reference count.
allocate_root_with_context_proxy(task_group_context &ctx)
task * execute() __TBB_override
Should be overridden by derived classes.
static const kind_type dying
void wait_for_all()
Wait for reference count to become one, and set reference count to zero.
task_group_context & my_context
static void spawn(task &t)
Schedule task for execution when a worker becomes available.
static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of(task &t)
Like allocate_child, except that task's parent becomes "t", not this.
__TBB_EXPORTED_METHOD ~task_group_context()
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t count
virtual void __TBB_EXPORTED_METHOD note_affinity(affinity_id id)
Invoked by scheduler to notify task that it ran on unexpected thread.
#define __TBB_TASK_BASE_ACCESS
state_type
Enumeration of task states that the scheduler considers.
uintptr_t my_state
Internal state (combination of state flags, currently only may_have_children).
__TBB_DEPRECATED priority_t priority() const
Retrieves current priority of the current task group.
auto first(Container &c) -> decltype(begin(c))
function_task(const F &f)
void __TBB_EXPORTED_METHOD free(task &) const
void make_critical(task &t)
void recycle_as_child_of(task &new_parent)
Change this to be a child of new_parent.
static const int priority_critical
uintptr_t _my_kind_aligner
uintptr_t traits() const
Returns the context's trait.
internal::context_list_node_t my_node
Used to form the thread specific list of contexts without additional memory allocation.
void __TBB_EXPORTED_METHOD init()
Out-of-line part of the constructor.
void const char const char int ITT_FORMAT __itt_group_sync x void const char * name
void bind_to(internal::generic_scheduler *local_sched)
Registers this context with the local scheduler and binds it to its parent context.
void __TBB_EXPORTED_METHOD reset()
Forcefully reinitializes the context after the task tree it was associated with is completed.
struct ___itt_caller * __itt_caller
unsigned char state
A task::state_type, stored as a byte for compactness.
void move(tbb_thread &t1, tbb_thread &t2)
bool is_enqueued_task() const
True if the task was enqueued.
bool empty() const
True if list is empty; false otherwise.
void propagate_task_group_state(T task_group_context::*mptr_state, task_group_context &src, T new_state)
Propagates any state change detected to *this, and as an optimisation possibly also upward along the heritage line.
void set_ref_count(int count)
Set reference count.
static const kind_type detached
intptr_t isolation_tag
A tag for task isolation.
__TBB_atomic reference_count ref_count
Reference count used for synchronization.
#define __TBB_EXPORTED_FUNC
tbb::task & task()
The task corresponding to this task_prefix.
void recycle_as_continuation()
Change this to be a continuation of its former self.
void set_parent(task *p)
Sets the parent task pointer to the specified value.
task_group_context * context()
This method is deprecated and will be removed in the future.
scheduler * owner
Obsolete. The scheduler that owns the task.
friend void make_critical(task &)
task * execute() __TBB_override
Should be overridden by derived classes.
virtual ~task()
Destructor.
intptr_t reference_count
A reference count.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task * task
Base class for user-defined tasks.
task()
Default constructor.
Work stealing task scheduler.
int add_ref_count(int count)
Atomically adds to reference count and returns its new value.
tbb::task * parent
The task whose reference count includes me.
__TBB_DEPRECATED void set_group_priority(priority_t p)
Changes priority of the task group this task belongs to.
const size_t NFS_MaxLineSize
Compile-time constant that is upper bound on cache line/sector size.
static void __TBB_EXPORTED_FUNC destroy(task &victim)
Destroy a task.
Base class for types that should not be copied or assigned.
unsigned short affinity_id
An id as used for specifying affinity.
static internal::allocate_root_proxy allocate_root()
Returns proxy for overloaded new that allocates a root task.
Task object is freshly allocated or recycled.
task & pop_front()
Pop the front task from the list.
internal::allocate_child_proxy & allocate_child()
Returns proxy for overloaded new that allocates a child task of *this.
Class delimiting the scope of task scheduler activity.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id id
task_list()
Construct empty list.
void recycle_to_reexecute()
Schedule this for reexecution after current execute() returns.
task_group_context * group()
Pointer to the task group descriptor.
Base class for methods that became static in TBB 3.0.
virtual void wait_for_all(task &parent, task *child)=0
For internal use only.
~task_list()
Destroys the list, but does not destroy the task objects.
#define __TBB_FetchAndDecrementWrelease(P)
void __TBB_EXPORTED_METHOD internal_set_ref_count(int count)
Set reference count.
task &__TBB_EXPORTED_METHOD allocate(size_t size) const
Exception container that preserves the exact copy of the original exception.
void clear()
Clear the list.
unsigned char extra_state
Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
void __TBB_EXPORTED_METHOD change_group(task_group_context &ctx)
Moves this task from its current group into another one.
internal::allocate_continuation_proxy & allocate_continuation()
Returns proxy for overloaded new that allocates a continuation task of *this.
Interface to be implemented by all exceptions TBB recognizes and propagates across the threads.
static void __TBB_EXPORTED_FUNC free(task &)
void __TBB_EXPORTED_METHOD free(task &) const
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
#define __TBB_EXPORTED_METHOD
__itt_caller itt_caller
Used to set and maintain stack stitching point for Intel Performance Tools.
Task is running, and will be destroyed after method execute() completes.
void __TBB_EXPORTED_METHOD capture_fp_settings()
Captures the current FPU control settings to the context.
void suppress_unused_warning(const T1 &)
Utility template function to prevent "unused" warnings by various compilers.
scheduler * origin
The scheduler that allocated the task, or NULL if the task is big.
bool __TBB_EXPORTED_METHOD cancel_group_execution()
Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
isolation_tag isolation
The tag used for task isolation.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
context_list_node_t * my_prev
internal::affinity_id affinity_id
An id as used for specifying affinity.
#define __TBB_FetchAndIncrementWacquire(P)
__TBB_atomic kind_type my_kind
Flavor of this context: bound or isolated.
internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count()
Decrement reference count and return its new value.
void set_affinity(affinity_id id)
Set affinity for this task.
bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const
Obsolete, and only retained for the sake of backward compatibility. Always returns true.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t size
uintptr_t my_cancellation_requested
Specifies whether cancellation was requested for this task group.
bool cancel_group_execution()
Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
void const char const char int ITT_FORMAT __itt_group_sync p
exception_container_type * my_exception
Pointer to the container storing exception being propagated across this task group.
int space[sizeof(internal::uint64_t)/sizeof(int)]
void __TBB_EXPORTED_METHOD free(task &) const
const isolation_tag no_isolation
task &__TBB_EXPORTED_METHOD allocate(size_t size) const
version_traits_word_layout
void __TBB_EXPORTED_METHOD register_pending_exception()
Records the pending exception, and cancels the task group.
affinity_id affinity() const
Current affinity of this task.
Memory prefix to a task object.
tbb::task * next
"next" field for the list of tasks.
context_list_node_t * my_next
static void enqueue(task &t)
Enqueue task for starvation-resistant execution.
internal::tbb_exception_ptr exception_container_type
virtual task * execute()=0
Should be overridden by derived classes.
void recycle_as_safe_continuation()
Recommended to use, safe variant of recycle_as_continuation.
virtual void enqueue(task &t, void *reserved)=0
For internal use only.
bool is_critical(task &t)