21 #ifndef _TBB_custom_scheduler_H 22 #define _TBB_custom_scheduler_H 42 #if __TBB_x86_32||__TBB_x86_64 55 template<
typename SchedulerTraits>
78 if( SchedulerTraits::itt_possible )
80 if( SchedulerTraits::has_slow_atomic &&
p.ref_count==1 )
89 __TBB_ASSERT(
p.ref_count==0,
"completion of task caused predecessor's reference count to underflow");
90 if( SchedulerTraits::itt_possible )
95 #if __TBB_TASK_ISOLATION 99 p.isolation = isolation;
103 #if __TBB_RECYCLE_TO_ENQUEUE 104 if (
p.state==task::to_enqueue) {
110 if( bypass_slot==NULL )
112 #if __TBB_PREVIEW_CRITICAL_TASKS 127 s->assert_task_pool_valid();
141 template<
typename SchedulerTraits>
144 bool outermost_worker_level = worker_outermost_level();
145 bool outermost_dispatch_level = outermost_worker_level || master_outermost_level();
146 bool can_steal_here = can_steal();
147 my_inbox.set_is_idle(
true );
148 #if __TBB_HOARD_NONLOCAL_TASKS 151 #if __TBB_TASK_PRIORITY 152 if ( outermost_dispatch_level ) {
153 if ( intptr_t skipped_priority = my_arena->my_skipped_fifo_priority ) {
157 if ( my_arena->my_skipped_fifo_priority.compare_and_swap(0, skipped_priority) == skipped_priority
158 && skipped_priority > my_arena->my_top_priority )
160 my_market->update_arena_priority( *my_arena, skipped_priority );
167 size_t n = my_arena->my_limit-1;
171 for(
int failure_count = -static_cast<int>(SchedulerTraits::itt_possible);; ++failure_count) {
174 if( completion_ref_count==1 ) {
175 if( SchedulerTraits::itt_possible ) {
176 if( failure_count!=-1 ) {
177 ITT_NOTIFY(sync_prepare, &completion_ref_count);
181 ITT_NOTIFY(sync_acquired, &completion_ref_count);
191 if ( outermost_worker_level && (my_arena->my_num_workers_allotted < my_arena->num_workers_active()
192 #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY 193 || my_arena->recall_by_mandatory_request()
196 if( SchedulerTraits::itt_possible && failure_count != -1 )
200 #if __TBB_TASK_PRIORITY 201 const int p =
int(my_arena->my_top_priority);
203 static const int p = 0;
207 if ( n && !my_inbox.empty() ) {
209 #if __TBB_TASK_ISOLATION 213 if ( isolation !=
no_isolation && !t && !my_inbox.empty()
214 && my_inbox.is_idle_state(
true ) ) {
217 my_inbox.set_is_idle(
false );
227 !my_arena->my_task_stream.empty(
p) && (
228 #if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD 231 t = my_arena->my_task_stream.pop(
p, my_arena_slot->hint_for_pop )
234 ITT_NOTIFY(sync_acquired, &my_arena->my_task_stream);
237 #if __TBB_TASK_PRIORITY 240 __TBB_ASSERT( !is_proxy(*t),
"The proxy task cannot be offloaded" );
247 #if __TBB_PREVIEW_CRITICAL_TASKS 250 ITT_NOTIFY(sync_acquired, &my_arena->my_critical_task_stream);
253 #endif // __TBB_PREVIEW_CRITICAL_TASKS 258 #if __TBB_ARENA_OBSERVER 259 my_arena->my_observers.notify_entry_observers( my_last_local_observer, is_worker() );
261 #if __TBB_SCHEDULER_OBSERVER 262 the_global_observer_list.notify_entry_observers( my_last_global_observer, is_worker() );
264 if ( SchedulerTraits::itt_possible && failure_count != -1 ) {
272 if( SchedulerTraits::itt_possible && failure_count==-1 ) {
282 const int failure_threshold = 2*
int(n+1);
283 if( failure_count>=failure_threshold ) {
287 failure_count = failure_threshold;
290 #if __TBB_TASK_PRIORITY 292 if ( my_arena->my_orphaned_tasks ) {
294 ++my_arena->my_abandonment_epoch;
295 task* orphans = (
task*)__TBB_FetchAndStoreW( &my_arena->my_orphaned_tasks, 0 );
299 my_local_reload_epoch--;
300 t = reload_tasks( orphans, link,
__TBB_ISOLATION_ARG( effective_reference_priority(), isolation ) );
302 *link = my_offloaded_tasks;
303 if ( !my_offloaded_tasks )
304 my_offloaded_task_list_tail_link = link;
305 my_offloaded_tasks = orphans;
307 __TBB_ASSERT( !my_offloaded_tasks == !my_offloaded_task_list_tail_link, NULL );
309 if( SchedulerTraits::itt_possible )
311 __TBB_ASSERT( !is_proxy(*t),
"The proxy task cannot be offloaded" );
317 const int yield_threshold = 100;
318 if( yield_count++ >= yield_threshold ) {
321 #if __TBB_TASK_PRIORITY 322 if( outermost_worker_level || my_arena->my_top_priority > my_arena->my_bottom_priority ) {
323 if ( my_arena->is_out_of_work() && outermost_worker_level ) {
325 if ( outermost_worker_level && my_arena->is_out_of_work() ) {
327 if( SchedulerTraits::itt_possible )
331 #if __TBB_TASK_PRIORITY 333 if ( my_offloaded_tasks ) {
336 my_local_reload_epoch--;
341 if ( !outermost_worker_level && *my_ref_top_priority > my_arena->my_top_priority ) {
343 my_ref_top_priority = &my_arena->my_top_priority;
345 __TBB_ASSERT(my_ref_reload_epoch == &my_arena->my_reload_epoch, NULL);
351 n = my_arena->my_limit-1;
354 if ( my_inbox.is_idle_state(
true ) )
355 my_inbox.set_is_idle(
false );
359 template<
typename SchedulerTraits>
364 assert_task_pool_valid();
367 if( SchedulerTraits::itt_possible )
369 #if __TBB_TASK_GROUP_CONTEXT 378 parents_work_done = 1,
382 #if __TBB_TASK_PRIORITY 384 volatile intptr_t *old_ref_top_priority = my_ref_top_priority;
387 volatile uintptr_t *old_ref_reload_epoch = my_ref_reload_epoch;
389 task* old_innermost_running_task = my_innermost_running_task;
392 __TBB_ASSERT( my_properties.outermost || my_innermost_running_task!=my_dummy_task,
"The outermost property should be set out of a dispatch loop" );
393 my_properties.outermost &= my_innermost_running_task==my_dummy_task;
394 #if __TBB_TASK_ISOLATION 395 isolation_tag isolation = my_innermost_running_task->prefix().isolation;
397 if( master_outermost_level() ) {
399 quit_point = &
parent == my_dummy_task ? all_local_work_done : parents_work_done;
401 quit_point = parents_work_done;
402 #if __TBB_TASK_PRIORITY 403 if ( &
parent != my_dummy_task ) {
407 my_ref_top_priority = &
parent.prefix().context->my_priority;
408 my_ref_reload_epoch = &my_arena->my_reload_epoch;
409 if(my_ref_reload_epoch != old_ref_reload_epoch)
410 my_local_reload_epoch = *my_ref_reload_epoch-1;
418 #if __TBB_TASK_ISOLATION 422 t->prefix().isolation = isolation;
426 #if TBB_USE_EXCEPTIONS 443 #if __TBB_TASK_ISOLATION 445 "A task from another isolated region is going to be executed" );
448 #if __TBB_TASK_GROUP_CONTEXT && TBB_USE_ASSERT 449 assert_context_valid(t->prefix().context);
450 if ( !t->prefix().context->my_cancellation_requested )
454 assert_task_pool_valid();
455 #if __TBB_PREVIEW_CRITICAL_TASKS 461 "Received task must be critical one" );
462 ITT_NOTIFY(sync_acquired, &my_arena->my_critical_task_stream);
464 my_innermost_running_task = t;
465 local_spawn(t, t->prefix().next);
469 #if __TBB_TASK_PRIORITY 470 intptr_t
p = priority(*t);
471 if (
p != *my_ref_top_priority
473 assert_priority_valid(
p);
474 if (
p != my_arena->my_top_priority ) {
475 my_market->update_arena_priority( *my_arena,
p );
477 if (
p < effective_reference_priority() ) {
478 if ( !my_offloaded_tasks ) {
479 my_offloaded_task_list_tail_link = &t->prefix().next_offloaded;
482 *my_offloaded_task_list_tail_link = NULL;
484 offload_task( *t,
p );
485 if ( is_task_pool_published() ) {
494 goto stealing_ground;
498 #if __TBB_PREVIEW_CRITICAL_TASKS 503 my_innermost_running_task = t;
506 #if __TBB_TASK_GROUP_CONTEXT 507 if ( !t->prefix().context->my_cancellation_requested )
511 GATHER_STATISTIC( my_counters.avg_arena_concurrency += my_arena->num_workers_active() );
512 GATHER_STATISTIC( my_counters.avg_assigned_workers += my_arena->my_num_workers_allotted );
513 #if __TBB_TASK_PRIORITY 515 GATHER_STATISTIC( my_counters.avg_market_prio += my_market->my_global_top_priority );
517 ITT_STACK(SchedulerTraits::itt_possible, callee_enter, t->prefix().context->itt_caller);
518 #if __TBB_PREVIEW_CRITICAL_TASKS 519 internal::critical_task_count_guard tc_guard(my_properties, *t);
522 ITT_STACK(SchedulerTraits::itt_possible, callee_leave, t->prefix().context->itt_caller);
525 "if task::execute() returns task, it must be marked as allocated" );
530 if (next_affinity != 0 && next_affinity != my_affinity_id)
535 assert_task_pool_valid();
536 switch( t->state() ) {
538 task*
s = t->parent();
540 __TBB_ASSERT( t->prefix().ref_count==0,
"Task still has children after it has been executed" );
544 free_task<no_hint>( *t );
546 assert_task_pool_valid();
552 #if __TBB_RECYCLE_TO_ENQUEUE 554 case task::to_enqueue:
556 __TBB_ASSERT( t_next != t,
"a task returned from method execute() can not be recycled in another way" );
560 assert_task_pool_valid();
564 __TBB_ASSERT( t_next,
"reexecution requires that method execute() return another task" );
565 __TBB_ASSERT( t_next != t,
"a task returned from method execute() can not be recycled in another way" );
568 local_spawn( t, t->prefix().next );
569 assert_task_pool_valid();
576 __TBB_ASSERT(
false,
"task is in READY state upon return from method execute()" );
589 assert_task_pool_valid();
590 if (
parent.prefix().ref_count == quit_point ) {
591 __TBB_ASSERT( quit_point != all_local_work_done, NULL );
596 if ( is_task_pool_published() ) {
599 __TBB_ASSERT( is_quiescent_local_task_pool_reset(), NULL );
602 assert_task_pool_valid();
609 #if __TBB_TASK_PRIORITY 612 #if __TBB_HOARD_NONLOCAL_TASKS 614 for (; my_nonlocal_free_list; my_nonlocal_free_list = t ) {
615 t = my_nonlocal_free_list->prefix().next;
616 free_nonlocal_small_task( *my_nonlocal_free_list );
619 if ( quit_point == all_local_work_done ) {
620 __TBB_ASSERT( !is_task_pool_published() && is_quiescent_local_task_pool_reset(), NULL );
622 my_innermost_running_task = old_innermost_running_task;
623 my_properties = old_properties;
624 #if __TBB_TASK_PRIORITY 625 my_ref_top_priority = old_ref_top_priority;
626 if(my_ref_reload_epoch != old_ref_reload_epoch)
627 my_local_reload_epoch = *old_ref_reload_epoch-1;
628 my_ref_reload_epoch = old_ref_reload_epoch;
641 #if TBB_USE_EXCEPTIONS 644 TbbCatchAll( t->prefix().context );
647 #if __TBB_RECYCLE_TO_ENQUEUE 649 || t->state() == task::to_enqueue
654 if( SchedulerTraits::itt_possible )
657 if( SchedulerTraits::itt_possible )
658 ITT_NOTIFY(sync_acquired, &t->prefix().ref_count);
667 my_innermost_running_task = old_innermost_running_task;
668 my_properties = old_properties;
669 #if __TBB_TASK_PRIORITY 670 my_ref_top_priority = old_ref_top_priority;
671 if(my_ref_reload_epoch != old_ref_reload_epoch)
672 my_local_reload_epoch = *old_ref_reload_epoch-1;
673 my_ref_reload_epoch = old_ref_reload_epoch;
676 if (
parent.prefix().ref_count != parents_work_done ) {
679 "Worker thread exits nested dispatch loop prematurely" );
682 parent.prefix().ref_count = 0;
687 #if __TBB_TASK_GROUP_CONTEXT 690 if ( parent_ctx->my_cancellation_requested ) {
692 if ( master_outermost_level() && parent_ctx == default_context() ) {
695 parent_ctx->my_cancellation_requested = 0;
703 context_guard.restore_default();
704 TbbRethrowException( pe );
707 __TBB_ASSERT(!is_worker() || !CancellationInfoPresent(*my_dummy_task),
708 "Worker's dummy task context modified");
709 __TBB_ASSERT(!master_outermost_level() || !CancellationInfoPresent(*my_dummy_task),
710 "Unexpected exception or cancellation data in the master's dummy task");
712 assert_task_pool_valid();
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
#define ITT_STACK(precond, name, obj)
void set_ctx(__TBB_CONTEXT_ARG1(task_group_context *))
Memory prefix to a task object.
unsigned short affinity_id
An id as used for specifying affinity.
static generic_scheduler * local_scheduler()
Obtain the thread-local instance of the TBB scheduler.
Set if ref_count might be changed by another thread. Used for debugging.
#define __TBB_FetchAndDecrementWrelease(P)
virtual task * execute()=0
Should be overridden by derived classes.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
const isolation_tag no_isolation
task object is freshly allocated or recycled.
#define __TBB_ISOLATION_EXPR(isolation)
#define ITT_SYNC_CREATE(obj, type, name)
Used to form groups of tasks.
Bit-field representing properties of a scheduler.
FastRandom my_random
Random number generator used for picking a random victim from which to steal.
void reset_extra_state(task *t)
task * receive_or_steal_task(__TBB_ISOLATION_ARG(__TBB_atomic reference_count &completion_ref_count, isolation_tag isolation)) __TBB_override
Try getting a task from the mailbox or stealing from another scheduler.
static generic_scheduler * allocate_scheduler(market &m)
void enqueue_task(task &, intptr_t, FastRandom &)
enqueue a task into starvation-resistance queue
Base class for user-defined tasks.
Work stealing task scheduler.
intptr_t isolation_tag
A tag for task isolation.
task is running, and will be destroyed after method execute() completes.
custom_scheduler(market &m)
static const bool has_slow_atomic
static const bool itt_possible
void const char const char int ITT_FORMAT __itt_group_sync p
bool is_critical(task &t)
#define __TBB_ISOLATION_ARG(arg1, isolation)
task * parent() const
task on whose behalf this task is working, or NULL if this is a root.
static const intptr_t num_priority_levels
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p sync_cancel
void local_spawn(task *first, task *&next)
#define GATHER_STATISTIC(x)
static const bool has_slow_atomic
task to be recycled as continuation
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id parent
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p sync_releasing
internal::tbb_exception_ptr exception_container_type
task is in ready pool, or is going to be put there, or was just taken off.
void poison_pointer(T *__TBB_atomic &)
void wait_for_all(task &parent, task *child) __TBB_override
Entry point from client code to the scheduler loop that dispatches tasks.
#define ITT_NOTIFY(name, obj)
static const bool itt_possible
void const char const char int ITT_FORMAT __itt_group_sync s
#define __TBB_CONTEXT_ARG1(context)
void tally_completion_of_predecessor(task &s, __TBB_ISOLATION_ARG(task *&bypass_slot, isolation_tag isolation))
Decrements ref_count of a predecessor.
void local_wait_for_all(task &parent, task *child) __TBB_override
Scheduler loop that dispatches tasks.
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
Traits classes for scheduler.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id __itt_relation __itt_id ITT_FORMAT p const wchar_t int ITT_FORMAT __itt_group_mark d int
state_type state() const
Current execution state.
intptr_t reference_count
A reference count.
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
void assert_task_valid(const task *)
bool ConcurrentWaitsEnabled(task &t)
#define __TBB_control_consistency_helper()
A scheduler with a customized evaluation loop.
#define __TBB_fallthrough
custom_scheduler< SchedulerTraits > scheduler_type
arena * my_arena
The arena that I own (if master) or am servicing at the moment (if worker)