Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
task_arena.h
/*
    Copyright (c) 2005-2019 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_task_arena_H
#define __TBB_task_arena_H

#define __TBB_task_arena_H_include_area

#include "task.h"
#include "tbb_exception.h"
#if __TBB_NUMA_SUPPORT
#include "info.h"
#endif /*__TBB_NUMA_SUPPORT*/
#if TBB_USE_THREADING_TOOLS
#include "atomic.h" // for as_atomic
#endif
#include "aligned_space.h"

namespace tbb {

namespace this_task_arena {
    int max_concurrency();
} // namespace this_task_arena

namespace internal {
    class arena;
    class task_scheduler_observer_v3;
} // namespace internal

namespace interface7 {
class task_arena;

namespace internal {
using namespace tbb::internal; //e.g. function_task from task.h

class delegate_base : no_assign {
public:
    virtual void operator()() const = 0;
    virtual ~delegate_base() {}
};

// If decltype is available, the helper detects the return type of functor of specified type,
// otherwise it defines the void type.
template <typename F>
struct return_type_or_void {
#if __TBB_CPP11_DECLTYPE_PRESENT && !__TBB_CPP11_DECLTYPE_OF_FUNCTION_RETURN_TYPE_BROKEN
    typedef decltype(declval<F>()()) type;
#else
    typedef void type;
#endif
};

template<typename F, typename R>
class delegated_function : public delegate_base {
    F &my_func;
    tbb::aligned_space<R> my_return_storage;
    // The function should be called only once.
    void operator()() const __TBB_override {
        new (my_return_storage.begin()) R(my_func());
    }
public:
    delegated_function(F& f) : my_func(f) {}
    // The function can be called only after operator() and only once.
    R consume_result() const {
        return tbb::internal::move(*(my_return_storage.begin()));
    }
    ~delegated_function() {
        my_return_storage.begin()->~R();
    }
};

template<typename F>
class delegated_function<F,void> : public delegate_base {
    F &my_func;
    void operator()() const __TBB_override {
        my_func();
    }
public:
    delegated_function(F& f) : my_func(f) {}
    void consume_result() const {}

    friend class task_arena_base;
};

class task_arena_base {
#if __TBB_NUMA_SUPPORT
public:
    // TODO: consider version approach to resolve backward compatibility potential issues.
    struct constraints {
        constraints(numa_node_id id = automatic, int maximal_concurrency = automatic)
            : numa_id(id)
            , max_concurrency(maximal_concurrency)
        {}
        numa_node_id numa_id;
        int max_concurrency;
    };
#endif /*__TBB_NUMA_SUPPORT*/
protected:
    //! NULL if not currently initialized.
    internal::arena* my_arena;

#if __TBB_TASK_GROUP_CONTEXT
    task_group_context *my_context;
#endif

    //! Concurrency level for deferred initialization
    int my_max_concurrency;

    //! Reserved master slots
    unsigned my_master_slots;

    //! Special settings
    intptr_t my_version_and_traits;

    bool my_initialized;

#if __TBB_NUMA_SUPPORT
    numa_node_id my_numa_id;

    // Do not access my_numa_id without the following runtime check.
    // Although my_numa_id is accessible, it does not exist in task_arena_base on the user side
    // if the TBB_PREVIEW_NUMA_SUPPORT macro is not defined by the user. To be sure that
    // my_numa_id exists in the task_arena_base layout we check the traits.
    // TODO: Consider increasing interface version for task_arena_base instead of this runtime check.
    numa_node_id numa_id() {
        return (my_version_and_traits & numa_support_flag) == numa_support_flag ? my_numa_id : automatic;
    }
#endif

    enum {
        default_flags = 0
#if __TBB_TASK_GROUP_CONTEXT
        | (task_group_context::default_traits & task_group_context::exact_exception) // 0 or 1 << 16
        , exact_exception_flag = task_group_context::exact_exception // used to specify flag for context directly
#endif
#if __TBB_NUMA_SUPPORT
        , numa_support_flag = 1
#endif
    };

    task_arena_base(int max_concurrency, unsigned reserved_for_masters)
        : my_arena(0)
#if __TBB_TASK_GROUP_CONTEXT
        , my_context(0)
#endif
        , my_max_concurrency(max_concurrency)
        , my_master_slots(reserved_for_masters)
#if __TBB_NUMA_SUPPORT
        , my_version_and_traits(default_flags | numa_support_flag)
#else
        , my_version_and_traits(default_flags)
#endif
        , my_initialized(false)
#if __TBB_NUMA_SUPPORT
        , my_numa_id(automatic)
#endif
    {}

#if __TBB_NUMA_SUPPORT
    task_arena_base(const constraints& constraints_, unsigned reserved_for_masters)
        : my_arena(0)
#if __TBB_TASK_GROUP_CONTEXT
        , my_context(0)
#endif
        , my_max_concurrency(constraints_.max_concurrency)
        , my_master_slots(reserved_for_masters)
        , my_version_and_traits(default_flags | numa_support_flag)
        , my_initialized(false)
        , my_numa_id(constraints_.numa_id)
    {}
#endif /*__TBB_NUMA_SUPPORT*/

    void __TBB_EXPORTED_METHOD internal_initialize();
    void __TBB_EXPORTED_METHOD internal_terminate();
    void __TBB_EXPORTED_METHOD internal_attach();
    void __TBB_EXPORTED_METHOD internal_enqueue( task&, intptr_t ) const;
    void __TBB_EXPORTED_METHOD internal_execute( delegate_base& ) const;
    void __TBB_EXPORTED_METHOD internal_wait() const;
    static int __TBB_EXPORTED_FUNC internal_current_slot();
    static int __TBB_EXPORTED_FUNC internal_max_concurrency( const task_arena * );
public:
    static const int automatic = -1;
    static const int not_initialized = -2;

};

#if __TBB_TASK_ISOLATION
void __TBB_EXPORTED_FUNC isolate_within_arena( delegate_base& d, intptr_t isolation = 0 );

template<typename R, typename F>
R isolate_impl(F& f) {
    delegated_function<F, R> d(f);
    isolate_within_arena(d);
    return d.consume_result();
}
#endif /* __TBB_TASK_ISOLATION */
} // namespace internal
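
The helpers above exist so that a single exported, non-template entry point such as isolate_within_arena() can run an arbitrary functor and hand a typed result back to the caller. A minimal standalone sketch of the same type-erasure pattern follows; it is an illustration only, is not part of TBB, and uses plain C++11 facilities instead of tbb::aligned_space.

#include <new>
#include <type_traits>
#include <utility>

// Abstract interface that erases the concrete functor type, mirroring delegate_base above.
struct delegate_base_sketch {
    virtual void operator()() const = 0;
    virtual ~delegate_base_sketch() {}
};

// Keeps a reference to the functor plus raw, suitably aligned storage for its result.
// The result is constructed when operator() runs and is moved out exactly once by
// consume_result(). Handles non-void R only (the header uses a separate specialization for void).
template <typename F, typename R>
class delegated_function_sketch : public delegate_base_sketch {
    F& my_func;
    mutable typename std::aligned_storage<sizeof(R), alignof(R)>::type my_storage;
    R* result_ptr() const { return reinterpret_cast<R*>(&my_storage); }
    void operator()() const override { new (result_ptr()) R(my_func()); }
public:
    explicit delegated_function_sketch(F& f) : my_func(f) {}
    R consume_result() const { return std::move(*result_ptr()); }
    ~delegated_function_sketch() { result_ptr()->~R(); } // assumes operator() ran exactly once
};

// The role played by isolate_within_arena(): a non-template entry point that can run
// any callable through the abstract base.
inline void run_erased(const delegate_base_sketch& d) { d(); }

// The analogue of isolate_impl() above: build the typed delegate, run it through the
// erased entry point, then hand the stored result back to the caller.
template <typename F>
auto run_and_get_result(F& f) -> decltype(f()) {
    delegated_function_sketch<F, decltype(f())> d(f);
    run_erased(d);
    return d.consume_result();
}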

/** 1-to-1 proxy representation class of scheduler's arena
 * Constructors set up settings only, real construction is deferred till the first method invocation
 * Destructor only removes one of the references to the inner arena representation.
 * Final destruction happens when all the references (and the work) are gone.
 **/
class task_arena : public internal::task_arena_base {
    friend class tbb::internal::task_scheduler_observer_v3;
    friend int tbb::this_task_arena::max_concurrency();
    friend void task::enqueue(task&, task_arena&
#if __TBB_TASK_PRIORITY
        , priority_t
#endif
    );
    void mark_initialized() {
        __TBB_ASSERT( my_arena, "task_arena initialization is incomplete" );
#if __TBB_TASK_GROUP_CONTEXT
        __TBB_ASSERT( my_context, "task_arena initialization is incomplete" );
#endif
#if TBB_USE_THREADING_TOOLS
        // Actual synchronization happens in internal_initialize & internal_attach.
        // The race on setting my_initialized is benign, but should be hidden from Intel(R) Inspector
        internal::as_atomic(my_initialized).fetch_and_store<release>(true);
#else
        my_initialized = true;
#endif
    }

    template<typename F>
    void enqueue_impl( __TBB_FORWARDING_REF(F) f
#if __TBB_TASK_PRIORITY
        , priority_t p = priority_t(0)
#endif
    ) {
#if !__TBB_TASK_PRIORITY
        intptr_t p = 0;
#endif
        initialize();
#if __TBB_TASK_GROUP_CONTEXT
        internal_enqueue(*new(task::allocate_root(*my_context)) internal::function_task< typename internal::strip<F>::type >(internal::forward<F>(f)), p);
#else
        internal_enqueue(*new(task::allocate_root()) internal::function_task< typename internal::strip<F>::type >(internal::forward<F>(f)), p);
#endif /* __TBB_TASK_GROUP_CONTEXT */
    }

    template<typename R, typename F>
    R execute_impl(F& f) {
        initialize();
        internal::delegated_function<F, R> d(f);
        internal_execute(d);
        return d.consume_result();
    }

public:
    //! Creates task_arena with certain concurrency limits
    task_arena(int max_concurrency_ = automatic, unsigned reserved_for_masters = 1)
        : task_arena_base(max_concurrency_, reserved_for_masters)
    {}

#if __TBB_NUMA_SUPPORT
    task_arena(const constraints& constraints_, unsigned reserved_for_masters = 1)
        : task_arena_base(constraints_, reserved_for_masters)
    {}
#endif /*__TBB_NUMA_SUPPORT*/

    //! Copies settings from another task_arena
    task_arena(const task_arena &s) // copy settings but not the reference or instance
        : task_arena_base(s.my_max_concurrency, s.my_master_slots)
    {}

    //! Tag class used to indicate the "attaching" constructor
    struct attach {};

    //! Creates an instance of task_arena attached to the current arena of the thread
    explicit task_arena( attach )
        : task_arena_base(automatic, 1) // use default settings if attach fails
    {
        internal_attach();
        if( my_arena ) my_initialized = true;
    }

    //! Forces allocation of the resources for the task_arena as specified in constructor arguments
    inline void initialize() {
        if( !my_initialized ) {
            internal_initialize();
            mark_initialized();
        }
    }

    //! Overrides concurrency level and forces initialization of internal representation
    inline void initialize(int max_concurrency_, unsigned reserved_for_masters = 1) {
        // TODO: decide if this call must be thread-safe
        __TBB_ASSERT(!my_arena, "Impossible to modify settings of an already initialized task_arena");
        if( !my_initialized ) {
            my_max_concurrency = max_concurrency_;
            my_master_slots = reserved_for_masters;
            initialize();
        }
    }

#if __TBB_NUMA_SUPPORT
    inline void initialize(constraints constraints_, unsigned reserved_for_masters = 1) {
        // TODO: decide if this call must be thread-safe
        __TBB_ASSERT(!my_arena, "Impossible to modify settings of an already initialized task_arena");
        if( !my_initialized ) {
            my_numa_id = constraints_.numa_id;
            my_max_concurrency = constraints_.max_concurrency;
            my_master_slots = reserved_for_masters;
            initialize();
        }
    }
#endif /*__TBB_NUMA_SUPPORT*/

    //! Attaches this instance to the current arena of the thread
    inline void initialize(attach) {
        // TODO: decide if this call must be thread-safe
        __TBB_ASSERT(!my_arena, "Impossible to modify settings of an already initialized task_arena");
        if( !my_initialized ) {
            internal_attach();
            if ( !my_arena ) internal_initialize();
            mark_initialized();
        }
    }

    //! Removes the reference to the internal arena representation.
    //! Not thread safe wrt concurrent invocations of other methods.
    inline void terminate() {
        if( my_initialized ) {
            internal_terminate();
            my_initialized = false;
        }
    }

    //! Removes the reference to the internal arena representation, and destroys the external object.
    //! Not thread safe wrt concurrent invocations of other methods.
    ~task_arena() {
        terminate();
    }

    //! Returns true if the arena is active (initialized); false otherwise.
    //! The name was chosen to match a task_scheduler_init method with the same semantics.
    bool is_active() const { return my_initialized; }

    //! Enqueues a task into the arena to process a functor, and immediately returns.
    //! Does not require the calling thread to join the arena
#if __TBB_CPP11_RVALUE_REF_PRESENT
    template<typename F>
    void enqueue( F&& f ) {
        enqueue_impl(std::forward<F>(f));
    }
#else
    template<typename F>
    void enqueue( const F& f ) {
        enqueue_impl(f);
    }
#endif

#if __TBB_TASK_PRIORITY
    //! Enqueues a task with priority p into the arena to process a functor, and immediately returns.
    //! Does not require the calling thread to join the arena
    template<typename F>
#if __TBB_CPP11_RVALUE_REF_PRESENT
    __TBB_DEPRECATED void enqueue( F&& f, priority_t p ) {
#if __TBB_PREVIEW_CRITICAL_TASKS
        __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high
                     || p == internal::priority_critical, "Invalid priority level value");
#else
        __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value");
#endif
        enqueue_impl(std::forward<F>(f), p);
    }
#else
    __TBB_DEPRECATED void enqueue( const F& f, priority_t p ) {
#if __TBB_PREVIEW_CRITICAL_TASKS
        __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high
                     || p == internal::priority_critical, "Invalid priority level value");
#else
        __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value");
#endif
        enqueue_impl(f,p);
    }
#endif
#endif// __TBB_TASK_PRIORITY

    //! Joins the arena and executes a mutable functor, then returns
    //! Since C++11, the method returns the value returned by functor (prior to C++11 it returns void).
    template<typename F>
    typename internal::return_type_or_void<F>::type execute(F& f) {
        return execute_impl<typename internal::return_type_or_void<F>::type>(f);
    }

    //! Joins the arena and executes a constant functor, then returns
    //! Since C++11, the method returns the value returned by functor (prior to C++11 it returns void).
    template<typename F>
    typename internal::return_type_or_void<F>::type execute(const F& f) {
        return execute_impl<typename internal::return_type_or_void<F>::type>(f);
    }

#if __TBB_EXTRA_DEBUG
    //! Wait for all work in the arena to be completed
    void debug_wait_until_empty() {
        initialize();
        internal_wait();
    }
#endif //__TBB_EXTRA_DEBUG

    //! Returns the index, aka slot number, of the calling thread in its current arena
    //! This method is deprecated and replaced with this_task_arena::current_thread_index()
    inline static int current_thread_index() {
        return internal_current_slot();
    }

    //! Returns the maximal number of threads that can work inside the arena
    inline int max_concurrency() const {
        // Handle special cases inside the library
        return (my_max_concurrency>1) ? my_max_concurrency : internal_max_concurrency(this);
    }
};
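
A brief usage sketch for the class defined above, for illustration only; it assumes the regular tbb/task_arena.h and tbb/parallel_for.h headers and C++11 lambdas.

#include <tbb/task_arena.h>
#include <tbb/parallel_for.h>
#include <atomic>

void arena_usage_sketch() {
    // At most 4 threads work in this arena; one slot is reserved for the
    // calling (master) thread by default.
    tbb::task_arena limited(4);
    std::atomic<int> sum(0);

    // execute() joins the arena and blocks until the functor returns.
    limited.execute([&] {
        tbb::parallel_for(0, 1000, [&](int i) { sum += i; });
    });

    // enqueue() only submits the work and returns immediately;
    // it is processed by the arena's worker threads.
    limited.enqueue([] { /* background work */ });
}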

namespace this_task_arena {
#if __TBB_TASK_ISOLATION
    //! Executes a mutable functor in isolation within the current task arena.
    template<typename F>
    typename internal::return_type_or_void<F>::type isolate(F& f) {
        return internal::isolate_impl<typename internal::return_type_or_void<F>::type>(f);
    }

    //! Executes a constant functor in isolation within the current task arena.
    template<typename F>
    typename internal::return_type_or_void<F>::type isolate(const F& f) {
        return internal::isolate_impl<typename internal::return_type_or_void<F>::type>(f);
    }
#endif /* __TBB_TASK_ISOLATION */
} // namespace this_task_arena
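
A sketch of how this_task_arena::isolate is typically applied, for illustration only; it assumes __TBB_TASK_ISOLATION is enabled, which is normally the default in this version.

#include <tbb/task_arena.h>
#include <tbb/parallel_for.h>

void isolation_sketch() {
    tbb::parallel_for(0, 100, [](int) {
        // While blocked inside the nested parallel_for, the calling thread may
        // only take tasks spawned within this isolated region, so an unrelated
        // outer iteration cannot start running on its stack.
        tbb::this_task_arena::isolate([] {
            tbb::parallel_for(0, 10, [](int) { /* nested body */ });
        });
    });
}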
} // namespace interfaceX

using interface7::task_arena;

namespace this_task_arena {
    using namespace interface7::this_task_arena;

    //! Returns the index, aka slot number, of the calling thread in its current arena
    inline int current_thread_index() {
        int idx = tbb::task_arena::current_thread_index();
        return idx == -1 ? tbb::task_arena::not_initialized : idx;
    }

    //! Returns the maximal number of threads that can work inside the arena
    inline int max_concurrency() {
        return tbb::task_arena::internal_max_concurrency(NULL);
    }
} // namespace this_task_arena
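
A small sketch of the query functions defined above, for illustration only: max_concurrency() reports how many threads may work in the calling thread's current arena, and current_thread_index() reports the calling thread's slot, or task_arena::not_initialized if the thread has not joined an arena yet.

#include <tbb/task_arena.h>
#include <tbb/parallel_for.h>
#include <cstdio>

void query_sketch() {
    std::printf("arena concurrency: %d\n", tbb::this_task_arena::max_concurrency());

    tbb::parallel_for(0, 8, [](int i) {
        // Each thread working in the arena occupies one slot; slot 0 is
        // normally reserved for the master thread.
        std::printf("iteration %d ran in slot %d\n",
                    i, tbb::this_task_arena::current_thread_index());
    });
}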

#if __TBB_TASK_PRIORITY
void task::enqueue( task& t, task_arena& arena, priority_t p ) {
#else
void task::enqueue( task& t, task_arena& arena ) {
    intptr_t p = 0;
#endif
    arena.initialize();
    arena.internal_enqueue(t, p);
}
} // namespace tbb

#undef __TBB_task_arena_H_include_area

#endif /* __TBB_task_arena_H */
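
A hedged sketch of the preview NUMA support compiled in under __TBB_NUMA_SUPPORT, for illustration only. The tbb::info::numa_nodes() call and the run-time dependency on the tbbbind library are assumptions about the preview API declared in info.h, which is not shown in this file; the constraints type and constructor are the ones defined above.

#define TBB_PREVIEW_NUMA_SUPPORT 1   // enables the __TBB_NUMA_SUPPORT parts of task_arena.h
#include <tbb/task_arena.h>
#include <tbb/info.h>
#include <cstddef>
#include <vector>

void numa_sketch() {
    // Assumption: the preview info.h exposes tbb::info::numa_nodes(); reporting
    // real topology also requires the tbbbind library at run time.
    std::vector<tbb::numa_node_id> nodes = tbb::info::numa_nodes();

    // One arena pinned to each NUMA node via the constraints constructor.
    std::vector<tbb::task_arena> arenas;
    for (std::size_t i = 0; i < nodes.size(); ++i)
        arenas.push_back(tbb::task_arena(tbb::task_arena::constraints(nodes[i])));

    // Work submitted through each arena is scheduled on threads bound to the
    // corresponding NUMA node.
    for (std::size_t i = 0; i < arenas.size(); ++i)
        arenas[i].execute([] { /* node-local work */ });
}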