Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
tbb::internal::concurrent_queue_base_v3 Class Reference [abstract]

For internal use only.

#include <_concurrent_queue_impl.h>


Classes

struct  padded_page
 
struct  page
 Prefix on a page.
 

Protected Types

enum  copy_specifics { copy, move }
 

Protected Member Functions

__TBB_EXPORTED_METHOD concurrent_queue_base_v3 (size_t item_size)
 
virtual __TBB_EXPORTED_METHOD ~concurrent_queue_base_v3 ()
 
void __TBB_EXPORTED_METHOD internal_push (const void *src)
 Enqueue item at tail of queue using copy operation.
 
void __TBB_EXPORTED_METHOD internal_pop (void *dst)
 Dequeue item from head of queue.
 
void __TBB_EXPORTED_METHOD internal_abort ()
 Abort all pending queue operations.
 
bool __TBB_EXPORTED_METHOD internal_push_if_not_full (const void *src)
 Attempt to enqueue item onto queue using copy operation.
 
bool __TBB_EXPORTED_METHOD internal_pop_if_present (void *dst)
 Attempt to dequeue item from queue.
 
ptrdiff_t __TBB_EXPORTED_METHOD internal_size () const
 Get size of queue.
 
bool __TBB_EXPORTED_METHOD internal_empty () const
 Check if the queue is empty.
 
void __TBB_EXPORTED_METHOD internal_set_capacity (ptrdiff_t capacity, size_t element_size)
 Set the queue capacity.
 
virtual page * allocate_page ()=0
 custom allocator
 
virtual void deallocate_page (page *p)=0
 custom de-allocator
 
void __TBB_EXPORTED_METHOD internal_finish_clear ()
 free any remaining pages
 
void __TBB_EXPORTED_METHOD internal_throw_exception () const
 throw an exception
 
void __TBB_EXPORTED_METHOD assign (const concurrent_queue_base_v3 &src)
 copy internal representation
 
void internal_swap (concurrent_queue_base_v3 &src)
 swap queues
 
void internal_insert_item (const void *src, copy_specifics op_type)
 Enqueues item at tail of queue using specified operation (copy or move)
 
bool internal_insert_if_not_full (const void *src, copy_specifics op_type)
 Attempts to enqueue at tail of queue using specified operation (copy or move)
 
void internal_assign (const concurrent_queue_base_v3 &src, copy_specifics op_type)
 Assigns one queue to another using specified operation (copy or move)
 

Protected Attributes

ptrdiff_t my_capacity
 Capacity of the queue.
 
size_t items_per_page
 Always a power of 2.
 
size_t item_size
 Size of an item.
 

Private Member Functions

virtual void copy_item (page &dst, size_t index, const void *src)=0
 
virtual void assign_and_destroy_item (void *dst, page &src, size_t index)=0
 
virtual void copy_page_item (page &dst, size_t dindex, const page &src, size_t sindex)=0
 
- Private Member Functions inherited from tbb::internal::no_copy
 no_copy ()
 Allow default construction.
 

Private Attributes

concurrent_queue_rep * my_rep
 Internal representation.
 

Friends

class concurrent_queue_rep
 
struct micro_queue
 
class micro_queue_pop_finalizer
 
class concurrent_queue_iterator_rep
 
class concurrent_queue_iterator_base_v3
 

Detailed Description

For internal use only.

Type-independent portion of concurrent_queue.

Definition at line 825 of file _concurrent_queue_impl.h.
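
The following standalone sketch (hypothetical names, not TBB code) illustrates the type-independent design: a base class works on raw storage of item_size bytes and defers element copy and destruction to pure virtual hooks that a typed derived class implements, mirroring copy_item() and assign_and_destroy_item() documented below.

#include <cstddef>
#include <new>

// Hypothetical minimal sketch of the type-erasure pattern: the base knows
// only the byte size of an item; a typed layer supplies copy/destroy.
class queue_base_sketch {
protected:
    explicit queue_base_sketch( std::size_t sz ) : item_size(sz) {}
    virtual ~queue_base_sketch() {}
    virtual void copy_item( void* dst, const void* src ) = 0;
    virtual void assign_and_destroy_item( void* dst, void* src ) = 0;
    std::size_t item_size;
};

template<typename T>
class typed_queue_sketch : public queue_base_sketch {
public:
    typed_queue_sketch() : queue_base_sketch( sizeof(T) ) {}
private:
    void copy_item( void* dst, const void* src ) override {
        new (dst) T( *static_cast<const T*>(src) );  // placement copy-construct
    }
    void assign_and_destroy_item( void* dst, void* src ) override {
        T* s = static_cast<T*>(src);
        *static_cast<T*>(dst) = *s;                  // hand the value out
        s->~T();                                     // destroy the queue's slot
    }
};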

Member Enumeration Documentation

◆ copy_specifics

enum tbb::internal::concurrent_queue_base_v3::copy_specifics
protected

Enumerator: copy, move

Constructor & Destructor Documentation

◆ concurrent_queue_base_v3()

tbb::internal::concurrent_queue_base_v3::concurrent_queue_base_v3 ( size_t  item_size)
protected

Definition at line 341 of file concurrent_queue.cpp.

341  {
342  items_per_page = item_sz<= 8 ? 32 :
343  item_sz<= 16 ? 16 :
344  item_sz<= 32 ? 8 :
345  item_sz<= 64 ? 4 :
346  item_sz<=128 ? 2 :
347  1;
348  my_capacity = size_t(-1)/(item_sz>1 ? item_sz : 2);
349  my_rep = cache_aligned_allocator<concurrent_queue_rep>().allocate(1);
350  __TBB_ASSERT( is_aligned(my_rep, NFS_GetLineSize()), "alignment error" );
351  __TBB_ASSERT( is_aligned(&my_rep->head_counter, NFS_GetLineSize()), "alignment error" );
352  __TBB_ASSERT( is_aligned(&my_rep->tail_counter, NFS_GetLineSize()), "alignment error" );
353  __TBB_ASSERT( is_aligned(&my_rep->array, NFS_GetLineSize()), "alignment error" );
354  std::memset(static_cast<void*>(my_rep),0,sizeof(concurrent_queue_rep));
355  new ( &my_rep->items_avail ) concurrent_monitor();
356  new ( &my_rep->slots_avail ) concurrent_monitor();
357  this->item_size = item_sz;
358 }

References __TBB_ASSERT, tbb::cache_aligned_allocator< T >::allocate(), tbb::internal::concurrent_queue_rep::array, tbb::internal::concurrent_queue_rep::head_counter, tbb::internal::is_aligned(), item_size, tbb::internal::concurrent_queue_rep::items_avail, items_per_page, my_capacity, my_rep, tbb::internal::NFS_GetLineSize(), tbb::internal::concurrent_queue_rep::slots_avail, and tbb::internal::concurrent_queue_rep::tail_counter.
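
The size-class ladder above keeps each page at roughly 256 bytes of payload (32 items of 8 bytes down to 2 items of 128 bytes), falling back to one item per page for larger types. A standalone restatement of the heuristic (the function name is ours):

#include <cstddef>

// Restatement of the page-sizing heuristic from the constructor above:
// a page carries ~256 bytes of items, and always at least one item.
std::size_t items_per_page_for( std::size_t item_sz ) {
    return item_sz <=   8 ? 32 :
           item_sz <=  16 ? 16 :
           item_sz <=  32 ?  8 :
           item_sz <=  64 ?  4 :
           item_sz <= 128 ?  2 : 1;
}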


◆ ~concurrent_queue_base_v3()

tbb::internal::concurrent_queue_base_v3::~concurrent_queue_base_v3 ( )
protected virtual

Definition at line 360 of file concurrent_queue.cpp.

360  {
361  size_t nq = my_rep->n_queue;
362  for( size_t i=0; i<nq; i++ )
363  __TBB_ASSERT( my_rep->array[i].tail_page==NULL, "pages were not freed properly" );
364  cache_aligned_allocator<concurrent_queue_rep>().deallocate(my_rep,1);
365 }

References __TBB_ASSERT, tbb::internal::concurrent_queue_rep::array, tbb::cache_aligned_allocator< T >::deallocate(), my_rep, tbb::internal::concurrent_queue_rep::n_queue, and tbb::internal::micro_queue::tail_page.


Member Function Documentation

◆ allocate_page()

virtual page* tbb::internal::concurrent_queue_base_v3::allocate_page ( )
protected pure virtual

custom allocator

Implemented in tbb::concurrent_bounded_queue< T, A >.

Referenced by tbb::internal::micro_queue::make_copy(), and tbb::internal::micro_queue::push().


◆ assign()

void __TBB_EXPORTED_METHOD tbb::internal::concurrent_queue_base_v3::assign ( const concurrent_queue_base_v3 & src)
protected

copy internal representation

Referenced by tbb::concurrent_bounded_queue< T, A >::concurrent_bounded_queue().


◆ assign_and_destroy_item()

virtual void tbb::internal::concurrent_queue_base_v3::assign_and_destroy_item ( void * dst,
page & src,
size_t  index 
)
private pure virtual

Implemented in tbb::concurrent_bounded_queue< T, A >.

Referenced by tbb::internal::micro_queue::pop().


◆ copy_item()

virtual void tbb::internal::concurrent_queue_base_v3::copy_item ( page & dst,
size_t  index,
const void * src 
)
private pure virtual

Implemented in tbb::concurrent_bounded_queue< T, A >.

Referenced by tbb::internal::micro_queue::push().


◆ copy_page_item()

virtual void tbb::internal::concurrent_queue_base_v3::copy_page_item ( page & dst,
size_t  dindex,
const page & src,
size_t  sindex 
)
private pure virtual

Implemented in tbb::concurrent_bounded_queue< T, A >.

Referenced by tbb::internal::micro_queue::make_copy().


◆ deallocate_page()

virtual void tbb::internal::concurrent_queue_base_v3::deallocate_page ( page * p)
protected pure virtual

custom de-allocator

Implemented in tbb::concurrent_bounded_queue< T, A >.

Referenced by internal_finish_clear(), and tbb::internal::micro_queue_pop_finalizer::~micro_queue_pop_finalizer().


◆ internal_abort()

void tbb::internal::concurrent_queue_base_v3::internal_abort ( )
protected

Abort all pending queue operations.

Definition at line 464 of file concurrent_queue.cpp.

464  {
465  concurrent_queue_rep& r = *my_rep;
466  ++r.abort_counter;
467  r.items_avail.abort_all();
468  r.slots_avail.abort_all();
469 }

References tbb::internal::concurrent_monitor::abort_all(), tbb::internal::concurrent_queue_rep::abort_counter, tbb::internal::concurrent_queue_rep::items_avail, my_rep, and tbb::internal::concurrent_queue_rep::slots_avail.
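
Through the public wrapper this is tbb::concurrent_bounded_queue::abort(), which makes operations blocked at the time of the call throw tbb::user_abort. A usage sketch (the sleep is a crude way to let pop() block first; operations started after abort() are unaffected):

#include <chrono>
#include <cstdio>
#include <thread>
#include "tbb/concurrent_queue.h"
#include "tbb/tbb_exception.h"

int main() {
    tbb::concurrent_bounded_queue<int> q;
    std::thread consumer( [&q] {
        int item;
        try {
            q.pop( item );                 // blocks: the queue stays empty
        } catch( tbb::user_abort& ) {
            std::puts( "pop aborted" );    // woken by q.abort()
        }
    } );
    std::this_thread::sleep_for( std::chrono::milliseconds(100) );
    q.abort();    // internal_abort(): bump abort_counter, wake all waiters
    consumer.join();
    return 0;
}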


◆ internal_assign()

void tbb::internal::concurrent_queue_base_v3::internal_assign ( const concurrent_queue_base_v3 & src,
copy_specifics  op_type 
)
protected

Assigns one queue to another using specified operation (copy or move)

Definition at line 555 of file concurrent_queue.cpp.

555  {
556  items_per_page = src.items_per_page;
557  my_capacity = src.my_capacity;
558 
559  // copy concurrent_queue_rep.
560  my_rep->head_counter = src.my_rep->head_counter;
561  my_rep->tail_counter = src.my_rep->tail_counter;
562  my_rep->n_invalid_entries = src.my_rep->n_invalid_entries;
563  my_rep->abort_counter = src.my_rep->abort_counter;
564 
565  // copy micro_queues
566  for( size_t i = 0; i<my_rep->n_queue; ++i )
567  my_rep->array[i].assign( src.my_rep->array[i], *this, op_type );
568 
569  __TBB_ASSERT( my_rep->head_counter==src.my_rep->head_counter && my_rep->tail_counter==src.my_rep->tail_counter,
570  "the source concurrent queue should not be concurrently modified." );
571 }

References __TBB_ASSERT, tbb::internal::concurrent_queue_rep::abort_counter, tbb::internal::concurrent_queue_rep::array, tbb::internal::micro_queue::assign(), tbb::internal::concurrent_queue_rep::head_counter, items_per_page, my_capacity, my_rep, tbb::internal::concurrent_queue_rep::n_invalid_entries, tbb::internal::concurrent_queue_rep::n_queue, and tbb::internal::concurrent_queue_rep::tail_counter.

Referenced by tbb::internal::concurrent_queue_base_v8::move_content().


◆ internal_empty()

bool tbb::internal::concurrent_queue_base_v3::internal_empty ( ) const
protected

Check if the queue is empty.

Definition at line 528 of file concurrent_queue.cpp.

528  {
529  ticket tc = my_rep->tail_counter;
530  ticket hc = my_rep->head_counter;
531  // if tc!=r.tail_counter, the queue was not empty at some point between the two reads.
532  return ( tc==my_rep->tail_counter && ptrdiff_t(tc-hc-my_rep->n_invalid_entries)<=0 );
533 }

References tbb::internal::concurrent_queue_rep::head_counter, my_rep, tbb::internal::concurrent_queue_rep::n_invalid_entries, and tbb::internal::concurrent_queue_rep::tail_counter.

Referenced by tbb::concurrent_bounded_queue< T, A >::empty().
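
The two reads of tail_counter bracket the read of head_counter: if the second read still matches the first, the head/tail pair is a consistent snapshot; otherwise the queue was demonstrably non-empty at some instant between the reads. A generic sketch of the idiom (our code, using std::atomic):

#include <atomic>
#include <cstddef>

// Sketch of the double-read snapshot idiom used by internal_empty():
// re-reading tail validates that (head, tail) formed a consistent pair.
bool snapshot_empty( const std::atomic<std::size_t>& head,
                     const std::atomic<std::size_t>& tail,
                     std::ptrdiff_t n_invalid ) {
    std::size_t tc = tail.load();
    std::size_t hc = head.load();
    // If tail moved between the reads, items were pushed meanwhile and
    // reporting "not empty" is a correct answer for that interval.
    return tc == tail.load() && std::ptrdiff_t( tc - hc - n_invalid ) <= 0;
}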


◆ internal_finish_clear()

void tbb::internal::concurrent_queue_base_v3::internal_finish_clear ( )
protected

free any remaining pages

Definition at line 539 of file concurrent_queue.cpp.

539  {
540  size_t nq = my_rep->n_queue;
541  for( size_t i=0; i<nq; ++i ) {
542  page* tp = my_rep->array[i].tail_page;
543  __TBB_ASSERT( my_rep->array[i].head_page==tp, "at most one page should remain" );
544  if( tp!=NULL) {
545  if( tp!=static_invalid_page ) deallocate_page( tp );
546  my_rep->array[i].tail_page = NULL;
547  }
548  }
549 }

References __TBB_ASSERT, tbb::internal::concurrent_queue_rep::array, deallocate_page(), tbb::internal::micro_queue::head_page, my_rep, tbb::internal::concurrent_queue_rep::n_queue, tbb::internal::static_invalid_page, and tbb::internal::micro_queue::tail_page.


◆ internal_insert_if_not_full()

bool tbb::internal::concurrent_queue_base_v3::internal_insert_if_not_full ( const void * src,
copy_specifics  op_type 
)
protected

Attempts to enqueue at tail of queue using specified operation (copy or move)

Definition at line 503 of file concurrent_queue.cpp.

503  {
504  concurrent_queue_rep& r = *my_rep;
505  ticket k = r.tail_counter;
506  for(;;) {
507  if( (ptrdiff_t)(k-r.head_counter)>=my_capacity ) {
508  // Queue is full
509  return false;
510  }
511  // Queue had empty slot with ticket k when we looked. Attempt to claim that slot.
512  ticket tk=k;
513  k = r.tail_counter.compare_and_swap( tk+1, tk );
514  if( k==tk )
515  break;
516  // Another thread claimed the slot, so retry.
517  }
518  r.choose(k).push(src, k, *this, op_type);
519  r.items_avail.notify( predicate_leq(k) );
520  return true;
521 }

References tbb::internal::concurrent_queue_rep::choose(), tbb::internal::atomic_impl< T >::compare_and_swap(), tbb::internal::concurrent_queue_rep::head_counter, tbb::internal::concurrent_queue_rep::items_avail, my_capacity, my_rep, tbb::internal::concurrent_monitor::notify(), tbb::internal::micro_queue::push(), and tbb::internal::concurrent_queue_rep::tail_counter.

Referenced by internal_push_if_not_full(), and tbb::internal::concurrent_queue_base_v8::internal_push_move_if_not_full().
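
The loop is a classic bounded ticket claim: read the tail, bail out if the queue looks full, then CAS the tail forward to own slot k, retrying when another producer wins. A generic sketch (our code, with std::atomic in place of TBB's atomics):

#include <atomic>
#include <cstddef>

// Sketch of the ticket-claim loop in internal_insert_if_not_full():
// claim tail slot k only while the queue is not full.
bool try_claim_tail( std::atomic<std::size_t>& tail,
                     const std::atomic<std::size_t>& head,
                     std::ptrdiff_t capacity,
                     std::size_t& k_out ) {
    std::size_t k = tail.load();
    for(;;) {
        if( std::ptrdiff_t( k - head.load() ) >= capacity )
            return false;                          // queue is full
        // On failure compare_exchange_weak reloads k with the current
        // tail, so fullness is re-checked with a fresh value.
        if( tail.compare_exchange_weak( k, k + 1 ) ) {
            k_out = k;                             // slot k is ours
            return true;
        }
    }
}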


◆ internal_insert_item()

void tbb::internal::concurrent_queue_base_v3::internal_insert_item ( const void * src,
copy_specifics  op_type 
)
protected

Enqueues item at tail of queue using specified operation (copy or move)

Definition at line 375 of file concurrent_queue.cpp.

375  {
376  concurrent_queue_rep& r = *my_rep;
377  unsigned old_abort_counter = r.abort_counter;
378  ticket k = r.tail_counter++;
379  ptrdiff_t e = my_capacity;
380 #if DO_ITT_NOTIFY
381  bool sync_prepare_done = false;
382 #endif
383  if( (ptrdiff_t)(k-r.head_counter)>=e ) { // queue is full
384 #if DO_ITT_NOTIFY
385  if( !sync_prepare_done ) {
386  ITT_NOTIFY( sync_prepare, &sync_prepare_done );
387  sync_prepare_done = true;
388  }
389 #endif
390  bool slept = false;
391  concurrent_monitor::thread_context thr_ctx;
392  r.slots_avail.prepare_wait( thr_ctx, ((ptrdiff_t)(k-e)) );
393  while( (ptrdiff_t)(k-r.head_counter)>=const_cast<volatile ptrdiff_t&>(e = my_capacity) ) {
394  __TBB_TRY {
395  if( r.abort_counter!=old_abort_counter ) {
396  r.slots_avail.cancel_wait( thr_ctx );
397  throw_exception( eid_user_abort );
398  }
399  slept = r.slots_avail.commit_wait( thr_ctx );
400  } __TBB_CATCH( tbb::user_abort& ) {
401  r.choose(k).abort_push(k, *this);
402  __TBB_RETHROW();
403  } __TBB_CATCH(...) {
404  __TBB_RETHROW();
405  }
406  if (slept == true) break;
407  r.slots_avail.prepare_wait( thr_ctx, ((ptrdiff_t)(k-e)) );
408  }
409  if( !slept )
410  r.slots_avail.cancel_wait( thr_ctx );
411  }
412  ITT_NOTIFY( sync_acquired, &sync_prepare_done );
413  __TBB_ASSERT( (ptrdiff_t)(k-r.head_counter)<my_capacity, NULL);
414  r.choose( k ).push( src, k, *this, op_type );
415  r.items_avail.notify( predicate_leq(k) );
416 }

References __TBB_ASSERT, __TBB_CATCH, __TBB_RETHROW, __TBB_TRY, tbb::internal::concurrent_queue_rep::abort_counter, tbb::internal::micro_queue::abort_push(), tbb::internal::concurrent_monitor::cancel_wait(), tbb::internal::concurrent_queue_rep::choose(), tbb::internal::concurrent_monitor::commit_wait(), tbb::internal::eid_user_abort, tbb::internal::concurrent_queue_rep::head_counter, tbb::internal::concurrent_queue_rep::items_avail, ITT_NOTIFY, my_capacity, my_rep, tbb::internal::concurrent_monitor::notify(), tbb::internal::concurrent_monitor::prepare_wait(), tbb::internal::micro_queue::push(), tbb::internal::concurrent_queue_rep::slots_avail, tbb::internal::concurrent_queue_rep::tail_counter, and tbb::internal::throw_exception().

Referenced by internal_push(), and tbb::internal::concurrent_queue_base_v8::internal_push_move().
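
In the public API this is the blocking tbb::concurrent_bounded_queue::push(); when the queue is at capacity the caller sleeps on slots_avail until a pop frees a slot. A usage sketch contrasting it with the non-blocking variant:

#include <cassert>
#include "tbb/concurrent_queue.h"

int main() {
    tbb::concurrent_bounded_queue<int> q;
    q.set_capacity( 1 );
    q.push( 1 );                  // internal_push(): succeeds, queue now full
    assert( !q.try_push( 2 ) );   // internal_push_if_not_full(): full, fails fast
    int x;
    q.pop( x );                   // frees a slot; a blocked push() would wake here
    assert( x == 1 );
    return 0;
}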


◆ internal_pop()

void tbb::internal::concurrent_queue_base_v3::internal_pop ( void * dst)
protected

Dequeue item from head of queue.

Definition at line 418 of file concurrent_queue.cpp.

418  {
419  concurrent_queue_rep& r = *my_rep;
420  ticket k;
421 #if DO_ITT_NOTIFY
422  bool sync_prepare_done = false;
423 #endif
424  unsigned old_abort_counter = r.abort_counter;
425  // This loop is a single pop operation; abort_counter should not be re-read inside
426  do {
427  k=r.head_counter++;
428  if ( (ptrdiff_t)(r.tail_counter-k)<=0 ) { // queue is empty
429 #if DO_ITT_NOTIFY
430  if( !sync_prepare_done ) {
431  ITT_NOTIFY( sync_prepare, dst );
432  sync_prepare_done = true;
433  }
434 #endif
435  bool slept = false;
436  concurrent_monitor::thread_context thr_ctx;
437  r.items_avail.prepare_wait( thr_ctx, k );
438  while( (ptrdiff_t)(r.tail_counter-k)<=0 ) {
439  __TBB_TRY {
440  if( r.abort_counter!=old_abort_counter ) {
441  r.items_avail.cancel_wait( thr_ctx );
442  throw_exception( eid_user_abort );
443  }
444  slept = r.items_avail.commit_wait( thr_ctx );
445  } __TBB_CATCH( tbb::user_abort& ) {
446  r.head_counter--;
447  __TBB_RETHROW();
448  } __TBB_CATCH(...) {
449  __TBB_RETHROW();
450  }
451  if (slept == true) break;
452  r.items_avail.prepare_wait( thr_ctx, k );
453  }
454  if( !slept )
455  r.items_avail.cancel_wait( thr_ctx );
456  }
457  __TBB_ASSERT((ptrdiff_t)(r.tail_counter-k)>0, NULL);
458  } while( !r.choose(k).pop(dst,k,*this) );
459 
460  // wake up a producer..
461  r.slots_avail.notify( predicate_leq(k) );
462 }

References __TBB_ASSERT, __TBB_CATCH, __TBB_RETHROW, __TBB_TRY, tbb::internal::concurrent_queue_rep::abort_counter, tbb::internal::concurrent_monitor::cancel_wait(), tbb::internal::concurrent_queue_rep::choose(), tbb::internal::concurrent_monitor::commit_wait(), tbb::internal::eid_user_abort, tbb::internal::concurrent_queue_rep::head_counter, tbb::internal::concurrent_queue_rep::items_avail, ITT_NOTIFY, my_rep, tbb::internal::concurrent_monitor::notify(), tbb::internal::micro_queue::pop(), tbb::internal::concurrent_monitor::prepare_wait(), tbb::internal::concurrent_queue_rep::slots_avail, tbb::internal::concurrent_queue_rep::tail_counter, and tbb::internal::throw_exception().

Referenced by tbb::concurrent_bounded_queue< T, A >::pop().
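
A usage sketch of the blocking pop through tbb::concurrent_bounded_queue: the consumer may start first and sleep on items_avail until the producer's push notifies it:

#include <cassert>
#include <thread>
#include "tbb/concurrent_queue.h"

int main() {
    tbb::concurrent_bounded_queue<int> q;
    int received = 0;
    std::thread consumer( [&] { q.pop( received ); } );  // blocks while empty
    q.push( 42 );                                        // notifies items_avail
    consumer.join();
    assert( received == 42 );
    return 0;
}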


◆ internal_pop_if_present()

bool tbb::internal::concurrent_queue_base_v3::internal_pop_if_present ( void * dst)
protected

Attempt to dequeue item from queue.

Returns false if there was no item to dequeue.

Definition at line 471 of file concurrent_queue.cpp.

471  {
472  concurrent_queue_rep& r = *my_rep;
473  ticket k;
474  do {
475  k = r.head_counter;
476  for(;;) {
477  if( (ptrdiff_t)(r.tail_counter-k)<=0 ) {
478  // Queue is empty
479  return false;
480  }
481  // Queue had item with ticket k when we looked. Attempt to get that item.
482  ticket tk=k;
483  k = r.head_counter.compare_and_swap( tk+1, tk );
484  if( k==tk )
485  break;
486  // Another thread snatched the item, retry.
487  }
488  } while( !r.choose( k ).pop( dst, k, *this ) );
489 
490  r.slots_avail.notify( predicate_leq(k) );
491 
492  return true;
493 }

References tbb::internal::concurrent_queue_rep::choose(), tbb::internal::atomic_impl< T >::compare_and_swap(), tbb::internal::concurrent_queue_rep::head_counter, my_rep, tbb::internal::concurrent_monitor::notify(), tbb::internal::micro_queue::pop(), tbb::internal::concurrent_queue_rep::slots_avail, and tbb::internal::concurrent_queue_rep::tail_counter.

Referenced by tbb::concurrent_bounded_queue< T, A >::try_pop().
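
Exposed as tbb::concurrent_bounded_queue::try_pop(), the non-blocking counterpart of pop(). A usage sketch:

#include <cassert>
#include "tbb/concurrent_queue.h"

int main() {
    tbb::concurrent_bounded_queue<int> q;
    int x = 0;
    assert( !q.try_pop( x ) );           // empty: internal_pop_if_present() fails
    q.push( 7 );
    assert( q.try_pop( x ) && x == 7 );  // item present: dequeued
    return 0;
}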


◆ internal_push()

void tbb::internal::concurrent_queue_base_v3::internal_push ( const void * src)
protected

Enqueue item at tail of queue using copy operation.

Definition at line 367 of file concurrent_queue.cpp.

367  {
368  internal_insert_item( src, copy );
369 }

References copy, and internal_insert_item().

Referenced by tbb::concurrent_bounded_queue< T, A >::push().


◆ internal_push_if_not_full()

bool tbb::internal::concurrent_queue_base_v3::internal_push_if_not_full ( const void * src)
protected

Attempt to enqueue item onto queue using copy operation.

Definition at line 495 of file concurrent_queue.cpp.

495  {
496  return internal_insert_if_not_full( src, copy );
497 }

References copy, and internal_insert_if_not_full().

Referenced by tbb::concurrent_bounded_queue< T, A >::concurrent_bounded_queue(), and tbb::concurrent_bounded_queue< T, A >::try_push().


◆ internal_set_capacity()

void tbb::internal::concurrent_queue_base_v3::internal_set_capacity ( ptrdiff_t  capacity,
size_t  element_size 
)
protected

Set the queue capacity.

Definition at line 535 of file concurrent_queue.cpp.

535  {
536  my_capacity = capacity<0 ? concurrent_queue_rep::infinite_capacity : capacity;
537 }

References tbb::internal::concurrent_queue_rep::infinite_capacity, and my_capacity.

Referenced by tbb::concurrent_bounded_queue< T, A >::set_capacity().
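
Exposed as tbb::concurrent_bounded_queue::set_capacity(); any negative argument maps to infinite_capacity, i.e. an effectively unbounded queue. Usage sketch:

#include "tbb/concurrent_queue.h"

int main() {
    tbb::concurrent_bounded_queue<int> q;
    q.set_capacity( 1000 );  // push() now blocks once 1000 items are queued
    q.set_capacity( -1 );    // back to unbounded (my_capacity = infinite_capacity)
    return 0;
}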


◆ internal_size()

ptrdiff_t tbb::internal::concurrent_queue_base_v3::internal_size ( ) const
protected

Get size of queue.

Definition at line 523 of file concurrent_queue.cpp.

523  {
524  __TBB_ASSERT( sizeof(ptrdiff_t)<=sizeof(size_t), NULL );
525  return ptrdiff_t(my_rep->tail_counter-my_rep->head_counter-my_rep->n_invalid_entries);
526 }

References __TBB_ASSERT, tbb::internal::concurrent_queue_rep::head_counter, my_rep, tbb::internal::concurrent_queue_rep::n_invalid_entries, and tbb::internal::concurrent_queue_rep::tail_counter.

Referenced by tbb::concurrent_bounded_queue< T, A >::size().
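
Because the result is tail_counter - head_counter - n_invalid_entries, a queue with pending blocking pops can report a negative size (each blocked pop has already claimed a head ticket); empty() is the reliable emptiness test. A sketch of the situation:

#include <thread>
#include "tbb/concurrent_queue.h"

int main() {
    tbb::concurrent_bounded_queue<int> q;
    // A blocked pop() has already advanced head_counter, so size() observed
    // from another thread at this point may legitimately be negative.
    std::thread consumer( [&] { int x; q.pop( x ); } );
    q.push( 1 );          // satisfies the pending pop
    consumer.join();
    return 0;
}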


◆ internal_swap()

void tbb::internal::concurrent_queue_base_v3::internal_swap ( concurrent_queue_base_v3 src)
inline protected

swap queues

Definition at line 916 of file _concurrent_queue_impl.h.

916  {
917  std::swap( my_capacity, src.my_capacity );
918  std::swap( items_per_page, src.items_per_page );
919  std::swap( item_size, src.item_size );
920  std::swap( my_rep, src.my_rep );
921  }

Referenced by tbb::concurrent_bounded_queue< T, A >::concurrent_bounded_queue().


◆ internal_throw_exception()

void tbb::internal::concurrent_queue_base_v3::internal_throw_exception ( ) const
protected

throw an exception

Definition at line 551 of file concurrent_queue.cpp.

551  {
552  throw_exception( eid_bad_alloc );
553 }

References tbb::internal::eid_bad_alloc, and tbb::internal::throw_exception().


Friends And Related Function Documentation

◆ concurrent_queue_iterator_base_v3

friend class concurrent_queue_iterator_base_v3
friend

Definition at line 834 of file _concurrent_queue_impl.h.

◆ concurrent_queue_iterator_rep

friend class concurrent_queue_iterator_rep
friend

Definition at line 833 of file _concurrent_queue_impl.h.

◆ concurrent_queue_rep

friend class concurrent_queue_rep
friend

Definition at line 830 of file _concurrent_queue_impl.h.

◆ micro_queue

friend struct micro_queue
friend

Definition at line 831 of file _concurrent_queue_impl.h.

◆ micro_queue_pop_finalizer

friend class micro_queue_pop_finalizer
friend

Definition at line 832 of file _concurrent_queue_impl.h.

Member Data Documentation

◆ item_size

size_t tbb::internal::concurrent_queue_base_v3::item_size
protected

Size of an item.

◆ items_per_page

size_t tbb::internal::concurrent_queue_base_v3::items_per_page
protected

Always a power of 2.

◆ my_capacity

ptrdiff_t tbb::internal::concurrent_queue_base_v3::my_capacity
protected

Capacity of the queue.

◆ my_rep

concurrent_queue_rep* tbb::internal::concurrent_queue_base_v3::my_rep
private

Internal representation.

The documentation for this class was generated from the following files:

_concurrent_queue_impl.h
concurrent_queue.cpp

Copyright © 2005-2019 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.