Lines Matching +full:active +full:-
2 * Copyright 2011-2015 Samy Al Bahra.
29 * Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
44 * acquired some snapshot (e) of the global epoch value (e_g) and set an active
46 * For example, assume an initial e_g value of 1, e value of 0 and active value
51 * active = 1
54 * Any serialized reads may observe e = 0 or e = 1 with active = 0, or e = 0 or
55 * e = 1 with active = 1. The e_g value can only go from 1 to 2 if every thread
57 * from). This guarantees us that for any given value e_g, any threads within
58 * critical sections (referred to as "active" threads from here on) would have
59 * an e value of e_g-1 or e_g. This also means that hazardous references may be
60 * shared in both e_g-1 and e_g even if they are logically deleted in e_g.
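
As a minimal sketch of the enter/exit protocol the comment describes (e = e_g, active = 1, memory barrier), using ck_pr primitives that already appear in this file. struct epoch_slot, epoch_enter() and epoch_exit() are illustrative names only; the real ck_epoch_begin()/ck_epoch_end() order these stores with more precise fences.

    #include <ck_pr.h>

    struct epoch_slot {            /* illustrative stand-in for a per-thread record */
        unsigned int epoch;        /* e: snapshot of the global epoch e_g */
        unsigned int active;       /* non-zero while inside a critical section */
    };

    static void
    epoch_enter(unsigned int *e_g, struct epoch_slot *self)
    {

        /* e = e_g; active = 1; memory_barrier(); as described above. */
        ck_pr_store_uint(&self->epoch, ck_pr_load_uint(e_g));
        ck_pr_store_uint(&self->active, 1);
        ck_pr_fence_memory();
        return;
    }

    static void
    epoch_exit(struct epoch_slot *self)
    {

        /* Clearing active lets the reclaimer advance past our snapshot. */
        ck_pr_fence_memory();
        ck_pr_store_uint(&self->active, 0);
        return;
    }
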
65 * executing some hash table look-ups, while some other writer thread (which
68 * This is possible if the writer thread re-observes the epoch after the
71 * Pseudo-code for writer:
79 * Pseudo-code for reader:
82 * ck_pr_inc(&x->value);
85 * Of course, it is also possible for references logically deleted at e_g-1 to
86 * still be accessed at e_g as threads are "active" at the same time
87 * (real-world time) mutating shared objects.
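
The reader/writer pseudo-code above is mostly elided in this listing. Purely as a hedged illustration, the same shape expressed against the public ck_epoch interface might look as follows; struct box, read_box() and replace_box() are invented for the example, and the two-argument ck_epoch_begin()/ck_epoch_end() and single-argument ck_epoch_synchronize() prototypes are assumed from the ck_epoch.h matching this revision.

    #include <ck_epoch.h>
    #include <ck_pr.h>
    #include <stdlib.h>

    struct box {
        unsigned int value;
    };

    /* Read side: the hazardous reference is only dereferenced between begin/end. */
    static unsigned int
    read_box(ck_epoch_record_t *record, struct box **slot)
    {
        struct box *b;
        unsigned int v;

        ck_epoch_begin(record, NULL);
        b = ck_pr_load_ptr(slot);
        v = ck_pr_load_uint(&b->value);
        ck_epoch_end(record, NULL);
        return v;
    }

    /* Write side: logically delete by swapping the pointer, wait out a grace
     * period, then physically delete. */
    static void
    replace_box(ck_epoch_record_t *record, struct box **slot, struct box *update)
    {
        struct box *old;

        old = ck_pr_fas_ptr(slot, update);
        ck_epoch_synchronize(record);   /* no reader can still hold "old" */
        free(old);
        return;
    }
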
90 * references could exist to objects logically deleted at e_g-1. The reason for
91 * this is that at e_g+1, all epoch read-side critical sections started at
92 * e_g-1 must have been completed. If any epoch read-side critical sections at
93 * e_g-1 were still active, then we would never increment to e_g+1 (active != 0
95 * objects logically deleted at e_g-1 which means objects logically deleted at
96 * e_g-1 cannot be deleted at e_g+1 unless all threads have observed e_g+1
97 * (since it is valid for active threads to be at e_g and threads at e_g still
100 * However, at e_g+2, all active threads must be either at e_g+1 or e_g+2.
102 * hazardous references to e_g, no active threads are at e_g or e_g-1. This
103 * means no hazardous references could exist to objects deleted at e_g-1 (at
107 * 1) Active threads will always have an e value of e_g or e_g-1.
108 * 2) Items that are logically deleted at e_g or e_g-1 cannot be physically
110 * 3) Objects logically deleted at e_g-1 can be physically destroyed at e_g+2
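
The practical consequence of properties 1-3 above, and the baseline rule the dispatch path further down applies: an entry deferred while the global epoch was e becomes safe once the global epoch has advanced two ticks past e (the epoch - 2 bucket handed to ck_epoch_dispatch()); when no readers are active at all, newer buckets may be handled earlier. A hedged sketch of that predicate (grace_period_elapsed() is an invented name), with unsigned subtraction keeping the check wrap-around safe:

    #include <stdbool.h>

    /* Illustrative only; mirrors the epoch - 2 dispatch in ck_epoch_poll_deferred(). */
    static bool
    grace_period_elapsed(unsigned int global_epoch, unsigned int deferral_epoch)
    {

        return (global_epoch - deferral_epoch) >= 2;
    }
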
113 * Last but not least, if we are at e_g+2, then no active thread is at e_g
114 * which means it is safe to apply modulo-3 arithmetic to the e_g value in order to
115 * re-use e_g to represent the e_g+3 state. This means it is sufficient to
116 * represent e_g using only the values 0, 1 or 2. Every time a thread re-visits
117 * an e_g (which can be determined by a non-empty deferral list) it can assume
123 * must be able to more gracefully handle bursty write workloads which could
124 * easily cause e_g wrap-around if modulo-3 arithmetic is used. This can lead to
125 * easy-to-trigger live-lock situations. The work-around for this is to not
131 * CK_EPOCH_LENGTH must be a power-of-2 (because (CK_EPOCH_LENGTH - 1) is used
134 #if (CK_EPOCH_LENGTH < 3 || (CK_EPOCH_LENGTH & (CK_EPOCH_LENGTH - 1)) != 0)
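
Because CK_EPOCH_LENGTH is a power of two, (CK_EPOCH_LENGTH - 1) works as a mask and bucket selection reduces to a single AND. A small illustration of the equivalence; epoch_bucket() is an invented name, CK_EPOCH_LENGTH comes from ck_epoch.h:

    #include <assert.h>
    #include <ck_epoch.h>

    static unsigned int
    epoch_bucket(unsigned int e)
    {
        unsigned int bucket = e & (CK_EPOCH_LENGTH - 1);

        /* Holds for any power-of-two CK_EPOCH_LENGTH. */
        assert(bucket == e % CK_EPOCH_LENGTH);
        return bucket;
    }
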
148 #define CK_EPOCH_SENSE_MASK (CK_EPOCH_SENSE - 1)
155 unsigned int i = section->bucket; in _ck_epoch_delref()
157 current = &record->local.bucket[i]; in _ck_epoch_delref()
158 current->count--; in _ck_epoch_delref()
160 if (current->count > 0) in _ck_epoch_delref()
169 * If no other active bucket exists, then the record will go in _ck_epoch_delref()
172 other = &record->local.bucket[(i + 1) & CK_EPOCH_SENSE_MASK]; in _ck_epoch_delref()
173 if (other->count > 0 && in _ck_epoch_delref()
174 ((int)(current->epoch - other->epoch) < 0)) { in _ck_epoch_delref()
179 ck_pr_store_uint(&record->epoch, other->epoch); in _ck_epoch_delref()
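
The (int)(current->epoch - other->epoch) < 0 test above is the usual wrap-around-safe ordering check on unsigned epoch counters; the same idiom in isolation (epoch_older() is an invented name):

    #include <stdbool.h>

    /* True if a is older than b, even across unsigned wrap-around, provided
     * the two counters are fewer than UINT_MAX / 2 ticks apart. */
    static bool
    epoch_older(unsigned int a, unsigned int b)
    {

        return (int)(a - b) < 0;
    }
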
189 struct ck_epoch *global = record->global; in _ck_epoch_addref()
193 epoch = ck_pr_load_uint(&global->epoch); in _ck_epoch_addref()
195 ref = &record->local.bucket[i]; in _ck_epoch_addref()
197 if (ref->count++ == 0) { in _ck_epoch_addref()
202 * The system has already ticked. If another non-zero bucket in _ck_epoch_addref()
208 * and load-{store, load} ordering are sufficient to guarantee in _ck_epoch_addref()
211 previous = &record->local.bucket[(i + 1) & in _ck_epoch_addref()
213 if (previous->count > 0) in _ck_epoch_addref()
221 ref->epoch = epoch; in _ck_epoch_addref()
224 section->bucket = i; in _ck_epoch_addref()
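
_ck_epoch_addref() and _ck_epoch_delref() are the bucket bookkeeping behind section-based read sections. A hedged usage sketch, assuming the (record, section) forms of ck_epoch_begin()/ck_epoch_end() from the matching header; protected_walk() is an invented name:

    #include <ck_epoch.h>

    static void
    protected_walk(ck_epoch_record_t *record)
    {
        ck_epoch_section_t section;

        ck_epoch_begin(record, &section);   /* lands in _ck_epoch_addref() */
        /* ...dereference epoch-protected pointers here... */
        ck_epoch_end(record, &section);     /* lands in _ck_epoch_delref() */
        return;
    }
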
232 ck_stack_init(&global->records); in ck_epoch_init()
233 global->epoch = 1; in ck_epoch_init()
234 global->n_free = 0; in ck_epoch_init()
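
A hedged setup sketch matching the code above: a single-argument ck_epoch_init() plus the three-argument ck_epoch_register() implied by the ct parameter later in this file. epoch_setup() and the two globals are invented names:

    #include <ck_epoch.h>

    static ck_epoch_t global_epoch;
    static ck_epoch_record_t main_record;

    static void
    epoch_setup(void)
    {

        ck_epoch_init(&global_epoch);   /* the global epoch counter starts at 1 */
        ck_epoch_register(&global_epoch, &main_record, NULL);
        return;
    }
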
246 if (ck_pr_load_uint(&global->n_free) == 0) in ck_epoch_recycle()
249 CK_STACK_FOREACH(&global->records, cursor) { in ck_epoch_recycle()
252 if (ck_pr_load_uint(&record->state) == CK_EPOCH_STATE_FREE) { in ck_epoch_recycle()
253 /* Serialize with respect to deferral list clean-up. */ in ck_epoch_recycle()
255 state = ck_pr_fas_uint(&record->state, in ck_epoch_recycle()
258 ck_pr_dec_uint(&global->n_free); in ck_epoch_recycle()
259 ck_pr_store_ptr(&record->ct, ct); in ck_epoch_recycle()
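
ck_epoch_recycle() hands back a record whose previous owner unregistered it, which avoids growing the record stack for short-lived threads. A hedged attach pattern, assuming the two-argument ck_epoch_recycle() suggested by the ct store above; thread_attach() is an invented name and malloc() stands in for whatever allocator is appropriate:

    #include <ck_epoch.h>
    #include <stdlib.h>

    static ck_epoch_record_t *
    thread_attach(ck_epoch_t *global)
    {
        ck_epoch_record_t *record;

        /* Prefer recycling a FREE record over registering a fresh one. */
        record = ck_epoch_recycle(global, NULL);
        if (record != NULL)
            return record;

        record = malloc(sizeof *record);
        if (record == NULL)
            return NULL;

        ck_epoch_register(global, record, NULL);
        return record;
    }
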
279 record->global = global; in ck_epoch_register()
280 record->state = CK_EPOCH_STATE_USED; in ck_epoch_register()
281 record->active = 0; in ck_epoch_register()
282 record->epoch = 0; in ck_epoch_register()
283 record->n_dispatch = 0; in ck_epoch_register()
284 record->n_peak = 0; in ck_epoch_register()
285 record->n_pending = 0; in ck_epoch_register()
286 record->ct = ct; in ck_epoch_register()
287 memset(&record->local, 0, sizeof record->local); in ck_epoch_register()
290 ck_stack_init(&record->pending[i]); in ck_epoch_register()
293 ck_stack_push_upmc(&global->records, &record->record_next); in ck_epoch_register()
300 struct ck_epoch *global = record->global; in ck_epoch_unregister()
303 record->active = 0; in ck_epoch_unregister()
304 record->epoch = 0; in ck_epoch_unregister()
305 record->n_dispatch = 0; in ck_epoch_unregister()
306 record->n_peak = 0; in ck_epoch_unregister()
307 record->n_pending = 0; in ck_epoch_unregister()
308 memset(&record->local, 0, sizeof record->local); in ck_epoch_unregister()
311 ck_stack_init(&record->pending[i]); in ck_epoch_unregister()
313 ck_pr_store_ptr(&record->ct, NULL); in ck_epoch_unregister()
315 ck_pr_store_uint(&record->state, CK_EPOCH_STATE_FREE); in ck_epoch_unregister()
316 ck_pr_inc_uint(&global->n_free); in ck_epoch_unregister()
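
ck_epoch_unregister() resets the record and returns it to the FREE pool that ck_epoch_recycle() draws from; since it also re-initializes the pending stacks, a hedged detach counterpart to the attach sketch above drains deferrals first. thread_detach() is an invented name and the single-argument ck_epoch_barrier() prototype is assumed:

    #include <ck_epoch.h>

    static void
    thread_detach(ck_epoch_record_t *record)
    {

        /* Flush this record's deferrals before the pending stacks are reset. */
        ck_epoch_barrier(record);
        ck_epoch_unregister(record);
        return;
    }
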
329 cursor = CK_STACK_FIRST(&global->records); in ck_epoch_scan()
332 cursor = &cr->record_next; in ck_epoch_scan()
337 unsigned int state, active; in ck_epoch_scan() local
341 state = ck_pr_load_uint(&cr->state); in ck_epoch_scan()
347 active = ck_pr_load_uint(&cr->active); in ck_epoch_scan()
348 *af |= active; in ck_epoch_scan()
350 if (active != 0 && ck_pr_load_uint(&cr->epoch) != epoch) in ck_epoch_scan()
362 unsigned int epoch = e & (CK_EPOCH_LENGTH - 1); in ck_epoch_dispatch()
367 head = ck_stack_batch_pop_upmc(&record->pending[epoch]); in ck_epoch_dispatch()
374 ck_stack_push_spnc(deferred, &entry->stack_entry); in ck_epoch_dispatch()
376 entry->function(entry); in ck_epoch_dispatch()
381 n_peak = ck_pr_load_uint(&record->n_peak); in ck_epoch_dispatch()
382 n_pending = ck_pr_load_uint(&record->n_pending); in ck_epoch_dispatch()
386 ck_pr_store_uint(&record->n_peak, n_peak); in ck_epoch_dispatch()
389 ck_pr_add_uint(&record->n_dispatch, i); in ck_epoch_dispatch()
390 ck_pr_sub_uint(&record->n_pending, i); in ck_epoch_dispatch()
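
The entries popped and executed above are queued with ck_epoch_call(). A hedged sketch of that deferral side, assuming the CK_EPOCH_CONTAINER() convenience macro and the (record, entry, function) form of ck_epoch_call() from the matching header; struct object and the helper names are invented:

    #include <ck_epoch.h>
    #include <stdlib.h>

    struct object {
        int payload;
        ck_epoch_entry_t epoch_entry;
    };

    /* Generates object_container(), mapping an entry back to its object. */
    CK_EPOCH_CONTAINER(struct object, epoch_entry, object_container)

    /* Matches the entry->function(entry) call in ck_epoch_dispatch() above. */
    static void
    object_destroy(ck_epoch_entry_t *entry)
    {

        free(object_container(entry));
        return;
    }

    static void
    object_retire(ck_epoch_record_t *record, struct object *o)
    {

        /* The object must already be unreachable to new readers. */
        ck_epoch_call(record, &o->epoch_entry, object_destroy);
        return;
    }
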
422 * This function must not be called within a read section.
430 bool active; in ck_epoch_synchronize_wait() local
436 * all prior operations. The re-ordering of loads is permitted given in ck_epoch_synchronize_wait()
440 * to encounter an ABA-issue. If this is a concern, consider tuning in ck_epoch_synchronize_wait()
441 * write-side concurrency. in ck_epoch_synchronize_wait()
443 delta = epoch = ck_pr_load_uint(&global->epoch); in ck_epoch_synchronize_wait()
446 for (i = 0, cr = NULL; i < CK_EPOCH_GRACE - 1; cr = NULL, i++) { in ck_epoch_synchronize_wait()
453 while (cr = ck_epoch_scan(global, cr, delta, &active), in ck_epoch_synchronize_wait()
463 e_d = ck_pr_load_uint(&global->epoch); in ck_epoch_synchronize_wait()
491 if (active == false) in ck_epoch_synchronize_wait()
499 * If we can guarantee there will only be one active barrier or in ck_epoch_synchronize_wait()
501 * increment operation. In a multi-barrier workload, however, in ck_epoch_synchronize_wait()
503 * modulo-3 arithmetic. in ck_epoch_synchronize_wait()
505 r = ck_pr_cas_uint_value(&global->epoch, delta, delta + 1, in ck_epoch_synchronize_wait()
508 /* Order subsequent thread active checks. */ in ck_epoch_synchronize_wait()
519 * A majority of use-cases will not require full barrier semantics. in ck_epoch_synchronize_wait()
520 * However, if non-temporal instructions are used, full barrier in ck_epoch_synchronize_wait()
532 ck_epoch_synchronize_wait(record->global, NULL, NULL); in ck_epoch_synchronize()
550 ck_epoch_synchronize_wait(record->global, cb, ct); in ck_epoch_barrier_wait()
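
As a hedged summary of how the two blocking entry points above are typically used: ck_epoch_synchronize() only waits out a grace period, while ck_epoch_barrier() additionally dispatches the caller's pending deferrals. retire_now() and drain() are invented names; the single-record prototypes are assumed from this revision.

    #include <ck_epoch.h>
    #include <stdlib.h>

    /* Wait for a grace period, then reclaim immediately. */
    static void
    retire_now(ck_epoch_record_t *record, void *garbage)
    {

        ck_epoch_synchronize(record);
        free(garbage);
        return;
    }

    /* Make sure every deferral queued on this record has been dispatched. */
    static void
    drain(ck_epoch_record_t *record)
    {

        ck_epoch_barrier(record);
        return;
    }
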
568 bool active; in ck_epoch_poll_deferred() local
571 struct ck_epoch *global = record->global; in ck_epoch_poll_deferred()
574 epoch = ck_pr_load_uint(&global->epoch); in ck_epoch_poll_deferred()
581 * There may or may not be active threads which observed epoch - 1. in ck_epoch_poll_deferred()
583 * no active threads which observed epoch - 2. in ck_epoch_poll_deferred()
585 * Note that checking epoch - 2 is necessary, as race conditions can in ck_epoch_poll_deferred()
589 n_dispatch = ck_epoch_dispatch(record, epoch - 2, deferred); in ck_epoch_poll_deferred()
591 cr = ck_epoch_scan(global, cr, epoch, &active); in ck_epoch_poll_deferred()
596 if (active == false) { in ck_epoch_poll_deferred()
597 record->epoch = epoch; in ck_epoch_poll_deferred()
605 * If an active thread exists, rely on epoch observation. in ck_epoch_poll_deferred()
607 * All the active threads entered the epoch section during in ck_epoch_poll_deferred()
612 (void)ck_pr_cas_uint(&global->epoch, epoch, epoch + 1); in ck_epoch_poll_deferred()
614 ck_epoch_dispatch(record, epoch - 1, deferred); in ck_epoch_poll_deferred()
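
ck_epoch_poll_deferred() is the non-blocking path. A hedged sketch of calling it periodically from a worker loop through the single-argument ck_epoch_poll() wrapper assumed to accompany it; reclaim_tick() is an invented name:

    #include <ck_epoch.h>

    static void
    reclaim_tick(ck_epoch_record_t *record)
    {

        /* Non-blocking: dispatches whatever has become safe and tries to
         * advance the global epoch, without waiting on active readers. */
        (void)ck_epoch_poll(record);
        return;
    }
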