Lines Matching full:epoch

44  * acquired some snapshot (e) of the global epoch value (e_g) and set an active
66 * causes epoch counter tick) actually deletes the same items that reader
68 * This is possible if the writer thread re-observes the epoch after the
89 * Now, if the epoch counter is ticked to e_g+1, then no new hazardous
91 * this is that at e_g+1, all epoch read-side critical sections started at
92 * e_g-1 must have been completed. If any epoch read-side critical sections at
121 * Blocking semantics for epoch reclamation have additional restrictions.
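
The comment excerpted above states the core invariant: a reader snapshots the global epoch value (e_g) into its record and marks itself active; once the counter has ticked to e_g+1, every read-side section that began at e_g-1 must already have completed, so nothing retired two generations ago can still be referenced. The following is a minimal stand-alone sketch of that protocol using C11 atomics; global_epoch, epoch_record, reader_enter, reader_exit and writer_can_free are illustrative names, not the ck_epoch API, and the fence placement only approximates what the real implementation does.

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned int global_epoch = 1;

struct epoch_record {
	_Atomic unsigned int epoch;   /* snapshot (e) of the global epoch */
	_Atomic unsigned int active;  /* non-zero while inside a read section */
};

static void
reader_enter(struct epoch_record *r)
{
	/* Publish the snapshot and the active flag before any loads of
	 * shared objects are allowed to proceed. */
	atomic_store(&r->epoch, atomic_load(&global_epoch));
	atomic_store(&r->active, 1);
	atomic_thread_fence(memory_order_seq_cst);
}

static void
reader_exit(struct epoch_record *r)
{
	atomic_store_explicit(&r->active, 0, memory_order_release);
}

/*
 * An object retired while the global epoch was e may be freed once the
 * counter has advanced by two ticks: any section that could still see it
 * started no later than e, and ticking past e + 1 required all such
 * sections to have completed.
 */
static bool
writer_can_free(unsigned int retire_epoch)
{
	return (int)(atomic_load(&global_epoch) - retire_epoch) >= 2;
}
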
166 * epoch. If so, then make sure to update our shared snapshot in CK_STACK_CONTAINER()
174 ((int)(current->epoch - other->epoch) < 0)) { in CK_STACK_CONTAINER()
176 * The other epoch value is actually the newest, in CK_STACK_CONTAINER()
179 ck_pr_store_uint(&record->epoch, other->epoch); in CK_STACK_CONTAINER()
191 unsigned int epoch, i; in _ck_epoch_addref() local
193 epoch = ck_pr_load_uint(&global->epoch); in _ck_epoch_addref()
194 i = epoch & CK_EPOCH_SENSE_MASK; in _ck_epoch_addref()
205 * from the previous epoch generation. in _ck_epoch_addref()
219 * bucket then cache the associated epoch value. in _ck_epoch_addref()
221 ref->epoch = epoch; in _ck_epoch_addref()
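
As the lines above show, _ck_epoch_addref selects one of the per-record sense buckets with the low bits of the observed epoch and caches that epoch on the first reference into the bucket; the earlier CK_STACK_CONTAINER()-attributed lines show the matching wind-down path moving the record to the other bucket's epoch when that value is actually the newer one. A rough sketch of the bucket selection follows, with illustrative names (epoch_ref, epoch_local, addref) rather than the real structure layout:

#define EPOCH_SENSE      2
#define EPOCH_SENSE_MASK (EPOCH_SENSE - 1)

struct epoch_ref {
	unsigned int epoch;  /* epoch cached when this bucket became live */
	unsigned int count;  /* nesting depth of sections using this bucket */
};

struct epoch_local {
	struct epoch_ref bucket[EPOCH_SENSE];
};

/* Returns the bucket index so the matching delref can find it again. */
static unsigned int
addref(struct epoch_local *local, unsigned int epoch)
{
	/* The low bit of the observed epoch picks the bucket, so sections
	 * that began in adjacent epoch generations are tracked separately
	 * and a new generation never disturbs a still-referenced older one. */
	unsigned int i = epoch & EPOCH_SENSE_MASK;
	struct epoch_ref *ref = &local->bucket[i];

	if (ref->count++ == 0)
		ref->epoch = epoch;   /* first reference: cache the epoch */

	return i;
}
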
233 global->epoch = 1; in ck_epoch_init()
282 record->epoch = 0; in ck_epoch_register()
304 record->epoch = 0; in ck_epoch_unregister()
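
ck_epoch_init, ck_epoch_register and ck_epoch_unregister above manage the global epoch object and the per-thread records that the rest of the file operates on. For orientation, a hedged usage sketch of the public API follows; exact signatures vary between ck releases (recent ones add a context argument to ck_epoch_register), and the container-of step in the destructor is this example's own convention, not something ck_epoch prescribes.

#include <ck_epoch.h>
#include <stddef.h>
#include <stdlib.h>

static ck_epoch_t epoch_domain;

struct node {
	int value;
	ck_epoch_entry_t entry;   /* embedded so ck_epoch_call can queue it */
};

static void
node_destroy(ck_epoch_entry_t *e)
{
	/* Recover the enclosing object; this container-of step is the
	 * example's own convention. */
	struct node *n = (struct node *)((char *)e - offsetof(struct node, entry));

	free(n);
}

static void
setup(ck_epoch_record_t *record)
{
	ck_epoch_init(&epoch_domain);
	/* Recent ck releases take a third context argument here; older
	 * releases take only the first two. */
	ck_epoch_register(&epoch_domain, record, NULL);
}

static void
example(ck_epoch_record_t *record, struct node *unlinked)
{
	/* Read-side critical section (a NULL section is the simple,
	 * non-nestable form). */
	ck_epoch_begin(record, NULL);
	/* ... dereference shared objects here ... */
	ck_epoch_end(record, NULL);

	/* Defer destruction of an already-unlinked node until a grace
	 * period has passed, then try to run any ready callbacks. */
	ck_epoch_call(record, &unlinked->entry, node_destroy);
	ck_epoch_poll(record);
}
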
323 unsigned int epoch, in ck_epoch_scan() argument
350 if (active != 0 && ck_pr_load_uint(&cr->epoch) != epoch) in ck_epoch_scan()
362 unsigned int epoch = e & (CK_EPOCH_LENGTH - 1); in ck_epoch_dispatch() local
367 head = ck_stack_batch_pop_upmc(&record->pending[epoch]); in ck_epoch_dispatch()
402 unsigned int epoch; in ck_epoch_reclaim() local
404 for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++) in ck_epoch_reclaim()
405 ck_epoch_dispatch(record, epoch, NULL); in ck_epoch_reclaim()
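
ck_epoch_dispatch pops the pending bucket selected by e & (CK_EPOCH_LENGTH - 1) and runs each deferred callback, and ck_epoch_reclaim simply does that for every bucket. A single-threaded sketch of the same shape, using an illustrative record/retired layout and a plain pointer swap where the real code uses ck_stack_batch_pop_upmc:

#define EPOCH_LENGTH 4   /* stand-in for CK_EPOCH_LENGTH */

struct retired {
	struct retired *next;
	void (*cb)(struct retired *);
};

struct record {
	struct retired *pending[EPOCH_LENGTH];  /* per-epoch retire lists */
};

/* Run every callback queued under epoch value e. */
static unsigned int
dispatch(struct record *r, unsigned int e)
{
	unsigned int bucket = e & (EPOCH_LENGTH - 1);
	struct retired *head = r->pending[bucket];
	unsigned int n = 0;

	r->pending[bucket] = NULL;
	while (head != NULL) {
		struct retired *next = head->next;

		head->cb(head);
		head = next;
		n++;
	}

	return n;
}

/* Counterpart of ck_epoch_reclaim: flush every bucket unconditionally. */
static void
reclaim(struct record *r)
{
	for (unsigned int e = 0; e < EPOCH_LENGTH; e++)
		dispatch(r, e);
}
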
429 unsigned int delta, epoch, goal, i; in ck_epoch_synchronize_wait() local
435 * The observation of the global epoch must be ordered with respect to in ck_epoch_synchronize_wait()
437 * monotonicity of the global epoch counter. in ck_epoch_synchronize_wait()
443 delta = epoch = ck_pr_load_uint(&global->epoch); in ck_epoch_synchronize_wait()
444 goal = epoch + CK_EPOCH_GRACE; in ck_epoch_synchronize_wait()
451 * epoch with respect to the updates on invocation. in ck_epoch_synchronize_wait()
463 e_d = ck_pr_load_uint(&global->epoch); in ck_epoch_synchronize_wait()
470 * If the epoch has been updated, we may have already in ck_epoch_synchronize_wait()
474 if ((goal > epoch) & (delta >= goal)) in ck_epoch_synchronize_wait()
480 * If the epoch has been updated, then a grace period in ck_epoch_synchronize_wait()
482 * same epoch. in ck_epoch_synchronize_wait()
495 * Increment current epoch. CAS semantics are used to eliminate in ck_epoch_synchronize_wait()
497 * same global epoch value snapshot. in ck_epoch_synchronize_wait()
500 * epoch tick at a given time, then it is sufficient to use an in ck_epoch_synchronize_wait()
502 * it is possible to overflow the epoch value if we apply in ck_epoch_synchronize_wait()
505 r = ck_pr_cas_uint_value(&global->epoch, delta, delta + 1, in ck_epoch_synchronize_wait()
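
ck_epoch_synchronize_wait snapshots the global epoch, sets a goal several ticks beyond it, and repeatedly waits for lagging readers before attempting a CAS-based tick, for the reason the comment gives: several waiters may hold the same snapshot, and letting each of them increment blindly would advance (and could eventually overflow) the counter by more than the single tick that was actually earned. A compressed sketch of that loop follows; reader_lagging and GRACE are assumed stand-ins, and the real function's yielding, waiting and overflow special cases are omitted.

#include <stdatomic.h>
#include <stdbool.h>

#define GRACE 3U                 /* stand-in for CK_EPOCH_GRACE */

extern _Atomic unsigned int global_epoch;
/* True while some reader is still active in an epoch older than `epoch`. */
extern bool reader_lagging(unsigned int epoch);

static void
synchronize(void)
{
	unsigned int delta = atomic_load(&global_epoch);
	unsigned int goal = delta + GRACE;

	for (;;) {
		/* Wait for every reader to catch up to the current tick. */
		while (reader_lagging(delta))
			;   /* the real code may yield or block here */

		/*
		 * CAS rather than a blind increment: only one of the waiters
		 * holding this snapshot gets to apply the tick.
		 */
		unsigned int expected = delta;
		if (atomic_compare_exchange_strong(&global_epoch, &expected,
		    delta + 1) == true)
			delta = delta + 1;
		else
			delta = expected;   /* another thread ticked; adopt it */

		/* A full grace period has elapsed once the goal is reached. */
		if ((int)(delta - goal) >= 0)
			break;
	}
}
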
556 * It may be worth it to actually apply these deferral semantics to an epoch
560 * ck_epoch_call will dispatch to the latest epoch snapshot that was observed.
562 * becomes a problem, we could actually use a heap for epoch buckets but that
569 unsigned int epoch; in ck_epoch_poll_deferred() local
574 epoch = ck_pr_load_uint(&global->epoch); in ck_epoch_poll_deferred()
576 /* Serialize epoch snapshots with respect to global epoch. */ in ck_epoch_poll_deferred()
580 * At this point, epoch is the current global epoch value. in ck_epoch_poll_deferred()
581 * There may or may not be active threads which observed epoch - 1. in ck_epoch_poll_deferred()
583 * no active threads which observed epoch - 2. in ck_epoch_poll_deferred()
585 * Note that checking epoch - 2 is necessary, as race conditions can in ck_epoch_poll_deferred()
586 * allow another thread to increment the global epoch before this in ck_epoch_poll_deferred()
589 n_dispatch = ck_epoch_dispatch(record, epoch - 2, deferred); in ck_epoch_poll_deferred()
591 cr = ck_epoch_scan(global, cr, epoch, &active); in ck_epoch_poll_deferred()
597 record->epoch = epoch; in ck_epoch_poll_deferred()
598 for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++) in ck_epoch_poll_deferred()
599 ck_epoch_dispatch(record, epoch, deferred); in ck_epoch_poll_deferred()
605 * If an active thread exists, rely on epoch observation. in ck_epoch_poll_deferred()
607 * All the active threads entered the epoch section during in ck_epoch_poll_deferred()
608 * the current epoch. Therefore, we can now run the handlers in ck_epoch_poll_deferred()
609 * for the immediately preceding epoch and attempt to in ck_epoch_poll_deferred()
610 * advance the epoch if it hasn't been already. in ck_epoch_poll_deferred()
612 (void)ck_pr_cas_uint(&global->epoch, epoch, epoch + 1); in ck_epoch_poll_deferred()
614 ck_epoch_dispatch(record, epoch - 1, deferred); in ck_epoch_poll_deferred()
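
Putting the pieces of ck_epoch_poll_deferred together: the epoch - 2 bucket is always safe, a scan then decides whether any readers exist and whether any of them still hold an older snapshot, and only in the fully-caught-up case is the epoch - 1 bucket dispatched and a single CAS tick attempted. The sketch below mirrors that control flow using the names assumed in the earlier sketches (dispatch, global_epoch, EPOCH_LENGTH) plus a hypothetical scan_readers in place of ck_epoch_scan; it omits the deferred list and record bookkeeping of the real function.

#include <stdatomic.h>
#include <stdbool.h>

#define EPOCH_LENGTH 4

extern _Atomic unsigned int global_epoch;

struct record;                                            /* per-thread record from the dispatch sketch */
extern unsigned int dispatch(struct record *, unsigned int);
/* Stand-in for ck_epoch_scan: reports whether any reader is active at all,
 * and whether any active reader still holds an older snapshot. */
extern void scan_readers(unsigned int epoch, bool *active, bool *lagging);

static bool
poll_record(struct record *r)
{
	unsigned int epoch = atomic_load(&global_epoch);
	bool active, lagging;

	/* Serialize the snapshot with respect to the scan below. */
	atomic_thread_fence(memory_order_seq_cst);

	/* The epoch - 2 bucket is unconditionally safe to run. */
	dispatch(r, epoch - 2);

	scan_readers(epoch, &active, &lagging);
	if (lagging == true)
		return true;   /* an older reader remains; stop here */

	if (active == false) {
		/* No readers at all: every pending bucket is safe. */
		for (unsigned int e = 0; e < EPOCH_LENGTH; e++)
			dispatch(r, e);
		return true;
	}

	/*
	 * Every active reader entered during the current epoch, so the
	 * preceding bucket is also safe, and the epoch may be advanced
	 * unless another thread has advanced it already.
	 */
	unsigned int expected = epoch;
	(void)atomic_compare_exchange_strong(&global_epoch, &expected, epoch + 1);
	dispatch(r, epoch - 1);
	return true;
}
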