Lines Matching full:epoch
48 * epoch sections.
81 unsigned int epoch; member
89 unsigned int epoch; member
103 unsigned int epoch; member
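The three unsigned int epoch; members above (header lines 81, 89, 103) suggest the counter is tracked at three levels: the global state, each thread's record, and a reference snapshot. A minimal sketch of that relationship follows, with field sets abbreviated and the structure names assumed from upstream ck_epoch.h rather than confirmed by this listing:

/* Abbreviated sketch; names and layouts are assumptions based on
 * upstream ck_epoch.h, not this exact revision. */
struct ck_epoch {
	unsigned int epoch;		/* Global, monotonically advancing counter. */
	/* ... record list, free count ... */
};

struct ck_epoch_record {
	struct ck_epoch *global;	/* Back-pointer to the global state. */
	unsigned int epoch;		/* Snapshot of global->epoch taken at begin(). */
	unsigned int active;		/* Nesting depth of open read sections. */
	/* ... pending callback stacks ... */
};

struct ck_epoch_ref {
	unsigned int epoch;		/* Epoch at which a reference was observed. */
	unsigned int count;
};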
123 * Marks the beginning of an epoch-protected section.
128 struct ck_epoch *epoch = record->global; in ck_epoch_begin() local
131 * Only observe new epoch if thread is not recursing into a read in ck_epoch_begin()
139 * is committed into the caller's epoch and active fields. in ck_epoch_begin()
152 * active flag due to monotonic nature of the global epoch. in ck_epoch_begin()
155 * of global epoch. in ck_epoch_begin()
157 g_epoch = ck_pr_load_uint(&epoch->epoch); in ck_epoch_begin()
158 ck_pr_store_uint(&record->epoch, g_epoch); in ck_epoch_begin()
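Taken together, the ck_epoch_begin() matches above (lines 128 through 158) outline the entry path: a thread that is not already inside a read section publishes its active flag, orders that store before the following load, and then snapshots the global counter into its record. A condensed sketch of that logic, reconstructed from the matched comments; the exact fence choice and the recursive branch are assumptions, not a verbatim copy:

#include <ck_epoch.h>
#include <ck_pr.h>

/* Condensed sketch of the begin() fast path; fence placement and the
 * recursive branch are assumptions based on the matched comments. */
static void
epoch_begin_sketch(ck_epoch_record_t *record)
{
	struct ck_epoch *epoch = record->global;

	/* Only observe a new epoch if the thread is not recursing into a
	 * read section (comment at line 131). */
	if (ck_pr_load_uint(&record->active) == 0) {
		unsigned int g_epoch;

		/* Publish activity, then order the store before the load of
		 * the global counter (the store-to-load serialization implied
		 * by the comment at line 139). */
		ck_pr_store_uint(&record->active, 1);
		ck_pr_fence_memory();

		g_epoch = ck_pr_load_uint(&epoch->epoch);
		ck_pr_store_uint(&record->epoch, g_epoch);
		return;
	}

	/* Recursive entry only bumps the nesting depth. */
	ck_pr_store_uint(&record->active,
	    ck_pr_load_uint(&record->active) + 1);
	return;
}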
170 * Marks the end of an epoch-protected section. Returns true if no more
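Lines 123 and 170 document the public pairing: every ck_epoch_begin() is matched by a ck_epoch_end(), and objects observed in between stay live until a grace period passes. A minimal read-side usage sketch, assuming the conventional API where both calls take the record plus an optional section pointer (NULL for non-nested use); node and consume() are illustrative stand-ins:

#include <ck_epoch.h>
#include <stddef.h>

struct node {
	struct node *next;
};

static void
consume(struct node *n)
{

	(void)n;	/* Read fields of the protected object here. */
	return;
}

/* record must already be registered with the global epoch object. */
static void
reader(ck_epoch_record_t *record, struct node *head)
{
	struct node *n;

	ck_epoch_begin(record, NULL);
	for (n = head; n != NULL; n = n->next)
		consume(n);
	ck_epoch_end(record, NULL);
	return;
}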
188 * argument until an epoch counter loop. This allows for a
192 * of the epoch counter. Worst case, this will result in some delays
200 struct ck_epoch *epoch = record->global; in ck_epoch_call() local
201 unsigned int e = ck_pr_load_uint(&epoch->epoch); in ck_epoch_call()
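The ck_epoch_call() matches (lines 188 through 201) describe non-blocking deferral: the callback fires only once the epoch counter completes a loop, and the monotonic counter lets the implementation omit a fence at the cost of possibly delayed destruction. A sketch of the usual deferred-destruction pattern, assuming the upstream ck_epoch_entry_t embedding and CK_EPOCH_CONTAINER helper from ck_epoch.h:

#include <ck_epoch.h>
#include <stdlib.h>

struct object {
	int value;
	ck_epoch_entry_t entry;
};

CK_EPOCH_CONTAINER(struct object, entry, object_container)

static void
object_destroy(ck_epoch_entry_t *e)
{

	free(object_container(e));
	return;
}

static void
retire(ck_epoch_record_t *record, struct object *o)
{

	/* Unlink o from shared structures first (not shown), then defer
	 * the free until readers in older epochs have drained. */
	ck_epoch_call(record, &o->entry, object_destroy);
	return;
}

The ck_epoch_call_strict() matches at lines 218 and 219 show the strict variant performing the same load of the global epoch counter.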
218 struct ck_epoch *epoch = record->global; in ck_epoch_call_strict() local
219 unsigned int e = ck_pr_load_uint(&epoch->epoch); in ck_epoch_call_strict()
238 * Return latest epoch value. This operation provides load ordering.
245 return ck_pr_load_uint(&ep->epoch); in ck_epoch_value()
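Per line 238, ck_epoch_value() returns the latest counter with load ordering, and the comments at lines 152 and 155 note the counter is monotonic. One illustrative use is waiting for the counter to move past a previously observed value; this is a sketch, not a documented idiom, and real code would normally rely on the library's synchronize or poll operations:

#include <ck_epoch.h>
#include <ck_pr.h>

/* Illustration only: spin until the global counter advances past a
 * previously observed value. */
static void
wait_for_tick(ck_epoch_t *global, unsigned int observed)
{

	while (ck_epoch_value(global) == observed)
		ck_pr_stall();
	return;
}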
251 * Attempts to recycle an unused epoch record. If one is successfully
257 * Registers an epoch record. An optional context pointer may be passed that
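Lines 251 and 257 cover record lifecycle: recycling an unused record when possible, otherwise registering a fresh one. The usual thread-setup pattern is sketched below, assuming the recent upstream signatures in which both calls accept the optional context pointer mentioned at line 257:

#include <ck_epoch.h>
#include <stdlib.h>

/* Prefer recycling an unused record; fall back to allocating and
 * registering a new one. NULL stands in for the optional context. */
static ck_epoch_record_t *
acquire_record(ck_epoch_t *global)
{
	ck_epoch_record_t *record;

	record = ck_epoch_recycle(global, NULL);
	if (record != NULL)
		return record;

	record = malloc(sizeof *record);
	if (record == NULL)
		return NULL;

	ck_epoch_register(global, record, NULL);
	return record;
}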