/*
 * Copyright 2011-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The implementation here is inspired by the work described in:
 *   Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
 *   of Cambridge Computing Laboratory.
 */

#include <ck_backoff.h>
#include <ck_cc.h>
#include <ck_epoch.h>
#include <ck_pr.h>
#include <ck_stack.h>
#include <ck_stdbool.h>
#include <ck_string.h>

/*
 * Only three distinct epoch values are used for reclamation, but reclamation
 * occurs at e+2 rather than e+1. Any thread in a "critical section" has
 * acquired some snapshot (e) of the global epoch value (e_g) and set an
 * active flag. Any hazardous references will only occur after a full memory
 * barrier. For example, assume an initial e_g value of 1, an e value of 0
 * and an active value of 0.
 *
 *   ck_epoch_begin(...)
 *     e = e_g
 *     active = 1
 *     memory_barrier();
 *
 * Any serialized reads may observe e = 0 or e = 1 with active = 0, or e = 0
 * or e = 1 with active = 1. The e_g value can only go from 1 to 2 if every
 * thread has already observed the value of "1" (or the value we are
 * incrementing from). This guarantees that for any given value of e_g, any
 * threads within critical sections (referred to as "active" threads from
 * here on) have an e value of either e_g-1 or e_g. This also means that
 * hazardous references may be shared in both e_g-1 and e_g even if they are
 * logically deleted in e_g.
 *
 * For example, assume all threads have an e value of e_g. Another thread may
 * increment e_g to e_g+1. Older threads may then hold a reference to an
 * object which is only deleted in e_g+1. It could be that reader threads are
 * executing some hash table look-ups, while some other writer thread (which
 * causes an epoch counter tick) actually deletes the same items that the
 * reader threads are looking up (this writer thread having an e value of
 * e_g+1). This is possible if the writer thread re-observes the epoch after
 * the counter tick.
 *
 * Pseudo-code for writer:
 *   ck_epoch_begin()
 *   ht_delete(x)
 *   ck_epoch_end()
 *   ck_epoch_begin()
 *   ht_delete(x)
 *   ck_epoch_end()
 *
 * Pseudo-code for reader:
 *   for (;;) {
 *     x = ht_lookup(x)
 *     ck_pr_inc(&x->value);
 *   }
 *
 * Of course, it is also possible for references logically deleted at e_g-1
 * to still be accessed at e_g, as threads are "active" at the same time
 * (real-world time) while mutating shared objects.
 *
 * Now, if the epoch counter is ticked to e_g+1, then no new hazardous
 * references can exist to objects logically deleted at e_g-1. The reason for
 * this is that at e_g+1, all epoch read-side critical sections started at
 * e_g-1 must have completed. If any epoch read-side critical sections at
 * e_g-1 were still active, then we would never increment to e_g+1 (a record
 * with active != 0 and e != e_g prevents the increment). Additionally, e_g
 * may still have hazardous references to objects logically deleted at e_g-1,
 * which means objects logically deleted at e_g-1 cannot be physically
 * deleted at e_g+1 unless all threads have observed e_g+1 (since it is valid
 * for active threads to be at e_g, and threads at e_g still require safe
 * memory accesses).
 *
 * However, at e_g+2, all active threads must be at either e_g+1 or e_g+2.
 * Though e_g+2 may share hazardous references with e_g+1, and e_g+1 shares
 * hazardous references with e_g, no active threads are at e_g or e_g-1. This
 * means no hazardous references can exist to objects deleted at e_g-1 (at
 * e_g+2).
 *
 * To summarize these important points:
 *   1) Active threads will always have a value of e_g or e_g-1.
 *   2) Items that are logically deleted at e_g or e_g-1 cannot be physically
 *      deleted.
 *   3) Objects logically deleted at e_g-1 can be physically destroyed at
 *      e_g+2, or at e_g+1 if no threads are at e_g.
 *
 * Last but not least, if we are at e_g+2, then no active thread is at e_g,
 * which means it is safe to apply modulo-3 arithmetic to the e_g value in
 * order to re-use e_g to represent the e_g+3 state. This means it is
 * sufficient to represent e_g using only the values 0, 1 or 2. Every time a
 * thread re-visits an e_g (which can be determined from a non-empty deferral
 * list) it can assume that objects in the e_g deferral list have been
 * through at least three e_g transitions and are thus safe for physical
 * deletion.
 *
 * Blocking semantics for epoch reclamation have additional restrictions.
 * Though we only require three deferral lists, reasonable blocking semantics
 * must be able to more gracefully handle bursty write work-loads which could
 * easily cause e_g wrap-around if modulo-3 arithmetic is used, leading to
 * easy-to-trigger live-lock situations. The work-around is to not apply
 * modulo arithmetic to e_g itself, but only to deferral list indexing.
 */
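/*
 * A minimal sketch of the deferral pattern described above, excluded from
 * compilation since it is illustrative only. The struct object type, the
 * ht_delete/ht_lookup helpers, object_destroy and the stdlib allocator are
 * hypothetical stand-ins; the ck_epoch_* entry points and
 * CK_EPOCH_CONTAINER are assumed to be those declared in ck_epoch.h.
 */
#if 0
struct object {
	int value;
	ck_epoch_entry_t epoch_entry;
};
CK_EPOCH_CONTAINER(struct object, epoch_entry, object_container)

static void
object_destroy(ck_epoch_entry_t *e)
{

	/* Invoked once a grace period has elapsed for this entry. */
	free(object_container(e));
	return;
}

static void
writer(ck_epoch_record_t *record, struct object *x)
{

	/* Logical deletion occurs under epoch protection. */
	ck_epoch_begin(record, NULL);
	ht_delete(x);
	ck_epoch_end(record, NULL);

	/* Physical deletion is deferred until it is provably safe. */
	ck_epoch_call(record, &x->epoch_entry, object_destroy);
	return;
}

static void
reader(ck_epoch_record_t *record, int key)
{
	struct object *x;

	/*
	 * x may be logically deleted concurrently, but remains safe to
	 * dereference until ck_epoch_end.
	 */
	ck_epoch_begin(record, NULL);
	x = ht_lookup(key);
	ck_pr_inc_int(&x->value);
	ck_epoch_end(record, NULL);
	return;
}
#endif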
#define CK_EPOCH_GRACE 3U

enum {
	CK_EPOCH_STATE_USED = 0,
	CK_EPOCH_STATE_FREE = 1
};

CK_STACK_CONTAINER(struct ck_epoch_record, record_next,
    ck_epoch_record_container)
CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
    ck_epoch_entry_container)

#define CK_EPOCH_SENSE_MASK	(CK_EPOCH_SENSE - 1)

void
_ck_epoch_delref(struct ck_epoch_record *record,
    struct ck_epoch_section *section)
{
	struct ck_epoch_ref *current, *other;
	unsigned int i = section->bucket;

	current = &record->local.bucket[i];
	current->count--;

	if (current->count > 0)
		return;

	/*
	 * If the current bucket no longer has any references, then
	 * determine whether we have already transitioned into a newer
	 * epoch. If so, then make sure to update our shared snapshot
	 * to allow for forward progress.
	 *
	 * If no other active bucket exists, then the record will go
	 * inactive in order to allow for forward progress.
	 */
	other = &record->local.bucket[(i + 1) &
	    CK_EPOCH_SENSE_MASK];
	if (other->count > 0 &&
	    ((int)(current->epoch - other->epoch) < 0)) {
		/*
		 * The other epoch value is actually the newest,
		 * transition to it.
		 */
		ck_pr_store_uint(&record->epoch, other->epoch);
	}

	return;
}

void
_ck_epoch_addref(struct ck_epoch_record *record,
    struct ck_epoch_section *section)
{
	struct ck_epoch *global = record->global;
	struct ck_epoch_ref *ref;
	unsigned int epoch, i;

	epoch = ck_pr_load_uint(&global->epoch);
	i = epoch & CK_EPOCH_SENSE_MASK;
	ref = &record->local.bucket[i];

	if (ref->count++ == 0) {
#ifndef CK_MD_TSO
		struct ck_epoch_ref *previous;

		/*
		 * The system has already ticked. If another non-zero bucket
		 * exists, make sure to order our observations with respect
		 * to it. Otherwise, it is possible to acquire a reference
		 * from the previous epoch generation.
		 *
		 * On TSO architectures, the monotonicity of the global
		 * counter and load-{store, load} ordering are sufficient to
		 * guarantee this ordering.
		 */
		previous = &record->local.bucket[(i + 1) &
		    CK_EPOCH_SENSE_MASK];
		if (previous->count > 0)
			ck_pr_fence_acqrel();
#endif /* !CK_MD_TSO */

		/*
		 * If this is a new reference into the current bucket, then
		 * cache the associated epoch value.
		 */
		ref->epoch = epoch;
	}

	section->bucket = i;
	return;
}

void
ck_epoch_init(struct ck_epoch *global)
{

	ck_stack_init(&global->records);
	global->epoch = 1;
	global->n_free = 0;
	ck_pr_fence_store();
	return;
}
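/*
 * A sketch (illustrative only, excluded from compilation) of typical record
 * management built on ck_epoch_recycle and ck_epoch_register defined below:
 * one global epoch object, with each participating thread preferring a
 * recycled record over a freshly allocated one. The global_epoch object,
 * the helper name and malloc-based allocation are assumptions made for the
 * example.
 */
#if 0
static ck_epoch_t global_epoch;	/* Initialized once via ck_epoch_init. */

static ck_epoch_record_t *
thread_record_acquire(void)
{
	ck_epoch_record_t *record;

	/* Prefer a record surrendered by a departed thread. */
	record = ck_epoch_recycle(&global_epoch);
	if (record == NULL) {
		record = malloc(sizeof *record);
		ck_epoch_register(&global_epoch, record);
	}

	return record;
}
#endif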
struct ck_epoch_record *
ck_epoch_recycle(struct ck_epoch *global)
{
	struct ck_epoch_record *record;
	ck_stack_entry_t *cursor;
	unsigned int state;

	if (ck_pr_load_uint(&global->n_free) == 0)
		return NULL;

	CK_STACK_FOREACH(&global->records, cursor) {
		record = ck_epoch_record_container(cursor);

		if (ck_pr_load_uint(&record->state) == CK_EPOCH_STATE_FREE) {
			/* Serialize with respect to deferral list clean-up. */
			ck_pr_fence_load();
			state = ck_pr_fas_uint(&record->state,
			    CK_EPOCH_STATE_USED);
			if (state == CK_EPOCH_STATE_FREE) {
				ck_pr_dec_uint(&global->n_free);
				return record;
			}
		}
	}

	return NULL;
}

void
ck_epoch_register(struct ck_epoch *global, struct ck_epoch_record *record)
{
	size_t i;

	record->global = global;
	record->state = CK_EPOCH_STATE_USED;
	record->active = 0;
	record->epoch = 0;
	record->n_dispatch = 0;
	record->n_peak = 0;
	record->n_pending = 0;
	memset(&record->local, 0, sizeof record->local);

	for (i = 0; i < CK_EPOCH_LENGTH; i++)
		ck_stack_init(&record->pending[i]);

	ck_pr_fence_store();
	ck_stack_push_upmc(&global->records, &record->record_next);
	return;
}

void
ck_epoch_unregister(struct ck_epoch_record *record)
{
	struct ck_epoch *global = record->global;
	size_t i;

	record->active = 0;
	record->epoch = 0;
	record->n_dispatch = 0;
	record->n_peak = 0;
	record->n_pending = 0;
	memset(&record->local, 0, sizeof record->local);

	for (i = 0; i < CK_EPOCH_LENGTH; i++)
		ck_stack_init(&record->pending[i]);

	ck_pr_fence_store();
	ck_pr_store_uint(&record->state, CK_EPOCH_STATE_FREE);
	ck_pr_inc_uint(&global->n_free);
	return;
}

static struct ck_epoch_record *
ck_epoch_scan(struct ck_epoch *global,
    struct ck_epoch_record *cr,
    unsigned int epoch,
    bool *af)
{
	ck_stack_entry_t *cursor;

	*af = false;
	if (cr == NULL) {
		cursor = CK_STACK_FIRST(&global->records);
	} else {
		cursor = &cr->record_next;
	}

	while (cursor != NULL) {
		unsigned int state, active;

		cr = ck_epoch_record_container(cursor);

		state = ck_pr_load_uint(&cr->state);
		if (state & CK_EPOCH_STATE_FREE) {
			cursor = CK_STACK_NEXT(cursor);
			continue;
		}

		active = ck_pr_load_uint(&cr->active);
		*af |= active;

		if (active != 0 && ck_pr_load_uint(&cr->epoch) != epoch)
			return cr;

		cursor = CK_STACK_NEXT(cursor);
	}

	return NULL;
}

static void
ck_epoch_dispatch(struct ck_epoch_record *record, unsigned int e)
{
	unsigned int epoch = e & (CK_EPOCH_LENGTH - 1);
	ck_stack_entry_t *head, *next, *cursor;
	unsigned int i = 0;

	head = CK_STACK_FIRST(&record->pending[epoch]);
	ck_stack_init(&record->pending[epoch]);

	for (cursor = head; cursor != NULL; cursor = next) {
		struct ck_epoch_entry *entry =
		    ck_epoch_entry_container(cursor);

		next = CK_STACK_NEXT(cursor);
		entry->function(entry);
		i++;
	}

	if (record->n_pending > record->n_peak)
		record->n_peak = record->n_pending;

	record->n_dispatch += i;
	record->n_pending -= i;
	return;
}

/*
 * Reclaim all objects associated with a record.
 */
void
ck_epoch_reclaim(struct ck_epoch_record *record)
{
	unsigned int epoch;

	for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
		ck_epoch_dispatch(record, epoch);

	return;
}
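/*
 * An illustrative (non-compiled) thread-exit path complementing the
 * acquisition sketch above: a record's deferral lists should be drained
 * before the record is surrendered for recycling. The helper name is
 * hypothetical; ck_epoch_barrier is defined below.
 */
#if 0
static void
thread_record_release(ck_epoch_record_t *record)
{

	/* Wait for a grace period and dispatch anything still pending. */
	ck_epoch_barrier(record);
	ck_epoch_unregister(record);
	return;
}
#endif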
/*
 * This function must not be called from within a read-side section.
 */
void
ck_epoch_synchronize(struct ck_epoch_record *record)
{
	struct ck_epoch *global = record->global;
	struct ck_epoch_record *cr;
	unsigned int delta, epoch, goal, i;
	bool active;

	ck_pr_fence_memory();

	/*
	 * The observation of the global epoch must be ordered with respect
	 * to all prior operations. The re-ordering of loads is permitted
	 * given the monotonicity of the global epoch counter.
	 *
	 * If UINT_MAX concurrent mutations were to occur, then it is
	 * possible to encounter an ABA-issue. If this is a concern, consider
	 * tuning write-side concurrency.
	 */
	delta = epoch = ck_pr_load_uint(&global->epoch);
	goal = epoch + CK_EPOCH_GRACE;

	for (i = 0, cr = NULL; i < CK_EPOCH_GRACE - 1; cr = NULL, i++) {
		bool r;

		/*
		 * Determine whether all threads have observed the current
		 * epoch with respect to the updates on invocation.
		 */
		while (cr = ck_epoch_scan(global, cr, delta, &active),
		    cr != NULL) {
			unsigned int e_d;

			ck_pr_stall();

			/*
			 * Another writer may have already observed a grace
			 * period.
			 */
			e_d = ck_pr_load_uint(&global->epoch);
			if (e_d != delta) {
				delta = e_d;
				goto reload;
			}
		}

		/*
		 * If we have observed all threads as inactive, then we assume
		 * we are at a grace period.
		 */
		if (active == false)
			break;

		/*
		 * Increment the current epoch. CAS semantics are used to
		 * eliminate increment operations for synchronization that
		 * occurs for the same global epoch value snapshot.
		 *
		 * If we can guarantee there will only be one active barrier
		 * or epoch tick at a given time, then it is sufficient to
		 * use an increment operation. In a multi-barrier workload,
		 * however, it is possible to overflow the epoch value if we
		 * apply modulo-3 arithmetic.
		 */
		r = ck_pr_cas_uint_value(&global->epoch, delta, delta + 1,
		    &delta);

		/* Order subsequent thread active checks. */
		ck_pr_fence_atomic_load();

		/*
		 * If the CAS has succeeded, then set delta to the latest
		 * snapshot. Otherwise, we have just acquired the latest
		 * snapshot.
		 */
		delta = delta + r;
		continue;

reload:
		if ((goal > epoch) & (delta >= goal)) {
			/*
			 * Right now, epoch overflow is handled as an edge
			 * case. If we have already observed an epoch
			 * generation, then we can be sure no hazardous
			 * references exist to objects from this generation.
			 * We can actually avoid an additional scan step at
			 * this point.
			 */
			break;
		}
	}

	/*
	 * A majority of use-cases will not require full barrier semantics.
	 * However, if non-temporal instructions are used, full barrier
	 * semantics are necessary.
	 */
	ck_pr_fence_memory();
	record->epoch = delta;
	return;
}

void
ck_epoch_barrier(struct ck_epoch_record *record)
{

	ck_epoch_synchronize(record);
	ck_epoch_reclaim(record);
	return;
}
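/*
 * A sketch (illustrative only, excluded from compilation) distinguishing
 * the two blocking interfaces: ck_epoch_synchronize only waits out a grace
 * period, so a writer that has already unlinked an object may free it
 * directly afterwards without going through a deferral list. The struct
 * object type and ht_delete helper are the hypothetical stand-ins from the
 * earlier sketch.
 */
#if 0
static void
writer_free_directly(ck_epoch_record_t *record, struct object *x)
{

	ht_delete(x);			/* Logical deletion. */
	ck_epoch_synchronize(record);	/* No hazardous references remain. */
	free(x);			/* Safe to destroy immediately. */
	return;
}
#endif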
/*
 * It may be worth it to actually apply these deferral semantics to an epoch
 * that was observed at ck_epoch_call time. The problem is that the latter
 * would require a full fence.
 *
 * ck_epoch_call will dispatch to the latest epoch snapshot that was
 * observed. There are cases where it will fail to reclaim as early as it
 * could. If this becomes a problem, we could actually use a heap for epoch
 * buckets, but that is far from ideal too.
 */
bool
ck_epoch_poll(struct ck_epoch_record *record)
{
	bool active;
	unsigned int epoch;
	unsigned int snapshot;
	struct ck_epoch_record *cr = NULL;
	struct ck_epoch *global = record->global;

	epoch = ck_pr_load_uint(&global->epoch);

	/* Serialize epoch snapshots with respect to global epoch. */
	ck_pr_fence_memory();
	cr = ck_epoch_scan(global, cr, epoch, &active);
	if (cr != NULL) {
		record->epoch = epoch;
		return false;
	}

	/* We are at a grace period if all threads are inactive. */
	if (active == false) {
		record->epoch = epoch;
		for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
			ck_epoch_dispatch(record, epoch);

		return true;
	}

	/* If an active thread exists, rely on epoch observation. */
	if (ck_pr_cas_uint_value(&global->epoch, epoch, epoch + 1,
	    &snapshot) == false) {
		record->epoch = snapshot;
	} else {
		record->epoch = epoch + 1;
	}

	ck_epoch_dispatch(record, epoch + 1);
	return true;
}
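/*
 * An illustrative (non-compiled) writer that amortizes reclamation with
 * ck_epoch_poll instead of blocking: each deferral is followed by a single
 * non-blocking reclamation attempt, which returns false when some active
 * thread has not yet observed the current epoch. The types and helpers are
 * the hypothetical stand-ins from the earlier sketches.
 */
#if 0
static void
writer_amortized(ck_epoch_record_t *record, struct object *x)
{

	ck_epoch_begin(record, NULL);
	ht_delete(x);
	ck_epoch_end(record, NULL);

	ck_epoch_call(record, &x->epoch_entry, object_destroy);

	/* Never blocks; pending entries are dispatched on later calls. */
	(void)ck_epoch_poll(record);
	return;
}
#endif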