/*
 * Copyright 2011-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_EPOCH_H
#define CK_EPOCH_H

/*
 * The implementation here is inspired by the work described in:
 *   Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
 *   of Cambridge Computing Laboratory.
 */

#include <ck_cc.h>
#include <ck_md.h>
#include <ck_pr.h>
#include <ck_stack.h>
#include <ck_stdbool.h>

#ifndef CK_EPOCH_LENGTH
#define CK_EPOCH_LENGTH 4
#endif

/*
 * This is used for sense detection with respect to concurrent
 * epoch sections.
 */
#define CK_EPOCH_SENSE (2)

struct ck_epoch_entry;
typedef struct ck_epoch_entry ck_epoch_entry_t;
typedef void ck_epoch_cb_t(ck_epoch_entry_t *);

/*
 * This should be embedded into objects you wish to be the target of
 * ck_epoch_cb_t functions (with ck_epoch_call).
 */
struct ck_epoch_entry {
	ck_epoch_cb_t *function;
	ck_stack_entry_t stack_entry;
};

/*
 * A section object may be passed to every begin-end pair to allow for
 * forward progress guarantees within prolonged active sections.
 */
struct ck_epoch_section {
	unsigned int bucket;
};
typedef struct ck_epoch_section ck_epoch_section_t;

/*
 * Returns a pointer to the object containing the given ck_epoch_entry.
 */
#define CK_EPOCH_CONTAINER(T, M, N) \
	CK_CC_CONTAINER(struct ck_epoch_entry, T, M, N)

struct ck_epoch_ref {
	unsigned int epoch;
	unsigned int count;
};

struct ck_epoch_record {
	struct ck_epoch *global;
	unsigned int state;
	unsigned int epoch;
	unsigned int active;
	struct {
		struct ck_epoch_ref bucket[CK_EPOCH_SENSE];
	} local CK_CC_CACHELINE;
	unsigned int n_pending;
	unsigned int n_peak;
	unsigned long n_dispatch;
	ck_stack_t pending[CK_EPOCH_LENGTH];
	ck_stack_entry_t record_next;
} CK_CC_CACHELINE;
typedef struct ck_epoch_record ck_epoch_record_t;

struct ck_epoch {
	unsigned int epoch;
	char pad[CK_MD_CACHELINE - sizeof(unsigned int)];
	ck_stack_t records;
	unsigned int n_free;
};
typedef struct ck_epoch ck_epoch_t;
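
/*
 * Example of embedding ck_epoch_entry and using CK_EPOCH_CONTAINER to
 * recover the enclosing object inside a callback. This is an illustrative
 * sketch only; "struct object", "object_container" and "object_destroy"
 * are hypothetical caller-defined names, not part of this interface:
 *
 *	struct object {
 *		int value;
 *		ck_epoch_entry_t epoch_entry;
 *	};
 *	CK_EPOCH_CONTAINER(struct object, epoch_entry, object_container)
 *
 *	static void
 *	object_destroy(ck_epoch_entry_t *e)
 *	{
 *
 *		free(object_container(e));
 *		return;
 *	}
 *
 * An object retired with ck_epoch_call(record, &o->epoch_entry,
 * object_destroy) will have object_destroy invoked once no epoch-protected
 * section can still hold a reference to it.
 */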

/*
 * Internal functions.
 */
void _ck_epoch_addref(ck_epoch_record_t *, ck_epoch_section_t *);
void _ck_epoch_delref(ck_epoch_record_t *, ck_epoch_section_t *);

/*
 * Marks the beginning of an epoch-protected section.
 */
CK_CC_FORCE_INLINE static void
ck_epoch_begin(ck_epoch_record_t *record, ck_epoch_section_t *section)
{
	struct ck_epoch *epoch = record->global;

	/*
	 * Only observe a new epoch if the thread is not recursing into a read
	 * section.
	 */
	if (record->active == 0) {
		unsigned int g_epoch;

		/*
		 * It is possible for loads to be re-ordered before the store
		 * is committed into the caller's epoch and active fields.
		 * For this reason, store-to-load serialization is necessary.
		 */
#if defined(CK_MD_TSO)
		ck_pr_fas_uint(&record->active, 1);
		ck_pr_fence_atomic_load();
#else
		ck_pr_store_uint(&record->active, 1);
		ck_pr_fence_memory();
#endif

		/*
		 * This load is allowed to be re-ordered prior to setting the
		 * active flag due to the monotonic nature of the global epoch.
		 * However, stale values lead to measurable performance
		 * degradation in some torture tests, so we disallow early
		 * loads of the global epoch.
		 */
		g_epoch = ck_pr_load_uint(&epoch->epoch);
		ck_pr_store_uint(&record->epoch, g_epoch);
	} else {
		ck_pr_store_uint(&record->active, record->active + 1);
	}

	if (section != NULL)
		_ck_epoch_addref(record, section);

	return;
}

/*
 * Marks the end of an epoch-protected section.
 */
CK_CC_FORCE_INLINE static void
ck_epoch_end(ck_epoch_record_t *record, ck_epoch_section_t *section)
{

	ck_pr_fence_release();
	ck_pr_store_uint(&record->active, record->active - 1);

	if (section != NULL)
		_ck_epoch_delref(record, section);

	return;
}

/*
 * Defers the execution of the function pointed to by the "function"
 * argument until the global epoch counter has cycled past all active
 * sections, at which point no reader may still hold a reference to the
 * entry. This allows for a non-blocking deferral.
 */
CK_CC_FORCE_INLINE static void
ck_epoch_call(ck_epoch_record_t *record,
	      ck_epoch_entry_t *entry,
	      ck_epoch_cb_t *function)
{
	struct ck_epoch *epoch = record->global;
	unsigned int e = ck_pr_load_uint(&epoch->epoch);
	unsigned int offset = e & (CK_EPOCH_LENGTH - 1);

	record->n_pending++;
	entry->function = function;
	ck_stack_push_spnc(&record->pending[offset], &entry->stack_entry);
	return;
}

void ck_epoch_init(ck_epoch_t *);
ck_epoch_record_t *ck_epoch_recycle(ck_epoch_t *);
void ck_epoch_register(ck_epoch_t *, ck_epoch_record_t *);
void ck_epoch_unregister(ck_epoch_record_t *);
bool ck_epoch_poll(ck_epoch_record_t *);
void ck_epoch_synchronize(ck_epoch_record_t *);
void ck_epoch_barrier(ck_epoch_record_t *);
void ck_epoch_reclaim(ck_epoch_record_t *);

#endif /* CK_EPOCH_H */
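
/*
 * Minimal usage sketch of the interface declared above (illustrative only;
 * "global_epoch", "record", "obj" and "object_destroy" are hypothetical
 * caller-defined names). Each thread is expected to operate on its own
 * registered record:
 *
 *	ck_epoch_t global_epoch;
 *	ck_epoch_record_t record;
 *
 *	ck_epoch_init(&global_epoch);
 *	ck_epoch_register(&global_epoch, &record);
 *
 *	Read-side critical section:
 *		ck_epoch_begin(&record, NULL);
 *		... dereference shared objects ...
 *		ck_epoch_end(&record, NULL);
 *
 *	Deferred destruction, after obj has been unlinked from the shared
 *	structure:
 *		ck_epoch_call(&record, &obj->epoch_entry, object_destroy);
 *		ck_epoch_poll(&record);		non-blocking dispatch attempt
 *	or, to wait for a full grace period before dispatching:
 *		ck_epoch_barrier(&record);
 */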