/*
 * Copyright 2013-2015 Samy Al Bahra.
 * Copyright 2013 Brendon Scheinman.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_COHORT_H
#define CK_COHORT_H

/*
 * This is an implementation of lock cohorts as described in:
 *     Dice, D.; Marathe, V.; and Shavit, N. 2012.
 *     Lock Cohorting: A General Technique for Designing NUMA Locks
 */

#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_stddef.h>

enum ck_cohort_state {
        CK_COHORT_STATE_GLOBAL = 0,
        CK_COHORT_STATE_LOCAL = 1
};

/*
 * Maximum number of consecutive acquisitions a cohort may make while
 * holding the global lock before the global lock must be released.
 */
#define CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT 10

#define CK_COHORT_NAME(N) ck_cohort_##N
#define CK_COHORT_INSTANCE(N) struct CK_COHORT_NAME(N)
#define CK_COHORT_INIT(N, C, GL, LL, P) ck_cohort_##N##_init(C, GL, LL, P)
#define CK_COHORT_LOCK(N, C, GC, LC) ck_cohort_##N##_lock(C, GC, LC)
#define CK_COHORT_UNLOCK(N, C, GC, LC) ck_cohort_##N##_unlock(C, GC, LC)
#define CK_COHORT_TRYLOCK(N, C, GLC, LLC, LUC) ck_cohort_##N##_trylock(C, GLC, LLC, LUC)
#define CK_COHORT_LOCKED(N, C, GC, LC) ck_cohort_##N##_locked(C, GC, LC)

/*
 * CK_COHORT_PROTOTYPE generates a cohort type and its operations for a
 * given pair of lock implementations:
 *     GL/GU/GI - global lock, unlock and is-locked functions
 *     LL/LU/LI - local (per-cohort) lock, unlock and is-locked functions
 * Each function receives the lock pointer and an opaque context pointer.
 */
#define CK_COHORT_PROTOTYPE(N, GL, GU, GI, LL, LU, LI) \
        CK_COHORT_INSTANCE(N) { \
                void *global_lock; \
                void *local_lock; \
                enum ck_cohort_state release_state; \
                unsigned int waiting_threads; \
                unsigned int acquire_count; \
                unsigned int local_pass_limit; \
        }; \
 \
        CK_CC_INLINE static void \
        ck_cohort_##N##_init(struct ck_cohort_##N *cohort, \
            void *global_lock, void *local_lock, unsigned int pass_limit) \
        { \
                cohort->global_lock = global_lock; \
                cohort->local_lock = local_lock; \
                cohort->release_state = CK_COHORT_STATE_GLOBAL; \
                cohort->waiting_threads = 0; \
                cohort->acquire_count = 0; \
                cohort->local_pass_limit = pass_limit; \
                ck_pr_barrier(); \
                return; \
        } \
 \
        CK_CC_INLINE static void \
        ck_cohort_##N##_lock(CK_COHORT_INSTANCE(N) *cohort, \
            void *global_context, void *local_context) \
        { \
 \
                ck_pr_inc_uint(&cohort->waiting_threads); \
                LL(cohort->local_lock, local_context); \
                ck_pr_dec_uint(&cohort->waiting_threads); \
 \
                if (cohort->release_state == CK_COHORT_STATE_GLOBAL) { \
                        GL(cohort->global_lock, global_context); \
                } \
 \
                ++cohort->acquire_count; \
                return; \
        } \
 \
        CK_CC_INLINE static void \
        ck_cohort_##N##_unlock(CK_COHORT_INSTANCE(N) *cohort, \
            void *global_context, void *local_context) \
        { \
 \
                if (ck_pr_load_uint(&cohort->waiting_threads) > 0 \
                    && cohort->acquire_count < cohort->local_pass_limit) { \
                        cohort->release_state = CK_COHORT_STATE_LOCAL; \
                } else { \
                        GU(cohort->global_lock, global_context); \
                        cohort->release_state = CK_COHORT_STATE_GLOBAL; \
                        cohort->acquire_count = 0; \
                } \
 \
                ck_pr_fence_release(); \
                LU(cohort->local_lock, local_context); \
 \
                return; \
        } \
 \
        CK_CC_INLINE static bool \
        ck_cohort_##N##_locked(CK_COHORT_INSTANCE(N) *cohort, \
            void *global_context, void *local_context) \
        { \
                return GI(cohort->global_lock, global_context) || \
                    LI(cohort->local_lock, local_context); \
        }

/*
 * CK_COHORT_TRYLOCK_PROTOTYPE additionally requires trylock functions for
 * the global (GTL) and local (LTL) locks and generates a trylock operation
 * on top of the regular cohort operations.
 */
#define CK_COHORT_TRYLOCK_PROTOTYPE(N, GL, GU, GI, GTL, LL, LU, LI, LTL) \
        CK_COHORT_PROTOTYPE(N, GL, GU, GI, LL, LU, LI) \
        CK_CC_INLINE static bool \
        ck_cohort_##N##_trylock(CK_COHORT_INSTANCE(N) *cohort, \
            void *global_context, void *local_context, \
            void *local_unlock_context) \
        { \
 \
                bool trylock_result; \
 \
                ck_pr_inc_uint(&cohort->waiting_threads); \
                trylock_result = LTL(cohort->local_lock, local_context); \
                ck_pr_dec_uint(&cohort->waiting_threads); \
                if (trylock_result == false) { \
                        return false; \
                } \
 \
                if (cohort->release_state == CK_COHORT_STATE_GLOBAL && \
                    GTL(cohort->global_lock, global_context) == false) { \
                        LU(cohort->local_lock, local_unlock_context); \
                        return false; \
                } \
 \
                ++cohort->acquire_count; \
                return true; \
        }

#define CK_COHORT_INITIALIZER { \
        .global_lock = NULL, \
        .local_lock = NULL, \
        .release_state = CK_COHORT_STATE_GLOBAL, \
        .waiting_threads = 0, \
        .acquire_count = 0, \
        .local_pass_limit = CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT \
}

#endif /* CK_COHORT_H */
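
/*
 * Illustrative usage sketch (not part of the interface above): a cohort
 * instantiated over ck_spinlock for both the global and the local lock.
 * The wrapper names, the cohort name "example" and the variables below are
 * hypothetical; any lock implementation with compatible lock, unlock and
 * is-locked functions may be substituted.
 *
 *	#include <ck_cohort.h>
 *	#include <ck_spinlock.h>
 *
 *	static void
 *	lock_with_context(ck_spinlock_t *lock, void *context)
 *	{
 *		(void)context;
 *		ck_spinlock_lock(lock);
 *	}
 *
 *	static void
 *	unlock_with_context(ck_spinlock_t *lock, void *context)
 *	{
 *		(void)context;
 *		ck_spinlock_unlock(lock);
 *	}
 *
 *	static bool
 *	locked_with_context(ck_spinlock_t *lock, void *context)
 *	{
 *		(void)context;
 *		return ck_spinlock_locked(lock);
 *	}
 *
 *	CK_COHORT_PROTOTYPE(example,
 *	    lock_with_context, unlock_with_context, locked_with_context,
 *	    lock_with_context, unlock_with_context, locked_with_context)
 *
 *	static ck_spinlock_t global_lock = CK_SPINLOCK_INITIALIZER;
 *	static ck_spinlock_t local_lock = CK_SPINLOCK_INITIALIZER;
 *	static CK_COHORT_INSTANCE(example) cohort = CK_COHORT_INITIALIZER;
 *
 *	CK_COHORT_INIT(example, &cohort, &global_lock, &local_lock,
 *	    CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
 *
 *	CK_COHORT_LOCK(example, &cohort, NULL, NULL);
 *	...critical section...
 *	CK_COHORT_UNLOCK(example, &cohort, NULL, NULL);
 *
 * Typically one local lock (and one cohort) is created per NUMA node, all
 * sharing a single global lock, so that ownership tends to be passed among
 * threads on the same node while the global lock remains held.
 */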