/*-
 * Copyright (c) 2001 Matthew Dillon.  All Rights Reserved.  Copyright
 * terms are as specified in the COPYRIGHT file at the base of the source
 * tree.
 *
 * Mutex pool routines.  These routines are designed to be used as short
 * term leaf mutexes (e.g. the last mutex you might acquire other than
 * calling msleep()).  They operate using a shared pool.  A mutex is chosen
 * from the pool based on the supplied pointer (which may or may not be
 * valid).
 *
 * Advantages:
 *	- no structural overhead.  Mutexes can be associated with structures
 *	  without adding bloat to the structures.
 *	- mutexes can be obtained for invalid pointers, useful when using
 *	  mutexes to interlock destructor ops.
 *	- no initialization/destructor overhead.
 *	- can be used with msleep.
 *
 * Disadvantages:
 *	- should generally only be used as leaf mutexes.
 *	- pool/pool dependency ordering cannot be depended on.
 *	- possible L1 cache mastership contention between cpus.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>

MALLOC_DEFINE(M_MTXPOOL, "mtx_pool", "mutex pool");

/* Pool sizes must be a power of two */
#ifndef MTX_POOL_LOCKBUILDER_SIZE
#define MTX_POOL_LOCKBUILDER_SIZE	128
#endif
#ifndef MTX_POOL_SLEEP_SIZE
#define MTX_POOL_SLEEP_SIZE		128
#endif

struct mtxpool_header {
	int mtxpool_size;
	int mtxpool_mask;
	int mtxpool_shift;
	int mtxpool_next;
};

struct mtx_pool {
	struct mtxpool_header mtx_pool_header;
	struct mtx mtx_pool_ary[1];
};

static struct mtx_pool_lockbuilder {
	struct mtxpool_header mtx_pool_header;
	struct mtx mtx_pool_ary[MTX_POOL_LOCKBUILDER_SIZE];
} lockbuilder_pool;

#define mtx_pool_size	mtx_pool_header.mtxpool_size
#define mtx_pool_mask	mtx_pool_header.mtxpool_mask
#define mtx_pool_shift	mtx_pool_header.mtxpool_shift
#define mtx_pool_next	mtx_pool_header.mtxpool_next

struct mtx_pool *mtxpool_sleep;
struct mtx_pool *mtxpool_lockbuilder;

#if UINTPTR_MAX == UINT64_MAX		/* 64 bits */
# define POINTER_BITS		64
# define HASH_MULTIPLIER	11400714819323198485u	/* (2^64)*(sqrt(5)-1)/2 */
#else					/* assume 32 bits */
# define POINTER_BITS		32
# define HASH_MULTIPLIER	2654435769u		/* (2^32)*(sqrt(5)-1)/2 */
#endif

/*
 * Return the (shared) pool mutex associated with the specified address.
 * The returned mutex is a leaf level mutex, meaning that if you obtain it
 * you cannot obtain any other mutexes until you release it.  You can
 * legally msleep() on the mutex.
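 *
 * A minimal usage sketch (illustrative only; "obj" stands for any
 * hypothetical caller-supplied pointer, valid or not, and mtxpool_sleep
 * is the shared pool set up later in this file):
 *
 *	struct mtx *mtxp;
 *
 *	mtxp = mtx_pool_find(mtxpool_sleep, obj);
 *	mtx_lock(mtxp);
 *	... short leaf critical section, or msleep(obj, mtxp, ...) ...
 *	mtx_unlock(mtxp);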
 */
struct mtx *
mtx_pool_find(struct mtx_pool *pool, void *ptr)
{
	int p;

	KASSERT(pool != NULL, ("_mtx_pool_find(): null pool"));
	/*
	 * Fibonacci hash, see Knuth's
	 * _Art of Computer Programming, Volume 3 / Sorting and Searching_
	 */
	p = ((HASH_MULTIPLIER * (uintptr_t)ptr) >> pool->mtx_pool_shift) &
	    pool->mtx_pool_mask;
	return (&pool->mtx_pool_ary[p]);
}

static void
mtx_pool_initialize(struct mtx_pool *pool, const char *mtx_name, int pool_size,
    int opts)
{
	int i, maskbits;

	pool->mtx_pool_size = pool_size;
	pool->mtx_pool_mask = pool_size - 1;
	for (i = 1, maskbits = 0; (i & pool_size) == 0; i = i << 1)
		maskbits++;
	pool->mtx_pool_shift = POINTER_BITS - maskbits;
	pool->mtx_pool_next = 0;
	for (i = 0; i < pool_size; ++i)
		mtx_init(&pool->mtx_pool_ary[i], mtx_name, NULL, opts);
}

struct mtx_pool *
mtx_pool_create(const char *mtx_name, int pool_size, int opts)
{
	struct mtx_pool *pool;

	if (pool_size <= 0 || !powerof2(pool_size)) {
		printf("WARNING: %s pool size is not a power of 2.\n",
		    mtx_name);
		pool_size = 128;
	}
	MALLOC(pool, struct mtx_pool *,
	    sizeof (struct mtx_pool) + ((pool_size - 1) * sizeof (struct mtx)),
	    M_MTXPOOL, M_WAITOK | M_ZERO);
	mtx_pool_initialize(pool, mtx_name, pool_size, opts);
	return (pool);
}

void
mtx_pool_destroy(struct mtx_pool **poolp)
{
	int i;
	struct mtx_pool *pool = *poolp;

	for (i = pool->mtx_pool_size - 1; i >= 0; --i)
		mtx_destroy(&pool->mtx_pool_ary[i]);
	FREE(pool, M_MTXPOOL);
	*poolp = NULL;
}

static void
mtx_pool_setup_static(void *dummy __unused)
{
	mtx_pool_initialize((struct mtx_pool *)&lockbuilder_pool,
	    "lockbuilder mtxpool", MTX_POOL_LOCKBUILDER_SIZE,
	    MTX_DEF | MTX_NOWITNESS | MTX_QUIET);
	mtxpool_lockbuilder = (struct mtx_pool *)&lockbuilder_pool;
}

static void
mtx_pool_setup_dynamic(void *dummy __unused)
{
	mtxpool_sleep = mtx_pool_create("sleep mtxpool",
	    MTX_POOL_SLEEP_SIZE, MTX_DEF);
}

/*
 * Obtain a (shared) mutex from the pool.  The returned mutex is a leaf
 * level mutex, meaning that if you obtain it you cannot obtain any other
 * mutexes until you release it.  You can legally msleep() on the mutex.
 */
struct mtx *
mtx_pool_alloc(struct mtx_pool *pool)
{
	int i;

	KASSERT(pool != NULL, ("mtx_pool_alloc(): null pool"));
	/*
	 * mtx_pool_next is unprotected against multiple accesses,
	 * but simultaneous access by two CPUs should not be very
	 * harmful.
	 */
	i = pool->mtx_pool_next;
	pool->mtx_pool_next = (i + 1) & pool->mtx_pool_mask;
	return (&pool->mtx_pool_ary[i]);
}

/*
 * The lockbuilder pool must be initialized early because the lockmgr
 * and sx locks depend on it.  The sx locks are used in the kernel
 * memory allocator.  The lockmgr subsystem is initialized by
 * SYSINIT(..., SI_SUB_LOCKMGR, ...).
 *
 * We can't call MALLOC() to dynamically allocate the sleep pool
 * until after kmeminit() has been called, which is done by
 * SYSINIT(..., SI_SUB_KMEM, ...).
 */
SYSINIT(mtxpooli1, SI_SUB_MTX_POOL_STATIC, SI_ORDER_FIRST,
    mtx_pool_setup_static, NULL);
SYSINIT(mtxpooli2, SI_SUB_MTX_POOL_DYNAMIC, SI_ORDER_FIRST,
    mtx_pool_setup_dynamic, NULL);
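
/*
 * Illustrative sketch (not part of this file's interface): a subsystem
 * that wants a private pool, rather than the shared mtxpool_sleep, might
 * create, use, and tear one down roughly as follows.  The names "foo_pool"
 * and "sc" are hypothetical.
 *
 *	static struct mtx_pool *foo_pool;
 *
 *	foo_pool = mtx_pool_create("foo mtxpool", 32, MTX_DEF);
 *	...
 *	mtx_lock(mtx_pool_find(foo_pool, sc));
 *	... short leaf critical section on *sc ...
 *	mtx_unlock(mtx_pool_find(foo_pool, sc));
 *	...
 *	mtx_pool_destroy(&foo_pool);
 */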