/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <fmd_alloc.h>
#include <fmd_subr.h>
#include <fmd_conf.h>
#include <fmd_error.h>
#include <fmd_string.h>
#include <fmd_idspace.h>
#include <fmd.h>

fmd_idspace_t *
fmd_idspace_create(const char *name, id_t min, id_t max)
{
        fmd_idspace_t *ids = fmd_alloc(sizeof (fmd_idspace_t), FMD_SLEEP);
        uint_t ids_avg, ids_max, hashlen, hashmax;

        /*
         * Dynamically size the hash table bucket array based on the desired
         * chain length.  We hash by indexing on the low-order bits.
         * Do not permit the hash bucket array to exceed a reasonable size.
         */
        ASSERT(min >= 0 && max >= 0);
        ASSERT(max >= min);

        (void) fmd_conf_getprop(fmd.d_conf, "ids.avg", &ids_avg);
        (void) fmd_conf_getprop(fmd.d_conf, "ids.max", &ids_max);

        hashmax = max - min + 1;
        hashlen = 1 << fls(hashmax / ids_avg);
        if (hashlen > ids_max)
                hashlen = ids_max;

        (void) strlcpy(ids->ids_name, name, sizeof (ids->ids_name));
        (void) pthread_mutex_init(&ids->ids_lock, NULL);
        (void) pthread_cond_init(&ids->ids_cv, NULL);

        ids->ids_hash = fmd_zalloc(sizeof (void *) * hashlen, FMD_SLEEP);
        ids->ids_hashlen = hashlen;
        ids->ids_refs = 0;
        ids->ids_nextid = min - 1;
        ids->ids_minid = min;
        ids->ids_maxid = max;
        ids->ids_count = 0;

        return (ids);
}
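
/*
 * Editorial note (illustrative only, not part of the original source): the
 * bucket count computed above is a power of two so that the low-order-bit
 * hash used throughout this file reduces to a simple mask.  Assuming, purely
 * for the sake of example, configuration values of ids.avg = 4 and
 * ids.max = 1024, an idspace covering [1 .. 1024] sizes its table as:
 *
 *	hashmax = 1024 - 1 + 1 = 1024
 *	hashlen = 1 << fls(1024 / 4) = 1 << fls(256) = 1 << 9 = 512
 *
 * which is under the ids.max cap, so the table gets 512 buckets and ends up
 * with roughly two elements per chain when the idspace is full.
 */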

void
fmd_idspace_destroy(fmd_idspace_t *ids)
{
        fmd_idelem_t *ide, *nde;
        uint_t i;

        (void) pthread_mutex_lock(&ids->ids_lock);

        while (ids->ids_refs != 0)
                (void) pthread_cond_wait(&ids->ids_cv, &ids->ids_lock);

        for (i = 0; i < ids->ids_hashlen; i++) {
                for (ide = ids->ids_hash[i]; ide != NULL; ide = nde) {
                        nde = ide->ide_next;
                        fmd_free(ide, sizeof (fmd_idelem_t));
                }
        }

        fmd_free(ids->ids_hash, sizeof (void *) * ids->ids_hashlen);
        fmd_free(ids, sizeof (fmd_idspace_t));
}

void
fmd_idspace_apply(fmd_idspace_t *ids,
    void (*func)(fmd_idspace_t *, id_t, void *), void *arg)
{
        fmd_idelem_t *ide;
        id_t *ida, *idp;
        uint_t i, count;

        (void) pthread_mutex_lock(&ids->ids_lock);
        count = ids->ids_count;
        ida = idp = fmd_alloc(sizeof (id_t) * count, FMD_SLEEP);

        for (i = 0; i < ids->ids_hashlen; i++) {
                for (ide = ids->ids_hash[i]; ide != NULL; ide = ide->ide_next)
                        *idp++ = ide->ide_id;
        }

        ASSERT(idp == ida + count);
        (void) pthread_mutex_unlock(&ids->ids_lock);

        for (i = 0; i < count; i++)
                func(ids, ida[i], arg);

        fmd_free(ida, sizeof (id_t) * count);
}

static fmd_idelem_t *
fmd_idspace_lookup(fmd_idspace_t *ids, id_t id)
{
        fmd_idelem_t *ide;

        ASSERT(MUTEX_HELD(&ids->ids_lock));
        ide = ids->ids_hash[id & (ids->ids_hashlen - 1)];

        for (; ide != NULL; ide = ide->ide_next) {
                if (ide->ide_id == id)
                        break;
        }

        return (ide);
}

void *
fmd_idspace_getspecific(fmd_idspace_t *ids, id_t id)
{
        fmd_idelem_t *ide;
        void *data;

        (void) pthread_mutex_lock(&ids->ids_lock);
        ide = fmd_idspace_lookup(ids, id);
        data = ide ? ide->ide_data : NULL;
        (void) pthread_mutex_unlock(&ids->ids_lock);

        return (data);
}

void
fmd_idspace_setspecific(fmd_idspace_t *ids, id_t id, void *data)
{
        fmd_idelem_t *ide;

        (void) pthread_mutex_lock(&ids->ids_lock);

        while (ids->ids_refs != 0)
                (void) pthread_cond_wait(&ids->ids_cv, &ids->ids_lock);

        if ((ide = fmd_idspace_lookup(ids, id)) == NULL) {
                fmd_panic("idspace %p (%s) does not contain id %ld",
                    (void *)ids, ids->ids_name, id);
        }

        ide->ide_data = data;
        (void) pthread_mutex_unlock(&ids->ids_lock);
}

int
fmd_idspace_contains(fmd_idspace_t *ids, id_t id)
{
        fmd_idelem_t *ide;

        (void) pthread_mutex_lock(&ids->ids_lock);
        ide = fmd_idspace_lookup(ids, id);
        (void) pthread_mutex_unlock(&ids->ids_lock);

        return (ide != NULL);
}

int
fmd_idspace_valid(fmd_idspace_t *ids, id_t id)
{
        return (id >= ids->ids_minid && id <= ids->ids_maxid);
}

static id_t
fmd_idspace_xalloc_locked(fmd_idspace_t *ids, id_t id, void *data)
{
        fmd_idelem_t *ide;
        uint_t h;

        if (id < ids->ids_minid || id > ids->ids_maxid) {
                fmd_panic("%ld out of range [%ld .. %ld] for idspace %p (%s)\n",
                    id, ids->ids_minid, ids->ids_maxid,
                    (void *)ids, ids->ids_name);
        }

        if (fmd_idspace_lookup(ids, id) != NULL)
                return (fmd_set_errno(EALREADY));

        ide = fmd_alloc(sizeof (fmd_idelem_t), FMD_SLEEP);
        h = id & (ids->ids_hashlen - 1);

        ide->ide_next = ids->ids_hash[h];
        ide->ide_data = data;
        ide->ide_id = id;

        ids->ids_hash[h] = ide;
        ids->ids_count++;

        return (id);
}

id_t
fmd_idspace_xalloc(fmd_idspace_t *ids, id_t id, void *data)
{
        (void) pthread_mutex_lock(&ids->ids_lock);
        id = fmd_idspace_xalloc_locked(ids, id, data);
        (void) pthread_mutex_unlock(&ids->ids_lock);
        return (id);
}

static id_t
fmd_idspace_alloc_locked(fmd_idspace_t *ids, void *data)
{
        id_t id;

        ASSERT(MUTEX_HELD(&ids->ids_lock));

        if (ids->ids_count == ids->ids_maxid - ids->ids_minid + 1)
                return (fmd_set_errno(ENOSPC));

        do {
                if (++ids->ids_nextid > ids->ids_maxid)
                        ids->ids_nextid = ids->ids_minid;
                id = ids->ids_nextid;
        } while (fmd_idspace_xalloc_locked(ids, id, data) != id);

        return (id);
}

id_t
fmd_idspace_alloc(fmd_idspace_t *ids, void *data)
{
        id_t id;

        (void) pthread_mutex_lock(&ids->ids_lock);
        id = fmd_idspace_alloc_locked(ids, data);
        (void) pthread_mutex_unlock(&ids->ids_lock);

        return (id);
}
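
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * a hypothetical caller can either request a specific identifier with
 * fmd_idspace_xalloc() or let the idspace choose the next free one with
 * fmd_idspace_alloc():
 *
 *	id_t id;
 *
 *	if ((id = fmd_idspace_xalloc(ids, 42, obj)) != 42)
 *		id = fmd_idspace_alloc(ids, obj);
 *
 * Here obj is a hypothetical data pointer.  fmd_idspace_xalloc() fails with
 * errno set to EALREADY if id 42 is already allocated, and panics if 42 lies
 * outside [ids_minid .. ids_maxid]; fmd_idspace_alloc() fails with errno set
 * to ENOSPC once every identifier in the range is in use.
 */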

/*
 * For the moment, we use a simple but slow implementation: reset ids_nextid
 * to the minimum id and search in order from there.  If this becomes
 * performance-sensitive we can maintain a freelist of the unallocated
 * identifiers, etc.
 */
id_t
fmd_idspace_alloc_min(fmd_idspace_t *ids, void *data)
{
        id_t id;

        (void) pthread_mutex_lock(&ids->ids_lock);
        ids->ids_nextid = ids->ids_minid - 1;
        id = fmd_idspace_alloc_locked(ids, data);
        (void) pthread_mutex_unlock(&ids->ids_lock);

        return (id);
}

void *
fmd_idspace_free(fmd_idspace_t *ids, id_t id)
{
        fmd_idelem_t *ide, **pp;
        void *data;

        (void) pthread_mutex_lock(&ids->ids_lock);
        pp = &ids->ids_hash[id & (ids->ids_hashlen - 1)];

        for (ide = *pp; ide != NULL; ide = ide->ide_next) {
                if (ide->ide_id != id)
                        pp = &ide->ide_next;
                else
                        break;
        }

        if (ide == NULL) {
                (void) pthread_mutex_unlock(&ids->ids_lock);
                return (NULL);
        }

        data = ide->ide_data;
        *pp = ide->ide_next;
        fmd_free(ide, sizeof (fmd_idelem_t));

        ASSERT(ids->ids_count != 0);
        ids->ids_count--;

        (void) pthread_mutex_unlock(&ids->ids_lock);
        return (data);
}

/*
 * Retrieve the id-specific data for the specified id and place a hold on the
 * id so that it cannot be destroyed or deleted until fmd_idspace_rele(ids, id)
 * is called.  For simplicity, we currently use a single global reference count
 * for all holds.  If this feature needs to be used in a place where there is
 * high contention between holders and deleters, the implementation can be
 * modified to use either a per-hash-bucket or a per-id-element condition
 * variable.
 */
void *
fmd_idspace_hold(fmd_idspace_t *ids, id_t id)
{
        fmd_idelem_t *ide;
        void *data = NULL;

        (void) pthread_mutex_lock(&ids->ids_lock);

        if ((ide = fmd_idspace_lookup(ids, id)) != NULL) {
                ids->ids_refs++;
                ASSERT(ids->ids_refs != 0);
                data = ide->ide_data;
                ASSERT(data != NULL);
        }

        (void) pthread_mutex_unlock(&ids->ids_lock);
        return (data);
}

void
fmd_idspace_rele(fmd_idspace_t *ids, id_t id)
{
        (void) pthread_mutex_lock(&ids->ids_lock);

        ASSERT(fmd_idspace_lookup(ids, id) != NULL);
        ASSERT(ids->ids_refs != 0);
        ids->ids_refs--;

        (void) pthread_cond_broadcast(&ids->ids_cv);
        (void) pthread_mutex_unlock(&ids->ids_lock);
}
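
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * a hypothetical consumer uses the hold/rele pair to keep the id-specific
 * data from being changed or torn down underneath it:
 *
 *	void *data;
 *
 *	if ((data = fmd_idspace_hold(ids, id)) != NULL) {
 *		use_data(data);
 *		fmd_idspace_rele(ids, id);
 *	}
 *
 * where use_data() stands for whatever work the hypothetical caller performs.
 * While the hold is outstanding, fmd_idspace_destroy() and
 * fmd_idspace_setspecific() block in pthread_cond_wait() until ids_refs
 * drops back to zero.
 */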