xref: /illumos-gate/usr/src/cmd/fm/fmd/common/fmd_idspace.c (revision e4d060fb4c00d44cd578713eb9a921f594b733b8)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <fmd_alloc.h>
#include <fmd_subr.h>
#include <fmd_conf.h>
#include <fmd_error.h>
#include <fmd_string.h>
#include <fmd_idspace.h>
#include <fmd.h>
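
/*
 * Return one plus the bit position of the highest bit set in i (that is,
 * the position of the most significant set bit, counting from one), or
 * zero if i is zero.  This is used below to size the hash bucket array to
 * a power of two.
 */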
static int
highbit(ulong_t i)
{
	int h = 1;

	if (i == 0)
		return (0);

#ifdef _LP64
	if (i & 0xffffffff00000000ul) {
		h += 32;
		i >>= 32;
	}
#endif

	if (i & 0xffff0000) {
		h += 16;
		i >>= 16;
	}

	if (i & 0xff00) {
		h += 8;
		i >>= 8;
	}

	if (i & 0xf0) {
		h += 4;
		i >>= 4;
	}

	if (i & 0xc) {
		h += 2;
		i >>= 2;
	}

	if (i & 0x2)
		h += 1;

	return (h);
}
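
/*
 * Create a new idspace covering the inclusive identifier range [min, max].
 * The hash bucket array is sized from the "ids.avg" and "ids.max"
 * configuration properties (the desired average chain length and a cap on
 * the number of buckets, respectively).
 */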
fmd_idspace_t *
fmd_idspace_create(const char *name, id_t min, id_t max)
{
	fmd_idspace_t *ids = fmd_alloc(sizeof (fmd_idspace_t), FMD_SLEEP);
	uint_t ids_avg, ids_max, hashlen, hashmax;

	/*
	 * Dynamically size the hash table bucket array based on the desired
	 * chain length.  We hash by indexing on the low-order bits.
	 * Do not permit the hash bucket array to exceed a reasonable size.
	 */
	ASSERT(min >= 0 && max >= 0);
	ASSERT(max >= min);

	(void) fmd_conf_getprop(fmd.d_conf, "ids.avg", &ids_avg);
	(void) fmd_conf_getprop(fmd.d_conf, "ids.max", &ids_max);

	hashmax = max - min + 1;
	hashlen = 1 << highbit(hashmax / ids_avg);
	if (hashlen > ids_max)
		hashlen = ids_max;

	(void) strlcpy(ids->ids_name, name, sizeof (ids->ids_name));
	(void) pthread_mutex_init(&ids->ids_lock, NULL);
	(void) pthread_cond_init(&ids->ids_cv, NULL);

	ids->ids_hash = fmd_zalloc(sizeof (void *) * hashlen, FMD_SLEEP);
	ids->ids_hashlen = hashlen;
	ids->ids_refs = 0;
	ids->ids_nextid = min - 1;
	ids->ids_minid = min;
	ids->ids_maxid = max;
	ids->ids_count = 0;

	return (ids);
}
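
/*
 * Destroy an idspace: wait for any outstanding holds to be released, free
 * every remaining id element, and then free the bucket array and the
 * idspace structure itself.
 */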
void
fmd_idspace_destroy(fmd_idspace_t *ids)
{
	fmd_idelem_t *ide, *nde;
	uint_t i;

	(void) pthread_mutex_lock(&ids->ids_lock);

	while (ids->ids_refs != 0)
		(void) pthread_cond_wait(&ids->ids_cv, &ids->ids_lock);

	for (i = 0; i < ids->ids_hashlen; i++) {
		for (ide = ids->ids_hash[i]; ide != NULL; ide = nde) {
			nde = ide->ide_next;
			fmd_free(ide, sizeof (fmd_idelem_t));
		}
	}

	fmd_free(ids->ids_hash, sizeof (void *) * ids->ids_hashlen);
	fmd_free(ids, sizeof (fmd_idspace_t));
}
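
/*
 * Apply the callback 'func' to every id currently present in the idspace.
 * We snapshot the set of ids with ids_lock held and then invoke the
 * callback with the lock dropped, so 'func' may safely call back into
 * idspace routines.
 */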
void
fmd_idspace_apply(fmd_idspace_t *ids,
    void (*func)(fmd_idspace_t *, id_t, void *), void *arg)
{
	fmd_idelem_t *ide;
	id_t *ida, *idp;
	uint_t i, count;

	(void) pthread_mutex_lock(&ids->ids_lock);
	count = ids->ids_count;
	ida = idp = fmd_alloc(sizeof (id_t) * count, FMD_SLEEP);

	for (i = 0; i < ids->ids_hashlen; i++) {
		for (ide = ids->ids_hash[i]; ide != NULL; ide = ide->ide_next)
			*idp++ = ide->ide_id;
	}

	ASSERT(idp == ida + count);
	(void) pthread_mutex_unlock(&ids->ids_lock);

	for (i = 0; i < count; i++)
		func(ids, ida[i], arg);

	fmd_free(ida, sizeof (id_t) * count);
}
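
/*
 * Look up the element for 'id' in the hash, returning NULL if it is not
 * present.  The caller must hold ids_lock.
 */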
static fmd_idelem_t *
fmd_idspace_lookup(fmd_idspace_t *ids, id_t id)
{
	fmd_idelem_t *ide;

	ASSERT(MUTEX_HELD(&ids->ids_lock));
	ide = ids->ids_hash[id & (ids->ids_hashlen - 1)];

	for (; ide != NULL; ide = ide->ide_next) {
		if (ide->ide_id == id)
			break;
	}

	return (ide);
}
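
/*
 * Return the data pointer associated with 'id', or NULL if the id is not
 * currently allocated in this idspace.
 */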
void *
fmd_idspace_getspecific(fmd_idspace_t *ids, id_t id)
{
	fmd_idelem_t *ide;
	void *data;

	(void) pthread_mutex_lock(&ids->ids_lock);
	ide = fmd_idspace_lookup(ids, id);
	data = ide ? ide->ide_data : NULL;
	(void) pthread_mutex_unlock(&ids->ids_lock);

	return (data);
}
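
/*
 * Set the data pointer associated with 'id', which must already be
 * allocated in the idspace (we panic if it is not).  We first wait for any
 * outstanding holds on the idspace to be released.
 */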
void
fmd_idspace_setspecific(fmd_idspace_t *ids, id_t id, void *data)
{
	fmd_idelem_t *ide;

	(void) pthread_mutex_lock(&ids->ids_lock);

	while (ids->ids_refs != 0)
		(void) pthread_cond_wait(&ids->ids_cv, &ids->ids_lock);

	if ((ide = fmd_idspace_lookup(ids, id)) == NULL) {
		fmd_panic("idspace %p (%s) does not contain id %ld",
		    (void *)ids, ids->ids_name, id);
	}

	ide->ide_data = data;
	(void) pthread_mutex_unlock(&ids->ids_lock);
}
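
/*
 * Return non-zero if 'id' is currently allocated in the idspace.
 */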
int
fmd_idspace_contains(fmd_idspace_t *ids, id_t id)
{
	fmd_idelem_t *ide;

	(void) pthread_mutex_lock(&ids->ids_lock);
	ide = fmd_idspace_lookup(ids, id);
	(void) pthread_mutex_unlock(&ids->ids_lock);

	return (ide != NULL);
}
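
/*
 * Return non-zero if 'id' falls within the legal range [ids_minid,
 * ids_maxid] for this idspace, regardless of whether it is allocated.
 */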
int
fmd_idspace_valid(fmd_idspace_t *ids, id_t id)
{
	return (id >= ids->ids_minid && id <= ids->ids_maxid);
}
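
/*
 * Allocate the specific identifier 'id' with the given data pointer; the
 * caller must hold ids_lock.  We panic if the id is outside the idspace's
 * legal range, and fail with EALREADY if the id is already allocated.
 */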
static id_t
fmd_idspace_xalloc_locked(fmd_idspace_t *ids, id_t id, void *data)
{
	fmd_idelem_t *ide;
	uint_t h;

	if (id < ids->ids_minid || id > ids->ids_maxid) {
		fmd_panic("%ld out of range [%ld .. %ld] for idspace %p (%s)\n",
		    id, ids->ids_minid, ids->ids_maxid,
		    (void *)ids, ids->ids_name);
	}

	if (fmd_idspace_lookup(ids, id) != NULL)
		return (fmd_set_errno(EALREADY));

	ide = fmd_alloc(sizeof (fmd_idelem_t), FMD_SLEEP);
	h = id & (ids->ids_hashlen - 1);

	ide->ide_next = ids->ids_hash[h];
	ide->ide_data = data;
	ide->ide_id = id;

	ids->ids_hash[h] = ide;
	ids->ids_count++;

	return (id);
}
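
/*
 * Allocate the specific identifier 'id' and associate 'data' with it.
 */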
id_t
fmd_idspace_xalloc(fmd_idspace_t *ids, id_t id, void *data)
{
	(void) pthread_mutex_lock(&ids->ids_lock);
	id = fmd_idspace_xalloc_locked(ids, id, data);
	(void) pthread_mutex_unlock(&ids->ids_lock);
	return (id);
}
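
/*
 * Allocate the next available identifier, searching forward from
 * ids_nextid and wrapping around to ids_minid as needed; the caller must
 * hold ids_lock.  Fails with ENOSPC if every id in the range is in use.
 */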
static id_t
fmd_idspace_alloc_locked(fmd_idspace_t *ids, void *data)
{
	id_t id;

	ASSERT(MUTEX_HELD(&ids->ids_lock));

	if (ids->ids_count == ids->ids_maxid - ids->ids_minid + 1)
		return (fmd_set_errno(ENOSPC));

	do {
		if (++ids->ids_nextid > ids->ids_maxid)
			ids->ids_nextid = ids->ids_minid;
		id = ids->ids_nextid;
	} while (fmd_idspace_xalloc_locked(ids, id, data) != id);

	return (id);
}
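
/*
 * Allocate the next available identifier and associate 'data' with it.
 */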
id_t
fmd_idspace_alloc(fmd_idspace_t *ids, void *data)
{
	id_t id;

	(void) pthread_mutex_lock(&ids->ids_lock);
	id = fmd_idspace_alloc_locked(ids, data);
	(void) pthread_mutex_unlock(&ids->ids_lock);

	return (id);
}

/*
 * For the moment, we use a simple but slow implementation: reset ids_nextid to
 * the minimum id and search in order from there.  If this becomes performance
 * sensitive we can maintain a freelist of the unallocated identifiers, etc.
 */
id_t
fmd_idspace_alloc_min(fmd_idspace_t *ids, void *data)
{
	id_t id;

	(void) pthread_mutex_lock(&ids->ids_lock);
	ids->ids_nextid = ids->ids_minid - 1;
	id = fmd_idspace_alloc_locked(ids, data);
	(void) pthread_mutex_unlock(&ids->ids_lock);

	return (id);
}
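
/*
 * Remove 'id' from the idspace and return its associated data pointer, or
 * NULL if the id is not currently allocated.
 */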
void *
fmd_idspace_free(fmd_idspace_t *ids, id_t id)
{
	fmd_idelem_t *ide, **pp;
	void *data;

	(void) pthread_mutex_lock(&ids->ids_lock);
	pp = &ids->ids_hash[id & (ids->ids_hashlen - 1)];

	for (ide = *pp; ide != NULL; ide = ide->ide_next) {
		if (ide->ide_id != id)
			pp = &ide->ide_next;
		else
			break;
	}

	if (ide == NULL) {
		(void) pthread_mutex_unlock(&ids->ids_lock);
		return (NULL);
	}

	data = ide->ide_data;
	*pp = ide->ide_next;
	fmd_free(ide, sizeof (fmd_idelem_t));

	ASSERT(ids->ids_count != 0);
	ids->ids_count--;

	(void) pthread_mutex_unlock(&ids->ids_lock);
	return (data);
}

/*
 * Retrieve the id-specific data for the specified id and place a hold on the
 * id so that it cannot be freed or deleted until fmd_idspace_rele(ids, id) is
 * called.  For simplicity, we now use a single global reference count for all
 * holds.  If this feature needs to be used in a place where there is high
 * contention between holders and deleters, the implementation can be modified
 * to use either a per-hash-bucket or a per-id-element condition variable.
 */
void *
fmd_idspace_hold(fmd_idspace_t *ids, id_t id)
{
	fmd_idelem_t *ide;
	void *data = NULL;

	(void) pthread_mutex_lock(&ids->ids_lock);

	if ((ide = fmd_idspace_lookup(ids, id)) != NULL) {
		ids->ids_refs++;
		ASSERT(ids->ids_refs != 0);
		data = ide->ide_data;
		ASSERT(data != NULL);
	}

	(void) pthread_mutex_unlock(&ids->ids_lock);
	return (data);
}
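
/*
 * Release a hold previously placed on 'id' by fmd_idspace_hold(), waking
 * any threads waiting on ids_cv for the holds to drain.
 */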
void
fmd_idspace_rele(fmd_idspace_t *ids, id_t id)
{
	(void) pthread_mutex_lock(&ids->ids_lock);

	ASSERT(fmd_idspace_lookup(ids, id) != NULL);
	ASSERT(ids->ids_refs != 0);
	ids->ids_refs--;

	(void) pthread_cond_broadcast(&ids->ids_cv);
	(void) pthread_mutex_unlock(&ids->ids_lock);
}