1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #pragma ident "%Z%%M% %I% %E% SMI"
28
29 /*
30 * This file contains most of the functionality
31 * required to support the threads portion of libc_db.
32 */
33
34 #include "lint.h"
35 #include "thr_uberdata.h"
36
/*
 * Empty stub functions for thread events, one per TD_* event type.
 * They intentionally do nothing: their addresses are published in
 * the tdb_events[] table below so that a debugger (via libc_db)
 * can plant breakpoints on them and thereby be notified when the
 * corresponding thread event occurs.
 */
static void
tdb_event_ready(void) {}

static void
tdb_event_sleep(void) {}

static void
tdb_event_switchto(void) {}

static void
tdb_event_switchfrom(void) {}

static void
tdb_event_lock_try(void) {}

static void
tdb_event_catchsig(void) {}

static void
tdb_event_idle(void) {}

static void
tdb_event_create(void) {}

static void
tdb_event_death(void) {}

static void
tdb_event_preempt(void) {}

static void
tdb_event_pri_inherit(void) {}

static void
tdb_event_reap(void) {}

static void
tdb_event_concurrency(void) {}

static void
tdb_event_timeout(void) {}
78
79 /*
80 * uberflags.uf_tdb_register_sync is set to REGISTER_SYNC_ENABLE by a debugger
81 * to empty the table and then enable synchronization object registration.
82 *
83 * uberflags.uf_tdb_register_sync is set to REGISTER_SYNC_DISABLE by a debugger
84 * to empty the table and then disable synchronization object registration.
85 */
86
/*
 * Table of event stub functions, indexed by event number offset from
 * TD_MIN_EVENT_NUM.  The order of the entries must match the numeric
 * order of the TD_* event codes; do not reorder.
 */
const tdb_ev_func_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1] = {
	tdb_event_ready,
	tdb_event_sleep,
	tdb_event_switchto,
	tdb_event_switchfrom,
	tdb_event_lock_try,
	tdb_event_catchsig,
	tdb_event_idle,
	tdb_event_create,
	tdb_event_death,
	tdb_event_preempt,
	tdb_event_pri_inherit,
	tdb_event_reap,
	tdb_event_concurrency,
	tdb_event_timeout
};
103
104 #if TDB_HASH_SHIFT != 15
105 #error "this is all broken because TDB_HASH_SHIFT is not 15"
106 #endif
107
/*
 * Fold a synchronization object address down to a hash table index.
 * The table has 32K entries (TDB_HASH_SHIFT == 15); the fold below
 * hard-codes that size.
 */
static uint_t
tdb_addr_hash(void *addr)
{
#ifdef _LP64
	/* discard the low 4 alignment bits, then fold 60 bits to 30 */
	uint64_t wide = ((uintptr_t)addr >> 4);
	uint32_t folded = (uint32_t)((wide >> 30) ^ (wide & 0x3fffffff));
#else
	/* discard the low 2 alignment bits, leaving 30 bits */
	uint32_t folded = ((uintptr_t)addr >> 2);
#endif
	/* fold 30 bits to the final 15-bit index */
	return ((folded >> 15) ^ (folded & 0x7fff));
}
123
/*
 * Take one tdb_sync_stats_t element off the free list, growing the
 * free list with a fresh mmap() chunk when it is exhausted.  Returns
 * NULL on allocation failure and never retries after mmap() has
 * failed once.  Must be called with tdb_hash_lock held.
 */
static tdb_sync_stats_t *
alloc_sync_addr(void *addr)
{
	uberdata_t *udp = curthread->ul_uberdata;
	tdb_t *tdbp = &udp->tdb;
	tdb_sync_stats_t *sap;

	ASSERT(MUTEX_OWNED(&udp->tdb_hash_lock, curthread));

	if ((sap = tdbp->tdb_sync_addr_free) == NULL) {
		void *vaddr;
		int i;

		/*
		 * Don't keep trying after mmap() has already failed.
		 */
		if (tdbp->tdb_hash_alloc_failed)
			return (NULL);

		/* double the allocation each time */
		tdbp->tdb_sync_alloc *= 2;
		if ((vaddr = mmap(NULL,
		    tdbp->tdb_sync_alloc * sizeof (tdb_sync_stats_t),
		    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON,
		    -1, (off_t)0)) == MAP_FAILED) {
			tdbp->tdb_hash_alloc_failed = 1;
			return (NULL);
		}
		/* chain the new chunk into a NULL-terminated free list */
		sap = tdbp->tdb_sync_addr_free = vaddr;
		for (i = 1; i < tdbp->tdb_sync_alloc; sap++, i++)
			sap->next = (uintptr_t)(sap + 1);
		sap->next = (uintptr_t)0;
		tdbp->tdb_sync_addr_last = sap;

		sap = tdbp->tdb_sync_addr_free;
	}

	/* pop the head of the free list and initialize it for 'addr' */
	tdbp->tdb_sync_addr_free = (tdb_sync_stats_t *)(uintptr_t)sap->next;
	sap->next = (uintptr_t)0;
	sap->sync_addr = (uintptr_t)addr;
	(void) memset(&sap->un, 0, sizeof (sap->un));
	return (sap);
}
167
168 static void
initialize_sync_hash()169 initialize_sync_hash()
170 {
171 uberdata_t *udp = curthread->ul_uberdata;
172 tdb_t *tdbp = &udp->tdb;
173 uint64_t *addr_hash;
174 tdb_sync_stats_t *sap;
175 void *vaddr;
176 int i;
177
178 if (tdbp->tdb_hash_alloc_failed)
179 return;
180 lmutex_lock(&udp->tdb_hash_lock);
181 if (udp->uberflags.uf_tdb_register_sync == REGISTER_SYNC_DISABLE) {
182 /*
183 * There is no point allocating the hash table
184 * if we are disabling registration.
185 */
186 udp->uberflags.uf_tdb_register_sync = REGISTER_SYNC_OFF;
187 lmutex_unlock(&udp->tdb_hash_lock);
188 return;
189 }
190 if (tdbp->tdb_sync_addr_hash != NULL || tdbp->tdb_hash_alloc_failed) {
191 lmutex_unlock(&udp->tdb_hash_lock);
192 return;
193 }
194 /* start with a free list of 2k elements */
195 tdbp->tdb_sync_alloc = 2*1024;
196 if ((vaddr = mmap(NULL, TDB_HASH_SIZE * sizeof (uint64_t) +
197 tdbp->tdb_sync_alloc * sizeof (tdb_sync_stats_t),
198 PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON,
199 -1, (off_t)0)) == MAP_FAILED) {
200 tdbp->tdb_hash_alloc_failed = 1;
201 return;
202 }
203 addr_hash = vaddr;
204
205 /* initialize the free list */
206 tdbp->tdb_sync_addr_free = sap =
207 (tdb_sync_stats_t *)&addr_hash[TDB_HASH_SIZE];
208 for (i = 1; i < tdbp->tdb_sync_alloc; sap++, i++)
209 sap->next = (uintptr_t)(sap + 1);
210 sap->next = (uintptr_t)0;
211 tdbp->tdb_sync_addr_last = sap;
212
213 /* insert &udp->tdb_hash_lock itself into the new (empty) table */
214 udp->tdb_hash_lock_stats.next = (uintptr_t)0;
215 udp->tdb_hash_lock_stats.sync_addr = (uintptr_t)&udp->tdb_hash_lock;
216 addr_hash[tdb_addr_hash(&udp->tdb_hash_lock)] =
217 (uintptr_t)&udp->tdb_hash_lock_stats;
218
219 tdbp->tdb_register_count = 1;
220 /* assign to tdb_sync_addr_hash only after fully initialized */
221 membar_producer();
222 tdbp->tdb_sync_addr_hash = addr_hash;
223 lmutex_unlock(&udp->tdb_hash_lock);
224 }
225
/*
 * Look up (registering if necessary) the statistics block for the
 * synchronization object at 'addr'.  Returns NULL when statistics
 * gathering is off, when called recursively, or on allocation failure.
 * If 'new' is non-NULL, *new is set to 1 when a fresh element was
 * inserted for this address, 0 otherwise.
 */
tdb_sync_stats_t *
tdb_sync_obj_register(void *addr, int *new)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_t *tdbp = &udp->tdb;
	uint64_t *sapp;
	tdb_sync_stats_t *sap = NULL;
	int locked = 0;
	int i;

	/*
	 * Don't start statistics collection until
	 * we have initialized the primary link map.
	 */
	if (!self->ul_primarymap)
		return (NULL);

	if (new)
		*new = 0;
	/*
	 * To avoid recursion problems, we must do two things:
	 * 1. Make a special case for tdb_hash_lock (we use it internally).
	 * 2. Deal with the dynamic linker's lock interface:
	 *    When calling any external function, we may invoke the
	 *    dynamic linker.  It grabs a lock, which calls back here.
	 *    This only happens on the first call to the external
	 *    function, so we can just return NULL if we are called
	 *    recursively (and miss the first count).
	 */
	if (addr == (void *)&udp->tdb_hash_lock)
		return (&udp->tdb_hash_lock_stats);
	if (self->ul_sync_obj_reg) /* recursive call */
		return (NULL);
	self->ul_sync_obj_reg = 1;

	/*
	 * On the first time through, initialize the hash table and free list.
	 */
	if (tdbp->tdb_sync_addr_hash == NULL) {
		initialize_sync_hash();
		if (tdbp->tdb_sync_addr_hash == NULL) {	/* utter failure */
			udp->uberflags.uf_tdb_register_sync = REGISTER_SYNC_OFF;
			goto out;
		}
	}
	/* pairs with the membar_producer() in initialize_sync_hash() */
	membar_consumer();

	sapp = &tdbp->tdb_sync_addr_hash[tdb_addr_hash(addr)];
	if (udp->uberflags.uf_tdb_register_sync == REGISTER_SYNC_ON) {
		/*
		 * Look up an address in the synchronization object hash table.
		 * No lock is required since it can only deliver a false
		 * negative, in which case we fall into the locked case below.
		 */
		for (sap = (tdb_sync_stats_t *)(uintptr_t)*sapp; sap != NULL;
		    sap = (tdb_sync_stats_t *)(uintptr_t)sap->next) {
			if (sap->sync_addr == (uintptr_t)addr)
				goto out;
		}
	}

	/*
	 * The search with no lock held failed or a special action is required.
	 * Grab tdb_hash_lock to do special actions and/or get a precise result.
	 */
	lmutex_lock(&udp->tdb_hash_lock);
	locked = 1;

	switch (udp->uberflags.uf_tdb_register_sync) {
	case REGISTER_SYNC_ON:
		break;
	case REGISTER_SYNC_OFF:
		goto out;
	default:
		/*
		 * For all debugger actions (REGISTER_SYNC_ENABLE or
		 * REGISTER_SYNC_DISABLE), first zero out the
		 * statistics block of every element in the hash table.
		 */
		for (i = 0; i < TDB_HASH_SIZE; i++)
			for (sap = (tdb_sync_stats_t *)
			    (uintptr_t)tdbp->tdb_sync_addr_hash[i];
			    sap != NULL;
			    sap = (tdb_sync_stats_t *)(uintptr_t)sap->next)
				(void) memset(&sap->un, 0, sizeof (sap->un));

		/* then transition to the steady ON/OFF state requested */
		switch (udp->uberflags.uf_tdb_register_sync) {
		case REGISTER_SYNC_ENABLE:
			udp->uberflags.uf_tdb_register_sync = REGISTER_SYNC_ON;
			break;
		case REGISTER_SYNC_DISABLE:
		default:
			udp->uberflags.uf_tdb_register_sync = REGISTER_SYNC_OFF;
			goto out;
		}
		break;
	}

	/*
	 * Perform the search while holding tdb_hash_lock.
	 * Keep track of the insertion point.
	 */
	while ((sap = (tdb_sync_stats_t *)(uintptr_t)*sapp) != NULL) {
		if (sap->sync_addr == (uintptr_t)addr)
			break;
		sapp = &sap->next;
	}

	/*
	 * Insert a new element if necessary.
	 */
	if (sap == NULL && (sap = alloc_sync_addr(addr)) != NULL) {
		*sapp = (uintptr_t)sap;
		tdbp->tdb_register_count++;
		if (new)
			*new = 1;
	}

out:
	if (locked)
		lmutex_unlock(&udp->tdb_hash_lock);
	self->ul_sync_obj_reg = 0;
	return (sap);
}
350
351 void
tdb_sync_obj_deregister(void * addr)352 tdb_sync_obj_deregister(void *addr)
353 {
354 uberdata_t *udp = curthread->ul_uberdata;
355 tdb_t *tdbp = &udp->tdb;
356 uint64_t *sapp;
357 tdb_sync_stats_t *sap;
358 uint_t hash;
359
360 /*
361 * tdb_hash_lock is never destroyed.
362 */
363 ASSERT(addr != &udp->tdb_hash_lock);
364
365 /*
366 * Avoid acquiring tdb_hash_lock if lock statistics gathering has
367 * never been initiated or there is nothing in the hash bucket.
368 * (Once the hash table is allocated, it is never deallocated.)
369 */
370 if (tdbp->tdb_sync_addr_hash == NULL ||
371 tdbp->tdb_sync_addr_hash[hash = tdb_addr_hash(addr)] == NULL)
372 return;
373
374 lmutex_lock(&udp->tdb_hash_lock);
375 sapp = &tdbp->tdb_sync_addr_hash[hash];
376 while ((sap = (tdb_sync_stats_t *)(uintptr_t)*sapp) != NULL) {
377 if (sap->sync_addr == (uintptr_t)addr) {
378 /* remove it from the hash table */
379 *sapp = sap->next;
380 tdbp->tdb_register_count--;
381 /* clear it */
382 sap->next = (uintptr_t)0;
383 sap->sync_addr = (uintptr_t)0;
384 /* insert it on the tail of the free list */
385 if (tdbp->tdb_sync_addr_free == NULL) {
386 tdbp->tdb_sync_addr_free = sap;
387 tdbp->tdb_sync_addr_last = sap;
388 } else {
389 tdbp->tdb_sync_addr_last->next = (uintptr_t)sap;
390 tdbp->tdb_sync_addr_last = sap;
391 }
392 break;
393 }
394 sapp = &sap->next;
395 }
396 lmutex_unlock(&udp->tdb_hash_lock);
397 }
398
399 /*
400 * Return a mutex statistics block for the given mutex.
401 */
402 tdb_mutex_stats_t *
tdb_mutex_stats(mutex_t * mp)403 tdb_mutex_stats(mutex_t *mp)
404 {
405 tdb_sync_stats_t *tssp;
406
407 /* avoid stealing the cache line unnecessarily */
408 if (mp->mutex_magic != MUTEX_MAGIC)
409 mp->mutex_magic = MUTEX_MAGIC;
410 if ((tssp = tdb_sync_obj_register(mp, NULL)) == NULL)
411 return (NULL);
412 tssp->un.type = TDB_MUTEX;
413 return (&tssp->un.mutex);
414 }
415
416 /*
417 * Return a condvar statistics block for the given condvar.
418 */
419 tdb_cond_stats_t *
tdb_cond_stats(cond_t * cvp)420 tdb_cond_stats(cond_t *cvp)
421 {
422 tdb_sync_stats_t *tssp;
423
424 /* avoid stealing the cache line unnecessarily */
425 if (cvp->cond_magic != COND_MAGIC)
426 cvp->cond_magic = COND_MAGIC;
427 if ((tssp = tdb_sync_obj_register(cvp, NULL)) == NULL)
428 return (NULL);
429 tssp->un.type = TDB_COND;
430 return (&tssp->un.cond);
431 }
432
433 /*
434 * Return an rwlock statistics block for the given rwlock.
435 */
436 tdb_rwlock_stats_t *
tdb_rwlock_stats(rwlock_t * rwlp)437 tdb_rwlock_stats(rwlock_t *rwlp)
438 {
439 tdb_sync_stats_t *tssp;
440
441 /* avoid stealing the cache line unnecessarily */
442 if (rwlp->magic != RWL_MAGIC)
443 rwlp->magic = RWL_MAGIC;
444 if ((tssp = tdb_sync_obj_register(rwlp, NULL)) == NULL)
445 return (NULL);
446 tssp->un.type = TDB_RWLOCK;
447 return (&tssp->un.rwlock);
448 }
449
450 /*
451 * Return a semaphore statistics block for the given semaphore.
452 */
453 tdb_sema_stats_t *
tdb_sema_stats(sema_t * sp)454 tdb_sema_stats(sema_t *sp)
455 {
456 tdb_sync_stats_t *tssp;
457 int new;
458
459 /* avoid stealing the cache line unnecessarily */
460 if (sp->magic != SEMA_MAGIC)
461 sp->magic = SEMA_MAGIC;
462 if ((tssp = tdb_sync_obj_register(sp, &new)) == NULL)
463 return (NULL);
464 tssp->un.type = TDB_SEMA;
465 if (new) {
466 tssp->un.sema.sema_max_count = sp->count;
467 tssp->un.sema.sema_min_count = sp->count;
468 }
469 return (&tssp->un.sema);
470 }
471