/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file contains most of the functionality
 * required to support the threads portion of libc_db.
 */

#include "lint.h"
#include "thr_uberdata.h"

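/*
 * The empty functions below are the thread-event reporting stubs.
 * A debugger using libc_db looks up their addresses and plants
 * breakpoints on them; when event reporting is enabled, the library
 * calls the stub corresponding to the event that just occurred.
 */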
static void
tdb_event_ready(void) {}

static void
tdb_event_sleep(void) {}

static void
tdb_event_switchto(void) {}

static void
tdb_event_switchfrom(void) {}

static void
tdb_event_lock_try(void) {}

static void
tdb_event_catchsig(void) {}

static void
tdb_event_idle(void) {}

static void
tdb_event_create(void) {}

static void
tdb_event_death(void) {}

static void
tdb_event_preempt(void) {}

static void
tdb_event_pri_inherit(void) {}

static void
tdb_event_reap(void) {}

static void
tdb_event_concurrency(void) {}

static void
tdb_event_timeout(void) {}

/*
 * uberflags.uf_tdb_register_sync is set to REGISTER_SYNC_ENABLE by a debugger
 * to empty the table and then enable synchronization object registration.
 *
 * uberflags.uf_tdb_register_sync is set to REGISTER_SYNC_DISABLE by a debugger
 * to empty the table and then disable synchronization object registration.
 */
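
/*
 * Internally, REGISTER_SYNC_ON and REGISTER_SYNC_OFF are the steady
 * states; the ENABLE and DISABLE values are one-shot requests that
 * tdb_sync_obj_register() (or initialize_sync_hash()) acts upon and
 * then replaces with ON or OFF, respectively.
 */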

const tdb_ev_func_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1] = {
	tdb_event_ready,
	tdb_event_sleep,
	tdb_event_switchto,
	tdb_event_switchfrom,
	tdb_event_lock_try,
	tdb_event_catchsig,
	tdb_event_idle,
	tdb_event_create,
	tdb_event_death,
	tdb_event_preempt,
	tdb_event_pri_inherit,
	tdb_event_reap,
	tdb_event_concurrency,
	tdb_event_timeout
};

#if TDB_HASH_SHIFT != 15
#error "this is all broken because TDB_HASH_SHIFT is not 15"
#endif

static uint_t
tdb_addr_hash(void *addr)
{
	/*
	 * This knows for a fact that the hash table has
	 * 32K entries; that is, that TDB_HASH_SHIFT is 15.
	 */
#ifdef _LP64
	uint64_t value60 = ((uintptr_t)addr >> 4);	/* 60 bits */
	uint32_t value30 = (value60 >> 30) ^ (value60 & 0x3fffffff);
#else
	uint32_t value30 = ((uintptr_t)addr >> 2);	/* 30 bits */
#endif
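	/* XOR-fold the 30-bit value into a 15-bit hash index */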
	return ((value30 >> 15) ^ (value30 & 0x7fff));
}

static tdb_sync_stats_t *
alloc_sync_addr(void *addr)
{
	uberdata_t *udp = curthread->ul_uberdata;
	tdb_t *tdbp = &udp->tdb;
	tdb_sync_stats_t *sap;

	ASSERT(MUTEX_OWNED(&udp->tdb_hash_lock, curthread));

	if ((sap = tdbp->tdb_sync_addr_free) == NULL) {
		void *vaddr;
		int i;

		/*
		 * Don't keep trying after mmap() has already failed.
		 */
		if (tdbp->tdb_hash_alloc_failed)
			return (NULL);

		/* double the allocation each time */
		tdbp->tdb_sync_alloc *= 2;
		if ((vaddr = mmap(NULL,
		    tdbp->tdb_sync_alloc * sizeof (tdb_sync_stats_t),
		    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON,
		    -1, (off_t)0)) == MAP_FAILED) {
			tdbp->tdb_hash_alloc_failed = 1;
			return (NULL);
		}
		sap = tdbp->tdb_sync_addr_free = vaddr;
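		/* chain the new elements into a NULL-terminated free list */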
		for (i = 1; i < tdbp->tdb_sync_alloc; sap++, i++)
			sap->next = (uintptr_t)(sap + 1);
		sap->next = (uintptr_t)0;
		tdbp->tdb_sync_addr_last = sap;

		sap = tdbp->tdb_sync_addr_free;
	}

	tdbp->tdb_sync_addr_free = (tdb_sync_stats_t *)(uintptr_t)sap->next;
	sap->next = (uintptr_t)0;
	sap->sync_addr = (uintptr_t)addr;
	(void) memset(&sap->un, 0, sizeof (sap->un));
	return (sap);
}

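/*
 * Allocate the hash table and the initial free list in one anonymous
 * mapping, then install the table under tdb_hash_lock.
 */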
static void
initialize_sync_hash()
{
	uberdata_t *udp = curthread->ul_uberdata;
	tdb_t *tdbp = &udp->tdb;
	uint64_t *addr_hash;
	tdb_sync_stats_t *sap;
	void *vaddr;
	int i;

	if (tdbp->tdb_hash_alloc_failed)
		return;
	lmutex_lock(&udp->tdb_hash_lock);
	if (udp->uberflags.uf_tdb_register_sync == REGISTER_SYNC_DISABLE) {
		/*
		 * There is no point allocating the hash table
		 * if we are disabling registration.
		 */
		udp->uberflags.uf_tdb_register_sync = REGISTER_SYNC_OFF;
		lmutex_unlock(&udp->tdb_hash_lock);
		return;
	}
	if (tdbp->tdb_sync_addr_hash != NULL || tdbp->tdb_hash_alloc_failed) {
		lmutex_unlock(&udp->tdb_hash_lock);
		return;
	}
	/* start with a free list of 2k elements */
	tdbp->tdb_sync_alloc = 2*1024;
	if ((vaddr = mmap(NULL, TDB_HASH_SIZE * sizeof (uint64_t) +
	    tdbp->tdb_sync_alloc * sizeof (tdb_sync_stats_t),
	    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON,
	    -1, (off_t)0)) == MAP_FAILED) {
		tdbp->tdb_hash_alloc_failed = 1;
		/* don't return with tdb_hash_lock still held */
		lmutex_unlock(&udp->tdb_hash_lock);
		return;
	}
	addr_hash = vaddr;
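	/* the free-list elements follow the hash table in the same mapping */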

	/* initialize the free list */
	tdbp->tdb_sync_addr_free = sap =
	    (tdb_sync_stats_t *)&addr_hash[TDB_HASH_SIZE];
	for (i = 1; i < tdbp->tdb_sync_alloc; sap++, i++)
		sap->next = (uintptr_t)(sap + 1);
	sap->next = (uintptr_t)0;
	tdbp->tdb_sync_addr_last = sap;

	/* insert &udp->tdb_hash_lock itself into the new (empty) table */
	udp->tdb_hash_lock_stats.next = (uintptr_t)0;
	udp->tdb_hash_lock_stats.sync_addr = (uintptr_t)&udp->tdb_hash_lock;
	addr_hash[tdb_addr_hash(&udp->tdb_hash_lock)] =
	    (uintptr_t)&udp->tdb_hash_lock_stats;

	tdbp->tdb_register_count = 1;
	/* assign to tdb_sync_addr_hash only after fully initialized */
	membar_producer();
	tdbp->tdb_sync_addr_hash = addr_hash;
	lmutex_unlock(&udp->tdb_hash_lock);
}

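/*
 * Return the statistics block for the synchronization object at 'addr',
 * registering the object if it is not already in the hash table.
 * If 'new' is non-NULL, *new is set to 1 when a new block is inserted
 * and to 0 otherwise.  Returns NULL if registration is off or fails.
 */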
tdb_sync_stats_t *
tdb_sync_obj_register(void *addr, int *new)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_t *tdbp = &udp->tdb;
	uint64_t *sapp;
	tdb_sync_stats_t *sap = NULL;
	int locked = 0;
	int i;

	/*
	 * Don't start statistics collection until
	 * we have initialized the primary link map.
	 */
	if (!self->ul_primarymap)
		return (NULL);

	if (new)
		*new = 0;
	/*
	 * To avoid recursion problems, we must do two things:
	 * 1. Make a special case for tdb_hash_lock (we use it internally).
	 * 2. Deal with the dynamic linker's lock interface:
	 *    When calling any external function, we may invoke the
	 *    dynamic linker.  It grabs a lock, which calls back here.
	 *    This only happens on the first call to the external
	 *    function, so we can just return NULL if we are called
	 *    recursively (and miss the first count).
	 */
	if (addr == (void *)&udp->tdb_hash_lock)
		return (&udp->tdb_hash_lock_stats);
	if (self->ul_sync_obj_reg)		/* recursive call */
		return (NULL);
	self->ul_sync_obj_reg = 1;

	/*
	 * On the first time through, initialize the hash table and free list.
	 */
	if (tdbp->tdb_sync_addr_hash == NULL) {
		initialize_sync_hash();
		if (tdbp->tdb_sync_addr_hash == NULL) {	/* utter failure */
			udp->uberflags.uf_tdb_register_sync = REGISTER_SYNC_OFF;
			goto out;
		}
	}
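	/* pairs with membar_producer() in initialize_sync_hash() */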
	membar_consumer();

	sapp = &tdbp->tdb_sync_addr_hash[tdb_addr_hash(addr)];
	if (udp->uberflags.uf_tdb_register_sync == REGISTER_SYNC_ON) {
		/*
		 * Look up an address in the synchronization object hash table.
		 * No lock is required since it can only deliver a false
		 * negative, in which case we fall into the locked case below.
		 */
		for (sap = (tdb_sync_stats_t *)(uintptr_t)*sapp; sap != NULL;
		    sap = (tdb_sync_stats_t *)(uintptr_t)sap->next) {
			if (sap->sync_addr == (uintptr_t)addr)
				goto out;
		}
	}

	/*
	 * The search with no lock held failed or a special action is required.
	 * Grab tdb_hash_lock to do special actions and/or get a precise result.
	 */
	lmutex_lock(&udp->tdb_hash_lock);
	locked = 1;

	switch (udp->uberflags.uf_tdb_register_sync) {
	case REGISTER_SYNC_ON:
		break;
	case REGISTER_SYNC_OFF:
		goto out;
	default:
		/*
		 * For all debugger actions, first zero out the
		 * statistics block of every element in the hash table.
		 */
		for (i = 0; i < TDB_HASH_SIZE; i++)
			for (sap = (tdb_sync_stats_t *)
			    (uintptr_t)tdbp->tdb_sync_addr_hash[i];
			    sap != NULL;
			    sap = (tdb_sync_stats_t *)(uintptr_t)sap->next)
				(void) memset(&sap->un, 0, sizeof (sap->un));

		switch (udp->uberflags.uf_tdb_register_sync) {
		case REGISTER_SYNC_ENABLE:
			udp->uberflags.uf_tdb_register_sync = REGISTER_SYNC_ON;
			break;
		case REGISTER_SYNC_DISABLE:
		default:
			udp->uberflags.uf_tdb_register_sync = REGISTER_SYNC_OFF;
			goto out;
		}
		break;
	}

	/*
	 * Perform the search while holding tdb_hash_lock.
	 * Keep track of the insertion point.
	 */
	while ((sap = (tdb_sync_stats_t *)(uintptr_t)*sapp) != NULL) {
		if (sap->sync_addr == (uintptr_t)addr)
			break;
		sapp = &sap->next;
	}

	/*
	 * Insert a new element if necessary.
	 */
	if (sap == NULL && (sap = alloc_sync_addr(addr)) != NULL) {
		*sapp = (uintptr_t)sap;
		tdbp->tdb_register_count++;
		if (new)
			*new = 1;
	}

out:
	if (locked)
		lmutex_unlock(&udp->tdb_hash_lock);
	self->ul_sync_obj_reg = 0;
	return (sap);
}

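/*
 * Remove the statistics block for a synchronization object, if present,
 * and return it to the tail of the free list.
 */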
void
tdb_sync_obj_deregister(void *addr)
{
	uberdata_t *udp = curthread->ul_uberdata;
	tdb_t *tdbp = &udp->tdb;
	uint64_t *sapp;
	tdb_sync_stats_t *sap;
	uint_t hash;

	/*
	 * tdb_hash_lock is never destroyed.
	 */
	ASSERT(addr != &udp->tdb_hash_lock);

	/*
	 * Avoid acquiring tdb_hash_lock if lock statistics gathering has
	 * never been initiated or there is nothing in the hash bucket.
	 * (Once the hash table is allocated, it is never deallocated.)
	 */
	if (tdbp->tdb_sync_addr_hash == NULL ||
	    tdbp->tdb_sync_addr_hash[hash = tdb_addr_hash(addr)] == 0)
		return;

	lmutex_lock(&udp->tdb_hash_lock);
	sapp = &tdbp->tdb_sync_addr_hash[hash];
	while ((sap = (tdb_sync_stats_t *)(uintptr_t)*sapp) != NULL) {
		if (sap->sync_addr == (uintptr_t)addr) {
			/* remove it from the hash table */
			*sapp = sap->next;
			tdbp->tdb_register_count--;
			/* clear it */
			sap->next = (uintptr_t)0;
			sap->sync_addr = (uintptr_t)0;
			/* insert it on the tail of the free list */
			if (tdbp->tdb_sync_addr_free == NULL) {
				tdbp->tdb_sync_addr_free = sap;
				tdbp->tdb_sync_addr_last = sap;
			} else {
				tdbp->tdb_sync_addr_last->next = (uintptr_t)sap;
				tdbp->tdb_sync_addr_last = sap;
			}
			break;
		}
		sapp = &sap->next;
	}
	lmutex_unlock(&udp->tdb_hash_lock);
}

/*
 * Return a mutex statistics block for the given mutex.
 */
tdb_mutex_stats_t *
tdb_mutex_stats(mutex_t *mp)
{
	tdb_sync_stats_t *tssp;

	/* avoid stealing the cache line unnecessarily */
	if (mp->mutex_magic != MUTEX_MAGIC)
		mp->mutex_magic = MUTEX_MAGIC;
	if ((tssp = tdb_sync_obj_register(mp, NULL)) == NULL)
		return (NULL);
	tssp->un.type = TDB_MUTEX;
	return (&tssp->un.mutex);
}

/*
 * Return a condvar statistics block for the given condvar.
 */
tdb_cond_stats_t *
tdb_cond_stats(cond_t *cvp)
{
	tdb_sync_stats_t *tssp;

	/* avoid stealing the cache line unnecessarily */
	if (cvp->cond_magic != COND_MAGIC)
		cvp->cond_magic = COND_MAGIC;
	if ((tssp = tdb_sync_obj_register(cvp, NULL)) == NULL)
		return (NULL);
	tssp->un.type = TDB_COND;
	return (&tssp->un.cond);
}

/*
 * Return an rwlock statistics block for the given rwlock.
 */
tdb_rwlock_stats_t *
tdb_rwlock_stats(rwlock_t *rwlp)
{
	tdb_sync_stats_t *tssp;

	/* avoid stealing the cache line unnecessarily */
	if (rwlp->magic != RWL_MAGIC)
		rwlp->magic = RWL_MAGIC;
	if ((tssp = tdb_sync_obj_register(rwlp, NULL)) == NULL)
		return (NULL);
	tssp->un.type = TDB_RWLOCK;
	return (&tssp->un.rwlock);
}

/*
 * Return a semaphore statistics block for the given semaphore.
 */
tdb_sema_stats_t *
tdb_sema_stats(sema_t *sp)
{
	tdb_sync_stats_t *tssp;
	int new;

	/* avoid stealing the cache line unnecessarily */
	if (sp->magic != SEMA_MAGIC)
		sp->magic = SEMA_MAGIC;
	if ((tssp = tdb_sync_obj_register(sp, &new)) == NULL)
		return (NULL);
	tssp->un.type = TDB_SEMA;
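	/* on first registration, seed min/max with the current count */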
	if (new) {
		tssp->un.sema.sema_max_count = sp->count;
		tssp->un.sema.sema_min_count = sp->count;
	}
	return (&tssp->un.sema);
}