/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * This file contains most of the functionality
 * required to support the threads portion of libc_db.
 */

#include "lint.h"
#include "thr_uberdata.h"

static void
tdb_event_ready(void) {}

static void
tdb_event_sleep(void) {}

static void
tdb_event_switchto(void) {}

static void
tdb_event_switchfrom(void) {}

static void
tdb_event_lock_try(void) {}

static void
tdb_event_catchsig(void) {}

static void
tdb_event_idle(void) {}

static void
tdb_event_create(void) {}

static void
tdb_event_death(void) {}

static void
tdb_event_preempt(void) {}

static void
tdb_event_pri_inherit(void) {}

static void
tdb_event_reap(void) {}

static void
tdb_event_concurrency(void) {}

static void
tdb_event_timeout(void) {}

/*
 * uberflags.uf_tdb_register_sync is set to REGISTER_SYNC_ENABLE by a debugger
 * to empty the table and then enable synchronization object registration.
 *
 * uberflags.uf_tdb_register_sync is set to REGISTER_SYNC_DISABLE by a debugger
 * to empty the table and then disable synchronization object registration.
 */

const tdb_ev_func_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1] = {
	tdb_event_ready,
	tdb_event_sleep,
	tdb_event_switchto,
	tdb_event_switchfrom,
	tdb_event_lock_try,
	tdb_event_catchsig,
	tdb_event_idle,
	tdb_event_create,
	tdb_event_death,
	tdb_event_preempt,
	tdb_event_pri_inherit,
	tdb_event_reap,
	tdb_event_concurrency,
	tdb_event_timeout
};
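
/*
 * Illustrative sketch (not part of libc): a debugger consumes the empty
 * tdb_events[] stubs above through the libc_db event interface by
 * planting breakpoints on their addresses.  The libc_db calls shown are
 * the real interfaces (td_ta_event_addr(3C_DB), td_thr_event_enable(3C_DB));
 * error handling is elided and ta_p/th_p are assumed to be valid handles:
 *
 *	td_notify_t notify;
 *	(void) td_ta_event_addr(ta_p, TD_CREATE, &notify);
 *	// notify.u.bptaddr is the address of tdb_event_create();
 *	// plant a breakpoint there, then enable event reporting:
 *	(void) td_thr_event_enable(th_p, 1);
 */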
#if TDB_HASH_SHIFT != 15
#error "this is all broken because TDB_HASH_SHIFT is not 15"
#endif

static uint_t
tdb_addr_hash(void *addr)
{
	/*
	 * This knows for a fact that the hash table has
	 * 32K entries; that is, that TDB_HASH_SHIFT is 15.
	 */
#ifdef	_LP64
	uint64_t value60 = ((uintptr_t)addr >> 4);	/* 60 bits */
	uint32_t value30 = (value60 >> 30) ^ (value60 & 0x3fffffff);
#else
	uint32_t value30 = ((uintptr_t)addr >> 2);	/* 30 bits */
#endif
	return ((value30 >> 15) ^ (value30 & 0x7fff));
}
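
/*
 * Worked example of the hash above (illustrative only), for the 64-bit
 * address 0x1234567890:
 *
 *	value60 = 0x1234567890 >> 4  = 0x123456789
 *	value30 = (value60 >> 30) ^ (value60 & 0x3fffffff)
 *	        = 0x4 ^ 0x23456789   = 0x2345678d
 *	index   = (value30 >> 15) ^ (value30 & 0x7fff)
 *	        = 0x468a ^ 0x678d    = 0x2107
 *
 * The successive fold-and-xor steps guarantee a result in the range
 * [0, 32K), the size of the hash table.
 */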
static tdb_sync_stats_t *
alloc_sync_addr(void *addr)
{
	uberdata_t *udp = curthread->ul_uberdata;
	tdb_t *tdbp = &udp->tdb;
	tdb_sync_stats_t *sap;

	ASSERT(MUTEX_OWNED(&udp->tdb_hash_lock, curthread));

	if ((sap = tdbp->tdb_sync_addr_free) == NULL) {
		void *vaddr;
		int i;

		/*
		 * Don't keep trying after mmap() has already failed.
		 */
		if (tdbp->tdb_hash_alloc_failed)
			return (NULL);

		/* double the allocation each time */
		tdbp->tdb_sync_alloc *= 2;
		if ((vaddr = _private_mmap(NULL,
		    tdbp->tdb_sync_alloc * sizeof (tdb_sync_stats_t),
		    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON,
		    -1, (off_t)0)) == MAP_FAILED) {
			tdbp->tdb_hash_alloc_failed = 1;
			return (NULL);
		}
		sap = tdbp->tdb_sync_addr_free = vaddr;
		for (i = 1; i < tdbp->tdb_sync_alloc; sap++, i++)
			sap->next = (uintptr_t)(sap + 1);
		sap->next = (uintptr_t)0;
		tdbp->tdb_sync_addr_last = sap;

		sap = tdbp->tdb_sync_addr_free;
	}

	tdbp->tdb_sync_addr_free = (tdb_sync_stats_t *)(uintptr_t)sap->next;
	sap->next = (uintptr_t)0;
	sap->sync_addr = (uintptr_t)addr;
	(void) _memset(&sap->un, 0, sizeof (sap->un));
	return (sap);
}

static void
initialize_sync_hash()
{
	uberdata_t *udp = curthread->ul_uberdata;
	tdb_t *tdbp = &udp->tdb;
	uint64_t *addr_hash;
	tdb_sync_stats_t *sap;
	void *vaddr;
	int i;

	if (tdbp->tdb_hash_alloc_failed)
		return;
	lmutex_lock(&udp->tdb_hash_lock);
	if (udp->uberflags.uf_tdb_register_sync == REGISTER_SYNC_DISABLE) {
		/*
		 * There is no point allocating the hash table
		 * if we are disabling registration.
		 */
		udp->uberflags.uf_tdb_register_sync = REGISTER_SYNC_OFF;
		lmutex_unlock(&udp->tdb_hash_lock);
		return;
	}
	if (tdbp->tdb_sync_addr_hash != NULL || tdbp->tdb_hash_alloc_failed) {
		lmutex_unlock(&udp->tdb_hash_lock);
		return;
	}
	/* start with a free list of 2k elements */
	tdbp->tdb_sync_alloc = 2*1024;
	if ((vaddr = _private_mmap(NULL,
	    TDB_HASH_SIZE * sizeof (uint64_t) +
	    tdbp->tdb_sync_alloc * sizeof (tdb_sync_stats_t),
	    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON,
	    -1, (off_t)0)) == MAP_FAILED) {
		tdbp->tdb_hash_alloc_failed = 1;
		lmutex_unlock(&udp->tdb_hash_lock);
		return;
	}
	addr_hash = vaddr;

	/* initialize the free list */
	tdbp->tdb_sync_addr_free = sap =
	    (tdb_sync_stats_t *)&addr_hash[TDB_HASH_SIZE];
	for (i = 1; i < tdbp->tdb_sync_alloc; sap++, i++)
		sap->next = (uintptr_t)(sap + 1);
	sap->next = (uintptr_t)0;
	tdbp->tdb_sync_addr_last = sap;

	/* insert &udp->tdb_hash_lock itself into the new (empty) table */
	udp->tdb_hash_lock_stats.next = (uintptr_t)0;
	udp->tdb_hash_lock_stats.sync_addr = (uintptr_t)&udp->tdb_hash_lock;
	addr_hash[tdb_addr_hash(&udp->tdb_hash_lock)] =
	    (uintptr_t)&udp->tdb_hash_lock_stats;

	/* assign to tdb_sync_addr_hash only after fully initialized */
	tdbp->tdb_sync_addr_hash = addr_hash;
	tdbp->tdb_register_count = 1;
	lmutex_unlock(&udp->tdb_hash_lock);
}
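
/*
 * For reference, the layout of the single anonymous mapping made by
 * initialize_sync_hash() above (sized as in its mmap() call):
 *
 *	+--------------------------------+  <- tdb_sync_addr_hash
 *	| 32K hash buckets (uint64_t)    |
 *	+--------------------------------+  <- tdb_sync_addr_free
 *	| 2K tdb_sync_stats_t elements,  |
 *	| chained through their next     |
 *	| fields to form the free list   |
 *	+--------------------------------+
 *
 * When the free list runs dry, alloc_sync_addr() doubles tdb_sync_alloc
 * and mmap()s a fresh block of elements (2K, then 4K, 8K, ...); the
 * hash table itself is never resized and never deallocated.
 */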
tdb_sync_stats_t *
tdb_sync_obj_register(void *addr, int *new)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_t *tdbp = &udp->tdb;
	uint64_t *sapp;
	tdb_sync_stats_t *sap = NULL;
	int locked = 0;
	int i;

	/*
	 * Don't start statistics collection until
	 * we have initialized the primary link map.
	 */
	if (!self->ul_primarymap)
		return (NULL);

	if (new)
		*new = 0;
	/*
	 * To avoid recursion problems, we must do two things:
	 * 1. Make a special case for tdb_hash_lock (we use it internally).
	 * 2. Deal with the dynamic linker's lock interface:
	 *    When calling any external function, we may invoke the
	 *    dynamic linker.  It grabs a lock, which calls back here.
	 *    This only happens on the first call to the external
	 *    function, so we can just return NULL if we are called
	 *    recursively (and miss the first count).
	 */
	if (addr == (void *)&udp->tdb_hash_lock)
		return (&udp->tdb_hash_lock_stats);
	if (self->ul_sync_obj_reg)		/* recursive call */
		return (NULL);
	self->ul_sync_obj_reg = 1;

	/*
	 * On the first time through, initialize the hash table and free list.
	 */
	if (tdbp->tdb_sync_addr_hash == NULL) {
		initialize_sync_hash();
		if (tdbp->tdb_sync_addr_hash == NULL) {	/* utter failure */
			udp->uberflags.uf_tdb_register_sync = REGISTER_SYNC_OFF;
			goto out;
		}
	}

	sapp = &tdbp->tdb_sync_addr_hash[tdb_addr_hash(addr)];
	if (udp->uberflags.uf_tdb_register_sync == REGISTER_SYNC_ON) {
		/*
		 * Look up an address in the synchronization object hash table.
		 * No lock is required since it can only deliver a false
		 * negative, in which case we fall into the locked case below.
		 */
		for (sap = (tdb_sync_stats_t *)(uintptr_t)*sapp; sap != NULL;
		    sap = (tdb_sync_stats_t *)(uintptr_t)sap->next) {
			if (sap->sync_addr == (uintptr_t)addr)
				goto out;
		}
	}

	/*
	 * The search with no lock held failed or a special action is required.
	 * Grab tdb_hash_lock to do special actions and/or get a precise result.
	 */
	lmutex_lock(&udp->tdb_hash_lock);
	locked = 1;

	switch (udp->uberflags.uf_tdb_register_sync) {
	case REGISTER_SYNC_ON:
		break;
	case REGISTER_SYNC_OFF:
		goto out;
	default:
		/*
		 * For all debugger actions, first zero out the
		 * statistics block of every element in the hash table.
		 */
		for (i = 0; i < TDB_HASH_SIZE; i++)
			for (sap = (tdb_sync_stats_t *)
			    (uintptr_t)tdbp->tdb_sync_addr_hash[i];
			    sap != NULL;
			    sap = (tdb_sync_stats_t *)(uintptr_t)sap->next)
				(void) _memset(&sap->un, 0, sizeof (sap->un));

		switch (udp->uberflags.uf_tdb_register_sync) {
		case REGISTER_SYNC_ENABLE:
			udp->uberflags.uf_tdb_register_sync = REGISTER_SYNC_ON;
			break;
		case REGISTER_SYNC_DISABLE:
		default:
			udp->uberflags.uf_tdb_register_sync = REGISTER_SYNC_OFF;
			goto out;
		}
		break;
	}

	/*
	 * Perform the search while holding tdb_hash_lock.
	 * Keep track of the insertion point.
	 */
	while ((sap = (tdb_sync_stats_t *)(uintptr_t)*sapp) != NULL) {
		if (sap->sync_addr == (uintptr_t)addr)
			break;
		sapp = &sap->next;
	}

	/*
	 * Insert a new element if necessary.
	 */
	if (sap == NULL && (sap = alloc_sync_addr(addr)) != NULL) {
		*sapp = (uintptr_t)sap;
		tdbp->tdb_register_count++;
		if (new)
			*new = 1;
	}

out:
	if (locked)
		lmutex_unlock(&udp->tdb_hash_lock);
	self->ul_sync_obj_reg = 0;
	return (sap);
}

void
tdb_sync_obj_deregister(void *addr)
{
	uberdata_t *udp = curthread->ul_uberdata;
	tdb_t *tdbp = &udp->tdb;
	uint64_t *sapp;
	tdb_sync_stats_t *sap;
	uint_t hash;

	/*
	 * tdb_hash_lock is never destroyed.
	 */
	ASSERT(addr != &udp->tdb_hash_lock);

	/*
	 * Avoid acquiring tdb_hash_lock if lock statistics gathering has
	 * never been initiated or there is nothing in the hash bucket.
	 * (Once the hash table is allocated, it is never deallocated.)
	 */
	if (tdbp->tdb_sync_addr_hash == NULL ||
	    tdbp->tdb_sync_addr_hash[hash = tdb_addr_hash(addr)] == NULL)
		return;

	lmutex_lock(&udp->tdb_hash_lock);
	sapp = &tdbp->tdb_sync_addr_hash[hash];
	while ((sap = (tdb_sync_stats_t *)(uintptr_t)*sapp) != NULL) {
		if (sap->sync_addr == (uintptr_t)addr) {
			/* remove it from the hash table */
			*sapp = sap->next;
			tdbp->tdb_register_count--;
			/* clear it */
			sap->next = (uintptr_t)0;
			sap->sync_addr = (uintptr_t)0;
			/* insert it on the tail of the free list */
			if (tdbp->tdb_sync_addr_free == NULL) {
				tdbp->tdb_sync_addr_free = sap;
				tdbp->tdb_sync_addr_last = sap;
			} else {
				tdbp->tdb_sync_addr_last->next = (uintptr_t)sap;
				tdbp->tdb_sync_addr_last = sap;
			}
			break;
		}
		sapp = &sap->next;
	}
	lmutex_unlock(&udp->tdb_hash_lock);
}

/*
 * Return a mutex statistics block for the given mutex.
 */
tdb_mutex_stats_t *
tdb_mutex_stats(mutex_t *mp)
{
	tdb_sync_stats_t *tssp;

	/* avoid stealing the cache line unnecessarily */
	if (mp->mutex_magic != MUTEX_MAGIC)
		mp->mutex_magic = MUTEX_MAGIC;
	if ((tssp = tdb_sync_obj_register(mp, NULL)) == NULL)
		return (NULL);
	tssp->un.type = TDB_MUTEX;
	return (&tssp->un.mutex);
}
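
/*
 * Illustrative sketch (not actual libc code): callers in the mutex
 * implementation are expected to use the accessor above roughly as
 * follows; the statistics field named here (mutex_lock) is an
 * assumption about tdb_mutex_stats_t for the sake of the example:
 *
 *	tdb_mutex_stats_t *msp;
 *
 *	if ((msp = tdb_mutex_stats(mp)) != NULL)
 *		msp->mutex_lock++;	// count this acquisition
 *
 * A NULL return (registration off, recursive call, or allocation
 * failure) simply means no statistics are recorded for this lock.
 */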
/*
 * Return a condvar statistics block for the given condvar.
 */
tdb_cond_stats_t *
tdb_cond_stats(cond_t *cvp)
{
	tdb_sync_stats_t *tssp;

	/* avoid stealing the cache line unnecessarily */
	if (cvp->cond_magic != COND_MAGIC)
		cvp->cond_magic = COND_MAGIC;
	if ((tssp = tdb_sync_obj_register(cvp, NULL)) == NULL)
		return (NULL);
	tssp->un.type = TDB_COND;
	return (&tssp->un.cond);
}

/*
 * Return an rwlock statistics block for the given rwlock.
 */
tdb_rwlock_stats_t *
tdb_rwlock_stats(rwlock_t *rwlp)
{
	tdb_sync_stats_t *tssp;

	/* avoid stealing the cache line unnecessarily */
	if (rwlp->magic != RWL_MAGIC)
		rwlp->magic = RWL_MAGIC;
	if ((tssp = tdb_sync_obj_register(rwlp, NULL)) == NULL)
		return (NULL);
	tssp->un.type = TDB_RWLOCK;
	return (&tssp->un.rwlock);
}

/*
 * Return a semaphore statistics block for the given semaphore.
 */
tdb_sema_stats_t *
tdb_sema_stats(sema_t *sp)
{
	tdb_sync_stats_t *tssp;
	int new;

	/* avoid stealing the cache line unnecessarily */
	if (sp->magic != SEMA_MAGIC)
		sp->magic = SEMA_MAGIC;
	if ((tssp = tdb_sync_obj_register(sp, &new)) == NULL)
		return (NULL);
	tssp->un.type = TDB_SEMA;
	if (new) {
		tssp->un.sema.sema_max_count = sp->count;
		tssp->un.sema.sema_min_count = sp->count;
	}
	return (&tssp->un.sema);
}
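
/*
 * Illustrative sketch (not part of libc): the registration machinery in
 * this file exists for the benefit of a debugger using libc_db, roughly:
 *
 *	(void) td_ta_sync_tracking_enable(ta_p, 1);
 *	// ... let the target run; libc now registers sync objects ...
 *	(void) td_ta_sync_iter(ta_p, my_sync_callback, NULL);
 *	// within the callback, fetch the statistics gathered here:
 *	(void) td_sync_get_stats(sh_p, &ss);
 *
 * td_ta_sync_tracking_enable(3C_DB) is the debugger-side call that
 * stores REGISTER_SYNC_ENABLE (or REGISTER_SYNC_DISABLE) into
 * uberflags.uf_tdb_register_sync, as described at the top of this file.
 */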