/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * This file contains most of the functionality
 * required to support the threads portion of libc_db.
 */

#include "lint.h"
#include "thr_uberdata.h"

static void
tdb_event_ready(void) {}

static void
tdb_event_sleep(void) {}

static void
tdb_event_switchto(void) {}

static void
tdb_event_switchfrom(void) {}

static void
tdb_event_lock_try(void) {}

static void
tdb_event_catchsig(void) {}

static void
tdb_event_idle(void) {}

static void
tdb_event_create(void) {}

static void
tdb_event_death(void) {}

static void
tdb_event_preempt(void) {}

static void
tdb_event_pri_inherit(void) {}

static void
tdb_event_reap(void) {}

static void
tdb_event_concurrency(void) {}

static void
tdb_event_timeout(void) {}

/*
 * uberflags.uf_tdb_register_sync is set to REGISTER_SYNC_ENABLE by a debugger
 * to empty the table and then enable synchronization object registration.
 *
 * uberflags.uf_tdb_register_sync is set to REGISTER_SYNC_DISABLE by a debugger
 * to empty the table and then disable synchronization object registration.
 */

const tdb_ev_func_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1] = {
	tdb_event_ready,
	tdb_event_sleep,
	tdb_event_switchto,
	tdb_event_switchfrom,
	tdb_event_lock_try,
	tdb_event_catchsig,
	tdb_event_idle,
	tdb_event_create,
	tdb_event_death,
	tdb_event_preempt,
	tdb_event_pri_inherit,
	tdb_event_reap,
	tdb_event_concurrency,
	tdb_event_timeout
};

#if TDB_HASH_SHIFT != 15
#error "this is all broken because TDB_HASH_SHIFT is not 15"
#endif

static uint_t
tdb_addr_hash(void *addr)
{
	/*
	 * This knows for a fact that the hash table has
	 * 32K entries; that is, that TDB_HASH_SHIFT is 15.
	 */
#ifdef	_LP64
	uint64_t value60 = ((uintptr_t)addr >> 4);	/* 60 bits */
	uint32_t value30 = (value60 >> 30) ^ (value60 & 0x3fffffff);
#else
	uint32_t value30 = ((uintptr_t)addr >> 2);	/* 30 bits */
#endif
	return ((value30 >> 15) ^ (value30 & 0x7fff));
}
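
/*
 * Worked example of the hash fold above (illustrative only; the sample
 * address 0x1000f0 is hypothetical).  With the 64-bit arithmetic:
 *
 *	value60 = 0x1000f0 >> 4			= 0x10000f
 *	value30 = (value60 >> 30) ^
 *	          (value60 & 0x3fffffff)	= 0 ^ 0x10000f = 0x10000f
 *	hash    = (value30 >> 15) ^
 *	          (value30 & 0x7fff)		= 0x20 ^ 0xf = 0x2f
 *
 * so this address lands in bucket 0x2f of the 32K-entry table.
 */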

static tdb_sync_stats_t *
alloc_sync_addr(void *addr)
{
	uberdata_t *udp = curthread->ul_uberdata;
	tdb_t *tdbp = &udp->tdb;
	tdb_sync_stats_t *sap;

	ASSERT(MUTEX_OWNED(&udp->tdb_hash_lock, curthread));

	if ((sap = tdbp->tdb_sync_addr_free) == NULL) {
		void *vaddr;
		int i;

		/*
		 * Don't keep trying after mmap() has already failed.
		 */
		if (tdbp->tdb_hash_alloc_failed)
			return (NULL);

		/* double the allocation each time */
		tdbp->tdb_sync_alloc *= 2;
		if ((vaddr = _private_mmap(NULL,
		    tdbp->tdb_sync_alloc * sizeof (tdb_sync_stats_t),
		    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON,
		    -1, (off_t)0)) == MAP_FAILED) {
			tdbp->tdb_hash_alloc_failed = 1;
			return (NULL);
		}
		sap = tdbp->tdb_sync_addr_free = vaddr;
		for (i = 1; i < tdbp->tdb_sync_alloc; sap++, i++)
			sap->next = (uintptr_t)(sap + 1);
		sap->next = (uintptr_t)0;
		tdbp->tdb_sync_addr_last = sap;

		sap = tdbp->tdb_sync_addr_free;
	}

	tdbp->tdb_sync_addr_free = (tdb_sync_stats_t *)(uintptr_t)sap->next;
	sap->next = (uintptr_t)0;
	sap->sync_addr = (uintptr_t)addr;
	(void) _memset(&sap->un, 0, sizeof (sap->un));
	return (sap);
}

static void
initialize_sync_hash()
{
	uberdata_t *udp = curthread->ul_uberdata;
	tdb_t *tdbp = &udp->tdb;
	uint64_t *addr_hash;
	tdb_sync_stats_t *sap;
	void *vaddr;
	int i;

	if (tdbp->tdb_hash_alloc_failed)
		return;
	lmutex_lock(&udp->tdb_hash_lock);
	if (udp->uberflags.uf_tdb_register_sync == REGISTER_SYNC_DISABLE) {
		/*
		 * There is no point allocating the hash table
		 * if we are disabling registration.
		 */
		udp->uberflags.uf_tdb_register_sync = REGISTER_SYNC_OFF;
		lmutex_unlock(&udp->tdb_hash_lock);
		return;
	}
	if (tdbp->tdb_sync_addr_hash != NULL || tdbp->tdb_hash_alloc_failed) {
		lmutex_unlock(&udp->tdb_hash_lock);
		return;
	}
	/* start with a free list of 2k elements */
	tdbp->tdb_sync_alloc = 2*1024;
	if ((vaddr = _private_mmap(NULL,
	    TDB_HASH_SIZE * sizeof (uint64_t) +
	    tdbp->tdb_sync_alloc * sizeof (tdb_sync_stats_t),
	    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON,
	    -1, (off_t)0)) == MAP_FAILED) {
		tdbp->tdb_hash_alloc_failed = 1;
		/* must not return while still holding tdb_hash_lock */
		lmutex_unlock(&udp->tdb_hash_lock);
		return;
	}
	addr_hash = vaddr;

	/* initialize the free list */
	tdbp->tdb_sync_addr_free = sap =
	    (tdb_sync_stats_t *)&addr_hash[TDB_HASH_SIZE];
	for (i = 1; i < tdbp->tdb_sync_alloc; sap++, i++)
		sap->next = (uintptr_t)(sap + 1);
	sap->next = (uintptr_t)0;
	tdbp->tdb_sync_addr_last = sap;

	/* insert &udp->tdb_hash_lock itself into the new (empty) table */
	udp->tdb_hash_lock_stats.next = (uintptr_t)0;
	udp->tdb_hash_lock_stats.sync_addr = (uintptr_t)&udp->tdb_hash_lock;
	addr_hash[tdb_addr_hash(&udp->tdb_hash_lock)] =
	    (uintptr_t)&udp->tdb_hash_lock_stats;

	tdbp->tdb_register_count = 1;
	/* assign to tdb_sync_addr_hash only after fully initialized */
	_membar_producer();
	tdbp->tdb_sync_addr_hash = addr_hash;
	lmutex_unlock(&udp->tdb_hash_lock);
}
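
/*
 * Illustrative picture of the single anonymous mapping created above
 * (sizes assume TDB_HASH_SIZE == 32K buckets and the initial free list
 * of 2k elements):
 *
 *	vaddr -> uint64_t addr_hash[TDB_HASH_SIZE]	hash bucket heads
 *	         tdb_sync_stats_t free[2048]		initial free list
 *
 * Later allocations in alloc_sync_addr() map additional free-list
 * elements only; the hash table itself is never grown, moved, or
 * deallocated.
 */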

tdb_sync_stats_t *
tdb_sync_obj_register(void *addr, int *new)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_t *tdbp = &udp->tdb;
	uint64_t *sapp;
	tdb_sync_stats_t *sap = NULL;
	int locked = 0;
	int i;

	/*
	 * Don't start statistics collection until
	 * we have initialized the primary link map.
	 */
	if (!self->ul_primarymap)
		return (NULL);

	if (new)
		*new = 0;
	/*
	 * To avoid recursion problems, we must do two things:
	 * 1. Make a special case for tdb_hash_lock (we use it internally).
	 * 2. Deal with the dynamic linker's lock interface:
	 *    When calling any external function, we may invoke the
	 *    dynamic linker.  It grabs a lock, which calls back here.
	 *    This only happens on the first call to the external
	 *    function, so we can just return NULL if we are called
	 *    recursively (and miss the first count).
	 */
	if (addr == (void *)&udp->tdb_hash_lock)
		return (&udp->tdb_hash_lock_stats);
	if (self->ul_sync_obj_reg)		/* recursive call */
		return (NULL);
	self->ul_sync_obj_reg = 1;

	/*
	 * On the first time through, initialize the hash table and free list.
	 */
	if (tdbp->tdb_sync_addr_hash == NULL) {
		initialize_sync_hash();
		if (tdbp->tdb_sync_addr_hash == NULL) {	/* utter failure */
			udp->uberflags.uf_tdb_register_sync = REGISTER_SYNC_OFF;
			goto out;
		}
	}
	_membar_consumer();

	sapp = &tdbp->tdb_sync_addr_hash[tdb_addr_hash(addr)];
	if (udp->uberflags.uf_tdb_register_sync == REGISTER_SYNC_ON) {
		/*
		 * Look up an address in the synchronization object hash table.
		 * No lock is required since it can only deliver a false
		 * negative, in which case we fall into the locked case below.
		 */
		for (sap = (tdb_sync_stats_t *)(uintptr_t)*sapp; sap != NULL;
		    sap = (tdb_sync_stats_t *)(uintptr_t)sap->next) {
			if (sap->sync_addr == (uintptr_t)addr)
				goto out;
		}
	}

	/*
	 * The search with no lock held failed or a special action is required.
	 * Grab tdb_hash_lock to do special actions and/or get a precise result.
	 */
	lmutex_lock(&udp->tdb_hash_lock);
	locked = 1;

	switch (udp->uberflags.uf_tdb_register_sync) {
	case REGISTER_SYNC_ON:
		break;
	case REGISTER_SYNC_OFF:
		goto out;
	default:
		/*
		 * For all debugger actions, first zero out the
		 * statistics block of every element in the hash table.
		 */
		for (i = 0; i < TDB_HASH_SIZE; i++)
			for (sap = (tdb_sync_stats_t *)
			    (uintptr_t)tdbp->tdb_sync_addr_hash[i];
			    sap != NULL;
			    sap = (tdb_sync_stats_t *)(uintptr_t)sap->next)
				(void) _memset(&sap->un, 0, sizeof (sap->un));

		switch (udp->uberflags.uf_tdb_register_sync) {
		case REGISTER_SYNC_ENABLE:
			udp->uberflags.uf_tdb_register_sync = REGISTER_SYNC_ON;
			break;
		case REGISTER_SYNC_DISABLE:
		default:
			udp->uberflags.uf_tdb_register_sync = REGISTER_SYNC_OFF;
			goto out;
		}
		break;
	}

	/*
	 * Perform the search while holding tdb_hash_lock.
	 * Keep track of the insertion point.
	 */
	while ((sap = (tdb_sync_stats_t *)(uintptr_t)*sapp) != NULL) {
		if (sap->sync_addr == (uintptr_t)addr)
			break;
		sapp = &sap->next;
	}

	/*
	 * Insert a new element if necessary.
	 */
	if (sap == NULL && (sap = alloc_sync_addr(addr)) != NULL) {
		*sapp = (uintptr_t)sap;
		tdbp->tdb_register_count++;
		if (new)
			*new = 1;
	}

out:
	if (locked)
		lmutex_unlock(&udp->tdb_hash_lock);
	self->ul_sync_obj_reg = 0;
	return (sap);
}

void
tdb_sync_obj_deregister(void *addr)
{
	uberdata_t *udp = curthread->ul_uberdata;
	tdb_t *tdbp = &udp->tdb;
	uint64_t *sapp;
	tdb_sync_stats_t *sap;
	uint_t hash;

	/*
	 * tdb_hash_lock is never destroyed.
	 */
	ASSERT(addr != &udp->tdb_hash_lock);

	/*
	 * Avoid acquiring tdb_hash_lock if lock statistics gathering has
	 * never been initiated or there is nothing in the hash bucket.
	 * (Once the hash table is allocated, it is never deallocated.)
	 */
	if (tdbp->tdb_sync_addr_hash == NULL ||
	    tdbp->tdb_sync_addr_hash[hash = tdb_addr_hash(addr)] == NULL)
		return;

	lmutex_lock(&udp->tdb_hash_lock);
	sapp = &tdbp->tdb_sync_addr_hash[hash];
	while ((sap = (tdb_sync_stats_t *)(uintptr_t)*sapp) != NULL) {
		if (sap->sync_addr == (uintptr_t)addr) {
			/* remove it from the hash table */
			*sapp = sap->next;
			tdbp->tdb_register_count--;
			/* clear it */
			sap->next = (uintptr_t)0;
			sap->sync_addr = (uintptr_t)0;
			/* insert it on the tail of the free list */
			if (tdbp->tdb_sync_addr_free == NULL) {
				tdbp->tdb_sync_addr_free = sap;
				tdbp->tdb_sync_addr_last = sap;
			} else {
				tdbp->tdb_sync_addr_last->next = (uintptr_t)sap;
				tdbp->tdb_sync_addr_last = sap;
			}
			break;
		}
		sapp = &sap->next;
	}
	lmutex_unlock(&udp->tdb_hash_lock);
}
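
/*
 * Typical caller pattern for tdb_sync_obj_register(), following the
 * stats functions below ("lock" here is a hypothetical caller-owned
 * mutex, used only for illustration):
 *
 *	int new;
 *	tdb_sync_stats_t *tssp;
 *
 *	if ((tssp = tdb_sync_obj_register(&lock, &new)) != NULL) {
 *		tssp->un.type = TDB_MUTEX;
 *		if (new) {
 *			... one-time initialization of the stats ...
 *		}
 *	}
 *
 * A NULL return means registration is off, allocation failed, or we
 * were entered recursively via the dynamic linker; callers must be
 * prepared to proceed without statistics.
 */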

/*
 * Return a mutex statistics block for the given mutex.
 */
tdb_mutex_stats_t *
tdb_mutex_stats(mutex_t *mp)
{
	tdb_sync_stats_t *tssp;

	/* avoid stealing the cache line unnecessarily */
	if (mp->mutex_magic != MUTEX_MAGIC)
		mp->mutex_magic = MUTEX_MAGIC;
	if ((tssp = tdb_sync_obj_register(mp, NULL)) == NULL)
		return (NULL);
	tssp->un.type = TDB_MUTEX;
	return (&tssp->un.mutex);
}

/*
 * Return a condvar statistics block for the given condvar.
 */
tdb_cond_stats_t *
tdb_cond_stats(cond_t *cvp)
{
	tdb_sync_stats_t *tssp;

	/* avoid stealing the cache line unnecessarily */
	if (cvp->cond_magic != COND_MAGIC)
		cvp->cond_magic = COND_MAGIC;
	if ((tssp = tdb_sync_obj_register(cvp, NULL)) == NULL)
		return (NULL);
	tssp->un.type = TDB_COND;
	return (&tssp->un.cond);
}

/*
 * Return an rwlock statistics block for the given rwlock.
 */
tdb_rwlock_stats_t *
tdb_rwlock_stats(rwlock_t *rwlp)
{
	tdb_sync_stats_t *tssp;

	/* avoid stealing the cache line unnecessarily */
	if (rwlp->magic != RWL_MAGIC)
		rwlp->magic = RWL_MAGIC;
	if ((tssp = tdb_sync_obj_register(rwlp, NULL)) == NULL)
		return (NULL);
	tssp->un.type = TDB_RWLOCK;
	return (&tssp->un.rwlock);
}

/*
 * Return a semaphore statistics block for the given semaphore.
 */
tdb_sema_stats_t *
tdb_sema_stats(sema_t *sp)
{
	tdb_sync_stats_t *tssp;
	int new;

	/* avoid stealing the cache line unnecessarily */
	if (sp->magic != SEMA_MAGIC)
		sp->magic = SEMA_MAGIC;
	if ((tssp = tdb_sync_obj_register(sp, &new)) == NULL)
		return (NULL);
	tssp->un.type = TDB_SEMA;
	if (new) {
		tssp->un.sema.sema_max_count = sp->count;
		tssp->un.sema.sema_min_count = sp->count;
	}
	return (&tssp->un.sema);
}
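
/*
 * On the "avoid stealing the cache line" tests above: an unconditional
 * store such as
 *
 *	mp->mutex_magic = MUTEX_MAGIC;
 *
 * would dirty the object's cache line on every call, forcing other
 * CPUs sharing that line to re-fetch it.  Storing only when the value
 * actually differs leaves the line clean in the common case.
 */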