/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <unistd.h>
#include <thr_uberdata.h>
#include <thread_db.h>
#include <libc_int.h>

/*
 * Private structures.
 */

typedef union {
	mutex_t		lock;
	rwlock_t	rwlock;
	sema_t		semaphore;
	cond_t		condition;
} td_so_un_t;

struct td_thragent {
	rwlock_t	rwlock;
	struct ps_prochandle *ph_p;
	int		initialized;
	int		sync_tracking;
	int		model;
	int		primary_map;
	psaddr_t	bootstrap_addr;
	psaddr_t	uberdata_addr;
	psaddr_t	tdb_eventmask_addr;
	psaddr_t	tdb_register_sync_addr;
	psaddr_t	tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
	psaddr_t	hash_table_addr;
	int		hash_size;
	lwpid_t		single_lwpid;
	psaddr_t	single_ulwp_addr;
};

/*
 * This is the name of the variable in libc that contains
 * the uberdata address that we will need.
 */
#define	TD_BOOTSTRAP_NAME	"_tdb_bootstrap"
/*
 * This is the actual name of uberdata, used in the event
 * that tdb_bootstrap has not yet been initialized.
 */
#define	TD_UBERDATA_NAME	"_uberdata"
/*
 * The library name should end with ".so.1", but older versions of
 * dbx expect the unadorned name and malfunction if ".1" is specified.
 * Unfortunately, if ".1" is not specified, mdb malfunctions when it
 * is applied to another instance of itself (due to the presence of
 * /usr/lib/mdb/proc/libc.so).  So we try it both ways.
 */
#define	TD_LIBRARY_NAME		"libc.so"
#define	TD_LIBRARY_NAME_1	"libc.so.1"

td_err_e __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p);

td_err_e __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
	void *cbdata_p, td_thr_state_e state, int ti_pri,
	sigset_t *ti_sigmask_p, unsigned ti_user_flags);

/*
 * Initialize threads debugging interface.
 */
#pragma weak td_init = __td_init
td_err_e
__td_init()
{
	return (TD_OK);
}

/*
 * This function does nothing, and never did.
 * But the symbol is in the ABI, so we can't delete it.
 */
#pragma weak td_log = __td_log
void
__td_log()
{
}
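
/*
 * Meaning of ta_p->initialized (see td_read_hash_size(),
 * td_read_uberdata() and td_read_bootstrap_data() below):
 *	-1	startup in progress; blocks recursive entry into libc_db
 *		from the provider of ps_pglobal_lookup()
 *	 0	uninitialized
 *	 1	partially initialized (single-threaded target)
 *	 2	fully initialized (multi-threaded target)
 */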

/*
 * Short-cut to read just the hash table size from the process,
 * to avoid repeatedly reading the full uberdata structure when
 * dealing with a single-threaded process.
 */
static uint_t
td_read_hash_size(td_thragent_t *ta_p)
{
	psaddr_t addr;
	uint_t hash_size;

	switch (ta_p->initialized) {
	default:	/* uninitialized */
		return (0);
	case 1:		/* partially initialized */
		break;
	case 2:		/* fully initialized */
		return (ta_p->hash_size);
	}

	if (ta_p->model == PR_MODEL_NATIVE) {
		addr = ta_p->uberdata_addr + offsetof(uberdata_t, hash_size);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		addr = ta_p->uberdata_addr + offsetof(uberdata32_t, hash_size);
#else
		addr = 0;
#endif
	}
	if (ps_pdread(ta_p->ph_p, addr, &hash_size, sizeof (hash_size))
	    != PS_OK)
		return (0);
	return (hash_size);
}

static td_err_e
td_read_uberdata(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p = ta_p->ph_p;

	if (ta_p->model == PR_MODEL_NATIVE) {
		uberdata_t uberdata;

		if (ps_pdread(ph_p, ta_p->uberdata_addr,
		    &uberdata, sizeof (uberdata)) != PS_OK)
			return (TD_DBERR);
		ta_p->primary_map = uberdata.primary_map;
		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, tdb.tdb_ev_global_mask);
		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, uberflags.uf_tdb_register_sync);
		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
		ta_p->hash_size = uberdata.hash_size;
		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
		    ta_p->tdb_events, sizeof (ta_p->tdb_events)) != PS_OK)
			return (TD_DBERR);

	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		uberdata32_t uberdata;
		caddr32_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
		int i;

		if (ps_pdread(ph_p, ta_p->uberdata_addr,
		    &uberdata, sizeof (uberdata)) != PS_OK)
			return (TD_DBERR);
		ta_p->primary_map = uberdata.primary_map;
		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, tdb.tdb_ev_global_mask);
		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, uberflags.uf_tdb_register_sync);
		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
		ta_p->hash_size = uberdata.hash_size;
		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
		    tdb_events, sizeof (tdb_events)) != PS_OK)
			return (TD_DBERR);
		for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++)
			ta_p->tdb_events[i] = tdb_events[i];
#else
		return (TD_DBERR);
#endif
	}
	if (ta_p->hash_size != 1) {	/* multi-threaded */
		ta_p->initialized = 2;
		ta_p->single_lwpid = 0;
		ta_p->single_ulwp_addr = NULL;
	} else {			/* single-threaded */
		ta_p->initialized = 1;
		/*
		 * Get the address and lwpid of the single thread/LWP.
		 * It may not be ulwp_one if this is a child of fork1().
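		 * (A fork1() child keeps only the thread that called
		 * fork1(), which need not be the primordial thread, so
		 * we take whatever ulwp is on the one hash bucket.)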
		 */
		if (ta_p->model == PR_MODEL_NATIVE) {
			thr_hash_table_t head;
			lwpid_t lwpid = 0;

			if (ps_pdread(ph_p, ta_p->hash_table_addr,
			    &head, sizeof (head)) != PS_OK)
				return (TD_DBERR);
			if ((psaddr_t)head.hash_bucket == NULL)
				ta_p->initialized = 0;
			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
			    offsetof(ulwp_t, ul_lwpid),
			    &lwpid, sizeof (lwpid)) != PS_OK)
				return (TD_DBERR);
			ta_p->single_lwpid = lwpid;
			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
		} else {
#if defined(_LP64) && defined(_SYSCALL32)
			thr_hash_table32_t head;
			lwpid_t lwpid = 0;

			if (ps_pdread(ph_p, ta_p->hash_table_addr,
			    &head, sizeof (head)) != PS_OK)
				return (TD_DBERR);
			if ((psaddr_t)head.hash_bucket == NULL)
				ta_p->initialized = 0;
			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
			    offsetof(ulwp32_t, ul_lwpid),
			    &lwpid, sizeof (lwpid)) != PS_OK)
				return (TD_DBERR);
			ta_p->single_lwpid = lwpid;
			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
#else
			return (TD_DBERR);
#endif
		}
	}
	if (!ta_p->primary_map)
		ta_p->initialized = 0;
	return (TD_OK);
}

static td_err_e
td_read_bootstrap_data(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p = ta_p->ph_p;
	psaddr_t bootstrap_addr;
	psaddr_t uberdata_addr;
	ps_err_e db_return;
	td_err_e return_val;
	int do_1;

	switch (ta_p->initialized) {
	case 2:			/* fully initialized */
		return (TD_OK);
	case 1:			/* partially initialized */
		if (td_read_hash_size(ta_p) == 1)
			return (TD_OK);
		return (td_read_uberdata(ta_p));
	}

	/*
	 * Uninitialized -- do the startup work.
	 * We set ta_p->initialized to -1 to cut off recursive calls
	 * into libc_db by code in the provider of ps_pglobal_lookup().
	 */
	do_1 = 0;
	ta_p->initialized = -1;
	db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME,
	    TD_BOOTSTRAP_NAME, &bootstrap_addr);
	if (db_return == PS_NOSYM) {
		do_1 = 1;
		db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME_1,
		    TD_BOOTSTRAP_NAME, &bootstrap_addr);
	}
	if (db_return == PS_NOSYM)	/* libc is not linked yet */
		return (TD_NOLIBTHREAD);
	if (db_return != PS_OK)
		return (TD_ERR);
	db_return = ps_pglobal_lookup(ph_p,
	    do_1? TD_LIBRARY_NAME_1 : TD_LIBRARY_NAME,
	    TD_UBERDATA_NAME, &uberdata_addr);
	if (db_return == PS_NOSYM)	/* libc is not linked yet */
		return (TD_NOLIBTHREAD);
	if (db_return != PS_OK)
		return (TD_ERR);
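
	/*
	 * _tdb_bootstrap is a pointer to a pointer to the uberdata.
	 * If it has not been filled in yet, or if the primary link map
	 * has not been initialized, fall back to the address of the
	 * _uberdata symbol found by the lookup above.
	 */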
	/*
	 * Read the uberdata address into the thread agent structure.
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		psaddr_t psaddr;
		if (ps_pdread(ph_p, bootstrap_addr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if ((ta_p->bootstrap_addr = psaddr) == NULL)
			psaddr = uberdata_addr;
		else if (ps_pdread(ph_p, psaddr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if (psaddr == NULL) {
			/* primary linkmap in the tgt is not initialized */
			ta_p->bootstrap_addr = NULL;
			psaddr = uberdata_addr;
		}
		ta_p->uberdata_addr = psaddr;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		caddr32_t psaddr;
		if (ps_pdread(ph_p, bootstrap_addr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if ((ta_p->bootstrap_addr = (psaddr_t)psaddr) == NULL)
			psaddr = (caddr32_t)uberdata_addr;
		else if (ps_pdread(ph_p, (psaddr_t)psaddr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if (psaddr == NULL) {
			/* primary linkmap in the tgt is not initialized */
			ta_p->bootstrap_addr = NULL;
			psaddr = (caddr32_t)uberdata_addr;
		}
		ta_p->uberdata_addr = (psaddr_t)psaddr;
#else
		return (TD_DBERR);
#endif	/* _SYSCALL32 */
	}

	if ((return_val = td_read_uberdata(ta_p)) != TD_OK)
		return (return_val);
	if (ta_p->bootstrap_addr == NULL)
		ta_p->initialized = 0;
	return (TD_OK);
}

#pragma weak ps_kill
#pragma weak ps_lrolltoaddr

/*
 * Allocate a new agent process handle ("thread agent").
 */
#pragma weak td_ta_new = __td_ta_new
td_err_e
__td_ta_new(struct ps_prochandle *ph_p, td_thragent_t **ta_pp)
{
	td_thragent_t *ta_p;
	int model;
	td_err_e return_val = TD_OK;

	if (ph_p == NULL)
		return (TD_BADPH);
	if (ta_pp == NULL)
		return (TD_ERR);
	*ta_pp = NULL;
	if (ps_pstop(ph_p) != PS_OK)
		return (TD_DBERR);
	/*
	 * ps_pdmodel might not be defined if this is an older client.
	 * Make it a weak symbol and test if it exists before calling.
	 */
#pragma weak ps_pdmodel
	if (ps_pdmodel == NULL) {
		model = PR_MODEL_NATIVE;
	} else if (ps_pdmodel(ph_p, &model) != PS_OK) {
		(void) ps_pcontinue(ph_p);
		return (TD_ERR);
	}
	if ((ta_p = malloc(sizeof (*ta_p))) == NULL) {
		(void) ps_pcontinue(ph_p);
		return (TD_MALLOC);
	}

	/*
	 * Initialize the agent process handle.
	 * Pick up the symbol value we need from the target process.
	 */
	(void) memset(ta_p, 0, sizeof (*ta_p));
	ta_p->ph_p = ph_p;
	(void) rwlock_init(&ta_p->rwlock, USYNC_THREAD, NULL);
	ta_p->model = model;
	return_val = td_read_bootstrap_data(ta_p);

	/*
	 * Because the old libthread_db enabled lock tracking by default,
	 * we must also do it.  However, we do it only if the application
	 * provides the ps_kill() and ps_lrolltoaddr() interfaces.
	 * (dbx provides the ps_kill() and ps_lrolltoaddr() interfaces.)
	 */
	if (return_val == TD_OK && ps_kill != NULL && ps_lrolltoaddr != NULL) {
		register_sync_t oldenable;
		register_sync_t enable = REGISTER_SYNC_ENABLE;
		psaddr_t psaddr = ta_p->tdb_register_sync_addr;

		if (ps_pdread(ph_p, psaddr,
		    &oldenable, sizeof (oldenable)) != PS_OK)
			return_val = TD_DBERR;
		else if (oldenable != REGISTER_SYNC_OFF ||
		    ps_pdwrite(ph_p, psaddr,
		    &enable, sizeof (enable)) != PS_OK) {
			/*
			 * Lock tracking was already enabled or we
			 * failed to enable it, probably because we
			 * are examining a core file.  In either case
			 * set the sync_tracking flag non-zero to
			 * indicate that we should not attempt to
			 * disable lock tracking when we delete the
			 * agent process handle in td_ta_delete().
			 */
			ta_p->sync_tracking = 1;
		}
	}

	if (return_val == TD_OK)
		*ta_pp = ta_p;
	else
		free(ta_p);

	(void) ps_pcontinue(ph_p);
	return (return_val);
}

/*
 * Utility function to grab the readers lock and return the prochandle,
 * given an agent process handle.  Performs standard error checking.
 * Returns non-NULL with the lock held, or NULL with the lock not held.
 */
static struct ps_prochandle *
ph_lock_ta(td_thragent_t *ta_p, td_err_e *err)
{
	struct ps_prochandle *ph_p = NULL;
	td_err_e error;

	if (ta_p == NULL || ta_p->initialized == -1) {
		*err = TD_BADTA;
	} else if (rw_rdlock(&ta_p->rwlock) != 0) {	/* can't happen? */
		*err = TD_BADTA;
	} else if ((ph_p = ta_p->ph_p) == NULL) {
		(void) rw_unlock(&ta_p->rwlock);
		*err = TD_BADPH;
	} else if (ta_p->initialized != 2 &&
	    (error = td_read_bootstrap_data(ta_p)) != TD_OK) {
		(void) rw_unlock(&ta_p->rwlock);
		ph_p = NULL;
		*err = error;
	} else {
		*err = TD_OK;
	}

	return (ph_p);
}

/*
 * Utility function to grab the readers lock and return the prochandle,
 * given an agent thread handle.  Performs standard error checking.
 * Returns non-NULL with the lock held, or NULL with the lock not held.
 */
static struct ps_prochandle *
ph_lock_th(const td_thrhandle_t *th_p, td_err_e *err)
{
	if (th_p == NULL || th_p->th_unique == NULL) {
		*err = TD_BADTH;
		return (NULL);
	}
	return (ph_lock_ta(th_p->th_ta_p, err));
}

/*
 * Utility function to grab the readers lock and return the prochandle,
 * given a synchronization object handle.  Performs standard error checking.
 * Returns non-NULL with the lock held, or NULL with the lock not held.
 */
static struct ps_prochandle *
ph_lock_sh(const td_synchandle_t *sh_p, td_err_e *err)
{
	if (sh_p == NULL || sh_p->sh_unique == NULL) {
		*err = TD_BADSH;
		return (NULL);
	}
	return (ph_lock_ta(sh_p->sh_ta_p, err));
}

/*
 * Unlock the agent process handle obtained from ph_lock_*().
 */
static void
ph_unlock(td_thragent_t *ta_p)
{
	(void) rw_unlock(&ta_p->rwlock);
}
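
/*
 * All of the exported functions below use these helpers in the same
 * way (illustrative sketch, not a separate interface):
 *
 *	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
 *		return (return_val);
 *	... optionally bracket target reads with ps_pstop()/ps_pcontinue() ...
 *	ph_unlock(ta_p);
 *	return (return_val);
 *
 * so that the prochandle cannot be nulled out by td_ta_delete()
 * while an operation is in progress.
 */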

/*
 * De-allocate an agent process handle,
 * releasing all related resources.
 *
 * XXX -- This is hopelessly broken ---
 * Storage for thread agent is not deallocated.  The prochandle
 * in the thread agent is set to NULL so that future uses of
 * the thread agent can be detected and an error value returned.
 * All functions in the external user interface that make
 * use of the thread agent are expected
 * to check for a NULL prochandle in the thread agent.
 * All such functions are also expected to obtain a
 * reader lock on the thread agent while it is using it.
 */
#pragma weak td_ta_delete = __td_ta_delete
td_err_e
__td_ta_delete(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p;

	/*
	 * This is the only place we grab the writer lock.
	 * We are going to NULL out the prochandle.
	 */
	if (ta_p == NULL || rw_wrlock(&ta_p->rwlock) != 0)
		return (TD_BADTA);
	if ((ph_p = ta_p->ph_p) == NULL) {
		(void) rw_unlock(&ta_p->rwlock);
		return (TD_BADPH);
	}
	/*
	 * If synch. tracking was disabled when td_ta_new() was called and
	 * if td_ta_sync_tracking_enable() was never called, then disable
	 * synch. tracking (it was enabled by default in td_ta_new()).
	 */
	if (ta_p->sync_tracking == 0 &&
	    ps_kill != NULL && ps_lrolltoaddr != NULL) {
		register_sync_t enable = REGISTER_SYNC_DISABLE;

		(void) ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
		    &enable, sizeof (enable));
	}
	ta_p->ph_p = NULL;
	(void) rw_unlock(&ta_p->rwlock);
	return (TD_OK);
}

/*
 * Map an agent process handle to a client prochandle.
 * Currently unused by dbx.
 */
#pragma weak td_ta_get_ph = __td_ta_get_ph
td_err_e
__td_ta_get_ph(td_thragent_t *ta_p, struct ps_prochandle **ph_pp)
{
	td_err_e return_val;

	if (ph_pp != NULL)	/* protect stupid callers */
		*ph_pp = NULL;
	if (ph_pp == NULL)
		return (TD_ERR);
	if ((*ph_pp = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	ph_unlock(ta_p);
	return (TD_OK);
}

/*
 * Set the process's suggested concurrency level.
 * This is a no-op in a one-level model.
 * Currently unused by dbx.
 */
#pragma weak td_ta_setconcurrency = __td_ta_setconcurrency
/* ARGSUSED1 */
td_err_e
__td_ta_setconcurrency(const td_thragent_t *ta_p, int level)
{
	if (ta_p == NULL)
		return (TD_BADTA);
	if (ta_p->ph_p == NULL)
		return (TD_BADPH);
	return (TD_OK);
}

/*
 * Get the number of threads in the process.
 */
#pragma weak td_ta_get_nthreads = __td_ta_get_nthreads
td_err_e
__td_ta_get_nthreads(td_thragent_t *ta_p, int *nthread_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	int nthreads;
	int nzombies;
	psaddr_t nthreads_addr;
	psaddr_t nzombies_addr;

	if (ta_p->model == PR_MODEL_NATIVE) {
		nthreads_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, nthreads);
		nzombies_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, nzombies);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		nthreads_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, nthreads);
		nzombies_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, nzombies);
#else
		nthreads_addr = 0;
		nzombies_addr = 0;
#endif	/* _SYSCALL32 */
	}

	if (nthread_p == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pdread(ph_p, nthreads_addr, &nthreads, sizeof (int)) != PS_OK)
		return_val = TD_DBERR;
	if (ps_pdread(ph_p, nzombies_addr, &nzombies, sizeof (int)) != PS_OK)
		return_val = TD_DBERR;
	ph_unlock(ta_p);
	if (return_val == TD_OK)
		*nthread_p = nthreads + nzombies;
	return (return_val);
}

typedef struct {
	thread_t	tid;
	int		found;
	td_thrhandle_t	th;
} td_mapper_param_t;

/*
 * Check the value in data against the thread id.
 * If it matches, return 1 to terminate iterations.
 * This function is used by td_ta_map_id2thr() to map a tid to a thread handle.
 */
static int
td_mapper_id2thr(td_thrhandle_t *th_p, td_mapper_param_t *data)
{
	td_thrinfo_t ti;

	if (__td_thr_get_info(th_p, &ti) == TD_OK &&
	    data->tid == ti.ti_tid) {
		data->found = 1;
		data->th = *th_p;
		return (1);
	}
	return (0);
}
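
/*
 * Illustrative sketch (not part of the interface documentation):
 * a debugger holding only a thread id typically converts it with
 *
 *	td_thrhandle_t th;
 *	td_thrinfo_t ti;
 *
 *	if (td_ta_map_id2thr(ta_p, tid, &th) == TD_OK &&
 *	    td_thr_get_info(&th, &ti) == TD_OK)
 *		... examine ti.ti_state, ti.ti_lid, and so on ...
 */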

/*
 * Given a thread identifier, return the corresponding thread handle.
 */
#pragma weak td_ta_map_id2thr = __td_ta_map_id2thr
td_err_e
__td_ta_map_id2thr(td_thragent_t *ta_p, thread_t tid,
	td_thrhandle_t *th_p)
{
	td_err_e return_val;
	td_mapper_param_t data;

	if (th_p != NULL &&	/* optimize for a single thread */
	    ta_p != NULL &&
	    ta_p->initialized == 1 &&
	    (td_read_hash_size(ta_p) == 1 ||
	    td_read_uberdata(ta_p) == TD_OK) &&
	    ta_p->initialized == 1 &&
	    ta_p->single_lwpid == tid) {
		th_p->th_ta_p = ta_p;
		if ((th_p->th_unique = ta_p->single_ulwp_addr) == 0)
			return (TD_NOTHR);
		return (TD_OK);
	}

	/*
	 * LOCKING EXCEPTION - Locking is not required here because
	 * the locking and checking will be done in __td_ta_thr_iter.
	 */

	if (ta_p == NULL)
		return (TD_BADTA);
	if (th_p == NULL)
		return (TD_BADTH);
	if (tid == 0)
		return (TD_NOTHR);

	data.tid = tid;
	data.found = 0;
	return_val = __td_ta_thr_iter(ta_p,
	    (td_thr_iter_f *)td_mapper_id2thr, (void *)&data,
	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
	    TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
	if (return_val == TD_OK) {
		if (data.found == 0)
			return_val = TD_NOTHR;
		else
			*th_p = data.th;
	}

	return (return_val);
}

/*
 * Map the address of a synchronization object to a sync. object handle.
 */
#pragma weak td_ta_map_addr2sync = __td_ta_map_addr2sync
td_err_e
__td_ta_map_addr2sync(td_thragent_t *ta_p, psaddr_t addr, td_synchandle_t *sh_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	uint16_t sync_magic;

	if (sh_p == NULL)
		return (TD_BADSH);
	if (addr == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	/*
	 * Check the magic number of the sync. object to make sure it's valid.
	 * The magic number is at the same offset for all sync. objects.
	 */
	if (ps_pdread(ph_p, (psaddr_t)&((mutex_t *)addr)->mutex_magic,
	    &sync_magic, sizeof (sync_magic)) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_BADSH);
	}
	ph_unlock(ta_p);
	if (sync_magic != MUTEX_MAGIC && sync_magic != COND_MAGIC &&
	    sync_magic != SEMA_MAGIC && sync_magic != RWL_MAGIC)
		return (TD_BADSH);
	/*
	 * Just fill in the appropriate fields of the sync. handle.
	 */
	sh_p->sh_ta_p = (td_thragent_t *)ta_p;
	sh_p->sh_unique = addr;
	return (TD_OK);
}

/*
 * Iterate over the set of global TSD keys.
 * The call back function is called with three arguments,
 * a key, a pointer to the destructor function, and the cbdata pointer.
 * Currently unused by dbx.
 */
#pragma weak td_ta_tsd_iter = __td_ta_tsd_iter
td_err_e
__td_ta_tsd_iter(td_thragent_t *ta_p, td_key_iter_f *cb, void *cbdata_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	int key;
	int numkeys;
	psaddr_t dest_addr;
	psaddr_t *destructors = NULL;
	PFrV destructor;

	if (cb == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	if (ta_p->model == PR_MODEL_NATIVE) {
		tsd_metadata_t tsdm;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else {
			numkeys = tsdm.tsdm_nused;
			dest_addr = (psaddr_t)tsdm.tsdm_destro;
			if (numkeys > 0)
				destructors =
				    malloc(numkeys * sizeof (psaddr_t));
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		tsd_metadata32_t tsdm;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else {
			numkeys = tsdm.tsdm_nused;
			dest_addr = (psaddr_t)tsdm.tsdm_destro;
			if (numkeys > 0)
				destructors =
				    malloc(numkeys * sizeof (caddr32_t));
		}
#else
		return_val = TD_DBERR;
#endif	/* _SYSCALL32 */
	}

	if (return_val != TD_OK || numkeys <= 0) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (return_val);
	}

	if (destructors == NULL)
		return_val = TD_MALLOC;
	else if (ta_p->model == PR_MODEL_NATIVE) {
		if (ps_pdread(ph_p, dest_addr,
		    destructors, numkeys * sizeof (psaddr_t)) != PS_OK)
			return_val = TD_DBERR;
		else {
			for (key = 1; key < numkeys; key++) {
				destructor = (PFrV)destructors[key];
				if (destructor != TSD_UNALLOCATED &&
				    (*cb)(key, destructor, cbdata_p))
					break;
			}
		}
#if defined(_LP64) && defined(_SYSCALL32)
	} else {
		caddr32_t *destructors32 = (caddr32_t *)destructors;
		caddr32_t destruct32;

		if (ps_pdread(ph_p, dest_addr,
		    destructors32, numkeys * sizeof (caddr32_t)) != PS_OK)
			return_val = TD_DBERR;
		else {
			for (key = 1; key < numkeys; key++) {
				destruct32 = destructors32[key];
				if ((destruct32 !=
				    (caddr32_t)(uintptr_t)TSD_UNALLOCATED) &&
				    (*cb)(key, (PFrV)(uintptr_t)destruct32,
				    cbdata_p))
					break;
			}
		}
#endif	/* _SYSCALL32 */
	}

	if (destructors)
		free(destructors);
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}

int
sigequalset(const sigset_t *s1, const sigset_t *s2)
{
	return (
	    s1->__sigbits[0] == s2->__sigbits[0] &&
	    s1->__sigbits[1] == s2->__sigbits[1] &&
	    s1->__sigbits[2] == s2->__sigbits[2] &&
	    s1->__sigbits[3] == s2->__sigbits[3]);
}

/*
 * Description:
 *   Iterate over all threads.  For each thread call
 *   the function pointed to by "cb" with a pointer
 *   to a thread handle, and a pointer to data which
 *   can be NULL.  Only call td_thr_iter_f() on threads
 *   which match the properties of state, ti_pri,
 *   ti_sigmask_p, and ti_user_flags.  If cb returns
 *   a non-zero value, terminate iterations.
 *
 * Input:
 *   *ta_p - thread agent
 *   *cb - call back function defined by user.
 *	td_thr_iter_f() takes a thread handle and
 *	cbdata_p as a parameter.
 *   cbdata_p - parameter for td_thr_iter_f().
 *
 *   state - state of threads of interest.  A value of
 *	TD_THR_ANY_STATE from enum td_thr_state_e
 *	does not restrict iterations by state.
 *   ti_pri - lower bound of priorities of threads of
 *	interest.  A value of TD_THR_LOWEST_PRIORITY
 *	defined in thread_db.h does not restrict
 *	iterations by priority.  A thread with priority
 *	less than ti_pri will NOT be passed to the callback
 *	function.
 *   ti_sigmask_p - signal mask of threads of interest.
 *	A value of TD_SIGNO_MASK defined in thread_db.h
 *	does not restrict iterations by signal mask.
 *   ti_user_flags - user flags of threads of interest.  A
 *	value of TD_THR_ANY_USER_FLAGS defined in thread_db.h
 *	does not restrict iterations by user flags.
 */
#pragma weak td_ta_thr_iter = __td_ta_thr_iter
td_err_e
__td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
	void *cbdata_p, td_thr_state_e state, int ti_pri,
	sigset_t *ti_sigmask_p, unsigned ti_user_flags)
{
	struct ps_prochandle *ph_p;
	psaddr_t first_lwp_addr;
	psaddr_t first_zombie_addr;
	psaddr_t curr_lwp_addr;
	psaddr_t next_lwp_addr;
	td_thrhandle_t th;
	ps_err_e db_return;
	ps_err_e db_return2;
	td_err_e return_val;

	if (cb == NULL)
		return (TD_ERR);
	/*
	 * If state is not within bound, short circuit.
	 */
	if (state < TD_THR_ANY_STATE || state > TD_THR_STOPPED_ASLEEP)
		return (TD_OK);

	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * For each ulwp_t in the circular linked lists pointed
	 * to by "all_lwps" and "all_zombies":
	 * (1) Filter each thread.
	 * (2) Create the thread_object for each thread that passes.
	 * (3) Call the call back function on each thread.
	 */

	if (ta_p->model == PR_MODEL_NATIVE) {
		db_return = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, all_lwps),
		    &first_lwp_addr, sizeof (first_lwp_addr));
		db_return2 = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, all_zombies),
		    &first_zombie_addr, sizeof (first_zombie_addr));
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		caddr32_t addr32;

		db_return = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, all_lwps),
		    &addr32, sizeof (addr32));
		first_lwp_addr = addr32;
		db_return2 = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, all_zombies),
		    &addr32, sizeof (addr32));
		first_zombie_addr = addr32;
#else	/* _SYSCALL32 */
		db_return = PS_ERR;
		db_return2 = PS_ERR;
#endif	/* _SYSCALL32 */
	}
	if (db_return == PS_OK)
		db_return = db_return2;

	/*
	 * If first_lwp_addr and first_zombie_addr are both NULL,
	 * libc must not yet be initialized or all threads have
	 * exited.  Return TD_NOTHR and all will be well.
	 */
	if (db_return == PS_OK &&
	    first_lwp_addr == NULL && first_zombie_addr == NULL) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (TD_NOTHR);
	}
	if (db_return != PS_OK) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * Run down the lists of all living and dead lwps.
	 */
	if (first_lwp_addr == NULL)
		first_lwp_addr = first_zombie_addr;
	curr_lwp_addr = first_lwp_addr;
	for (;;) {
		td_thr_state_e ts_state;
		int userpri;
		unsigned userflags;
		sigset_t mask;

		/*
		 * Read the ulwp struct.
		 */
		if (ta_p->model == PR_MODEL_NATIVE) {
			ulwp_t ulwp;

			if (ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, sizeof (ulwp)) != PS_OK &&
			    ((void) memset(&ulwp, 0, sizeof (ulwp)),
			    ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, REPLACEMENT_SIZE)) != PS_OK) {
				return_val = TD_DBERR;
				break;
			}
			next_lwp_addr = (psaddr_t)ulwp.ul_forw;

			ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
			    ulwp.ul_stop? TD_THR_STOPPED :
			    ulwp.ul_wchan? TD_THR_SLEEP :
			    TD_THR_ACTIVE;
			userpri = ulwp.ul_pri;
			userflags = ulwp.ul_usropts;
			if (ulwp.ul_dead)
				(void) sigemptyset(&mask);
			else
				mask = *(sigset_t *)&ulwp.ul_sigmask;
		} else {
#if defined(_LP64) && defined(_SYSCALL32)
			ulwp32_t ulwp;

			if (ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, sizeof (ulwp)) != PS_OK &&
			    ((void) memset(&ulwp, 0, sizeof (ulwp)),
			    ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, REPLACEMENT_SIZE32)) != PS_OK) {
				return_val = TD_DBERR;
				break;
			}
			next_lwp_addr = (psaddr_t)ulwp.ul_forw;

			ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
			    ulwp.ul_stop? TD_THR_STOPPED :
			    ulwp.ul_wchan? TD_THR_SLEEP :
			    TD_THR_ACTIVE;
			userpri = ulwp.ul_pri;
			userflags = ulwp.ul_usropts;
			if (ulwp.ul_dead)
				(void) sigemptyset(&mask);
			else
				mask = *(sigset_t *)&ulwp.ul_sigmask;
#else	/* _SYSCALL32 */
			return_val = TD_ERR;
			break;
#endif	/* _SYSCALL32 */
		}

		/*
		 * Filter on state, priority, sigmask, and user flags.
		 */

		if ((state != ts_state) &&
		    (state != TD_THR_ANY_STATE))
			goto advance;

		if (ti_pri > userpri)
			goto advance;

		if (ti_sigmask_p != TD_SIGNO_MASK &&
		    !sigequalset(ti_sigmask_p, &mask))
			goto advance;

		if (ti_user_flags != userflags &&
		    ti_user_flags != (unsigned)TD_THR_ANY_USER_FLAGS)
			goto advance;

		/*
		 * Call back - break if the return
		 * from the call back is non-zero.
		 */
		th.th_ta_p = (td_thragent_t *)ta_p;
		th.th_unique = curr_lwp_addr;
		if ((*cb)(&th, cbdata_p))
			break;

	advance:
		if ((curr_lwp_addr = next_lwp_addr) == first_lwp_addr) {
			/*
			 * Switch to the zombie list, unless it is NULL
			 * or we have already been doing the zombie list,
			 * in which case terminate the loop.
			 */
			if (first_zombie_addr == NULL ||
			    first_lwp_addr == first_zombie_addr)
				break;
			curr_lwp_addr = first_lwp_addr = first_zombie_addr;
		}
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
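
/*
 * Illustrative sketch (not part of the original interface docs):
 * a debugger can count the threads in the target by iterating:
 *
 *	static int
 *	count_thread(const td_thrhandle_t *th, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (0);		(zero means keep iterating)
 *	}
 *
 *	int nthr = 0;
 *	(void) td_ta_thr_iter(ta_p, count_thread, &nthr, TD_THR_ANY_STATE,
 *	    TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
 */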

/*
 * Enable or disable process synchronization object tracking.
 * Currently unused by dbx.
 */
#pragma weak td_ta_sync_tracking_enable = __td_ta_sync_tracking_enable
td_err_e
__td_ta_sync_tracking_enable(td_thragent_t *ta_p, int onoff)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	register_sync_t enable;

	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	/*
	 * Values of tdb_register_sync in the victim process:
	 *	REGISTER_SYNC_ENABLE	enables registration of synch objects
	 *	REGISTER_SYNC_DISABLE	disables registration of synch objects
	 * These cause the table to be cleared and tdb_register_sync set to:
	 *	REGISTER_SYNC_ON	registration in effect
	 *	REGISTER_SYNC_OFF	registration not in effect
	 */
	enable = onoff? REGISTER_SYNC_ENABLE : REGISTER_SYNC_DISABLE;
	if (ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
	    &enable, sizeof (enable)) != PS_OK)
		return_val = TD_DBERR;
	/*
	 * Remember that this interface was called (see td_ta_delete()).
	 */
	ta_p->sync_tracking = 1;
	ph_unlock(ta_p);
	return (return_val);
}

/*
 * Iterate over all known synchronization variables.
 * It is very possible that the list generated is incomplete,
 * because the iterator can only find synchronization variables
 * that have been registered by the process since synchronization
 * object registration was enabled.
 * The call back function cb is called for each synchronization
 * variable with two arguments: a pointer to the synchronization
 * handle and the passed-in argument cbdata.
 * If cb returns a non-zero value, iterations are terminated.
 */
#pragma weak td_ta_sync_iter = __td_ta_sync_iter
td_err_e
__td_ta_sync_iter(td_thragent_t *ta_p, td_sync_iter_f *cb, void *cbdata)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	int i;
	register_sync_t enable;
	psaddr_t next_desc;
	tdb_sync_stats_t sync_stats;
	td_synchandle_t synchandle;
	psaddr_t psaddr;
	void *vaddr;
	uint64_t *sync_addr_hash = NULL;

	if (cb == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}
	if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
	    &enable, sizeof (enable)) != PS_OK) {
		return_val = TD_DBERR;
		goto out;
	}
	if (enable != REGISTER_SYNC_ON)
		goto out;

	/*
	 * First read the hash table.
	 * The hash table is large; allocate with mmap().
	 */
	if ((vaddr = mmap(NULL, TDB_HASH_SIZE * sizeof (uint64_t),
	    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
	    == MAP_FAILED) {
		return_val = TD_MALLOC;
		goto out;
	}
	sync_addr_hash = vaddr;

	if (ta_p->model == PR_MODEL_NATIVE) {
		if (ps_pdread(ph_p, ta_p->uberdata_addr +
		    offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
		    &psaddr, sizeof (psaddr)) != PS_OK) {
			return_val = TD_DBERR;
			goto out;
		}
	} else {
#ifdef _SYSCALL32
		caddr32_t addr;

		if (ps_pdread(ph_p, ta_p->uberdata_addr +
		    offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
		    &addr, sizeof (addr)) != PS_OK) {
			return_val = TD_DBERR;
			goto out;
		}
		psaddr = addr;
#else
		return_val = TD_ERR;
		goto out;
#endif	/* _SYSCALL32 */
	}

	if (psaddr == NULL)
		goto out;
	if (ps_pdread(ph_p, psaddr, sync_addr_hash,
	    TDB_HASH_SIZE * sizeof (uint64_t)) != PS_OK) {
		return_val = TD_DBERR;
		goto out;
	}

	/*
	 * Now scan the hash table.
	 */
	for (i = 0; i < TDB_HASH_SIZE; i++) {
		for (next_desc = (psaddr_t)sync_addr_hash[i];
		    next_desc != NULL;
		    next_desc = (psaddr_t)sync_stats.next) {
			if (ps_pdread(ph_p, next_desc,
			    &sync_stats, sizeof (sync_stats)) != PS_OK) {
				return_val = TD_DBERR;
				goto out;
			}
			if (sync_stats.un.type == TDB_NONE) {
				/* not registered since registration enabled */
				continue;
			}
			synchandle.sh_ta_p = ta_p;
			synchandle.sh_unique = (psaddr_t)sync_stats.sync_addr;
			if ((*cb)(&synchandle, cbdata) != 0)
				goto out;
		}
	}

out:
	if (sync_addr_hash != NULL)
		(void) munmap((void *)sync_addr_hash,
		    TDB_HASH_SIZE * sizeof (uint64_t));
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}

/*
 * Enable process statistics collection.
 */
#pragma weak td_ta_enable_stats = __td_ta_enable_stats
/* ARGSUSED */
td_err_e
__td_ta_enable_stats(const td_thragent_t *ta_p, int onoff)
{
	return (TD_NOCAPAB);
}

/*
 * Reset process statistics.
 */
#pragma weak td_ta_reset_stats = __td_ta_reset_stats
/* ARGSUSED */
td_err_e
__td_ta_reset_stats(const td_thragent_t *ta_p)
{
	return (TD_NOCAPAB);
}

/*
 * Read process statistics.
 */
#pragma weak td_ta_get_stats = __td_ta_get_stats
/* ARGSUSED */
td_err_e
__td_ta_get_stats(const td_thragent_t *ta_p, td_ta_stats_t *tstats)
{
	return (TD_NOCAPAB);
}
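
/*
 * Process statistics collection is not implemented in this libc_db;
 * the three entry points above exist for interface compatibility and
 * always return TD_NOCAPAB.
 */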

/*
 * Transfer information from lwp struct to thread information struct.
 * XXX -- lots of this needs cleaning up.
 */
static void
td_thr2to(td_thragent_t *ta_p, psaddr_t ts_addr,
	ulwp_t *ulwp, td_thrinfo_t *ti_p)
{
	lwpid_t lwpid;

	if ((lwpid = ulwp->ul_lwpid) == 0)
		lwpid = 1;
	(void) memset(ti_p, 0, sizeof (*ti_p));
	ti_p->ti_ta_p = ta_p;
	ti_p->ti_user_flags = ulwp->ul_usropts;
	ti_p->ti_tid = lwpid;
	ti_p->ti_exitval = ulwp->ul_rval;
	ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
	if (!ulwp->ul_dead) {
		/*
		 * The bloody fools got this backwards!
		 */
		ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
		ti_p->ti_stksize = ulwp->ul_stksiz;
	}
	ti_p->ti_ro_area = ts_addr;
	ti_p->ti_ro_size = ulwp->ul_replace?
	    REPLACEMENT_SIZE : sizeof (ulwp_t);
	ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
	    ulwp->ul_stop? TD_THR_STOPPED :
	    ulwp->ul_wchan? TD_THR_SLEEP :
	    TD_THR_ACTIVE;
	ti_p->ti_db_suspended = 0;
	ti_p->ti_type = TD_THR_USER;
	ti_p->ti_sp = ulwp->ul_sp;
	ti_p->ti_flags = 0;
	ti_p->ti_pri = ulwp->ul_pri;
	ti_p->ti_lid = lwpid;
	if (!ulwp->ul_dead)
		ti_p->ti_sigmask = ulwp->ul_sigmask;
	ti_p->ti_traceme = 0;
	ti_p->ti_preemptflag = 0;
	ti_p->ti_pirecflag = 0;
	(void) sigemptyset(&ti_p->ti_pending);
	ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
}

#if defined(_LP64) && defined(_SYSCALL32)
static void
td_thr2to32(td_thragent_t *ta_p, psaddr_t ts_addr,
	ulwp32_t *ulwp, td_thrinfo_t *ti_p)
{
	lwpid_t lwpid;

	if ((lwpid = ulwp->ul_lwpid) == 0)
		lwpid = 1;
	(void) memset(ti_p, 0, sizeof (*ti_p));
	ti_p->ti_ta_p = ta_p;
	ti_p->ti_user_flags = ulwp->ul_usropts;
	ti_p->ti_tid = lwpid;
	ti_p->ti_exitval = (void *)(uintptr_t)ulwp->ul_rval;
	ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
	if (!ulwp->ul_dead) {
		/*
		 * The bloody fools got this backwards!
		 */
		ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
		ti_p->ti_stksize = ulwp->ul_stksiz;
	}
	ti_p->ti_ro_area = ts_addr;
	ti_p->ti_ro_size = ulwp->ul_replace?
	    REPLACEMENT_SIZE32 : sizeof (ulwp32_t);
	ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
	    ulwp->ul_stop? TD_THR_STOPPED :
	    ulwp->ul_wchan? TD_THR_SLEEP :
	    TD_THR_ACTIVE;
	ti_p->ti_db_suspended = 0;
	ti_p->ti_type = TD_THR_USER;
	ti_p->ti_sp = (uint32_t)ulwp->ul_sp;
	ti_p->ti_flags = 0;
	ti_p->ti_pri = ulwp->ul_pri;
	ti_p->ti_lid = lwpid;
	if (!ulwp->ul_dead)
		ti_p->ti_sigmask = *(sigset_t *)&ulwp->ul_sigmask;
	ti_p->ti_traceme = 0;
	ti_p->ti_preemptflag = 0;
	ti_p->ti_pirecflag = 0;
	(void) sigemptyset(&ti_p->ti_pending);
	ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
}
#endif	/* _SYSCALL32 */

/*
 * Get thread information.
 */
#pragma weak td_thr_get_info = __td_thr_get_info
td_err_e
__td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p)
{
	struct ps_prochandle *ph_p;
	td_thragent_t *ta_p;
	td_err_e return_val;
	psaddr_t psaddr;

	if (ti_p == NULL)
		return (TD_ERR);
	(void) memset(ti_p, 0, sizeof (*ti_p));

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	ta_p = th_p->th_ta_p;
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * Read the ulwp struct from the process.
	 * Transfer the ulwp struct to the thread information struct.
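	 * If the full-sized read fails, retry with REPLACEMENT_SIZE:
	 * the ulwp_t of a thread that has exited may have been replaced
	 * by a smaller structure (ul_replace), so only its leading bytes
	 * are guaranteed to still be readable.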
	 */
	psaddr = th_p->th_unique;
	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t ulwp;

		if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
		    ((void) memset(&ulwp, 0, sizeof (ulwp)),
		    ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE)) != PS_OK)
			return_val = TD_DBERR;
		else
			td_thr2to(ta_p, psaddr, &ulwp, ti_p);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t ulwp;

		if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
		    ((void) memset(&ulwp, 0, sizeof (ulwp)),
		    ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE32)) !=
		    PS_OK)
			return_val = TD_DBERR;
		else
			td_thr2to32(ta_p, psaddr, &ulwp, ti_p);
#else
		return_val = TD_ERR;
#endif	/* _SYSCALL32 */
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}

/*
 * Given a process and an event number, return information about
 * an address in the process at which a breakpoint can be set
 * to monitor the event.
 */
#pragma weak td_ta_event_addr = __td_ta_event_addr
td_err_e
__td_ta_event_addr(td_thragent_t *ta_p, td_event_e event, td_notify_t *notify_p)
{
	if (ta_p == NULL)
		return (TD_BADTA);
	if (event < TD_MIN_EVENT_NUM || event > TD_MAX_EVENT_NUM)
		return (TD_NOEVENT);
	if (notify_p == NULL)
		return (TD_ERR);

	notify_p->type = NOTIFY_BPT;
	notify_p->u.bptaddr = ta_p->tdb_events[event - TD_MIN_EVENT_NUM];

	return (TD_OK);
}

/*
 * Add the events in eventset 2 to eventset 1.
 */
static void
eventsetaddset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
{
	int i;

	for (i = 0; i < TD_EVENTSIZE; i++)
		event1_p->event_bits[i] |= event2_p->event_bits[i];
}

/*
 * Delete the events in eventset 2 from eventset 1.
 */
static void
eventsetdelset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
{
	int i;

	for (i = 0; i < TD_EVENTSIZE; i++)
		event1_p->event_bits[i] &= ~event2_p->event_bits[i];
}

/*
 * Either add or delete the given event set from a thread's event mask.
 */
static td_err_e
mod_eventset(td_thrhandle_t *th_p, td_thr_events_t *events, int onoff)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val = TD_OK;
	char enable;
	td_thr_events_t evset;
	psaddr_t psaddr_evset;
	psaddr_t psaddr_enab;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
		psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
		psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
		psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
#else
		ph_unlock(th_p->th_ta_p);
		return (TD_ERR);
#endif	/* _SYSCALL32 */
	}
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_pdread(ph_p, psaddr_evset, &evset, sizeof (evset)) != PS_OK)
		return_val = TD_DBERR;
	else {
		if (onoff)
			eventsetaddset(&evset, events);
		else
			eventsetdelset(&evset, events);
		if (ps_pdwrite(ph_p, psaddr_evset, &evset, sizeof (evset))
		    != PS_OK)
			return_val = TD_DBERR;
		else {
			enable = 0;
			if (td_eventismember(&evset, TD_EVENTS_ENABLE))
				enable = 1;
			if (ps_pdwrite(ph_p, psaddr_enab,
			    &enable, sizeof (enable)) != PS_OK)
				return_val = TD_DBERR;
		}
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}

/*
 * Enable or disable tracing for a given thread.  Tracing
 * is filtered based on the event mask of each thread.  Tracing
 * can be turned on/off for the thread without changing the thread's
 * event mask.
 * Currently unused by dbx.
 */
#pragma weak td_thr_event_enable = __td_thr_event_enable
td_err_e
__td_thr_event_enable(td_thrhandle_t *th_p, int onoff)
{
	td_thr_events_t evset;

	td_event_emptyset(&evset);
	td_event_addset(&evset, TD_EVENTS_ENABLE);
	return (mod_eventset(th_p, &evset, onoff));
}

/*
 * Set the event mask to enable the given events; the events are
 * turned on in the thread's event mask.  If a thread encounters an
 * event for which its event mask is on, notification will be sent
 * to the debugger.
 * Addresses for each event are provided to the
 * debugger.  It is assumed that a breakpoint of some type will
 * be placed at that address.  If the event mask for the thread
 * is on, the instruction at the address will be executed.
 * Otherwise, the instruction will be skipped.
 */
#pragma weak td_thr_set_event = __td_thr_set_event
td_err_e
__td_thr_set_event(td_thrhandle_t *th_p, td_thr_events_t *events)
{
	return (mod_eventset(th_p, events, 1));
}
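
/*
 * Illustrative sketch (not part of the original interface docs):
 * a debugger typically enables thread-creation and thread-death
 * reporting with
 *
 *	td_thr_events_t ev;
 *
 *	td_event_emptyset(&ev);
 *	td_event_addset(&ev, TD_CREATE);
 *	td_event_addset(&ev, TD_DEATH);
 *	(void) td_ta_set_event(ta_p, &ev);
 *
 * and then plants breakpoints at the addresses reported by
 * td_ta_event_addr() for those events.
 */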

/*
 * Enable or disable a set of events in the process-global event mask,
 * depending on the value of onoff.
 */
static td_err_e
td_ta_mod_event(td_thragent_t *ta_p, td_thr_events_t *events, int onoff)
{
	struct ps_prochandle *ph_p;
	td_thr_events_t targ_eventset;
	td_err_e return_val;

	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}
	if (ps_pdread(ph_p, ta_p->tdb_eventmask_addr,
	    &targ_eventset, sizeof (targ_eventset)) != PS_OK)
		return_val = TD_DBERR;
	else {
		if (onoff)
			eventsetaddset(&targ_eventset, events);
		else
			eventsetdelset(&targ_eventset, events);
		if (ps_pdwrite(ph_p, ta_p->tdb_eventmask_addr,
		    &targ_eventset, sizeof (targ_eventset)) != PS_OK)
			return_val = TD_DBERR;
	}
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}

/*
 * Enable a set of events in the process-global event mask.
 */
#pragma weak td_ta_set_event = __td_ta_set_event
td_err_e
__td_ta_set_event(td_thragent_t *ta_p, td_thr_events_t *events)
{
	return (td_ta_mod_event(ta_p, events, 1));
}

/*
 * Set event mask to disable the given event set; these events are cleared
 * from the event mask of the thread.  Events that occur for a thread
 * with the event masked off will not cause notification to be
 * sent to the debugger (see td_thr_set_event for fuller description).
 */
#pragma weak td_thr_clear_event = __td_thr_clear_event
td_err_e
__td_thr_clear_event(td_thrhandle_t *th_p, td_thr_events_t *events)
{
	return (mod_eventset(th_p, events, 0));
}

/*
 * Disable a set of events in the process-global event mask.
 */
#pragma weak td_ta_clear_event = __td_ta_clear_event
td_err_e
__td_ta_clear_event(td_thragent_t *ta_p, td_thr_events_t *events)
{
	return (td_ta_mod_event(ta_p, events, 0));
}

/*
 * This function returns the most recent event message, if any,
 * associated with a thread.  Given a thread handle, return the message
 * corresponding to the event encountered by the thread.  Only one
 * message per thread is saved.  Messages from earlier events are lost
 * when later events occur.
 */
#pragma weak td_thr_event_getmsg = __td_thr_event_getmsg
td_err_e
__td_thr_event_getmsg(td_thrhandle_t *th_p, td_event_msg_t *msg)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val = TD_OK;
	psaddr_t psaddr;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_BADTA);
	}
	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
		td_evbuf_t evbuf;

		psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
		if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
			return_val = TD_DBERR;
		} else if (evbuf.eventnum == TD_EVENT_NONE) {
			return_val = TD_NOEVENT;
		} else {
			msg->event = evbuf.eventnum;
			msg->th_p = (td_thrhandle_t *)th_p;
			msg->msg.data = (uintptr_t)evbuf.eventdata;
			/* "Consume" the message */
			evbuf.eventnum = TD_EVENT_NONE;
			evbuf.eventdata = NULL;
			if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
			    != PS_OK)
				return_val = TD_DBERR;
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		td_evbuf32_t evbuf;

		psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
		if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
			return_val = TD_DBERR;
		} else if (evbuf.eventnum == TD_EVENT_NONE) {
			return_val = TD_NOEVENT;
		} else {
			msg->event = evbuf.eventnum;
			msg->th_p = (td_thrhandle_t *)th_p;
			msg->msg.data = (uintptr_t)evbuf.eventdata;
			/* "Consume" the message */
			evbuf.eventnum = TD_EVENT_NONE;
			evbuf.eventdata = NULL;
			if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
			    != PS_OK)
				return_val = TD_DBERR;
		}
#else
		return_val = TD_ERR;
#endif	/* _SYSCALL32 */
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}

/*
 * The callback function td_ta_event_getmsg uses when looking for
 * a thread with an event.  A thin wrapper around td_thr_event_getmsg.
 */
static int
event_msg_cb(const td_thrhandle_t *th_p, void *arg)
{
	static td_thrhandle_t th;
	td_event_msg_t *msg = arg;

	if (__td_thr_event_getmsg((td_thrhandle_t *)th_p, msg) == TD_OK) {
		/*
		 * Got an event, stop iterating.
		 *
		 * Because of past mistakes in interface definition,
		 * we are forced to pass back a static local variable
		 * for the thread handle because th_p is a pointer
		 * to a local variable in __td_ta_thr_iter().
		 * Grr...
		 */
		th = *th_p;
		msg->th_p = &th;
		return (1);
	}
	return (0);
}

/*
 * This function is just like td_thr_event_getmsg, except that it is
 * passed a process handle rather than a thread handle, and returns
 * an event message for some thread in the process that has an event
 * message pending.  If no thread has an event message pending, this
 * routine returns TD_NOEVENT.  Thus, all pending event messages may
 * be collected from a process by repeatedly calling this routine
 * until it returns TD_NOEVENT.
 */
#pragma weak td_ta_event_getmsg = __td_ta_event_getmsg
td_err_e
__td_ta_event_getmsg(td_thragent_t *ta_p, td_event_msg_t *msg)
{
	td_err_e return_val;

	if (ta_p == NULL)
		return (TD_BADTA);
	if (ta_p->ph_p == NULL)
		return (TD_BADPH);
	if (msg == NULL)
		return (TD_ERR);
	msg->event = TD_EVENT_NONE;
	if ((return_val = __td_ta_thr_iter(ta_p, event_msg_cb, msg,
	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK,
	    TD_THR_ANY_USER_FLAGS)) != TD_OK)
		return (return_val);
	if (msg->event == TD_EVENT_NONE)
		return (TD_NOEVENT);
	return (TD_OK);
}

static lwpid_t
thr_to_lwpid(const td_thrhandle_t *th_p)
{
	struct ps_prochandle *ph_p = th_p->th_ta_p->ph_p;
	lwpid_t lwpid;

	/*
	 * The caller holds the prochandle lock
	 * and has already verified everything.
	 */
	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
		    &lwpid, sizeof (lwpid)) != PS_OK)
			lwpid = 0;
		else if (lwpid == 0)
			lwpid = 1;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
		    &lwpid, sizeof (lwpid)) != PS_OK)
			lwpid = 0;
		else if (lwpid == 0)
			lwpid = 1;
#else
		lwpid = 0;
#endif	/* _SYSCALL32 */
	}

	return (lwpid);
}

/*
 * Suspend a thread.
 * XXX: What does this mean in a one-level model?
 */
#pragma weak td_thr_dbsuspend = __td_thr_dbsuspend
td_err_e
__td_thr_dbsuspend(const td_thrhandle_t *th_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_lstop(ph_p, thr_to_lwpid(th_p)) != PS_OK)
		return_val = TD_DBERR;
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}

/*
 * Resume a suspended thread.
 * XXX: What does this mean in a one-level model?
 */
#pragma weak td_thr_dbresume = __td_thr_dbresume
td_err_e
__td_thr_dbresume(const td_thrhandle_t *th_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_lcontinue(ph_p, thr_to_lwpid(th_p)) != PS_OK)
		return_val = TD_DBERR;
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}

/*
 * Set a thread's signal mask.
 * Currently unused by dbx.
 */
#pragma weak td_thr_sigsetmask = __td_thr_sigsetmask
/* ARGSUSED */
td_err_e
__td_thr_sigsetmask(const td_thrhandle_t *th_p, const sigset_t ti_sigmask)
{
	return (TD_NOCAPAB);
}

/*
 * Set a thread's "signals-pending" set.
 * Currently unused by dbx.
 */
#pragma weak td_thr_setsigpending = __td_thr_setsigpending
/* ARGSUSED */
td_err_e
__td_thr_setsigpending(const td_thrhandle_t *th_p,
	uchar_t ti_pending_flag, const sigset_t ti_pending)
{
	return (TD_NOCAPAB);
}
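
/*
 * The register access functions below all follow the same pattern:
 * map the thread handle to an lwpid with thr_to_lwpid(), stop the
 * process, perform the ps_lgetregs()/ps_lsetregs() (or fpregs/xregs)
 * operation, then continue and unlock.  They are thin wrappers around
 * the proc_service callbacks supplied by the debugger.
 */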

/*
 * Get a thread's general register set.
 */
#pragma weak td_thr_getgregs = __td_thr_getgregs
td_err_e
__td_thr_getgregs(td_thrhandle_t *th_p, prgregset_t regset)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_lgetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
		return_val = TD_DBERR;

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}

/*
 * Set a thread's general register set.
 */
#pragma weak td_thr_setgregs = __td_thr_setgregs
td_err_e
__td_thr_setgregs(td_thrhandle_t *th_p, const prgregset_t regset)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_lsetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
		return_val = TD_DBERR;

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}

/*
 * Get a thread's floating-point register set.
 */
#pragma weak td_thr_getfpregs = __td_thr_getfpregs
td_err_e
__td_thr_getfpregs(td_thrhandle_t *th_p, prfpregset_t *fpregset)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_lgetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
		return_val = TD_DBERR;

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}

/*
 * Set a thread's floating-point register set.
 */
#pragma weak td_thr_setfpregs = __td_thr_setfpregs
td_err_e
__td_thr_setfpregs(td_thrhandle_t *th_p, const prfpregset_t *fpregset)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_lsetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
		return_val = TD_DBERR;

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}

/*
 * Get the size of the extra state register set for this architecture.
 * Currently unused by dbx.
 */
#pragma weak td_thr_getxregsize = __td_thr_getxregsize
/* ARGSUSED */
td_err_e
__td_thr_getxregsize(td_thrhandle_t *th_p, int *xregsize)
{
#if defined(__sparc)
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_lgetxregsize(ph_p, thr_to_lwpid(th_p), xregsize) != PS_OK)
		return_val = TD_DBERR;

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
#else	/* __sparc */
	return (TD_NOXREGS);
#endif	/* __sparc */
}

/*
 * Get a thread's extra state register set.
1990 */ 1991 #pragma weak td_thr_getxregs = __td_thr_getxregs 1992 /* ARGSUSED */ 1993 td_err_e 1994 __td_thr_getxregs(td_thrhandle_t *th_p, void *xregset) 1995 { 1996 #if defined(__sparc) 1997 struct ps_prochandle *ph_p; 1998 td_err_e return_val; 1999 2000 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL) 2001 return (return_val); 2002 if (ps_pstop(ph_p) != PS_OK) { 2003 ph_unlock(th_p->th_ta_p); 2004 return (TD_DBERR); 2005 } 2006 2007 if (ps_lgetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK) 2008 return_val = TD_DBERR; 2009 2010 (void) ps_pcontinue(ph_p); 2011 ph_unlock(th_p->th_ta_p); 2012 return (return_val); 2013 #else /* __sparc */ 2014 return (TD_NOXREGS); 2015 #endif /* __sparc */ 2016 } 2017 2018 /* 2019 * Set a thread's extra state register set. 2020 */ 2021 #pragma weak td_thr_setxregs = __td_thr_setxregs 2022 /* ARGSUSED */ 2023 td_err_e 2024 __td_thr_setxregs(td_thrhandle_t *th_p, const void *xregset) 2025 { 2026 #if defined(__sparc) 2027 struct ps_prochandle *ph_p; 2028 td_err_e return_val; 2029 2030 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL) 2031 return (return_val); 2032 if (ps_pstop(ph_p) != PS_OK) { 2033 ph_unlock(th_p->th_ta_p); 2034 return (TD_DBERR); 2035 } 2036 2037 if (ps_lsetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK) 2038 return_val = TD_DBERR; 2039 2040 (void) ps_pcontinue(ph_p); 2041 ph_unlock(th_p->th_ta_p); 2042 return (return_val); 2043 #else /* __sparc */ 2044 return (TD_NOXREGS); 2045 #endif /* __sparc */ 2046 } 2047 2048 struct searcher { 2049 psaddr_t addr; 2050 int status; 2051 }; 2052 2053 /* 2054 * Check the struct thread address in *th_p again first 2055 * value in "data". If value in data is found, set second value 2056 * in "data" to 1 and return 1 to terminate iterations. 2057 * This function is used by td_thr_validate() to verify that 2058 * a thread handle is valid. 2059 */ 2060 static int 2061 td_searcher(const td_thrhandle_t *th_p, void *data) 2062 { 2063 struct searcher *searcher_data = (struct searcher *)data; 2064 2065 if (searcher_data->addr == th_p->th_unique) { 2066 searcher_data->status = 1; 2067 return (1); 2068 } 2069 return (0); 2070 } 2071 2072 /* 2073 * Validate the thread handle. Check that 2074 * a thread exists in the thread agent/process that 2075 * corresponds to thread with handle *th_p. 2076 * Currently unused by dbx. 2077 */ 2078 #pragma weak td_thr_validate = __td_thr_validate 2079 td_err_e 2080 __td_thr_validate(const td_thrhandle_t *th_p) 2081 { 2082 td_err_e return_val; 2083 struct searcher searcher_data = {0, 0}; 2084 2085 if (th_p == NULL) 2086 return (TD_BADTH); 2087 if (th_p->th_unique == NULL || th_p->th_ta_p == NULL) 2088 return (TD_BADTH); 2089 2090 /* 2091 * LOCKING EXCEPTION - Locking is not required 2092 * here because no use of the thread agent is made (other 2093 * than the sanity check) and checking of the thread 2094 * agent will be done in __td_ta_thr_iter. 2095 */ 2096 2097 searcher_data.addr = th_p->th_unique; 2098 return_val = __td_ta_thr_iter(th_p->th_ta_p, 2099 td_searcher, &searcher_data, 2100 TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, 2101 TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS); 2102 2103 if (return_val == TD_OK && searcher_data.status == 0) 2104 return_val = TD_NOTHR; 2105 2106 return (return_val); 2107 } 2108 2109 /* 2110 * Get a thread's private binding to a given thread specific 2111 * data(TSD) key(see thr_getspecific(3T). If the thread doesn't 2112 * have a binding for a particular key, then NULL is returned. 
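 *
 * Hedged example (the key is assumed to have been discovered in the
 * target, e.g. by reading the variable in which the application stored
 * its thr_keycreate()/pthread_key_create() result):
 *
 *	void *value;
 *
 *	if (td_thr_tsd(&th, key, &value) == TD_OK)
 *		... value is the thread's binding for key, or NULL ...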
2113 */ 2114 #pragma weak td_thr_tsd = __td_thr_tsd 2115 td_err_e 2116 __td_thr_tsd(td_thrhandle_t *th_p, thread_key_t key, void **data_pp) 2117 { 2118 struct ps_prochandle *ph_p; 2119 td_thragent_t *ta_p; 2120 td_err_e return_val; 2121 int maxkey; 2122 int nkey; 2123 psaddr_t tsd_paddr; 2124 2125 if (data_pp == NULL) 2126 return (TD_ERR); 2127 *data_pp = NULL; 2128 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL) 2129 return (return_val); 2130 ta_p = th_p->th_ta_p; 2131 if (ps_pstop(ph_p) != PS_OK) { 2132 ph_unlock(ta_p); 2133 return (TD_DBERR); 2134 } 2135 2136 if (ta_p->model == PR_MODEL_NATIVE) { 2137 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique; 2138 tsd_metadata_t tsdm; 2139 tsd_t stsd; 2140 2141 if (ps_pdread(ph_p, 2142 ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata), 2143 &tsdm, sizeof (tsdm)) != PS_OK) 2144 return_val = TD_DBERR; 2145 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd, 2146 &tsd_paddr, sizeof (tsd_paddr)) != PS_OK) 2147 return_val = TD_DBERR; 2148 else if (tsd_paddr != NULL && 2149 ps_pdread(ph_p, tsd_paddr, &stsd, sizeof (stsd)) != PS_OK) 2150 return_val = TD_DBERR; 2151 else { 2152 maxkey = tsdm.tsdm_nused; 2153 nkey = tsd_paddr == NULL ? TSD_NFAST : stsd.tsd_nalloc; 2154 2155 if (key < TSD_NFAST) 2156 tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0]; 2157 } 2158 } else { 2159 #if defined(_LP64) && defined(_SYSCALL32) 2160 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique; 2161 tsd_metadata32_t tsdm; 2162 tsd32_t stsd; 2163 caddr32_t addr; 2164 2165 if (ps_pdread(ph_p, 2166 ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata), 2167 &tsdm, sizeof (tsdm)) != PS_OK) 2168 return_val = TD_DBERR; 2169 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd, 2170 &addr, sizeof (addr)) != PS_OK) 2171 return_val = TD_DBERR; 2172 else if (addr != NULL && 2173 ps_pdread(ph_p, addr, &stsd, sizeof (stsd)) != PS_OK) 2174 return_val = TD_DBERR; 2175 else { 2176 maxkey = tsdm.tsdm_nused; 2177 nkey = addr == NULL ? TSD_NFAST : stsd.tsd_nalloc; 2178 2179 if (key < TSD_NFAST) { 2180 tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0]; 2181 } else { 2182 tsd_paddr = addr; 2183 } 2184 } 2185 #else 2186 return_val = TD_ERR; 2187 #endif /* _SYSCALL32 */ 2188 } 2189 2190 if (return_val == TD_OK && (key < 1 || key >= maxkey)) 2191 return_val = TD_NOTSD; 2192 if (return_val != TD_OK || key >= nkey) { 2193 /* NULL has already been stored in data_pp */ 2194 (void) ps_pcontinue(ph_p); 2195 ph_unlock(ta_p); 2196 return (return_val); 2197 } 2198 2199 /* 2200 * Read the value from the thread's tsd array. 2201 */ 2202 if (ta_p->model == PR_MODEL_NATIVE) { 2203 void *value; 2204 2205 if (ps_pdread(ph_p, tsd_paddr + key * sizeof (void *), 2206 &value, sizeof (value)) != PS_OK) 2207 return_val = TD_DBERR; 2208 else 2209 *data_pp = value; 2210 #if defined(_LP64) && defined(_SYSCALL32) 2211 } else { 2212 caddr32_t value32; 2213 2214 if (ps_pdread(ph_p, tsd_paddr + key * sizeof (caddr32_t), 2215 &value32, sizeof (value32)) != PS_OK) 2216 return_val = TD_DBERR; 2217 else 2218 *data_pp = (void *)(uintptr_t)value32; 2219 #endif /* _SYSCALL32 */ 2220 } 2221 2222 (void) ps_pcontinue(ph_p); 2223 ph_unlock(ta_p); 2224 return (return_val); 2225 } 2226 2227 /* 2228 * Get the base address of a thread's thread local storage (TLS) block 2229 * for the module (executable or shared object) identified by 'moduleid'. 
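 *
 * Hedged sketch: a debugger resolving a TLS symbol would typically get
 * the module id and the symbol's offset within that module's TLS block
 * from the run-time linker, then compute the address as base + offset.
 * TD_TLSDEFER is returned if the thread has not yet allocated its block
 * for that module.
 *
 *	psaddr_t base;
 *
 *	if (td_thr_tlsbase(&th, moduleid, &base) == TD_OK)
 *		addr = base + offset;	-- offset obtained elsewhere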
2230 */ 2231 #pragma weak td_thr_tlsbase = __td_thr_tlsbase 2232 td_err_e 2233 __td_thr_tlsbase(td_thrhandle_t *th_p, ulong_t moduleid, psaddr_t *base) 2234 { 2235 struct ps_prochandle *ph_p; 2236 td_thragent_t *ta_p; 2237 td_err_e return_val; 2238 2239 if (base == NULL) 2240 return (TD_ERR); 2241 *base = NULL; 2242 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL) 2243 return (return_val); 2244 ta_p = th_p->th_ta_p; 2245 if (ps_pstop(ph_p) != PS_OK) { 2246 ph_unlock(ta_p); 2247 return (TD_DBERR); 2248 } 2249 2250 if (ta_p->model == PR_MODEL_NATIVE) { 2251 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique; 2252 tls_metadata_t tls_metadata; 2253 TLS_modinfo tlsmod; 2254 tls_t tls; 2255 2256 if (ps_pdread(ph_p, 2257 ta_p->uberdata_addr + offsetof(uberdata_t, tls_metadata), 2258 &tls_metadata, sizeof (tls_metadata)) != PS_OK) 2259 return_val = TD_DBERR; 2260 else if (moduleid >= tls_metadata.tls_modinfo.tls_size) 2261 return_val = TD_NOTLS; 2262 else if (ps_pdread(ph_p, 2263 (psaddr_t)((TLS_modinfo *) 2264 tls_metadata.tls_modinfo.tls_data + moduleid), 2265 &tlsmod, sizeof (tlsmod)) != PS_OK) 2266 return_val = TD_DBERR; 2267 else if (tlsmod.tm_memsz == 0) 2268 return_val = TD_NOTLS; 2269 else if (tlsmod.tm_flags & TM_FLG_STATICTLS) 2270 *base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset; 2271 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls, 2272 &tls, sizeof (tls)) != PS_OK) 2273 return_val = TD_DBERR; 2274 else if (moduleid >= tls.tls_size) 2275 return_val = TD_TLSDEFER; 2276 else if (ps_pdread(ph_p, 2277 (psaddr_t)((tls_t *)tls.tls_data + moduleid), 2278 &tls, sizeof (tls)) != PS_OK) 2279 return_val = TD_DBERR; 2280 else if (tls.tls_size == 0) 2281 return_val = TD_TLSDEFER; 2282 else 2283 *base = (psaddr_t)tls.tls_data; 2284 } else { 2285 #if defined(_LP64) && defined(_SYSCALL32) 2286 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique; 2287 tls_metadata32_t tls_metadata; 2288 TLS_modinfo32 tlsmod; 2289 tls32_t tls; 2290 2291 if (ps_pdread(ph_p, 2292 ta_p->uberdata_addr + offsetof(uberdata32_t, tls_metadata), 2293 &tls_metadata, sizeof (tls_metadata)) != PS_OK) 2294 return_val = TD_DBERR; 2295 else if (moduleid >= tls_metadata.tls_modinfo.tls_size) 2296 return_val = TD_NOTLS; 2297 else if (ps_pdread(ph_p, 2298 (psaddr_t)((TLS_modinfo32 *) 2299 (uintptr_t)tls_metadata.tls_modinfo.tls_data + moduleid), 2300 &tlsmod, sizeof (tlsmod)) != PS_OK) 2301 return_val = TD_DBERR; 2302 else if (tlsmod.tm_memsz == 0) 2303 return_val = TD_NOTLS; 2304 else if (tlsmod.tm_flags & TM_FLG_STATICTLS) 2305 *base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset; 2306 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls, 2307 &tls, sizeof (tls)) != PS_OK) 2308 return_val = TD_DBERR; 2309 else if (moduleid >= tls.tls_size) 2310 return_val = TD_TLSDEFER; 2311 else if (ps_pdread(ph_p, 2312 (psaddr_t)((tls32_t *)(uintptr_t)tls.tls_data + moduleid), 2313 &tls, sizeof (tls)) != PS_OK) 2314 return_val = TD_DBERR; 2315 else if (tls.tls_size == 0) 2316 return_val = TD_TLSDEFER; 2317 else 2318 *base = (psaddr_t)tls.tls_data; 2319 #else 2320 return_val = TD_ERR; 2321 #endif /* _SYSCALL32 */ 2322 } 2323 2324 (void) ps_pcontinue(ph_p); 2325 ph_unlock(ta_p); 2326 return (return_val); 2327 } 2328 2329 /* 2330 * Change a thread's priority to the value specified by ti_pri. 2331 * Currently unused by dbx. 
2332 */ 2333 #pragma weak td_thr_setprio = __td_thr_setprio 2334 /* ARGSUSED */ 2335 td_err_e 2336 __td_thr_setprio(td_thrhandle_t *th_p, int ti_pri) 2337 { 2338 return (TD_NOCAPAB); 2339 } 2340 2341 /* 2342 * This structure links td_thr_lockowner and the lowner_cb callback function. 2343 */ 2344 typedef struct { 2345 td_sync_iter_f *owner_cb; 2346 void *owner_cb_arg; 2347 td_thrhandle_t *th_p; 2348 } lowner_cb_ctl_t; 2349 2350 static int 2351 lowner_cb(const td_synchandle_t *sh_p, void *arg) 2352 { 2353 lowner_cb_ctl_t *ocb = arg; 2354 int trunc = 0; 2355 union { 2356 rwlock_t rwl; 2357 mutex_t mx; 2358 } rw_m; 2359 2360 if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique, 2361 &rw_m, sizeof (rw_m)) != PS_OK) { 2362 trunc = 1; 2363 if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique, 2364 &rw_m.mx, sizeof (rw_m.mx)) != PS_OK) 2365 return (0); 2366 } 2367 if (rw_m.mx.mutex_magic == MUTEX_MAGIC && 2368 rw_m.mx.mutex_owner == ocb->th_p->th_unique) 2369 return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg)); 2370 if (!trunc && rw_m.rwl.magic == RWL_MAGIC) { 2371 mutex_t *rwlock = &rw_m.rwl.mutex; 2372 if (rwlock->mutex_owner == ocb->th_p->th_unique) 2373 return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg)); 2374 } 2375 return (0); 2376 } 2377 2378 /* 2379 * Iterate over the set of locks owned by a specified thread. 2380 * If cb returns a non-zero value, terminate iterations. 2381 */ 2382 #pragma weak td_thr_lockowner = __td_thr_lockowner 2383 td_err_e 2384 __td_thr_lockowner(const td_thrhandle_t *th_p, td_sync_iter_f *cb, 2385 void *cb_data) 2386 { 2387 td_thragent_t *ta_p; 2388 td_err_e return_val; 2389 lowner_cb_ctl_t lcb; 2390 2391 /* 2392 * Just sanity checks. 2393 */ 2394 if (ph_lock_th((td_thrhandle_t *)th_p, &return_val) == NULL) 2395 return (return_val); 2396 ta_p = th_p->th_ta_p; 2397 ph_unlock(ta_p); 2398 2399 lcb.owner_cb = cb; 2400 lcb.owner_cb_arg = cb_data; 2401 lcb.th_p = (td_thrhandle_t *)th_p; 2402 return (__td_ta_sync_iter(ta_p, lowner_cb, &lcb)); 2403 } 2404 2405 /* 2406 * If a thread is asleep on a synchronization variable, 2407 * then get the synchronization handle. 2408 */ 2409 #pragma weak td_thr_sleepinfo = __td_thr_sleepinfo 2410 td_err_e 2411 __td_thr_sleepinfo(const td_thrhandle_t *th_p, td_synchandle_t *sh_p) 2412 { 2413 struct ps_prochandle *ph_p; 2414 td_err_e return_val = TD_OK; 2415 uintptr_t wchan; 2416 2417 if (sh_p == NULL) 2418 return (TD_ERR); 2419 if ((ph_p = ph_lock_th((td_thrhandle_t *)th_p, &return_val)) == NULL) 2420 return (return_val); 2421 2422 /* 2423 * No need to stop the process for a simple read. 2424 */ 2425 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) { 2426 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique; 2427 2428 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan, 2429 &wchan, sizeof (wchan)) != PS_OK) 2430 return_val = TD_DBERR; 2431 } else { 2432 #if defined(_LP64) && defined(_SYSCALL32) 2433 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique; 2434 caddr32_t wchan32; 2435 2436 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan, 2437 &wchan32, sizeof (wchan32)) != PS_OK) 2438 return_val = TD_DBERR; 2439 wchan = wchan32; 2440 #else 2441 return_val = TD_ERR; 2442 #endif /* _SYSCALL32 */ 2443 } 2444 2445 if (return_val != TD_OK || wchan == NULL) { 2446 sh_p->sh_ta_p = NULL; 2447 sh_p->sh_unique = NULL; 2448 if (return_val == TD_OK) 2449 return_val = TD_ERR; 2450 } else { 2451 sh_p->sh_ta_p = th_p->th_ta_p; 2452 sh_p->sh_unique = (psaddr_t)wchan; 2453 } 2454 2455 ph_unlock(th_p->th_ta_p); 2456 return (return_val); 2457 } 2458 2459 /* 2460 * Which thread is running on an lwp? 
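 *
 * In this implementation the thread id and the lwpid are the same, so
 * this simply redirects to td_ta_map_id2thr().  A hedged example, with
 * the lwpid assumed to come from /proc (e.g. pr_lwpid in the lwpstatus):
 *
 *	td_thrhandle_t th;
 *
 *	if (td_ta_map_lwp2thr(ta_p, lwpid, &th) == TD_OK)
 *		... use th with the td_thr_*() interfaces ...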
2461 */ 2462 #pragma weak td_ta_map_lwp2thr = __td_ta_map_lwp2thr 2463 td_err_e 2464 __td_ta_map_lwp2thr(td_thragent_t *ta_p, lwpid_t lwpid, 2465 td_thrhandle_t *th_p) 2466 { 2467 return (__td_ta_map_id2thr(ta_p, lwpid, th_p)); 2468 } 2469 2470 /* 2471 * Common code for td_sync_get_info() and td_sync_get_stats() 2472 */ 2473 static td_err_e 2474 sync_get_info_common(const td_synchandle_t *sh_p, struct ps_prochandle *ph_p, 2475 td_syncinfo_t *si_p) 2476 { 2477 int trunc = 0; 2478 td_so_un_t generic_so; 2479 2480 /* 2481 * Determine the sync. object type; a little type fudgery here. 2482 * First attempt to read the whole union. If that fails, attempt 2483 * to read just the condvar. A condvar is the smallest sync. object. 2484 */ 2485 if (ps_pdread(ph_p, sh_p->sh_unique, 2486 &generic_so, sizeof (generic_so)) != PS_OK) { 2487 trunc = 1; 2488 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition, 2489 sizeof (generic_so.condition)) != PS_OK) 2490 return (TD_DBERR); 2491 } 2492 2493 switch (generic_so.condition.cond_magic) { 2494 case MUTEX_MAGIC: 2495 if (trunc && ps_pdread(ph_p, sh_p->sh_unique, 2496 &generic_so.lock, sizeof (generic_so.lock)) != PS_OK) 2497 return (TD_DBERR); 2498 si_p->si_type = TD_SYNC_MUTEX; 2499 si_p->si_shared_type = 2500 (generic_so.lock.mutex_type & USYNC_PROCESS); 2501 (void) memcpy(si_p->si_flags, &generic_so.lock.mutex_flag, 2502 sizeof (generic_so.lock.mutex_flag)); 2503 si_p->si_state.mutex_locked = 2504 (generic_so.lock.mutex_lockw != 0); 2505 si_p->si_size = sizeof (generic_so.lock); 2506 si_p->si_has_waiters = generic_so.lock.mutex_waiters; 2507 si_p->si_rcount = generic_so.lock.mutex_rcount; 2508 si_p->si_prioceiling = generic_so.lock.mutex_ceiling; 2509 if (si_p->si_state.mutex_locked) { 2510 if (si_p->si_shared_type & USYNC_PROCESS) 2511 si_p->si_ownerpid = 2512 generic_so.lock.mutex_ownerpid; 2513 si_p->si_owner.th_ta_p = sh_p->sh_ta_p; 2514 si_p->si_owner.th_unique = generic_so.lock.mutex_owner; 2515 } 2516 break; 2517 case COND_MAGIC: 2518 si_p->si_type = TD_SYNC_COND; 2519 si_p->si_shared_type = 2520 (generic_so.condition.cond_type & USYNC_PROCESS); 2521 (void) memcpy(si_p->si_flags, generic_so.condition.flags.flag, 2522 sizeof (generic_so.condition.flags.flag)); 2523 si_p->si_size = sizeof (generic_so.condition); 2524 si_p->si_has_waiters = 2525 (generic_so.condition.cond_waiters_user | 2526 generic_so.condition.cond_waiters_kernel)? 
1 : 0; 2527 break; 2528 case SEMA_MAGIC: 2529 if (trunc && ps_pdread(ph_p, sh_p->sh_unique, 2530 &generic_so.semaphore, sizeof (generic_so.semaphore)) 2531 != PS_OK) 2532 return (TD_DBERR); 2533 si_p->si_type = TD_SYNC_SEMA; 2534 si_p->si_shared_type = 2535 (generic_so.semaphore.type & USYNC_PROCESS); 2536 si_p->si_state.sem_count = generic_so.semaphore.count; 2537 si_p->si_size = sizeof (generic_so.semaphore); 2538 si_p->si_has_waiters = 2539 ((lwp_sema_t *)&generic_so.semaphore)->flags[7]; 2540 /* this is useless but the old interface provided it */ 2541 si_p->si_data = (psaddr_t)generic_so.semaphore.count; 2542 break; 2543 case RWL_MAGIC: 2544 { 2545 uint32_t rwstate; 2546 2547 if (trunc && ps_pdread(ph_p, sh_p->sh_unique, 2548 &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK) 2549 return (TD_DBERR); 2550 si_p->si_type = TD_SYNC_RWLOCK; 2551 si_p->si_shared_type = 2552 (generic_so.rwlock.rwlock_type & USYNC_PROCESS); 2553 si_p->si_size = sizeof (generic_so.rwlock); 2554 2555 rwstate = (uint32_t)generic_so.rwlock.rwlock_readers; 2556 if (rwstate & URW_WRITE_LOCKED) { 2557 si_p->si_state.nreaders = -1; 2558 si_p->si_is_wlock = 1; 2559 si_p->si_owner.th_ta_p = sh_p->sh_ta_p; 2560 si_p->si_owner.th_unique = 2561 generic_so.rwlock.rwlock_owner; 2562 if (si_p->si_shared_type & USYNC_PROCESS) 2563 si_p->si_ownerpid = 2564 generic_so.rwlock.rwlock_ownerpid; 2565 } else { 2566 si_p->si_state.nreaders = (rwstate & URW_READERS_MASK); 2567 } 2568 si_p->si_has_waiters = ((rwstate & URW_HAS_WAITERS) != 0); 2569 2570 /* this is useless but the old interface provided it */ 2571 si_p->si_data = (psaddr_t)generic_so.rwlock.readers; 2572 break; 2573 } 2574 default: 2575 return (TD_BADSH); 2576 } 2577 2578 si_p->si_ta_p = sh_p->sh_ta_p; 2579 si_p->si_sv_addr = sh_p->sh_unique; 2580 return (TD_OK); 2581 } 2582 2583 /* 2584 * Given a synchronization handle, fill in the 2585 * information for the synchronization variable into *si_p. 2586 */ 2587 #pragma weak td_sync_get_info = __td_sync_get_info 2588 td_err_e 2589 __td_sync_get_info(const td_synchandle_t *sh_p, td_syncinfo_t *si_p) 2590 { 2591 struct ps_prochandle *ph_p; 2592 td_err_e return_val; 2593 2594 if (si_p == NULL) 2595 return (TD_ERR); 2596 (void) memset(si_p, 0, sizeof (*si_p)); 2597 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL) 2598 return (return_val); 2599 if (ps_pstop(ph_p) != PS_OK) { 2600 ph_unlock(sh_p->sh_ta_p); 2601 return (TD_DBERR); 2602 } 2603 2604 return_val = sync_get_info_common(sh_p, ph_p, si_p); 2605 2606 (void) ps_pcontinue(ph_p); 2607 ph_unlock(sh_p->sh_ta_p); 2608 return (return_val); 2609 } 2610 2611 static uint_t 2612 tdb_addr_hash64(uint64_t addr) 2613 { 2614 uint64_t value60 = (addr >> 4); 2615 uint32_t value30 = (value60 >> 30) ^ (value60 & 0x3fffffff); 2616 return ((value30 >> 15) ^ (value30 & 0x7fff)); 2617 } 2618 2619 static uint_t 2620 tdb_addr_hash32(uint64_t addr) 2621 { 2622 uint32_t value30 = (addr >> 2); /* 30 bits */ 2623 return ((value30 >> 15) ^ (value30 & 0x7fff)); 2624 } 2625 2626 static td_err_e 2627 read_sync_stats(td_thragent_t *ta_p, psaddr_t hash_table, 2628 psaddr_t sync_obj_addr, tdb_sync_stats_t *sync_stats) 2629 { 2630 psaddr_t next_desc; 2631 uint64_t first; 2632 uint_t ix; 2633 2634 /* 2635 * Compute the hash table index from the synch object's address. 2636 */ 2637 if (ta_p->model == PR_MODEL_LP64) 2638 ix = tdb_addr_hash64(sync_obj_addr); 2639 else 2640 ix = tdb_addr_hash32(sync_obj_addr); 2641 2642 /* 2643 * Get the address of the first element in the linked list. 
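 * Each bucket holds a 64-bit address in the target process: zero means
 * the bucket is empty, otherwise it points to the first tdb_sync_stats_t
 * in a chain linked through its 'next' field.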
2644 */ 2645 if (ps_pdread(ta_p->ph_p, hash_table + ix * sizeof (uint64_t), 2646 &first, sizeof (first)) != PS_OK) 2647 return (TD_DBERR); 2648 2649 /* 2650 * Search the linked list for an entry for the synch object.. 2651 */ 2652 for (next_desc = (psaddr_t)first; next_desc != NULL; 2653 next_desc = (psaddr_t)sync_stats->next) { 2654 if (ps_pdread(ta_p->ph_p, next_desc, 2655 sync_stats, sizeof (*sync_stats)) != PS_OK) 2656 return (TD_DBERR); 2657 if (sync_stats->sync_addr == sync_obj_addr) 2658 return (TD_OK); 2659 } 2660 2661 (void) memset(sync_stats, 0, sizeof (*sync_stats)); 2662 return (TD_OK); 2663 } 2664 2665 /* 2666 * Given a synchronization handle, fill in the 2667 * statistics for the synchronization variable into *ss_p. 2668 */ 2669 #pragma weak td_sync_get_stats = __td_sync_get_stats 2670 td_err_e 2671 __td_sync_get_stats(const td_synchandle_t *sh_p, td_syncstats_t *ss_p) 2672 { 2673 struct ps_prochandle *ph_p; 2674 td_thragent_t *ta_p; 2675 td_err_e return_val; 2676 register_sync_t enable; 2677 psaddr_t hashaddr; 2678 tdb_sync_stats_t sync_stats; 2679 size_t ix; 2680 2681 if (ss_p == NULL) 2682 return (TD_ERR); 2683 (void) memset(ss_p, 0, sizeof (*ss_p)); 2684 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL) 2685 return (return_val); 2686 ta_p = sh_p->sh_ta_p; 2687 if (ps_pstop(ph_p) != PS_OK) { 2688 ph_unlock(ta_p); 2689 return (TD_DBERR); 2690 } 2691 2692 if ((return_val = sync_get_info_common(sh_p, ph_p, &ss_p->ss_info)) 2693 != TD_OK) { 2694 if (return_val != TD_BADSH) 2695 goto out; 2696 /* we can correct TD_BADSH */ 2697 (void) memset(&ss_p->ss_info, 0, sizeof (ss_p->ss_info)); 2698 ss_p->ss_info.si_ta_p = sh_p->sh_ta_p; 2699 ss_p->ss_info.si_sv_addr = sh_p->sh_unique; 2700 /* we correct si_type and si_size below */ 2701 return_val = TD_OK; 2702 } 2703 if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr, 2704 &enable, sizeof (enable)) != PS_OK) { 2705 return_val = TD_DBERR; 2706 goto out; 2707 } 2708 if (enable != REGISTER_SYNC_ON) 2709 goto out; 2710 2711 /* 2712 * Get the address of the hash table in the target process. 2713 */ 2714 if (ta_p->model == PR_MODEL_NATIVE) { 2715 if (ps_pdread(ph_p, ta_p->uberdata_addr + 2716 offsetof(uberdata_t, tdb.tdb_sync_addr_hash), 2717 &hashaddr, sizeof (&hashaddr)) != PS_OK) { 2718 return_val = TD_DBERR; 2719 goto out; 2720 } 2721 } else { 2722 #if defined(_LP64) && defined(_SYSCALL32) 2723 caddr32_t addr; 2724 2725 if (ps_pdread(ph_p, ta_p->uberdata_addr + 2726 offsetof(uberdata32_t, tdb.tdb_sync_addr_hash), 2727 &addr, sizeof (addr)) != PS_OK) { 2728 return_val = TD_DBERR; 2729 goto out; 2730 } 2731 hashaddr = addr; 2732 #else 2733 return_val = TD_ERR; 2734 goto out; 2735 #endif /* _SYSCALL32 */ 2736 } 2737 2738 if (hashaddr == 0) 2739 return_val = TD_BADSH; 2740 else 2741 return_val = read_sync_stats(ta_p, hashaddr, 2742 sh_p->sh_unique, &sync_stats); 2743 if (return_val != TD_OK) 2744 goto out; 2745 2746 /* 2747 * We have the hash table entry. Transfer the data to 2748 * the td_syncstats_t structure provided by the caller. 
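 * Any fields not explicitly set below keep the zeros from the memset()
 * of *ss_p at the top of this function; an unrecognized type falls
 * through to the default case and is reported as TD_BADSH.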
2749 */ 2750 switch (sync_stats.un.type) { 2751 case TDB_MUTEX: 2752 { 2753 td_mutex_stats_t *msp = &ss_p->ss_un.mutex; 2754 2755 ss_p->ss_info.si_type = TD_SYNC_MUTEX; 2756 ss_p->ss_info.si_size = sizeof (mutex_t); 2757 msp->mutex_lock = 2758 sync_stats.un.mutex.mutex_lock; 2759 msp->mutex_sleep = 2760 sync_stats.un.mutex.mutex_sleep; 2761 msp->mutex_sleep_time = 2762 sync_stats.un.mutex.mutex_sleep_time; 2763 msp->mutex_hold_time = 2764 sync_stats.un.mutex.mutex_hold_time; 2765 msp->mutex_try = 2766 sync_stats.un.mutex.mutex_try; 2767 msp->mutex_try_fail = 2768 sync_stats.un.mutex.mutex_try_fail; 2769 if (sync_stats.sync_addr >= ta_p->hash_table_addr && 2770 (ix = sync_stats.sync_addr - ta_p->hash_table_addr) 2771 < ta_p->hash_size * sizeof (thr_hash_table_t)) 2772 msp->mutex_internal = 2773 ix / sizeof (thr_hash_table_t) + 1; 2774 break; 2775 } 2776 case TDB_COND: 2777 { 2778 td_cond_stats_t *csp = &ss_p->ss_un.cond; 2779 2780 ss_p->ss_info.si_type = TD_SYNC_COND; 2781 ss_p->ss_info.si_size = sizeof (cond_t); 2782 csp->cond_wait = 2783 sync_stats.un.cond.cond_wait; 2784 csp->cond_timedwait = 2785 sync_stats.un.cond.cond_timedwait; 2786 csp->cond_wait_sleep_time = 2787 sync_stats.un.cond.cond_wait_sleep_time; 2788 csp->cond_timedwait_sleep_time = 2789 sync_stats.un.cond.cond_timedwait_sleep_time; 2790 csp->cond_timedwait_timeout = 2791 sync_stats.un.cond.cond_timedwait_timeout; 2792 csp->cond_signal = 2793 sync_stats.un.cond.cond_signal; 2794 csp->cond_broadcast = 2795 sync_stats.un.cond.cond_broadcast; 2796 if (sync_stats.sync_addr >= ta_p->hash_table_addr && 2797 (ix = sync_stats.sync_addr - ta_p->hash_table_addr) 2798 < ta_p->hash_size * sizeof (thr_hash_table_t)) 2799 csp->cond_internal = 2800 ix / sizeof (thr_hash_table_t) + 1; 2801 break; 2802 } 2803 case TDB_RWLOCK: 2804 { 2805 td_rwlock_stats_t *rwsp = &ss_p->ss_un.rwlock; 2806 2807 ss_p->ss_info.si_type = TD_SYNC_RWLOCK; 2808 ss_p->ss_info.si_size = sizeof (rwlock_t); 2809 rwsp->rw_rdlock = 2810 sync_stats.un.rwlock.rw_rdlock; 2811 rwsp->rw_rdlock_try = 2812 sync_stats.un.rwlock.rw_rdlock_try; 2813 rwsp->rw_rdlock_try_fail = 2814 sync_stats.un.rwlock.rw_rdlock_try_fail; 2815 rwsp->rw_wrlock = 2816 sync_stats.un.rwlock.rw_wrlock; 2817 rwsp->rw_wrlock_hold_time = 2818 sync_stats.un.rwlock.rw_wrlock_hold_time; 2819 rwsp->rw_wrlock_try = 2820 sync_stats.un.rwlock.rw_wrlock_try; 2821 rwsp->rw_wrlock_try_fail = 2822 sync_stats.un.rwlock.rw_wrlock_try_fail; 2823 break; 2824 } 2825 case TDB_SEMA: 2826 { 2827 td_sema_stats_t *ssp = &ss_p->ss_un.sema; 2828 2829 ss_p->ss_info.si_type = TD_SYNC_SEMA; 2830 ss_p->ss_info.si_size = sizeof (sema_t); 2831 ssp->sema_wait = 2832 sync_stats.un.sema.sema_wait; 2833 ssp->sema_wait_sleep = 2834 sync_stats.un.sema.sema_wait_sleep; 2835 ssp->sema_wait_sleep_time = 2836 sync_stats.un.sema.sema_wait_sleep_time; 2837 ssp->sema_trywait = 2838 sync_stats.un.sema.sema_trywait; 2839 ssp->sema_trywait_fail = 2840 sync_stats.un.sema.sema_trywait_fail; 2841 ssp->sema_post = 2842 sync_stats.un.sema.sema_post; 2843 ssp->sema_max_count = 2844 sync_stats.un.sema.sema_max_count; 2845 ssp->sema_min_count = 2846 sync_stats.un.sema.sema_min_count; 2847 break; 2848 } 2849 default: 2850 return_val = TD_BADSH; 2851 break; 2852 } 2853 2854 out: 2855 (void) ps_pcontinue(ph_p); 2856 ph_unlock(ta_p); 2857 return (return_val); 2858 } 2859 2860 /* 2861 * Change the state of a synchronization variable. 
2862 * 1) mutex lock state set to value 2863 * 2) semaphore's count set to value 2864 * 3) writer's lock set by value < 0 2865 * 4) reader's lock number of readers set to value >= 0 2866 * Currently unused by dbx. 2867 */ 2868 #pragma weak td_sync_setstate = __td_sync_setstate 2869 td_err_e 2870 __td_sync_setstate(const td_synchandle_t *sh_p, long lvalue) 2871 { 2872 struct ps_prochandle *ph_p; 2873 int trunc = 0; 2874 td_err_e return_val; 2875 td_so_un_t generic_so; 2876 uint32_t *rwstate; 2877 int value = (int)lvalue; 2878 2879 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL) 2880 return (return_val); 2881 if (ps_pstop(ph_p) != PS_OK) { 2882 ph_unlock(sh_p->sh_ta_p); 2883 return (TD_DBERR); 2884 } 2885 2886 /* 2887 * Read the synch. variable information. 2888 * First attempt to read the whole union and if that fails 2889 * fall back to reading only the smallest member, the condvar. 2890 */ 2891 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so, 2892 sizeof (generic_so)) != PS_OK) { 2893 trunc = 1; 2894 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition, 2895 sizeof (generic_so.condition)) != PS_OK) { 2896 (void) ps_pcontinue(ph_p); 2897 ph_unlock(sh_p->sh_ta_p); 2898 return (TD_DBERR); 2899 } 2900 } 2901 2902 /* 2903 * Set the new value in the synch. variable just read from the 2904 * process and write the updated copy back. 2905 */ 2906 switch (generic_so.condition.cond_magic) { 2907 case MUTEX_MAGIC: 2908 if (trunc && ps_pdread(ph_p, sh_p->sh_unique, 2909 &generic_so.lock, sizeof (generic_so.lock)) != PS_OK) { 2910 return_val = TD_DBERR; 2911 break; 2912 } 2913 generic_so.lock.mutex_lockw = (uint8_t)value; 2914 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.lock, 2915 sizeof (generic_so.lock)) != PS_OK) 2916 return_val = TD_DBERR; 2917 break; 2918 case SEMA_MAGIC: 2919 if (trunc && ps_pdread(ph_p, sh_p->sh_unique, 2920 &generic_so.semaphore, sizeof (generic_so.semaphore)) 2921 != PS_OK) { 2922 return_val = TD_DBERR; 2923 break; 2924 } 2925 generic_so.semaphore.count = value; 2926 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.semaphore, 2927 sizeof (generic_so.semaphore)) != PS_OK) 2928 return_val = TD_DBERR; 2929 break; 2930 case COND_MAGIC: 2931 /* Operation not supported on a condition variable */ 2932 return_val = TD_ERR; 2933 break; 2934 case RWL_MAGIC: 2935 if (trunc && ps_pdread(ph_p, sh_p->sh_unique, 2936 &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK) { 2937 return_val = TD_DBERR; 2938 break; 2939 } 2940 rwstate = (uint32_t *)&generic_so.rwlock.readers; 2941 *rwstate &= URW_HAS_WAITERS; 2942 if (value < 0) 2943 *rwstate |= URW_WRITE_LOCKED; 2944 else 2945 *rwstate |= (value & URW_READERS_MASK); 2946 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.rwlock, 2947 sizeof (generic_so.rwlock)) != PS_OK) 2948 return_val = TD_DBERR; 2949 break; 2950 default: 2951 /* Bad sync.
object type */ 2952 return_val = TD_BADSH; 2953 break; 2954 } 2955 2956 (void) ps_pcontinue(ph_p); 2957 ph_unlock(sh_p->sh_ta_p); 2958 return (return_val); 2959 } 2960 2961 typedef struct { 2962 td_thr_iter_f *waiter_cb; 2963 psaddr_t sync_obj_addr; 2964 uint16_t sync_magic; 2965 void *waiter_cb_arg; 2966 td_err_e errcode; 2967 } waiter_cb_ctl_t; 2968 2969 static int 2970 waiters_cb(const td_thrhandle_t *th_p, void *arg) 2971 { 2972 td_thragent_t *ta_p = th_p->th_ta_p; 2973 struct ps_prochandle *ph_p = ta_p->ph_p; 2974 waiter_cb_ctl_t *wcb = arg; 2975 caddr_t wchan; 2976 2977 if (ta_p->model == PR_MODEL_NATIVE) { 2978 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique; 2979 2980 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan, 2981 &wchan, sizeof (wchan)) != PS_OK) { 2982 wcb->errcode = TD_DBERR; 2983 return (1); 2984 } 2985 } else { 2986 #if defined(_LP64) && defined(_SYSCALL32) 2987 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique; 2988 caddr32_t wchan32; 2989 2990 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan, 2991 &wchan32, sizeof (wchan32)) != PS_OK) { 2992 wcb->errcode = TD_DBERR; 2993 return (1); 2994 } 2995 wchan = (caddr_t)(uintptr_t)wchan32; 2996 #else 2997 wcb->errcode = TD_ERR; 2998 return (1); 2999 #endif /* _SYSCALL32 */ 3000 } 3001 3002 if (wchan == NULL) 3003 return (0); 3004 3005 if (wchan == (caddr_t)wcb->sync_obj_addr) 3006 return ((*wcb->waiter_cb)(th_p, wcb->waiter_cb_arg)); 3007 3008 return (0); 3009 } 3010 3011 /* 3012 * For a given synchronization variable, iterate over the 3013 * set of waiting threads. The call back function is passed 3014 * two parameters, a pointer to a thread handle and a pointer 3015 * to extra call back data. 3016 */ 3017 #pragma weak td_sync_waiters = __td_sync_waiters 3018 td_err_e 3019 __td_sync_waiters(const td_synchandle_t *sh_p, td_thr_iter_f *cb, void *cb_data) 3020 { 3021 struct ps_prochandle *ph_p; 3022 waiter_cb_ctl_t wcb; 3023 td_err_e return_val; 3024 3025 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL) 3026 return (return_val); 3027 if (ps_pdread(ph_p, 3028 (psaddr_t)&((mutex_t *)sh_p->sh_unique)->mutex_magic, 3029 (caddr_t)&wcb.sync_magic, sizeof (wcb.sync_magic)) != PS_OK) { 3030 ph_unlock(sh_p->sh_ta_p); 3031 return (TD_DBERR); 3032 } 3033 ph_unlock(sh_p->sh_ta_p); 3034 3035 switch (wcb.sync_magic) { 3036 case MUTEX_MAGIC: 3037 case COND_MAGIC: 3038 case SEMA_MAGIC: 3039 case RWL_MAGIC: 3040 break; 3041 default: 3042 return (TD_BADSH); 3043 } 3044 3045 wcb.waiter_cb = cb; 3046 wcb.sync_obj_addr = sh_p->sh_unique; 3047 wcb.waiter_cb_arg = cb_data; 3048 wcb.errcode = TD_OK; 3049 return_val = __td_ta_thr_iter(sh_p->sh_ta_p, waiters_cb, &wcb, 3050 TD_THR_SLEEP, TD_THR_LOWEST_PRIORITY, 3051 TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS); 3052 3053 if (return_val != TD_OK) 3054 return (return_val); 3055 3056 return (wcb.errcode); 3057 } 3058
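
/*
 * Purely illustrative, not part of the library: a debugger wanting to
 * report which threads are blocked on a given synchronization object
 * might combine the interfaces above roughly as follows.  The callback
 * name and the reporting step are hypothetical; returning zero from the
 * callback continues the iteration.
 *
 *	static int
 *	show_waiter(const td_thrhandle_t *th_p, void *arg)
 *	{
 *		td_thrinfo_t ti;
 *
 *		if (td_thr_get_info((td_thrhandle_t *)th_p, &ti) == TD_OK)
 *			... report ti.ti_tid as a waiter ...
 *		return (0);
 *	}
 *
 *	td_syncinfo_t si;
 *
 *	if (td_sync_get_info(sh_p, &si) == TD_OK && si.si_has_waiters)
 *		(void) td_sync_waiters(sh_p, show_waiter, NULL);
 */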