1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
29 * Copyright 2023 Oxide Computer Company
30 */
31
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <stddef.h>
35 #include <unistd.h>
36 #include <thr_uberdata.h>
37 #include <thread_db.h>
38 #include <libc_int.h>
39
40 /*
41 * Private structures.
42 */
43
44 typedef union {
45 mutex_t lock;
46 rwlock_t rwlock;
47 sema_t semaphore;
48 cond_t condition;
49 } td_so_un_t;
50
51 struct td_thragent {
52 rwlock_t rwlock;
53 struct ps_prochandle *ph_p;
54 int initialized;
55 int sync_tracking;
56 int model;
57 int primary_map;
58 psaddr_t bootstrap_addr;
59 psaddr_t uberdata_addr;
60 psaddr_t tdb_eventmask_addr;
61 psaddr_t tdb_register_sync_addr;
62 psaddr_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
63 psaddr_t hash_table_addr;
64 int hash_size;
65 lwpid_t single_lwpid;
66 psaddr_t single_ulwp_addr;
67 };
68
69 /*
70 * This is the name of the variable in libc that contains
71 * the uberdata address that we will need.
72 */
73 #define TD_BOOTSTRAP_NAME "_tdb_bootstrap"
74 /*
75 * This is the actual name of uberdata, used in the event
76 * that tdb_bootstrap has not yet been initialized.
77 */
78 #define TD_UBERDATA_NAME "_uberdata"
79 /*
80 * The library name should end with ".so.1", but older versions of
81 * dbx expect the unadorned name and malfunction if ".1" is specified.
82 * Unfortunately, if ".1" is not specified, mdb malfunctions when it
83 * is applied to another instance of itself (due to the presence of
84 * /usr/lib/mdb/proc/libc.so). So we try it both ways.
85 */
86 #define TD_LIBRARY_NAME "libc.so"
87 #define TD_LIBRARY_NAME_1 "libc.so.1"
88
89 td_err_e __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p);
90
91 td_err_e __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
92 void *cbdata_p, td_thr_state_e state, int ti_pri,
93 sigset_t *ti_sigmask_p, unsigned ti_user_flags);
94
95 /*
96 * Initialize threads debugging interface.
97 */
98 #pragma weak td_init = __td_init
99 td_err_e
__td_init()
101 {
102 return (TD_OK);
103 }
104
105 /*
106 * This function does nothing, and never did.
107 * But the symbol is in the ABI, so we can't delete it.
108 */
109 #pragma weak td_log = __td_log
110 void
__td_log()
112 {
113 }
114
115 /*
116 * Short-cut to read just the hash table size from the process,
117 * to avoid repeatedly reading the full uberdata structure when
118 * dealing with a single-threaded process.
119 */
120 static uint_t
td_read_hash_size(td_thragent_t *ta_p)
122 {
123 psaddr_t addr;
124 uint_t hash_size;
125
126 switch (ta_p->initialized) {
127 default: /* uninitialized */
128 return (0);
129 case 1: /* partially initialized */
130 break;
131 case 2: /* fully initialized */
132 return (ta_p->hash_size);
133 }
134
135 if (ta_p->model == PR_MODEL_NATIVE) {
136 addr = ta_p->uberdata_addr + offsetof(uberdata_t, hash_size);
137 } else {
138 #if defined(_LP64) && defined(_SYSCALL32)
139 addr = ta_p->uberdata_addr + offsetof(uberdata32_t, hash_size);
140 #else
141 addr = 0;
142 #endif
143 }
144 if (ps_pdread(ta_p->ph_p, addr, &hash_size, sizeof (hash_size))
145 != PS_OK)
146 return (0);
147 return (hash_size);
148 }
149
150 static td_err_e
td_read_uberdata(td_thragent_t *ta_p)
152 {
153 struct ps_prochandle *ph_p = ta_p->ph_p;
154 int i;
155
156 if (ta_p->model == PR_MODEL_NATIVE) {
157 uberdata_t uberdata;
158
159 if (ps_pdread(ph_p, ta_p->uberdata_addr,
160 &uberdata, sizeof (uberdata)) != PS_OK)
161 return (TD_DBERR);
162 ta_p->primary_map = uberdata.primary_map;
163 ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
164 offsetof(uberdata_t, tdb.tdb_ev_global_mask);
165 ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
166 offsetof(uberdata_t, uberflags.uf_tdb_register_sync);
167 ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
168 ta_p->hash_size = uberdata.hash_size;
169 if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
170 ta_p->tdb_events, sizeof (ta_p->tdb_events)) != PS_OK)
171 return (TD_DBERR);
172 } else {
173 #if defined(_LP64) && defined(_SYSCALL32)
174 uberdata32_t uberdata;
175 caddr32_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
176
177 if (ps_pdread(ph_p, ta_p->uberdata_addr,
178 &uberdata, sizeof (uberdata)) != PS_OK)
179 return (TD_DBERR);
180 ta_p->primary_map = uberdata.primary_map;
181 ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
182 offsetof(uberdata32_t, tdb.tdb_ev_global_mask);
183 ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
184 offsetof(uberdata32_t, uberflags.uf_tdb_register_sync);
185 ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
186 ta_p->hash_size = uberdata.hash_size;
187 if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
188 tdb_events, sizeof (tdb_events)) != PS_OK)
189 return (TD_DBERR);
190 for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++)
191 ta_p->tdb_events[i] = tdb_events[i];
192 #else
193 return (TD_DBERR);
194 #endif
195 }
196
197 /*
198 * Unfortunately, we are (implicitly) assuming that our uberdata
199 * definition precisely matches that of our target. If this is not
200 * true (that is, if we're examining a core file from a foreign
201 * system that has a different definition of uberdata), the failure
202 * modes can be frustratingly non-explicit. In an effort to catch
203 * this upon initialization (when the debugger may still be able to
204 * opt for another thread model or may be able to fail explicitly), we
205 * check that each of our tdb_events points to valid memory (these are
206 * putatively text upon which a breakpoint can be issued), with the
207 * hope that this is enough of a self-consistency check to lead to
208 * explicit failure on a mismatch.
209 */
210 for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++) {
211 uint8_t check;
212
213 if (ps_pdread(ph_p, (psaddr_t)ta_p->tdb_events[i],
214 &check, sizeof (check)) != PS_OK) {
215 return (TD_DBERR);
216 }
217 }
218
219 if (ta_p->hash_size != 1) { /* multi-threaded */
220 ta_p->initialized = 2;
221 ta_p->single_lwpid = 0;
222 ta_p->single_ulwp_addr = 0;
223 } else { /* single-threaded */
224 ta_p->initialized = 1;
225 /*
226 * Get the address and lwpid of the single thread/LWP.
227 * It may not be ulwp_one if this is a child of fork1().
228 */
229 if (ta_p->model == PR_MODEL_NATIVE) {
230 thr_hash_table_t head;
231 lwpid_t lwpid = 0;
232
233 if (ps_pdread(ph_p, ta_p->hash_table_addr,
234 &head, sizeof (head)) != PS_OK)
235 return (TD_DBERR);
236 if ((psaddr_t)head.hash_bucket == 0)
237 ta_p->initialized = 0;
238 else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
239 offsetof(ulwp_t, ul_lwpid),
240 &lwpid, sizeof (lwpid)) != PS_OK)
241 return (TD_DBERR);
242 ta_p->single_lwpid = lwpid;
243 ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
244 } else {
245 #if defined(_LP64) && defined(_SYSCALL32)
246 thr_hash_table32_t head;
247 lwpid_t lwpid = 0;
248
249 if (ps_pdread(ph_p, ta_p->hash_table_addr,
250 &head, sizeof (head)) != PS_OK)
251 return (TD_DBERR);
252 if ((psaddr_t)head.hash_bucket == 0)
253 ta_p->initialized = 0;
254 else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
255 offsetof(ulwp32_t, ul_lwpid),
256 &lwpid, sizeof (lwpid)) != PS_OK)
257 return (TD_DBERR);
258 ta_p->single_lwpid = lwpid;
259 ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
260 #else
261 return (TD_DBERR);
262 #endif
263 }
264 }
265 if (!ta_p->primary_map)
266 ta_p->initialized = 0;
267 return (TD_OK);
268 }
269
270 static td_err_e
td_read_bootstrap_data(td_thragent_t *ta_p)
272 {
273 struct ps_prochandle *ph_p = ta_p->ph_p;
274 psaddr_t bootstrap_addr;
275 psaddr_t uberdata_addr;
276 ps_err_e db_return;
277 td_err_e return_val;
278 int do_1;
279
280 switch (ta_p->initialized) {
281 case 2: /* fully initialized */
282 return (TD_OK);
283 case 1: /* partially initialized */
284 if (td_read_hash_size(ta_p) == 1)
285 return (TD_OK);
286 return (td_read_uberdata(ta_p));
287 }
288
289 /*
290 * Uninitialized -- do the startup work.
291 * We set ta_p->initialized to -1 to cut off recursive calls
292 * into libc_db by code in the provider of ps_pglobal_lookup().
293 */
294 do_1 = 0;
295 ta_p->initialized = -1;
296 db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME,
297 TD_BOOTSTRAP_NAME, &bootstrap_addr);
298 if (db_return == PS_NOSYM) {
299 do_1 = 1;
300 db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME_1,
301 TD_BOOTSTRAP_NAME, &bootstrap_addr);
302 }
303 if (db_return == PS_NOSYM) /* libc is not linked yet */
304 return (TD_NOLIBTHREAD);
305 if (db_return != PS_OK)
306 return (TD_ERR);
307 db_return = ps_pglobal_lookup(ph_p,
308 do_1? TD_LIBRARY_NAME_1 : TD_LIBRARY_NAME,
309 TD_UBERDATA_NAME, &uberdata_addr);
310 if (db_return == PS_NOSYM) /* libc is not linked yet */
311 return (TD_NOLIBTHREAD);
312 if (db_return != PS_OK)
313 return (TD_ERR);
314
315 /*
316 * Read the uberdata address into the thread agent structure.
317 */
318 if (ta_p->model == PR_MODEL_NATIVE) {
319 psaddr_t psaddr;
320 if (ps_pdread(ph_p, bootstrap_addr,
321 &psaddr, sizeof (psaddr)) != PS_OK)
322 return (TD_DBERR);
323 if ((ta_p->bootstrap_addr = psaddr) == 0)
324 psaddr = uberdata_addr;
325 else if (ps_pdread(ph_p, psaddr,
326 &psaddr, sizeof (psaddr)) != PS_OK)
327 return (TD_DBERR);
328 if (psaddr == 0) {
329 /* primary linkmap in the tgt is not initialized */
330 ta_p->bootstrap_addr = 0;
331 psaddr = uberdata_addr;
332 }
333 ta_p->uberdata_addr = psaddr;
334 } else {
335 #if defined(_LP64) && defined(_SYSCALL32)
336 caddr32_t psaddr;
337 if (ps_pdread(ph_p, bootstrap_addr,
338 &psaddr, sizeof (psaddr)) != PS_OK)
339 return (TD_DBERR);
340 if ((ta_p->bootstrap_addr = (psaddr_t)psaddr) == 0)
341 psaddr = (caddr32_t)uberdata_addr;
342 else if (ps_pdread(ph_p, (psaddr_t)psaddr,
343 &psaddr, sizeof (psaddr)) != PS_OK)
344 return (TD_DBERR);
345 if (psaddr == 0) {
346 /* primary linkmap in the tgt is not initialized */
347 ta_p->bootstrap_addr = 0;
348 psaddr = (caddr32_t)uberdata_addr;
349 }
350 ta_p->uberdata_addr = (psaddr_t)psaddr;
351 #else
352 return (TD_DBERR);
353 #endif /* _SYSCALL32 */
354 }
355
356 if ((return_val = td_read_uberdata(ta_p)) != TD_OK)
357 return (return_val);
358 if (ta_p->bootstrap_addr == 0)
359 ta_p->initialized = 0;
360 return (TD_OK);
361 }
362
363 #pragma weak ps_kill
364 #pragma weak ps_lrolltoaddr
365
366 /*
367 * Allocate a new agent process handle ("thread agent").
368 */
369 #pragma weak td_ta_new = __td_ta_new
370 td_err_e
__td_ta_new(struct ps_prochandle *ph_p, td_thragent_t **ta_pp)
372 {
373 td_thragent_t *ta_p;
374 int model;
375 td_err_e return_val = TD_OK;
376
377 if (ph_p == NULL)
378 return (TD_BADPH);
379 if (ta_pp == NULL)
380 return (TD_ERR);
381 *ta_pp = NULL;
382 if (ps_pstop(ph_p) != PS_OK)
383 return (TD_DBERR);
384 /*
385 * ps_pdmodel might not be defined if this is an older client.
386 * Make it a weak symbol and test if it exists before calling.
387 */
388 #pragma weak ps_pdmodel
389 if (ps_pdmodel == NULL) {
390 model = PR_MODEL_NATIVE;
391 } else if (ps_pdmodel(ph_p, &model) != PS_OK) {
392 (void) ps_pcontinue(ph_p);
393 return (TD_ERR);
394 }
395 if ((ta_p = malloc(sizeof (*ta_p))) == NULL) {
396 (void) ps_pcontinue(ph_p);
397 return (TD_MALLOC);
398 }
399
400 /*
401 * Initialize the agent process handle.
402 * Pick up the symbol value we need from the target process.
403 */
404 (void) memset(ta_p, 0, sizeof (*ta_p));
405 ta_p->ph_p = ph_p;
406 (void) rwlock_init(&ta_p->rwlock, USYNC_THREAD, NULL);
407 ta_p->model = model;
408 return_val = td_read_bootstrap_data(ta_p);
409
410 /*
411 * Because the old libthread_db enabled lock tracking by default,
412 * we must also do it. However, we do it only if the application
413 * provides the ps_kill() and ps_lrolltoaddr() interfaces.
414 * (dbx provides the ps_kill() and ps_lrolltoaddr() interfaces.)
415 */
416 if (return_val == TD_OK && ps_kill != NULL && ps_lrolltoaddr != NULL) {
417 register_sync_t oldenable;
418 register_sync_t enable = REGISTER_SYNC_ENABLE;
419 psaddr_t psaddr = ta_p->tdb_register_sync_addr;
420
421 if (ps_pdread(ph_p, psaddr,
422 &oldenable, sizeof (oldenable)) != PS_OK)
423 return_val = TD_DBERR;
424 else if (oldenable != REGISTER_SYNC_OFF ||
425 ps_pdwrite(ph_p, psaddr,
426 &enable, sizeof (enable)) != PS_OK) {
427 /*
428 * Lock tracking was already enabled or we
429 * failed to enable it, probably because we
430 * are examining a core file. In either case
431 * set the sync_tracking flag non-zero to
432 * indicate that we should not attempt to
433 * disable lock tracking when we delete the
434 * agent process handle in td_ta_delete().
435 */
436 ta_p->sync_tracking = 1;
437 }
438 }
439
440 if (return_val == TD_OK)
441 *ta_pp = ta_p;
442 else
443 free(ta_p);
444
445 (void) ps_pcontinue(ph_p);
446 return (return_val);
447 }
448
449 /*
450 * Utility function to grab the readers lock and return the prochandle,
451 * given an agent process handle. Performs standard error checking.
452 * Returns non-NULL with the lock held, or NULL with the lock not held.
453 */
454 static struct ps_prochandle *
ph_lock_ta(td_thragent_t *ta_p, td_err_e *err)
456 {
457 struct ps_prochandle *ph_p = NULL;
458 td_err_e error;
459
460 if (ta_p == NULL || ta_p->initialized == -1) {
461 *err = TD_BADTA;
462 } else if (rw_rdlock(&ta_p->rwlock) != 0) { /* can't happen? */
463 *err = TD_BADTA;
464 } else if ((ph_p = ta_p->ph_p) == NULL) {
465 (void) rw_unlock(&ta_p->rwlock);
466 *err = TD_BADPH;
467 } else if (ta_p->initialized != 2 &&
468 (error = td_read_bootstrap_data(ta_p)) != TD_OK) {
469 (void) rw_unlock(&ta_p->rwlock);
470 ph_p = NULL;
471 *err = error;
472 } else {
473 *err = TD_OK;
474 }
475
476 return (ph_p);
477 }
478
479 /*
480 * Utility function to grab the readers lock and return the prochandle,
481 * given an agent thread handle. Performs standard error checking.
482 * Returns non-NULL with the lock held, or NULL with the lock not held.
483 */
484 static struct ps_prochandle *
ph_lock_th(const td_thrhandle_t *th_p, td_err_e *err)
486 {
487 if (th_p == NULL || th_p->th_unique == 0) {
488 *err = TD_BADTH;
489 return (NULL);
490 }
491 return (ph_lock_ta(th_p->th_ta_p, err));
492 }
493
494 /*
495 * Utility function to grab the readers lock and return the prochandle,
496 * given a synchronization object handle. Performs standard error checking.
497 * Returns non-NULL with the lock held, or NULL with the lock not held.
498 */
499 static struct ps_prochandle *
ph_lock_sh(const td_synchandle_t *sh_p, td_err_e *err)
501 {
502 if (sh_p == NULL || sh_p->sh_unique == 0) {
503 *err = TD_BADSH;
504 return (NULL);
505 }
506 return (ph_lock_ta(sh_p->sh_ta_p, err));
507 }
508
509 /*
510 * Unlock the agent process handle obtained from ph_lock_*().
511 */
512 static void
ph_unlock(td_thragent_t *ta_p)
514 {
515 (void) rw_unlock(&ta_p->rwlock);
516 }
517
518 /*
519 * De-allocate an agent process handle,
520 * releasing all related resources.
521 *
522 * XXX -- This is hopelessly broken ---
523 * Storage for thread agent is not deallocated. The prochandle
524 * in the thread agent is set to NULL so that future uses of
525 * the thread agent can be detected and an error value returned.
526 * All functions in the external user interface that make
527 * use of the thread agent are expected
528 * to check for a NULL prochandle in the thread agent.
529 * All such functions are also expected to obtain a
530 * reader lock on the thread agent while it is using it.
531 */
532 #pragma weak td_ta_delete = __td_ta_delete
533 td_err_e
__td_ta_delete(td_thragent_t *ta_p)
535 {
536 struct ps_prochandle *ph_p;
537
538 /*
539 * This is the only place we grab the writer lock.
540 * We are going to NULL out the prochandle.
541 */
542 if (ta_p == NULL || rw_wrlock(&ta_p->rwlock) != 0)
543 return (TD_BADTA);
544 if ((ph_p = ta_p->ph_p) == NULL) {
545 (void) rw_unlock(&ta_p->rwlock);
546 return (TD_BADPH);
547 }
548 /*
549 * If synch. tracking was disabled when td_ta_new() was called and
550 * if td_ta_sync_tracking_enable() was never called, then disable
551 * synch. tracking (it was enabled by default in td_ta_new()).
552 */
553 if (ta_p->sync_tracking == 0 &&
554 ps_kill != NULL && ps_lrolltoaddr != NULL) {
555 register_sync_t enable = REGISTER_SYNC_DISABLE;
556
557 (void) ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
558 &enable, sizeof (enable));
559 }
560 ta_p->ph_p = NULL;
561 (void) rw_unlock(&ta_p->rwlock);
562 return (TD_OK);
563 }
564
565 /*
566 * Map an agent process handle to a client prochandle.
567 * Currently unused by dbx.
568 */
569 #pragma weak td_ta_get_ph = __td_ta_get_ph
570 td_err_e
__td_ta_get_ph(td_thragent_t *ta_p, struct ps_prochandle **ph_pp)
572 {
573 td_err_e return_val;
574
575 if (ph_pp != NULL) /* protect stupid callers */
576 *ph_pp = NULL;
577 if (ph_pp == NULL)
578 return (TD_ERR);
579 if ((*ph_pp = ph_lock_ta(ta_p, &return_val)) == NULL)
580 return (return_val);
581 ph_unlock(ta_p);
582 return (TD_OK);
583 }
584
585 /*
586 * Set the process's suggested concurrency level.
587 * This is a no-op in a one-level model.
588 * Currently unused by dbx.
589 */
590 #pragma weak td_ta_setconcurrency = __td_ta_setconcurrency
591 /* ARGSUSED1 */
592 td_err_e
__td_ta_setconcurrency(const td_thragent_t *ta_p, int level)
594 {
595 if (ta_p == NULL)
596 return (TD_BADTA);
597 if (ta_p->ph_p == NULL)
598 return (TD_BADPH);
599 return (TD_OK);
600 }
601
602 /*
603 * Get the number of threads in the process.
604 */
605 #pragma weak td_ta_get_nthreads = __td_ta_get_nthreads
606 td_err_e
__td_ta_get_nthreads(td_thragent_t *ta_p, int *nthread_p)
608 {
609 struct ps_prochandle *ph_p;
610 td_err_e return_val;
611 int nthreads;
612 int nzombies;
613 psaddr_t nthreads_addr;
614 psaddr_t nzombies_addr;
615
616 if (ta_p->model == PR_MODEL_NATIVE) {
617 nthreads_addr = ta_p->uberdata_addr +
618 offsetof(uberdata_t, nthreads);
619 nzombies_addr = ta_p->uberdata_addr +
620 offsetof(uberdata_t, nzombies);
621 } else {
622 #if defined(_LP64) && defined(_SYSCALL32)
623 nthreads_addr = ta_p->uberdata_addr +
624 offsetof(uberdata32_t, nthreads);
625 nzombies_addr = ta_p->uberdata_addr +
626 offsetof(uberdata32_t, nzombies);
627 #else
628 nthreads_addr = 0;
629 nzombies_addr = 0;
630 #endif /* _SYSCALL32 */
631 }
632
633 if (nthread_p == NULL)
634 return (TD_ERR);
635 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
636 return (return_val);
637 if (ps_pdread(ph_p, nthreads_addr, &nthreads, sizeof (int)) != PS_OK)
638 return_val = TD_DBERR;
639 if (ps_pdread(ph_p, nzombies_addr, &nzombies, sizeof (int)) != PS_OK)
640 return_val = TD_DBERR;
641 ph_unlock(ta_p);
642 if (return_val == TD_OK)
643 *nthread_p = nthreads + nzombies;
644 return (return_val);
645 }
646
647 typedef struct {
648 thread_t tid;
649 int found;
650 td_thrhandle_t th;
651 } td_mapper_param_t;
652
653 /*
654 * Check the value in data against the thread id.
655 * If it matches, return 1 to terminate iterations.
656 * This function is used by td_ta_map_id2thr() to map a tid to a thread handle.
657 */
658 static int
td_mapper_id2thr(td_thrhandle_t *th_p, td_mapper_param_t *data)
660 {
661 td_thrinfo_t ti;
662
663 if (__td_thr_get_info(th_p, &ti) == TD_OK &&
664 data->tid == ti.ti_tid) {
665 data->found = 1;
666 data->th = *th_p;
667 return (1);
668 }
669 return (0);
670 }
671
672 /*
673 * Given a thread identifier, return the corresponding thread handle.
674 */
675 #pragma weak td_ta_map_id2thr = __td_ta_map_id2thr
676 td_err_e
__td_ta_map_id2thr(td_thragent_t *ta_p, thread_t tid,
678 td_thrhandle_t *th_p)
679 {
680 td_err_e return_val;
681 td_mapper_param_t data;
682
683 if (th_p != NULL && /* optimize for a single thread */
684 ta_p != NULL &&
685 ta_p->initialized == 1 &&
686 (td_read_hash_size(ta_p) == 1 ||
687 td_read_uberdata(ta_p) == TD_OK) &&
688 ta_p->initialized == 1 &&
689 ta_p->single_lwpid == tid) {
690 th_p->th_ta_p = ta_p;
691 if ((th_p->th_unique = ta_p->single_ulwp_addr) == 0)
692 return (TD_NOTHR);
693 return (TD_OK);
694 }
695
696 /*
697 * LOCKING EXCEPTION - Locking is not required here because
698 * the locking and checking will be done in __td_ta_thr_iter.
699 */
700
701 if (ta_p == NULL)
702 return (TD_BADTA);
703 if (th_p == NULL)
704 return (TD_BADTH);
705 if (tid == 0)
706 return (TD_NOTHR);
707
708 data.tid = tid;
709 data.found = 0;
710 return_val = __td_ta_thr_iter(ta_p,
711 (td_thr_iter_f *)td_mapper_id2thr, (void *)&data,
712 TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
713 TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
714 if (return_val == TD_OK) {
715 if (data.found == 0)
716 return_val = TD_NOTHR;
717 else
718 *th_p = data.th;
719 }
720
721 return (return_val);
722 }
723
724 /*
725 * Map the address of a synchronization object to a sync. object handle.
726 */
727 #pragma weak td_ta_map_addr2sync = __td_ta_map_addr2sync
728 td_err_e
__td_ta_map_addr2sync(td_thragent_t *ta_p, psaddr_t addr, td_synchandle_t *sh_p)
730 {
731 struct ps_prochandle *ph_p;
732 td_err_e return_val;
733 uint16_t sync_magic;
734
735 if (sh_p == NULL)
736 return (TD_BADSH);
737 if (addr == 0)
738 return (TD_ERR);
739 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
740 return (return_val);
741 /*
742 * Check the magic number of the sync. object to make sure it's valid.
743 * The magic number is at the same offset for all sync. objects.
744 */
745 if (ps_pdread(ph_p, (psaddr_t)&((mutex_t *)addr)->mutex_magic,
746 &sync_magic, sizeof (sync_magic)) != PS_OK) {
747 ph_unlock(ta_p);
748 return (TD_BADSH);
749 }
750 ph_unlock(ta_p);
751 if (sync_magic != MUTEX_MAGIC && sync_magic != COND_MAGIC &&
752 sync_magic != SEMA_MAGIC && sync_magic != RWL_MAGIC)
753 return (TD_BADSH);
754 /*
755 * Just fill in the appropriate fields of the sync. handle.
756 */
757 sh_p->sh_ta_p = (td_thragent_t *)ta_p;
758 sh_p->sh_unique = addr;
759 return (TD_OK);
760 }
761
762 /*
763 * Iterate over the set of global TSD keys.
764 * The call back function is called with three arguments,
765 * a key, a pointer to the destructor function, and the cbdata pointer.
766 * Currently unused by dbx.
767 */
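/*
 * A minimal usage sketch (an illustration only, not part of this
 * library); the callback name note_key() is hypothetical:
 *
 *	static int
 *	note_key(thread_key_t key, void (*destructor)(void *), void *cbdata)
 *	{
 *		(void) printf("TSD key %u\n", key);
 *		return (0);
 *	}
 *
 *	(void) td_ta_tsd_iter(ta_p, note_key, NULL);
 */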
768 #pragma weak td_ta_tsd_iter = __td_ta_tsd_iter
769 td_err_e
__td_ta_tsd_iter(td_thragent_t *ta_p, td_key_iter_f *cb, void *cbdata_p)
771 {
772 struct ps_prochandle *ph_p;
773 td_err_e return_val;
774 int key;
775 int numkeys;
776 psaddr_t dest_addr;
777 psaddr_t *destructors = NULL;
778 PFrV destructor;
779
780 if (cb == NULL)
781 return (TD_ERR);
782 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
783 return (return_val);
784 if (ps_pstop(ph_p) != PS_OK) {
785 ph_unlock(ta_p);
786 return (TD_DBERR);
787 }
788
789 if (ta_p->model == PR_MODEL_NATIVE) {
790 tsd_metadata_t tsdm;
791
792 if (ps_pdread(ph_p,
793 ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
794 &tsdm, sizeof (tsdm)) != PS_OK)
795 return_val = TD_DBERR;
796 else {
797 numkeys = tsdm.tsdm_nused;
798 dest_addr = (psaddr_t)tsdm.tsdm_destro;
799 if (numkeys > 0)
800 destructors =
801 malloc(numkeys * sizeof (psaddr_t));
802 }
803 } else {
804 #if defined(_LP64) && defined(_SYSCALL32)
805 tsd_metadata32_t tsdm;
806
807 if (ps_pdread(ph_p,
808 ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
809 &tsdm, sizeof (tsdm)) != PS_OK)
810 return_val = TD_DBERR;
811 else {
812 numkeys = tsdm.tsdm_nused;
813 dest_addr = (psaddr_t)tsdm.tsdm_destro;
814 if (numkeys > 0)
815 destructors =
816 malloc(numkeys * sizeof (caddr32_t));
817 }
818 #else
819 return_val = TD_DBERR;
820 #endif /* _SYSCALL32 */
821 }
822
823 if (return_val != TD_OK || numkeys <= 0) {
824 (void) ps_pcontinue(ph_p);
825 ph_unlock(ta_p);
826 return (return_val);
827 }
828
829 if (destructors == NULL)
830 return_val = TD_MALLOC;
831 else if (ta_p->model == PR_MODEL_NATIVE) {
832 if (ps_pdread(ph_p, dest_addr,
833 destructors, numkeys * sizeof (psaddr_t)) != PS_OK)
834 return_val = TD_DBERR;
835 else {
836 for (key = 1; key < numkeys; key++) {
837 destructor = (PFrV)destructors[key];
838 if (destructor != TSD_UNALLOCATED &&
839 (*cb)(key, destructor, cbdata_p))
840 break;
841 }
842 }
843 #if defined(_LP64) && defined(_SYSCALL32)
844 } else {
845 caddr32_t *destructors32 = (caddr32_t *)destructors;
846 caddr32_t destruct32;
847
848 if (ps_pdread(ph_p, dest_addr,
849 destructors32, numkeys * sizeof (caddr32_t)) != PS_OK)
850 return_val = TD_DBERR;
851 else {
852 for (key = 1; key < numkeys; key++) {
853 destruct32 = destructors32[key];
854 if ((destruct32 !=
855 (caddr32_t)(uintptr_t)TSD_UNALLOCATED) &&
856 (*cb)(key, (PFrV)(uintptr_t)destruct32,
857 cbdata_p))
858 break;
859 }
860 }
861 #endif /* _SYSCALL32 */
862 }
863
864 if (destructors)
865 free(destructors);
866 (void) ps_pcontinue(ph_p);
867 ph_unlock(ta_p);
868 return (return_val);
869 }
870
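/*
 * Return non-zero if the two signal sets are identical.
 * Used by __td_ta_thr_iter() to filter threads by signal mask.
 */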
871 int
sigequalset(const sigset_t *s1, const sigset_t *s2)
873 {
874 return (
875 s1->__sigbits[0] == s2->__sigbits[0] &&
876 s1->__sigbits[1] == s2->__sigbits[1] &&
877 s1->__sigbits[2] == s2->__sigbits[2] &&
878 s1->__sigbits[3] == s2->__sigbits[3]);
879 }
880
881 /*
882 * Description:
883 * Iterate over all threads. For each thread call
884 * the function pointed to by "cb" with a pointer
885 * to a thread handle, and a pointer to data which
886 * can be NULL. Only call td_thr_iter_f() on threads
887 * which match the properties of state, ti_pri,
888 * ti_sigmask_p, and ti_user_flags. If cb returns
889 * a non-zero value, terminate iterations.
890 *
891 * Input:
892 * *ta_p - thread agent
893 * *cb - call back function defined by user.
894 * td_thr_iter_f() takes a thread handle and
895 * cbdata_p as a parameter.
896 * cbdata_p - parameter for td_thr_iter_f().
897 *
898 * state - state of threads of interest. A value of
899 * TD_THR_ANY_STATE from enum td_thr_state_e
900 * does not restrict iterations by state.
901 * ti_pri - lower bound of priorities of threads of
902 * interest. A value of TD_THR_LOWEST_PRIORITY
903 * defined in thread_db.h does not restrict
904 * iterations by priority. A thread with priority
905 * less than ti_pri will NOT be passed to the callback
906 * function.
907 * ti_sigmask_p - signal mask of threads of interest.
908 * A value of TD_SIGNO_MASK defined in thread_db.h
909 * does not restrict iterations by signal mask.
910 * ti_user_flags - user flags of threads of interest. A
911 * value of TD_THR_ANY_USER_FLAGS defined in thread_db.h
912 * does not restrict iterations by user flags.
913 */
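/*
 * A minimal usage sketch (an illustration only, not part of this
 * library); the callback name count_thread() is hypothetical.
 * Passing the "don't care" filter values visits every thread, and a
 * zero return from the callback continues the iteration:
 *
 *	static int
 *	count_thread(const td_thrhandle_t *th_p, void *cbdata_p)
 *	{
 *		(*(int *)cbdata_p)++;
 *		return (0);
 *	}
 *
 *	int nthr = 0;
 *	(void) td_ta_thr_iter(ta_p, count_thread, &nthr,
 *	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
 *	    TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
 */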
914 #pragma weak td_ta_thr_iter = __td_ta_thr_iter
915 td_err_e
__td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
917 void *cbdata_p, td_thr_state_e state, int ti_pri,
918 sigset_t *ti_sigmask_p, unsigned ti_user_flags)
919 {
920 struct ps_prochandle *ph_p;
921 psaddr_t first_lwp_addr;
922 psaddr_t first_zombie_addr;
923 psaddr_t curr_lwp_addr;
924 psaddr_t next_lwp_addr;
925 td_thrhandle_t th;
926 ps_err_e db_return;
927 ps_err_e db_return2;
928 td_err_e return_val;
929
930 if (cb == NULL)
931 return (TD_ERR);
932 /*
933 * If state is not within bound, short circuit.
934 */
935 if (state < TD_THR_ANY_STATE || state > TD_THR_STOPPED_ASLEEP)
936 return (TD_OK);
937
938 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
939 return (return_val);
940 if (ps_pstop(ph_p) != PS_OK) {
941 ph_unlock(ta_p);
942 return (TD_DBERR);
943 }
944
945 /*
946 * For each ulwp_t in the circular linked lists pointed
947 * to by "all_lwps" and "all_zombies":
948 * (1) Filter each thread.
949 * (2) Create the thread_object for each thread that passes.
950 * (3) Call the call back function on each thread.
951 */
952
953 if (ta_p->model == PR_MODEL_NATIVE) {
954 db_return = ps_pdread(ph_p,
955 ta_p->uberdata_addr + offsetof(uberdata_t, all_lwps),
956 &first_lwp_addr, sizeof (first_lwp_addr));
957 db_return2 = ps_pdread(ph_p,
958 ta_p->uberdata_addr + offsetof(uberdata_t, all_zombies),
959 &first_zombie_addr, sizeof (first_zombie_addr));
960 } else {
961 #if defined(_LP64) && defined(_SYSCALL32)
962 caddr32_t addr32;
963
964 db_return = ps_pdread(ph_p,
965 ta_p->uberdata_addr + offsetof(uberdata32_t, all_lwps),
966 &addr32, sizeof (addr32));
967 first_lwp_addr = addr32;
968 db_return2 = ps_pdread(ph_p,
969 ta_p->uberdata_addr + offsetof(uberdata32_t, all_zombies),
970 &addr32, sizeof (addr32));
971 first_zombie_addr = addr32;
972 #else /* _SYSCALL32 */
973 db_return = PS_ERR;
974 db_return2 = PS_ERR;
975 #endif /* _SYSCALL32 */
976 }
977 if (db_return == PS_OK)
978 db_return = db_return2;
979
980 /*
981 * If first_lwp_addr and first_zombie_addr are both NULL,
982 * libc must not yet be initialized or all threads have
983 * exited. Return TD_NOTHR and all will be well.
984 */
985 if (db_return == PS_OK &&
986 first_lwp_addr == 0 && first_zombie_addr == 0) {
987 (void) ps_pcontinue(ph_p);
988 ph_unlock(ta_p);
989 return (TD_NOTHR);
990 }
991 if (db_return != PS_OK) {
992 (void) ps_pcontinue(ph_p);
993 ph_unlock(ta_p);
994 return (TD_DBERR);
995 }
996
997 /*
998 * Run down the lists of all living and dead lwps.
999 */
1000 if (first_lwp_addr == 0)
1001 first_lwp_addr = first_zombie_addr;
1002 curr_lwp_addr = first_lwp_addr;
1003 for (;;) {
1004 td_thr_state_e ts_state;
1005 int userpri;
1006 unsigned userflags;
1007 sigset_t mask;
1008
1009 /*
1010 * Read the ulwp struct.
1011 */
1012 if (ta_p->model == PR_MODEL_NATIVE) {
1013 ulwp_t ulwp;
1014
1015 if (ps_pdread(ph_p, curr_lwp_addr,
1016 &ulwp, sizeof (ulwp)) != PS_OK &&
1017 ((void) memset(&ulwp, 0, sizeof (ulwp)),
1018 ps_pdread(ph_p, curr_lwp_addr,
1019 &ulwp, REPLACEMENT_SIZE)) != PS_OK) {
1020 return_val = TD_DBERR;
1021 break;
1022 }
1023 next_lwp_addr = (psaddr_t)ulwp.ul_forw;
1024
1025 ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
1026 ulwp.ul_stop? TD_THR_STOPPED :
1027 ulwp.ul_wchan? TD_THR_SLEEP :
1028 TD_THR_ACTIVE;
1029 userpri = ulwp.ul_pri;
1030 userflags = ulwp.ul_usropts;
1031 if (ulwp.ul_dead)
1032 (void) sigemptyset(&mask);
1033 else
1034 mask = *(sigset_t *)&ulwp.ul_sigmask;
1035 } else {
1036 #if defined(_LP64) && defined(_SYSCALL32)
1037 ulwp32_t ulwp;
1038
1039 if (ps_pdread(ph_p, curr_lwp_addr,
1040 &ulwp, sizeof (ulwp)) != PS_OK &&
1041 ((void) memset(&ulwp, 0, sizeof (ulwp)),
1042 ps_pdread(ph_p, curr_lwp_addr,
1043 &ulwp, REPLACEMENT_SIZE32)) != PS_OK) {
1044 return_val = TD_DBERR;
1045 break;
1046 }
1047 next_lwp_addr = (psaddr_t)ulwp.ul_forw;
1048
1049 ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
1050 ulwp.ul_stop? TD_THR_STOPPED :
1051 ulwp.ul_wchan? TD_THR_SLEEP :
1052 TD_THR_ACTIVE;
1053 userpri = ulwp.ul_pri;
1054 userflags = ulwp.ul_usropts;
1055 if (ulwp.ul_dead)
1056 (void) sigemptyset(&mask);
1057 else
1058 mask = *(sigset_t *)&ulwp.ul_sigmask;
1059 #else /* _SYSCALL32 */
1060 return_val = TD_ERR;
1061 break;
1062 #endif /* _SYSCALL32 */
1063 }
1064
1065 /*
1066 * Filter on state, priority, sigmask, and user flags.
1067 */
1068
1069 if ((state != ts_state) &&
1070 (state != TD_THR_ANY_STATE))
1071 goto advance;
1072
1073 if (ti_pri > userpri)
1074 goto advance;
1075
1076 if (ti_sigmask_p != TD_SIGNO_MASK &&
1077 !sigequalset(ti_sigmask_p, &mask))
1078 goto advance;
1079
1080 if (ti_user_flags != userflags &&
1081 ti_user_flags != (unsigned)TD_THR_ANY_USER_FLAGS)
1082 goto advance;
1083
1084 /*
1085 * Call back - break if the return
1086 * from the call back is non-zero.
1087 */
1088 th.th_ta_p = (td_thragent_t *)ta_p;
1089 th.th_unique = curr_lwp_addr;
1090 if ((*cb)(&th, cbdata_p))
1091 break;
1092
1093 advance:
1094 if ((curr_lwp_addr = next_lwp_addr) == first_lwp_addr) {
1095 /*
1096 * Switch to the zombie list, unless it is NULL
1097 * or we have already been doing the zombie list,
1098 * in which case terminate the loop.
1099 */
1100 if (first_zombie_addr == 0 ||
1101 first_lwp_addr == first_zombie_addr)
1102 break;
1103 curr_lwp_addr = first_lwp_addr = first_zombie_addr;
1104 }
1105 }
1106
1107 (void) ps_pcontinue(ph_p);
1108 ph_unlock(ta_p);
1109 return (return_val);
1110 }
1111
1112 /*
1113 * Enable or disable process synchronization object tracking.
1114 * Currently unused by dbx.
1115 */
1116 #pragma weak td_ta_sync_tracking_enable = __td_ta_sync_tracking_enable
1117 td_err_e
__td_ta_sync_tracking_enable(td_thragent_t *ta_p, int onoff)
1119 {
1120 struct ps_prochandle *ph_p;
1121 td_err_e return_val;
1122 register_sync_t enable;
1123
1124 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1125 return (return_val);
1126 /*
1127 * Values of tdb_register_sync in the victim process:
1128 * REGISTER_SYNC_ENABLE enables registration of synch objects
1129 * REGISTER_SYNC_DISABLE disables registration of synch objects
1130 * These cause the table to be cleared and tdb_register_sync set to:
1131 * REGISTER_SYNC_ON registration in effect
1132 * REGISTER_SYNC_OFF registration not in effect
1133 */
1134 enable = onoff? REGISTER_SYNC_ENABLE : REGISTER_SYNC_DISABLE;
1135 if (ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
1136 &enable, sizeof (enable)) != PS_OK)
1137 return_val = TD_DBERR;
1138 /*
1139 * Remember that this interface was called (see td_ta_delete()).
1140 */
1141 ta_p->sync_tracking = 1;
1142 ph_unlock(ta_p);
1143 return (return_val);
1144 }
1145
1146 /*
1147 * Iterate over all known synchronization variables.
1148 * It is very possible that the list generated is incomplete,
1149 * because the iterator can only find synchronization variables
1150 * that have been registered by the process since synchronization
1151 * object registration was enabled.
1152 * The call back function cb is called for each synchronization
1153 * variable with two arguments: a pointer to the synchronization
1154 * handle and the passed-in argument cbdata.
1155 * If cb returns a non-zero value, iterations are terminated.
1156 */
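/*
 * A minimal usage sketch (an illustration only, not part of this
 * library); the callback name note_sync() is hypothetical.  Only
 * objects registered while registration was enabled (see
 * td_ta_sync_tracking_enable()) will be visited:
 *
 *	static int
 *	note_sync(const td_synchandle_t *sh_p, void *cbdata)
 *	{
 *		(void) printf("sync object at 0x%lx\n",
 *		    (ulong_t)sh_p->sh_unique);
 *		return (0);
 *	}
 *
 *	(void) td_ta_sync_iter(ta_p, note_sync, NULL);
 */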
1157 #pragma weak td_ta_sync_iter = __td_ta_sync_iter
1158 td_err_e
__td_ta_sync_iter(td_thragent_t *ta_p, td_sync_iter_f *cb, void *cbdata)
1160 {
1161 struct ps_prochandle *ph_p;
1162 td_err_e return_val;
1163 int i;
1164 register_sync_t enable;
1165 psaddr_t next_desc;
1166 tdb_sync_stats_t sync_stats;
1167 td_synchandle_t synchandle;
1168 psaddr_t psaddr;
1169 void *vaddr;
1170 uint64_t *sync_addr_hash = NULL;
1171
1172 if (cb == NULL)
1173 return (TD_ERR);
1174 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1175 return (return_val);
1176 if (ps_pstop(ph_p) != PS_OK) {
1177 ph_unlock(ta_p);
1178 return (TD_DBERR);
1179 }
1180 if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
1181 &enable, sizeof (enable)) != PS_OK) {
1182 return_val = TD_DBERR;
1183 goto out;
1184 }
1185 if (enable != REGISTER_SYNC_ON)
1186 goto out;
1187
1188 /*
1189 * First read the hash table.
1190 * The hash table is large; allocate with mmap().
1191 */
1192 if ((vaddr = mmap(NULL, TDB_HASH_SIZE * sizeof (uint64_t),
1193 PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
1194 == MAP_FAILED) {
1195 return_val = TD_MALLOC;
1196 goto out;
1197 }
1198 sync_addr_hash = vaddr;
1199
1200 if (ta_p->model == PR_MODEL_NATIVE) {
1201 if (ps_pdread(ph_p, ta_p->uberdata_addr +
1202 offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
&psaddr, sizeof (psaddr)) != PS_OK) {
1204 return_val = TD_DBERR;
1205 goto out;
1206 }
1207 } else {
1208 #ifdef _SYSCALL32
1209 caddr32_t addr;
1210
1211 if (ps_pdread(ph_p, ta_p->uberdata_addr +
1212 offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
1213 &addr, sizeof (addr)) != PS_OK) {
1214 return_val = TD_DBERR;
1215 goto out;
1216 }
1217 psaddr = addr;
1218 #else
1219 return_val = TD_ERR;
1220 goto out;
1221 #endif /* _SYSCALL32 */
1222 }
1223
1224 if (psaddr == 0)
1225 goto out;
1226 if (ps_pdread(ph_p, psaddr, sync_addr_hash,
1227 TDB_HASH_SIZE * sizeof (uint64_t)) != PS_OK) {
1228 return_val = TD_DBERR;
1229 goto out;
1230 }
1231
1232 /*
1233 * Now scan the hash table.
1234 */
1235 for (i = 0; i < TDB_HASH_SIZE; i++) {
1236 for (next_desc = (psaddr_t)sync_addr_hash[i];
1237 next_desc != 0;
1238 next_desc = (psaddr_t)sync_stats.next) {
1239 if (ps_pdread(ph_p, next_desc,
1240 &sync_stats, sizeof (sync_stats)) != PS_OK) {
1241 return_val = TD_DBERR;
1242 goto out;
1243 }
1244 if (sync_stats.un.type == TDB_NONE) {
1245 /* not registered since registration enabled */
1246 continue;
1247 }
1248 synchandle.sh_ta_p = ta_p;
1249 synchandle.sh_unique = (psaddr_t)sync_stats.sync_addr;
1250 if ((*cb)(&synchandle, cbdata) != 0)
1251 goto out;
1252 }
1253 }
1254
1255 out:
1256 if (sync_addr_hash != NULL)
1257 (void) munmap((void *)sync_addr_hash,
1258 TDB_HASH_SIZE * sizeof (uint64_t));
1259 (void) ps_pcontinue(ph_p);
1260 ph_unlock(ta_p);
1261 return (return_val);
1262 }
1263
1264 /*
1265 * Enable process statistics collection.
1266 */
1267 #pragma weak td_ta_enable_stats = __td_ta_enable_stats
1268 /* ARGSUSED */
1269 td_err_e
__td_ta_enable_stats(const td_thragent_t *ta_p, int onoff)
1271 {
1272 return (TD_NOCAPAB);
1273 }
1274
1275 /*
1276 * Reset process statistics.
1277 */
1278 #pragma weak td_ta_reset_stats = __td_ta_reset_stats
1279 /* ARGSUSED */
1280 td_err_e
__td_ta_reset_stats(const td_thragent_t *ta_p)
1282 {
1283 return (TD_NOCAPAB);
1284 }
1285
1286 /*
1287 * Read process statistics.
1288 */
1289 #pragma weak td_ta_get_stats = __td_ta_get_stats
1290 /* ARGSUSED */
1291 td_err_e
__td_ta_get_stats(const td_thragent_t *ta_p, td_ta_stats_t *tstats)
1293 {
1294 return (TD_NOCAPAB);
1295 }
1296
1297 /*
1298 * Transfer information from lwp struct to thread information struct.
1299 * XXX -- lots of this needs cleaning up.
1300 */
1301 static void
td_thr2to(td_thragent_t *ta_p, psaddr_t ts_addr,
1303 ulwp_t *ulwp, td_thrinfo_t *ti_p)
1304 {
1305 lwpid_t lwpid;
1306
1307 if ((lwpid = ulwp->ul_lwpid) == 0)
1308 lwpid = 1;
1309 (void) memset(ti_p, 0, sizeof (*ti_p));
1310 ti_p->ti_ta_p = ta_p;
1311 ti_p->ti_user_flags = ulwp->ul_usropts;
1312 ti_p->ti_tid = lwpid;
1313 ti_p->ti_exitval = ulwp->ul_rval;
1314 ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
1315 if (!ulwp->ul_dead) {
1316 /*
1317 * The bloody fools got this backwards!
1318 */
1319 ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
1320 ti_p->ti_stksize = ulwp->ul_stksiz;
1321 }
1322 ti_p->ti_ro_area = ts_addr;
1323 ti_p->ti_ro_size = ulwp->ul_replace?
1324 REPLACEMENT_SIZE : sizeof (ulwp_t);
1325 ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
1326 ulwp->ul_stop? TD_THR_STOPPED :
1327 ulwp->ul_wchan? TD_THR_SLEEP :
1328 TD_THR_ACTIVE;
1329 ti_p->ti_db_suspended = 0;
1330 ti_p->ti_type = TD_THR_USER;
1331 ti_p->ti_sp = ulwp->ul_sp;
1332 ti_p->ti_flags = 0;
1333 ti_p->ti_pri = ulwp->ul_pri;
1334 ti_p->ti_lid = lwpid;
1335 if (!ulwp->ul_dead)
1336 ti_p->ti_sigmask = ulwp->ul_sigmask;
1337 ti_p->ti_traceme = 0;
1338 ti_p->ti_preemptflag = 0;
1339 ti_p->ti_pirecflag = 0;
1340 (void) sigemptyset(&ti_p->ti_pending);
1341 ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
1342 }
1343
1344 #if defined(_LP64) && defined(_SYSCALL32)
1345 static void
td_thr2to32(td_thragent_t *ta_p, psaddr_t ts_addr,
1347 ulwp32_t *ulwp, td_thrinfo_t *ti_p)
1348 {
1349 lwpid_t lwpid;
1350
1351 if ((lwpid = ulwp->ul_lwpid) == 0)
1352 lwpid = 1;
1353 (void) memset(ti_p, 0, sizeof (*ti_p));
1354 ti_p->ti_ta_p = ta_p;
1355 ti_p->ti_user_flags = ulwp->ul_usropts;
1356 ti_p->ti_tid = lwpid;
1357 ti_p->ti_exitval = (void *)(uintptr_t)ulwp->ul_rval;
1358 ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
1359 if (!ulwp->ul_dead) {
1360 /*
1361 * The bloody fools got this backwards!
1362 */
1363 ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
1364 ti_p->ti_stksize = ulwp->ul_stksiz;
1365 }
1366 ti_p->ti_ro_area = ts_addr;
1367 ti_p->ti_ro_size = ulwp->ul_replace?
1368 REPLACEMENT_SIZE32 : sizeof (ulwp32_t);
1369 ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
1370 ulwp->ul_stop? TD_THR_STOPPED :
1371 ulwp->ul_wchan? TD_THR_SLEEP :
1372 TD_THR_ACTIVE;
1373 ti_p->ti_db_suspended = 0;
1374 ti_p->ti_type = TD_THR_USER;
1375 ti_p->ti_sp = (uint32_t)ulwp->ul_sp;
1376 ti_p->ti_flags = 0;
1377 ti_p->ti_pri = ulwp->ul_pri;
1378 ti_p->ti_lid = lwpid;
1379 if (!ulwp->ul_dead)
1380 ti_p->ti_sigmask = *(sigset_t *)&ulwp->ul_sigmask;
1381 ti_p->ti_traceme = 0;
1382 ti_p->ti_preemptflag = 0;
1383 ti_p->ti_pirecflag = 0;
1384 (void) sigemptyset(&ti_p->ti_pending);
1385 ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
1386 }
1387 #endif /* _SYSCALL32 */
1388
1389 /*
1390 * Get thread information.
1391 */
1392 #pragma weak td_thr_get_info = __td_thr_get_info
1393 td_err_e
__td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p)
1395 {
1396 struct ps_prochandle *ph_p;
1397 td_thragent_t *ta_p;
1398 td_err_e return_val;
1399 psaddr_t psaddr;
1400
1401 if (ti_p == NULL)
1402 return (TD_ERR);
1403 (void) memset(ti_p, 0, sizeof (*ti_p));
1404
1405 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1406 return (return_val);
1407 ta_p = th_p->th_ta_p;
1408 if (ps_pstop(ph_p) != PS_OK) {
1409 ph_unlock(ta_p);
1410 return (TD_DBERR);
1411 }
1412
1413 /*
1414 * Read the ulwp struct from the process.
1415 * Transfer the ulwp struct to the thread information struct.
1416 */
1417 psaddr = th_p->th_unique;
1418 if (ta_p->model == PR_MODEL_NATIVE) {
1419 ulwp_t ulwp;
1420
1421 if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
1422 ((void) memset(&ulwp, 0, sizeof (ulwp)),
1423 ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE)) != PS_OK)
1424 return_val = TD_DBERR;
1425 else
1426 td_thr2to(ta_p, psaddr, &ulwp, ti_p);
1427 } else {
1428 #if defined(_LP64) && defined(_SYSCALL32)
1429 ulwp32_t ulwp;
1430
1431 if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
1432 ((void) memset(&ulwp, 0, sizeof (ulwp)),
1433 ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE32)) !=
1434 PS_OK)
1435 return_val = TD_DBERR;
1436 else
1437 td_thr2to32(ta_p, psaddr, &ulwp, ti_p);
1438 #else
1439 return_val = TD_ERR;
1440 #endif /* _SYSCALL32 */
1441 }
1442
1443 (void) ps_pcontinue(ph_p);
1444 ph_unlock(ta_p);
1445 return (return_val);
1446 }
1447
1448 /*
1449 * Given a process and an event number, return information about
1450 * an address in the process or at which a breakpoint can be set
1451 * to monitor the event.
1452 */
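/*
 * A minimal usage sketch (an illustration only, not part of this
 * library); set_breakpoint() is a hypothetical debugger routine:
 *
 *	td_notify_t notify;
 *
 *	if (td_ta_event_addr(ta_p, TD_CREATE, &notify) == TD_OK &&
 *	    notify.type == NOTIFY_BPT)
 *		set_breakpoint(notify.u.bptaddr);
 */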
1453 #pragma weak td_ta_event_addr = __td_ta_event_addr
1454 td_err_e
__td_ta_event_addr(td_thragent_t *ta_p, td_event_e event, td_notify_t *notify_p)
1456 {
1457 if (ta_p == NULL)
1458 return (TD_BADTA);
1459 if (event < TD_MIN_EVENT_NUM || event > TD_MAX_EVENT_NUM)
1460 return (TD_NOEVENT);
1461 if (notify_p == NULL)
1462 return (TD_ERR);
1463
1464 notify_p->type = NOTIFY_BPT;
1465 notify_p->u.bptaddr = ta_p->tdb_events[event - TD_MIN_EVENT_NUM];
1466
1467 return (TD_OK);
1468 }
1469
1470 /*
1471 * Add the events in eventset 2 to eventset 1.
1472 */
1473 static void
eventsetaddset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
1475 {
1476 int i;
1477
1478 for (i = 0; i < TD_EVENTSIZE; i++)
1479 event1_p->event_bits[i] |= event2_p->event_bits[i];
1480 }
1481
1482 /*
1483 * Delete the events in eventset 2 from eventset 1.
1484 */
1485 static void
eventsetdelset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
1487 {
1488 int i;
1489
1490 for (i = 0; i < TD_EVENTSIZE; i++)
1491 event1_p->event_bits[i] &= ~event2_p->event_bits[i];
1492 }
1493
1494 /*
1495 * Either add or delete the given event set from a thread's event mask.
1496 */
1497 static td_err_e
mod_eventset(td_thrhandle_t *th_p, td_thr_events_t *events, int onoff)
1499 {
1500 struct ps_prochandle *ph_p;
1501 td_err_e return_val = TD_OK;
1502 char enable;
1503 td_thr_events_t evset;
1504 psaddr_t psaddr_evset;
1505 psaddr_t psaddr_enab;
1506
1507 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1508 return (return_val);
1509 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1510 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1511 psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
1512 psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
1513 } else {
1514 #if defined(_LP64) && defined(_SYSCALL32)
1515 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1516 psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
1517 psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
1518 #else
1519 ph_unlock(th_p->th_ta_p);
1520 return (TD_ERR);
1521 #endif /* _SYSCALL32 */
1522 }
1523 if (ps_pstop(ph_p) != PS_OK) {
1524 ph_unlock(th_p->th_ta_p);
1525 return (TD_DBERR);
1526 }
1527
1528 if (ps_pdread(ph_p, psaddr_evset, &evset, sizeof (evset)) != PS_OK)
1529 return_val = TD_DBERR;
1530 else {
1531 if (onoff)
1532 eventsetaddset(&evset, events);
1533 else
1534 eventsetdelset(&evset, events);
1535 if (ps_pdwrite(ph_p, psaddr_evset, &evset, sizeof (evset))
1536 != PS_OK)
1537 return_val = TD_DBERR;
1538 else {
1539 enable = 0;
1540 if (td_eventismember(&evset, TD_EVENTS_ENABLE))
1541 enable = 1;
1542 if (ps_pdwrite(ph_p, psaddr_enab,
1543 &enable, sizeof (enable)) != PS_OK)
1544 return_val = TD_DBERR;
1545 }
1546 }
1547
1548 (void) ps_pcontinue(ph_p);
1549 ph_unlock(th_p->th_ta_p);
1550 return (return_val);
1551 }
1552
1553 /*
1554 * Enable or disable tracing for a given thread. Tracing
1555 * is filtered based on the event mask of each thread. Tracing
1556 * can be turned on/off for the thread without changing thread
1557 * event mask.
1558 * Currently unused by dbx.
1559 */
1560 #pragma weak td_thr_event_enable = __td_thr_event_enable
1561 td_err_e
__td_thr_event_enable(td_thrhandle_t *th_p, int onoff)
1563 {
1564 td_thr_events_t evset;
1565
1566 td_event_emptyset(&evset);
1567 td_event_addset(&evset, TD_EVENTS_ENABLE);
1568 return (mod_eventset(th_p, &evset, onoff));
1569 }
1570
1571 /*
1572 * Set event mask to enable event. event is turned on in
1573 * event mask for thread. If a thread encounters an event
1574 * for which its event mask is on, notification will be sent
1575 * to the debugger.
1576 * Addresses for each event are provided to the
1577 * debugger. It is assumed that a breakpoint of some type will
1578 * be placed at that address. If the event mask for the thread
1579 * is on, the instruction at the address will be executed.
1580 * Otherwise, the instruction will be skipped.
1581 */
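/*
 * A minimal usage sketch (an illustration only, not part of this
 * library): enable thread-creation and thread-death events on a
 * single thread using the event-set macros from thread_db.h:
 *
 *	td_thr_events_t ev;
 *
 *	td_event_emptyset(&ev);
 *	td_event_addset(&ev, TD_CREATE);
 *	td_event_addset(&ev, TD_DEATH);
 *	(void) td_thr_set_event(th_p, &ev);
 */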
1582 #pragma weak td_thr_set_event = __td_thr_set_event
1583 td_err_e
__td_thr_set_event(td_thrhandle_t *th_p, td_thr_events_t *events)
1585 {
1586 return (mod_eventset(th_p, events, 1));
1587 }
1588
1589 /*
1590 * Enable or disable a set of events in the process-global event mask,
1591 * depending on the value of onoff.
1592 */
1593 static td_err_e
td_ta_mod_event(td_thragent_t *ta_p, td_thr_events_t *events, int onoff)
1595 {
1596 struct ps_prochandle *ph_p;
1597 td_thr_events_t targ_eventset;
1598 td_err_e return_val;
1599
1600 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1601 return (return_val);
1602 if (ps_pstop(ph_p) != PS_OK) {
1603 ph_unlock(ta_p);
1604 return (TD_DBERR);
1605 }
1606 if (ps_pdread(ph_p, ta_p->tdb_eventmask_addr,
1607 &targ_eventset, sizeof (targ_eventset)) != PS_OK)
1608 return_val = TD_DBERR;
1609 else {
1610 if (onoff)
1611 eventsetaddset(&targ_eventset, events);
1612 else
1613 eventsetdelset(&targ_eventset, events);
1614 if (ps_pdwrite(ph_p, ta_p->tdb_eventmask_addr,
1615 &targ_eventset, sizeof (targ_eventset)) != PS_OK)
1616 return_val = TD_DBERR;
1617 }
1618 (void) ps_pcontinue(ph_p);
1619 ph_unlock(ta_p);
1620 return (return_val);
1621 }
1622
1623 /*
1624 * Enable a set of events in the process-global event mask.
1625 */
1626 #pragma weak td_ta_set_event = __td_ta_set_event
1627 td_err_e
__td_ta_set_event(td_thragent_t *ta_p, td_thr_events_t *events)
1629 {
1630 return (td_ta_mod_event(ta_p, events, 1));
1631 }
1632
1633 /*
1634 * Set event mask to disable the given event set; these events are cleared
1635 * from the event mask of the thread. Events that occur for a thread
1636 * with the event masked off will not cause notification to be
1637 * sent to the debugger (see td_thr_set_event for fuller description).
1638 */
1639 #pragma weak td_thr_clear_event = __td_thr_clear_event
1640 td_err_e
__td_thr_clear_event(td_thrhandle_t *th_p, td_thr_events_t *events)
1642 {
1643 return (mod_eventset(th_p, events, 0));
1644 }
1645
1646 /*
1647 * Disable a set of events in the process-global event mask.
1648 */
1649 #pragma weak td_ta_clear_event = __td_ta_clear_event
1650 td_err_e
__td_ta_clear_event(td_thragent_t *ta_p, td_thr_events_t *events)
1652 {
1653 return (td_ta_mod_event(ta_p, events, 0));
1654 }
1655
1656 /*
1657 * This function returns the most recent event message, if any,
1658 * associated with a thread. Given a thread handle, return the message
1659 * corresponding to the event encountered by the thread. Only one
1660 * message per thread is saved. Messages from earlier events are lost
1661 * when later events occur.
1662 */
1663 #pragma weak td_thr_event_getmsg = __td_thr_event_getmsg
1664 td_err_e
__td_thr_event_getmsg(td_thrhandle_t *th_p, td_event_msg_t *msg)
1666 {
1667 struct ps_prochandle *ph_p;
1668 td_err_e return_val = TD_OK;
1669 psaddr_t psaddr;
1670
1671 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1672 return (return_val);
1673 if (ps_pstop(ph_p) != PS_OK) {
1674 ph_unlock(th_p->th_ta_p);
1675 return (TD_BADTA);
1676 }
1677 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1678 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1679 td_evbuf_t evbuf;
1680
1681 psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
1682 if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
1683 return_val = TD_DBERR;
1684 } else if (evbuf.eventnum == TD_EVENT_NONE) {
1685 return_val = TD_NOEVENT;
1686 } else {
1687 msg->event = evbuf.eventnum;
1688 msg->th_p = (td_thrhandle_t *)th_p;
1689 msg->msg.data = (uintptr_t)evbuf.eventdata;
1690 /* "Consume" the message */
1691 evbuf.eventnum = TD_EVENT_NONE;
1692 evbuf.eventdata = NULL;
1693 if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
1694 != PS_OK)
1695 return_val = TD_DBERR;
1696 }
1697 } else {
1698 #if defined(_LP64) && defined(_SYSCALL32)
1699 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1700 td_evbuf32_t evbuf;
1701
1702 psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
1703 if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
1704 return_val = TD_DBERR;
1705 } else if (evbuf.eventnum == TD_EVENT_NONE) {
1706 return_val = TD_NOEVENT;
1707 } else {
1708 msg->event = evbuf.eventnum;
1709 msg->th_p = (td_thrhandle_t *)th_p;
1710 msg->msg.data = (uintptr_t)evbuf.eventdata;
1711 /* "Consume" the message */
1712 evbuf.eventnum = TD_EVENT_NONE;
1713 evbuf.eventdata = 0;
1714 if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
1715 != PS_OK)
1716 return_val = TD_DBERR;
1717 }
1718 #else
1719 return_val = TD_ERR;
1720 #endif /* _SYSCALL32 */
1721 }
1722
1723 (void) ps_pcontinue(ph_p);
1724 ph_unlock(th_p->th_ta_p);
1725 return (return_val);
1726 }
1727
1728 /*
1729 * The callback function td_ta_event_getmsg uses when looking for
1730 * a thread with an event. A thin wrapper around td_thr_event_getmsg.
1731 */
1732 static int
event_msg_cb(const td_thrhandle_t *th_p, void *arg)
1734 {
1735 static td_thrhandle_t th;
1736 td_event_msg_t *msg = arg;
1737
1738 if (__td_thr_event_getmsg((td_thrhandle_t *)th_p, msg) == TD_OK) {
1739 /*
1740 * Got an event, stop iterating.
1741 *
1742 * Because of past mistakes in interface definition,
1743 * we are forced to pass back a static local variable
1744 * for the thread handle because th_p is a pointer
1745 * to a local variable in __td_ta_thr_iter().
1746 * Grr...
1747 */
1748 th = *th_p;
1749 msg->th_p = &th;
1750 return (1);
1751 }
1752 return (0);
1753 }
1754
1755 /*
1756 * This function is just like td_thr_event_getmsg, except that it is
1757 * passed a process handle rather than a thread handle, and returns
1758 * an event message for some thread in the process that has an event
1759 * message pending. If no thread has an event message pending, this
1760 * routine returns TD_NOEVENT. Thus, all pending event messages may
1761 * be collected from a process by repeatedly calling this routine
1762 * until it returns TD_NOEVENT.
1763 */
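/*
 * A minimal usage sketch (an illustration only, not part of this
 * library); handle_event() is a hypothetical debugger routine:
 *
 *	td_event_msg_t msg;
 *
 *	while (td_ta_event_getmsg(ta_p, &msg) == TD_OK)
 *		handle_event(&msg);
 */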
1764 #pragma weak td_ta_event_getmsg = __td_ta_event_getmsg
1765 td_err_e
__td_ta_event_getmsg(td_thragent_t *ta_p, td_event_msg_t *msg)
1767 {
1768 td_err_e return_val;
1769
1770 if (ta_p == NULL)
1771 return (TD_BADTA);
1772 if (ta_p->ph_p == NULL)
1773 return (TD_BADPH);
1774 if (msg == NULL)
1775 return (TD_ERR);
1776 msg->event = TD_EVENT_NONE;
1777 if ((return_val = __td_ta_thr_iter(ta_p, event_msg_cb, msg,
1778 TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK,
1779 TD_THR_ANY_USER_FLAGS)) != TD_OK)
1780 return (return_val);
1781 if (msg->event == TD_EVENT_NONE)
1782 return (TD_NOEVENT);
1783 return (TD_OK);
1784 }
1785
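/*
 * Return the lwpid for a thread handle by reading ul_lwpid from the
 * target process.  Returns 0 on a read failure; a zero ul_lwpid in
 * the target (not yet assigned) is reported as 1.
 */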
1786 static lwpid_t
thr_to_lwpid(const td_thrhandle_t *th_p)
1788 {
1789 struct ps_prochandle *ph_p = th_p->th_ta_p->ph_p;
1790 lwpid_t lwpid;
1791
1792 /*
1793 * The caller holds the prochandle lock
 * and has already verified everything.
1795 */
1796 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1797 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1798
1799 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
1800 &lwpid, sizeof (lwpid)) != PS_OK)
1801 lwpid = 0;
1802 else if (lwpid == 0)
1803 lwpid = 1;
1804 } else {
1805 #if defined(_LP64) && defined(_SYSCALL32)
1806 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1807
1808 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
1809 &lwpid, sizeof (lwpid)) != PS_OK)
1810 lwpid = 0;
1811 else if (lwpid == 0)
1812 lwpid = 1;
1813 #else
1814 lwpid = 0;
1815 #endif /* _SYSCALL32 */
1816 }
1817
1818 return (lwpid);
1819 }
1820
1821 /*
1822 * Suspend a thread.
1823 * XXX: What does this mean in a one-level model?
1824 */
1825 #pragma weak td_thr_dbsuspend = __td_thr_dbsuspend
1826 td_err_e
__td_thr_dbsuspend(const td_thrhandle_t *th_p)
1828 {
1829 struct ps_prochandle *ph_p;
1830 td_err_e return_val;
1831
1832 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1833 return (return_val);
1834 if (ps_lstop(ph_p, thr_to_lwpid(th_p)) != PS_OK)
1835 return_val = TD_DBERR;
1836 ph_unlock(th_p->th_ta_p);
1837 return (return_val);
1838 }
1839
1840 /*
1841 * Resume a suspended thread.
1842 * XXX: What does this mean in a one-level model?
1843 */
1844 #pragma weak td_thr_dbresume = __td_thr_dbresume
1845 td_err_e
__td_thr_dbresume(const td_thrhandle_t * th_p)1846 __td_thr_dbresume(const td_thrhandle_t *th_p)
1847 {
1848 struct ps_prochandle *ph_p;
1849 td_err_e return_val;
1850
1851 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1852 return (return_val);
1853 if (ps_lcontinue(ph_p, thr_to_lwpid(th_p)) != PS_OK)
1854 return_val = TD_DBERR;
1855 ph_unlock(th_p->th_ta_p);
1856 return (return_val);
1857 }
1858
1859 /*
1860 * Set a thread's signal mask.
1861 * Currently unused by dbx.
1862 */
1863 #pragma weak td_thr_sigsetmask = __td_thr_sigsetmask
1864 /* ARGSUSED */
1865 td_err_e
__td_thr_sigsetmask(const td_thrhandle_t * th_p,const sigset_t ti_sigmask)1866 __td_thr_sigsetmask(const td_thrhandle_t *th_p, const sigset_t ti_sigmask)
1867 {
1868 return (TD_NOCAPAB);
1869 }
1870
1871 /*
1872 * Set a thread's "signals-pending" set.
1873 * Currently unused by dbx.
1874 */
1875 #pragma weak td_thr_setsigpending = __td_thr_setsigpending
1876 /* ARGSUSED */
1877 td_err_e
__td_thr_setsigpending(const td_thrhandle_t * th_p,uchar_t ti_pending_flag,const sigset_t ti_pending)1878 __td_thr_setsigpending(const td_thrhandle_t *th_p,
1879 uchar_t ti_pending_flag, const sigset_t ti_pending)
1880 {
1881 return (TD_NOCAPAB);
1882 }
1883
1884 /*
1885 * Get a thread's general register set.
1886 */
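/*
 * The target process is stopped (ps_pstop) around the register read so
 * that a consistent snapshot is obtained, then resumed (ps_pcontinue).
 * The same pattern is used by the other register accessors below.
 */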
1887 #pragma weak td_thr_getgregs = __td_thr_getgregs
1888 td_err_e
__td_thr_getgregs(td_thrhandle_t * th_p,prgregset_t regset)1889 __td_thr_getgregs(td_thrhandle_t *th_p, prgregset_t regset)
1890 {
1891 struct ps_prochandle *ph_p;
1892 td_err_e return_val;
1893
1894 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1895 return (return_val);
1896 if (ps_pstop(ph_p) != PS_OK) {
1897 ph_unlock(th_p->th_ta_p);
1898 return (TD_DBERR);
1899 }
1900
1901 if (ps_lgetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
1902 return_val = TD_DBERR;
1903
1904 (void) ps_pcontinue(ph_p);
1905 ph_unlock(th_p->th_ta_p);
1906 return (return_val);
1907 }
1908
1909 /*
1910 * Set a thread's general register set.
1911 */
1912 #pragma weak td_thr_setgregs = __td_thr_setgregs
1913 td_err_e
__td_thr_setgregs(td_thrhandle_t * th_p,const prgregset_t regset)1914 __td_thr_setgregs(td_thrhandle_t *th_p, const prgregset_t regset)
1915 {
1916 struct ps_prochandle *ph_p;
1917 td_err_e return_val;
1918
1919 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1920 return (return_val);
1921 if (ps_pstop(ph_p) != PS_OK) {
1922 ph_unlock(th_p->th_ta_p);
1923 return (TD_DBERR);
1924 }
1925
1926 if (ps_lsetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
1927 return_val = TD_DBERR;
1928
1929 (void) ps_pcontinue(ph_p);
1930 ph_unlock(th_p->th_ta_p);
1931 return (return_val);
1932 }
1933
1934 /*
1935 * Get a thread's floating-point register set.
1936 */
1937 #pragma weak td_thr_getfpregs = __td_thr_getfpregs
1938 td_err_e
__td_thr_getfpregs(td_thrhandle_t * th_p,prfpregset_t * fpregset)1939 __td_thr_getfpregs(td_thrhandle_t *th_p, prfpregset_t *fpregset)
1940 {
1941 struct ps_prochandle *ph_p;
1942 td_err_e return_val;
1943
1944 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1945 return (return_val);
1946 if (ps_pstop(ph_p) != PS_OK) {
1947 ph_unlock(th_p->th_ta_p);
1948 return (TD_DBERR);
1949 }
1950
1951 if (ps_lgetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
1952 return_val = TD_DBERR;
1953
1954 (void) ps_pcontinue(ph_p);
1955 ph_unlock(th_p->th_ta_p);
1956 return (return_val);
1957 }
1958
1959 /*
1960 * Set a thread's floating-point register set.
1961 */
1962 #pragma weak td_thr_setfpregs = __td_thr_setfpregs
1963 td_err_e
__td_thr_setfpregs(td_thrhandle_t * th_p,const prfpregset_t * fpregset)1964 __td_thr_setfpregs(td_thrhandle_t *th_p, const prfpregset_t *fpregset)
1965 {
1966 struct ps_prochandle *ph_p;
1967 td_err_e return_val;
1968
1969 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1970 return (return_val);
1971 if (ps_pstop(ph_p) != PS_OK) {
1972 ph_unlock(th_p->th_ta_p);
1973 return (TD_DBERR);
1974 }
1975
1976 if (ps_lsetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
1977 return_val = TD_DBERR;
1978
1979 (void) ps_pcontinue(ph_p);
1980 ph_unlock(th_p->th_ta_p);
1981 return (return_val);
1982 }
1983
1984 /*
1985 * Get the size of the extra state register set for this architecture.
1986 * Currently unused by dbx.
1987 */
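/*
 * A reported size of zero yields TD_NOXREGS, meaning there is no extra
 * register state to fetch for this lwp on this architecture.
 */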
1988 #pragma weak td_thr_getxregsize = __td_thr_getxregsize
1989 /* ARGSUSED */
1990 td_err_e
__td_thr_getxregsize(td_thrhandle_t * th_p,int * xregsize)1991 __td_thr_getxregsize(td_thrhandle_t *th_p, int *xregsize)
1992 {
1993 struct ps_prochandle *ph_p;
1994 td_err_e return_val;
1995
1996 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1997 return (return_val);
1998 if (ps_pstop(ph_p) != PS_OK) {
1999 ph_unlock(th_p->th_ta_p);
2000 return (TD_DBERR);
2001 }
2002
2003 if (ps_lgetxregsize(ph_p, thr_to_lwpid(th_p), xregsize) != PS_OK)
2004 return_val = TD_DBERR;
2005
2006 if (*xregsize == 0)
2007 return_val = TD_NOXREGS;
2008
2009 (void) ps_pcontinue(ph_p);
2010 ph_unlock(th_p->th_ta_p);
2011 return (return_val);
2012 }
2013
2014 /*
2015 * Get a thread's extra state register set.
2016 */
2017 #pragma weak td_thr_getxregs = __td_thr_getxregs
2018 td_err_e
__td_thr_getxregs(td_thrhandle_t * th_p,void * xregset)2019 __td_thr_getxregs(td_thrhandle_t *th_p, void *xregset)
2020 {
2021 struct ps_prochandle *ph_p;
2022 td_err_e return_val;
2023 ps_err_e ps_err;
2024
2025 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2026 return (return_val);
2027 if (ps_pstop(ph_p) != PS_OK) {
2028 ph_unlock(th_p->th_ta_p);
2029 return (TD_DBERR);
2030 }
2031
2032 ps_err = ps_lgetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset);
2033 if (ps_err == PS_NOXREGS)
2034 return_val = TD_NOXREGS;
2035 else if (ps_err != PS_OK)
2036 return_val = TD_DBERR;
2037
2038 (void) ps_pcontinue(ph_p);
2039 ph_unlock(th_p->th_ta_p);
2040 return (return_val);
2041 }
2042
2043 /*
2044 * Set a thread's extra state register set.
2045 */
2046 #pragma weak td_thr_setxregs = __td_thr_setxregs
2047 td_err_e
__td_thr_setxregs(td_thrhandle_t * th_p,const void * xregset)2048 __td_thr_setxregs(td_thrhandle_t *th_p, const void *xregset)
2049 {
2050 struct ps_prochandle *ph_p;
2051 td_err_e return_val;
2052
2053 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2054 return (return_val);
2055 if (ps_pstop(ph_p) != PS_OK) {
2056 ph_unlock(th_p->th_ta_p);
2057 return (TD_DBERR);
2058 }
2059
2060 if (ps_lsetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
2061 return_val = TD_DBERR;
2062
2063 (void) ps_pcontinue(ph_p);
2064 ph_unlock(th_p->th_ta_p);
2065 return (return_val);
2066 }
2067
2068 struct searcher {
2069 psaddr_t addr;
2070 int status;
2071 };
2072
2073 /*
2074  * Check the struct thread address in *th_p against the first
2075  * value in "data". If it matches, set the second value
2076  * in "data" to 1 and return 1 to terminate the iteration.
2077 * This function is used by td_thr_validate() to verify that
2078 * a thread handle is valid.
2079 */
2080 static int
td_searcher(const td_thrhandle_t * th_p,void * data)2081 td_searcher(const td_thrhandle_t *th_p, void *data)
2082 {
2083 struct searcher *searcher_data = (struct searcher *)data;
2084
2085 if (searcher_data->addr == th_p->th_unique) {
2086 searcher_data->status = 1;
2087 return (1);
2088 }
2089 return (0);
2090 }
2091
2092 /*
2093 * Validate the thread handle. Check that
2094 * a thread exists in the thread agent/process that
2095  * corresponds to the thread with handle *th_p.
2096 * Currently unused by dbx.
2097 */
2098 #pragma weak td_thr_validate = __td_thr_validate
2099 td_err_e
__td_thr_validate(const td_thrhandle_t * th_p)2100 __td_thr_validate(const td_thrhandle_t *th_p)
2101 {
2102 td_err_e return_val;
2103 struct searcher searcher_data = {0, 0};
2104
2105 if (th_p == NULL)
2106 return (TD_BADTH);
2107 if (th_p->th_unique == 0 || th_p->th_ta_p == NULL)
2108 return (TD_BADTH);
2109
2110 /*
2111 * LOCKING EXCEPTION - Locking is not required
2112 * here because no use of the thread agent is made (other
2113 * than the sanity check) and checking of the thread
2114 * agent will be done in __td_ta_thr_iter.
2115 */
2116
2117 searcher_data.addr = th_p->th_unique;
2118 return_val = __td_ta_thr_iter(th_p->th_ta_p,
2119 td_searcher, &searcher_data,
2120 TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
2121 TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
2122
2123 if (return_val == TD_OK && searcher_data.status == 0)
2124 return_val = TD_NOTHR;
2125
2126 return (return_val);
2127 }
2128
2129 /*
2130 * Get a thread's private binding to a given thread specific
2131  * data (TSD) key (see thr_getspecific(3C)). If the thread doesn't
2132 * have a binding for a particular key, then NULL is returned.
2133 */
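/*
 * A minimal usage sketch, assuming 'key' was obtained from the target
 * (for example via td_ta_tsd_iter()):
 *
 *	void *value;
 *
 *	if (td_thr_tsd(th_p, key, &value) == TD_OK && value != NULL)
 *		... the thread has a binding for 'key' ...
 */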
2134 #pragma weak td_thr_tsd = __td_thr_tsd
2135 td_err_e
__td_thr_tsd(td_thrhandle_t * th_p,thread_key_t key,void ** data_pp)2136 __td_thr_tsd(td_thrhandle_t *th_p, thread_key_t key, void **data_pp)
2137 {
2138 struct ps_prochandle *ph_p;
2139 td_thragent_t *ta_p;
2140 td_err_e return_val;
2141 int maxkey;
2142 int nkey;
2143 psaddr_t tsd_paddr;
2144
2145 if (data_pp == NULL)
2146 return (TD_ERR);
2147 *data_pp = NULL;
2148 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2149 return (return_val);
2150 ta_p = th_p->th_ta_p;
2151 if (ps_pstop(ph_p) != PS_OK) {
2152 ph_unlock(ta_p);
2153 return (TD_DBERR);
2154 }
2155
2156 if (ta_p->model == PR_MODEL_NATIVE) {
2157 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2158 tsd_metadata_t tsdm;
2159 tsd_t stsd;
2160
2161 if (ps_pdread(ph_p,
2162 ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
2163 &tsdm, sizeof (tsdm)) != PS_OK)
2164 return_val = TD_DBERR;
2165 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
2166 &tsd_paddr, sizeof (tsd_paddr)) != PS_OK)
2167 return_val = TD_DBERR;
2168 else if (tsd_paddr != 0 &&
2169 ps_pdread(ph_p, tsd_paddr, &stsd, sizeof (stsd)) != PS_OK)
2170 return_val = TD_DBERR;
2171 else {
2172 maxkey = tsdm.tsdm_nused;
2173 nkey = tsd_paddr == 0 ? TSD_NFAST : stsd.tsd_nalloc;
2174
2175 if (key < TSD_NFAST)
2176 tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
2177 }
2178 } else {
2179 #if defined(_LP64) && defined(_SYSCALL32)
2180 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2181 tsd_metadata32_t tsdm;
2182 tsd32_t stsd;
2183 caddr32_t addr;
2184
2185 if (ps_pdread(ph_p,
2186 ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
2187 &tsdm, sizeof (tsdm)) != PS_OK)
2188 return_val = TD_DBERR;
2189 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
2190 &addr, sizeof (addr)) != PS_OK)
2191 return_val = TD_DBERR;
2192 else if (addr != 0 &&
2193 ps_pdread(ph_p, addr, &stsd, sizeof (stsd)) != PS_OK)
2194 return_val = TD_DBERR;
2195 else {
2196 maxkey = tsdm.tsdm_nused;
2197 nkey = addr == 0 ? TSD_NFAST : stsd.tsd_nalloc;
2198
2199 if (key < TSD_NFAST) {
2200 tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
2201 } else {
2202 tsd_paddr = addr;
2203 }
2204 }
2205 #else
2206 return_val = TD_ERR;
2207 #endif /* _SYSCALL32 */
2208 }
2209
2210 if (return_val == TD_OK && (key < 1 || key >= maxkey))
2211 return_val = TD_NOTSD;
2212 if (return_val != TD_OK || key >= nkey) {
2213 /* NULL has already been stored in data_pp */
2214 (void) ps_pcontinue(ph_p);
2215 ph_unlock(ta_p);
2216 return (return_val);
2217 }
2218
2219 /*
2220 * Read the value from the thread's tsd array.
2221 */
2222 if (ta_p->model == PR_MODEL_NATIVE) {
2223 void *value;
2224
2225 if (ps_pdread(ph_p, tsd_paddr + key * sizeof (void *),
2226 &value, sizeof (value)) != PS_OK)
2227 return_val = TD_DBERR;
2228 else
2229 *data_pp = value;
2230 #if defined(_LP64) && defined(_SYSCALL32)
2231 } else {
2232 caddr32_t value32;
2233
2234 if (ps_pdread(ph_p, tsd_paddr + key * sizeof (caddr32_t),
2235 &value32, sizeof (value32)) != PS_OK)
2236 return_val = TD_DBERR;
2237 else
2238 *data_pp = (void *)(uintptr_t)value32;
2239 #endif /* _SYSCALL32 */
2240 }
2241
2242 (void) ps_pcontinue(ph_p);
2243 ph_unlock(ta_p);
2244 return (return_val);
2245 }
2246
2247 /*
2248 * Get the base address of a thread's thread local storage (TLS) block
2249 * for the module (executable or shared object) identified by 'moduleid'.
2250 */
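/*
 * For a module using static TLS the base is computed at a fixed offset
 * below the ulwp_t itself; otherwise the thread's dynamically allocated
 * tls array is consulted, and TD_TLSDEFER is returned if the module's
 * block has not yet been allocated for this thread.
 */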
2251 #pragma weak td_thr_tlsbase = __td_thr_tlsbase
2252 td_err_e
__td_thr_tlsbase(td_thrhandle_t * th_p,ulong_t moduleid,psaddr_t * base)2253 __td_thr_tlsbase(td_thrhandle_t *th_p, ulong_t moduleid, psaddr_t *base)
2254 {
2255 struct ps_prochandle *ph_p;
2256 td_thragent_t *ta_p;
2257 td_err_e return_val;
2258
2259 if (base == NULL)
2260 return (TD_ERR);
2261 *base = 0;
2262 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2263 return (return_val);
2264 ta_p = th_p->th_ta_p;
2265 if (ps_pstop(ph_p) != PS_OK) {
2266 ph_unlock(ta_p);
2267 return (TD_DBERR);
2268 }
2269
2270 if (ta_p->model == PR_MODEL_NATIVE) {
2271 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2272 tls_metadata_t tls_metadata;
2273 TLS_modinfo tlsmod;
2274 tls_t tls;
2275
2276 if (ps_pdread(ph_p,
2277 ta_p->uberdata_addr + offsetof(uberdata_t, tls_metadata),
2278 &tls_metadata, sizeof (tls_metadata)) != PS_OK)
2279 return_val = TD_DBERR;
2280 else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
2281 return_val = TD_NOTLS;
2282 else if (ps_pdread(ph_p,
2283 (psaddr_t)((TLS_modinfo *)
2284 tls_metadata.tls_modinfo.tls_data + moduleid),
2285 &tlsmod, sizeof (tlsmod)) != PS_OK)
2286 return_val = TD_DBERR;
2287 else if (tlsmod.tm_memsz == 0)
2288 return_val = TD_NOTLS;
2289 else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
2290 *base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
2291 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
2292 &tls, sizeof (tls)) != PS_OK)
2293 return_val = TD_DBERR;
2294 else if (moduleid >= tls.tls_size)
2295 return_val = TD_TLSDEFER;
2296 else if (ps_pdread(ph_p,
2297 (psaddr_t)((tls_t *)tls.tls_data + moduleid),
2298 &tls, sizeof (tls)) != PS_OK)
2299 return_val = TD_DBERR;
2300 else if (tls.tls_size == 0)
2301 return_val = TD_TLSDEFER;
2302 else
2303 *base = (psaddr_t)tls.tls_data;
2304 } else {
2305 #if defined(_LP64) && defined(_SYSCALL32)
2306 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2307 tls_metadata32_t tls_metadata;
2308 TLS_modinfo32 tlsmod;
2309 tls32_t tls;
2310
2311 if (ps_pdread(ph_p,
2312 ta_p->uberdata_addr + offsetof(uberdata32_t, tls_metadata),
2313 &tls_metadata, sizeof (tls_metadata)) != PS_OK)
2314 return_val = TD_DBERR;
2315 else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
2316 return_val = TD_NOTLS;
2317 else if (ps_pdread(ph_p,
2318 (psaddr_t)((TLS_modinfo32 *)
2319 (uintptr_t)tls_metadata.tls_modinfo.tls_data + moduleid),
2320 &tlsmod, sizeof (tlsmod)) != PS_OK)
2321 return_val = TD_DBERR;
2322 else if (tlsmod.tm_memsz == 0)
2323 return_val = TD_NOTLS;
2324 else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
2325 *base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
2326 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
2327 &tls, sizeof (tls)) != PS_OK)
2328 return_val = TD_DBERR;
2329 else if (moduleid >= tls.tls_size)
2330 return_val = TD_TLSDEFER;
2331 else if (ps_pdread(ph_p,
2332 (psaddr_t)((tls32_t *)(uintptr_t)tls.tls_data + moduleid),
2333 &tls, sizeof (tls)) != PS_OK)
2334 return_val = TD_DBERR;
2335 else if (tls.tls_size == 0)
2336 return_val = TD_TLSDEFER;
2337 else
2338 *base = (psaddr_t)tls.tls_data;
2339 #else
2340 return_val = TD_ERR;
2341 #endif /* _SYSCALL32 */
2342 }
2343
2344 (void) ps_pcontinue(ph_p);
2345 ph_unlock(ta_p);
2346 return (return_val);
2347 }
2348
2349 /*
2350 * Change a thread's priority to the value specified by ti_pri.
2351 * Currently unused by dbx.
2352 */
2353 #pragma weak td_thr_setprio = __td_thr_setprio
2354 /* ARGSUSED */
2355 td_err_e
__td_thr_setprio(td_thrhandle_t * th_p,int ti_pri)2356 __td_thr_setprio(td_thrhandle_t *th_p, int ti_pri)
2357 {
2358 return (TD_NOCAPAB);
2359 }
2360
2361 /*
2362 * This structure links td_thr_lockowner and the lowner_cb callback function.
2363 */
2364 typedef struct {
2365 td_sync_iter_f *owner_cb;
2366 void *owner_cb_arg;
2367 td_thrhandle_t *th_p;
2368 } lowner_cb_ctl_t;
2369
2370 static int
lowner_cb(const td_synchandle_t * sh_p,void * arg)2371 lowner_cb(const td_synchandle_t *sh_p, void *arg)
2372 {
2373 lowner_cb_ctl_t *ocb = arg;
2374 int trunc = 0;
2375 union {
2376 rwlock_t rwl;
2377 mutex_t mx;
2378 } rw_m;
2379
2380 if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
2381 &rw_m, sizeof (rw_m)) != PS_OK) {
2382 trunc = 1;
2383 if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
2384 &rw_m.mx, sizeof (rw_m.mx)) != PS_OK)
2385 return (0);
2386 }
2387 if (rw_m.mx.mutex_magic == MUTEX_MAGIC &&
2388 rw_m.mx.mutex_owner == ocb->th_p->th_unique)
2389 return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
2390 if (!trunc && rw_m.rwl.magic == RWL_MAGIC) {
2391 mutex_t *rwlock = &rw_m.rwl.mutex;
2392 if (rwlock->mutex_owner == ocb->th_p->th_unique)
2393 return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
2394 }
2395 return (0);
2396 }
2397
2398 /*
2399 * Iterate over the set of locks owned by a specified thread.
2400 * If cb returns a non-zero value, terminate iterations.
2401 */
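/*
 * A minimal usage sketch; count_owned() is purely illustrative:
 *
 *	static int
 *	count_owned(const td_synchandle_t *sh_p, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (0);		(non-zero would stop the iteration)
 *	}
 *
 *	int nlocks = 0;
 *	(void) td_thr_lockowner(th_p, count_owned, &nlocks);
 */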
2402 #pragma weak td_thr_lockowner = __td_thr_lockowner
2403 td_err_e
__td_thr_lockowner(const td_thrhandle_t * th_p,td_sync_iter_f * cb,void * cb_data)2404 __td_thr_lockowner(const td_thrhandle_t *th_p, td_sync_iter_f *cb,
2405 void *cb_data)
2406 {
2407 td_thragent_t *ta_p;
2408 td_err_e return_val;
2409 lowner_cb_ctl_t lcb;
2410
2411 /*
2412 * Just sanity checks.
2413 */
2414 if (ph_lock_th((td_thrhandle_t *)th_p, &return_val) == NULL)
2415 return (return_val);
2416 ta_p = th_p->th_ta_p;
2417 ph_unlock(ta_p);
2418
2419 lcb.owner_cb = cb;
2420 lcb.owner_cb_arg = cb_data;
2421 lcb.th_p = (td_thrhandle_t *)th_p;
2422 return (__td_ta_sync_iter(ta_p, lowner_cb, &lcb));
2423 }
2424
2425 /*
2426 * If a thread is asleep on a synchronization variable,
2427 * then get the synchronization handle.
2428 */
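/*
 * The returned handle refers to the object recorded in the thread's
 * ul_wchan; if the thread is not asleep on anything (ul_wchan is zero),
 * the handle is cleared and TD_ERR is returned.
 */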
2429 #pragma weak td_thr_sleepinfo = __td_thr_sleepinfo
2430 td_err_e
__td_thr_sleepinfo(const td_thrhandle_t * th_p,td_synchandle_t * sh_p)2431 __td_thr_sleepinfo(const td_thrhandle_t *th_p, td_synchandle_t *sh_p)
2432 {
2433 struct ps_prochandle *ph_p;
2434 td_err_e return_val = TD_OK;
2435 uintptr_t wchan;
2436
2437 if (sh_p == NULL)
2438 return (TD_ERR);
2439 if ((ph_p = ph_lock_th((td_thrhandle_t *)th_p, &return_val)) == NULL)
2440 return (return_val);
2441
2442 /*
2443 * No need to stop the process for a simple read.
2444 */
2445 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
2446 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2447
2448 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2449 &wchan, sizeof (wchan)) != PS_OK)
2450 return_val = TD_DBERR;
2451 } else {
2452 #if defined(_LP64) && defined(_SYSCALL32)
2453 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2454 caddr32_t wchan32;
2455
2456 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2457 &wchan32, sizeof (wchan32)) != PS_OK)
2458 return_val = TD_DBERR;
2459 wchan = wchan32;
2460 #else
2461 return_val = TD_ERR;
2462 #endif /* _SYSCALL32 */
2463 }
2464
2465 if (return_val != TD_OK || wchan == 0) {
2466 sh_p->sh_ta_p = NULL;
2467 sh_p->sh_unique = 0;
2468 if (return_val == TD_OK)
2469 return_val = TD_ERR;
2470 } else {
2471 sh_p->sh_ta_p = th_p->th_ta_p;
2472 sh_p->sh_unique = (psaddr_t)wchan;
2473 }
2474
2475 ph_unlock(th_p->th_ta_p);
2476 return (return_val);
2477 }
2478
2479 /*
2480 * Which thread is running on an lwp?
2481 */
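/*
 * With the one-level model the lwpid and the thread id are the same,
 * so this is simply an alias for td_ta_map_id2thr().
 */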
2482 #pragma weak td_ta_map_lwp2thr = __td_ta_map_lwp2thr
2483 td_err_e
__td_ta_map_lwp2thr(td_thragent_t * ta_p,lwpid_t lwpid,td_thrhandle_t * th_p)2484 __td_ta_map_lwp2thr(td_thragent_t *ta_p, lwpid_t lwpid,
2485 td_thrhandle_t *th_p)
2486 {
2487 return (__td_ta_map_id2thr(ta_p, lwpid, th_p));
2488 }
2489
2490 /*
2491 * Common code for td_sync_get_info() and td_sync_get_stats()
2492 */
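/*
 * Each synchronization object embeds a magic word (MUTEX_MAGIC,
 * COND_MAGIC, SEMA_MAGIC or RWL_MAGIC); it is read here and used to
 * decide how much of the object to fetch and how to decode it.
 */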
2493 static td_err_e
sync_get_info_common(const td_synchandle_t * sh_p,struct ps_prochandle * ph_p,td_syncinfo_t * si_p)2494 sync_get_info_common(const td_synchandle_t *sh_p, struct ps_prochandle *ph_p,
2495 td_syncinfo_t *si_p)
2496 {
2497 int trunc = 0;
2498 td_so_un_t generic_so;
2499
2500 /*
2501 * Determine the sync. object type; a little type fudgery here.
2502 * First attempt to read the whole union. If that fails, attempt
2503 * to read just the condvar. A condvar is the smallest sync. object.
2504 */
2505 if (ps_pdread(ph_p, sh_p->sh_unique,
2506 &generic_so, sizeof (generic_so)) != PS_OK) {
2507 trunc = 1;
2508 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
2509 sizeof (generic_so.condition)) != PS_OK)
2510 return (TD_DBERR);
2511 }
2512
2513 switch (generic_so.condition.cond_magic) {
2514 case MUTEX_MAGIC:
2515 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2516 &generic_so.lock, sizeof (generic_so.lock)) != PS_OK)
2517 return (TD_DBERR);
2518 si_p->si_type = TD_SYNC_MUTEX;
2519 si_p->si_shared_type =
2520 (generic_so.lock.mutex_type & USYNC_PROCESS);
2521 (void) memcpy(si_p->si_flags, &generic_so.lock.mutex_flag,
2522 sizeof (generic_so.lock.mutex_flag));
2523 si_p->si_state.mutex_locked =
2524 (generic_so.lock.mutex_lockw != 0);
2525 si_p->si_size = sizeof (generic_so.lock);
2526 si_p->si_has_waiters = generic_so.lock.mutex_waiters;
2527 si_p->si_rcount = generic_so.lock.mutex_rcount;
2528 si_p->si_prioceiling = generic_so.lock.mutex_ceiling;
2529 if (si_p->si_state.mutex_locked) {
2530 if (si_p->si_shared_type & USYNC_PROCESS)
2531 si_p->si_ownerpid =
2532 generic_so.lock.mutex_ownerpid;
2533 si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2534 si_p->si_owner.th_unique = generic_so.lock.mutex_owner;
2535 }
2536 break;
2537 case COND_MAGIC:
2538 si_p->si_type = TD_SYNC_COND;
2539 si_p->si_shared_type =
2540 (generic_so.condition.cond_type & USYNC_PROCESS);
2541 (void) memcpy(si_p->si_flags, generic_so.condition.flags.flag,
2542 sizeof (generic_so.condition.flags.flag));
2543 si_p->si_size = sizeof (generic_so.condition);
2544 si_p->si_has_waiters =
2545 (generic_so.condition.cond_waiters_user |
2546 generic_so.condition.cond_waiters_kernel)? 1 : 0;
2547 break;
2548 case SEMA_MAGIC:
2549 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2550 &generic_so.semaphore, sizeof (generic_so.semaphore))
2551 != PS_OK)
2552 return (TD_DBERR);
2553 si_p->si_type = TD_SYNC_SEMA;
2554 si_p->si_shared_type =
2555 (generic_so.semaphore.type & USYNC_PROCESS);
2556 si_p->si_state.sem_count = generic_so.semaphore.count;
2557 si_p->si_size = sizeof (generic_so.semaphore);
2558 si_p->si_has_waiters =
2559 ((lwp_sema_t *)&generic_so.semaphore)->flags[7];
2560 /* this is useless but the old interface provided it */
2561 si_p->si_data = (psaddr_t)generic_so.semaphore.count;
2562 break;
2563 case RWL_MAGIC:
2564 {
2565 uint32_t rwstate;
2566
2567 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2568 &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK)
2569 return (TD_DBERR);
2570 si_p->si_type = TD_SYNC_RWLOCK;
2571 si_p->si_shared_type =
2572 (generic_so.rwlock.rwlock_type & USYNC_PROCESS);
2573 si_p->si_size = sizeof (generic_so.rwlock);
2574
2575 rwstate = (uint32_t)generic_so.rwlock.rwlock_readers;
2576 if (rwstate & URW_WRITE_LOCKED) {
2577 si_p->si_state.nreaders = -1;
2578 si_p->si_is_wlock = 1;
2579 si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2580 si_p->si_owner.th_unique =
2581 generic_so.rwlock.rwlock_owner;
2582 if (si_p->si_shared_type & USYNC_PROCESS)
2583 si_p->si_ownerpid =
2584 generic_so.rwlock.rwlock_ownerpid;
2585 } else {
2586 si_p->si_state.nreaders = (rwstate & URW_READERS_MASK);
2587 }
2588 si_p->si_has_waiters = ((rwstate & URW_HAS_WAITERS) != 0);
2589
2590 /* this is useless but the old interface provided it */
2591 si_p->si_data = (psaddr_t)generic_so.rwlock.readers;
2592 break;
2593 }
2594 default:
2595 return (TD_BADSH);
2596 }
2597
2598 si_p->si_ta_p = sh_p->sh_ta_p;
2599 si_p->si_sv_addr = sh_p->sh_unique;
2600 return (TD_OK);
2601 }
2602
2603 /*
2604 * Given a synchronization handle, fill in the
2605 * information for the synchronization variable into *si_p.
2606 */
2607 #pragma weak td_sync_get_info = __td_sync_get_info
2608 td_err_e
__td_sync_get_info(const td_synchandle_t * sh_p,td_syncinfo_t * si_p)2609 __td_sync_get_info(const td_synchandle_t *sh_p, td_syncinfo_t *si_p)
2610 {
2611 struct ps_prochandle *ph_p;
2612 td_err_e return_val;
2613
2614 if (si_p == NULL)
2615 return (TD_ERR);
2616 (void) memset(si_p, 0, sizeof (*si_p));
2617 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2618 return (return_val);
2619 if (ps_pstop(ph_p) != PS_OK) {
2620 ph_unlock(sh_p->sh_ta_p);
2621 return (TD_DBERR);
2622 }
2623
2624 return_val = sync_get_info_common(sh_p, ph_p, si_p);
2625
2626 (void) ps_pcontinue(ph_p);
2627 ph_unlock(sh_p->sh_ta_p);
2628 return (return_val);
2629 }
2630
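/*
 * Fold a synchronization object address down to a 15-bit index into the
 * tdb sync-statistics hash table.  The 64-bit variant ignores the low
 * 4 address bits and the 32-bit variant the low 2, then xor-folds the
 * remaining bits.
 */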
2631 static uint_t
tdb_addr_hash64(uint64_t addr)2632 tdb_addr_hash64(uint64_t addr)
2633 {
2634 uint64_t value60 = (addr >> 4);
2635 uint32_t value30 = (value60 >> 30) ^ (value60 & 0x3fffffff);
2636 return ((value30 >> 15) ^ (value30 & 0x7fff));
2637 }
2638
2639 static uint_t
tdb_addr_hash32(uint64_t addr)2640 tdb_addr_hash32(uint64_t addr)
2641 {
2642 uint32_t value30 = (addr >> 2); /* 30 bits */
2643 return ((value30 >> 15) ^ (value30 & 0x7fff));
2644 }
2645
2646 static td_err_e
read_sync_stats(td_thragent_t * ta_p,psaddr_t hash_table,psaddr_t sync_obj_addr,tdb_sync_stats_t * sync_stats)2647 read_sync_stats(td_thragent_t *ta_p, psaddr_t hash_table,
2648 psaddr_t sync_obj_addr, tdb_sync_stats_t *sync_stats)
2649 {
2650 psaddr_t next_desc;
2651 uint64_t first;
2652 uint_t ix;
2653
2654 /*
2655 * Compute the hash table index from the synch object's address.
2656 */
2657 if (ta_p->model == PR_MODEL_LP64)
2658 ix = tdb_addr_hash64(sync_obj_addr);
2659 else
2660 ix = tdb_addr_hash32(sync_obj_addr);
2661
2662 /*
2663 * Get the address of the first element in the linked list.
2664 */
2665 if (ps_pdread(ta_p->ph_p, hash_table + ix * sizeof (uint64_t),
2666 &first, sizeof (first)) != PS_OK)
2667 return (TD_DBERR);
2668
2669 /*
2670  * Search the linked list for an entry for the synch object.
2671 */
2672 for (next_desc = (psaddr_t)first; next_desc != 0;
2673 next_desc = (psaddr_t)sync_stats->next) {
2674 if (ps_pdread(ta_p->ph_p, next_desc,
2675 sync_stats, sizeof (*sync_stats)) != PS_OK)
2676 return (TD_DBERR);
2677 if (sync_stats->sync_addr == sync_obj_addr)
2678 return (TD_OK);
2679 }
2680
2681 (void) memset(sync_stats, 0, sizeof (*sync_stats));
2682 return (TD_OK);
2683 }
2684
2685 /*
2686 * Given a synchronization handle, fill in the
2687 * statistics for the synchronization variable into *ss_p.
2688 */
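/*
 * Statistics are collected only while the target has synchronization
 * object tracking enabled (REGISTER_SYNC_ON); otherwise the counters
 * returned here are simply left at zero.
 */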
2689 #pragma weak td_sync_get_stats = __td_sync_get_stats
2690 td_err_e
__td_sync_get_stats(const td_synchandle_t * sh_p,td_syncstats_t * ss_p)2691 __td_sync_get_stats(const td_synchandle_t *sh_p, td_syncstats_t *ss_p)
2692 {
2693 struct ps_prochandle *ph_p;
2694 td_thragent_t *ta_p;
2695 td_err_e return_val;
2696 register_sync_t enable;
2697 psaddr_t hashaddr;
2698 tdb_sync_stats_t sync_stats;
2699 size_t ix;
2700
2701 if (ss_p == NULL)
2702 return (TD_ERR);
2703 (void) memset(ss_p, 0, sizeof (*ss_p));
2704 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2705 return (return_val);
2706 ta_p = sh_p->sh_ta_p;
2707 if (ps_pstop(ph_p) != PS_OK) {
2708 ph_unlock(ta_p);
2709 return (TD_DBERR);
2710 }
2711
2712 if ((return_val = sync_get_info_common(sh_p, ph_p, &ss_p->ss_info))
2713 != TD_OK) {
2714 if (return_val != TD_BADSH)
2715 goto out;
2716 /* we can correct TD_BADSH */
2717 (void) memset(&ss_p->ss_info, 0, sizeof (ss_p->ss_info));
2718 ss_p->ss_info.si_ta_p = sh_p->sh_ta_p;
2719 ss_p->ss_info.si_sv_addr = sh_p->sh_unique;
2720 /* we correct si_type and si_size below */
2721 return_val = TD_OK;
2722 }
2723 if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
2724 &enable, sizeof (enable)) != PS_OK) {
2725 return_val = TD_DBERR;
2726 goto out;
2727 }
2728 if (enable != REGISTER_SYNC_ON)
2729 goto out;
2730
2731 /*
2732 * Get the address of the hash table in the target process.
2733 */
2734 if (ta_p->model == PR_MODEL_NATIVE) {
2735 if (ps_pdread(ph_p, ta_p->uberdata_addr +
2736 offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
2737 		    &hashaddr, sizeof (hashaddr)) != PS_OK) {
2738 return_val = TD_DBERR;
2739 goto out;
2740 }
2741 } else {
2742 #if defined(_LP64) && defined(_SYSCALL32)
2743 caddr32_t addr;
2744
2745 if (ps_pdread(ph_p, ta_p->uberdata_addr +
2746 offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
2747 &addr, sizeof (addr)) != PS_OK) {
2748 return_val = TD_DBERR;
2749 goto out;
2750 }
2751 hashaddr = addr;
2752 #else
2753 return_val = TD_ERR;
2754 goto out;
2755 #endif /* _SYSCALL32 */
2756 }
2757
2758 if (hashaddr == 0)
2759 return_val = TD_BADSH;
2760 else
2761 return_val = read_sync_stats(ta_p, hashaddr,
2762 sh_p->sh_unique, &sync_stats);
2763 if (return_val != TD_OK)
2764 goto out;
2765
2766 /*
2767 * We have the hash table entry. Transfer the data to
2768 * the td_syncstats_t structure provided by the caller.
2769 */
2770 switch (sync_stats.un.type) {
2771 case TDB_MUTEX:
2772 {
2773 td_mutex_stats_t *msp = &ss_p->ss_un.mutex;
2774
2775 ss_p->ss_info.si_type = TD_SYNC_MUTEX;
2776 ss_p->ss_info.si_size = sizeof (mutex_t);
2777 msp->mutex_lock =
2778 sync_stats.un.mutex.mutex_lock;
2779 msp->mutex_sleep =
2780 sync_stats.un.mutex.mutex_sleep;
2781 msp->mutex_sleep_time =
2782 sync_stats.un.mutex.mutex_sleep_time;
2783 msp->mutex_hold_time =
2784 sync_stats.un.mutex.mutex_hold_time;
2785 msp->mutex_try =
2786 sync_stats.un.mutex.mutex_try;
2787 msp->mutex_try_fail =
2788 sync_stats.un.mutex.mutex_try_fail;
2789 if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2790 (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2791 < ta_p->hash_size * sizeof (thr_hash_table_t))
2792 msp->mutex_internal =
2793 ix / sizeof (thr_hash_table_t) + 1;
2794 break;
2795 }
2796 case TDB_COND:
2797 {
2798 td_cond_stats_t *csp = &ss_p->ss_un.cond;
2799
2800 ss_p->ss_info.si_type = TD_SYNC_COND;
2801 ss_p->ss_info.si_size = sizeof (cond_t);
2802 csp->cond_wait =
2803 sync_stats.un.cond.cond_wait;
2804 csp->cond_timedwait =
2805 sync_stats.un.cond.cond_timedwait;
2806 csp->cond_wait_sleep_time =
2807 sync_stats.un.cond.cond_wait_sleep_time;
2808 csp->cond_timedwait_sleep_time =
2809 sync_stats.un.cond.cond_timedwait_sleep_time;
2810 csp->cond_timedwait_timeout =
2811 sync_stats.un.cond.cond_timedwait_timeout;
2812 csp->cond_signal =
2813 sync_stats.un.cond.cond_signal;
2814 csp->cond_broadcast =
2815 sync_stats.un.cond.cond_broadcast;
2816 if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2817 (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2818 < ta_p->hash_size * sizeof (thr_hash_table_t))
2819 csp->cond_internal =
2820 ix / sizeof (thr_hash_table_t) + 1;
2821 break;
2822 }
2823 case TDB_RWLOCK:
2824 {
2825 td_rwlock_stats_t *rwsp = &ss_p->ss_un.rwlock;
2826
2827 ss_p->ss_info.si_type = TD_SYNC_RWLOCK;
2828 ss_p->ss_info.si_size = sizeof (rwlock_t);
2829 rwsp->rw_rdlock =
2830 sync_stats.un.rwlock.rw_rdlock;
2831 rwsp->rw_rdlock_try =
2832 sync_stats.un.rwlock.rw_rdlock_try;
2833 rwsp->rw_rdlock_try_fail =
2834 sync_stats.un.rwlock.rw_rdlock_try_fail;
2835 rwsp->rw_wrlock =
2836 sync_stats.un.rwlock.rw_wrlock;
2837 rwsp->rw_wrlock_hold_time =
2838 sync_stats.un.rwlock.rw_wrlock_hold_time;
2839 rwsp->rw_wrlock_try =
2840 sync_stats.un.rwlock.rw_wrlock_try;
2841 rwsp->rw_wrlock_try_fail =
2842 sync_stats.un.rwlock.rw_wrlock_try_fail;
2843 break;
2844 }
2845 case TDB_SEMA:
2846 {
2847 td_sema_stats_t *ssp = &ss_p->ss_un.sema;
2848
2849 ss_p->ss_info.si_type = TD_SYNC_SEMA;
2850 ss_p->ss_info.si_size = sizeof (sema_t);
2851 ssp->sema_wait =
2852 sync_stats.un.sema.sema_wait;
2853 ssp->sema_wait_sleep =
2854 sync_stats.un.sema.sema_wait_sleep;
2855 ssp->sema_wait_sleep_time =
2856 sync_stats.un.sema.sema_wait_sleep_time;
2857 ssp->sema_trywait =
2858 sync_stats.un.sema.sema_trywait;
2859 ssp->sema_trywait_fail =
2860 sync_stats.un.sema.sema_trywait_fail;
2861 ssp->sema_post =
2862 sync_stats.un.sema.sema_post;
2863 ssp->sema_max_count =
2864 sync_stats.un.sema.sema_max_count;
2865 ssp->sema_min_count =
2866 sync_stats.un.sema.sema_min_count;
2867 break;
2868 }
2869 default:
2870 return_val = TD_BADSH;
2871 break;
2872 }
2873
2874 out:
2875 (void) ps_pcontinue(ph_p);
2876 ph_unlock(ta_p);
2877 return (return_val);
2878 }
2879
2880 /*
2881 * Change the state of a synchronization variable.
2882 * 1) mutex lock state set to value
2883 * 2) semaphore's count set to value
2884  * 3) writer's lock set when value < 0
2885  * 4) reader's lock: number of readers set to value >= 0
2886 * Currently unused by dbx.
2887 */
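/*
 * For example, a debugger might use td_sync_setstate(sh_p, 0) to force
 * a wedged mutex into the unlocked state, or a negative value to mark
 * a rwlock as write-locked.
 */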
2888 #pragma weak td_sync_setstate = __td_sync_setstate
2889 td_err_e
__td_sync_setstate(const td_synchandle_t * sh_p,int value)2890 __td_sync_setstate(const td_synchandle_t *sh_p, int value)
2891 {
2892 struct ps_prochandle *ph_p;
2893 int trunc = 0;
2894 td_err_e return_val;
2895 td_so_un_t generic_so;
2896 uint32_t *rwstate;
2897
2898 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2899 return (return_val);
2900 if (ps_pstop(ph_p) != PS_OK) {
2901 ph_unlock(sh_p->sh_ta_p);
2902 return (TD_DBERR);
2903 }
2904
2905 /*
2906 * Read the synch. variable information.
2907 * First attempt to read the whole union and if that fails
2908 * fall back to reading only the smallest member, the condvar.
2909 */
2910 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so,
2911 sizeof (generic_so)) != PS_OK) {
2912 trunc = 1;
2913 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
2914 sizeof (generic_so.condition)) != PS_OK) {
2915 (void) ps_pcontinue(ph_p);
2916 ph_unlock(sh_p->sh_ta_p);
2917 return (TD_DBERR);
2918 }
2919 }
2920
2921 /*
2922 	 * Set the new value in the sync. variable: using the information
2923 	 * just read from the process, update its state and write it back.
2924 */
2925 	switch (generic_so.condition.cond_magic) {
2926 case MUTEX_MAGIC:
2927 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2928 &generic_so.lock, sizeof (generic_so.lock)) != PS_OK) {
2929 return_val = TD_DBERR;
2930 break;
2931 }
2932 generic_so.lock.mutex_lockw = (uint8_t)value;
2933 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.lock,
2934 sizeof (generic_so.lock)) != PS_OK)
2935 return_val = TD_DBERR;
2936 break;
2937 case SEMA_MAGIC:
2938 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2939 &generic_so.semaphore, sizeof (generic_so.semaphore))
2940 != PS_OK) {
2941 return_val = TD_DBERR;
2942 break;
2943 }
2944 generic_so.semaphore.count = value;
2945 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.semaphore,
2946 sizeof (generic_so.semaphore)) != PS_OK)
2947 return_val = TD_DBERR;
2948 break;
2949 case COND_MAGIC:
2950 /* Operation not supported on a condition variable */
2951 return_val = TD_ERR;
2952 break;
2953 case RWL_MAGIC:
2954 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2955 &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK) {
2956 return_val = TD_DBERR;
2957 break;
2958 }
2959 rwstate = (uint32_t *)&generic_so.rwlock.readers;
2960 *rwstate &= URW_HAS_WAITERS;
2961 if (value < 0)
2962 *rwstate |= URW_WRITE_LOCKED;
2963 else
2964 *rwstate |= (value & URW_READERS_MASK);
2965 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.rwlock,
2966 sizeof (generic_so.rwlock)) != PS_OK)
2967 return_val = TD_DBERR;
2968 break;
2969 default:
2970 /* Bad sync. object type */
2971 return_val = TD_BADSH;
2972 break;
2973 }
2974
2975 (void) ps_pcontinue(ph_p);
2976 ph_unlock(sh_p->sh_ta_p);
2977 return (return_val);
2978 }
2979
2980 typedef struct {
2981 td_thr_iter_f *waiter_cb;
2982 psaddr_t sync_obj_addr;
2983 uint16_t sync_magic;
2984 void *waiter_cb_arg;
2985 td_err_e errcode;
2986 } waiter_cb_ctl_t;
2987
2988 static int
waiters_cb(const td_thrhandle_t * th_p,void * arg)2989 waiters_cb(const td_thrhandle_t *th_p, void *arg)
2990 {
2991 td_thragent_t *ta_p = th_p->th_ta_p;
2992 struct ps_prochandle *ph_p = ta_p->ph_p;
2993 waiter_cb_ctl_t *wcb = arg;
2994 caddr_t wchan;
2995
2996 if (ta_p->model == PR_MODEL_NATIVE) {
2997 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2998
2999 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
3000 &wchan, sizeof (wchan)) != PS_OK) {
3001 wcb->errcode = TD_DBERR;
3002 return (1);
3003 }
3004 } else {
3005 #if defined(_LP64) && defined(_SYSCALL32)
3006 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
3007 caddr32_t wchan32;
3008
3009 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
3010 &wchan32, sizeof (wchan32)) != PS_OK) {
3011 wcb->errcode = TD_DBERR;
3012 return (1);
3013 }
3014 wchan = (caddr_t)(uintptr_t)wchan32;
3015 #else
3016 wcb->errcode = TD_ERR;
3017 return (1);
3018 #endif /* _SYSCALL32 */
3019 }
3020
3021 if (wchan == NULL)
3022 return (0);
3023
3024 if (wchan == (caddr_t)wcb->sync_obj_addr)
3025 return ((*wcb->waiter_cb)(th_p, wcb->waiter_cb_arg));
3026
3027 return (0);
3028 }
3029
3030 /*
3031 * For a given synchronization variable, iterate over the
3032 * set of waiting threads. The call back function is passed
3033 * two parameters, a pointer to a thread handle and a pointer
3034 * to extra call back data.
3035 */
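/*
 * A minimal usage sketch; note_waiter() is purely illustrative:
 *
 *	static int
 *	note_waiter(const td_thrhandle_t *th_p, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (0);		(non-zero would stop the iteration)
 *	}
 *
 *	int nwaiters = 0;
 *	(void) td_sync_waiters(sh_p, note_waiter, &nwaiters);
 */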
3036 #pragma weak td_sync_waiters = __td_sync_waiters
3037 td_err_e
__td_sync_waiters(const td_synchandle_t * sh_p,td_thr_iter_f * cb,void * cb_data)3038 __td_sync_waiters(const td_synchandle_t *sh_p, td_thr_iter_f *cb, void *cb_data)
3039 {
3040 struct ps_prochandle *ph_p;
3041 waiter_cb_ctl_t wcb;
3042 td_err_e return_val;
3043
3044 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
3045 return (return_val);
3046 if (ps_pdread(ph_p,
3047 (psaddr_t)&((mutex_t *)sh_p->sh_unique)->mutex_magic,
3048 (caddr_t)&wcb.sync_magic, sizeof (wcb.sync_magic)) != PS_OK) {
3049 ph_unlock(sh_p->sh_ta_p);
3050 return (TD_DBERR);
3051 }
3052 ph_unlock(sh_p->sh_ta_p);
3053
3054 switch (wcb.sync_magic) {
3055 case MUTEX_MAGIC:
3056 case COND_MAGIC:
3057 case SEMA_MAGIC:
3058 case RWL_MAGIC:
3059 break;
3060 default:
3061 return (TD_BADSH);
3062 }
3063
3064 wcb.waiter_cb = cb;
3065 wcb.sync_obj_addr = sh_p->sh_unique;
3066 wcb.waiter_cb_arg = cb_data;
3067 wcb.errcode = TD_OK;
3068 return_val = __td_ta_thr_iter(sh_p->sh_ta_p, waiters_cb, &wcb,
3069 TD_THR_SLEEP, TD_THR_LOWEST_PRIORITY,
3070 TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
3071
3072 if (return_val != TD_OK)
3073 return (return_val);
3074
3075 return (wcb.errcode);
3076 }
3077