1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
29 */
30
31 #include <stdio.h>
32 #include <stdlib.h>
33 #include <stddef.h>
34 #include <unistd.h>
35 #include <thr_uberdata.h>
36 #include <thread_db.h>
37 #include <libc_int.h>
38
39 /*
40 * Private structures.
41 */
42
43 typedef union {
44 mutex_t lock;
45 rwlock_t rwlock;
46 sema_t semaphore;
47 cond_t condition;
48 } td_so_un_t;
49
50 struct td_thragent {
51 rwlock_t rwlock;
52 struct ps_prochandle *ph_p;
53 int initialized;
54 int sync_tracking;
55 int model;
56 int primary_map;
57 psaddr_t bootstrap_addr;
58 psaddr_t uberdata_addr;
59 psaddr_t tdb_eventmask_addr;
60 psaddr_t tdb_register_sync_addr;
61 psaddr_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
62 psaddr_t hash_table_addr;
63 int hash_size;
64 lwpid_t single_lwpid;
65 psaddr_t single_ulwp_addr;
66 };
67
68 /*
69 * This is the name of the variable in libc that contains
70 * the uberdata address that we will need.
71 */
72 #define TD_BOOTSTRAP_NAME "_tdb_bootstrap"
73 /*
74 * This is the actual name of uberdata, used in the event
75 * that tdb_bootstrap has not yet been initialized.
76 */
77 #define TD_UBERDATA_NAME "_uberdata"
78 /*
79 * The library name should end with ".so.1", but older versions of
80 * dbx expect the unadorned name and malfunction if ".1" is specified.
81 * Unfortunately, if ".1" is not specified, mdb malfunctions when it
82 * is applied to another instance of itself (due to the presence of
83 * /usr/lib/mdb/proc/libc.so). So we try it both ways.
84 */
85 #define TD_LIBRARY_NAME "libc.so"
86 #define TD_LIBRARY_NAME_1 "libc.so.1"
87
88 td_err_e __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p);
89
90 td_err_e __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
91 void *cbdata_p, td_thr_state_e state, int ti_pri,
92 sigset_t *ti_sigmask_p, unsigned ti_user_flags);
93
94 /*
95 * Initialize threads debugging interface.
96 */
97 #pragma weak td_init = __td_init
98 td_err_e
99 __td_init()
100 {
101 return (TD_OK);
102 }
103
104 /*
105 * This function does nothing, and never did.
106 * But the symbol is in the ABI, so we can't delete it.
107 */
108 #pragma weak td_log = __td_log
109 void
110 __td_log()
111 {
112 }
113
114 /*
115 * Short-cut to read just the hash table size from the process,
116 * to avoid repeatedly reading the full uberdata structure when
117 * dealing with a single-threaded process.
118 */
119 static uint_t
120 td_read_hash_size(td_thragent_t *ta_p)
121 {
122 psaddr_t addr;
123 uint_t hash_size;
124
125 switch (ta_p->initialized) {
126 default: /* uninitialized */
127 return (0);
128 case 1: /* partially initialized */
129 break;
130 case 2: /* fully initialized */
131 return (ta_p->hash_size);
132 }
133
134 if (ta_p->model == PR_MODEL_NATIVE) {
135 addr = ta_p->uberdata_addr + offsetof(uberdata_t, hash_size);
136 } else {
137 #if defined(_LP64) && defined(_SYSCALL32)
138 addr = ta_p->uberdata_addr + offsetof(uberdata32_t, hash_size);
139 #else
140 addr = 0;
141 #endif
142 }
143 if (ps_pdread(ta_p->ph_p, addr, &hash_size, sizeof (hash_size))
144 != PS_OK)
145 return (0);
146 return (hash_size);
147 }
148
149 static td_err_e
150 td_read_uberdata(td_thragent_t *ta_p)
151 {
152 struct ps_prochandle *ph_p = ta_p->ph_p;
153 int i;
154
155 if (ta_p->model == PR_MODEL_NATIVE) {
156 uberdata_t uberdata;
157
158 if (ps_pdread(ph_p, ta_p->uberdata_addr,
159 &uberdata, sizeof (uberdata)) != PS_OK)
160 return (TD_DBERR);
161 ta_p->primary_map = uberdata.primary_map;
162 ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
163 offsetof(uberdata_t, tdb.tdb_ev_global_mask);
164 ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
165 offsetof(uberdata_t, uberflags.uf_tdb_register_sync);
166 ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
167 ta_p->hash_size = uberdata.hash_size;
168 if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
169 ta_p->tdb_events, sizeof (ta_p->tdb_events)) != PS_OK)
170 return (TD_DBERR);
171 } else {
172 #if defined(_LP64) && defined(_SYSCALL32)
173 uberdata32_t uberdata;
174 caddr32_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
175
176 if (ps_pdread(ph_p, ta_p->uberdata_addr,
177 &uberdata, sizeof (uberdata)) != PS_OK)
178 return (TD_DBERR);
179 ta_p->primary_map = uberdata.primary_map;
180 ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
181 offsetof(uberdata32_t, tdb.tdb_ev_global_mask);
182 ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
183 offsetof(uberdata32_t, uberflags.uf_tdb_register_sync);
184 ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
185 ta_p->hash_size = uberdata.hash_size;
186 if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
187 tdb_events, sizeof (tdb_events)) != PS_OK)
188 return (TD_DBERR);
189 for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++)
190 ta_p->tdb_events[i] = tdb_events[i];
191 #else
192 return (TD_DBERR);
193 #endif
194 }
195
196 /*
197 * Unfortunately, we are (implicitly) assuming that our uberdata
198 * definition precisely matches that of our target. If this is not
199 * true (that is, if we're examining a core file from a foreign
200 * system that has a different definition of uberdata), the failure
201 * modes can be frustratingly non-explicit. In an effort to catch
202 * this upon initialization (when the debugger may still be able to
203 * opt for another thread model or may be able to fail explicitly), we
204 * check that each of our tdb_events points to valid memory (these are
205 * putatively text upon which a breakpoint can be issued), with the
206 * hope that this is enough of a self-consistency check to lead to
207 * explicit failure on a mismatch.
208 */
209 for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++) {
210 uint8_t check;
211
212 if (ps_pdread(ph_p, (psaddr_t)ta_p->tdb_events[i],
213 &check, sizeof (check)) != PS_OK) {
214 return (TD_DBERR);
215 }
216 }
217
218 if (ta_p->hash_size != 1) { /* multi-threaded */
219 ta_p->initialized = 2;
220 ta_p->single_lwpid = 0;
221 ta_p->single_ulwp_addr = NULL;
222 } else { /* single-threaded */
223 ta_p->initialized = 1;
224 /*
225 * Get the address and lwpid of the single thread/LWP.
226 * It may not be ulwp_one if this is a child of fork1().
227 */
228 if (ta_p->model == PR_MODEL_NATIVE) {
229 thr_hash_table_t head;
230 lwpid_t lwpid = 0;
231
232 if (ps_pdread(ph_p, ta_p->hash_table_addr,
233 &head, sizeof (head)) != PS_OK)
234 return (TD_DBERR);
235 if ((psaddr_t)head.hash_bucket == NULL)
236 ta_p->initialized = 0;
237 else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
238 offsetof(ulwp_t, ul_lwpid),
239 &lwpid, sizeof (lwpid)) != PS_OK)
240 return (TD_DBERR);
241 ta_p->single_lwpid = lwpid;
242 ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
243 } else {
244 #if defined(_LP64) && defined(_SYSCALL32)
245 thr_hash_table32_t head;
246 lwpid_t lwpid = 0;
247
248 if (ps_pdread(ph_p, ta_p->hash_table_addr,
249 &head, sizeof (head)) != PS_OK)
250 return (TD_DBERR);
251 if ((psaddr_t)head.hash_bucket == NULL)
252 ta_p->initialized = 0;
253 else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
254 offsetof(ulwp32_t, ul_lwpid),
255 &lwpid, sizeof (lwpid)) != PS_OK)
256 return (TD_DBERR);
257 ta_p->single_lwpid = lwpid;
258 ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
259 #else
260 return (TD_DBERR);
261 #endif
262 }
263 }
264 if (!ta_p->primary_map)
265 ta_p->initialized = 0;
266 return (TD_OK);
267 }
268
269 static td_err_e
270 td_read_bootstrap_data(td_thragent_t *ta_p)
271 {
272 struct ps_prochandle *ph_p = ta_p->ph_p;
273 psaddr_t bootstrap_addr;
274 psaddr_t uberdata_addr;
275 ps_err_e db_return;
276 td_err_e return_val;
277 int do_1;
278
279 switch (ta_p->initialized) {
280 case 2: /* fully initialized */
281 return (TD_OK);
282 case 1: /* partially initialized */
283 if (td_read_hash_size(ta_p) == 1)
284 return (TD_OK);
285 return (td_read_uberdata(ta_p));
286 }
287
288 /*
289 * Uninitialized -- do the startup work.
290 * We set ta_p->initialized to -1 to cut off recursive calls
291 * into libc_db by code in the provider of ps_pglobal_lookup().
292 */
293 do_1 = 0;
294 ta_p->initialized = -1;
295 db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME,
296 TD_BOOTSTRAP_NAME, &bootstrap_addr);
297 if (db_return == PS_NOSYM) {
298 do_1 = 1;
299 db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME_1,
300 TD_BOOTSTRAP_NAME, &bootstrap_addr);
301 }
302 if (db_return == PS_NOSYM) /* libc is not linked yet */
303 return (TD_NOLIBTHREAD);
304 if (db_return != PS_OK)
305 return (TD_ERR);
306 db_return = ps_pglobal_lookup(ph_p,
307 do_1? TD_LIBRARY_NAME_1 : TD_LIBRARY_NAME,
308 TD_UBERDATA_NAME, &uberdata_addr);
309 if (db_return == PS_NOSYM) /* libc is not linked yet */
310 return (TD_NOLIBTHREAD);
311 if (db_return != PS_OK)
312 return (TD_ERR);
313
314 /*
315 * Read the uberdata address into the thread agent structure.
316 */
317 if (ta_p->model == PR_MODEL_NATIVE) {
318 psaddr_t psaddr;
319 if (ps_pdread(ph_p, bootstrap_addr,
320 &psaddr, sizeof (psaddr)) != PS_OK)
321 return (TD_DBERR);
322 if ((ta_p->bootstrap_addr = psaddr) == NULL)
323 psaddr = uberdata_addr;
324 else if (ps_pdread(ph_p, psaddr,
325 &psaddr, sizeof (psaddr)) != PS_OK)
326 return (TD_DBERR);
327 if (psaddr == NULL) {
328 /* primary linkmap in the tgt is not initialized */
329 ta_p->bootstrap_addr = NULL;
330 psaddr = uberdata_addr;
331 }
332 ta_p->uberdata_addr = psaddr;
333 } else {
334 #if defined(_LP64) && defined(_SYSCALL32)
335 caddr32_t psaddr;
336 if (ps_pdread(ph_p, bootstrap_addr,
337 &psaddr, sizeof (psaddr)) != PS_OK)
338 return (TD_DBERR);
339 if ((ta_p->bootstrap_addr = (psaddr_t)psaddr) == NULL)
340 psaddr = (caddr32_t)uberdata_addr;
341 else if (ps_pdread(ph_p, (psaddr_t)psaddr,
342 &psaddr, sizeof (psaddr)) != PS_OK)
343 return (TD_DBERR);
344 if (psaddr == NULL) {
345 /* primary linkmap in the tgt is not initialized */
346 ta_p->bootstrap_addr = NULL;
347 psaddr = (caddr32_t)uberdata_addr;
348 }
349 ta_p->uberdata_addr = (psaddr_t)psaddr;
350 #else
351 return (TD_DBERR);
352 #endif /* _SYSCALL32 */
353 }
354
355 if ((return_val = td_read_uberdata(ta_p)) != TD_OK)
356 return (return_val);
357 if (ta_p->bootstrap_addr == NULL)
358 ta_p->initialized = 0;
359 return (TD_OK);
360 }
361
362 #pragma weak ps_kill
363 #pragma weak ps_lrolltoaddr
364
365 /*
366 * Allocate a new agent process handle ("thread agent").
367 */
368 #pragma weak td_ta_new = __td_ta_new
369 td_err_e
370 __td_ta_new(struct ps_prochandle *ph_p, td_thragent_t **ta_pp)
371 {
372 td_thragent_t *ta_p;
373 int model;
374 td_err_e return_val = TD_OK;
375
376 if (ph_p == NULL)
377 return (TD_BADPH);
378 if (ta_pp == NULL)
379 return (TD_ERR);
380 *ta_pp = NULL;
381 if (ps_pstop(ph_p) != PS_OK)
382 return (TD_DBERR);
383 /*
384 * ps_pdmodel might not be defined if this is an older client.
385 * Make it a weak symbol and test if it exists before calling.
386 */
387 #pragma weak ps_pdmodel
388 if (ps_pdmodel == NULL) {
389 model = PR_MODEL_NATIVE;
390 } else if (ps_pdmodel(ph_p, &model) != PS_OK) {
391 (void) ps_pcontinue(ph_p);
392 return (TD_ERR);
393 }
394 if ((ta_p = malloc(sizeof (*ta_p))) == NULL) {
395 (void) ps_pcontinue(ph_p);
396 return (TD_MALLOC);
397 }
398
399 /*
400 * Initialize the agent process handle.
401 * Pick up the symbol value we need from the target process.
402 */
403 (void) memset(ta_p, 0, sizeof (*ta_p));
404 ta_p->ph_p = ph_p;
405 (void) rwlock_init(&ta_p->rwlock, USYNC_THREAD, NULL);
406 ta_p->model = model;
407 return_val = td_read_bootstrap_data(ta_p);
408
409 /*
410 * Because the old libthread_db enabled lock tracking by default,
411 * we must also do it. However, we do it only if the application
412 * provides the ps_kill() and ps_lrolltoaddr() interfaces.
413 * (dbx provides the ps_kill() and ps_lrolltoaddr() interfaces.)
414 */
415 if (return_val == TD_OK && ps_kill != NULL && ps_lrolltoaddr != NULL) {
416 register_sync_t oldenable;
417 register_sync_t enable = REGISTER_SYNC_ENABLE;
418 psaddr_t psaddr = ta_p->tdb_register_sync_addr;
419
420 if (ps_pdread(ph_p, psaddr,
421 &oldenable, sizeof (oldenable)) != PS_OK)
422 return_val = TD_DBERR;
423 else if (oldenable != REGISTER_SYNC_OFF ||
424 ps_pdwrite(ph_p, psaddr,
425 &enable, sizeof (enable)) != PS_OK) {
426 /*
427 * Lock tracking was already enabled or we
428 * failed to enable it, probably because we
429 * are examining a core file. In either case
430 * set the sync_tracking flag non-zero to
431 * indicate that we should not attempt to
432 * disable lock tracking when we delete the
433 * agent process handle in td_ta_delete().
434 */
435 ta_p->sync_tracking = 1;
436 }
437 }
438
439 if (return_val == TD_OK)
440 *ta_pp = ta_p;
441 else
442 free(ta_p);
443
444 (void) ps_pcontinue(ph_p);
445 return (return_val);
446 }
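
/*
 * A minimal usage sketch (hypothetical debugger code, not part of this
 * library) showing the expected life cycle of an agent handle; "ph" is
 * assumed to be a ps_prochandle the client already obtained.
 *
 *	td_thragent_t *ta;
 *
 *	(void) td_init();
 *	if (td_ta_new(ph, &ta) == TD_OK) {
 *		... use the td_ta_* and td_thr_* interfaces ...
 *		(void) td_ta_delete(ta);
 *	}
 */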
447
448 /*
449 * Utility function to grab the readers lock and return the prochandle,
450 * given an agent process handle. Performs standard error checking.
451 * Returns non-NULL with the lock held, or NULL with the lock not held.
452 */
453 static struct ps_prochandle *
454 ph_lock_ta(td_thragent_t *ta_p, td_err_e *err)
455 {
456 struct ps_prochandle *ph_p = NULL;
457 td_err_e error;
458
459 if (ta_p == NULL || ta_p->initialized == -1) {
460 *err = TD_BADTA;
461 } else if (rw_rdlock(&ta_p->rwlock) != 0) { /* can't happen? */
462 *err = TD_BADTA;
463 } else if ((ph_p = ta_p->ph_p) == NULL) {
464 (void) rw_unlock(&ta_p->rwlock);
465 *err = TD_BADPH;
466 } else if (ta_p->initialized != 2 &&
467 (error = td_read_bootstrap_data(ta_p)) != TD_OK) {
468 (void) rw_unlock(&ta_p->rwlock);
469 ph_p = NULL;
470 *err = error;
471 } else {
472 *err = TD_OK;
473 }
474
475 return (ph_p);
476 }
477
478 /*
479 * Utility function to grab the readers lock and return the prochandle,
480 * given an agent thread handle. Performs standard error checking.
481 * Returns non-NULL with the lock held, or NULL with the lock not held.
482 */
483 static struct ps_prochandle *
484 ph_lock_th(const td_thrhandle_t *th_p, td_err_e *err)
485 {
486 if (th_p == NULL || th_p->th_unique == NULL) {
487 *err = TD_BADTH;
488 return (NULL);
489 }
490 return (ph_lock_ta(th_p->th_ta_p, err));
491 }
492
493 /*
494 * Utility function to grab the readers lock and return the prochandle,
495 * given a synchronization object handle. Performs standard error checking.
496 * Returns non-NULL with the lock held, or NULL with the lock not held.
497 */
498 static struct ps_prochandle *
499 ph_lock_sh(const td_synchandle_t *sh_p, td_err_e *err)
500 {
501 if (sh_p == NULL || sh_p->sh_unique == NULL) {
502 *err = TD_BADSH;
503 return (NULL);
504 }
505 return (ph_lock_ta(sh_p->sh_ta_p, err));
506 }
507
508 /*
509 * Unlock the agent process handle obtained from ph_lock_*().
510 */
511 static void
512 ph_unlock(td_thragent_t *ta_p)
513 {
514 (void) rw_unlock(&ta_p->rwlock);
515 }
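
/*
 * Illustrative sketch (not a real function) of the pattern the exported
 * entry points below follow when using these helpers:
 *
 *	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
 *		return (return_val);
 *	... read/write target memory with ps_pdread()/ps_pdwrite() ...
 *	ph_unlock(ta_p);
 *	return (return_val);
 */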
516
517 /*
518 * De-allocate an agent process handle,
519 * releasing all related resources.
520 *
521 * XXX -- This is hopelessly broken ---
522 * Storage for thread agent is not deallocated. The prochandle
523 * in the thread agent is set to NULL so that future uses of
524 * the thread agent can be detected and an error value returned.
525 * All functions in the external user interface that make
526 * use of the thread agent are expected
527 * to check for a NULL prochandle in the thread agent.
528 * All such functions are also expected to obtain a
529 * reader lock on the thread agent while it is using it.
530 */
531 #pragma weak td_ta_delete = __td_ta_delete
532 td_err_e
533 __td_ta_delete(td_thragent_t *ta_p)
534 {
535 struct ps_prochandle *ph_p;
536
537 /*
538 * This is the only place we grab the writer lock.
539 * We are going to NULL out the prochandle.
540 */
541 if (ta_p == NULL || rw_wrlock(&ta_p->rwlock) != 0)
542 return (TD_BADTA);
543 if ((ph_p = ta_p->ph_p) == NULL) {
544 (void) rw_unlock(&ta_p->rwlock);
545 return (TD_BADPH);
546 }
547 /*
548 * If synch. tracking was disabled when td_ta_new() was called and
549 * if td_ta_sync_tracking_enable() was never called, then disable
550 * synch. tracking (it was enabled by default in td_ta_new()).
551 */
552 if (ta_p->sync_tracking == 0 &&
553 ps_kill != NULL && ps_lrolltoaddr != NULL) {
554 register_sync_t enable = REGISTER_SYNC_DISABLE;
555
556 (void) ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
557 &enable, sizeof (enable));
558 }
559 ta_p->ph_p = NULL;
560 (void) rw_unlock(&ta_p->rwlock);
561 return (TD_OK);
562 }
563
564 /*
565 * Map an agent process handle to a client prochandle.
566 * Currently unused by dbx.
567 */
568 #pragma weak td_ta_get_ph = __td_ta_get_ph
569 td_err_e
570 __td_ta_get_ph(td_thragent_t *ta_p, struct ps_prochandle **ph_pp)
571 {
572 td_err_e return_val;
573
574 if (ph_pp != NULL) /* protect stupid callers */
575 *ph_pp = NULL;
576 if (ph_pp == NULL)
577 return (TD_ERR);
578 if ((*ph_pp = ph_lock_ta(ta_p, &return_val)) == NULL)
579 return (return_val);
580 ph_unlock(ta_p);
581 return (TD_OK);
582 }
583
584 /*
585 * Set the process's suggested concurrency level.
586 * This is a no-op in a one-level model.
587 * Currently unused by dbx.
588 */
589 #pragma weak td_ta_setconcurrency = __td_ta_setconcurrency
590 /* ARGSUSED1 */
591 td_err_e
592 __td_ta_setconcurrency(const td_thragent_t *ta_p, int level)
593 {
594 if (ta_p == NULL)
595 return (TD_BADTA);
596 if (ta_p->ph_p == NULL)
597 return (TD_BADPH);
598 return (TD_OK);
599 }
600
601 /*
602 * Get the number of threads in the process.
603 */
604 #pragma weak td_ta_get_nthreads = __td_ta_get_nthreads
605 td_err_e
606 __td_ta_get_nthreads(td_thragent_t *ta_p, int *nthread_p)
607 {
608 struct ps_prochandle *ph_p;
609 td_err_e return_val;
610 int nthreads;
611 int nzombies;
612 psaddr_t nthreads_addr;
613 psaddr_t nzombies_addr;
614
615 if (ta_p->model == PR_MODEL_NATIVE) {
616 nthreads_addr = ta_p->uberdata_addr +
617 offsetof(uberdata_t, nthreads);
618 nzombies_addr = ta_p->uberdata_addr +
619 offsetof(uberdata_t, nzombies);
620 } else {
621 #if defined(_LP64) && defined(_SYSCALL32)
622 nthreads_addr = ta_p->uberdata_addr +
623 offsetof(uberdata32_t, nthreads);
624 nzombies_addr = ta_p->uberdata_addr +
625 offsetof(uberdata32_t, nzombies);
626 #else
627 nthreads_addr = 0;
628 nzombies_addr = 0;
629 #endif /* _SYSCALL32 */
630 }
631
632 if (nthread_p == NULL)
633 return (TD_ERR);
634 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
635 return (return_val);
636 if (ps_pdread(ph_p, nthreads_addr, &nthreads, sizeof (int)) != PS_OK)
637 return_val = TD_DBERR;
638 if (ps_pdread(ph_p, nzombies_addr, &nzombies, sizeof (int)) != PS_OK)
639 return_val = TD_DBERR;
640 ph_unlock(ta_p);
641 if (return_val == TD_OK)
642 *nthread_p = nthreads + nzombies;
643 return (return_val);
644 }
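
/*
 * Example (hypothetical caller): the count includes zombie threads.
 *
 *	int nthreads;
 *
 *	if (td_ta_get_nthreads(ta, &nthreads) == TD_OK)
 *		(void) printf("%d threads (including zombies)\n", nthreads);
 */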
645
646 typedef struct {
647 thread_t tid;
648 int found;
649 td_thrhandle_t th;
650 } td_mapper_param_t;
651
652 /*
653 * Check the value in data against the thread id.
654 * If it matches, return 1 to terminate iterations.
655 * This function is used by td_ta_map_id2thr() to map a tid to a thread handle.
656 */
657 static int
658 td_mapper_id2thr(td_thrhandle_t *th_p, td_mapper_param_t *data)
659 {
660 td_thrinfo_t ti;
661
662 if (__td_thr_get_info(th_p, &ti) == TD_OK &&
663 data->tid == ti.ti_tid) {
664 data->found = 1;
665 data->th = *th_p;
666 return (1);
667 }
668 return (0);
669 }
670
671 /*
672 * Given a thread identifier, return the corresponding thread handle.
673 */
674 #pragma weak td_ta_map_id2thr = __td_ta_map_id2thr
675 td_err_e
676 __td_ta_map_id2thr(td_thragent_t *ta_p, thread_t tid,
677 td_thrhandle_t *th_p)
678 {
679 td_err_e return_val;
680 td_mapper_param_t data;
681
682 if (th_p != NULL && /* optimize for a single thread */
683 ta_p != NULL &&
684 ta_p->initialized == 1 &&
685 (td_read_hash_size(ta_p) == 1 ||
686 td_read_uberdata(ta_p) == TD_OK) &&
687 ta_p->initialized == 1 &&
688 ta_p->single_lwpid == tid) {
689 th_p->th_ta_p = ta_p;
690 if ((th_p->th_unique = ta_p->single_ulwp_addr) == 0)
691 return (TD_NOTHR);
692 return (TD_OK);
693 }
694
695 /*
696 * LOCKING EXCEPTION - Locking is not required here because
697 * the locking and checking will be done in __td_ta_thr_iter.
698 */
699
700 if (ta_p == NULL)
701 return (TD_BADTA);
702 if (th_p == NULL)
703 return (TD_BADTH);
704 if (tid == 0)
705 return (TD_NOTHR);
706
707 data.tid = tid;
708 data.found = 0;
709 return_val = __td_ta_thr_iter(ta_p,
710 (td_thr_iter_f *)td_mapper_id2thr, (void *)&data,
711 TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
712 TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
713 if (return_val == TD_OK) {
714 if (data.found == 0)
715 return_val = TD_NOTHR;
716 else
717 *th_p = data.th;
718 }
719
720 return (return_val);
721 }
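
/*
 * Example (hypothetical caller): map a thread id to a handle and then
 * fetch its information.
 *
 *	td_thrhandle_t th;
 *	td_thrinfo_t ti;
 *
 *	if (td_ta_map_id2thr(ta, tid, &th) == TD_OK &&
 *	    td_thr_get_info(&th, &ti) == TD_OK)
 *		(void) printf("thread %d lwp %d\n",
 *		    (int)ti.ti_tid, (int)ti.ti_lid);
 */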
722
723 /*
724 * Map the address of a synchronization object to a sync. object handle.
725 */
726 #pragma weak td_ta_map_addr2sync = __td_ta_map_addr2sync
727 td_err_e
728 __td_ta_map_addr2sync(td_thragent_t *ta_p, psaddr_t addr, td_synchandle_t *sh_p)
729 {
730 struct ps_prochandle *ph_p;
731 td_err_e return_val;
732 uint16_t sync_magic;
733
734 if (sh_p == NULL)
735 return (TD_BADSH);
736 if (addr == NULL)
737 return (TD_ERR);
738 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
739 return (return_val);
740 /*
741 * Check the magic number of the sync. object to make sure it's valid.
742 * The magic number is at the same offset for all sync. objects.
743 */
744 if (ps_pdread(ph_p, (psaddr_t)&((mutex_t *)addr)->mutex_magic,
745 &sync_magic, sizeof (sync_magic)) != PS_OK) {
746 ph_unlock(ta_p);
747 return (TD_BADSH);
748 }
749 ph_unlock(ta_p);
750 if (sync_magic != MUTEX_MAGIC && sync_magic != COND_MAGIC &&
751 sync_magic != SEMA_MAGIC && sync_magic != RWL_MAGIC)
752 return (TD_BADSH);
753 /*
754 * Just fill in the appropriate fields of the sync. handle.
755 */
756 sh_p->sh_ta_p = (td_thragent_t *)ta_p;
757 sh_p->sh_unique = addr;
758 return (TD_OK);
759 }
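
/*
 * Example (hypothetical caller): given the address of a mutex_t in the
 * target, obtain a handle usable with the td_sync_*() interfaces.
 *
 *	td_synchandle_t sh;
 *
 *	if (td_ta_map_addr2sync(ta, mutex_addr, &sh) == TD_OK)
 *		... pass &sh to td_sync_get_info(), etc. ...
 */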
760
761 /*
762 * Iterate over the set of global TSD keys.
763 * The call back function is called with three arguments,
764 * a key, a pointer to the destructor function, and the cbdata pointer.
765 * Currently unused by dbx.
766 */
767 #pragma weak td_ta_tsd_iter = __td_ta_tsd_iter
768 td_err_e
769 __td_ta_tsd_iter(td_thragent_t *ta_p, td_key_iter_f *cb, void *cbdata_p)
770 {
771 struct ps_prochandle *ph_p;
772 td_err_e return_val;
773 int key;
774 int numkeys;
775 psaddr_t dest_addr;
776 psaddr_t *destructors = NULL;
777 PFrV destructor;
778
779 if (cb == NULL)
780 return (TD_ERR);
781 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
782 return (return_val);
783 if (ps_pstop(ph_p) != PS_OK) {
784 ph_unlock(ta_p);
785 return (TD_DBERR);
786 }
787
788 if (ta_p->model == PR_MODEL_NATIVE) {
789 tsd_metadata_t tsdm;
790
791 if (ps_pdread(ph_p,
792 ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
793 &tsdm, sizeof (tsdm)) != PS_OK)
794 return_val = TD_DBERR;
795 else {
796 numkeys = tsdm.tsdm_nused;
797 dest_addr = (psaddr_t)tsdm.tsdm_destro;
798 if (numkeys > 0)
799 destructors =
800 malloc(numkeys * sizeof (psaddr_t));
801 }
802 } else {
803 #if defined(_LP64) && defined(_SYSCALL32)
804 tsd_metadata32_t tsdm;
805
806 if (ps_pdread(ph_p,
807 ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
808 &tsdm, sizeof (tsdm)) != PS_OK)
809 return_val = TD_DBERR;
810 else {
811 numkeys = tsdm.tsdm_nused;
812 dest_addr = (psaddr_t)tsdm.tsdm_destro;
813 if (numkeys > 0)
814 destructors =
815 malloc(numkeys * sizeof (caddr32_t));
816 }
817 #else
818 return_val = TD_DBERR;
819 #endif /* _SYSCALL32 */
820 }
821
822 if (return_val != TD_OK || numkeys <= 0) {
823 (void) ps_pcontinue(ph_p);
824 ph_unlock(ta_p);
825 return (return_val);
826 }
827
828 if (destructors == NULL)
829 return_val = TD_MALLOC;
830 else if (ta_p->model == PR_MODEL_NATIVE) {
831 if (ps_pdread(ph_p, dest_addr,
832 destructors, numkeys * sizeof (psaddr_t)) != PS_OK)
833 return_val = TD_DBERR;
834 else {
835 for (key = 1; key < numkeys; key++) {
836 destructor = (PFrV)destructors[key];
837 if (destructor != TSD_UNALLOCATED &&
838 (*cb)(key, destructor, cbdata_p))
839 break;
840 }
841 }
842 #if defined(_LP64) && defined(_SYSCALL32)
843 } else {
844 caddr32_t *destructors32 = (caddr32_t *)destructors;
845 caddr32_t destruct32;
846
847 if (ps_pdread(ph_p, dest_addr,
848 destructors32, numkeys * sizeof (caddr32_t)) != PS_OK)
849 return_val = TD_DBERR;
850 else {
851 for (key = 1; key < numkeys; key++) {
852 destruct32 = destructors32[key];
853 if ((destruct32 !=
854 (caddr32_t)(uintptr_t)TSD_UNALLOCATED) &&
855 (*cb)(key, (PFrV)(uintptr_t)destruct32,
856 cbdata_p))
857 break;
858 }
859 }
860 #endif /* _SYSCALL32 */
861 }
862
863 if (destructors)
864 free(destructors);
865 (void) ps_pcontinue(ph_p);
866 ph_unlock(ta_p);
867 return (return_val);
868 }
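
/*
 * Example callback for td_ta_tsd_iter() (hypothetical client code; the
 * exact callback prototype is in <thread_db.h>); returning 0 continues
 * the iteration, non-zero stops it.
 *
 *	static int
 *	print_key(thread_key_t key, void (*destructor)(void *), void *arg)
 *	{
 *		(void) printf("key %d destructor %p\n",
 *		    (int)key, (void *)destructor);
 *		return (0);
 *	}
 *
 *	(void) td_ta_tsd_iter(ta, print_key, NULL);
 */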
869
870 int
871 sigequalset(const sigset_t *s1, const sigset_t *s2)
872 {
873 return (
874 s1->__sigbits[0] == s2->__sigbits[0] &&
875 s1->__sigbits[1] == s2->__sigbits[1] &&
876 s1->__sigbits[2] == s2->__sigbits[2] &&
877 s1->__sigbits[3] == s2->__sigbits[3]);
878 }
879
880 /*
881 * Description:
882 * Iterate over all threads. For each thread call
883 * the function pointed to by "cb" with a pointer
884 * to a thread handle, and a pointer to data which
885 * can be NULL. Only call td_thr_iter_f() on threads
886 * which match the properties of state, ti_pri,
887 * ti_sigmask_p, and ti_user_flags. If cb returns
888 * a non-zero value, terminate iterations.
889 *
890 * Input:
891 * *ta_p - thread agent
892 * *cb - call back function defined by user.
893 * td_thr_iter_f() takes a thread handle and
894 * cbdata_p as a parameter.
895 * cbdata_p - parameter for td_thr_iter_f().
896 *
897 * state - state of threads of interest. A value of
898 * TD_THR_ANY_STATE from enum td_thr_state_e
899 * does not restrict iterations by state.
900 * ti_pri - lower bound of priorities of threads of
901 * interest. A value of TD_THR_LOWEST_PRIORITY
902 * defined in thread_db.h does not restrict
903 * iterations by priority. A thread with priority
904 * less than ti_pri will NOT be passed to the callback
905 * function.
906 * ti_sigmask_p - signal mask of threads of interest.
907 * A value of TD_SIGNO_MASK defined in thread_db.h
908 * does not restrict iterations by signal mask.
909 * ti_user_flags - user flags of threads of interest. A
910 * value of TD_THR_ANY_USER_FLAGS defined in thread_db.h
911 * does not restrict iterations by user flags.
912 */
913 #pragma weak td_ta_thr_iter = __td_ta_thr_iter
914 td_err_e
915 __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
916 void *cbdata_p, td_thr_state_e state, int ti_pri,
917 sigset_t *ti_sigmask_p, unsigned ti_user_flags)
918 {
919 struct ps_prochandle *ph_p;
920 psaddr_t first_lwp_addr;
921 psaddr_t first_zombie_addr;
922 psaddr_t curr_lwp_addr;
923 psaddr_t next_lwp_addr;
924 td_thrhandle_t th;
925 ps_err_e db_return;
926 ps_err_e db_return2;
927 td_err_e return_val;
928
929 if (cb == NULL)
930 return (TD_ERR);
931 /*
932 	 * If state is not within bounds, short circuit.
933 */
934 if (state < TD_THR_ANY_STATE || state > TD_THR_STOPPED_ASLEEP)
935 return (TD_OK);
936
937 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
938 return (return_val);
939 if (ps_pstop(ph_p) != PS_OK) {
940 ph_unlock(ta_p);
941 return (TD_DBERR);
942 }
943
944 /*
945 * For each ulwp_t in the circular linked lists pointed
946 * to by "all_lwps" and "all_zombies":
947 * (1) Filter each thread.
948 * (2) Create the thread_object for each thread that passes.
949 * (3) Call the call back function on each thread.
950 */
951
952 if (ta_p->model == PR_MODEL_NATIVE) {
953 db_return = ps_pdread(ph_p,
954 ta_p->uberdata_addr + offsetof(uberdata_t, all_lwps),
955 &first_lwp_addr, sizeof (first_lwp_addr));
956 db_return2 = ps_pdread(ph_p,
957 ta_p->uberdata_addr + offsetof(uberdata_t, all_zombies),
958 &first_zombie_addr, sizeof (first_zombie_addr));
959 } else {
960 #if defined(_LP64) && defined(_SYSCALL32)
961 caddr32_t addr32;
962
963 db_return = ps_pdread(ph_p,
964 ta_p->uberdata_addr + offsetof(uberdata32_t, all_lwps),
965 &addr32, sizeof (addr32));
966 first_lwp_addr = addr32;
967 db_return2 = ps_pdread(ph_p,
968 ta_p->uberdata_addr + offsetof(uberdata32_t, all_zombies),
969 &addr32, sizeof (addr32));
970 first_zombie_addr = addr32;
971 #else /* _SYSCALL32 */
972 db_return = PS_ERR;
973 db_return2 = PS_ERR;
974 #endif /* _SYSCALL32 */
975 }
976 if (db_return == PS_OK)
977 db_return = db_return2;
978
979 /*
980 * If first_lwp_addr and first_zombie_addr are both NULL,
981 * libc must not yet be initialized or all threads have
982 * exited. Return TD_NOTHR and all will be well.
983 */
984 if (db_return == PS_OK &&
985 first_lwp_addr == NULL && first_zombie_addr == NULL) {
986 (void) ps_pcontinue(ph_p);
987 ph_unlock(ta_p);
988 return (TD_NOTHR);
989 }
990 if (db_return != PS_OK) {
991 (void) ps_pcontinue(ph_p);
992 ph_unlock(ta_p);
993 return (TD_DBERR);
994 }
995
996 /*
997 * Run down the lists of all living and dead lwps.
998 */
999 if (first_lwp_addr == NULL)
1000 first_lwp_addr = first_zombie_addr;
1001 curr_lwp_addr = first_lwp_addr;
1002 for (;;) {
1003 td_thr_state_e ts_state;
1004 int userpri;
1005 unsigned userflags;
1006 sigset_t mask;
1007
1008 /*
1009 * Read the ulwp struct.
1010 */
1011 if (ta_p->model == PR_MODEL_NATIVE) {
1012 ulwp_t ulwp;
1013
1014 if (ps_pdread(ph_p, curr_lwp_addr,
1015 &ulwp, sizeof (ulwp)) != PS_OK &&
1016 ((void) memset(&ulwp, 0, sizeof (ulwp)),
1017 ps_pdread(ph_p, curr_lwp_addr,
1018 &ulwp, REPLACEMENT_SIZE)) != PS_OK) {
1019 return_val = TD_DBERR;
1020 break;
1021 }
1022 next_lwp_addr = (psaddr_t)ulwp.ul_forw;
1023
1024 ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
1025 ulwp.ul_stop? TD_THR_STOPPED :
1026 ulwp.ul_wchan? TD_THR_SLEEP :
1027 TD_THR_ACTIVE;
1028 userpri = ulwp.ul_pri;
1029 userflags = ulwp.ul_usropts;
1030 if (ulwp.ul_dead)
1031 (void) sigemptyset(&mask);
1032 else
1033 mask = *(sigset_t *)&ulwp.ul_sigmask;
1034 } else {
1035 #if defined(_LP64) && defined(_SYSCALL32)
1036 ulwp32_t ulwp;
1037
1038 if (ps_pdread(ph_p, curr_lwp_addr,
1039 &ulwp, sizeof (ulwp)) != PS_OK &&
1040 ((void) memset(&ulwp, 0, sizeof (ulwp)),
1041 ps_pdread(ph_p, curr_lwp_addr,
1042 &ulwp, REPLACEMENT_SIZE32)) != PS_OK) {
1043 return_val = TD_DBERR;
1044 break;
1045 }
1046 next_lwp_addr = (psaddr_t)ulwp.ul_forw;
1047
1048 ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
1049 ulwp.ul_stop? TD_THR_STOPPED :
1050 ulwp.ul_wchan? TD_THR_SLEEP :
1051 TD_THR_ACTIVE;
1052 userpri = ulwp.ul_pri;
1053 userflags = ulwp.ul_usropts;
1054 if (ulwp.ul_dead)
1055 (void) sigemptyset(&mask);
1056 else
1057 mask = *(sigset_t *)&ulwp.ul_sigmask;
1058 #else /* _SYSCALL32 */
1059 return_val = TD_ERR;
1060 break;
1061 #endif /* _SYSCALL32 */
1062 }
1063
1064 /*
1065 * Filter on state, priority, sigmask, and user flags.
1066 */
1067
1068 if ((state != ts_state) &&
1069 (state != TD_THR_ANY_STATE))
1070 goto advance;
1071
1072 if (ti_pri > userpri)
1073 goto advance;
1074
1075 if (ti_sigmask_p != TD_SIGNO_MASK &&
1076 !sigequalset(ti_sigmask_p, &mask))
1077 goto advance;
1078
1079 if (ti_user_flags != userflags &&
1080 ti_user_flags != (unsigned)TD_THR_ANY_USER_FLAGS)
1081 goto advance;
1082
1083 /*
1084 * Call back - break if the return
1085 * from the call back is non-zero.
1086 */
1087 th.th_ta_p = (td_thragent_t *)ta_p;
1088 th.th_unique = curr_lwp_addr;
1089 if ((*cb)(&th, cbdata_p))
1090 break;
1091
1092 advance:
1093 if ((curr_lwp_addr = next_lwp_addr) == first_lwp_addr) {
1094 /*
1095 * Switch to the zombie list, unless it is NULL
1096 * or we have already been doing the zombie list,
1097 * in which case terminate the loop.
1098 */
1099 if (first_zombie_addr == NULL ||
1100 first_lwp_addr == first_zombie_addr)
1101 break;
1102 curr_lwp_addr = first_lwp_addr = first_zombie_addr;
1103 }
1104 }
1105
1106 (void) ps_pcontinue(ph_p);
1107 ph_unlock(ta_p);
1108 return (return_val);
1109 }
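
/*
 * Example callback for td_ta_thr_iter() (hypothetical client code); the
 * wildcard arguments visit every thread regardless of state, priority,
 * signal mask, or user flags.
 *
 *	static int
 *	visit_thread(td_thrhandle_t *th, void *arg)
 *	{
 *		td_thrinfo_t ti;
 *
 *		if (td_thr_get_info(th, &ti) == TD_OK)
 *			(void) printf("lwp %d state %d\n",
 *			    (int)ti.ti_lid, (int)ti.ti_state);
 *		return (0);
 *	}
 *
 *	(void) td_ta_thr_iter(ta, (td_thr_iter_f *)visit_thread, NULL,
 *	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK,
 *	    TD_THR_ANY_USER_FLAGS);
 */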
1110
1111 /*
1112 * Enable or disable process synchronization object tracking.
1113 * Currently unused by dbx.
1114 */
1115 #pragma weak td_ta_sync_tracking_enable = __td_ta_sync_tracking_enable
1116 td_err_e
1117 __td_ta_sync_tracking_enable(td_thragent_t *ta_p, int onoff)
1118 {
1119 struct ps_prochandle *ph_p;
1120 td_err_e return_val;
1121 register_sync_t enable;
1122
1123 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1124 return (return_val);
1125 /*
1126 * Values of tdb_register_sync in the victim process:
1127 * REGISTER_SYNC_ENABLE enables registration of synch objects
1128 * REGISTER_SYNC_DISABLE disables registration of synch objects
1129 * These cause the table to be cleared and tdb_register_sync set to:
1130 * REGISTER_SYNC_ON registration in effect
1131 * REGISTER_SYNC_OFF registration not in effect
1132 */
1133 enable = onoff? REGISTER_SYNC_ENABLE : REGISTER_SYNC_DISABLE;
1134 if (ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
1135 &enable, sizeof (enable)) != PS_OK)
1136 return_val = TD_DBERR;
1137 /*
1138 * Remember that this interface was called (see td_ta_delete()).
1139 */
1140 ta_p->sync_tracking = 1;
1141 ph_unlock(ta_p);
1142 return (return_val);
1143 }
1144
1145 /*
1146 * Iterate over all known synchronization variables.
1147 * It is very possible that the list generated is incomplete,
1148 * because the iterator can only find synchronization variables
1149 * that have been registered by the process since synchronization
1150 * object registration was enabled.
1151 * The call back function cb is called for each synchronization
1152 * variable with two arguments: a pointer to the synchronization
1153 * handle and the passed-in argument cbdata.
1154 * If cb returns a non-zero value, iterations are terminated.
1155 */
1156 #pragma weak td_ta_sync_iter = __td_ta_sync_iter
1157 td_err_e
1158 __td_ta_sync_iter(td_thragent_t *ta_p, td_sync_iter_f *cb, void *cbdata)
1159 {
1160 struct ps_prochandle *ph_p;
1161 td_err_e return_val;
1162 int i;
1163 register_sync_t enable;
1164 psaddr_t next_desc;
1165 tdb_sync_stats_t sync_stats;
1166 td_synchandle_t synchandle;
1167 psaddr_t psaddr;
1168 void *vaddr;
1169 uint64_t *sync_addr_hash = NULL;
1170
1171 if (cb == NULL)
1172 return (TD_ERR);
1173 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1174 return (return_val);
1175 if (ps_pstop(ph_p) != PS_OK) {
1176 ph_unlock(ta_p);
1177 return (TD_DBERR);
1178 }
1179 if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
1180 &enable, sizeof (enable)) != PS_OK) {
1181 return_val = TD_DBERR;
1182 goto out;
1183 }
1184 if (enable != REGISTER_SYNC_ON)
1185 goto out;
1186
1187 /*
1188 * First read the hash table.
1189 * The hash table is large; allocate with mmap().
1190 */
1191 if ((vaddr = mmap(NULL, TDB_HASH_SIZE * sizeof (uint64_t),
1192 PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
1193 == MAP_FAILED) {
1194 return_val = TD_MALLOC;
1195 goto out;
1196 }
1197 sync_addr_hash = vaddr;
1198
1199 if (ta_p->model == PR_MODEL_NATIVE) {
1200 if (ps_pdread(ph_p, ta_p->uberdata_addr +
1201 offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
1202 		    &psaddr, sizeof (psaddr)) != PS_OK) {
1203 return_val = TD_DBERR;
1204 goto out;
1205 }
1206 } else {
1207 #ifdef _SYSCALL32
1208 caddr32_t addr;
1209
1210 if (ps_pdread(ph_p, ta_p->uberdata_addr +
1211 offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
1212 &addr, sizeof (addr)) != PS_OK) {
1213 return_val = TD_DBERR;
1214 goto out;
1215 }
1216 psaddr = addr;
1217 #else
1218 return_val = TD_ERR;
1219 goto out;
1220 #endif /* _SYSCALL32 */
1221 }
1222
1223 if (psaddr == NULL)
1224 goto out;
1225 if (ps_pdread(ph_p, psaddr, sync_addr_hash,
1226 TDB_HASH_SIZE * sizeof (uint64_t)) != PS_OK) {
1227 return_val = TD_DBERR;
1228 goto out;
1229 }
1230
1231 /*
1232 * Now scan the hash table.
1233 */
1234 for (i = 0; i < TDB_HASH_SIZE; i++) {
1235 for (next_desc = (psaddr_t)sync_addr_hash[i];
1236 next_desc != NULL;
1237 next_desc = (psaddr_t)sync_stats.next) {
1238 if (ps_pdread(ph_p, next_desc,
1239 &sync_stats, sizeof (sync_stats)) != PS_OK) {
1240 return_val = TD_DBERR;
1241 goto out;
1242 }
1243 if (sync_stats.un.type == TDB_NONE) {
1244 /* not registered since registration enabled */
1245 continue;
1246 }
1247 synchandle.sh_ta_p = ta_p;
1248 synchandle.sh_unique = (psaddr_t)sync_stats.sync_addr;
1249 if ((*cb)(&synchandle, cbdata) != 0)
1250 goto out;
1251 }
1252 }
1253
1254 out:
1255 if (sync_addr_hash != NULL)
1256 (void) munmap((void *)sync_addr_hash,
1257 TDB_HASH_SIZE * sizeof (uint64_t));
1258 (void) ps_pcontinue(ph_p);
1259 ph_unlock(ta_p);
1260 return (return_val);
1261 }
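
/*
 * Example callback for td_ta_sync_iter() (hypothetical client code; the
 * exact callback prototype is in <thread_db.h>):
 *
 *	static int
 *	visit_sync(const td_synchandle_t *sh, void *arg)
 *	{
 *		(void) printf("sync object at %p\n", (void *)sh->sh_unique);
 *		return (0);
 *	}
 *
 *	(void) td_ta_sync_iter(ta, visit_sync, NULL);
 */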
1262
1263 /*
1264 * Enable process statistics collection.
1265 */
1266 #pragma weak td_ta_enable_stats = __td_ta_enable_stats
1267 /* ARGSUSED */
1268 td_err_e
1269 __td_ta_enable_stats(const td_thragent_t *ta_p, int onoff)
1270 {
1271 return (TD_NOCAPAB);
1272 }
1273
1274 /*
1275 * Reset process statistics.
1276 */
1277 #pragma weak td_ta_reset_stats = __td_ta_reset_stats
1278 /* ARGSUSED */
1279 td_err_e
1280 __td_ta_reset_stats(const td_thragent_t *ta_p)
1281 {
1282 return (TD_NOCAPAB);
1283 }
1284
1285 /*
1286 * Read process statistics.
1287 */
1288 #pragma weak td_ta_get_stats = __td_ta_get_stats
1289 /* ARGSUSED */
1290 td_err_e
1291 __td_ta_get_stats(const td_thragent_t *ta_p, td_ta_stats_t *tstats)
1292 {
1293 return (TD_NOCAPAB);
1294 }
1295
1296 /*
1297 * Transfer information from lwp struct to thread information struct.
1298 * XXX -- lots of this needs cleaning up.
1299 */
1300 static void
1301 td_thr2to(td_thragent_t *ta_p, psaddr_t ts_addr,
1302 ulwp_t *ulwp, td_thrinfo_t *ti_p)
1303 {
1304 lwpid_t lwpid;
1305
1306 if ((lwpid = ulwp->ul_lwpid) == 0)
1307 lwpid = 1;
1308 (void) memset(ti_p, 0, sizeof (*ti_p));
1309 ti_p->ti_ta_p = ta_p;
1310 ti_p->ti_user_flags = ulwp->ul_usropts;
1311 ti_p->ti_tid = lwpid;
1312 ti_p->ti_exitval = ulwp->ul_rval;
1313 ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
1314 if (!ulwp->ul_dead) {
1315 /*
1316 * The bloody fools got this backwards!
1317 */
1318 ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
1319 ti_p->ti_stksize = ulwp->ul_stksiz;
1320 }
1321 ti_p->ti_ro_area = ts_addr;
1322 ti_p->ti_ro_size = ulwp->ul_replace?
1323 REPLACEMENT_SIZE : sizeof (ulwp_t);
1324 ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
1325 ulwp->ul_stop? TD_THR_STOPPED :
1326 ulwp->ul_wchan? TD_THR_SLEEP :
1327 TD_THR_ACTIVE;
1328 ti_p->ti_db_suspended = 0;
1329 ti_p->ti_type = TD_THR_USER;
1330 ti_p->ti_sp = ulwp->ul_sp;
1331 ti_p->ti_flags = 0;
1332 ti_p->ti_pri = ulwp->ul_pri;
1333 ti_p->ti_lid = lwpid;
1334 if (!ulwp->ul_dead)
1335 ti_p->ti_sigmask = ulwp->ul_sigmask;
1336 ti_p->ti_traceme = 0;
1337 ti_p->ti_preemptflag = 0;
1338 ti_p->ti_pirecflag = 0;
1339 (void) sigemptyset(&ti_p->ti_pending);
1340 ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
1341 }
1342
1343 #if defined(_LP64) && defined(_SYSCALL32)
1344 static void
1345 td_thr2to32(td_thragent_t *ta_p, psaddr_t ts_addr,
1346 ulwp32_t *ulwp, td_thrinfo_t *ti_p)
1347 {
1348 lwpid_t lwpid;
1349
1350 if ((lwpid = ulwp->ul_lwpid) == 0)
1351 lwpid = 1;
1352 (void) memset(ti_p, 0, sizeof (*ti_p));
1353 ti_p->ti_ta_p = ta_p;
1354 ti_p->ti_user_flags = ulwp->ul_usropts;
1355 ti_p->ti_tid = lwpid;
1356 ti_p->ti_exitval = (void *)(uintptr_t)ulwp->ul_rval;
1357 ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
1358 if (!ulwp->ul_dead) {
1359 /*
1360 * The bloody fools got this backwards!
1361 */
1362 ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
1363 ti_p->ti_stksize = ulwp->ul_stksiz;
1364 }
1365 ti_p->ti_ro_area = ts_addr;
1366 ti_p->ti_ro_size = ulwp->ul_replace?
1367 REPLACEMENT_SIZE32 : sizeof (ulwp32_t);
1368 ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
1369 ulwp->ul_stop? TD_THR_STOPPED :
1370 ulwp->ul_wchan? TD_THR_SLEEP :
1371 TD_THR_ACTIVE;
1372 ti_p->ti_db_suspended = 0;
1373 ti_p->ti_type = TD_THR_USER;
1374 ti_p->ti_sp = (uint32_t)ulwp->ul_sp;
1375 ti_p->ti_flags = 0;
1376 ti_p->ti_pri = ulwp->ul_pri;
1377 ti_p->ti_lid = lwpid;
1378 if (!ulwp->ul_dead)
1379 ti_p->ti_sigmask = *(sigset_t *)&ulwp->ul_sigmask;
1380 ti_p->ti_traceme = 0;
1381 ti_p->ti_preemptflag = 0;
1382 ti_p->ti_pirecflag = 0;
1383 (void) sigemptyset(&ti_p->ti_pending);
1384 ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
1385 }
1386 #endif /* _SYSCALL32 */
1387
1388 /*
1389 * Get thread information.
1390 */
1391 #pragma weak td_thr_get_info = __td_thr_get_info
1392 td_err_e
1393 __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p)
1394 {
1395 struct ps_prochandle *ph_p;
1396 td_thragent_t *ta_p;
1397 td_err_e return_val;
1398 psaddr_t psaddr;
1399
1400 if (ti_p == NULL)
1401 return (TD_ERR);
1402 (void) memset(ti_p, NULL, sizeof (*ti_p));
1403
1404 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1405 return (return_val);
1406 ta_p = th_p->th_ta_p;
1407 if (ps_pstop(ph_p) != PS_OK) {
1408 ph_unlock(ta_p);
1409 return (TD_DBERR);
1410 }
1411
1412 /*
1413 * Read the ulwp struct from the process.
1414 * Transfer the ulwp struct to the thread information struct.
1415 */
1416 psaddr = th_p->th_unique;
1417 if (ta_p->model == PR_MODEL_NATIVE) {
1418 ulwp_t ulwp;
1419
1420 if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
1421 ((void) memset(&ulwp, 0, sizeof (ulwp)),
1422 ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE)) != PS_OK)
1423 return_val = TD_DBERR;
1424 else
1425 td_thr2to(ta_p, psaddr, &ulwp, ti_p);
1426 } else {
1427 #if defined(_LP64) && defined(_SYSCALL32)
1428 ulwp32_t ulwp;
1429
1430 if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
1431 ((void) memset(&ulwp, 0, sizeof (ulwp)),
1432 ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE32)) !=
1433 PS_OK)
1434 return_val = TD_DBERR;
1435 else
1436 td_thr2to32(ta_p, psaddr, &ulwp, ti_p);
1437 #else
1438 return_val = TD_ERR;
1439 #endif /* _SYSCALL32 */
1440 }
1441
1442 (void) ps_pcontinue(ph_p);
1443 ph_unlock(ta_p);
1444 return (return_val);
1445 }
1446
1447 /*
1448 * Given a process and an event number, return information about
1449 * an address in the process or at which a breakpoint can be set
1450 * to monitor the event.
1451 */
1452 #pragma weak td_ta_event_addr = __td_ta_event_addr
1453 td_err_e
1454 __td_ta_event_addr(td_thragent_t *ta_p, td_event_e event, td_notify_t *notify_p)
1455 {
1456 if (ta_p == NULL)
1457 return (TD_BADTA);
1458 if (event < TD_MIN_EVENT_NUM || event > TD_MAX_EVENT_NUM)
1459 return (TD_NOEVENT);
1460 if (notify_p == NULL)
1461 return (TD_ERR);
1462
1463 notify_p->type = NOTIFY_BPT;
1464 notify_p->u.bptaddr = ta_p->tdb_events[event - TD_MIN_EVENT_NUM];
1465
1466 return (TD_OK);
1467 }
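
/*
 * Example (hypothetical debugger): find where to plant a breakpoint so
 * that thread creation can be observed.
 *
 *	td_notify_t notify;
 *
 *	if (td_ta_event_addr(ta, TD_CREATE, &notify) == TD_OK &&
 *	    notify.type == NOTIFY_BPT)
 *		... set a breakpoint at notify.u.bptaddr ...
 */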
1468
1469 /*
1470 * Add the events in eventset 2 to eventset 1.
1471 */
1472 static void
1473 eventsetaddset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
1474 {
1475 int i;
1476
1477 for (i = 0; i < TD_EVENTSIZE; i++)
1478 event1_p->event_bits[i] |= event2_p->event_bits[i];
1479 }
1480
1481 /*
1482 * Delete the events in eventset 2 from eventset 1.
1483 */
1484 static void
1485 eventsetdelset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
1486 {
1487 int i;
1488
1489 for (i = 0; i < TD_EVENTSIZE; i++)
1490 event1_p->event_bits[i] &= ~event2_p->event_bits[i];
1491 }
1492
1493 /*
1494 * Either add or delete the given event set from a thread's event mask.
1495 */
1496 static td_err_e
1497 mod_eventset(td_thrhandle_t *th_p, td_thr_events_t *events, int onoff)
1498 {
1499 struct ps_prochandle *ph_p;
1500 td_err_e return_val = TD_OK;
1501 char enable;
1502 td_thr_events_t evset;
1503 psaddr_t psaddr_evset;
1504 psaddr_t psaddr_enab;
1505
1506 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1507 return (return_val);
1508 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1509 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1510 psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
1511 psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
1512 } else {
1513 #if defined(_LP64) && defined(_SYSCALL32)
1514 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1515 psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
1516 psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
1517 #else
1518 ph_unlock(th_p->th_ta_p);
1519 return (TD_ERR);
1520 #endif /* _SYSCALL32 */
1521 }
1522 if (ps_pstop(ph_p) != PS_OK) {
1523 ph_unlock(th_p->th_ta_p);
1524 return (TD_DBERR);
1525 }
1526
1527 if (ps_pdread(ph_p, psaddr_evset, &evset, sizeof (evset)) != PS_OK)
1528 return_val = TD_DBERR;
1529 else {
1530 if (onoff)
1531 eventsetaddset(&evset, events);
1532 else
1533 eventsetdelset(&evset, events);
1534 if (ps_pdwrite(ph_p, psaddr_evset, &evset, sizeof (evset))
1535 != PS_OK)
1536 return_val = TD_DBERR;
1537 else {
1538 enable = 0;
1539 if (td_eventismember(&evset, TD_EVENTS_ENABLE))
1540 enable = 1;
1541 if (ps_pdwrite(ph_p, psaddr_enab,
1542 &enable, sizeof (enable)) != PS_OK)
1543 return_val = TD_DBERR;
1544 }
1545 }
1546
1547 (void) ps_pcontinue(ph_p);
1548 ph_unlock(th_p->th_ta_p);
1549 return (return_val);
1550 }
1551
1552 /*
1553 * Enable or disable tracing for a given thread. Tracing
1554 * is filtered based on the event mask of each thread. Tracing
1555 * can be turned on/off for the thread without changing thread
1556 * event mask.
1557 * Currently unused by dbx.
1558 */
1559 #pragma weak td_thr_event_enable = __td_thr_event_enable
1560 td_err_e
1561 __td_thr_event_enable(td_thrhandle_t *th_p, int onoff)
1562 {
1563 td_thr_events_t evset;
1564
1565 td_event_emptyset(&evset);
1566 td_event_addset(&evset, TD_EVENTS_ENABLE);
1567 return (mod_eventset(th_p, &evset, onoff));
1568 }
1569
1570 /*
1571 * Set event mask to enable event. event is turned on in
1572 * event mask for thread. If a thread encounters an event
1573 * for which its event mask is on, notification will be sent
1574 * to the debugger.
1575 * Addresses for each event are provided to the
1576 * debugger. It is assumed that a breakpoint of some type will
1577 * be placed at that address. If the event mask for the thread
1578 * is on, the instruction at the address will be executed.
1579 * Otherwise, the instruction will be skipped.
1580 */
1581 #pragma weak td_thr_set_event = __td_thr_set_event
1582 td_err_e
1583 __td_thr_set_event(td_thrhandle_t *th_p, td_thr_events_t *events)
1584 {
1585 return (mod_eventset(th_p, events, 1));
1586 }
1587
1588 /*
1589 * Enable or disable a set of events in the process-global event mask,
1590 * depending on the value of onoff.
1591 */
1592 static td_err_e
1593 td_ta_mod_event(td_thragent_t *ta_p, td_thr_events_t *events, int onoff)
1594 {
1595 struct ps_prochandle *ph_p;
1596 td_thr_events_t targ_eventset;
1597 td_err_e return_val;
1598
1599 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1600 return (return_val);
1601 if (ps_pstop(ph_p) != PS_OK) {
1602 ph_unlock(ta_p);
1603 return (TD_DBERR);
1604 }
1605 if (ps_pdread(ph_p, ta_p->tdb_eventmask_addr,
1606 &targ_eventset, sizeof (targ_eventset)) != PS_OK)
1607 return_val = TD_DBERR;
1608 else {
1609 if (onoff)
1610 eventsetaddset(&targ_eventset, events);
1611 else
1612 eventsetdelset(&targ_eventset, events);
1613 if (ps_pdwrite(ph_p, ta_p->tdb_eventmask_addr,
1614 &targ_eventset, sizeof (targ_eventset)) != PS_OK)
1615 return_val = TD_DBERR;
1616 }
1617 (void) ps_pcontinue(ph_p);
1618 ph_unlock(ta_p);
1619 return (return_val);
1620 }
1621
1622 /*
1623 * Enable a set of events in the process-global event mask.
1624 */
1625 #pragma weak td_ta_set_event = __td_ta_set_event
1626 td_err_e
1627 __td_ta_set_event(td_thragent_t *ta_p, td_thr_events_t *events)
1628 {
1629 return (td_ta_mod_event(ta_p, events, 1));
1630 }
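
/*
 * Example (hypothetical debugger): request creation and death events
 * process-wide, then enable event reporting for a thread of interest.
 *
 *	td_thr_events_t events;
 *
 *	td_event_emptyset(&events);
 *	td_event_addset(&events, TD_CREATE);
 *	td_event_addset(&events, TD_DEATH);
 *	(void) td_ta_set_event(ta, &events);
 *	(void) td_thr_set_event(&th, &events);
 *	(void) td_thr_event_enable(&th, 1);
 */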
1631
1632 /*
1633 * Set event mask to disable the given event set; these events are cleared
1634 * from the event mask of the thread. Events that occur for a thread
1635 * with the event masked off will not cause notification to be
1636 * sent to the debugger (see td_thr_set_event for fuller description).
1637 */
1638 #pragma weak td_thr_clear_event = __td_thr_clear_event
1639 td_err_e
1640 __td_thr_clear_event(td_thrhandle_t *th_p, td_thr_events_t *events)
1641 {
1642 return (mod_eventset(th_p, events, 0));
1643 }
1644
1645 /*
1646 * Disable a set of events in the process-global event mask.
1647 */
1648 #pragma weak td_ta_clear_event = __td_ta_clear_event
1649 td_err_e
1650 __td_ta_clear_event(td_thragent_t *ta_p, td_thr_events_t *events)
1651 {
1652 return (td_ta_mod_event(ta_p, events, 0));
1653 }
1654
1655 /*
1656 * This function returns the most recent event message, if any,
1657 * associated with a thread. Given a thread handle, return the message
1658 * corresponding to the event encountered by the thread. Only one
1659 * message per thread is saved. Messages from earlier events are lost
1660 * when later events occur.
1661 */
1662 #pragma weak td_thr_event_getmsg = __td_thr_event_getmsg
1663 td_err_e
1664 __td_thr_event_getmsg(td_thrhandle_t *th_p, td_event_msg_t *msg)
1665 {
1666 struct ps_prochandle *ph_p;
1667 td_err_e return_val = TD_OK;
1668 psaddr_t psaddr;
1669
1670 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1671 return (return_val);
1672 if (ps_pstop(ph_p) != PS_OK) {
1673 ph_unlock(th_p->th_ta_p);
1674 return (TD_BADTA);
1675 }
1676 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1677 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1678 td_evbuf_t evbuf;
1679
1680 psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
1681 if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
1682 return_val = TD_DBERR;
1683 } else if (evbuf.eventnum == TD_EVENT_NONE) {
1684 return_val = TD_NOEVENT;
1685 } else {
1686 msg->event = evbuf.eventnum;
1687 msg->th_p = (td_thrhandle_t *)th_p;
1688 msg->msg.data = (uintptr_t)evbuf.eventdata;
1689 /* "Consume" the message */
1690 evbuf.eventnum = TD_EVENT_NONE;
1691 evbuf.eventdata = NULL;
1692 if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
1693 != PS_OK)
1694 return_val = TD_DBERR;
1695 }
1696 } else {
1697 #if defined(_LP64) && defined(_SYSCALL32)
1698 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1699 td_evbuf32_t evbuf;
1700
1701 psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
1702 if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
1703 return_val = TD_DBERR;
1704 } else if (evbuf.eventnum == TD_EVENT_NONE) {
1705 return_val = TD_NOEVENT;
1706 } else {
1707 msg->event = evbuf.eventnum;
1708 msg->th_p = (td_thrhandle_t *)th_p;
1709 msg->msg.data = (uintptr_t)evbuf.eventdata;
1710 /* "Consume" the message */
1711 evbuf.eventnum = TD_EVENT_NONE;
1712 evbuf.eventdata = NULL;
1713 if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
1714 != PS_OK)
1715 return_val = TD_DBERR;
1716 }
1717 #else
1718 return_val = TD_ERR;
1719 #endif /* _SYSCALL32 */
1720 }
1721
1722 (void) ps_pcontinue(ph_p);
1723 ph_unlock(th_p->th_ta_p);
1724 return (return_val);
1725 }
1726
1727 /*
1728 * The callback function td_ta_event_getmsg uses when looking for
1729 * a thread with an event. A thin wrapper around td_thr_event_getmsg.
1730 */
1731 static int
1732 event_msg_cb(const td_thrhandle_t *th_p, void *arg)
1733 {
1734 static td_thrhandle_t th;
1735 td_event_msg_t *msg = arg;
1736
1737 if (__td_thr_event_getmsg((td_thrhandle_t *)th_p, msg) == TD_OK) {
1738 /*
1739 * Got an event, stop iterating.
1740 *
1741 * Because of past mistakes in interface definition,
1742 * we are forced to pass back a static local variable
1743 * for the thread handle because th_p is a pointer
1744 * to a local variable in __td_ta_thr_iter().
1745 * Grr...
1746 */
1747 th = *th_p;
1748 msg->th_p = &th;
1749 return (1);
1750 }
1751 return (0);
1752 }
1753
1754 /*
1755 * This function is just like td_thr_event_getmsg, except that it is
1756 * passed a process handle rather than a thread handle, and returns
1757 * an event message for some thread in the process that has an event
1758 * message pending. If no thread has an event message pending, this
1759 * routine returns TD_NOEVENT. Thus, all pending event messages may
1760 * be collected from a process by repeatedly calling this routine
1761 * until it returns TD_NOEVENT.
1762 */
1763 #pragma weak td_ta_event_getmsg = __td_ta_event_getmsg
1764 td_err_e
1765 __td_ta_event_getmsg(td_thragent_t *ta_p, td_event_msg_t *msg)
1766 {
1767 td_err_e return_val;
1768
1769 if (ta_p == NULL)
1770 return (TD_BADTA);
1771 if (ta_p->ph_p == NULL)
1772 return (TD_BADPH);
1773 if (msg == NULL)
1774 return (TD_ERR);
1775 msg->event = TD_EVENT_NONE;
1776 if ((return_val = __td_ta_thr_iter(ta_p, event_msg_cb, msg,
1777 TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK,
1778 TD_THR_ANY_USER_FLAGS)) != TD_OK)
1779 return (return_val);
1780 if (msg->event == TD_EVENT_NONE)
1781 return (TD_NOEVENT);
1782 return (TD_OK);
1783 }
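
/*
 * Example (hypothetical debugger): drain every pending event message
 * after the process stops on an event breakpoint.
 *
 *	td_event_msg_t msg;
 *
 *	while (td_ta_event_getmsg(ta, &msg) == TD_OK)
 *		(void) printf("event %d data %lx\n",
 *		    (int)msg.event, (ulong_t)msg.msg.data);
 */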
1784
1785 static lwpid_t
1786 thr_to_lwpid(const td_thrhandle_t *th_p)
1787 {
1788 struct ps_prochandle *ph_p = th_p->th_ta_p->ph_p;
1789 lwpid_t lwpid;
1790
1791 /*
1792 * The caller holds the prochandle lock
1793 	 * and has already verified everything.
1794 */
1795 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1796 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1797
1798 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
1799 &lwpid, sizeof (lwpid)) != PS_OK)
1800 lwpid = 0;
1801 else if (lwpid == 0)
1802 lwpid = 1;
1803 } else {
1804 #if defined(_LP64) && defined(_SYSCALL32)
1805 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1806
1807 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
1808 &lwpid, sizeof (lwpid)) != PS_OK)
1809 lwpid = 0;
1810 else if (lwpid == 0)
1811 lwpid = 1;
1812 #else
1813 lwpid = 0;
1814 #endif /* _SYSCALL32 */
1815 }
1816
1817 return (lwpid);
1818 }
1819
1820 /*
1821 * Suspend a thread.
1822 * XXX: What does this mean in a one-level model?
1823 */
1824 #pragma weak td_thr_dbsuspend = __td_thr_dbsuspend
1825 td_err_e
1826 __td_thr_dbsuspend(const td_thrhandle_t *th_p)
1827 {
1828 struct ps_prochandle *ph_p;
1829 td_err_e return_val;
1830
1831 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1832 return (return_val);
1833 if (ps_lstop(ph_p, thr_to_lwpid(th_p)) != PS_OK)
1834 return_val = TD_DBERR;
1835 ph_unlock(th_p->th_ta_p);
1836 return (return_val);
1837 }
1838
1839 /*
1840 * Resume a suspended thread.
1841 * XXX: What does this mean in a one-level model?
1842 */
1843 #pragma weak td_thr_dbresume = __td_thr_dbresume
1844 td_err_e
1845 __td_thr_dbresume(const td_thrhandle_t *th_p)
1846 {
1847 struct ps_prochandle *ph_p;
1848 td_err_e return_val;
1849
1850 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1851 return (return_val);
1852 if (ps_lcontinue(ph_p, thr_to_lwpid(th_p)) != PS_OK)
1853 return_val = TD_DBERR;
1854 ph_unlock(th_p->th_ta_p);
1855 return (return_val);
1856 }
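
/*
 * Illustrative sketch only: a client that wants to examine a thread
 * while it is stopped would typically bracket the work with the two
 * calls above ("th" is a previously obtained, hypothetical handle).
 *
 *	if (td_thr_dbsuspend(&th) == TD_OK) {
 *		... examine the stopped thread ...
 *		(void) td_thr_dbresume(&th);
 *	}
 */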
1857
1858 /*
1859 * Set a thread's signal mask.
1860 * Currently unused by dbx.
1861 */
1862 #pragma weak td_thr_sigsetmask = __td_thr_sigsetmask
1863 /* ARGSUSED */
1864 td_err_e
1865 __td_thr_sigsetmask(const td_thrhandle_t *th_p, const sigset_t ti_sigmask)
1866 {
1867 return (TD_NOCAPAB);
1868 }
1869
1870 /*
1871 * Set a thread's "signals-pending" set.
1872 * Currently unused by dbx.
1873 */
1874 #pragma weak td_thr_setsigpending = __td_thr_setsigpending
1875 /* ARGSUSED */
1876 td_err_e
1877 __td_thr_setsigpending(const td_thrhandle_t *th_p,
1878 uchar_t ti_pending_flag, const sigset_t ti_pending)
1879 {
1880 return (TD_NOCAPAB);
1881 }
1882
1883 /*
1884 * Get a thread's general register set.
1885 */
1886 #pragma weak td_thr_getgregs = __td_thr_getgregs
1887 td_err_e
1888 __td_thr_getgregs(td_thrhandle_t *th_p, prgregset_t regset)
1889 {
1890 struct ps_prochandle *ph_p;
1891 td_err_e return_val;
1892
1893 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1894 return (return_val);
1895 if (ps_pstop(ph_p) != PS_OK) {
1896 ph_unlock(th_p->th_ta_p);
1897 return (TD_DBERR);
1898 }
1899
1900 if (ps_lgetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
1901 return_val = TD_DBERR;
1902
1903 (void) ps_pcontinue(ph_p);
1904 ph_unlock(th_p->th_ta_p);
1905 return (return_val);
1906 }
1907
1908 /*
1909 * Set a thread's general register set.
1910 */
1911 #pragma weak td_thr_setgregs = __td_thr_setgregs
1912 td_err_e
1913 __td_thr_setgregs(td_thrhandle_t *th_p, const prgregset_t regset)
1914 {
1915 struct ps_prochandle *ph_p;
1916 td_err_e return_val;
1917
1918 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1919 return (return_val);
1920 if (ps_pstop(ph_p) != PS_OK) {
1921 ph_unlock(th_p->th_ta_p);
1922 return (TD_DBERR);
1923 }
1924
1925 if (ps_lsetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
1926 return_val = TD_DBERR;
1927
1928 (void) ps_pcontinue(ph_p);
1929 ph_unlock(th_p->th_ta_p);
1930 return (return_val);
1931 }
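
/*
 * Illustrative sketch only: a get/set round trip with the two
 * interfaces above.  Register indices are architecture-specific, so
 * this sketch just reads the set and writes it back unchanged
 * ("th" is a hypothetical thread handle).
 *
 *	prgregset_t regs;
 *
 *	if (td_thr_getgregs(&th, regs) == TD_OK)
 *		(void) td_thr_setgregs(&th, regs);
 */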
1932
1933 /*
1934 * Get a thread's floating-point register set.
1935 */
1936 #pragma weak td_thr_getfpregs = __td_thr_getfpregs
1937 td_err_e
1938 __td_thr_getfpregs(td_thrhandle_t *th_p, prfpregset_t *fpregset)
1939 {
1940 struct ps_prochandle *ph_p;
1941 td_err_e return_val;
1942
1943 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1944 return (return_val);
1945 if (ps_pstop(ph_p) != PS_OK) {
1946 ph_unlock(th_p->th_ta_p);
1947 return (TD_DBERR);
1948 }
1949
1950 if (ps_lgetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
1951 return_val = TD_DBERR;
1952
1953 (void) ps_pcontinue(ph_p);
1954 ph_unlock(th_p->th_ta_p);
1955 return (return_val);
1956 }
1957
1958 /*
1959 * Set a thread's floating-point register set.
1960 */
1961 #pragma weak td_thr_setfpregs = __td_thr_setfpregs
1962 td_err_e
1963 __td_thr_setfpregs(td_thrhandle_t *th_p, const prfpregset_t *fpregset)
1964 {
1965 struct ps_prochandle *ph_p;
1966 td_err_e return_val;
1967
1968 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1969 return (return_val);
1970 if (ps_pstop(ph_p) != PS_OK) {
1971 ph_unlock(th_p->th_ta_p);
1972 return (TD_DBERR);
1973 }
1974
1975 if (ps_lsetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
1976 return_val = TD_DBERR;
1977
1978 (void) ps_pcontinue(ph_p);
1979 ph_unlock(th_p->th_ta_p);
1980 return (return_val);
1981 }
1982
1983 /*
1984 * Get the size of the extra state register set for this architecture.
1985 * Currently unused by dbx.
1986 */
1987 #pragma weak td_thr_getxregsize = __td_thr_getxregsize
1988 /* ARGSUSED */
1989 td_err_e
1990 __td_thr_getxregsize(td_thrhandle_t *th_p, int *xregsize)
1991 {
1992 #if defined(__sparc)
1993 struct ps_prochandle *ph_p;
1994 td_err_e return_val;
1995
1996 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1997 return (return_val);
1998 if (ps_pstop(ph_p) != PS_OK) {
1999 ph_unlock(th_p->th_ta_p);
2000 return (TD_DBERR);
2001 }
2002
2003 if (ps_lgetxregsize(ph_p, thr_to_lwpid(th_p), xregsize) != PS_OK)
2004 return_val = TD_DBERR;
2005
2006 (void) ps_pcontinue(ph_p);
2007 ph_unlock(th_p->th_ta_p);
2008 return (return_val);
2009 #else /* __sparc */
2010 return (TD_NOXREGS);
2011 #endif /* __sparc */
2012 }
2013
2014 /*
2015 * Get a thread's extra state register set.
2016 */
2017 #pragma weak td_thr_getxregs = __td_thr_getxregs
2018 /* ARGSUSED */
2019 td_err_e
2020 __td_thr_getxregs(td_thrhandle_t *th_p, void *xregset)
2021 {
2022 #if defined(__sparc)
2023 struct ps_prochandle *ph_p;
2024 td_err_e return_val;
2025
2026 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2027 return (return_val);
2028 if (ps_pstop(ph_p) != PS_OK) {
2029 ph_unlock(th_p->th_ta_p);
2030 return (TD_DBERR);
2031 }
2032
2033 if (ps_lgetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
2034 return_val = TD_DBERR;
2035
2036 (void) ps_pcontinue(ph_p);
2037 ph_unlock(th_p->th_ta_p);
2038 return (return_val);
2039 #else /* __sparc */
2040 return (TD_NOXREGS);
2041 #endif /* __sparc */
2042 }
2043
2044 /*
2045 * Set a thread's extra state register set.
2046 */
2047 #pragma weak td_thr_setxregs = __td_thr_setxregs
2048 /* ARGSUSED */
2049 td_err_e
2050 __td_thr_setxregs(td_thrhandle_t *th_p, const void *xregset)
2051 {
2052 #if defined(__sparc)
2053 struct ps_prochandle *ph_p;
2054 td_err_e return_val;
2055
2056 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2057 return (return_val);
2058 if (ps_pstop(ph_p) != PS_OK) {
2059 ph_unlock(th_p->th_ta_p);
2060 return (TD_DBERR);
2061 }
2062
2063 if (ps_lsetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
2064 return_val = TD_DBERR;
2065
2066 (void) ps_pcontinue(ph_p);
2067 ph_unlock(th_p->th_ta_p);
2068 return (return_val);
2069 #else /* __sparc */
2070 return (TD_NOXREGS);
2071 #endif /* __sparc */
2072 }
2073
2074 struct searcher {
2075 psaddr_t addr;
2076 int status;
2077 };
2078
2079 /*
2080 * Check the struct thread address in *th_p against the first
2081 * value in "data". If the values match, set the second value
2082 * in "data" to 1 and return 1 to terminate the iteration.
2083 * This function is used by td_thr_validate() to verify that
2084 * a thread handle is valid.
2085 */
2086 static int
2087 td_searcher(const td_thrhandle_t *th_p, void *data)
2088 {
2089 struct searcher *searcher_data = (struct searcher *)data;
2090
2091 if (searcher_data->addr == th_p->th_unique) {
2092 searcher_data->status = 1;
2093 return (1);
2094 }
2095 return (0);
2096 }
2097
2098 /*
2099 * Validate the thread handle. Check that
2100 * a thread exists in the thread agent/process that
2101 * corresponds to the thread with handle *th_p.
2102 * Currently unused by dbx.
2103 */
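/*
 * Illustrative sketch only: a debugger holding a cached handle "th"
 * could check whether the thread still exists before using it.
 *
 *	if (td_thr_validate(&th) != TD_OK)
 *		... discard the cached handle ...
 */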
2104 #pragma weak td_thr_validate = __td_thr_validate
2105 td_err_e
2106 __td_thr_validate(const td_thrhandle_t *th_p)
2107 {
2108 td_err_e return_val;
2109 struct searcher searcher_data = {0, 0};
2110
2111 if (th_p == NULL)
2112 return (TD_BADTH);
2113 if (th_p->th_unique == NULL || th_p->th_ta_p == NULL)
2114 return (TD_BADTH);
2115
2116 /*
2117 * LOCKING EXCEPTION - Locking is not required
2118 * here because no use of the thread agent is made (other
2119 * than the sanity check) and checking of the thread
2120 * agent will be done in __td_ta_thr_iter.
2121 */
2122
2123 searcher_data.addr = th_p->th_unique;
2124 return_val = __td_ta_thr_iter(th_p->th_ta_p,
2125 td_searcher, &searcher_data,
2126 TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
2127 TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
2128
2129 if (return_val == TD_OK && searcher_data.status == 0)
2130 return_val = TD_NOTHR;
2131
2132 return (return_val);
2133 }
2134
2135 /*
2136 * Get a thread's private binding to a given thread-specific
2137 * data (TSD) key (see thr_getspecific(3T)). If the thread doesn't
2138 * have a binding for a particular key, then NULL is returned.
2139 */
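/*
 * Illustrative sketch only: look up the target thread's value for a
 * key that was created in the target with thr_keycreate() ("th" and
 * "key" are hypothetical).
 *
 *	void *value;
 *
 *	if (td_thr_tsd(&th, key, &value) == TD_OK && value != NULL)
 *		... "value" is the thread's binding for "key" ...
 */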
2140 #pragma weak td_thr_tsd = __td_thr_tsd
2141 td_err_e
2142 __td_thr_tsd(td_thrhandle_t *th_p, thread_key_t key, void **data_pp)
2143 {
2144 struct ps_prochandle *ph_p;
2145 td_thragent_t *ta_p;
2146 td_err_e return_val;
2147 int maxkey;
2148 int nkey;
2149 psaddr_t tsd_paddr;
2150
2151 if (data_pp == NULL)
2152 return (TD_ERR);
2153 *data_pp = NULL;
2154 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2155 return (return_val);
2156 ta_p = th_p->th_ta_p;
2157 if (ps_pstop(ph_p) != PS_OK) {
2158 ph_unlock(ta_p);
2159 return (TD_DBERR);
2160 }
2161
2162 if (ta_p->model == PR_MODEL_NATIVE) {
2163 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2164 tsd_metadata_t tsdm;
2165 tsd_t stsd;
2166
2167 if (ps_pdread(ph_p,
2168 ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
2169 &tsdm, sizeof (tsdm)) != PS_OK)
2170 return_val = TD_DBERR;
2171 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
2172 &tsd_paddr, sizeof (tsd_paddr)) != PS_OK)
2173 return_val = TD_DBERR;
2174 else if (tsd_paddr != NULL &&
2175 ps_pdread(ph_p, tsd_paddr, &stsd, sizeof (stsd)) != PS_OK)
2176 return_val = TD_DBERR;
2177 else {
2178 maxkey = tsdm.tsdm_nused;
2179 nkey = tsd_paddr == NULL ? TSD_NFAST : stsd.tsd_nalloc;
2180
2181 if (key < TSD_NFAST)
2182 tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
2183 }
2184 } else {
2185 #if defined(_LP64) && defined(_SYSCALL32)
2186 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2187 tsd_metadata32_t tsdm;
2188 tsd32_t stsd;
2189 caddr32_t addr;
2190
2191 if (ps_pdread(ph_p,
2192 ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
2193 &tsdm, sizeof (tsdm)) != PS_OK)
2194 return_val = TD_DBERR;
2195 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
2196 &addr, sizeof (addr)) != PS_OK)
2197 return_val = TD_DBERR;
2198 else if (addr != NULL &&
2199 ps_pdread(ph_p, addr, &stsd, sizeof (stsd)) != PS_OK)
2200 return_val = TD_DBERR;
2201 else {
2202 maxkey = tsdm.tsdm_nused;
2203 nkey = addr == NULL ? TSD_NFAST : stsd.tsd_nalloc;
2204
2205 if (key < TSD_NFAST) {
2206 tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
2207 } else {
2208 tsd_paddr = addr;
2209 }
2210 }
2211 #else
2212 return_val = TD_ERR;
2213 #endif /* _SYSCALL32 */
2214 }
2215
2216 if (return_val == TD_OK && (key < 1 || key >= maxkey))
2217 return_val = TD_NOTSD;
2218 if (return_val != TD_OK || key >= nkey) {
2219 /* NULL has already been stored in data_pp */
2220 (void) ps_pcontinue(ph_p);
2221 ph_unlock(ta_p);
2222 return (return_val);
2223 }
2224
2225 /*
2226 * Read the value from the thread's tsd array.
2227 */
2228 if (ta_p->model == PR_MODEL_NATIVE) {
2229 void *value;
2230
2231 if (ps_pdread(ph_p, tsd_paddr + key * sizeof (void *),
2232 &value, sizeof (value)) != PS_OK)
2233 return_val = TD_DBERR;
2234 else
2235 *data_pp = value;
2236 #if defined(_LP64) && defined(_SYSCALL32)
2237 } else {
2238 caddr32_t value32;
2239
2240 if (ps_pdread(ph_p, tsd_paddr + key * sizeof (caddr32_t),
2241 &value32, sizeof (value32)) != PS_OK)
2242 return_val = TD_DBERR;
2243 else
2244 *data_pp = (void *)(uintptr_t)value32;
2245 #endif /* _SYSCALL32 */
2246 }
2247
2248 (void) ps_pcontinue(ph_p);
2249 ph_unlock(ta_p);
2250 return (return_val);
2251 }
2252
2253 /*
2254 * Get the base address of a thread's thread local storage (TLS) block
2255 * for the module (executable or shared object) identified by 'moduleid'.
2256 */
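/*
 * Illustrative sketch only: once the base is known, a debugger can
 * locate a particular TLS variable by adding the variable's offset
 * within the module's TLS template (obtained elsewhere, e.g. from the
 * symbol table) to the returned base.  "th", "moduleid" and
 * "tls_offset_of_variable" are hypothetical.
 *
 *	psaddr_t base;
 *
 *	if (td_thr_tlsbase(&th, moduleid, &base) == TD_OK)
 *		var_addr = base + tls_offset_of_variable;
 */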
2257 #pragma weak td_thr_tlsbase = __td_thr_tlsbase
2258 td_err_e
2259 __td_thr_tlsbase(td_thrhandle_t *th_p, ulong_t moduleid, psaddr_t *base)
2260 {
2261 struct ps_prochandle *ph_p;
2262 td_thragent_t *ta_p;
2263 td_err_e return_val;
2264
2265 if (base == NULL)
2266 return (TD_ERR);
2267 *base = NULL;
2268 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2269 return (return_val);
2270 ta_p = th_p->th_ta_p;
2271 if (ps_pstop(ph_p) != PS_OK) {
2272 ph_unlock(ta_p);
2273 return (TD_DBERR);
2274 }
2275
2276 if (ta_p->model == PR_MODEL_NATIVE) {
2277 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2278 tls_metadata_t tls_metadata;
2279 TLS_modinfo tlsmod;
2280 tls_t tls;
2281
2282 if (ps_pdread(ph_p,
2283 ta_p->uberdata_addr + offsetof(uberdata_t, tls_metadata),
2284 &tls_metadata, sizeof (tls_metadata)) != PS_OK)
2285 return_val = TD_DBERR;
2286 else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
2287 return_val = TD_NOTLS;
2288 else if (ps_pdread(ph_p,
2289 (psaddr_t)((TLS_modinfo *)
2290 tls_metadata.tls_modinfo.tls_data + moduleid),
2291 &tlsmod, sizeof (tlsmod)) != PS_OK)
2292 return_val = TD_DBERR;
2293 else if (tlsmod.tm_memsz == 0)
2294 return_val = TD_NOTLS;
2295 else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
2296 *base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
2297 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
2298 &tls, sizeof (tls)) != PS_OK)
2299 return_val = TD_DBERR;
2300 else if (moduleid >= tls.tls_size)
2301 return_val = TD_TLSDEFER;
2302 else if (ps_pdread(ph_p,
2303 (psaddr_t)((tls_t *)tls.tls_data + moduleid),
2304 &tls, sizeof (tls)) != PS_OK)
2305 return_val = TD_DBERR;
2306 else if (tls.tls_size == 0)
2307 return_val = TD_TLSDEFER;
2308 else
2309 *base = (psaddr_t)tls.tls_data;
2310 } else {
2311 #if defined(_LP64) && defined(_SYSCALL32)
2312 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2313 tls_metadata32_t tls_metadata;
2314 TLS_modinfo32 tlsmod;
2315 tls32_t tls;
2316
2317 if (ps_pdread(ph_p,
2318 ta_p->uberdata_addr + offsetof(uberdata32_t, tls_metadata),
2319 &tls_metadata, sizeof (tls_metadata)) != PS_OK)
2320 return_val = TD_DBERR;
2321 else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
2322 return_val = TD_NOTLS;
2323 else if (ps_pdread(ph_p,
2324 (psaddr_t)((TLS_modinfo32 *)
2325 (uintptr_t)tls_metadata.tls_modinfo.tls_data + moduleid),
2326 &tlsmod, sizeof (tlsmod)) != PS_OK)
2327 return_val = TD_DBERR;
2328 else if (tlsmod.tm_memsz == 0)
2329 return_val = TD_NOTLS;
2330 else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
2331 *base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
2332 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
2333 &tls, sizeof (tls)) != PS_OK)
2334 return_val = TD_DBERR;
2335 else if (moduleid >= tls.tls_size)
2336 return_val = TD_TLSDEFER;
2337 else if (ps_pdread(ph_p,
2338 (psaddr_t)((tls32_t *)(uintptr_t)tls.tls_data + moduleid),
2339 &tls, sizeof (tls)) != PS_OK)
2340 return_val = TD_DBERR;
2341 else if (tls.tls_size == 0)
2342 return_val = TD_TLSDEFER;
2343 else
2344 *base = (psaddr_t)tls.tls_data;
2345 #else
2346 return_val = TD_ERR;
2347 #endif /* _SYSCALL32 */
2348 }
2349
2350 (void) ps_pcontinue(ph_p);
2351 ph_unlock(ta_p);
2352 return (return_val);
2353 }
2354
2355 /*
2356 * Change a thread's priority to the value specified by ti_pri.
2357 * Currently unused by dbx.
2358 */
2359 #pragma weak td_thr_setprio = __td_thr_setprio
2360 /* ARGSUSED */
2361 td_err_e
2362 __td_thr_setprio(td_thrhandle_t *th_p, int ti_pri)
2363 {
2364 return (TD_NOCAPAB);
2365 }
2366
2367 /*
2368 * This structure links td_thr_lockowner and the lowner_cb callback function.
2369 */
2370 typedef struct {
2371 td_sync_iter_f *owner_cb;
2372 void *owner_cb_arg;
2373 td_thrhandle_t *th_p;
2374 } lowner_cb_ctl_t;
2375
2376 static int
2377 lowner_cb(const td_synchandle_t *sh_p, void *arg)
2378 {
2379 lowner_cb_ctl_t *ocb = arg;
2380 int trunc = 0;
2381 union {
2382 rwlock_t rwl;
2383 mutex_t mx;
2384 } rw_m;
2385
2386 if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
2387 &rw_m, sizeof (rw_m)) != PS_OK) {
2388 trunc = 1;
2389 if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
2390 &rw_m.mx, sizeof (rw_m.mx)) != PS_OK)
2391 return (0);
2392 }
2393 if (rw_m.mx.mutex_magic == MUTEX_MAGIC &&
2394 rw_m.mx.mutex_owner == ocb->th_p->th_unique)
2395 return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
2396 if (!trunc && rw_m.rwl.magic == RWL_MAGIC) {
2397 mutex_t *rwlock = &rw_m.rwl.mutex;
2398 if (rwlock->mutex_owner == ocb->th_p->th_unique)
2399 return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
2400 }
2401 return (0);
2402 }
2403
2404 /*
2405 * Iterate over the set of locks owned by a specified thread.
2406 * If cb returns a non-zero value, terminate iterations.
2407 */
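/*
 * Illustrative sketch only: a callback that simply counts the locks
 * owned by a thread might look like this; count_lock() and the
 * surrounding names are hypothetical.
 *
 *	static int
 *	count_lock(const td_synchandle_t *sh_p, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (0);
 *	}
 *
 *	int nlocks = 0;
 *	(void) td_thr_lockowner(&th, count_lock, &nlocks);
 */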
2408 #pragma weak td_thr_lockowner = __td_thr_lockowner
2409 td_err_e
2410 __td_thr_lockowner(const td_thrhandle_t *th_p, td_sync_iter_f *cb,
2411 void *cb_data)
2412 {
2413 td_thragent_t *ta_p;
2414 td_err_e return_val;
2415 lowner_cb_ctl_t lcb;
2416
2417 /*
2418 * Just sanity checks.
2419 */
2420 if (ph_lock_th((td_thrhandle_t *)th_p, &return_val) == NULL)
2421 return (return_val);
2422 ta_p = th_p->th_ta_p;
2423 ph_unlock(ta_p);
2424
2425 lcb.owner_cb = cb;
2426 lcb.owner_cb_arg = cb_data;
2427 lcb.th_p = (td_thrhandle_t *)th_p;
2428 return (__td_ta_sync_iter(ta_p, lowner_cb, &lcb));
2429 }
2430
2431 /*
2432 * If a thread is asleep on a synchronization variable,
2433 * then get the synchronization handle.
2434 */
2435 #pragma weak td_thr_sleepinfo = __td_thr_sleepinfo
2436 td_err_e
2437 __td_thr_sleepinfo(const td_thrhandle_t *th_p, td_synchandle_t *sh_p)
2438 {
2439 struct ps_prochandle *ph_p;
2440 td_err_e return_val = TD_OK;
2441 uintptr_t wchan;
2442
2443 if (sh_p == NULL)
2444 return (TD_ERR);
2445 if ((ph_p = ph_lock_th((td_thrhandle_t *)th_p, &return_val)) == NULL)
2446 return (return_val);
2447
2448 /*
2449 * No need to stop the process for a simple read.
2450 */
2451 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
2452 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2453
2454 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2455 &wchan, sizeof (wchan)) != PS_OK)
2456 return_val = TD_DBERR;
2457 } else {
2458 #if defined(_LP64) && defined(_SYSCALL32)
2459 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2460 caddr32_t wchan32;
2461
2462 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2463 &wchan32, sizeof (wchan32)) != PS_OK)
2464 return_val = TD_DBERR;
2465 wchan = wchan32;
2466 #else
2467 return_val = TD_ERR;
2468 #endif /* _SYSCALL32 */
2469 }
2470
2471 if (return_val != TD_OK || wchan == NULL) {
2472 sh_p->sh_ta_p = NULL;
2473 sh_p->sh_unique = NULL;
2474 if (return_val == TD_OK)
2475 return_val = TD_ERR;
2476 } else {
2477 sh_p->sh_ta_p = th_p->th_ta_p;
2478 sh_p->sh_unique = (psaddr_t)wchan;
2479 }
2480
2481 ph_unlock(th_p->th_ta_p);
2482 return (return_val);
2483 }
2484
2485 /*
2486 * Which thread is running on an lwp?
2487 */
2488 #pragma weak td_ta_map_lwp2thr = __td_ta_map_lwp2thr
2489 td_err_e
2490 __td_ta_map_lwp2thr(td_thragent_t *ta_p, lwpid_t lwpid,
2491 td_thrhandle_t *th_p)
2492 {
2493 return (__td_ta_map_id2thr(ta_p, lwpid, th_p));
2494 }
2495
2496 /*
2497 * Common code for td_sync_get_info() and td_sync_get_stats()
2498 */
2499 static td_err_e
2500 sync_get_info_common(const td_synchandle_t *sh_p, struct ps_prochandle *ph_p,
2501 td_syncinfo_t *si_p)
2502 {
2503 int trunc = 0;
2504 td_so_un_t generic_so;
2505
2506 /*
2507 * Determine the sync. object type; a little type fudgery here.
2508 * First attempt to read the whole union. If that fails, attempt
2509 * to read just the condvar. A condvar is the smallest sync. object.
2510 */
2511 if (ps_pdread(ph_p, sh_p->sh_unique,
2512 &generic_so, sizeof (generic_so)) != PS_OK) {
2513 trunc = 1;
2514 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
2515 sizeof (generic_so.condition)) != PS_OK)
2516 return (TD_DBERR);
2517 }
2518
2519 switch (generic_so.condition.cond_magic) {
2520 case MUTEX_MAGIC:
2521 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2522 &generic_so.lock, sizeof (generic_so.lock)) != PS_OK)
2523 return (TD_DBERR);
2524 si_p->si_type = TD_SYNC_MUTEX;
2525 si_p->si_shared_type =
2526 (generic_so.lock.mutex_type & USYNC_PROCESS);
2527 (void) memcpy(si_p->si_flags, &generic_so.lock.mutex_flag,
2528 sizeof (generic_so.lock.mutex_flag));
2529 si_p->si_state.mutex_locked =
2530 (generic_so.lock.mutex_lockw != 0);
2531 si_p->si_size = sizeof (generic_so.lock);
2532 si_p->si_has_waiters = generic_so.lock.mutex_waiters;
2533 si_p->si_rcount = generic_so.lock.mutex_rcount;
2534 si_p->si_prioceiling = generic_so.lock.mutex_ceiling;
2535 if (si_p->si_state.mutex_locked) {
2536 if (si_p->si_shared_type & USYNC_PROCESS)
2537 si_p->si_ownerpid =
2538 generic_so.lock.mutex_ownerpid;
2539 si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2540 si_p->si_owner.th_unique = generic_so.lock.mutex_owner;
2541 }
2542 break;
2543 case COND_MAGIC:
2544 si_p->si_type = TD_SYNC_COND;
2545 si_p->si_shared_type =
2546 (generic_so.condition.cond_type & USYNC_PROCESS);
2547 (void) memcpy(si_p->si_flags, generic_so.condition.flags.flag,
2548 sizeof (generic_so.condition.flags.flag));
2549 si_p->si_size = sizeof (generic_so.condition);
2550 si_p->si_has_waiters =
2551 (generic_so.condition.cond_waiters_user |
2552 generic_so.condition.cond_waiters_kernel)? 1 : 0;
2553 break;
2554 case SEMA_MAGIC:
2555 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2556 &generic_so.semaphore, sizeof (generic_so.semaphore))
2557 != PS_OK)
2558 return (TD_DBERR);
2559 si_p->si_type = TD_SYNC_SEMA;
2560 si_p->si_shared_type =
2561 (generic_so.semaphore.type & USYNC_PROCESS);
2562 si_p->si_state.sem_count = generic_so.semaphore.count;
2563 si_p->si_size = sizeof (generic_so.semaphore);
2564 si_p->si_has_waiters =
2565 ((lwp_sema_t *)&generic_so.semaphore)->flags[7];
2566 /* this is useless but the old interface provided it */
2567 si_p->si_data = (psaddr_t)generic_so.semaphore.count;
2568 break;
2569 case RWL_MAGIC:
2570 {
2571 uint32_t rwstate;
2572
2573 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2574 &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK)
2575 return (TD_DBERR);
2576 si_p->si_type = TD_SYNC_RWLOCK;
2577 si_p->si_shared_type =
2578 (generic_so.rwlock.rwlock_type & USYNC_PROCESS);
2579 si_p->si_size = sizeof (generic_so.rwlock);
2580
2581 rwstate = (uint32_t)generic_so.rwlock.rwlock_readers;
2582 if (rwstate & URW_WRITE_LOCKED) {
2583 si_p->si_state.nreaders = -1;
2584 si_p->si_is_wlock = 1;
2585 si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2586 si_p->si_owner.th_unique =
2587 generic_so.rwlock.rwlock_owner;
2588 if (si_p->si_shared_type & USYNC_PROCESS)
2589 si_p->si_ownerpid =
2590 generic_so.rwlock.rwlock_ownerpid;
2591 } else {
2592 si_p->si_state.nreaders = (rwstate & URW_READERS_MASK);
2593 }
2594 si_p->si_has_waiters = ((rwstate & URW_HAS_WAITERS) != 0);
2595
2596 /* this is useless but the old interface provided it */
2597 si_p->si_data = (psaddr_t)generic_so.rwlock.readers;
2598 break;
2599 }
2600 default:
2601 return (TD_BADSH);
2602 }
2603
2604 si_p->si_ta_p = sh_p->sh_ta_p;
2605 si_p->si_sv_addr = sh_p->sh_unique;
2606 return (TD_OK);
2607 }
2608
2609 /*
2610 * Given a synchronization handle, fill in the
2611 * information for the synchronization variable into *si_p.
2612 */
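/*
 * Illustrative sketch only: a client can use the returned si_type to
 * decide how to present a synchronization object ("sh" is a
 * hypothetical handle).
 *
 *	td_syncinfo_t si;
 *
 *	if (td_sync_get_info(&sh, &si) == TD_OK &&
 *	    si.si_type == TD_SYNC_MUTEX && si.si_state.mutex_locked)
 *		... report si.si_owner as the owning thread ...
 */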
2613 #pragma weak td_sync_get_info = __td_sync_get_info
2614 td_err_e
2615 __td_sync_get_info(const td_synchandle_t *sh_p, td_syncinfo_t *si_p)
2616 {
2617 struct ps_prochandle *ph_p;
2618 td_err_e return_val;
2619
2620 if (si_p == NULL)
2621 return (TD_ERR);
2622 (void) memset(si_p, 0, sizeof (*si_p));
2623 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2624 return (return_val);
2625 if (ps_pstop(ph_p) != PS_OK) {
2626 ph_unlock(sh_p->sh_ta_p);
2627 return (TD_DBERR);
2628 }
2629
2630 return_val = sync_get_info_common(sh_p, ph_p, si_p);
2631
2632 (void) ps_pcontinue(ph_p);
2633 ph_unlock(sh_p->sh_ta_p);
2634 return (return_val);
2635 }
2636
2637 static uint_t
2638 tdb_addr_hash64(uint64_t addr)
2639 {
2640 uint64_t value60 = (addr >> 4);
2641 uint32_t value30 = (value60 >> 30) ^ (value60 & 0x3fffffff);
2642 return ((value30 >> 15) ^ (value30 & 0x7fff));
2643 }
2644
2645 static uint_t
2646 tdb_addr_hash32(uint64_t addr)
2647 {
2648 uint32_t value30 = (addr >> 2); /* 30 bits */
2649 return ((value30 >> 15) ^ (value30 & 0x7fff));
2650 }
2651
2652 static td_err_e
2653 read_sync_stats(td_thragent_t *ta_p, psaddr_t hash_table,
2654 psaddr_t sync_obj_addr, tdb_sync_stats_t *sync_stats)
2655 {
2656 psaddr_t next_desc;
2657 uint64_t first;
2658 uint_t ix;
2659
2660 /*
2661 * Compute the hash table index from the synch object's address.
2662 */
2663 if (ta_p->model == PR_MODEL_LP64)
2664 ix = tdb_addr_hash64(sync_obj_addr);
2665 else
2666 ix = tdb_addr_hash32(sync_obj_addr);
2667
2668 /*
2669 * Get the address of the first element in the linked list.
2670 */
2671 if (ps_pdread(ta_p->ph_p, hash_table + ix * sizeof (uint64_t),
2672 &first, sizeof (first)) != PS_OK)
2673 return (TD_DBERR);
2674
2675 /*
2676 * Search the linked list for an entry for the synch object.
2677 */
2678 for (next_desc = (psaddr_t)first; next_desc != NULL;
2679 next_desc = (psaddr_t)sync_stats->next) {
2680 if (ps_pdread(ta_p->ph_p, next_desc,
2681 sync_stats, sizeof (*sync_stats)) != PS_OK)
2682 return (TD_DBERR);
2683 if (sync_stats->sync_addr == sync_obj_addr)
2684 return (TD_OK);
2685 }
2686
2687 (void) memset(sync_stats, 0, sizeof (*sync_stats));
2688 return (TD_OK);
2689 }
2690
2691 /*
2692 * Given a synchronization handle, fill in the
2693 * statistics for the synchronization variable into *ss_p.
2694 */
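/*
 * Illustrative sketch only: statistics are gathered only while
 * sync-object tracking is enabled in the target; otherwise the
 * counters read back as zero ("sh" is a hypothetical handle).
 *
 *	td_syncstats_t ss;
 *
 *	if (td_sync_get_stats(&sh, &ss) == TD_OK &&
 *	    ss.ss_info.si_type == TD_SYNC_MUTEX)
 *		... examine the ss.ss_un.mutex counters ...
 */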
2695 #pragma weak td_sync_get_stats = __td_sync_get_stats
2696 td_err_e
2697 __td_sync_get_stats(const td_synchandle_t *sh_p, td_syncstats_t *ss_p)
2698 {
2699 struct ps_prochandle *ph_p;
2700 td_thragent_t *ta_p;
2701 td_err_e return_val;
2702 register_sync_t enable;
2703 psaddr_t hashaddr;
2704 tdb_sync_stats_t sync_stats;
2705 size_t ix;
2706
2707 if (ss_p == NULL)
2708 return (TD_ERR);
2709 (void) memset(ss_p, 0, sizeof (*ss_p));
2710 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2711 return (return_val);
2712 ta_p = sh_p->sh_ta_p;
2713 if (ps_pstop(ph_p) != PS_OK) {
2714 ph_unlock(ta_p);
2715 return (TD_DBERR);
2716 }
2717
2718 if ((return_val = sync_get_info_common(sh_p, ph_p, &ss_p->ss_info))
2719 != TD_OK) {
2720 if (return_val != TD_BADSH)
2721 goto out;
2722 /* we can correct TD_BADSH */
2723 (void) memset(&ss_p->ss_info, 0, sizeof (ss_p->ss_info));
2724 ss_p->ss_info.si_ta_p = sh_p->sh_ta_p;
2725 ss_p->ss_info.si_sv_addr = sh_p->sh_unique;
2726 /* we correct si_type and si_size below */
2727 return_val = TD_OK;
2728 }
2729 if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
2730 &enable, sizeof (enable)) != PS_OK) {
2731 return_val = TD_DBERR;
2732 goto out;
2733 }
2734 if (enable != REGISTER_SYNC_ON)
2735 goto out;
2736
2737 /*
2738 * Get the address of the hash table in the target process.
2739 */
2740 if (ta_p->model == PR_MODEL_NATIVE) {
2741 if (ps_pdread(ph_p, ta_p->uberdata_addr +
2742 offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
2743 &hashaddr, sizeof (hashaddr)) != PS_OK) {
2744 return_val = TD_DBERR;
2745 goto out;
2746 }
2747 } else {
2748 #if defined(_LP64) && defined(_SYSCALL32)
2749 caddr32_t addr;
2750
2751 if (ps_pdread(ph_p, ta_p->uberdata_addr +
2752 offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
2753 &addr, sizeof (addr)) != PS_OK) {
2754 return_val = TD_DBERR;
2755 goto out;
2756 }
2757 hashaddr = addr;
2758 #else
2759 return_val = TD_ERR;
2760 goto out;
2761 #endif /* _SYSCALL32 */
2762 }
2763
2764 if (hashaddr == 0)
2765 return_val = TD_BADSH;
2766 else
2767 return_val = read_sync_stats(ta_p, hashaddr,
2768 sh_p->sh_unique, &sync_stats);
2769 if (return_val != TD_OK)
2770 goto out;
2771
2772 /*
2773 * We have the hash table entry. Transfer the data to
2774 * the td_syncstats_t structure provided by the caller.
2775 */
2776 switch (sync_stats.un.type) {
2777 case TDB_MUTEX:
2778 {
2779 td_mutex_stats_t *msp = &ss_p->ss_un.mutex;
2780
2781 ss_p->ss_info.si_type = TD_SYNC_MUTEX;
2782 ss_p->ss_info.si_size = sizeof (mutex_t);
2783 msp->mutex_lock =
2784 sync_stats.un.mutex.mutex_lock;
2785 msp->mutex_sleep =
2786 sync_stats.un.mutex.mutex_sleep;
2787 msp->mutex_sleep_time =
2788 sync_stats.un.mutex.mutex_sleep_time;
2789 msp->mutex_hold_time =
2790 sync_stats.un.mutex.mutex_hold_time;
2791 msp->mutex_try =
2792 sync_stats.un.mutex.mutex_try;
2793 msp->mutex_try_fail =
2794 sync_stats.un.mutex.mutex_try_fail;
2795 if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2796 (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2797 < ta_p->hash_size * sizeof (thr_hash_table_t))
2798 msp->mutex_internal =
2799 ix / sizeof (thr_hash_table_t) + 1;
2800 break;
2801 }
2802 case TDB_COND:
2803 {
2804 td_cond_stats_t *csp = &ss_p->ss_un.cond;
2805
2806 ss_p->ss_info.si_type = TD_SYNC_COND;
2807 ss_p->ss_info.si_size = sizeof (cond_t);
2808 csp->cond_wait =
2809 sync_stats.un.cond.cond_wait;
2810 csp->cond_timedwait =
2811 sync_stats.un.cond.cond_timedwait;
2812 csp->cond_wait_sleep_time =
2813 sync_stats.un.cond.cond_wait_sleep_time;
2814 csp->cond_timedwait_sleep_time =
2815 sync_stats.un.cond.cond_timedwait_sleep_time;
2816 csp->cond_timedwait_timeout =
2817 sync_stats.un.cond.cond_timedwait_timeout;
2818 csp->cond_signal =
2819 sync_stats.un.cond.cond_signal;
2820 csp->cond_broadcast =
2821 sync_stats.un.cond.cond_broadcast;
2822 if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2823 (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2824 < ta_p->hash_size * sizeof (thr_hash_table_t))
2825 csp->cond_internal =
2826 ix / sizeof (thr_hash_table_t) + 1;
2827 break;
2828 }
2829 case TDB_RWLOCK:
2830 {
2831 td_rwlock_stats_t *rwsp = &ss_p->ss_un.rwlock;
2832
2833 ss_p->ss_info.si_type = TD_SYNC_RWLOCK;
2834 ss_p->ss_info.si_size = sizeof (rwlock_t);
2835 rwsp->rw_rdlock =
2836 sync_stats.un.rwlock.rw_rdlock;
2837 rwsp->rw_rdlock_try =
2838 sync_stats.un.rwlock.rw_rdlock_try;
2839 rwsp->rw_rdlock_try_fail =
2840 sync_stats.un.rwlock.rw_rdlock_try_fail;
2841 rwsp->rw_wrlock =
2842 sync_stats.un.rwlock.rw_wrlock;
2843 rwsp->rw_wrlock_hold_time =
2844 sync_stats.un.rwlock.rw_wrlock_hold_time;
2845 rwsp->rw_wrlock_try =
2846 sync_stats.un.rwlock.rw_wrlock_try;
2847 rwsp->rw_wrlock_try_fail =
2848 sync_stats.un.rwlock.rw_wrlock_try_fail;
2849 break;
2850 }
2851 case TDB_SEMA:
2852 {
2853 td_sema_stats_t *ssp = &ss_p->ss_un.sema;
2854
2855 ss_p->ss_info.si_type = TD_SYNC_SEMA;
2856 ss_p->ss_info.si_size = sizeof (sema_t);
2857 ssp->sema_wait =
2858 sync_stats.un.sema.sema_wait;
2859 ssp->sema_wait_sleep =
2860 sync_stats.un.sema.sema_wait_sleep;
2861 ssp->sema_wait_sleep_time =
2862 sync_stats.un.sema.sema_wait_sleep_time;
2863 ssp->sema_trywait =
2864 sync_stats.un.sema.sema_trywait;
2865 ssp->sema_trywait_fail =
2866 sync_stats.un.sema.sema_trywait_fail;
2867 ssp->sema_post =
2868 sync_stats.un.sema.sema_post;
2869 ssp->sema_max_count =
2870 sync_stats.un.sema.sema_max_count;
2871 ssp->sema_min_count =
2872 sync_stats.un.sema.sema_min_count;
2873 break;
2874 }
2875 default:
2876 return_val = TD_BADSH;
2877 break;
2878 }
2879
2880 out:
2881 (void) ps_pcontinue(ph_p);
2882 ph_unlock(ta_p);
2883 return (return_val);
2884 }
2885
2886 /*
2887 * Change the state of a synchronization variable.
2888 * 1) mutex lock state set to value
2889 * 2) semaphore's count set to value
2890 * 3) writer's lock set by value < 0
2891 * 3) readers/writer lock: write-locked when value < 0
2892 * 4) readers/writer lock: number of readers set to value >= 0
2893 */
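/*
 * Illustrative sketch only: forcibly releasing a wedged mutex from a
 * debugger amounts to setting its state to zero ("sh" is a
 * hypothetical handle that refers to a mutex).
 *
 *	(void) td_sync_setstate(&sh, 0);
 */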
2894 #pragma weak td_sync_setstate = __td_sync_setstate
2895 td_err_e
2896 __td_sync_setstate(const td_synchandle_t *sh_p, long lvalue)
2897 {
2898 struct ps_prochandle *ph_p;
2899 int trunc = 0;
2900 td_err_e return_val;
2901 td_so_un_t generic_so;
2902 uint32_t *rwstate;
2903 int value = (int)lvalue;
2904
2905 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2906 return (return_val);
2907 if (ps_pstop(ph_p) != PS_OK) {
2908 ph_unlock(sh_p->sh_ta_p);
2909 return (TD_DBERR);
2910 }
2911
2912 /*
2913 * Read the synch. variable information.
2914 * First attempt to read the whole union and if that fails
2915 * fall back to reading only the smallest member, the condvar.
2916 */
2917 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so,
2918 sizeof (generic_so)) != PS_OK) {
2919 trunc = 1;
2920 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
2921 sizeof (generic_so.condition)) != PS_OK) {
2922 (void) ps_pcontinue(ph_p);
2923 ph_unlock(sh_p->sh_ta_p);
2924 return (TD_DBERR);
2925 }
2926 }
2927
2928 /*
2929 * Set the new value in the local copy of the synch. variable
2930 * according to its type, then write it back to the process.
2931 */
2932 switch (generic_so.condition.cond_magic) {
2933 case MUTEX_MAGIC:
2934 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2935 &generic_so.lock, sizeof (generic_so.lock)) != PS_OK) {
2936 return_val = TD_DBERR;
2937 break;
2938 }
2939 generic_so.lock.mutex_lockw = (uint8_t)value;
2940 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.lock,
2941 sizeof (generic_so.lock)) != PS_OK)
2942 return_val = TD_DBERR;
2943 break;
2944 case SEMA_MAGIC:
2945 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2946 &generic_so.semaphore, sizeof (generic_so.semaphore))
2947 != PS_OK) {
2948 return_val = TD_DBERR;
2949 break;
2950 }
2951 generic_so.semaphore.count = value;
2952 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.semaphore,
2953 sizeof (generic_so.semaphore)) != PS_OK)
2954 return_val = TD_DBERR;
2955 break;
2956 case COND_MAGIC:
2957 /* Operation not supported on a condition variable */
2958 return_val = TD_ERR;
2959 break;
2960 case RWL_MAGIC:
2961 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2962 &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK) {
2963 return_val = TD_DBERR;
2964 break;
2965 }
2966 rwstate = (uint32_t *)&generic_so.rwlock.readers;
2967 *rwstate &= URW_HAS_WAITERS;
2968 if (value < 0)
2969 *rwstate |= URW_WRITE_LOCKED;
2970 else
2971 *rwstate |= (value & URW_READERS_MASK);
2972 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.rwlock,
2973 sizeof (generic_so.rwlock)) != PS_OK)
2974 return_val = TD_DBERR;
2975 break;
2976 default:
2977 /* Bad sync. object type */
2978 return_val = TD_BADSH;
2979 break;
2980 }
2981
2982 (void) ps_pcontinue(ph_p);
2983 ph_unlock(sh_p->sh_ta_p);
2984 return (return_val);
2985 }
2986
2987 typedef struct {
2988 td_thr_iter_f *waiter_cb;
2989 psaddr_t sync_obj_addr;
2990 uint16_t sync_magic;
2991 void *waiter_cb_arg;
2992 td_err_e errcode;
2993 } waiter_cb_ctl_t;
2994
2995 static int
2996 waiters_cb(const td_thrhandle_t *th_p, void *arg)
2997 {
2998 td_thragent_t *ta_p = th_p->th_ta_p;
2999 struct ps_prochandle *ph_p = ta_p->ph_p;
3000 waiter_cb_ctl_t *wcb = arg;
3001 caddr_t wchan;
3002
3003 if (ta_p->model == PR_MODEL_NATIVE) {
3004 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
3005
3006 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
3007 &wchan, sizeof (wchan)) != PS_OK) {
3008 wcb->errcode = TD_DBERR;
3009 return (1);
3010 }
3011 } else {
3012 #if defined(_LP64) && defined(_SYSCALL32)
3013 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
3014 caddr32_t wchan32;
3015
3016 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
3017 &wchan32, sizeof (wchan32)) != PS_OK) {
3018 wcb->errcode = TD_DBERR;
3019 return (1);
3020 }
3021 wchan = (caddr_t)(uintptr_t)wchan32;
3022 #else
3023 wcb->errcode = TD_ERR;
3024 return (1);
3025 #endif /* _SYSCALL32 */
3026 }
3027
3028 if (wchan == NULL)
3029 return (0);
3030
3031 if (wchan == (caddr_t)wcb->sync_obj_addr)
3032 return ((*wcb->waiter_cb)(th_p, wcb->waiter_cb_arg));
3033
3034 return (0);
3035 }
3036
3037 /*
3038 * For a given synchronization variable, iterate over the
3039 * set of waiting threads. The call back function is passed
3040 * two parameters, a pointer to a thread handle and a pointer
3041 * to extra call back data.
3042 */
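/*
 * Illustrative sketch only: a callback that counts the threads
 * sleeping on a synchronization object; note_waiter() and the
 * surrounding names are hypothetical.
 *
 *	static int
 *	note_waiter(const td_thrhandle_t *th_p, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (0);
 *	}
 *
 *	int nwaiters = 0;
 *	(void) td_sync_waiters(&sh, note_waiter, &nwaiters);
 */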
3043 #pragma weak td_sync_waiters = __td_sync_waiters
3044 td_err_e
3045 __td_sync_waiters(const td_synchandle_t *sh_p, td_thr_iter_f *cb, void *cb_data)
3046 {
3047 struct ps_prochandle *ph_p;
3048 waiter_cb_ctl_t wcb;
3049 td_err_e return_val;
3050
3051 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
3052 return (return_val);
3053 if (ps_pdread(ph_p,
3054 (psaddr_t)&((mutex_t *)sh_p->sh_unique)->mutex_magic,
3055 (caddr_t)&wcb.sync_magic, sizeof (wcb.sync_magic)) != PS_OK) {
3056 ph_unlock(sh_p->sh_ta_p);
3057 return (TD_DBERR);
3058 }
3059 ph_unlock(sh_p->sh_ta_p);
3060
3061 switch (wcb.sync_magic) {
3062 case MUTEX_MAGIC:
3063 case COND_MAGIC:
3064 case SEMA_MAGIC:
3065 case RWL_MAGIC:
3066 break;
3067 default:
3068 return (TD_BADSH);
3069 }
3070
3071 wcb.waiter_cb = cb;
3072 wcb.sync_obj_addr = sh_p->sh_unique;
3073 wcb.waiter_cb_arg = cb_data;
3074 wcb.errcode = TD_OK;
3075 return_val = __td_ta_thr_iter(sh_p->sh_ta_p, waiters_cb, &wcb,
3076 TD_THR_SLEEP, TD_THR_LOWEST_PRIORITY,
3077 TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
3078
3079 if (return_val != TD_OK)
3080 return (return_val);
3081
3082 return (wcb.errcode);
3083 }
3084