xref: /titanic_52/usr/src/lib/libc_db/common/thread_db.c (revision 84ab085a13f931bc78e7415e7ce921dbaa14fcb3)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <stddef.h>
32 #include <unistd.h>
33 #include <thr_uberdata.h>
34 #include <thread_db.h>
35 #include <libc_int.h>
36 
37 /*
38  * Private structures.
39  */
40 
41 typedef union {
42 	mutex_t		lock;
43 	rwlock_t	rwlock;
44 	sema_t		semaphore;
45 	cond_t		condition;
46 } td_so_un_t;
47 
48 struct td_thragent {
49 	rwlock_t	rwlock;
50 	struct ps_prochandle *ph_p;
51 	int		initialized;
52 	int		sync_tracking;
53 	int		model;
54 	int		primary_map;
55 	psaddr_t	bootstrap_addr;
56 	psaddr_t	uberdata_addr;
57 	psaddr_t	tdb_eventmask_addr;
58 	psaddr_t	tdb_register_sync_addr;
59 	psaddr_t	tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
60 	psaddr_t	hash_table_addr;
61 	int		hash_size;
62 	lwpid_t		single_lwpid;
63 	psaddr_t	single_ulwp_addr;
64 };
65 
66 /*
67  * This is the name of the variable in libc that contains
68  * the uberdata address that we will need.
69  */
70 #define	TD_BOOTSTRAP_NAME	"_tdb_bootstrap"
71 /*
72  * This is the actual name of uberdata, used in the event
73  * that tdb_bootstrap has not yet been initialized.
74  */
75 #define	TD_UBERDATA_NAME	"_uberdata"
76 /*
77  * The library name should end with ".so.1", but older versions of
78  * dbx expect the unadorned name and malfunction if ".1" is specified.
79  * Unfortunately, if ".1" is not specified, mdb malfunctions when it
80  * is applied to another instance of itself (due to the presence of
81  * /usr/lib/mdb/proc/libc.so).  So we try it both ways.
82  */
83 #define	TD_LIBRARY_NAME		"libc.so"
84 #define	TD_LIBRARY_NAME_1	"libc.so.1"
85 
86 td_err_e __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p);
87 
88 td_err_e __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
89 	void *cbdata_p, td_thr_state_e state, int ti_pri,
90 	sigset_t *ti_sigmask_p, unsigned ti_user_flags);
91 
92 /*
93  * Initialize threads debugging interface.
94  */
95 #pragma weak td_init = __td_init
96 td_err_e
97 __td_init()
98 {
99 	return (TD_OK);
100 }
101 
102 /*
103  * This function does nothing, and never did.
104  * But the symbol is in the ABI, so we can't delete it.
105  */
106 #pragma weak td_log = __td_log
107 void
108 __td_log()
109 {
110 }
111 
112 /*
113  * Short-cut to read just the hash table size from the process,
114  * to avoid repeatedly reading the full uberdata structure when
115  * dealing with a single-threaded process.
116  */
117 static uint_t
118 td_read_hash_size(td_thragent_t *ta_p)
119 {
120 	psaddr_t addr;
121 	uint_t hash_size;
122 
123 	switch (ta_p->initialized) {
124 	default:	/* uninitialized */
125 		return (0);
126 	case 1:		/* partially initialized */
127 		break;
128 	case 2:		/* fully initialized */
129 		return (ta_p->hash_size);
130 	}
131 
132 	if (ta_p->model == PR_MODEL_NATIVE) {
133 		addr = ta_p->uberdata_addr + offsetof(uberdata_t, hash_size);
134 	} else {
135 #if defined(_LP64) && defined(_SYSCALL32)
136 		addr = ta_p->uberdata_addr + offsetof(uberdata32_t, hash_size);
137 #else
138 		addr = 0;
139 #endif
140 	}
141 	if (ps_pdread(ta_p->ph_p, addr, &hash_size, sizeof (hash_size))
142 	    != PS_OK)
143 		return (0);
144 	return (hash_size);
145 }
146 
147 static td_err_e
148 td_read_uberdata(td_thragent_t *ta_p)
149 {
150 	struct ps_prochandle *ph_p = ta_p->ph_p;
151 
152 	if (ta_p->model == PR_MODEL_NATIVE) {
153 		uberdata_t uberdata;
154 #ifdef __ia64
155 		int i;
156 #endif
157 
158 		if (ps_pdread(ph_p, ta_p->uberdata_addr,
159 		    &uberdata, sizeof (uberdata)) != PS_OK)
160 			return (TD_DBERR);
161 		ta_p->primary_map = uberdata.primary_map;
162 		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
163 			offsetof(uberdata_t, tdb.tdb_ev_global_mask);
164 		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
165 			offsetof(uberdata_t, uberflags.uf_tdb_register_sync);
166 		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
167 		ta_p->hash_size = uberdata.hash_size;
168 		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
169 		    ta_p->tdb_events, sizeof (ta_p->tdb_events)) != PS_OK)
170 			return (TD_DBERR);
171 #ifdef __ia64
172 		/*
173 		 * Deal with stupid ia64 function descriptors.
174 		 * We have to go indirect to get the actual function address.
175 		 */
176 		for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++) {
177 			psaddr_t addr;
178 			if (ps_pdread(ph_p, ta_p->tdb_events[i],
179 			    &addr, sizeof (addr)) == PS_OK)
180 				ta_p->tdb_events[i] = addr;
181 		}
182 #endif	/* __ia64 */
183 
184 	} else {
185 #if defined(_LP64) && defined(_SYSCALL32)
186 		uberdata32_t uberdata;
187 		caddr32_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
188 		int i;
189 
190 		if (ps_pdread(ph_p, ta_p->uberdata_addr,
191 		    &uberdata, sizeof (uberdata)) != PS_OK)
192 			return (TD_DBERR);
193 		ta_p->primary_map = uberdata.primary_map;
194 		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
195 			offsetof(uberdata32_t, tdb.tdb_ev_global_mask);
196 		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
197 			offsetof(uberdata32_t, uberflags.uf_tdb_register_sync);
198 		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
199 		ta_p->hash_size = uberdata.hash_size;
200 		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
201 		    tdb_events, sizeof (tdb_events)) != PS_OK)
202 			return (TD_DBERR);
203 		for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++)
204 			ta_p->tdb_events[i] = tdb_events[i];
205 #else
206 		return (TD_DBERR);
207 #endif
208 	}
209 	if (ta_p->hash_size != 1) {	/* multi-threaded */
210 		ta_p->initialized = 2;
211 		ta_p->single_lwpid = 0;
212 		ta_p->single_ulwp_addr = NULL;
213 	} else {			/* single-threaded */
214 		ta_p->initialized = 1;
215 		/*
216 		 * Get the address and lwpid of the single thread/LWP.
217 		 * It may not be ulwp_one if this is a child of fork1().
218 		 */
219 		if (ta_p->model == PR_MODEL_NATIVE) {
220 			thr_hash_table_t head;
221 			lwpid_t lwpid = 0;
222 
223 			if (ps_pdread(ph_p, ta_p->hash_table_addr,
224 			    &head, sizeof (head)) != PS_OK)
225 				return (TD_DBERR);
226 			if ((psaddr_t)head.hash_bucket == NULL)
227 				ta_p->initialized = 0;
228 			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
229 			    offsetof(ulwp_t, ul_lwpid),
230 			    &lwpid, sizeof (lwpid)) != PS_OK)
231 				return (TD_DBERR);
232 			ta_p->single_lwpid = lwpid;
233 			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
234 		} else {
235 #if defined(_LP64) && defined(_SYSCALL32)
236 			thr_hash_table32_t head;
237 			lwpid_t lwpid = 0;
238 
239 			if (ps_pdread(ph_p, ta_p->hash_table_addr,
240 			    &head, sizeof (head)) != PS_OK)
241 				return (TD_DBERR);
242 			if ((psaddr_t)head.hash_bucket == NULL)
243 				ta_p->initialized = 0;
244 			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
245 			    offsetof(ulwp32_t, ul_lwpid),
246 			    &lwpid, sizeof (lwpid)) != PS_OK)
247 				return (TD_DBERR);
248 			ta_p->single_lwpid = lwpid;
249 			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
250 #else
251 			return (TD_DBERR);
252 #endif
253 		}
254 	}
255 	if (!ta_p->primary_map)
256 		ta_p->initialized = 0;
257 	return (TD_OK);
258 }
259 
260 static td_err_e
261 td_read_bootstrap_data(td_thragent_t *ta_p)
262 {
263 	struct ps_prochandle *ph_p = ta_p->ph_p;
264 	psaddr_t bootstrap_addr;
265 	psaddr_t uberdata_addr;
266 	ps_err_e db_return;
267 	td_err_e return_val;
268 	int do_1;
269 
270 	switch (ta_p->initialized) {
271 	case 2:			/* fully initialized */
272 		return (TD_OK);
273 	case 1:			/* partially initialized */
274 		if (td_read_hash_size(ta_p) == 1)
275 			return (TD_OK);
276 		return (td_read_uberdata(ta_p));
277 	}
278 
279 	/*
280 	 * Uninitialized -- do the startup work.
281 	 * We set ta_p->initialized to -1 to cut off recursive calls
282 	 * into libc_db by code in the provider of ps_pglobal_lookup().
283 	 */
284 	do_1 = 0;
285 	ta_p->initialized = -1;
286 	db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME,
287 	    TD_BOOTSTRAP_NAME, &bootstrap_addr);
288 	if (db_return == PS_NOSYM) {
289 		do_1 = 1;
290 		db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME_1,
291 		    TD_BOOTSTRAP_NAME, &bootstrap_addr);
292 	}
293 	if (db_return == PS_NOSYM)	/* libc is not linked yet */
294 		return (TD_NOLIBTHREAD);
295 	if (db_return != PS_OK)
296 		return (TD_ERR);
297 	db_return = ps_pglobal_lookup(ph_p,
298 	    do_1? TD_LIBRARY_NAME_1 : TD_LIBRARY_NAME,
299 	    TD_UBERDATA_NAME, &uberdata_addr);
300 	if (db_return == PS_NOSYM)	/* libc is not linked yet */
301 		return (TD_NOLIBTHREAD);
302 	if (db_return != PS_OK)
303 		return (TD_ERR);
304 
305 	/*
306 	 * Read the uberdata address into the thread agent structure.
307 	 */
308 	if (ta_p->model == PR_MODEL_NATIVE) {
309 		psaddr_t psaddr;
310 		if (ps_pdread(ph_p, bootstrap_addr,
311 		    &psaddr, sizeof (psaddr)) != PS_OK)
312 			return (TD_DBERR);
313 		if ((ta_p->bootstrap_addr = psaddr) == NULL)
314 			psaddr = uberdata_addr;
315 		else if (ps_pdread(ph_p, psaddr,
316 		    &psaddr, sizeof (psaddr)) != PS_OK)
317 			return (TD_DBERR);
318 		ta_p->uberdata_addr = psaddr;
319 	} else {
320 #if defined(_LP64) && defined(_SYSCALL32)
321 		caddr32_t psaddr;
322 		if (ps_pdread(ph_p, bootstrap_addr,
323 		    &psaddr, sizeof (psaddr)) != PS_OK)
324 			return (TD_DBERR);
325 		if ((ta_p->bootstrap_addr = (psaddr_t)psaddr) == NULL)
326 			psaddr = (caddr32_t)uberdata_addr;
327 		else if (ps_pdread(ph_p, (psaddr_t)psaddr,
328 		    &psaddr, sizeof (psaddr)) != PS_OK)
329 			return (TD_DBERR);
330 		ta_p->uberdata_addr = (psaddr_t)psaddr;
331 #else
332 		return (TD_DBERR);
333 #endif	/* _SYSCALL32 */
334 	}
335 
336 	if ((return_val = td_read_uberdata(ta_p)) != TD_OK)
337 		return (return_val);
338 	if (ta_p->bootstrap_addr == NULL)
339 		ta_p->initialized = 0;
340 	return (TD_OK);
341 }
342 
343 #pragma weak ps_kill
344 #pragma weak ps_lrolltoaddr
345 
346 /*
347  * Allocate a new agent process handle ("thread agent").
348  */
349 #pragma weak td_ta_new = __td_ta_new
350 td_err_e
351 __td_ta_new(struct ps_prochandle *ph_p, td_thragent_t **ta_pp)
352 {
353 	td_thragent_t *ta_p;
354 	int model;
355 	td_err_e return_val = TD_OK;
356 
357 	if (ph_p == NULL)
358 		return (TD_BADPH);
359 	if (ta_pp == NULL)
360 		return (TD_ERR);
361 	*ta_pp = NULL;
362 	if (ps_pstop(ph_p) != PS_OK)
363 		return (TD_DBERR);
364 	/*
365 	 * ps_pdmodel might not be defined if this is an older client.
366 	 * Make it a weak symbol and test if it exists before calling.
367 	 */
368 #pragma weak ps_pdmodel
369 	if (ps_pdmodel == NULL) {
370 		model = PR_MODEL_NATIVE;
371 	} else if (ps_pdmodel(ph_p, &model) != PS_OK) {
372 		(void) ps_pcontinue(ph_p);
373 		return (TD_ERR);
374 	}
375 	if ((ta_p = malloc(sizeof (*ta_p))) == NULL) {
376 		(void) ps_pcontinue(ph_p);
377 		return (TD_MALLOC);
378 	}
379 
380 	/*
381 	 * Initialize the agent process handle.
382 	 * Pick up the symbol value we need from the target process.
383 	 */
384 	(void) memset(ta_p, 0, sizeof (*ta_p));
385 	ta_p->ph_p = ph_p;
386 	(void) rwlock_init(&ta_p->rwlock, USYNC_THREAD, NULL);
387 	ta_p->model = model;
388 	return_val = td_read_bootstrap_data(ta_p);
389 
390 	/*
391 	 * Because the old libthread_db enabled lock tracking by default,
392 	 * we must also do it.  However, we do it only if the application
393 	 * provides the ps_kill() and ps_lrolltoaddr() interfaces.
394 	 * (dbx provides the ps_kill() and ps_lrolltoaddr() interfaces.)
395 	 */
396 	if (return_val == TD_OK && ps_kill != NULL && ps_lrolltoaddr != NULL) {
397 		register_sync_t oldenable;
398 		register_sync_t enable = REGISTER_SYNC_ENABLE;
399 		psaddr_t psaddr = ta_p->tdb_register_sync_addr;
400 
401 		if (ps_pdread(ph_p, psaddr,
402 		    &oldenable, sizeof (oldenable)) != PS_OK)
403 			return_val = TD_DBERR;
404 		else if (oldenable != REGISTER_SYNC_OFF ||
405 		    ps_pdwrite(ph_p, psaddr,
406 		    &enable, sizeof (enable)) != PS_OK) {
407 			/*
408 			 * Lock tracking was already enabled or we
409 			 * failed to enable it, probably because we
410 			 * are examining a core file.  In either case
411 			 * set the sync_tracking flag non-zero to
412 			 * indicate that we should not attempt to
413 			 * disable lock tracking when we delete the
414 			 * agent process handle in td_ta_delete().
415 			 */
416 			ta_p->sync_tracking = 1;
417 		}
418 	}
419 
420 	if (return_val == TD_OK)
421 		*ta_pp = ta_p;
422 	else
423 		free(ta_p);
424 
425 	(void) ps_pcontinue(ph_p);
426 	return (return_val);
427 }
428 
429 /*
430  * Utility function to grab the readers lock and return the prochandle,
431  * given an agent process handle.  Performs standard error checking.
432  * Returns non-NULL with the lock held, or NULL with the lock not held.
433  */
434 static struct ps_prochandle *
435 ph_lock_ta(td_thragent_t *ta_p, td_err_e *err)
436 {
437 	struct ps_prochandle *ph_p = NULL;
438 	td_err_e error;
439 
440 	if (ta_p == NULL || ta_p->initialized == -1) {
441 		*err = TD_BADTA;
442 	} else if (rw_rdlock(&ta_p->rwlock) != 0) {	/* can't happen? */
443 		*err = TD_BADTA;
444 	} else if ((ph_p = ta_p->ph_p) == NULL) {
445 		(void) rw_unlock(&ta_p->rwlock);
446 		*err = TD_BADPH;
447 	} else if (ta_p->initialized != 2 &&
448 	    (error = td_read_bootstrap_data(ta_p)) != TD_OK) {
449 		(void) rw_unlock(&ta_p->rwlock);
450 		ph_p = NULL;
451 		*err = error;
452 	} else {
453 		*err = TD_OK;
454 	}
455 
456 	return (ph_p);
457 }
458 
459 /*
460  * Utility function to grab the readers lock and return the prochandle,
461  * given an agent thread handle.  Performs standard error checking.
462  * Returns non-NULL with the lock held, or NULL with the lock not held.
463  */
464 static struct ps_prochandle *
465 ph_lock_th(const td_thrhandle_t *th_p, td_err_e *err)
466 {
467 	if (th_p == NULL || th_p->th_unique == NULL) {
468 		*err = TD_BADTH;
469 		return (NULL);
470 	}
471 	return (ph_lock_ta(th_p->th_ta_p, err));
472 }
473 
474 /*
475  * Utility function to grab the readers lock and return the prochandle,
476  * given a synchronization object handle.  Performs standard error checking.
477  * Returns non-NULL with the lock held, or NULL with the lock not held.
478  */
479 static struct ps_prochandle *
480 ph_lock_sh(const td_synchandle_t *sh_p, td_err_e *err)
481 {
482 	if (sh_p == NULL || sh_p->sh_unique == NULL) {
483 		*err = TD_BADSH;
484 		return (NULL);
485 	}
486 	return (ph_lock_ta(sh_p->sh_ta_p, err));
487 }
488 
489 /*
490  * Unlock the agent process handle obtained from ph_lock_*().
491  */
492 static void
493 ph_unlock(td_thragent_t *ta_p)
494 {
495 	(void) rw_unlock(&ta_p->rwlock);
496 }
497 
498 /*
499  * De-allocate an agent process handle,
500  * releasing all related resources.
501  *
502  * XXX -- This is hopelessly broken ---
503  * Storage for thread agent is not deallocated.  The prochandle
504  * in the thread agent is set to NULL so that future uses of
505  * the thread agent can be detected and an error value returned.
506  * All functions in the external user interface that make
507  * use of the thread agent are expected
508  * to check for a NULL prochandle in the thread agent.
509  * All such functions are also expected to obtain a
510  * reader lock on the thread agent while it is using it.
511  */
512 #pragma weak td_ta_delete = __td_ta_delete
513 td_err_e
514 __td_ta_delete(td_thragent_t *ta_p)
515 {
516 	struct ps_prochandle *ph_p;
517 
518 	/*
519 	 * This is the only place we grab the writer lock.
520 	 * We are going to NULL out the prochandle.
521 	 */
522 	if (ta_p == NULL || rw_wrlock(&ta_p->rwlock) != 0)
523 		return (TD_BADTA);
524 	if ((ph_p = ta_p->ph_p) == NULL) {
525 		(void) rw_unlock(&ta_p->rwlock);
526 		return (TD_BADPH);
527 	}
528 	/*
529 	 * If synch. tracking was disabled when td_ta_new() was called and
530 	 * if td_ta_sync_tracking_enable() was never called, then disable
531 	 * synch. tracking (it was enabled by default in td_ta_new()).
532 	 */
533 	if (ta_p->sync_tracking == 0 &&
534 	    ps_kill != NULL && ps_lrolltoaddr != NULL) {
535 		register_sync_t enable = REGISTER_SYNC_DISABLE;
536 
537 		(void) ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
538 		    &enable, sizeof (enable));
539 	}
540 	ta_p->ph_p = NULL;
541 	(void) rw_unlock(&ta_p->rwlock);
542 	return (TD_OK);
543 }
544 
545 /*
546  * Map an agent process handle to a client prochandle.
547  * Currently unused by dbx.
548  */
549 #pragma weak td_ta_get_ph = __td_ta_get_ph
550 td_err_e
551 __td_ta_get_ph(td_thragent_t *ta_p, struct ps_prochandle **ph_pp)
552 {
553 	td_err_e return_val;
554 
555 	if (ph_pp != NULL)	/* protect stupid callers */
556 		*ph_pp = NULL;
557 	if (ph_pp == NULL)
558 		return (TD_ERR);
559 	if ((*ph_pp = ph_lock_ta(ta_p, &return_val)) == NULL)
560 		return (return_val);
561 	ph_unlock(ta_p);
562 	return (TD_OK);
563 }
564 
565 /*
566  * Set the process's suggested concurrency level.
567  * This is a no-op in a one-level model.
568  * Currently unused by dbx.
569  */
570 #pragma weak td_ta_setconcurrency = __td_ta_setconcurrency
571 /* ARGSUSED1 */
572 td_err_e
573 __td_ta_setconcurrency(const td_thragent_t *ta_p, int level)
574 {
575 	if (ta_p == NULL)
576 		return (TD_BADTA);
577 	if (ta_p->ph_p == NULL)
578 		return (TD_BADPH);
579 	return (TD_OK);
580 }
581 
582 /*
583  * Get the number of threads in the process.
584  */
585 #pragma weak td_ta_get_nthreads = __td_ta_get_nthreads
586 td_err_e
587 __td_ta_get_nthreads(td_thragent_t *ta_p, int *nthread_p)
588 {
589 	struct ps_prochandle *ph_p;
590 	td_err_e return_val;
591 	int nthreads;
592 	int nzombies;
593 	psaddr_t nthreads_addr;
594 	psaddr_t nzombies_addr;
595 
596 	if (ta_p->model == PR_MODEL_NATIVE) {
597 		nthreads_addr = ta_p->uberdata_addr +
598 			offsetof(uberdata_t, nthreads);
599 		nzombies_addr = ta_p->uberdata_addr +
600 			offsetof(uberdata_t, nzombies);
601 	} else {
602 #if defined(_LP64) && defined(_SYSCALL32)
603 		nthreads_addr = ta_p->uberdata_addr +
604 			offsetof(uberdata32_t, nthreads);
605 		nzombies_addr = ta_p->uberdata_addr +
606 			offsetof(uberdata32_t, nzombies);
607 #else
608 		nthreads_addr = 0;
609 		nzombies_addr = 0;
610 #endif	/* _SYSCALL32 */
611 	}
612 
613 	if (nthread_p == NULL)
614 		return (TD_ERR);
615 	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
616 		return (return_val);
617 	if (ps_pdread(ph_p, nthreads_addr, &nthreads, sizeof (int)) != PS_OK)
618 		return_val = TD_DBERR;
619 	if (ps_pdread(ph_p, nzombies_addr, &nzombies, sizeof (int)) != PS_OK)
620 		return_val = TD_DBERR;
621 	ph_unlock(ta_p);
622 	if (return_val == TD_OK)
623 		*nthread_p = nthreads + nzombies;
624 	return (return_val);
625 }
626 
627 typedef struct {
628 	thread_t	tid;
629 	int		found;
630 	td_thrhandle_t	th;
631 } td_mapper_param_t;
632 
633 /*
634  * Check the value in data against the thread id.
635  * If it matches, return 1 to terminate iterations.
636  * This function is used by td_ta_map_id2thr() to map a tid to a thread handle.
637  */
638 static int
639 td_mapper_id2thr(td_thrhandle_t *th_p, td_mapper_param_t *data)
640 {
641 	td_thrinfo_t ti;
642 
643 	if (__td_thr_get_info(th_p, &ti) == TD_OK &&
644 	    data->tid == ti.ti_tid) {
645 		data->found = 1;
646 		data->th = *th_p;
647 		return (1);
648 	}
649 	return (0);
650 }
651 
652 /*
653  * Given a thread identifier, return the corresponding thread handle.
654  */
655 #pragma weak td_ta_map_id2thr = __td_ta_map_id2thr
656 td_err_e
657 __td_ta_map_id2thr(td_thragent_t *ta_p, thread_t tid,
658 	td_thrhandle_t *th_p)
659 {
660 	td_err_e		return_val;
661 	td_mapper_param_t	data;
662 
663 	if (th_p != NULL &&	/* optimize for a single thread */
664 	    ta_p != NULL &&
665 	    ta_p->initialized == 1 &&
666 	    (td_read_hash_size(ta_p) == 1 ||
667 	    td_read_uberdata(ta_p) == TD_OK) &&
668 	    ta_p->initialized == 1 &&
669 	    ta_p->single_lwpid == tid) {
670 		th_p->th_ta_p = ta_p;
671 		if ((th_p->th_unique = ta_p->single_ulwp_addr) == 0)
672 			return (TD_NOTHR);
673 		return (TD_OK);
674 	}
675 
676 	/*
677 	 * LOCKING EXCEPTION - Locking is not required here because
678 	 * the locking and checking will be done in __td_ta_thr_iter.
679 	 */
680 
681 	if (ta_p == NULL)
682 		return (TD_BADTA);
683 	if (th_p == NULL)
684 		return (TD_BADTH);
685 	if (tid == 0)
686 		return (TD_NOTHR);
687 
688 	data.tid = tid;
689 	data.found = 0;
690 	return_val = __td_ta_thr_iter(ta_p,
691 		(td_thr_iter_f *)td_mapper_id2thr, (void *)&data,
692 		TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
693 		TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
694 	if (return_val == TD_OK) {
695 		if (data.found == 0)
696 			return_val = TD_NOTHR;
697 		else
698 			*th_p = data.th;
699 	}
700 
701 	return (return_val);
702 }
703 
704 /*
705  * Map the address of a synchronization object to a sync. object handle.
706  */
707 #pragma weak td_ta_map_addr2sync = __td_ta_map_addr2sync
708 td_err_e
709 __td_ta_map_addr2sync(td_thragent_t *ta_p, psaddr_t addr, td_synchandle_t *sh_p)
710 {
711 	struct ps_prochandle *ph_p;
712 	td_err_e return_val;
713 	uint16_t sync_magic;
714 
715 	if (sh_p == NULL)
716 		return (TD_BADSH);
717 	if (addr == NULL)
718 		return (TD_ERR);
719 	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
720 		return (return_val);
721 	/*
722 	 * Check the magic number of the sync. object to make sure it's valid.
723 	 * The magic number is at the same offset for all sync. objects.
724 	 */
725 	if (ps_pdread(ph_p, (psaddr_t)&((mutex_t *)addr)->mutex_magic,
726 	    &sync_magic, sizeof (sync_magic)) != PS_OK) {
727 		ph_unlock(ta_p);
728 		return (TD_BADSH);
729 	}
730 	ph_unlock(ta_p);
731 	if (sync_magic != MUTEX_MAGIC && sync_magic != COND_MAGIC &&
732 	    sync_magic != SEMA_MAGIC && sync_magic != RWL_MAGIC)
733 		return (TD_BADSH);
734 	/*
735 	 * Just fill in the appropriate fields of the sync. handle.
736 	 */
737 	sh_p->sh_ta_p = (td_thragent_t *)ta_p;
738 	sh_p->sh_unique = addr;
739 	return (TD_OK);
740 }
741 
742 /*
743  * Iterate over the set of global TSD keys.
744  * The call back function is called with three arguments,
745  * a key, a pointer to the destructor function, and the cbdata pointer.
746  * Currently unused by dbx.
747  */
748 #pragma weak td_ta_tsd_iter = __td_ta_tsd_iter
749 td_err_e
750 __td_ta_tsd_iter(td_thragent_t *ta_p, td_key_iter_f *cb, void *cbdata_p)
751 {
752 	struct ps_prochandle *ph_p;
753 	td_err_e	return_val;
754 	int		key;
755 	int		numkeys;
756 	psaddr_t	dest_addr;
757 	psaddr_t	*destructors = NULL;
758 	PFrV		destructor;
759 
760 	if (cb == NULL)
761 		return (TD_ERR);
762 	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
763 		return (return_val);
764 	if (ps_pstop(ph_p) != PS_OK) {
765 		ph_unlock(ta_p);
766 		return (TD_DBERR);
767 	}
768 
769 	if (ta_p->model == PR_MODEL_NATIVE) {
770 		tsd_metadata_t tsdm;
771 
772 		if (ps_pdread(ph_p,
773 		    ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
774 		    &tsdm, sizeof (tsdm)) != PS_OK)
775 			return_val = TD_DBERR;
776 		else {
777 			numkeys = tsdm.tsdm_nused;
778 			dest_addr = (psaddr_t)tsdm.tsdm_destro;
779 			if (numkeys > 0)
780 				destructors =
781 				    malloc(numkeys * sizeof (psaddr_t));
782 		}
783 	} else {
784 #if defined(_LP64) && defined(_SYSCALL32)
785 		tsd_metadata32_t tsdm;
786 
787 		if (ps_pdread(ph_p,
788 		    ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
789 		    &tsdm, sizeof (tsdm)) != PS_OK)
790 			return_val = TD_DBERR;
791 		else {
792 			numkeys = tsdm.tsdm_nused;
793 			dest_addr = (psaddr_t)tsdm.tsdm_destro;
794 			if (numkeys > 0)
795 				destructors =
796 				    malloc(numkeys * sizeof (caddr32_t));
797 		}
798 #else
799 		return_val = TD_DBERR;
800 #endif	/* _SYSCALL32 */
801 	}
802 
803 	if (return_val != TD_OK || numkeys <= 0) {
804 		(void) ps_pcontinue(ph_p);
805 		ph_unlock(ta_p);
806 		return (return_val);
807 	}
808 
809 	if (destructors == NULL)
810 		return_val = TD_MALLOC;
811 	else if (ta_p->model == PR_MODEL_NATIVE) {
812 		if (ps_pdread(ph_p, dest_addr,
813 		    destructors, numkeys * sizeof (psaddr_t)) != PS_OK)
814 			return_val = TD_DBERR;
815 		else {
816 			for (key = 1; key < numkeys; key++) {
817 				destructor = (PFrV)destructors[key];
818 				if (destructor != TSD_UNALLOCATED &&
819 				    (*cb)(key, destructor, cbdata_p))
820 					break;
821 			}
822 		}
823 #if defined(_LP64) && defined(_SYSCALL32)
824 	} else {
825 		caddr32_t *destructors32 = (caddr32_t *)destructors;
826 		caddr32_t destruct32;
827 
828 		if (ps_pdread(ph_p, dest_addr,
829 		    destructors32, numkeys * sizeof (caddr32_t)) != PS_OK)
830 			return_val = TD_DBERR;
831 		else {
832 			for (key = 1; key < numkeys; key++) {
833 				destruct32 = destructors32[key];
834 				if (destruct32 != (caddr32_t)TSD_UNALLOCATED &&
835 				    (*cb)(key, (PFrV)(uintptr_t)destruct32,
836 				    cbdata_p))
837 					break;
838 			}
839 		}
840 #endif	/* _SYSCALL32 */
841 	}
842 
843 	if (destructors)
844 		free(destructors);
845 	(void) ps_pcontinue(ph_p);
846 	ph_unlock(ta_p);
847 	return (return_val);
848 }
849 
850 int
851 sigequalset(const sigset_t *s1, const sigset_t *s2)
852 {
853 	return (s1->__sigbits[0] == s2->__sigbits[0] &&
854 		s1->__sigbits[1] == s2->__sigbits[1] &&
855 		s1->__sigbits[2] == s2->__sigbits[2] &&
856 		s1->__sigbits[3] == s2->__sigbits[3]);
857 }
858 
859 /*
860  * Description:
861  *   Iterate over all threads. For each thread call
862  * the function pointed to by "cb" with a pointer
863  * to a thread handle, and a pointer to data which
864  * can be NULL. Only call td_thr_iter_f() on threads
865  * which match the properties of state, ti_pri,
866  * ti_sigmask_p, and ti_user_flags.  If cb returns
867  * a non-zero value, terminate iterations.
868  *
869  * Input:
870  *   *ta_p - thread agent
871  *   *cb - call back function defined by user.
872  * td_thr_iter_f() takes a thread handle and
873  * cbdata_p as a parameter.
874  *   cbdata_p - parameter for td_thr_iter_f().
875  *
876  *   state - state of threads of interest.  A value of
877  * TD_THR_ANY_STATE from enum td_thr_state_e
878  * does not restrict iterations by state.
879  *   ti_pri - lower bound of priorities of threads of
880  * interest.  A value of TD_THR_LOWEST_PRIORITY
881  * defined in thread_db.h does not restrict
882  * iterations by priority.  A thread with priority
883  * less than ti_pri will NOT be passed to the callback
884  * function.
885  *   ti_sigmask_p - signal mask of threads of interest.
886  * A value of TD_SIGNO_MASK defined in thread_db.h
887  * does not restrict iterations by signal mask.
888  *   ti_user_flags - user flags of threads of interest.  A
889  * value of TD_THR_ANY_USER_FLAGS defined in thread_db.h
890  * does not restrict iterations by user flags.
891  */
892 #pragma weak td_ta_thr_iter = __td_ta_thr_iter
893 td_err_e
894 __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
895 	void *cbdata_p, td_thr_state_e state, int ti_pri,
896 	sigset_t *ti_sigmask_p, unsigned ti_user_flags)
897 {
898 	struct ps_prochandle *ph_p;
899 	psaddr_t	first_lwp_addr;
900 	psaddr_t	first_zombie_addr;
901 	psaddr_t	curr_lwp_addr;
902 	psaddr_t	next_lwp_addr;
903 	td_thrhandle_t	th;
904 	ps_err_e	db_return;
905 	ps_err_e	db_return2;
906 	td_err_e	return_val;
907 
908 	if (cb == NULL)
909 		return (TD_ERR);
910 	/*
911 	 * If state is not within bound, short circuit.
912 	 */
913 	if (state < TD_THR_ANY_STATE || state > TD_THR_STOPPED_ASLEEP)
914 		return (TD_OK);
915 
916 	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
917 		return (return_val);
918 	if (ps_pstop(ph_p) != PS_OK) {
919 		ph_unlock(ta_p);
920 		return (TD_DBERR);
921 	}
922 
923 	/*
924 	 * For each ulwp_t in the circular linked lists pointed
925 	 * to by "all_lwps" and "all_zombies":
926 	 * (1) Filter each thread.
927 	 * (2) Create the thread_object for each thread that passes.
928 	 * (3) Call the call back function on each thread.
929 	 */
930 
931 	if (ta_p->model == PR_MODEL_NATIVE) {
932 		db_return = ps_pdread(ph_p,
933 		    ta_p->uberdata_addr + offsetof(uberdata_t, all_lwps),
934 		    &first_lwp_addr, sizeof (first_lwp_addr));
935 		db_return2 = ps_pdread(ph_p,
936 		    ta_p->uberdata_addr + offsetof(uberdata_t, all_zombies),
937 		    &first_zombie_addr, sizeof (first_zombie_addr));
938 	} else {
939 #if defined(_LP64) && defined(_SYSCALL32)
940 		caddr32_t addr32;
941 
942 		db_return = ps_pdread(ph_p,
943 		    ta_p->uberdata_addr + offsetof(uberdata32_t, all_lwps),
944 		    &addr32, sizeof (addr32));
945 		first_lwp_addr = addr32;
946 		db_return2 = ps_pdread(ph_p,
947 		    ta_p->uberdata_addr + offsetof(uberdata32_t, all_zombies),
948 		    &addr32, sizeof (addr32));
949 		first_zombie_addr = addr32;
950 #else	/* _SYSCALL32 */
951 		db_return = PS_ERR;
952 		db_return2 = PS_ERR;
953 #endif	/* _SYSCALL32 */
954 	}
955 	if (db_return == PS_OK)
956 		db_return = db_return2;
957 
958 	/*
959 	 * If first_lwp_addr and first_zombie_addr are both NULL,
960 	 * libc must not yet be initialized or all threads have
961 	 * exited.  Return TD_NOTHR and all will be well.
962 	 */
963 	if (db_return == PS_OK &&
964 	    first_lwp_addr == NULL && first_zombie_addr == NULL) {
965 		(void) ps_pcontinue(ph_p);
966 		ph_unlock(ta_p);
967 		return (TD_NOTHR);
968 	}
969 	if (db_return != PS_OK) {
970 		(void) ps_pcontinue(ph_p);
971 		ph_unlock(ta_p);
972 		return (TD_DBERR);
973 	}
974 
975 	/*
976 	 * Run down the lists of all living and dead lwps.
977 	 */
978 	if (first_lwp_addr == NULL)
979 		first_lwp_addr = first_zombie_addr;
980 	curr_lwp_addr = first_lwp_addr;
981 	for (;;) {
982 		td_thr_state_e ts_state;
983 		int userpri;
984 		unsigned userflags;
985 		sigset_t mask;
986 
987 		/*
988 		 * Read the ulwp struct.
989 		 */
990 		if (ta_p->model == PR_MODEL_NATIVE) {
991 			ulwp_t ulwp;
992 
993 			if (ps_pdread(ph_p, curr_lwp_addr,
994 			    &ulwp, sizeof (ulwp)) != PS_OK &&
995 			    ((void) memset(&ulwp, 0, sizeof (ulwp)),
996 			    ps_pdread(ph_p, curr_lwp_addr,
997 			    &ulwp, REPLACEMENT_SIZE)) != PS_OK) {
998 				return_val = TD_DBERR;
999 				break;
1000 			}
1001 			next_lwp_addr = (psaddr_t)ulwp.ul_forw;
1002 
1003 			ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
1004 				ulwp.ul_stop? TD_THR_STOPPED :
1005 				ulwp.ul_wchan? TD_THR_SLEEP :
1006 				TD_THR_ACTIVE;
1007 			userpri = ulwp.ul_pri;
1008 			userflags = ulwp.ul_usropts;
1009 			if (ulwp.ul_dead)
1010 				(void) sigemptyset(&mask);
1011 			else
1012 				mask = *(sigset_t *)&ulwp.ul_sigmask;
1013 		} else {
1014 #if defined(_LP64) && defined(_SYSCALL32)
1015 			ulwp32_t ulwp;
1016 
1017 			if (ps_pdread(ph_p, curr_lwp_addr,
1018 			    &ulwp, sizeof (ulwp)) != PS_OK &&
1019 			    ((void) memset(&ulwp, 0, sizeof (ulwp)),
1020 			    ps_pdread(ph_p, curr_lwp_addr,
1021 			    &ulwp, REPLACEMENT_SIZE32)) != PS_OK) {
1022 				return_val = TD_DBERR;
1023 				break;
1024 			}
1025 			next_lwp_addr = (psaddr_t)ulwp.ul_forw;
1026 
1027 			ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
1028 				ulwp.ul_stop? TD_THR_STOPPED :
1029 				ulwp.ul_wchan? TD_THR_SLEEP :
1030 				TD_THR_ACTIVE;
1031 			userpri = ulwp.ul_pri;
1032 			userflags = ulwp.ul_usropts;
1033 			if (ulwp.ul_dead)
1034 				(void) sigemptyset(&mask);
1035 			else
1036 				mask = *(sigset_t *)&ulwp.ul_sigmask;
1037 #else	/* _SYSCALL32 */
1038 			return_val = TD_ERR;
1039 			break;
1040 #endif	/* _SYSCALL32 */
1041 		}
1042 
1043 		/*
1044 		 * Filter on state, priority, sigmask, and user flags.
1045 		 */
1046 
1047 		if ((state != ts_state) &&
1048 		    (state != TD_THR_ANY_STATE))
1049 			goto advance;
1050 
1051 		if (ti_pri > userpri)
1052 			goto advance;
1053 
1054 		if (ti_sigmask_p != TD_SIGNO_MASK &&
1055 		    !sigequalset(ti_sigmask_p, &mask))
1056 			goto advance;
1057 
1058 		if (ti_user_flags != userflags &&
1059 		    ti_user_flags != (unsigned)TD_THR_ANY_USER_FLAGS)
1060 			goto advance;
1061 
1062 		/*
1063 		 * Call back - break if the return
1064 		 * from the call back is non-zero.
1065 		 */
1066 		th.th_ta_p = (td_thragent_t *)ta_p;
1067 		th.th_unique = curr_lwp_addr;
1068 		if ((*cb)(&th, cbdata_p))
1069 			break;
1070 
1071 advance:
1072 		if ((curr_lwp_addr = next_lwp_addr) == first_lwp_addr) {
1073 			/*
1074 			 * Switch to the zombie list, unless it is NULL
1075 			 * or we have already been doing the zombie list,
1076 			 * in which case terminate the loop.
1077 			 */
1078 			if (first_zombie_addr == NULL ||
1079 			    first_lwp_addr == first_zombie_addr)
1080 				break;
1081 			curr_lwp_addr = first_lwp_addr = first_zombie_addr;
1082 		}
1083 	}
1084 
1085 	(void) ps_pcontinue(ph_p);
1086 	ph_unlock(ta_p);
1087 	return (return_val);
1088 }
1089 
1090 /*
1091  * Enable or disable process synchronization object tracking.
1092  * Currently unused by dbx.
1093  */
1094 #pragma weak td_ta_sync_tracking_enable = __td_ta_sync_tracking_enable
1095 td_err_e
1096 __td_ta_sync_tracking_enable(td_thragent_t *ta_p, int onoff)
1097 {
1098 	struct ps_prochandle *ph_p;
1099 	td_err_e return_val;
1100 	register_sync_t enable;
1101 
1102 	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1103 		return (return_val);
1104 	/*
1105 	 * Values of tdb_register_sync in the victim process:
1106 	 *	REGISTER_SYNC_ENABLE	enables registration of synch objects
1107 	 *	REGISTER_SYNC_DISABLE	disables registration of synch objects
1108 	 * These cause the table to be cleared and tdb_register_sync set to:
1109 	 *	REGISTER_SYNC_ON	registration in effect
1110 	 *	REGISTER_SYNC_OFF	registration not in effect
1111 	 */
1112 	enable = onoff? REGISTER_SYNC_ENABLE : REGISTER_SYNC_DISABLE;
1113 	if (ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
1114 	    &enable, sizeof (enable)) != PS_OK)
1115 		return_val = TD_DBERR;
1116 	/*
1117 	 * Remember that this interface was called (see td_ta_delete()).
1118 	 */
1119 	ta_p->sync_tracking = 1;
1120 	ph_unlock(ta_p);
1121 	return (return_val);
1122 }
1123 
1124 /*
1125  * Iterate over all known synchronization variables.
1126  * It is very possible that the list generated is incomplete,
1127  * because the iterator can only find synchronization variables
1128  * that have been registered by the process since synchronization
1129  * object registration was enabled.
1130  * The call back function cb is called for each synchronization
1131  * variable with two arguments: a pointer to the synchronization
1132  * handle and the passed-in argument cbdata.
1133  * If cb returns a non-zero value, iterations are terminated.
1134  */
1135 #pragma weak td_ta_sync_iter = __td_ta_sync_iter
1136 td_err_e
1137 __td_ta_sync_iter(td_thragent_t *ta_p, td_sync_iter_f *cb, void *cbdata)
1138 {
1139 	struct ps_prochandle *ph_p;
1140 	td_err_e	return_val;
1141 	int		i;
1142 	register_sync_t	enable;
1143 	psaddr_t	next_desc;
1144 	tdb_sync_stats_t sync_stats;
1145 	td_synchandle_t	synchandle;
1146 	psaddr_t	psaddr;
1147 	void		*vaddr;
1148 	uint64_t	*sync_addr_hash = NULL;
1149 
1150 	if (cb == NULL)
1151 		return (TD_ERR);
1152 	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1153 		return (return_val);
1154 	if (ps_pstop(ph_p) != PS_OK) {
1155 		ph_unlock(ta_p);
1156 		return (TD_DBERR);
1157 	}
1158 	if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
1159 	    &enable, sizeof (enable)) != PS_OK) {
1160 		return_val = TD_DBERR;
1161 		goto out;
1162 	}
1163 	if (enable != REGISTER_SYNC_ON)
1164 		goto out;
1165 
1166 	/*
1167 	 * First read the hash table.
1168 	 * The hash table is large; allocate with mmap().
1169 	 */
1170 	if ((vaddr = mmap(NULL, TDB_HASH_SIZE * sizeof (uint64_t),
1171 	    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
1172 	    == MAP_FAILED) {
1173 		return_val = TD_MALLOC;
1174 		goto out;
1175 	}
1176 	sync_addr_hash = vaddr;
1177 
1178 	if (ta_p->model == PR_MODEL_NATIVE) {
1179 		if (ps_pdread(ph_p, ta_p->uberdata_addr +
1180 		    offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
1181 		    &psaddr, sizeof (&psaddr)) != PS_OK) {
1182 			return_val = TD_DBERR;
1183 			goto out;
1184 		}
1185 	} else {
1186 #ifdef  _SYSCALL32
1187 		caddr32_t addr;
1188 
1189 		if (ps_pdread(ph_p, ta_p->uberdata_addr +
1190 		    offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
1191 		    &addr, sizeof (addr)) != PS_OK) {
1192 			return_val = TD_DBERR;
1193 			goto out;
1194 		}
1195 		psaddr = addr;
1196 #else
1197 		return_val = TD_ERR;
1198 		goto out;
1199 #endif /* _SYSCALL32 */
1200 	}
1201 
1202 	if (psaddr == NULL)
1203 		goto out;
1204 	if (ps_pdread(ph_p, psaddr, sync_addr_hash,
1205 	    TDB_HASH_SIZE * sizeof (uint64_t)) != PS_OK) {
1206 		return_val = TD_DBERR;
1207 		goto out;
1208 	}
1209 
1210 	/*
1211 	 * Now scan the hash table.
1212 	 */
1213 	for (i = 0; i < TDB_HASH_SIZE; i++) {
1214 		for (next_desc = (psaddr_t)sync_addr_hash[i];
1215 		    next_desc != NULL;
1216 		    next_desc = (psaddr_t)sync_stats.next) {
1217 			if (ps_pdread(ph_p, next_desc,
1218 			    &sync_stats, sizeof (sync_stats)) != PS_OK) {
1219 				return_val = TD_DBERR;
1220 				goto out;
1221 			}
1222 			if (sync_stats.un.type == TDB_NONE) {
1223 				/* not registered since registration enabled */
1224 				continue;
1225 			}
1226 			synchandle.sh_ta_p = ta_p;
1227 			synchandle.sh_unique = (psaddr_t)sync_stats.sync_addr;
1228 			if ((*cb)(&synchandle, cbdata) != 0)
1229 				goto out;
1230 		}
1231 	}
1232 
1233 out:
1234 	if (sync_addr_hash != NULL)
1235 		(void) munmap((void *)sync_addr_hash,
1236 		    TDB_HASH_SIZE * sizeof (uint64_t));
1237 	(void) ps_pcontinue(ph_p);
1238 	ph_unlock(ta_p);
1239 	return (return_val);
1240 }
1241 
1242 /*
1243  * Enable process statistics collection.
1244  */
1245 #pragma weak td_ta_enable_stats = __td_ta_enable_stats
1246 /* ARGSUSED */
1247 td_err_e
1248 __td_ta_enable_stats(const td_thragent_t *ta_p, int onoff)
1249 {
1250 	return (TD_NOCAPAB);
1251 }
1252 
1253 /*
1254  * Reset process statistics.
1255  */
1256 #pragma weak td_ta_reset_stats = __td_ta_reset_stats
1257 /* ARGSUSED */
1258 td_err_e
1259 __td_ta_reset_stats(const td_thragent_t *ta_p)
1260 {
1261 	return (TD_NOCAPAB);
1262 }
1263 
1264 /*
1265  * Read process statistics.
1266  */
1267 #pragma weak td_ta_get_stats = __td_ta_get_stats
1268 /* ARGSUSED */
1269 td_err_e
1270 __td_ta_get_stats(const td_thragent_t *ta_p, td_ta_stats_t *tstats)
1271 {
1272 	return (TD_NOCAPAB);
1273 }
1274 
1275 /*
1276  * Transfer information from lwp struct to thread information struct.
1277  * XXX -- lots of this needs cleaning up.
1278  */
1279 static void
1280 td_thr2to(td_thragent_t *ta_p, psaddr_t ts_addr,
1281 	ulwp_t *ulwp, td_thrinfo_t *ti_p)
1282 {
1283 	lwpid_t lwpid;
1284 
1285 	if ((lwpid = ulwp->ul_lwpid) == 0)
1286 		lwpid = 1;
1287 	(void) memset(ti_p, 0, sizeof (*ti_p));
1288 	ti_p->ti_ta_p = ta_p;
1289 	ti_p->ti_user_flags = ulwp->ul_usropts;
1290 	ti_p->ti_tid = lwpid;
1291 	ti_p->ti_exitval = ulwp->ul_rval;
1292 	ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
1293 	if (!ulwp->ul_dead) {
1294 		/*
1295 		 * The bloody fools got this backwards!
1296 		 */
1297 		ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
1298 		ti_p->ti_stksize = ulwp->ul_stksiz;
1299 	}
1300 	ti_p->ti_ro_area = ts_addr;
1301 	ti_p->ti_ro_size = ulwp->ul_replace?
1302 		REPLACEMENT_SIZE : sizeof (ulwp_t);
1303 	ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
1304 		ulwp->ul_stop? TD_THR_STOPPED :
1305 		ulwp->ul_wchan? TD_THR_SLEEP :
1306 		TD_THR_ACTIVE;
1307 	ti_p->ti_db_suspended = 0;
1308 	ti_p->ti_type = TD_THR_USER;
1309 	ti_p->ti_sp = ulwp->ul_sp;
1310 	ti_p->ti_flags = 0;
1311 	ti_p->ti_pri = ulwp->ul_pri;
1312 	ti_p->ti_lid = lwpid;
1313 	if (!ulwp->ul_dead)
1314 		ti_p->ti_sigmask = ulwp->ul_sigmask;
1315 	ti_p->ti_traceme = 0;
1316 	ti_p->ti_preemptflag = 0;
1317 	ti_p->ti_pirecflag = 0;
1318 	(void) sigemptyset(&ti_p->ti_pending);
1319 	ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
1320 }
1321 
1322 #if defined(_LP64) && defined(_SYSCALL32)
1323 static void
1324 td_thr2to32(td_thragent_t *ta_p, psaddr_t ts_addr,
1325 	ulwp32_t *ulwp, td_thrinfo_t *ti_p)
1326 {
1327 	lwpid_t lwpid;
1328 
1329 	if ((lwpid = ulwp->ul_lwpid) == 0)
1330 		lwpid = 1;
1331 	(void) memset(ti_p, 0, sizeof (*ti_p));
1332 	ti_p->ti_ta_p = ta_p;
1333 	ti_p->ti_user_flags = ulwp->ul_usropts;
1334 	ti_p->ti_tid = lwpid;
1335 	ti_p->ti_exitval = (void *)(uintptr_t)ulwp->ul_rval;
1336 	ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
1337 	if (!ulwp->ul_dead) {
1338 		/*
1339 		 * The bloody fools got this backwards!
1340 		 */
1341 		ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
1342 		ti_p->ti_stksize = ulwp->ul_stksiz;
1343 	}
1344 	ti_p->ti_ro_area = ts_addr;
1345 	ti_p->ti_ro_size = ulwp->ul_replace?
1346 		REPLACEMENT_SIZE32 : sizeof (ulwp32_t);
1347 	ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
1348 		ulwp->ul_stop? TD_THR_STOPPED :
1349 		ulwp->ul_wchan? TD_THR_SLEEP :
1350 		TD_THR_ACTIVE;
1351 	ti_p->ti_db_suspended = 0;
1352 	ti_p->ti_type = TD_THR_USER;
1353 	ti_p->ti_sp = (uint32_t)ulwp->ul_sp;
1354 	ti_p->ti_flags = 0;
1355 	ti_p->ti_pri = ulwp->ul_pri;
1356 	ti_p->ti_lid = lwpid;
1357 	if (!ulwp->ul_dead)
1358 		ti_p->ti_sigmask = *(sigset_t *)&ulwp->ul_sigmask;
1359 	ti_p->ti_traceme = 0;
1360 	ti_p->ti_preemptflag = 0;
1361 	ti_p->ti_pirecflag = 0;
1362 	(void) sigemptyset(&ti_p->ti_pending);
1363 	ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
1364 }
1365 #endif	/* _SYSCALL32 */
1366 
1367 /*
1368  * Get thread information.
1369  */
1370 #pragma weak td_thr_get_info = __td_thr_get_info
1371 td_err_e
1372 __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p)
1373 {
1374 	struct ps_prochandle *ph_p;
1375 	td_thragent_t	*ta_p;
1376 	td_err_e	return_val;
1377 	psaddr_t	psaddr;
1378 
1379 	if (ti_p == NULL)
1380 		return (TD_ERR);
1381 	(void) memset(ti_p, NULL, sizeof (*ti_p));
1382 
1383 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1384 		return (return_val);
1385 	ta_p = th_p->th_ta_p;
1386 	if (ps_pstop(ph_p) != PS_OK) {
1387 		ph_unlock(ta_p);
1388 		return (TD_DBERR);
1389 	}
1390 
1391 	/*
1392 	 * Read the ulwp struct from the process.
1393 	 * Transfer the ulwp struct to the thread information struct.
1394 	 */
1395 	psaddr = th_p->th_unique;
1396 	if (ta_p->model == PR_MODEL_NATIVE) {
1397 		ulwp_t ulwp;
1398 
1399 		if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
1400 		    ((void) memset(&ulwp, 0, sizeof (ulwp)),
1401 		    ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE)) != PS_OK)
1402 			return_val = TD_DBERR;
1403 		else
1404 			td_thr2to(ta_p, psaddr, &ulwp, ti_p);
1405 	} else {
1406 #if defined(_LP64) && defined(_SYSCALL32)
1407 		ulwp32_t ulwp;
1408 
1409 		if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
1410 		    ((void) memset(&ulwp, 0, sizeof (ulwp)),
1411 		    ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE32)) !=
1412 				PS_OK)
1413 			return_val = TD_DBERR;
1414 		else
1415 			td_thr2to32(ta_p, psaddr, &ulwp, ti_p);
1416 #else
1417 		return_val = TD_ERR;
1418 #endif	/* _SYSCALL32 */
1419 	}
1420 
1421 	(void) ps_pcontinue(ph_p);
1422 	ph_unlock(ta_p);
1423 	return (return_val);
1424 }
1425 
1426 /*
1427  * Given a process and an event number, return information about
1428  * an address in the process or at which a breakpoint can be set
1429  * to monitor the event.
1430  */
1431 #pragma weak td_ta_event_addr = __td_ta_event_addr
1432 td_err_e
1433 __td_ta_event_addr(td_thragent_t *ta_p, td_event_e event, td_notify_t *notify_p)
1434 {
1435 	if (ta_p == NULL)
1436 		return (TD_BADTA);
1437 	if (event < TD_MIN_EVENT_NUM || event > TD_MAX_EVENT_NUM)
1438 		return (TD_NOEVENT);
1439 	if (notify_p == NULL)
1440 		return (TD_ERR);
1441 
1442 	notify_p->type = NOTIFY_BPT;
1443 	notify_p->u.bptaddr = ta_p->tdb_events[event - TD_MIN_EVENT_NUM];
1444 
1445 	return (TD_OK);
1446 }
1447 
1448 /*
1449  * Add the events in eventset 2 to eventset 1.
1450  */
1451 static void
1452 eventsetaddset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
1453 {
1454 	int	i;
1455 
1456 	for (i = 0; i < TD_EVENTSIZE; i++)
1457 		event1_p->event_bits[i] |= event2_p->event_bits[i];
1458 }
1459 
1460 /*
1461  * Delete the events in eventset 2 from eventset 1.
1462  */
1463 static void
1464 eventsetdelset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
1465 {
1466 	int	i;
1467 
1468 	for (i = 0; i < TD_EVENTSIZE; i++)
1469 		event1_p->event_bits[i] &= ~event2_p->event_bits[i];
1470 }
1471 
1472 /*
1473  * Either add or delete the given event set from a thread's event mask.
1474  */
1475 static td_err_e
1476 mod_eventset(td_thrhandle_t *th_p, td_thr_events_t *events, int onoff)
1477 {
1478 	struct ps_prochandle *ph_p;
1479 	td_err_e	return_val = TD_OK;
1480 	char		enable;
1481 	td_thr_events_t	evset;
1482 	psaddr_t	psaddr_evset;
1483 	psaddr_t	psaddr_enab;
1484 
1485 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1486 		return (return_val);
1487 	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1488 		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1489 		psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
1490 		psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
1491 	} else {
1492 #if defined(_LP64) && defined(_SYSCALL32)
1493 		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1494 		psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
1495 		psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
1496 #else
1497 		ph_unlock(th_p->th_ta_p);
1498 		return (TD_ERR);
1499 #endif	/* _SYSCALL32 */
1500 	}
1501 	if (ps_pstop(ph_p) != PS_OK) {
1502 		ph_unlock(th_p->th_ta_p);
1503 		return (TD_DBERR);
1504 	}
1505 
1506 	if (ps_pdread(ph_p, psaddr_evset, &evset, sizeof (evset)) != PS_OK)
1507 		return_val = TD_DBERR;
1508 	else {
1509 		if (onoff)
1510 			eventsetaddset(&evset, events);
1511 		else
1512 			eventsetdelset(&evset, events);
1513 		if (ps_pdwrite(ph_p, psaddr_evset, &evset, sizeof (evset))
1514 		    != PS_OK)
1515 			return_val = TD_DBERR;
1516 		else {
1517 			enable = 0;
1518 			if (td_eventismember(&evset, TD_EVENTS_ENABLE))
1519 				enable = 1;
1520 			if (ps_pdwrite(ph_p, psaddr_enab,
1521 			    &enable, sizeof (enable)) != PS_OK)
1522 				return_val = TD_DBERR;
1523 		}
1524 	}
1525 
1526 	(void) ps_pcontinue(ph_p);
1527 	ph_unlock(th_p->th_ta_p);
1528 	return (return_val);
1529 }
1530 
1531 /*
1532  * Enable or disable tracing for a given thread.  Tracing
1533  * is filtered based on the event mask of each thread.  Tracing
1534  * can be turned on/off for the thread without changing thread
1535  * event mask.
1536  * Currently unused by dbx.
1537  */
1538 #pragma weak td_thr_event_enable = __td_thr_event_enable
1539 td_err_e
1540 __td_thr_event_enable(td_thrhandle_t *th_p, int onoff)
1541 {
1542 	td_thr_events_t	evset;
1543 
1544 	td_event_emptyset(&evset);
1545 	td_event_addset(&evset, TD_EVENTS_ENABLE);
1546 	return (mod_eventset(th_p, &evset, onoff));
1547 }
1548 
1549 /*
1550  * Set event mask to enable event. event is turned on in
1551  * event mask for thread.  If a thread encounters an event
1552  * for which its event mask is on, notification will be sent
1553  * to the debugger.
1554  * Addresses for each event are provided to the
1555  * debugger.  It is assumed that a breakpoint of some type will
1556  * be placed at that address.  If the event mask for the thread
1557  * is on, the instruction at the address will be executed.
1558  * Otherwise, the instruction will be skipped.
1559  */
1560 #pragma weak td_thr_set_event = __td_thr_set_event
1561 td_err_e
1562 __td_thr_set_event(td_thrhandle_t *th_p, td_thr_events_t *events)
1563 {
1564 	return (mod_eventset(th_p, events, 1));
1565 }
1566 
1567 /*
1568  * Enable or disable a set of events in the process-global event mask,
1569  * depending on the value of onoff.
1570  */
1571 static td_err_e
1572 td_ta_mod_event(td_thragent_t *ta_p, td_thr_events_t *events, int onoff)
1573 {
1574 	struct ps_prochandle *ph_p;
1575 	td_thr_events_t targ_eventset;
1576 	td_err_e	return_val;
1577 
1578 	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1579 		return (return_val);
1580 	if (ps_pstop(ph_p) != PS_OK) {
1581 		ph_unlock(ta_p);
1582 		return (TD_DBERR);
1583 	}
1584 	if (ps_pdread(ph_p, ta_p->tdb_eventmask_addr,
1585 	    &targ_eventset, sizeof (targ_eventset)) != PS_OK)
1586 		return_val = TD_DBERR;
1587 	else {
1588 		if (onoff)
1589 			eventsetaddset(&targ_eventset, events);
1590 		else
1591 			eventsetdelset(&targ_eventset, events);
1592 		if (ps_pdwrite(ph_p, ta_p->tdb_eventmask_addr,
1593 		    &targ_eventset, sizeof (targ_eventset)) != PS_OK)
1594 			return_val = TD_DBERR;
1595 	}
1596 	(void) ps_pcontinue(ph_p);
1597 	ph_unlock(ta_p);
1598 	return (return_val);
1599 }
1600 
1601 /*
1602  * Enable a set of events in the process-global event mask.
1603  */
1604 #pragma weak td_ta_set_event = __td_ta_set_event
1605 td_err_e
1606 __td_ta_set_event(td_thragent_t *ta_p, td_thr_events_t *events)
1607 {
1608 	return (td_ta_mod_event(ta_p, events, 1));
1609 }
1610 
1611 /*
1612  * Set event mask to disable the given event set; these events are cleared
1613  * from the event mask of the thread.  Events that occur for a thread
1614  * with the event masked off will not cause notification to be
1615  * sent to the debugger (see td_thr_set_event for fuller description).
1616  */
1617 #pragma weak td_thr_clear_event = __td_thr_clear_event
1618 td_err_e
1619 __td_thr_clear_event(td_thrhandle_t *th_p, td_thr_events_t *events)
1620 {
1621 	return (mod_eventset(th_p, events, 0));
1622 }
1623 
1624 /*
1625  * Disable a set of events in the process-global event mask.
1626  */
1627 #pragma weak td_ta_clear_event = __td_ta_clear_event
1628 td_err_e
1629 __td_ta_clear_event(td_thragent_t *ta_p, td_thr_events_t *events)
1630 {
1631 	return (td_ta_mod_event(ta_p, events, 0));
1632 }
1633 
1634 /*
1635  * This function returns the most recent event message, if any,
1636  * associated with a thread.  Given a thread handle, return the message
1637  * corresponding to the event encountered by the thread.  Only one
1638  * message per thread is saved.  Messages from earlier events are lost
1639  * when later events occur.
1640  */
1641 #pragma weak td_thr_event_getmsg = __td_thr_event_getmsg
1642 td_err_e
1643 __td_thr_event_getmsg(td_thrhandle_t *th_p, td_event_msg_t *msg)
1644 {
1645 	struct ps_prochandle *ph_p;
1646 	td_err_e	return_val = TD_OK;
1647 	psaddr_t	psaddr;
1648 
1649 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1650 		return (return_val);
1651 	if (ps_pstop(ph_p) != PS_OK) {
1652 		ph_unlock(th_p->th_ta_p);
1653 		return (TD_BADTA);
1654 	}
1655 	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1656 		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1657 		td_evbuf_t evbuf;
1658 
1659 		psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
1660 		if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
1661 			return_val = TD_DBERR;
1662 		} else if (evbuf.eventnum == TD_EVENT_NONE) {
1663 			return_val = TD_NOEVENT;
1664 		} else {
1665 			msg->event = evbuf.eventnum;
1666 			msg->th_p = (td_thrhandle_t *)th_p;
1667 			msg->msg.data = (uintptr_t)evbuf.eventdata;
1668 			/* "Consume" the message */
1669 			evbuf.eventnum = TD_EVENT_NONE;
1670 			evbuf.eventdata = NULL;
1671 			if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
1672 			    != PS_OK)
1673 				return_val = TD_DBERR;
1674 		}
1675 	} else {
1676 #if defined(_LP64) && defined(_SYSCALL32)
1677 		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1678 		td_evbuf32_t evbuf;
1679 
1680 		psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
1681 		if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
1682 			return_val = TD_DBERR;
1683 		} else if (evbuf.eventnum == TD_EVENT_NONE) {
1684 			return_val = TD_NOEVENT;
1685 		} else {
1686 			msg->event = evbuf.eventnum;
1687 			msg->th_p = (td_thrhandle_t *)th_p;
1688 			msg->msg.data = (uintptr_t)evbuf.eventdata;
1689 			/* "Consume" the message */
1690 			evbuf.eventnum = TD_EVENT_NONE;
1691 			evbuf.eventdata = NULL;
1692 			if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
1693 			    != PS_OK)
1694 				return_val = TD_DBERR;
1695 		}
1696 #else
1697 		return_val = TD_ERR;
1698 #endif	/* _SYSCALL32 */
1699 	}
1700 
1701 	(void) ps_pcontinue(ph_p);
1702 	ph_unlock(th_p->th_ta_p);
1703 	return (return_val);
1704 }
1705 
1706 /*
1707  * The callback function td_ta_event_getmsg uses when looking for
1708  * a thread with an event.  A thin wrapper around td_thr_event_getmsg.
1709  */
1710 static int
1711 event_msg_cb(const td_thrhandle_t *th_p, void *arg)
1712 {
1713 	static td_thrhandle_t th;
1714 	td_event_msg_t *msg = arg;
1715 
1716 	if (__td_thr_event_getmsg((td_thrhandle_t *)th_p, msg) == TD_OK) {
1717 		/*
1718 		 * Got an event, stop iterating.
1719 		 *
1720 		 * Because of past mistakes in interface definition,
1721 		 * we are forced to pass back a static local variable
1722 		 * for the thread handle because th_p is a pointer
1723 		 * to a local variable in __td_ta_thr_iter().
1724 		 * Grr...
1725 		 */
1726 		th = *th_p;
1727 		msg->th_p = &th;
1728 		return (1);
1729 	}
1730 	return (0);
1731 }
1732 
1733 /*
1734  * This function is just like td_thr_event_getmsg, except that it is
1735  * passed a process handle rather than a thread handle, and returns
1736  * an event message for some thread in the process that has an event
1737  * message pending.  If no thread has an event message pending, this
1738  * routine returns TD_NOEVENT.  Thus, all pending event messages may
1739  * be collected from a process by repeatedly calling this routine
1740  * until it returns TD_NOEVENT.
1741  */
1742 #pragma weak td_ta_event_getmsg = __td_ta_event_getmsg
1743 td_err_e
1744 __td_ta_event_getmsg(td_thragent_t *ta_p, td_event_msg_t *msg)
1745 {
1746 	td_err_e return_val;
1747 
1748 	if (ta_p == NULL)
1749 		return (TD_BADTA);
1750 	if (ta_p->ph_p == NULL)
1751 		return (TD_BADPH);
1752 	if (msg == NULL)
1753 		return (TD_ERR);
1754 	msg->event = TD_EVENT_NONE;
1755 	if ((return_val = __td_ta_thr_iter(ta_p, event_msg_cb, msg,
1756 	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK,
1757 	    TD_THR_ANY_USER_FLAGS)) != TD_OK)
1758 		return (return_val);
1759 	if (msg->event == TD_EVENT_NONE)
1760 		return (TD_NOEVENT);
1761 	return (TD_OK);
1762 }
1763 
1764 static lwpid_t
1765 thr_to_lwpid(const td_thrhandle_t *th_p)
1766 {
1767 	struct ps_prochandle *ph_p = th_p->th_ta_p->ph_p;
1768 	lwpid_t lwpid;
1769 
1770 	/*
1771 	 * The caller holds the prochandle lock
1772 	 * and has already verfied everything.
1773 	 */
1774 	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1775 		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1776 
1777 		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
1778 		    &lwpid, sizeof (lwpid)) != PS_OK)
1779 			lwpid = 0;
1780 		else if (lwpid == 0)
1781 			lwpid = 1;
1782 	} else {
1783 #if defined(_LP64) && defined(_SYSCALL32)
1784 		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1785 
1786 		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
1787 		    &lwpid, sizeof (lwpid)) != PS_OK)
1788 			lwpid = 0;
1789 		else if (lwpid == 0)
1790 			lwpid = 1;
1791 #else
1792 		lwpid = 0;
1793 #endif	/* _SYSCALL32 */
1794 	}
1795 
1796 	return (lwpid);
1797 }
1798 
1799 /*
1800  * Suspend a thread.
1801  * XXX: What does this mean in a one-level model?
1802  */
1803 #pragma weak td_thr_dbsuspend = __td_thr_dbsuspend
1804 td_err_e
1805 __td_thr_dbsuspend(const td_thrhandle_t *th_p)
1806 {
1807 	struct ps_prochandle *ph_p;
1808 	td_err_e return_val;
1809 
1810 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1811 		return (return_val);
1812 	if (ps_lstop(ph_p, thr_to_lwpid(th_p)) != PS_OK)
1813 		return_val = TD_DBERR;
1814 	ph_unlock(th_p->th_ta_p);
1815 	return (return_val);
1816 }
1817 
1818 /*
1819  * Resume a suspended thread.
1820  * XXX: What does this mean in a one-level model?
1821  */
1822 #pragma weak td_thr_dbresume = __td_thr_dbresume
1823 td_err_e
1824 __td_thr_dbresume(const td_thrhandle_t *th_p)
1825 {
1826 	struct ps_prochandle *ph_p;
1827 	td_err_e return_val;
1828 
1829 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1830 		return (return_val);
1831 	if (ps_lcontinue(ph_p, thr_to_lwpid(th_p)) != PS_OK)
1832 		return_val = TD_DBERR;
1833 	ph_unlock(th_p->th_ta_p);
1834 	return (return_val);
1835 }
1836 
1837 /*
1838  * Set a thread's signal mask.
1839  * Currently unused by dbx.
1840  */
1841 #pragma weak td_thr_sigsetmask = __td_thr_sigsetmask
1842 /* ARGSUSED */
1843 td_err_e
1844 __td_thr_sigsetmask(const td_thrhandle_t *th_p, const sigset_t ti_sigmask)
1845 {
1846 	return (TD_NOCAPAB);
1847 }
1848 
1849 /*
1850  * Set a thread's "signals-pending" set.
1851  * Currently unused by dbx.
1852  */
1853 #pragma weak td_thr_setsigpending = __td_thr_setsigpending
1854 /* ARGSUSED */
1855 td_err_e
1856 __td_thr_setsigpending(const td_thrhandle_t *th_p,
1857 	uchar_t ti_pending_flag, const sigset_t ti_pending)
1858 {
1859 	return (TD_NOCAPAB);
1860 }
1861 
1862 /*
1863  * Get a thread's general register set.
1864  */
1865 #pragma weak td_thr_getgregs = __td_thr_getgregs
1866 td_err_e
1867 __td_thr_getgregs(td_thrhandle_t *th_p, prgregset_t regset)
1868 {
1869 	struct ps_prochandle *ph_p;
1870 	td_err_e return_val;
1871 
1872 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1873 		return (return_val);
1874 	if (ps_pstop(ph_p) != PS_OK) {
1875 		ph_unlock(th_p->th_ta_p);
1876 		return (TD_DBERR);
1877 	}
1878 
1879 	if (ps_lgetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
1880 		return_val = TD_DBERR;
1881 
1882 	(void) ps_pcontinue(ph_p);
1883 	ph_unlock(th_p->th_ta_p);
1884 	return (return_val);
1885 }
1886 
1887 /*
1888  * Set a thread's general register set.
1889  */
1890 #pragma weak td_thr_setgregs = __td_thr_setgregs
1891 td_err_e
1892 __td_thr_setgregs(td_thrhandle_t *th_p, const prgregset_t regset)
1893 {
1894 	struct ps_prochandle *ph_p;
1895 	td_err_e return_val;
1896 
1897 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1898 		return (return_val);
1899 	if (ps_pstop(ph_p) != PS_OK) {
1900 		ph_unlock(th_p->th_ta_p);
1901 		return (TD_DBERR);
1902 	}
1903 
1904 	if (ps_lsetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
1905 		return_val = TD_DBERR;
1906 
1907 	(void) ps_pcontinue(ph_p);
1908 	ph_unlock(th_p->th_ta_p);
1909 	return (return_val);
1910 }
1911 
1912 /*
1913  * Get a thread's floating-point register set.
1914  */
1915 #pragma weak td_thr_getfpregs = __td_thr_getfpregs
1916 td_err_e
1917 __td_thr_getfpregs(td_thrhandle_t *th_p, prfpregset_t *fpregset)
1918 {
1919 	struct ps_prochandle *ph_p;
1920 	td_err_e return_val;
1921 
1922 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1923 		return (return_val);
1924 	if (ps_pstop(ph_p) != PS_OK) {
1925 		ph_unlock(th_p->th_ta_p);
1926 		return (TD_DBERR);
1927 	}
1928 
1929 	if (ps_lgetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
1930 		return_val = TD_DBERR;
1931 
1932 	(void) ps_pcontinue(ph_p);
1933 	ph_unlock(th_p->th_ta_p);
1934 	return (return_val);
1935 }
1936 
1937 /*
1938  * Set a thread's floating-point register set.
1939  */
1940 #pragma weak td_thr_setfpregs = __td_thr_setfpregs
1941 td_err_e
1942 __td_thr_setfpregs(td_thrhandle_t *th_p, const prfpregset_t *fpregset)
1943 {
1944 	struct ps_prochandle *ph_p;
1945 	td_err_e return_val;
1946 
1947 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1948 		return (return_val);
1949 	if (ps_pstop(ph_p) != PS_OK) {
1950 		ph_unlock(th_p->th_ta_p);
1951 		return (TD_DBERR);
1952 	}
1953 
1954 	if (ps_lsetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
1955 		return_val = TD_DBERR;
1956 
1957 	(void) ps_pcontinue(ph_p);
1958 	ph_unlock(th_p->th_ta_p);
1959 	return (return_val);
1960 }
1961 
1962 /*
1963  * Get the size of the extra state register set for this architecture.
1964  * Currently unused by dbx.
1965  */
1966 #pragma weak td_thr_getxregsize = __td_thr_getxregsize
1967 /* ARGSUSED */
1968 td_err_e
1969 __td_thr_getxregsize(td_thrhandle_t *th_p, int *xregsize)
1970 {
1971 #if defined(__sparc)
1972 	struct ps_prochandle *ph_p;
1973 	td_err_e return_val;
1974 
1975 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1976 		return (return_val);
1977 	if (ps_pstop(ph_p) != PS_OK) {
1978 		ph_unlock(th_p->th_ta_p);
1979 		return (TD_DBERR);
1980 	}
1981 
1982 	if (ps_lgetxregsize(ph_p, thr_to_lwpid(th_p), xregsize) != PS_OK)
1983 		return_val = TD_DBERR;
1984 
1985 	(void) ps_pcontinue(ph_p);
1986 	ph_unlock(th_p->th_ta_p);
1987 	return (return_val);
1988 #else	/* __sparc */
1989 	return (TD_NOXREGS);
1990 #endif	/* __sparc */
1991 }
1992 
1993 /*
1994  * Get a thread's extra state register set.
1995  */
1996 #pragma weak td_thr_getxregs = __td_thr_getxregs
1997 /* ARGSUSED */
1998 td_err_e
1999 __td_thr_getxregs(td_thrhandle_t *th_p, void *xregset)
2000 {
2001 #if defined(__sparc)
2002 	struct ps_prochandle *ph_p;
2003 	td_err_e return_val;
2004 
2005 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2006 		return (return_val);
2007 	if (ps_pstop(ph_p) != PS_OK) {
2008 		ph_unlock(th_p->th_ta_p);
2009 		return (TD_DBERR);
2010 	}
2011 
2012 	if (ps_lgetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
2013 		return_val = TD_DBERR;
2014 
2015 	(void) ps_pcontinue(ph_p);
2016 	ph_unlock(th_p->th_ta_p);
2017 	return (return_val);
2018 #else	/* __sparc */
2019 	return (TD_NOXREGS);
2020 #endif	/* __sparc */
2021 }
2022 
2023 /*
2024  * Set a thread's extra state register set.
2025  */
2026 #pragma weak td_thr_setxregs = __td_thr_setxregs
2027 /* ARGSUSED */
2028 td_err_e
2029 __td_thr_setxregs(td_thrhandle_t *th_p, const void *xregset)
2030 {
2031 #if defined(__sparc)
2032 	struct ps_prochandle *ph_p;
2033 	td_err_e return_val;
2034 
2035 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2036 		return (return_val);
2037 	if (ps_pstop(ph_p) != PS_OK) {
2038 		ph_unlock(th_p->th_ta_p);
2039 		return (TD_DBERR);
2040 	}
2041 
2042 	if (ps_lsetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
2043 		return_val = TD_DBERR;
2044 
2045 	(void) ps_pcontinue(ph_p);
2046 	ph_unlock(th_p->th_ta_p);
2047 	return (return_val);
2048 #else	/* __sparc */
2049 	return (TD_NOXREGS);
2050 #endif	/* __sparc */
2051 }
2052 
2053 struct searcher {
2054 	psaddr_t	addr;
2055 	int		status;
2056 };
2057 
2058 /*
2059  * Check the struct thread address in *th_p again first
2060  * value in "data".  If value in data is found, set second value
2061  * in "data" to 1 and return 1 to terminate iterations.
2062  * This function is used by td_thr_validate() to verify that
2063  * a thread handle is valid.
2064  */
2065 static int
2066 td_searcher(const td_thrhandle_t *th_p, void *data)
2067 {
2068 	struct searcher *searcher_data = (struct searcher *)data;
2069 
2070 	if (searcher_data->addr == th_p->th_unique) {
2071 		searcher_data->status = 1;
2072 		return (1);
2073 	}
2074 	return (0);
2075 }
2076 
2077 /*
2078  * Validate the thread handle.  Check that
2079  * a thread exists in the thread agent/process that
2080  * corresponds to thread with handle *th_p.
2081  * Currently unused by dbx.
2082  */
2083 #pragma weak td_thr_validate = __td_thr_validate
2084 td_err_e
2085 __td_thr_validate(const td_thrhandle_t *th_p)
2086 {
2087 	td_err_e return_val;
2088 	struct searcher searcher_data = {0, 0};
2089 
2090 	if (th_p == NULL)
2091 		return (TD_BADTH);
2092 	if (th_p->th_unique == NULL || th_p->th_ta_p == NULL)
2093 		return (TD_BADTH);
2094 
2095 	/*
2096 	 * LOCKING EXCEPTION - Locking is not required
2097 	 * here because no use of the thread agent is made (other
2098 	 * than the sanity check) and checking of the thread
2099 	 * agent will be done in __td_ta_thr_iter.
2100 	 */
2101 
2102 	searcher_data.addr = th_p->th_unique;
2103 	return_val = __td_ta_thr_iter(th_p->th_ta_p,
2104 		td_searcher, &searcher_data,
2105 		TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
2106 		TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
2107 
2108 	if (return_val == TD_OK && searcher_data.status == 0)
2109 		return_val = TD_NOTHR;
2110 
2111 	return (return_val);
2112 }
2113 
2114 /*
2115  * Get a thread's private binding to a given thread specific
2116  * data(TSD) key(see thr_getspecific(3T).  If the thread doesn't
2117  * have a binding for a particular key, then NULL is returned.
2118  */
2119 #pragma weak td_thr_tsd = __td_thr_tsd
2120 td_err_e
2121 __td_thr_tsd(td_thrhandle_t *th_p, thread_key_t key, void **data_pp)
2122 {
2123 	struct ps_prochandle *ph_p;
2124 	td_thragent_t	*ta_p;
2125 	td_err_e	return_val;
2126 	int		maxkey;
2127 	int		nkey;
2128 	psaddr_t	tsd_paddr;
2129 
2130 	if (data_pp == NULL)
2131 		return (TD_ERR);
2132 	*data_pp = NULL;
2133 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2134 		return (return_val);
2135 	ta_p = th_p->th_ta_p;
2136 	if (ps_pstop(ph_p) != PS_OK) {
2137 		ph_unlock(ta_p);
2138 		return (TD_DBERR);
2139 	}
2140 
2141 	if (ta_p->model == PR_MODEL_NATIVE) {
2142 		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2143 		tsd_metadata_t tsdm;
2144 		tsd_t stsd;
2145 
2146 		if (ps_pdread(ph_p,
2147 		    ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
2148 		    &tsdm, sizeof (tsdm)) != PS_OK)
2149 			return_val = TD_DBERR;
2150 		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
2151 		    &tsd_paddr, sizeof (tsd_paddr)) != PS_OK)
2152 			return_val = TD_DBERR;
2153 		else if (tsd_paddr != NULL &&
2154 		    ps_pdread(ph_p, tsd_paddr, &stsd, sizeof (stsd)) != PS_OK)
2155 			return_val = TD_DBERR;
2156 		else {
2157 			maxkey = tsdm.tsdm_nused;
2158 			nkey = tsd_paddr == NULL ? TSD_NFAST : stsd.tsd_nalloc;
2159 
2160 			if (key < TSD_NFAST)
2161 				tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
2162 		}
2163 	} else {
2164 #if defined(_LP64) && defined(_SYSCALL32)
2165 		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2166 		tsd_metadata32_t tsdm;
2167 		tsd32_t stsd;
2168 		caddr32_t addr;
2169 
2170 		if (ps_pdread(ph_p,
2171 		    ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
2172 		    &tsdm, sizeof (tsdm)) != PS_OK)
2173 			return_val = TD_DBERR;
2174 		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
2175 		    &addr, sizeof (addr)) != PS_OK)
2176 			return_val = TD_DBERR;
2177 		else if (addr != NULL &&
2178 		    ps_pdread(ph_p, addr, &stsd, sizeof (stsd)) != PS_OK)
2179 			return_val = TD_DBERR;
2180 		else {
2181 			maxkey = tsdm.tsdm_nused;
2182 			nkey = addr == NULL ? TSD_NFAST : stsd.tsd_nalloc;
2183 
2184 			if (key < TSD_NFAST) {
2185 				tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
2186 			} else {
2187 				tsd_paddr = addr;
2188 			}
2189 		}
2190 #else
2191 		return_val = TD_ERR;
2192 #endif	/* _SYSCALL32 */
2193 	}
2194 
2195 	if (return_val == TD_OK && (key < 1 || key >= maxkey))
2196 		return_val = TD_NOTSD;
2197 	if (return_val != TD_OK || key >= nkey) {
2198 		/* NULL has already been stored in data_pp */
2199 		(void) ps_pcontinue(ph_p);
2200 		ph_unlock(ta_p);
2201 		return (return_val);
2202 	}
2203 
2204 	/*
2205 	 * Read the value from the thread's tsd array.
2206 	 */
2207 	if (ta_p->model == PR_MODEL_NATIVE) {
2208 		void *value;
2209 
2210 		if (ps_pdread(ph_p, tsd_paddr + key * sizeof (void *),
2211 		    &value, sizeof (value)) != PS_OK)
2212 			return_val = TD_DBERR;
2213 		else
2214 			*data_pp = value;
2215 #if defined(_LP64) && defined(_SYSCALL32)
2216 	} else {
2217 		caddr32_t value32;
2218 
2219 		if (ps_pdread(ph_p, tsd_paddr + key * sizeof (caddr32_t),
2220 		    &value32, sizeof (value32)) != PS_OK)
2221 			return_val = TD_DBERR;
2222 		else
2223 			*data_pp = (void *)(uintptr_t)value32;
2224 #endif	/* _SYSCALL32 */
2225 	}
2226 
2227 	(void) ps_pcontinue(ph_p);
2228 	ph_unlock(ta_p);
2229 	return (return_val);
2230 }
2231 
2232 /*
2233  * Get the base address of a thread's thread local storage (TLS) block
2234  * for the module (executable or shared object) identified by 'moduleid'.
2235  */
2236 #pragma weak td_thr_tlsbase = __td_thr_tlsbase
2237 td_err_e
2238 __td_thr_tlsbase(td_thrhandle_t *th_p, ulong_t moduleid, psaddr_t *base)
2239 {
2240 	struct ps_prochandle *ph_p;
2241 	td_thragent_t	*ta_p;
2242 	td_err_e	return_val;
2243 
2244 	if (base == NULL)
2245 		return (TD_ERR);
2246 	*base = NULL;
2247 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2248 		return (return_val);
2249 	ta_p = th_p->th_ta_p;
2250 	if (ps_pstop(ph_p) != PS_OK) {
2251 		ph_unlock(ta_p);
2252 		return (TD_DBERR);
2253 	}
2254 
2255 	if (ta_p->model == PR_MODEL_NATIVE) {
2256 		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2257 		tls_metadata_t tls_metadata;
2258 		TLS_modinfo tlsmod;
2259 		tls_t tls;
2260 
2261 		if (ps_pdread(ph_p,
2262 		    ta_p->uberdata_addr + offsetof(uberdata_t, tls_metadata),
2263 		    &tls_metadata, sizeof (tls_metadata)) != PS_OK)
2264 			return_val = TD_DBERR;
2265 		else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
2266 			return_val = TD_NOTLS;
2267 		else if (ps_pdread(ph_p,
2268 		    (psaddr_t)((TLS_modinfo *)
2269 		    tls_metadata.tls_modinfo.tls_data + moduleid),
2270 		    &tlsmod, sizeof (tlsmod)) != PS_OK)
2271 			return_val = TD_DBERR;
2272 		else if (tlsmod.tm_memsz == 0)
2273 			return_val = TD_NOTLS;
2274 		else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
2275 			*base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
2276 		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
2277 		    &tls, sizeof (tls)) != PS_OK)
2278 			return_val = TD_DBERR;
2279 		else if (moduleid >= tls.tls_size)
2280 			return_val = TD_TLSDEFER;
2281 		else if (ps_pdread(ph_p,
2282 		    (psaddr_t)((tls_t *)tls.tls_data + moduleid),
2283 		    &tls, sizeof (tls)) != PS_OK)
2284 			return_val = TD_DBERR;
2285 		else if (tls.tls_size == 0)
2286 			return_val = TD_TLSDEFER;
2287 		else
2288 			*base = (psaddr_t)tls.tls_data;
2289 	} else {
2290 #if defined(_LP64) && defined(_SYSCALL32)
2291 		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2292 		tls_metadata32_t tls_metadata;
2293 		TLS_modinfo32 tlsmod;
2294 		tls32_t tls;
2295 
2296 		if (ps_pdread(ph_p,
2297 		    ta_p->uberdata_addr + offsetof(uberdata32_t, tls_metadata),
2298 		    &tls_metadata, sizeof (tls_metadata)) != PS_OK)
2299 			return_val = TD_DBERR;
2300 		else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
2301 			return_val = TD_NOTLS;
2302 		else if (ps_pdread(ph_p,
2303 		    (psaddr_t)((TLS_modinfo32 *)
2304 		    (uintptr_t)tls_metadata.tls_modinfo.tls_data + moduleid),
2305 		    &tlsmod, sizeof (tlsmod)) != PS_OK)
2306 			return_val = TD_DBERR;
2307 		else if (tlsmod.tm_memsz == 0)
2308 			return_val = TD_NOTLS;
2309 		else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
2310 			*base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
2311 		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
2312 		    &tls, sizeof (tls)) != PS_OK)
2313 			return_val = TD_DBERR;
2314 		else if (moduleid >= tls.tls_size)
2315 			return_val = TD_TLSDEFER;
2316 		else if (ps_pdread(ph_p,
2317 		    (psaddr_t)((tls32_t *)(uintptr_t)tls.tls_data + moduleid),
2318 		    &tls, sizeof (tls)) != PS_OK)
2319 			return_val = TD_DBERR;
2320 		else if (tls.tls_size == 0)
2321 			return_val = TD_TLSDEFER;
2322 		else
2323 			*base = (psaddr_t)tls.tls_data;
2324 #else
2325 		return_val = TD_ERR;
2326 #endif	/* _SYSCALL32 */
2327 	}
2328 
2329 	(void) ps_pcontinue(ph_p);
2330 	ph_unlock(ta_p);
2331 	return (return_val);
2332 }
2333 
2334 /*
2335  * Change a thread's priority to the value specified by ti_pri.
2336  * Currently unused by dbx.
2337  */
2338 #pragma weak td_thr_setprio = __td_thr_setprio
2339 td_err_e
2340 __td_thr_setprio(td_thrhandle_t *th_p, int ti_pri)
2341 {
2342 	struct ps_prochandle *ph_p;
2343 	pri_t		priority = ti_pri;
2344 	td_err_e	return_val = TD_OK;
2345 
2346 	if (ti_pri < THREAD_MIN_PRIORITY || ti_pri > THREAD_MAX_PRIORITY)
2347 		return (TD_ERR);
2348 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2349 		return (return_val);
2350 
2351 	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
2352 		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2353 
2354 		if (ps_pdwrite(ph_p, (psaddr_t)&ulwp->ul_pri,
2355 		    &priority, sizeof (priority)) != PS_OK)
2356 			return_val = TD_DBERR;
2357 	} else {
2358 #if defined(_LP64) && defined(_SYSCALL32)
2359 		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2360 
2361 		if (ps_pdwrite(ph_p, (psaddr_t)&ulwp->ul_pri,
2362 		    &priority, sizeof (priority)) != PS_OK)
2363 			return_val = TD_DBERR;
2364 #else
2365 		return_val = TD_ERR;
2366 #endif	/* _SYSCALL32 */
2367 	}
2368 
2369 	ph_unlock(th_p->th_ta_p);
2370 	return (return_val);
2371 }
2372 
2373 /*
2374  * This structure links td_thr_lockowner and the lowner_cb callback function.
2375  */
2376 typedef struct {
2377 	td_sync_iter_f	*owner_cb;
2378 	void		*owner_cb_arg;
2379 	td_thrhandle_t	*th_p;
2380 } lowner_cb_ctl_t;
2381 
2382 static int
2383 lowner_cb(const td_synchandle_t *sh_p, void *arg)
2384 {
2385 	lowner_cb_ctl_t *ocb = arg;
2386 	int trunc = 0;
2387 	union {
2388 		rwlock_t rwl;
2389 		mutex_t mx;
2390 	} rw_m;
2391 
2392 	if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
2393 	    &rw_m, sizeof (rw_m)) != PS_OK) {
2394 		trunc = 1;
2395 		if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
2396 		    &rw_m.mx, sizeof (rw_m.mx)) != PS_OK)
2397 			return (0);
2398 	}
2399 	if (rw_m.mx.mutex_magic == MUTEX_MAGIC &&
2400 	    rw_m.mx.mutex_owner == ocb->th_p->th_unique)
2401 		return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
2402 	if (!trunc && rw_m.rwl.magic == RWL_MAGIC) {
2403 		mutex_t *rwlock = &rw_m.rwl.mutex;
2404 		if (rwlock->mutex_owner == ocb->th_p->th_unique)
2405 			return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
2406 	}
2407 	return (0);
2408 }
2409 
2410 /*
2411  * Iterate over the set of locks owned by a specified thread.
2412  * If cb returns a non-zero value, terminate iterations.
2413  */
2414 #pragma weak td_thr_lockowner = __td_thr_lockowner
2415 td_err_e
2416 __td_thr_lockowner(const td_thrhandle_t *th_p, td_sync_iter_f *cb,
2417 	void *cb_data)
2418 {
2419 	td_thragent_t	*ta_p;
2420 	td_err_e	return_val;
2421 	lowner_cb_ctl_t	lcb;
2422 
2423 	/*
2424 	 * Just sanity checks.
2425 	 */
2426 	if (ph_lock_th((td_thrhandle_t *)th_p, &return_val) == NULL)
2427 		return (return_val);
2428 	ta_p = th_p->th_ta_p;
2429 	ph_unlock(ta_p);
2430 
2431 	lcb.owner_cb = cb;
2432 	lcb.owner_cb_arg = cb_data;
2433 	lcb.th_p = (td_thrhandle_t *)th_p;
2434 	return (__td_ta_sync_iter(ta_p, lowner_cb, &lcb));
2435 }
2436 
2437 /*
2438  * If a thread is asleep on a synchronization variable,
2439  * then get the synchronization handle.
2440  */
2441 #pragma weak td_thr_sleepinfo = __td_thr_sleepinfo
2442 td_err_e
2443 __td_thr_sleepinfo(const td_thrhandle_t *th_p, td_synchandle_t *sh_p)
2444 {
2445 	struct ps_prochandle *ph_p;
2446 	td_err_e	return_val = TD_OK;
2447 	uintptr_t	wchan;
2448 
2449 	if (sh_p == NULL)
2450 		return (TD_ERR);
2451 	if ((ph_p = ph_lock_th((td_thrhandle_t *)th_p, &return_val)) == NULL)
2452 		return (return_val);
2453 
2454 	/*
2455 	 * No need to stop the process for a simple read.
2456 	 */
2457 	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
2458 		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2459 
2460 		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2461 		    &wchan, sizeof (wchan)) != PS_OK)
2462 			return_val = TD_DBERR;
2463 	} else {
2464 #if defined(_LP64) && defined(_SYSCALL32)
2465 		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2466 		caddr32_t wchan32;
2467 
2468 		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2469 		    &wchan32, sizeof (wchan32)) != PS_OK)
2470 			return_val = TD_DBERR;
2471 		wchan = wchan32;
2472 #else
2473 		return_val = TD_ERR;
2474 #endif	/* _SYSCALL32 */
2475 	}
2476 
2477 	if (return_val != TD_OK || wchan == NULL) {
2478 		sh_p->sh_ta_p = NULL;
2479 		sh_p->sh_unique = NULL;
2480 		if (return_val == TD_OK)
2481 			return_val = TD_ERR;
2482 	} else {
2483 		sh_p->sh_ta_p = th_p->th_ta_p;
2484 		sh_p->sh_unique = (psaddr_t)wchan;
2485 	}
2486 
2487 	ph_unlock(th_p->th_ta_p);
2488 	return (return_val);
2489 }
2490 
2491 /*
2492  * Which thread is running on an lwp?
2493  */
2494 #pragma weak td_ta_map_lwp2thr = __td_ta_map_lwp2thr
2495 td_err_e
2496 __td_ta_map_lwp2thr(td_thragent_t *ta_p, lwpid_t lwpid,
2497 	td_thrhandle_t *th_p)
2498 {
2499 	return (__td_ta_map_id2thr(ta_p, lwpid, th_p));
2500 }
2501 
2502 /*
2503  * Common code for td_sync_get_info() and td_sync_get_stats()
2504  */
2505 static td_err_e
2506 sync_get_info_common(const td_synchandle_t *sh_p, struct ps_prochandle *ph_p,
2507 	td_syncinfo_t *si_p)
2508 {
2509 	int trunc = 0;
2510 	td_so_un_t generic_so;
2511 
2512 	/*
2513 	 * Determine the sync. object type; a little type fudgery here.
2514 	 * First attempt to read the whole union.  If that fails, attempt
2515 	 * to read just the condvar.  A condvar is the smallest sync. object.
2516 	 */
2517 	if (ps_pdread(ph_p, sh_p->sh_unique,
2518 	    &generic_so, sizeof (generic_so)) != PS_OK) {
2519 		trunc = 1;
2520 		if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
2521 		    sizeof (generic_so.condition)) != PS_OK)
2522 			return (TD_DBERR);
2523 	}
2524 
2525 	switch (generic_so.condition.cond_magic) {
2526 	case MUTEX_MAGIC:
2527 		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2528 		    &generic_so.lock, sizeof (generic_so.lock)) != PS_OK)
2529 			return (TD_DBERR);
2530 		si_p->si_type = TD_SYNC_MUTEX;
2531 		si_p->si_shared_type = generic_so.lock.mutex_type;
2532 		(void) memcpy(si_p->si_flags, &generic_so.lock.mutex_flag,
2533 		    sizeof (generic_so.lock.mutex_flag));
2534 		si_p->si_state.mutex_locked =
2535 		    (generic_so.lock.mutex_lockw != 0);
2536 		si_p->si_size = sizeof (generic_so.lock);
2537 		si_p->si_has_waiters = generic_so.lock.mutex_waiters;
2538 		si_p->si_rcount = generic_so.lock.mutex_rcount;
2539 		si_p->si_prioceiling = generic_so.lock.mutex_ceiling;
2540 		if (si_p->si_state.mutex_locked) {
2541 			if (si_p->si_shared_type &
2542 			    (USYNC_PROCESS | USYNC_PROCESS_ROBUST))
2543 				si_p->si_ownerpid =
2544 					generic_so.lock.mutex_ownerpid;
2545 			si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2546 			si_p->si_owner.th_unique = generic_so.lock.mutex_owner;
2547 		}
2548 		break;
2549 	case COND_MAGIC:
2550 		si_p->si_type = TD_SYNC_COND;
2551 		si_p->si_shared_type = generic_so.condition.cond_type;
2552 		(void) memcpy(si_p->si_flags, generic_so.condition.flags.flag,
2553 		    sizeof (generic_so.condition.flags.flag));
2554 		si_p->si_size = sizeof (generic_so.condition);
2555 		si_p->si_has_waiters =
2556 			(generic_so.condition.cond_waiters_user |
2557 			generic_so.condition.cond_waiters_kernel)? 1 : 0;
2558 		break;
2559 	case SEMA_MAGIC:
2560 		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2561 		    &generic_so.semaphore, sizeof (generic_so.semaphore))
2562 		    != PS_OK)
2563 			return (TD_DBERR);
2564 		si_p->si_type = TD_SYNC_SEMA;
2565 		si_p->si_shared_type = generic_so.semaphore.type;
2566 		si_p->si_state.sem_count = generic_so.semaphore.count;
2567 		si_p->si_size = sizeof (generic_so.semaphore);
2568 		si_p->si_has_waiters =
2569 		    ((lwp_sema_t *)&generic_so.semaphore)->flags[7];
2570 		/* this is useless but the old interface provided it */
2571 		si_p->si_data = (psaddr_t)generic_so.semaphore.count;
2572 		break;
2573 	case RWL_MAGIC:
2574 		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2575 		    &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK)
2576 			return (TD_DBERR);
2577 		si_p->si_type = TD_SYNC_RWLOCK;
2578 		si_p->si_shared_type = generic_so.rwlock.rwlock_type;
2579 		si_p->si_size = sizeof (generic_so.rwlock);
2580 		if (generic_so.rwlock.rwlock_type == USYNC_PROCESS) {
2581 			uint32_t *rwstate =
2582 			    (uint32_t *)&si_p->si_state.nreaders;
2583 
2584 			if (*rwstate & URW_WRITE_LOCKED) {
2585 				si_p->si_state.nreaders = -1;
2586 				si_p->si_is_wlock = 1;
2587 				si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2588 				si_p->si_owner.th_unique =
2589 					generic_so.rwlock.rwlock_owner;
2590 			} else if (*rwstate & URW_READERS_MASK)
2591 				si_p->si_state.nreaders =
2592 				    *rwstate & URW_READERS_MASK;
2593 			else
2594 				si_p->si_state.nreaders = 0;
2595 			si_p->si_has_waiters = (*rwstate & URW_HAS_WAITERS);
2596 		} else {
2597 			si_p->si_state.nreaders = generic_so.rwlock.readers;
2598 			si_p->si_has_waiters =
2599 			    generic_so.rwlock.rwlock_mwaiters;
2600 			if (si_p->si_state.nreaders == -1) {
2601 				si_p->si_is_wlock = 1;
2602 				si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2603 				si_p->si_owner.th_unique =
2604 					generic_so.rwlock.rwlock_mowner;
2605 			}
2606 		}
2607 		/* this is useless but the old interface provided it */
2608 		si_p->si_data = (psaddr_t)generic_so.rwlock.readers;
2609 		break;
2610 	default:
2611 		return (TD_BADSH);
2612 	}
2613 
2614 	si_p->si_ta_p = sh_p->sh_ta_p;
2615 	si_p->si_sv_addr = sh_p->sh_unique;
2616 	return (TD_OK);
2617 }
2618 
2619 /*
2620  * Given a synchronization handle, fill in the
2621  * information for the synchronization variable into *si_p.
2622  */
2623 #pragma weak td_sync_get_info = __td_sync_get_info
2624 td_err_e
2625 __td_sync_get_info(const td_synchandle_t *sh_p, td_syncinfo_t *si_p)
2626 {
2627 	struct ps_prochandle *ph_p;
2628 	td_err_e return_val;
2629 
2630 	if (si_p == NULL)
2631 		return (TD_ERR);
2632 	(void) memset(si_p, 0, sizeof (*si_p));
2633 	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2634 		return (return_val);
2635 	if (ps_pstop(ph_p) != PS_OK) {
2636 		ph_unlock(sh_p->sh_ta_p);
2637 		return (TD_DBERR);
2638 	}
2639 
2640 	return_val = sync_get_info_common(sh_p, ph_p, si_p);
2641 
2642 	(void) ps_pcontinue(ph_p);
2643 	ph_unlock(sh_p->sh_ta_p);
2644 	return (return_val);
2645 }
2646 
2647 static uint_t
2648 tdb_addr_hash64(uint64_t addr)
2649 {
2650 	uint64_t value60 = (addr >> 4);
2651 	uint32_t value30 = (value60 >> 30) ^ (value60 & 0x3fffffff);
2652 	return ((value30 >> 15) ^ (value30 & 0x7fff));
2653 }
2654 
2655 static uint_t
2656 tdb_addr_hash32(uint64_t addr)
2657 {
2658 	uint32_t value30 = (addr >> 2);		/* 30 bits */
2659 	return ((value30 >> 15) ^ (value30 & 0x7fff));
2660 }
2661 
2662 static td_err_e
2663 read_sync_stats(td_thragent_t *ta_p, psaddr_t hash_table,
2664 	psaddr_t sync_obj_addr, tdb_sync_stats_t *sync_stats)
2665 {
2666 	psaddr_t next_desc;
2667 	uint64_t first;
2668 	uint_t ix;
2669 
2670 	/*
2671 	 * Compute the hash table index from the synch object's address.
2672 	 */
2673 	if (ta_p->model == PR_MODEL_LP64)
2674 		ix = tdb_addr_hash64(sync_obj_addr);
2675 	else
2676 		ix = tdb_addr_hash32(sync_obj_addr);
2677 
2678 	/*
2679 	 * Get the address of the first element in the linked list.
2680 	 */
2681 	if (ps_pdread(ta_p->ph_p, hash_table + ix * sizeof (uint64_t),
2682 	    &first, sizeof (first)) != PS_OK)
2683 		return (TD_DBERR);
2684 
2685 	/*
2686 	 * Search the linked list for an entry for the synch object..
2687 	 */
2688 	for (next_desc = (psaddr_t)first; next_desc != NULL;
2689 	    next_desc = (psaddr_t)sync_stats->next) {
2690 		if (ps_pdread(ta_p->ph_p, next_desc,
2691 		    sync_stats, sizeof (*sync_stats)) != PS_OK)
2692 			return (TD_DBERR);
2693 		if (sync_stats->sync_addr == sync_obj_addr)
2694 			return (TD_OK);
2695 	}
2696 
2697 	(void) memset(sync_stats, 0, sizeof (*sync_stats));
2698 	return (TD_OK);
2699 }
2700 
2701 /*
2702  * Given a synchronization handle, fill in the
2703  * statistics for the synchronization variable into *ss_p.
2704  */
2705 #pragma weak td_sync_get_stats = __td_sync_get_stats
2706 td_err_e
2707 __td_sync_get_stats(const td_synchandle_t *sh_p, td_syncstats_t *ss_p)
2708 {
2709 	struct ps_prochandle *ph_p;
2710 	td_thragent_t *ta_p;
2711 	td_err_e return_val;
2712 	register_sync_t enable;
2713 	psaddr_t hashaddr;
2714 	tdb_sync_stats_t sync_stats;
2715 	size_t ix;
2716 
2717 	if (ss_p == NULL)
2718 		return (TD_ERR);
2719 	(void) memset(ss_p, 0, sizeof (*ss_p));
2720 	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2721 		return (return_val);
2722 	ta_p = sh_p->sh_ta_p;
2723 	if (ps_pstop(ph_p) != PS_OK) {
2724 		ph_unlock(ta_p);
2725 		return (TD_DBERR);
2726 	}
2727 
2728 	if ((return_val = sync_get_info_common(sh_p, ph_p, &ss_p->ss_info))
2729 	    != TD_OK) {
2730 		if (return_val != TD_BADSH)
2731 			goto out;
2732 		/* we can correct TD_BADSH */
2733 		(void) memset(&ss_p->ss_info, 0, sizeof (ss_p->ss_info));
2734 		ss_p->ss_info.si_ta_p = sh_p->sh_ta_p;
2735 		ss_p->ss_info.si_sv_addr = sh_p->sh_unique;
2736 		/* we correct si_type and si_size below */
2737 		return_val = TD_OK;
2738 	}
2739 	if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
2740 	    &enable, sizeof (enable)) != PS_OK) {
2741 		return_val = TD_DBERR;
2742 		goto out;
2743 	}
2744 	if (enable != REGISTER_SYNC_ON)
2745 		goto out;
2746 
2747 	/*
2748 	 * Get the address of the hash table in the target process.
2749 	 */
2750 	if (ta_p->model == PR_MODEL_NATIVE) {
2751 		if (ps_pdread(ph_p, ta_p->uberdata_addr +
2752 		    offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
2753 		    &hashaddr, sizeof (&hashaddr)) != PS_OK) {
2754 			return_val = TD_DBERR;
2755 			goto out;
2756 		}
2757 	} else {
2758 #if defined(_LP64) && defined(_SYSCALL32)
2759 		caddr32_t addr;
2760 
2761 		if (ps_pdread(ph_p, ta_p->uberdata_addr +
2762 		    offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
2763 		    &addr, sizeof (addr)) != PS_OK) {
2764 			return_val = TD_DBERR;
2765 			goto out;
2766 		}
2767 		hashaddr = addr;
2768 #else
2769 		return_val = TD_ERR;
2770 		goto out;
2771 #endif	/* _SYSCALL32 */
2772 	}
2773 
2774 	if (hashaddr == 0)
2775 		return_val = TD_BADSH;
2776 	else
2777 		return_val = read_sync_stats(ta_p, hashaddr,
2778 			sh_p->sh_unique, &sync_stats);
2779 	if (return_val != TD_OK)
2780 		goto out;
2781 
2782 	/*
2783 	 * We have the hash table entry.  Transfer the data to
2784 	 * the td_syncstats_t structure provided by the caller.
2785 	 */
2786 	switch (sync_stats.un.type) {
2787 	case TDB_MUTEX:
2788 	    {
2789 		td_mutex_stats_t *msp = &ss_p->ss_un.mutex;
2790 
2791 		ss_p->ss_info.si_type = TD_SYNC_MUTEX;
2792 		ss_p->ss_info.si_size = sizeof (mutex_t);
2793 		msp->mutex_lock =
2794 			sync_stats.un.mutex.mutex_lock;
2795 		msp->mutex_sleep =
2796 			sync_stats.un.mutex.mutex_sleep;
2797 		msp->mutex_sleep_time =
2798 			sync_stats.un.mutex.mutex_sleep_time;
2799 		msp->mutex_hold_time =
2800 			sync_stats.un.mutex.mutex_hold_time;
2801 		msp->mutex_try =
2802 			sync_stats.un.mutex.mutex_try;
2803 		msp->mutex_try_fail =
2804 			sync_stats.un.mutex.mutex_try_fail;
2805 		if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2806 		    (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2807 		    < ta_p->hash_size * sizeof (thr_hash_table_t))
2808 			msp->mutex_internal =
2809 				ix / sizeof (thr_hash_table_t) + 1;
2810 		break;
2811 	    }
2812 	case TDB_COND:
2813 	    {
2814 		td_cond_stats_t *csp = &ss_p->ss_un.cond;
2815 
2816 		ss_p->ss_info.si_type = TD_SYNC_COND;
2817 		ss_p->ss_info.si_size = sizeof (cond_t);
2818 		csp->cond_wait =
2819 			sync_stats.un.cond.cond_wait;
2820 		csp->cond_timedwait =
2821 			sync_stats.un.cond.cond_timedwait;
2822 		csp->cond_wait_sleep_time =
2823 			sync_stats.un.cond.cond_wait_sleep_time;
2824 		csp->cond_timedwait_sleep_time =
2825 			sync_stats.un.cond.cond_timedwait_sleep_time;
2826 		csp->cond_timedwait_timeout =
2827 			sync_stats.un.cond.cond_timedwait_timeout;
2828 		csp->cond_signal =
2829 			sync_stats.un.cond.cond_signal;
2830 		csp->cond_broadcast =
2831 			sync_stats.un.cond.cond_broadcast;
2832 		if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2833 		    (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2834 		    < ta_p->hash_size * sizeof (thr_hash_table_t))
2835 			csp->cond_internal =
2836 				ix / sizeof (thr_hash_table_t) + 1;
2837 		break;
2838 	    }
2839 	case TDB_RWLOCK:
2840 	    {
2841 		psaddr_t cond_addr;
2842 		tdb_sync_stats_t cond_stats;
2843 		td_rwlock_stats_t *rwsp = &ss_p->ss_un.rwlock;
2844 
2845 		ss_p->ss_info.si_type = TD_SYNC_RWLOCK;
2846 		ss_p->ss_info.si_size = sizeof (rwlock_t);
2847 		rwsp->rw_rdlock =
2848 			sync_stats.un.rwlock.rw_rdlock;
2849 		cond_addr = (psaddr_t)&((rwlock_t *)sh_p->sh_unique)->readercv;
2850 		if (read_sync_stats(ta_p, hashaddr, cond_addr, &cond_stats)
2851 		    == TD_OK) {
2852 			rwsp->rw_rdlock_sleep =
2853 				cond_stats.un.cond.cond_wait;
2854 			rwsp->rw_rdlock_sleep_time =
2855 				cond_stats.un.cond.cond_wait_sleep_time;
2856 		}
2857 		rwsp->rw_rdlock_try =
2858 			sync_stats.un.rwlock.rw_rdlock_try;
2859 		rwsp->rw_rdlock_try_fail =
2860 			sync_stats.un.rwlock.rw_rdlock_try_fail;
2861 		rwsp->rw_wrlock =
2862 			sync_stats.un.rwlock.rw_wrlock;
2863 		cond_addr = (psaddr_t)&((rwlock_t *)sh_p->sh_unique)->writercv;
2864 		if (read_sync_stats(ta_p, hashaddr, cond_addr, &cond_stats)
2865 		    == TD_OK) {
2866 			rwsp->rw_wrlock_sleep =
2867 				cond_stats.un.cond.cond_wait;
2868 			rwsp->rw_wrlock_sleep_time =
2869 				cond_stats.un.cond.cond_wait_sleep_time;
2870 		}
2871 		rwsp->rw_wrlock_hold_time =
2872 			sync_stats.un.rwlock.rw_wrlock_hold_time;
2873 		rwsp->rw_wrlock_try =
2874 			sync_stats.un.rwlock.rw_wrlock_try;
2875 		rwsp->rw_wrlock_try_fail =
2876 			sync_stats.un.rwlock.rw_wrlock_try_fail;
2877 		break;
2878 	    }
2879 	case TDB_SEMA:
2880 	    {
2881 		td_sema_stats_t *ssp = &ss_p->ss_un.sema;
2882 
2883 		ss_p->ss_info.si_type = TD_SYNC_SEMA;
2884 		ss_p->ss_info.si_size = sizeof (sema_t);
2885 		ssp->sema_wait =
2886 			sync_stats.un.sema.sema_wait;
2887 		ssp->sema_wait_sleep =
2888 			sync_stats.un.sema.sema_wait_sleep;
2889 		ssp->sema_wait_sleep_time =
2890 			sync_stats.un.sema.sema_wait_sleep_time;
2891 		ssp->sema_trywait =
2892 			sync_stats.un.sema.sema_trywait;
2893 		ssp->sema_trywait_fail =
2894 			sync_stats.un.sema.sema_trywait_fail;
2895 		ssp->sema_post =
2896 			sync_stats.un.sema.sema_post;
2897 		ssp->sema_max_count =
2898 			sync_stats.un.sema.sema_max_count;
2899 		ssp->sema_min_count =
2900 			sync_stats.un.sema.sema_min_count;
2901 		break;
2902 	    }
2903 	default:
2904 		return_val = TD_BADSH;
2905 		break;
2906 	}
2907 
2908 out:
2909 	(void) ps_pcontinue(ph_p);
2910 	ph_unlock(ta_p);
2911 	return (return_val);
2912 }
2913 
2914 /*
2915  * Change the state of a synchronization variable.
2916  *	1) mutex lock state set to value
2917  *	2) semaphore's count set to value
2918  *	3) writer's lock set to value
2919  *	4) reader's lock number of readers set to value
2920  * Currently unused by dbx.
2921  */
2922 #pragma weak td_sync_setstate = __td_sync_setstate
2923 td_err_e
2924 __td_sync_setstate(const td_synchandle_t *sh_p, long lvalue)
2925 {
2926 	struct ps_prochandle *ph_p;
2927 	int		trunc = 0;
2928 	td_err_e	return_val;
2929 	td_so_un_t	generic_so;
2930 	int		value = (int)lvalue;
2931 
2932 	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2933 		return (return_val);
2934 	if (ps_pstop(ph_p) != PS_OK) {
2935 		ph_unlock(sh_p->sh_ta_p);
2936 		return (TD_DBERR);
2937 	}
2938 
2939 	/*
2940 	 * Read the synch. variable information.
2941 	 * First attempt to read the whole union and if that fails
2942 	 * fall back to reading only the smallest member, the condvar.
2943 	 */
2944 	if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so,
2945 	    sizeof (generic_so)) != PS_OK) {
2946 		trunc = 1;
2947 		if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
2948 		    sizeof (generic_so.condition)) != PS_OK) {
2949 			(void) ps_pcontinue(ph_p);
2950 			ph_unlock(sh_p->sh_ta_p);
2951 			return (TD_DBERR);
2952 		}
2953 	}
2954 
2955 	/*
2956 	 * Set the new value in the sync. variable, read the synch. variable
2957 	 * information. from the process, reset its value and write it back.
2958 	 */
2959 	switch (generic_so.condition.mutex_magic) {
2960 	case MUTEX_MAGIC:
2961 		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2962 		    &generic_so.lock, sizeof (generic_so.lock)) != PS_OK) {
2963 			return_val = TD_DBERR;
2964 			break;
2965 		}
2966 		generic_so.lock.mutex_lockw = (uint8_t)value;
2967 		if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.lock,
2968 		    sizeof (generic_so.lock)) != PS_OK)
2969 			return_val = TD_DBERR;
2970 		break;
2971 	case SEMA_MAGIC:
2972 		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2973 		    &generic_so.semaphore, sizeof (generic_so.semaphore))
2974 		    != PS_OK) {
2975 			return_val = TD_DBERR;
2976 			break;
2977 		}
2978 		generic_so.semaphore.count = value;
2979 		if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.semaphore,
2980 		    sizeof (generic_so.semaphore)) != PS_OK)
2981 			return_val = TD_DBERR;
2982 		break;
2983 	case COND_MAGIC:
2984 		/* Operation not supported on a condition variable */
2985 		return_val = TD_ERR;
2986 		break;
2987 	case RWL_MAGIC:
2988 		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2989 		    &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK) {
2990 			return_val = TD_DBERR;
2991 			break;
2992 		}
2993 		if (generic_so.rwlock.rwlock_type == USYNC_PROCESS) {
2994 			uint32_t *rwstate =
2995 			    (uint32_t *)&generic_so.rwlock.readers;
2996 			if (value < 0)
2997 				*rwstate = URW_WRITE_LOCKED;
2998 			else if (value > 0)
2999 				*rwstate = (value & URW_READERS_MASK);
3000 			else
3001 				*rwstate = 0;
3002 		} else
3003 			generic_so.rwlock.readers = value;
3004 
3005 		if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.rwlock,
3006 		    sizeof (generic_so.rwlock)) != PS_OK)
3007 			return_val = TD_DBERR;
3008 		break;
3009 	default:
3010 		/* Bad sync. object type */
3011 		return_val = TD_BADSH;
3012 		break;
3013 	}
3014 
3015 	(void) ps_pcontinue(ph_p);
3016 	ph_unlock(sh_p->sh_ta_p);
3017 	return (return_val);
3018 }
3019 
3020 typedef struct {
3021 	td_thr_iter_f	*waiter_cb;
3022 	psaddr_t	sync_obj_addr;
3023 	uint16_t	sync_magic;
3024 	void		*waiter_cb_arg;
3025 	td_err_e	errcode;
3026 } waiter_cb_ctl_t;
3027 
3028 static int
3029 waiters_cb(const td_thrhandle_t *th_p, void *arg)
3030 {
3031 	td_thragent_t	*ta_p = th_p->th_ta_p;
3032 	struct ps_prochandle *ph_p = ta_p->ph_p;
3033 	waiter_cb_ctl_t	*wcb = arg;
3034 	caddr_t		wchan;
3035 
3036 	if (ta_p->model == PR_MODEL_NATIVE) {
3037 		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
3038 
3039 		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
3040 		    &wchan, sizeof (wchan)) != PS_OK) {
3041 			wcb->errcode = TD_DBERR;
3042 			return (1);
3043 		}
3044 	} else {
3045 #if defined(_LP64) && defined(_SYSCALL32)
3046 		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
3047 		caddr32_t wchan32;
3048 
3049 		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
3050 		    &wchan32, sizeof (wchan32)) != PS_OK) {
3051 			wcb->errcode = TD_DBERR;
3052 			return (1);
3053 		}
3054 		wchan = (caddr_t)(uintptr_t)wchan32;
3055 #else
3056 		wcb->errcode = TD_ERR;
3057 		return (1);
3058 #endif	/* _SYSCALL32 */
3059 	}
3060 
3061 	if (wchan == NULL)
3062 		return (0);
3063 
3064 	if (wchan == (caddr_t)wcb->sync_obj_addr)
3065 		return ((*wcb->waiter_cb)(th_p, wcb->waiter_cb_arg));
3066 
3067 	return (0);
3068 }
3069 
3070 /*
3071  * For a given synchronization variable, iterate over the
3072  * set of waiting threads.  The call back function is passed
3073  * two parameters, a pointer to a thread handle and a pointer
3074  * to extra call back data.
3075  */
3076 #pragma weak td_sync_waiters = __td_sync_waiters
3077 td_err_e
3078 __td_sync_waiters(const td_synchandle_t *sh_p, td_thr_iter_f *cb, void *cb_data)
3079 {
3080 	struct ps_prochandle *ph_p;
3081 	waiter_cb_ctl_t	wcb;
3082 	td_err_e	return_val;
3083 
3084 	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
3085 		return (return_val);
3086 	if (ps_pdread(ph_p,
3087 	    (psaddr_t)&((mutex_t *)sh_p->sh_unique)->mutex_magic,
3088 	    (caddr_t)&wcb.sync_magic, sizeof (wcb.sync_magic)) != PS_OK) {
3089 		ph_unlock(sh_p->sh_ta_p);
3090 		return (TD_DBERR);
3091 	}
3092 	ph_unlock(sh_p->sh_ta_p);
3093 
3094 	switch (wcb.sync_magic) {
3095 	case MUTEX_MAGIC:
3096 	case COND_MAGIC:
3097 	case SEMA_MAGIC:
3098 	case RWL_MAGIC:
3099 		break;
3100 	default:
3101 		return (TD_BADSH);
3102 	}
3103 
3104 	wcb.waiter_cb = cb;
3105 	wcb.sync_obj_addr = sh_p->sh_unique;
3106 	wcb.waiter_cb_arg = cb_data;
3107 	wcb.errcode = TD_OK;
3108 	return_val = __td_ta_thr_iter(sh_p->sh_ta_p, waiters_cb, &wcb,
3109 		TD_THR_SLEEP, TD_THR_LOWEST_PRIORITY,
3110 		TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
3111 
3112 	if (return_val != TD_OK)
3113 		return (return_val);
3114 
3115 	return (wcb.errcode);
3116 }
3117