/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <unistd.h>
#include <thr_uberdata.h>
#include <thread_db.h>
#include <libc_int.h>

/*
 * Private structures.
 */

typedef union {
	mutex_t		lock;
	rwlock_t	rwlock;
	sema_t		semaphore;
	cond_t		condition;
} td_so_un_t;

struct td_thragent {
	rwlock_t	rwlock;
	struct ps_prochandle *ph_p;
	int		initialized;
	int		sync_tracking;
	int		model;
	int		primary_map;
	psaddr_t	bootstrap_addr;
	psaddr_t	uberdata_addr;
	psaddr_t	tdb_eventmask_addr;
	psaddr_t	tdb_register_sync_addr;
	psaddr_t	tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
	psaddr_t	hash_table_addr;
	int		hash_size;
	lwpid_t		single_lwpid;
	psaddr_t	single_ulwp_addr;
};

/*
 * This is the name of the variable in libc that contains
 * the uberdata address that we will need.
 */
#define	TD_BOOTSTRAP_NAME	"_tdb_bootstrap"
/*
 * This is the actual name of uberdata, used in the event
 * that tdb_bootstrap has not yet been initialized.
 */
#define	TD_UBERDATA_NAME	"_uberdata"
/*
 * The library name should end with ".so.1", but older versions of
 * dbx expect the unadorned name and malfunction if ".1" is specified.
 * Unfortunately, if ".1" is not specified, mdb malfunctions when it
 * is applied to another instance of itself (due to the presence of
 * /usr/lib/mdb/proc/libc.so).  So we try it both ways.
 */
#define	TD_LIBRARY_NAME		"libc.so"
#define	TD_LIBRARY_NAME_1	"libc.so.1"

td_err_e __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p);

td_err_e __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
	void *cbdata_p, td_thr_state_e state, int ti_pri,
	sigset_t *ti_sigmask_p, unsigned ti_user_flags);

/*
 * Initialize threads debugging interface.
 */
#pragma weak td_init = __td_init
td_err_e
__td_init()
{
	return (TD_OK);
}

/*
 * This function does nothing, and never did.
 * But the symbol is in the ABI, so we can't delete it.
 */
#pragma weak td_log = __td_log
void
__td_log()
{
}

/*
 * Short-cut to read just the hash table size from the process,
 * to avoid repeatedly reading the full uberdata structure when
 * dealing with a single-threaded process.
 */
static uint_t
td_read_hash_size(td_thragent_t *ta_p)
{
	psaddr_t addr;
	uint_t hash_size;

	switch (ta_p->initialized) {
	default:	/* uninitialized */
		return (0);
	case 1:		/* partially initialized */
		break;
	case 2:		/* fully initialized */
		return (ta_p->hash_size);
	}

	if (ta_p->model == PR_MODEL_NATIVE) {
		addr = ta_p->uberdata_addr + offsetof(uberdata_t, hash_size);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		addr = ta_p->uberdata_addr + offsetof(uberdata32_t, hash_size);
#else
		addr = 0;
#endif
	}
	if (ps_pdread(ta_p->ph_p, addr, &hash_size, sizeof (hash_size))
	    != PS_OK)
		return (0);
	return (hash_size);
}

static td_err_e
td_read_uberdata(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p = ta_p->ph_p;

	if (ta_p->model == PR_MODEL_NATIVE) {
		uberdata_t uberdata;

		if (ps_pdread(ph_p, ta_p->uberdata_addr,
		    &uberdata, sizeof (uberdata)) != PS_OK)
			return (TD_DBERR);
		ta_p->primary_map = uberdata.primary_map;
		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
			offsetof(uberdata_t, tdb.tdb_ev_global_mask);
		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
			offsetof(uberdata_t, uberflags.uf_tdb_register_sync);
		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
		ta_p->hash_size = uberdata.hash_size;
		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
		    ta_p->tdb_events, sizeof (ta_p->tdb_events)) != PS_OK)
			return (TD_DBERR);

	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		uberdata32_t uberdata;
		caddr32_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
		int i;

		if (ps_pdread(ph_p, ta_p->uberdata_addr,
		    &uberdata, sizeof (uberdata)) != PS_OK)
			return (TD_DBERR);
		ta_p->primary_map = uberdata.primary_map;
		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
			offsetof(uberdata32_t, tdb.tdb_ev_global_mask);
		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
			offsetof(uberdata32_t, uberflags.uf_tdb_register_sync);
		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
		ta_p->hash_size = uberdata.hash_size;
		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
		    tdb_events, sizeof (tdb_events)) != PS_OK)
			return (TD_DBERR);
		for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++)
			ta_p->tdb_events[i] = tdb_events[i];
#else
		return (TD_DBERR);
#endif
	}
	if (ta_p->hash_size != 1) {	/* multi-threaded */
		ta_p->initialized = 2;
		ta_p->single_lwpid = 0;
		ta_p->single_ulwp_addr = NULL;
	} else {			/* single-threaded */
		ta_p->initialized = 1;
		/*
		 * Get the address and lwpid of the single thread/LWP.
		 * It may not be ulwp_one if this is a child of fork1().
		 */
		if (ta_p->model == PR_MODEL_NATIVE) {
			thr_hash_table_t head;
			lwpid_t lwpid = 0;

			if (ps_pdread(ph_p, ta_p->hash_table_addr,
			    &head, sizeof (head)) != PS_OK)
				return (TD_DBERR);
			if ((psaddr_t)head.hash_bucket == NULL)
				ta_p->initialized = 0;
			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
			    offsetof(ulwp_t, ul_lwpid),
			    &lwpid, sizeof (lwpid)) != PS_OK)
				return (TD_DBERR);
			ta_p->single_lwpid = lwpid;
			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
		} else {
#if defined(_LP64) && defined(_SYSCALL32)
			thr_hash_table32_t head;
			lwpid_t lwpid = 0;

			if (ps_pdread(ph_p, ta_p->hash_table_addr,
			    &head, sizeof (head)) != PS_OK)
				return (TD_DBERR);
			if ((psaddr_t)head.hash_bucket == NULL)
				ta_p->initialized = 0;
			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
			    offsetof(ulwp32_t, ul_lwpid),
			    &lwpid, sizeof (lwpid)) != PS_OK)
				return (TD_DBERR);
			ta_p->single_lwpid = lwpid;
			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
#else
			return (TD_DBERR);
#endif
		}
	}
	if (!ta_p->primary_map)
		ta_p->initialized = 0;
	return (TD_OK);
}

static td_err_e
td_read_bootstrap_data(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p = ta_p->ph_p;
	psaddr_t bootstrap_addr;
	psaddr_t uberdata_addr;
	ps_err_e db_return;
	td_err_e return_val;
	int do_1;

	switch (ta_p->initialized) {
	case 2:			/* fully initialized */
		return (TD_OK);
	case 1:			/* partially initialized */
		if (td_read_hash_size(ta_p) == 1)
			return (TD_OK);
		return (td_read_uberdata(ta_p));
	}

	/*
	 * Uninitialized -- do the startup work.
	 * We set ta_p->initialized to -1 to cut off recursive calls
	 * into libc_db by code in the provider of ps_pglobal_lookup().
	 */
	do_1 = 0;
	ta_p->initialized = -1;
	db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME,
	    TD_BOOTSTRAP_NAME, &bootstrap_addr);
	if (db_return == PS_NOSYM) {
		do_1 = 1;
		db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME_1,
		    TD_BOOTSTRAP_NAME, &bootstrap_addr);
	}
	if (db_return == PS_NOSYM)	/* libc is not linked yet */
		return (TD_NOLIBTHREAD);
	if (db_return != PS_OK)
		return (TD_ERR);
	db_return = ps_pglobal_lookup(ph_p,
	    do_1? TD_LIBRARY_NAME_1 : TD_LIBRARY_NAME,
	    TD_UBERDATA_NAME, &uberdata_addr);
	if (db_return == PS_NOSYM)	/* libc is not linked yet */
		return (TD_NOLIBTHREAD);
	if (db_return != PS_OK)
		return (TD_ERR);

	/*
	 * Read the uberdata address into the thread agent structure.
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		psaddr_t psaddr;
		if (ps_pdread(ph_p, bootstrap_addr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if ((ta_p->bootstrap_addr = psaddr) == NULL)
			psaddr = uberdata_addr;
		else if (ps_pdread(ph_p, psaddr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		ta_p->uberdata_addr = psaddr;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		caddr32_t psaddr;
		if (ps_pdread(ph_p, bootstrap_addr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if ((ta_p->bootstrap_addr = (psaddr_t)psaddr) == NULL)
			psaddr = (caddr32_t)uberdata_addr;
		else if (ps_pdread(ph_p, (psaddr_t)psaddr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		ta_p->uberdata_addr = (psaddr_t)psaddr;
#else
		return (TD_DBERR);
#endif	/* _SYSCALL32 */
	}

	if ((return_val = td_read_uberdata(ta_p)) != TD_OK)
		return (return_val);
	if (ta_p->bootstrap_addr == NULL)
		ta_p->initialized = 0;
	return (TD_OK);
}

#pragma weak ps_kill
#pragma weak ps_lrolltoaddr

/*
 * Allocate a new agent process handle ("thread agent").
 */
#pragma weak td_ta_new = __td_ta_new
td_err_e
__td_ta_new(struct ps_prochandle *ph_p, td_thragent_t **ta_pp)
{
	td_thragent_t *ta_p;
	int model;
	td_err_e return_val = TD_OK;

	if (ph_p == NULL)
		return (TD_BADPH);
	if (ta_pp == NULL)
		return (TD_ERR);
	*ta_pp = NULL;
	if (ps_pstop(ph_p) != PS_OK)
		return (TD_DBERR);
	/*
	 * ps_pdmodel might not be defined if this is an older client.
	 * Make it a weak symbol and test if it exists before calling.
	 */
#pragma weak ps_pdmodel
	if (ps_pdmodel == NULL) {
		model = PR_MODEL_NATIVE;
	} else if (ps_pdmodel(ph_p, &model) != PS_OK) {
		(void) ps_pcontinue(ph_p);
		return (TD_ERR);
	}
	if ((ta_p = malloc(sizeof (*ta_p))) == NULL) {
		(void) ps_pcontinue(ph_p);
		return (TD_MALLOC);
	}

	/*
	 * Initialize the agent process handle.
	 * Pick up the symbol value we need from the target process.
	 */
	(void) memset(ta_p, 0, sizeof (*ta_p));
	ta_p->ph_p = ph_p;
	(void) rwlock_init(&ta_p->rwlock, USYNC_THREAD, NULL);
	ta_p->model = model;
	return_val = td_read_bootstrap_data(ta_p);

	/*
	 * Because the old libthread_db enabled lock tracking by default,
	 * we must also do it.  However, we do it only if the application
	 * provides the ps_kill() and ps_lrolltoaddr() interfaces.
	 * (dbx provides the ps_kill() and ps_lrolltoaddr() interfaces.)
	 */
	if (return_val == TD_OK && ps_kill != NULL && ps_lrolltoaddr != NULL) {
		register_sync_t oldenable;
		register_sync_t enable = REGISTER_SYNC_ENABLE;
		psaddr_t psaddr = ta_p->tdb_register_sync_addr;

		if (ps_pdread(ph_p, psaddr,
		    &oldenable, sizeof (oldenable)) != PS_OK)
			return_val = TD_DBERR;
		else if (oldenable != REGISTER_SYNC_OFF ||
		    ps_pdwrite(ph_p, psaddr,
		    &enable, sizeof (enable)) != PS_OK) {
			/*
			 * Lock tracking was already enabled or we
			 * failed to enable it, probably because we
			 * are examining a core file.  In either case
			 * set the sync_tracking flag non-zero to
			 * indicate that we should not attempt to
			 * disable lock tracking when we delete the
			 * agent process handle in td_ta_delete().
			 */
			ta_p->sync_tracking = 1;
		}
	}

	if (return_val == TD_OK)
		*ta_pp = ta_p;
	else
		free(ta_p);

	(void) ps_pcontinue(ph_p);
	return (return_val);
}
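
/*
 * Illustrative sketch (not part of this library; compiled out):
 * the intended pairing of td_ta_new() with td_ta_delete() in a
 * debugger client.  "client_ph" stands for whatever prochandle the
 * client's proc_service implementation provides; the name is made up.
 */
#if 0
static td_err_e
example_attach_detach(struct ps_prochandle *client_ph)
{
	td_thragent_t *ta;
	td_err_e err;

	if ((err = td_ta_new(client_ph, &ta)) != TD_OK)
		return (err);	/* e.g. TD_NOLIBTHREAD if libc isn't mapped */
	/* ... use the agent ... */
	return (td_ta_delete(ta));
}
#endif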

/*
 * Utility function to grab the readers lock and return the prochandle,
 * given an agent process handle.  Performs standard error checking.
 * Returns non-NULL with the lock held, or NULL with the lock not held.
 */
static struct ps_prochandle *
ph_lock_ta(td_thragent_t *ta_p, td_err_e *err)
{
	struct ps_prochandle *ph_p = NULL;
	td_err_e error;

	if (ta_p == NULL || ta_p->initialized == -1) {
		*err = TD_BADTA;
	} else if (rw_rdlock(&ta_p->rwlock) != 0) {	/* can't happen? */
		*err = TD_BADTA;
	} else if ((ph_p = ta_p->ph_p) == NULL) {
		(void) rw_unlock(&ta_p->rwlock);
		*err = TD_BADPH;
	} else if (ta_p->initialized != 2 &&
	    (error = td_read_bootstrap_data(ta_p)) != TD_OK) {
		(void) rw_unlock(&ta_p->rwlock);
		ph_p = NULL;
		*err = error;
	} else {
		*err = TD_OK;
	}

	return (ph_p);
}

/*
 * Utility function to grab the readers lock and return the prochandle,
 * given an agent thread handle.  Performs standard error checking.
 * Returns non-NULL with the lock held, or NULL with the lock not held.
 */
static struct ps_prochandle *
ph_lock_th(const td_thrhandle_t *th_p, td_err_e *err)
{
	if (th_p == NULL || th_p->th_unique == NULL) {
		*err = TD_BADTH;
		return (NULL);
	}
	return (ph_lock_ta(th_p->th_ta_p, err));
}

/*
 * Utility function to grab the readers lock and return the prochandle,
 * given a synchronization object handle.  Performs standard error checking.
 * Returns non-NULL with the lock held, or NULL with the lock not held.
 */
static struct ps_prochandle *
ph_lock_sh(const td_synchandle_t *sh_p, td_err_e *err)
{
	if (sh_p == NULL || sh_p->sh_unique == NULL) {
		*err = TD_BADSH;
		return (NULL);
	}
	return (ph_lock_ta(sh_p->sh_ta_p, err));
}

/*
 * Unlock the agent process handle obtained from ph_lock_*().
 */
static void
ph_unlock(td_thragent_t *ta_p)
{
	(void) rw_unlock(&ta_p->rwlock);
}
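
/*
 * Illustrative sketch (not part of this library; compiled out):
 * the locking pattern that the exported entry points in this file
 * follow when using the ph_lock_*() / ph_unlock() utilities.
 */
#if 0
static td_err_e
example_locked_operation(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);	/* the lock is not held here */
	/* ... read or write the target process via ph_p ... */
	ph_unlock(ta_p);		/* the lock was held until here */
	return (TD_OK);
}
#endif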

/*
 * De-allocate an agent process handle,
 * releasing all related resources.
 *
 * XXX -- This is hopelessly broken ---
 * Storage for the thread agent is not deallocated.  The prochandle
 * in the thread agent is set to NULL so that future uses of
 * the thread agent can be detected and an error value returned.
 * All functions in the external user interface that make
 * use of the thread agent are expected
 * to check for a NULL prochandle in the thread agent.
 * All such functions are also expected to obtain a
 * reader lock on the thread agent while using it.
 */
#pragma weak td_ta_delete = __td_ta_delete
td_err_e
__td_ta_delete(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p;

	/*
	 * This is the only place we grab the writer lock.
	 * We are going to NULL out the prochandle.
	 */
	if (ta_p == NULL || rw_wrlock(&ta_p->rwlock) != 0)
		return (TD_BADTA);
	if ((ph_p = ta_p->ph_p) == NULL) {
		(void) rw_unlock(&ta_p->rwlock);
		return (TD_BADPH);
	}
	/*
	 * If synch. tracking was disabled when td_ta_new() was called and
	 * if td_ta_sync_tracking_enable() was never called, then disable
	 * synch. tracking (it was enabled by default in td_ta_new()).
	 */
	if (ta_p->sync_tracking == 0 &&
	    ps_kill != NULL && ps_lrolltoaddr != NULL) {
		register_sync_t enable = REGISTER_SYNC_DISABLE;

		(void) ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
		    &enable, sizeof (enable));
	}
	ta_p->ph_p = NULL;
	(void) rw_unlock(&ta_p->rwlock);
	return (TD_OK);
}

/*
 * Map an agent process handle to a client prochandle.
 * Currently unused by dbx.
 */
#pragma weak td_ta_get_ph = __td_ta_get_ph
td_err_e
__td_ta_get_ph(td_thragent_t *ta_p, struct ps_prochandle **ph_pp)
{
	td_err_e return_val;

	if (ph_pp != NULL)	/* protect stupid callers */
		*ph_pp = NULL;
	if (ph_pp == NULL)
		return (TD_ERR);
	if ((*ph_pp = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	ph_unlock(ta_p);
	return (TD_OK);
}

/*
 * Set the process's suggested concurrency level.
 * This is a no-op in a one-level model.
 * Currently unused by dbx.
 */
#pragma weak td_ta_setconcurrency = __td_ta_setconcurrency
/* ARGSUSED1 */
td_err_e
__td_ta_setconcurrency(const td_thragent_t *ta_p, int level)
{
	if (ta_p == NULL)
		return (TD_BADTA);
	if (ta_p->ph_p == NULL)
		return (TD_BADPH);
	return (TD_OK);
}

/*
 * Get the number of threads in the process.
 */
#pragma weak td_ta_get_nthreads = __td_ta_get_nthreads
td_err_e
__td_ta_get_nthreads(td_thragent_t *ta_p, int *nthread_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	int nthreads;
	int nzombies;
	psaddr_t nthreads_addr;
	psaddr_t nzombies_addr;

	if (ta_p->model == PR_MODEL_NATIVE) {
		nthreads_addr = ta_p->uberdata_addr +
			offsetof(uberdata_t, nthreads);
		nzombies_addr = ta_p->uberdata_addr +
			offsetof(uberdata_t, nzombies);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		nthreads_addr = ta_p->uberdata_addr +
			offsetof(uberdata32_t, nthreads);
		nzombies_addr = ta_p->uberdata_addr +
			offsetof(uberdata32_t, nzombies);
#else
		nthreads_addr = 0;
		nzombies_addr = 0;
#endif	/* _SYSCALL32 */
	}

	if (nthread_p == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pdread(ph_p, nthreads_addr, &nthreads, sizeof (int)) != PS_OK)
		return_val = TD_DBERR;
	if (ps_pdread(ph_p, nzombies_addr, &nzombies, sizeof (int)) != PS_OK)
		return_val = TD_DBERR;
	ph_unlock(ta_p);
	if (return_val == TD_OK)
		*nthread_p = nthreads + nzombies;
	return (return_val);
}

typedef struct {
	thread_t	tid;
	int		found;
	td_thrhandle_t	th;
} td_mapper_param_t;

/*
 * Check the value in data against the thread id.
 * If it matches, return 1 to terminate iterations.
 * This function is used by td_ta_map_id2thr() to map a tid to a thread handle.
 */
static int
td_mapper_id2thr(td_thrhandle_t *th_p, td_mapper_param_t *data)
{
	td_thrinfo_t ti;

	if (__td_thr_get_info(th_p, &ti) == TD_OK &&
	    data->tid == ti.ti_tid) {
		data->found = 1;
		data->th = *th_p;
		return (1);
	}
	return (0);
}

/*
 * Given a thread identifier, return the corresponding thread handle.
 */
#pragma weak td_ta_map_id2thr = __td_ta_map_id2thr
td_err_e
__td_ta_map_id2thr(td_thragent_t *ta_p, thread_t tid,
	td_thrhandle_t *th_p)
{
	td_err_e		return_val;
	td_mapper_param_t	data;

	if (th_p != NULL &&	/* optimize for a single thread */
	    ta_p != NULL &&
	    ta_p->initialized == 1 &&
	    (td_read_hash_size(ta_p) == 1 ||
	    td_read_uberdata(ta_p) == TD_OK) &&
	    ta_p->initialized == 1 &&
	    ta_p->single_lwpid == tid) {
		th_p->th_ta_p = ta_p;
		if ((th_p->th_unique = ta_p->single_ulwp_addr) == 0)
			return (TD_NOTHR);
		return (TD_OK);
	}

	/*
	 * LOCKING EXCEPTION - Locking is not required here because
	 * the locking and checking will be done in __td_ta_thr_iter.
	 */

	if (ta_p == NULL)
		return (TD_BADTA);
	if (th_p == NULL)
		return (TD_BADTH);
	if (tid == 0)
		return (TD_NOTHR);

	data.tid = tid;
	data.found = 0;
	return_val = __td_ta_thr_iter(ta_p,
		(td_thr_iter_f *)td_mapper_id2thr, (void *)&data,
		TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
		TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
	if (return_val == TD_OK) {
		if (data.found == 0)
			return_val = TD_NOTHR;
		else
			*th_p = data.th;
	}

	return (return_val);
}
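
/*
 * Illustrative sketch (not part of this library; compiled out):
 * mapping a thread id to a handle and reading its information back.
 */
#if 0
static void
example_map_tid(td_thragent_t *ta, thread_t tid)
{
	td_thrhandle_t th;
	td_thrinfo_t ti;

	if (td_ta_map_id2thr(ta, tid, &th) == TD_OK &&
	    td_thr_get_info(&th, &ti) == TD_OK) {
		/* ti.ti_tid == tid at this point */
	}
}
#endif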

/*
 * Map the address of a synchronization object to a sync. object handle.
 */
#pragma weak td_ta_map_addr2sync = __td_ta_map_addr2sync
td_err_e
__td_ta_map_addr2sync(td_thragent_t *ta_p, psaddr_t addr, td_synchandle_t *sh_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	uint16_t sync_magic;

	if (sh_p == NULL)
		return (TD_BADSH);
	if (addr == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	/*
	 * Check the magic number of the sync. object to make sure it's valid.
	 * The magic number is at the same offset for all sync. objects.
	 */
	if (ps_pdread(ph_p, (psaddr_t)&((mutex_t *)addr)->mutex_magic,
	    &sync_magic, sizeof (sync_magic)) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_BADSH);
	}
	ph_unlock(ta_p);
	if (sync_magic != MUTEX_MAGIC && sync_magic != COND_MAGIC &&
	    sync_magic != SEMA_MAGIC && sync_magic != RWL_MAGIC)
		return (TD_BADSH);
	/*
	 * Just fill in the appropriate fields of the sync. handle.
	 */
	sh_p->sh_ta_p = (td_thragent_t *)ta_p;
	sh_p->sh_unique = addr;
	return (TD_OK);
}
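
/*
 * Illustrative sketch (not part of this library; compiled out):
 * obtaining a sync. handle for a mutex in the target process.  Because
 * of the magic-number check above, the object must already have been
 * initialized by the target.  "mutex_addr" is a hypothetical
 * target-process address known to the client.
 */
#if 0
static td_err_e
example_map_mutex(td_thragent_t *ta, psaddr_t mutex_addr)
{
	td_synchandle_t sh;

	return (td_ta_map_addr2sync(ta, mutex_addr, &sh));
}
#endif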

/*
 * Iterate over the set of global TSD keys.
 * The call back function is called with three arguments,
 * a key, a pointer to the destructor function, and the cbdata pointer.
 * Currently unused by dbx.
 */
#pragma weak td_ta_tsd_iter = __td_ta_tsd_iter
td_err_e
__td_ta_tsd_iter(td_thragent_t *ta_p, td_key_iter_f *cb, void *cbdata_p)
{
	struct ps_prochandle *ph_p;
	td_err_e	return_val;
	int		key;
	int		numkeys;
	psaddr_t	dest_addr;
	psaddr_t	*destructors = NULL;
	PFrV		destructor;

	if (cb == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	if (ta_p->model == PR_MODEL_NATIVE) {
		tsd_metadata_t tsdm;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else {
			numkeys = tsdm.tsdm_nused;
			dest_addr = (psaddr_t)tsdm.tsdm_destro;
			if (numkeys > 0)
				destructors =
				    malloc(numkeys * sizeof (psaddr_t));
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		tsd_metadata32_t tsdm;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else {
			numkeys = tsdm.tsdm_nused;
			dest_addr = (psaddr_t)tsdm.tsdm_destro;
			if (numkeys > 0)
				destructors =
				    malloc(numkeys * sizeof (caddr32_t));
		}
#else
		return_val = TD_DBERR;
#endif	/* _SYSCALL32 */
	}

	if (return_val != TD_OK || numkeys <= 0) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (return_val);
	}

	if (destructors == NULL)
		return_val = TD_MALLOC;
	else if (ta_p->model == PR_MODEL_NATIVE) {
		if (ps_pdread(ph_p, dest_addr,
		    destructors, numkeys * sizeof (psaddr_t)) != PS_OK)
			return_val = TD_DBERR;
		else {
			for (key = 1; key < numkeys; key++) {
				destructor = (PFrV)destructors[key];
				if (destructor != TSD_UNALLOCATED &&
				    (*cb)(key, destructor, cbdata_p))
					break;
			}
		}
#if defined(_LP64) && defined(_SYSCALL32)
	} else {
		caddr32_t *destructors32 = (caddr32_t *)destructors;
		caddr32_t destruct32;

		if (ps_pdread(ph_p, dest_addr,
		    destructors32, numkeys * sizeof (caddr32_t)) != PS_OK)
			return_val = TD_DBERR;
		else {
			for (key = 1; key < numkeys; key++) {
				destruct32 = destructors32[key];
				if (destruct32 != (caddr32_t)TSD_UNALLOCATED &&
				    (*cb)(key, (PFrV)(uintptr_t)destruct32,
				    cbdata_p))
					break;
			}
		}
#endif	/* _SYSCALL32 */
	}

	if (destructors)
		free(destructors);
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}

int
sigequalset(const sigset_t *s1, const sigset_t *s2)
{
	return (s1->__sigbits[0] == s2->__sigbits[0] &&
		s1->__sigbits[1] == s2->__sigbits[1] &&
		s1->__sigbits[2] == s2->__sigbits[2] &&
		s1->__sigbits[3] == s2->__sigbits[3]);
}

/*
 * Description:
 *   Iterate over all threads.  For each thread call
 * the function pointed to by "cb" with a pointer
 * to a thread handle, and a pointer to data which
 * can be NULL.  Only call td_thr_iter_f() on threads
 * which match the properties of state, ti_pri,
 * ti_sigmask_p, and ti_user_flags.  If cb returns
 * a non-zero value, terminate iterations.
 *
 * Input:
 *   *ta_p - thread agent
 *   *cb - call back function defined by the user.
 * td_thr_iter_f() takes a thread handle and
 * cbdata_p as parameters.
 *   cbdata_p - parameter for td_thr_iter_f().
 *
 *   state - state of threads of interest.  A value of
 * TD_THR_ANY_STATE from enum td_thr_state_e
 * does not restrict iterations by state.
 *   ti_pri - lower bound of priorities of threads of
 * interest.  A value of TD_THR_LOWEST_PRIORITY
 * defined in thread_db.h does not restrict
 * iterations by priority.  A thread with priority
 * less than ti_pri will NOT be passed to the callback
 * function.
 *   ti_sigmask_p - signal mask of threads of interest.
 * A value of TD_SIGNO_MASK defined in thread_db.h
 * does not restrict iterations by signal mask.
 *   ti_user_flags - user flags of threads of interest.  A
 * value of TD_THR_ANY_USER_FLAGS defined in thread_db.h
 * does not restrict iterations by user flags.
 */
#pragma weak td_ta_thr_iter = __td_ta_thr_iter
td_err_e
__td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
	void *cbdata_p, td_thr_state_e state, int ti_pri,
	sigset_t *ti_sigmask_p, unsigned ti_user_flags)
{
	struct ps_prochandle *ph_p;
	psaddr_t	first_lwp_addr;
	psaddr_t	first_zombie_addr;
	psaddr_t	curr_lwp_addr;
	psaddr_t	next_lwp_addr;
	td_thrhandle_t	th;
	ps_err_e	db_return;
	ps_err_e	db_return2;
	td_err_e	return_val;

	if (cb == NULL)
		return (TD_ERR);
	/*
	 * If state is not within bounds, short circuit.
	 */
	if (state < TD_THR_ANY_STATE || state > TD_THR_STOPPED_ASLEEP)
		return (TD_OK);

	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * For each ulwp_t in the circular linked lists pointed
	 * to by "all_lwps" and "all_zombies":
	 * (1) Filter each thread.
	 * (2) Create the thread_object for each thread that passes.
	 * (3) Call the call back function on each thread.
	 */

	if (ta_p->model == PR_MODEL_NATIVE) {
		db_return = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, all_lwps),
		    &first_lwp_addr, sizeof (first_lwp_addr));
		db_return2 = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, all_zombies),
		    &first_zombie_addr, sizeof (first_zombie_addr));
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		caddr32_t addr32;

		db_return = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, all_lwps),
		    &addr32, sizeof (addr32));
		first_lwp_addr = addr32;
		db_return2 = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, all_zombies),
		    &addr32, sizeof (addr32));
		first_zombie_addr = addr32;
#else	/* _SYSCALL32 */
		db_return = PS_ERR;
		db_return2 = PS_ERR;
#endif	/* _SYSCALL32 */
	}
	if (db_return == PS_OK)
		db_return = db_return2;

	/*
	 * If first_lwp_addr and first_zombie_addr are both NULL,
	 * libc must not yet be initialized or all threads have
	 * exited.  Return TD_NOTHR and all will be well.
	 */
	if (db_return == PS_OK &&
	    first_lwp_addr == NULL && first_zombie_addr == NULL) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (TD_NOTHR);
	}
	if (db_return != PS_OK) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * Run down the lists of all living and dead lwps.
	 */
	if (first_lwp_addr == NULL)
		first_lwp_addr = first_zombie_addr;
	curr_lwp_addr = first_lwp_addr;
	for (;;) {
		td_thr_state_e ts_state;
		int userpri;
		unsigned userflags;
		sigset_t mask;

		/*
		 * Read the ulwp struct.
		 */
		if (ta_p->model == PR_MODEL_NATIVE) {
			ulwp_t ulwp;

			if (ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, sizeof (ulwp)) != PS_OK &&
			    ((void) memset(&ulwp, 0, sizeof (ulwp)),
			    ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, REPLACEMENT_SIZE)) != PS_OK) {
				return_val = TD_DBERR;
				break;
			}
			next_lwp_addr = (psaddr_t)ulwp.ul_forw;

			ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
				ulwp.ul_stop? TD_THR_STOPPED :
				ulwp.ul_wchan? TD_THR_SLEEP :
				TD_THR_ACTIVE;
			userpri = ulwp.ul_pri;
			userflags = ulwp.ul_usropts;
			if (ulwp.ul_dead)
				(void) sigemptyset(&mask);
			else
				mask = *(sigset_t *)&ulwp.ul_sigmask;
		} else {
#if defined(_LP64) && defined(_SYSCALL32)
			ulwp32_t ulwp;

			if (ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, sizeof (ulwp)) != PS_OK &&
			    ((void) memset(&ulwp, 0, sizeof (ulwp)),
			    ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, REPLACEMENT_SIZE32)) != PS_OK) {
				return_val = TD_DBERR;
				break;
			}
			next_lwp_addr = (psaddr_t)ulwp.ul_forw;

			ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
				ulwp.ul_stop? TD_THR_STOPPED :
				ulwp.ul_wchan? TD_THR_SLEEP :
				TD_THR_ACTIVE;
			userpri = ulwp.ul_pri;
			userflags = ulwp.ul_usropts;
			if (ulwp.ul_dead)
				(void) sigemptyset(&mask);
			else
				mask = *(sigset_t *)&ulwp.ul_sigmask;
#else	/* _SYSCALL32 */
			return_val = TD_ERR;
			break;
#endif	/* _SYSCALL32 */
		}

		/*
		 * Filter on state, priority, sigmask, and user flags.
		 */

		if ((state != ts_state) &&
		    (state != TD_THR_ANY_STATE))
			goto advance;

		if (ti_pri > userpri)
			goto advance;

		if (ti_sigmask_p != TD_SIGNO_MASK &&
		    !sigequalset(ti_sigmask_p, &mask))
			goto advance;

		if (ti_user_flags != userflags &&
		    ti_user_flags != (unsigned)TD_THR_ANY_USER_FLAGS)
			goto advance;

		/*
		 * Call back - break if the return
		 * from the call back is non-zero.
		 */
		th.th_ta_p = (td_thragent_t *)ta_p;
		th.th_unique = curr_lwp_addr;
		if ((*cb)(&th, cbdata_p))
			break;

advance:
		if ((curr_lwp_addr = next_lwp_addr) == first_lwp_addr) {
			/*
			 * Switch to the zombie list, unless it is NULL
			 * or we have already been doing the zombie list,
			 * in which case terminate the loop.
			 */
			if (first_zombie_addr == NULL ||
			    first_lwp_addr == first_zombie_addr)
				break;
			curr_lwp_addr = first_lwp_addr = first_zombie_addr;
		}
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
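
/*
 * Illustrative sketch (not part of this library; compiled out):
 * a minimal td_ta_thr_iter() client that counts all threads.  The
 * wild-card filter arguments ask for every thread, in any state.
 */
#if 0
static int
example_count_cb(const td_thrhandle_t *th_p, void *cbdata)
{
	/* th_p is valid only for the duration of this call */
	(*(int *)cbdata)++;
	return (0);		/* zero means keep iterating */
}

static int
example_count_threads(td_thragent_t *ta)
{
	int count = 0;

	(void) td_ta_thr_iter(ta, example_count_cb, &count,
	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
	    TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
	return (count);
}
#endif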

/*
 * Enable or disable process synchronization object tracking.
 * Currently unused by dbx.
 */
#pragma weak td_ta_sync_tracking_enable = __td_ta_sync_tracking_enable
td_err_e
__td_ta_sync_tracking_enable(td_thragent_t *ta_p, int onoff)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	register_sync_t enable;

	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	/*
	 * Values of tdb_register_sync in the victim process:
	 *	REGISTER_SYNC_ENABLE	enables registration of synch objects
	 *	REGISTER_SYNC_DISABLE	disables registration of synch objects
	 * These cause the table to be cleared and tdb_register_sync set to:
	 *	REGISTER_SYNC_ON	registration in effect
	 *	REGISTER_SYNC_OFF	registration not in effect
	 */
	enable = onoff? REGISTER_SYNC_ENABLE : REGISTER_SYNC_DISABLE;
	if (ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
	    &enable, sizeof (enable)) != PS_OK)
		return_val = TD_DBERR;
	/*
	 * Remember that this interface was called (see td_ta_delete()).
	 */
	ta_p->sync_tracking = 1;
	ph_unlock(ta_p);
	return (return_val);
}

/*
 * Iterate over all known synchronization variables.
 * It is very possible that the list generated is incomplete,
 * because the iterator can only find synchronization variables
 * that have been registered by the process since synchronization
 * object registration was enabled.
 * The call back function cb is called for each synchronization
 * variable with two arguments: a pointer to the synchronization
 * handle and the passed-in argument cbdata.
 * If cb returns a non-zero value, iterations are terminated.
 */
#pragma weak td_ta_sync_iter = __td_ta_sync_iter
td_err_e
__td_ta_sync_iter(td_thragent_t *ta_p, td_sync_iter_f *cb, void *cbdata)
{
	struct ps_prochandle *ph_p;
	td_err_e	return_val;
	int		i;
	register_sync_t	enable;
	psaddr_t	next_desc;
	tdb_sync_stats_t sync_stats;
	td_synchandle_t	synchandle;
	psaddr_t	psaddr;
	void		*vaddr;
	uint64_t	*sync_addr_hash = NULL;

	if (cb == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}
	if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
	    &enable, sizeof (enable)) != PS_OK) {
		return_val = TD_DBERR;
		goto out;
	}
	if (enable != REGISTER_SYNC_ON)
		goto out;

	/*
	 * First read the hash table.
	 * The hash table is large; allocate with mmap().
	 */
	if ((vaddr = mmap(NULL, TDB_HASH_SIZE * sizeof (uint64_t),
	    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
	    == MAP_FAILED) {
		return_val = TD_MALLOC;
		goto out;
	}
	sync_addr_hash = vaddr;

	if (ta_p->model == PR_MODEL_NATIVE) {
		if (ps_pdread(ph_p, ta_p->uberdata_addr +
		    offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
		    &psaddr, sizeof (psaddr)) != PS_OK) {
			return_val = TD_DBERR;
			goto out;
		}
	} else {
#ifdef  _SYSCALL32
		caddr32_t addr;

		if (ps_pdread(ph_p, ta_p->uberdata_addr +
		    offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
		    &addr, sizeof (addr)) != PS_OK) {
			return_val = TD_DBERR;
			goto out;
		}
		psaddr = addr;
#else
		return_val = TD_ERR;
		goto out;
#endif /* _SYSCALL32 */
	}

	if (psaddr == NULL)
		goto out;
	if (ps_pdread(ph_p, psaddr, sync_addr_hash,
	    TDB_HASH_SIZE * sizeof (uint64_t)) != PS_OK) {
		return_val = TD_DBERR;
		goto out;
	}

	/*
	 * Now scan the hash table.
	 */
	for (i = 0; i < TDB_HASH_SIZE; i++) {
		for (next_desc = (psaddr_t)sync_addr_hash[i];
		    next_desc != NULL;
		    next_desc = (psaddr_t)sync_stats.next) {
			if (ps_pdread(ph_p, next_desc,
			    &sync_stats, sizeof (sync_stats)) != PS_OK) {
				return_val = TD_DBERR;
				goto out;
			}
			if (sync_stats.un.type == TDB_NONE) {
				/* not registered since registration enabled */
				continue;
			}
			synchandle.sh_ta_p = ta_p;
			synchandle.sh_unique = (psaddr_t)sync_stats.sync_addr;
			if ((*cb)(&synchandle, cbdata) != 0)
				goto out;
		}
	}

out:
	if (sync_addr_hash != NULL)
		(void) munmap((void *)sync_addr_hash,
		    TDB_HASH_SIZE * sizeof (uint64_t));
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
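
/*
 * Illustrative sketch (not part of this library; compiled out):
 * a td_ta_sync_iter() callback that prints the target address of
 * every registered synchronization object.  Tracking must have been
 * enabled first, either by td_ta_new() or by
 * td_ta_sync_tracking_enable().
 */
#if 0
static int
example_sync_cb(const td_synchandle_t *sh_p, void *cbdata)
{
	(void) printf("sync object at 0x%lx\n", (ulong_t)sh_p->sh_unique);
	return (0);		/* keep iterating */
}
#endif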

/*
 * Enable process statistics collection.
 */
#pragma weak td_ta_enable_stats = __td_ta_enable_stats
/* ARGSUSED */
td_err_e
__td_ta_enable_stats(const td_thragent_t *ta_p, int onoff)
{
	return (TD_NOCAPAB);
}

/*
 * Reset process statistics.
 */
#pragma weak td_ta_reset_stats = __td_ta_reset_stats
/* ARGSUSED */
td_err_e
__td_ta_reset_stats(const td_thragent_t *ta_p)
{
	return (TD_NOCAPAB);
}

/*
 * Read process statistics.
 */
#pragma weak td_ta_get_stats = __td_ta_get_stats
/* ARGSUSED */
td_err_e
__td_ta_get_stats(const td_thragent_t *ta_p, td_ta_stats_t *tstats)
{
	return (TD_NOCAPAB);
}

/*
 * Transfer information from lwp struct to thread information struct.
 * XXX -- lots of this needs cleaning up.
 */
static void
td_thr2to(td_thragent_t *ta_p, psaddr_t ts_addr,
	ulwp_t *ulwp, td_thrinfo_t *ti_p)
{
	lwpid_t lwpid;

	if ((lwpid = ulwp->ul_lwpid) == 0)
		lwpid = 1;
	(void) memset(ti_p, 0, sizeof (*ti_p));
	ti_p->ti_ta_p = ta_p;
	ti_p->ti_user_flags = ulwp->ul_usropts;
	ti_p->ti_tid = lwpid;
	ti_p->ti_exitval = ulwp->ul_rval;
	ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
	if (!ulwp->ul_dead) {
		/*
		 * The bloody fools got this backwards!
		 */
		ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
		ti_p->ti_stksize = ulwp->ul_stksiz;
	}
	ti_p->ti_ro_area = ts_addr;
	ti_p->ti_ro_size = ulwp->ul_replace?
		REPLACEMENT_SIZE : sizeof (ulwp_t);
	ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
		ulwp->ul_stop? TD_THR_STOPPED :
		ulwp->ul_wchan? TD_THR_SLEEP :
		TD_THR_ACTIVE;
	ti_p->ti_db_suspended = 0;
	ti_p->ti_type = TD_THR_USER;
	ti_p->ti_sp = ulwp->ul_sp;
	ti_p->ti_flags = 0;
	ti_p->ti_pri = ulwp->ul_pri;
	ti_p->ti_lid = lwpid;
	if (!ulwp->ul_dead)
		ti_p->ti_sigmask = ulwp->ul_sigmask;
	ti_p->ti_traceme = 0;
	ti_p->ti_preemptflag = 0;
	ti_p->ti_pirecflag = 0;
	(void) sigemptyset(&ti_p->ti_pending);
	ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
}

#if defined(_LP64) && defined(_SYSCALL32)
static void
td_thr2to32(td_thragent_t *ta_p, psaddr_t ts_addr,
	ulwp32_t *ulwp, td_thrinfo_t *ti_p)
{
	lwpid_t lwpid;

	if ((lwpid = ulwp->ul_lwpid) == 0)
		lwpid = 1;
	(void) memset(ti_p, 0, sizeof (*ti_p));
	ti_p->ti_ta_p = ta_p;
	ti_p->ti_user_flags = ulwp->ul_usropts;
	ti_p->ti_tid = lwpid;
	ti_p->ti_exitval = (void *)(uintptr_t)ulwp->ul_rval;
	ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
	if (!ulwp->ul_dead) {
		/*
		 * The bloody fools got this backwards!
		 */
		ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
		ti_p->ti_stksize = ulwp->ul_stksiz;
	}
	ti_p->ti_ro_area = ts_addr;
	ti_p->ti_ro_size = ulwp->ul_replace?
		REPLACEMENT_SIZE32 : sizeof (ulwp32_t);
	ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
		ulwp->ul_stop? TD_THR_STOPPED :
		ulwp->ul_wchan? TD_THR_SLEEP :
		TD_THR_ACTIVE;
	ti_p->ti_db_suspended = 0;
	ti_p->ti_type = TD_THR_USER;
	ti_p->ti_sp = (uint32_t)ulwp->ul_sp;
	ti_p->ti_flags = 0;
	ti_p->ti_pri = ulwp->ul_pri;
	ti_p->ti_lid = lwpid;
	if (!ulwp->ul_dead)
		ti_p->ti_sigmask = *(sigset_t *)&ulwp->ul_sigmask;
	ti_p->ti_traceme = 0;
	ti_p->ti_preemptflag = 0;
	ti_p->ti_pirecflag = 0;
	(void) sigemptyset(&ti_p->ti_pending);
	ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
}
#endif	/* _SYSCALL32 */

/*
 * Get thread information.
 */
#pragma weak td_thr_get_info = __td_thr_get_info
td_err_e
__td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p)
{
	struct ps_prochandle *ph_p;
	td_thragent_t	*ta_p;
	td_err_e	return_val;
	psaddr_t	psaddr;

	if (ti_p == NULL)
		return (TD_ERR);
	(void) memset(ti_p, 0, sizeof (*ti_p));

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	ta_p = th_p->th_ta_p;
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * Read the ulwp struct from the process.
	 * Transfer the ulwp struct to the thread information struct.
	 */
	psaddr = th_p->th_unique;
	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t ulwp;

		if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
		    ((void) memset(&ulwp, 0, sizeof (ulwp)),
		    ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE)) != PS_OK)
			return_val = TD_DBERR;
		else
			td_thr2to(ta_p, psaddr, &ulwp, ti_p);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t ulwp;

		if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
		    ((void) memset(&ulwp, 0, sizeof (ulwp)),
		    ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE32)) !=
		    PS_OK)
			return_val = TD_DBERR;
		else
			td_thr2to32(ta_p, psaddr, &ulwp, ti_p);
#else
		return_val = TD_ERR;
#endif	/* _SYSCALL32 */
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}

/*
 * Given a process and an event number, return information about
 * an address in the process at which a breakpoint can be set
 * to monitor the event.
 */
#pragma weak td_ta_event_addr = __td_ta_event_addr
td_err_e
__td_ta_event_addr(td_thragent_t *ta_p, td_event_e event, td_notify_t *notify_p)
{
	if (ta_p == NULL)
		return (TD_BADTA);
	if (event < TD_MIN_EVENT_NUM || event > TD_MAX_EVENT_NUM)
		return (TD_NOEVENT);
	if (notify_p == NULL)
		return (TD_ERR);

	notify_p->type = NOTIFY_BPT;
	notify_p->u.bptaddr = ta_p->tdb_events[event - TD_MIN_EVENT_NUM];

	return (TD_OK);
}

/*
 * Add the events in eventset 2 to eventset 1.
 */
static void
eventsetaddset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
{
	int	i;

	for (i = 0; i < TD_EVENTSIZE; i++)
		event1_p->event_bits[i] |= event2_p->event_bits[i];
}

/*
 * Delete the events in eventset 2 from eventset 1.
 */
static void
eventsetdelset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
{
	int	i;

	for (i = 0; i < TD_EVENTSIZE; i++)
		event1_p->event_bits[i] &= ~event2_p->event_bits[i];
}

/*
 * Either add or delete the given event set from a thread's event mask.
 */
static td_err_e
mod_eventset(td_thrhandle_t *th_p, td_thr_events_t *events, int onoff)
{
	struct ps_prochandle *ph_p;
	td_err_e	return_val = TD_OK;
	char		enable;
	td_thr_events_t	evset;
	psaddr_t	psaddr_evset;
	psaddr_t	psaddr_enab;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
		psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
		psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
		psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
#else
		ph_unlock(th_p->th_ta_p);
		return (TD_ERR);
#endif	/* _SYSCALL32 */
	}
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_pdread(ph_p, psaddr_evset, &evset, sizeof (evset)) != PS_OK)
		return_val = TD_DBERR;
	else {
		if (onoff)
			eventsetaddset(&evset, events);
		else
			eventsetdelset(&evset, events);
		if (ps_pdwrite(ph_p, psaddr_evset, &evset, sizeof (evset))
		    != PS_OK)
			return_val = TD_DBERR;
		else {
			enable = 0;
			if (td_eventismember(&evset, TD_EVENTS_ENABLE))
				enable = 1;
			if (ps_pdwrite(ph_p, psaddr_enab,
			    &enable, sizeof (enable)) != PS_OK)
				return_val = TD_DBERR;
		}
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}

/*
 * Enable or disable tracing for a given thread.  Tracing
 * is filtered based on the event mask of each thread.  Tracing
 * can be turned on/off for the thread without changing the
 * thread's event mask.
 * Currently unused by dbx.
 */
#pragma weak td_thr_event_enable = __td_thr_event_enable
td_err_e
__td_thr_event_enable(td_thrhandle_t *th_p, int onoff)
{
	td_thr_events_t	evset;

	td_event_emptyset(&evset);
	td_event_addset(&evset, TD_EVENTS_ENABLE);
	return (mod_eventset(th_p, &evset, onoff));
}

/*
 * Set the event mask to enable the given events.  Each event is
 * turned on in the thread's event mask.  If a thread encounters
 * an event for which its event mask bit is set, notification is
 * sent to the debugger.
 * An address is provided to the debugger for each event.  It is
 * assumed that a breakpoint of some type will be placed at that
 * address.  If the event mask bit for the thread is set, the
 * instruction at the address will be executed.  Otherwise, the
 * instruction will be skipped.
 */
#pragma weak td_thr_set_event = __td_thr_set_event
td_err_e
__td_thr_set_event(td_thrhandle_t *th_p, td_thr_events_t *events)
{
	return (mod_eventset(th_p, events, 1));
}
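
/*
 * Illustrative sketch (not part of this library; compiled out):
 * arming a thread to report thread-creation events.  The event-set
 * macros come from thread_db.h; both the per-thread mask and the
 * TD_EVENTS_ENABLE switch must be set for reports to be generated.
 */
#if 0
static td_err_e
example_watch_create(td_thrhandle_t *th)
{
	td_thr_events_t ev;
	td_err_e err;

	td_event_emptyset(&ev);
	td_event_addset(&ev, TD_CREATE);
	if ((err = td_thr_set_event(th, &ev)) != TD_OK)
		return (err);
	return (td_thr_event_enable(th, 1));
}
#endif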

/*
 * Enable or disable a set of events in the process-global event mask,
 * depending on the value of onoff.
 */
static td_err_e
td_ta_mod_event(td_thragent_t *ta_p, td_thr_events_t *events, int onoff)
{
	struct ps_prochandle *ph_p;
	td_thr_events_t targ_eventset;
	td_err_e	return_val;

	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}
	if (ps_pdread(ph_p, ta_p->tdb_eventmask_addr,
	    &targ_eventset, sizeof (targ_eventset)) != PS_OK)
		return_val = TD_DBERR;
	else {
		if (onoff)
			eventsetaddset(&targ_eventset, events);
		else
			eventsetdelset(&targ_eventset, events);
		if (ps_pdwrite(ph_p, ta_p->tdb_eventmask_addr,
		    &targ_eventset, sizeof (targ_eventset)) != PS_OK)
			return_val = TD_DBERR;
	}
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}

/*
 * Enable a set of events in the process-global event mask.
 */
#pragma weak td_ta_set_event = __td_ta_set_event
td_err_e
__td_ta_set_event(td_thragent_t *ta_p, td_thr_events_t *events)
{
	return (td_ta_mod_event(ta_p, events, 1));
}

/*
 * Set the event mask to disable the given event set; these events are
 * cleared from the event mask of the thread.  Events that occur for a
 * thread with the event masked off will not cause notification to be
 * sent to the debugger (see td_thr_set_event for a fuller description).
 */
#pragma weak td_thr_clear_event = __td_thr_clear_event
td_err_e
__td_thr_clear_event(td_thrhandle_t *th_p, td_thr_events_t *events)
{
	return (mod_eventset(th_p, events, 0));
}

/*
 * Disable a set of events in the process-global event mask.
 */
#pragma weak td_ta_clear_event = __td_ta_clear_event
td_err_e
__td_ta_clear_event(td_thragent_t *ta_p, td_thr_events_t *events)
{
	return (td_ta_mod_event(ta_p, events, 0));
}

/*
 * This function returns the most recent event message, if any,
 * associated with a thread.  Given a thread handle, return the message
 * corresponding to the event encountered by the thread.  Only one
 * message per thread is saved.  Messages from earlier events are lost
 * when later events occur.
 */
#pragma weak td_thr_event_getmsg = __td_thr_event_getmsg
td_err_e
__td_thr_event_getmsg(td_thrhandle_t *th_p, td_event_msg_t *msg)
{
	struct ps_prochandle *ph_p;
	td_err_e	return_val = TD_OK;
	psaddr_t	psaddr;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_BADTA);
	}
	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
		td_evbuf_t evbuf;

		psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
		if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
			return_val = TD_DBERR;
		} else if (evbuf.eventnum == TD_EVENT_NONE) {
			return_val = TD_NOEVENT;
		} else {
			msg->event = evbuf.eventnum;
			msg->th_p = (td_thrhandle_t *)th_p;
			msg->msg.data = (uintptr_t)evbuf.eventdata;
			/* "Consume" the message */
			evbuf.eventnum = TD_EVENT_NONE;
			evbuf.eventdata = NULL;
			if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
			    != PS_OK)
				return_val = TD_DBERR;
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		td_evbuf32_t evbuf;

		psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
		if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
			return_val = TD_DBERR;
		} else if (evbuf.eventnum == TD_EVENT_NONE) {
			return_val = TD_NOEVENT;
		} else {
			msg->event = evbuf.eventnum;
			msg->th_p = (td_thrhandle_t *)th_p;
			msg->msg.data = (uintptr_t)evbuf.eventdata;
			/* "Consume" the message */
			evbuf.eventnum = TD_EVENT_NONE;
			evbuf.eventdata = NULL;
			if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
			    != PS_OK)
				return_val = TD_DBERR;
		}
#else
		return_val = TD_ERR;
#endif	/* _SYSCALL32 */
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}

/*
 * The callback function td_ta_event_getmsg uses when looking for
 * a thread with an event.  A thin wrapper around td_thr_event_getmsg.
 */
static int
event_msg_cb(const td_thrhandle_t *th_p, void *arg)
{
	static td_thrhandle_t th;
	td_event_msg_t *msg = arg;

	if (__td_thr_event_getmsg((td_thrhandle_t *)th_p, msg) == TD_OK) {
		/*
		 * Got an event, stop iterating.
		 *
		 * Because of past mistakes in interface definition,
		 * we are forced to pass back a static local variable
		 * for the thread handle because th_p is a pointer
		 * to a local variable in __td_ta_thr_iter().
		 * Grr...
		 */
		th = *th_p;
		msg->th_p = &th;
		return (1);
	}
	return (0);
}

/*
 * This function is just like td_thr_event_getmsg, except that it is
 * passed a process handle rather than a thread handle, and returns
 * an event message for some thread in the process that has an event
 * message pending.  If no thread has an event message pending, this
 * routine returns TD_NOEVENT.  Thus, all pending event messages may
 * be collected from a process by repeatedly calling this routine
 * until it returns TD_NOEVENT.
 */
#pragma weak td_ta_event_getmsg = __td_ta_event_getmsg
td_err_e
__td_ta_event_getmsg(td_thragent_t *ta_p, td_event_msg_t *msg)
{
	td_err_e return_val;

	if (ta_p == NULL)
		return (TD_BADTA);
	if (ta_p->ph_p == NULL)
		return (TD_BADPH);
	if (msg == NULL)
		return (TD_ERR);
	msg->event = TD_EVENT_NONE;
	if ((return_val = __td_ta_thr_iter(ta_p, event_msg_cb, msg,
	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK,
	    TD_THR_ANY_USER_FLAGS)) != TD_OK)
		return (return_val);
	if (msg->event == TD_EVENT_NONE)
		return (TD_NOEVENT);
	return (TD_OK);
}
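
/*
 * Illustrative sketch (not part of this library; compiled out):
 * draining all pending event messages from a process, as the comment
 * above suggests, by calling td_ta_event_getmsg() until it returns
 * TD_NOEVENT.
 */
#if 0
static void
example_drain_events(td_thragent_t *ta)
{
	td_event_msg_t msg;

	while (td_ta_event_getmsg(ta, &msg) == TD_OK) {
		/* msg.event identifies the event; msg.th_p the thread */
	}
}
#endif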

static lwpid_t
thr_to_lwpid(const td_thrhandle_t *th_p)
{
	struct ps_prochandle *ph_p = th_p->th_ta_p->ph_p;
	lwpid_t lwpid;

	/*
	 * The caller holds the prochandle lock
	 * and has already verified everything.
	 */
	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
		    &lwpid, sizeof (lwpid)) != PS_OK)
			lwpid = 0;
		else if (lwpid == 0)
			lwpid = 1;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
		    &lwpid, sizeof (lwpid)) != PS_OK)
			lwpid = 0;
		else if (lwpid == 0)
			lwpid = 1;
#else
		lwpid = 0;
#endif	/* _SYSCALL32 */
	}

	return (lwpid);
}

/*
 * Suspend a thread.
 * XXX: What does this mean in a one-level model?
 */
#pragma weak td_thr_dbsuspend = __td_thr_dbsuspend
td_err_e
__td_thr_dbsuspend(const td_thrhandle_t *th_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_lstop(ph_p, thr_to_lwpid(th_p)) != PS_OK)
		return_val = TD_DBERR;
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}

/*
 * Resume a suspended thread.
 * XXX: What does this mean in a one-level model?
 */
#pragma weak td_thr_dbresume = __td_thr_dbresume
td_err_e
__td_thr_dbresume(const td_thrhandle_t *th_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_lcontinue(ph_p, thr_to_lwpid(th_p)) != PS_OK)
		return_val = TD_DBERR;
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}

/*
 * Set a thread's signal mask.
 * Currently unused by dbx.
 */
#pragma weak td_thr_sigsetmask = __td_thr_sigsetmask
/* ARGSUSED */
td_err_e
__td_thr_sigsetmask(const td_thrhandle_t *th_p, const sigset_t ti_sigmask)
{
	return (TD_NOCAPAB);
}

/*
 * Set a thread's "signals-pending" set.
 * Currently unused by dbx.
 */
#pragma weak td_thr_setsigpending = __td_thr_setsigpending
/* ARGSUSED */
td_err_e
__td_thr_setsigpending(const td_thrhandle_t *th_p,
	uchar_t ti_pending_flag, const sigset_t ti_pending)
{
	return (TD_NOCAPAB);
}
1846 
1847 /*
1848  * Get a thread's general register set.
1849  */
1850 #pragma weak td_thr_getgregs = __td_thr_getgregs
1851 td_err_e
1852 __td_thr_getgregs(td_thrhandle_t *th_p, prgregset_t regset)
1853 {
1854 	struct ps_prochandle *ph_p;
1855 	td_err_e return_val;
1856 
1857 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1858 		return (return_val);
1859 	if (ps_pstop(ph_p) != PS_OK) {
1860 		ph_unlock(th_p->th_ta_p);
1861 		return (TD_DBERR);
1862 	}
1863 
1864 	if (ps_lgetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
1865 		return_val = TD_DBERR;
1866 
1867 	(void) ps_pcontinue(ph_p);
1868 	ph_unlock(th_p->th_ta_p);
1869 	return (return_val);
1870 }
1871 
1872 /*
1873  * Set a thread's general register set.
1874  */
1875 #pragma weak td_thr_setgregs = __td_thr_setgregs
1876 td_err_e
1877 __td_thr_setgregs(td_thrhandle_t *th_p, const prgregset_t regset)
1878 {
1879 	struct ps_prochandle *ph_p;
1880 	td_err_e return_val;
1881 
1882 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1883 		return (return_val);
1884 	if (ps_pstop(ph_p) != PS_OK) {
1885 		ph_unlock(th_p->th_ta_p);
1886 		return (TD_DBERR);
1887 	}
1888 
1889 	if (ps_lsetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
1890 		return_val = TD_DBERR;
1891 
1892 	(void) ps_pcontinue(ph_p);
1893 	ph_unlock(th_p->th_ta_p);
1894 	return (return_val);
1895 }
1896 
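/*
 * Illustrative sketch (not part of the library): a typical use of
 * td_thr_getgregs()/td_thr_setgregs() is a read-modify-write of the
 * register set.  'R_PC' stands in for the architecture's program
 * counter index from <sys/regset.h> and 'new_pc' for the value to
 * plant; both are assumptions of this sketch:
 *
 *	prgregset_t regs;
 *
 *	if (td_thr_getgregs(th_p, regs) == TD_OK) {
 *		regs[R_PC] = new_pc;
 *		(void) td_thr_setgregs(th_p, regs);
 *	}
 */
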
1897 /*
1898  * Get a thread's floating-point register set.
1899  */
1900 #pragma weak td_thr_getfpregs = __td_thr_getfpregs
1901 td_err_e
1902 __td_thr_getfpregs(td_thrhandle_t *th_p, prfpregset_t *fpregset)
1903 {
1904 	struct ps_prochandle *ph_p;
1905 	td_err_e return_val;
1906 
1907 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1908 		return (return_val);
1909 	if (ps_pstop(ph_p) != PS_OK) {
1910 		ph_unlock(th_p->th_ta_p);
1911 		return (TD_DBERR);
1912 	}
1913 
1914 	if (ps_lgetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
1915 		return_val = TD_DBERR;
1916 
1917 	(void) ps_pcontinue(ph_p);
1918 	ph_unlock(th_p->th_ta_p);
1919 	return (return_val);
1920 }
1921 
1922 /*
1923  * Set a thread's floating-point register set.
1924  */
1925 #pragma weak td_thr_setfpregs = __td_thr_setfpregs
1926 td_err_e
1927 __td_thr_setfpregs(td_thrhandle_t *th_p, const prfpregset_t *fpregset)
1928 {
1929 	struct ps_prochandle *ph_p;
1930 	td_err_e return_val;
1931 
1932 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1933 		return (return_val);
1934 	if (ps_pstop(ph_p) != PS_OK) {
1935 		ph_unlock(th_p->th_ta_p);
1936 		return (TD_DBERR);
1937 	}
1938 
1939 	if (ps_lsetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
1940 		return_val = TD_DBERR;
1941 
1942 	(void) ps_pcontinue(ph_p);
1943 	ph_unlock(th_p->th_ta_p);
1944 	return (return_val);
1945 }
1946 
1947 /*
1948  * Get the size of the extra state register set for this architecture.
1949  * Currently unused by dbx.
1950  */
1951 #pragma weak td_thr_getxregsize = __td_thr_getxregsize
1952 /* ARGSUSED */
1953 td_err_e
1954 __td_thr_getxregsize(td_thrhandle_t *th_p, int *xregsize)
1955 {
1956 #if defined(__sparc)
1957 	struct ps_prochandle *ph_p;
1958 	td_err_e return_val;
1959 
1960 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1961 		return (return_val);
1962 	if (ps_pstop(ph_p) != PS_OK) {
1963 		ph_unlock(th_p->th_ta_p);
1964 		return (TD_DBERR);
1965 	}
1966 
1967 	if (ps_lgetxregsize(ph_p, thr_to_lwpid(th_p), xregsize) != PS_OK)
1968 		return_val = TD_DBERR;
1969 
1970 	(void) ps_pcontinue(ph_p);
1971 	ph_unlock(th_p->th_ta_p);
1972 	return (return_val);
1973 #else	/* __sparc */
1974 	return (TD_NOXREGS);
1975 #endif	/* __sparc */
1976 }
1977 
1978 /*
1979  * Get a thread's extra state register set.
1980  */
1981 #pragma weak td_thr_getxregs = __td_thr_getxregs
1982 /* ARGSUSED */
1983 td_err_e
1984 __td_thr_getxregs(td_thrhandle_t *th_p, void *xregset)
1985 {
1986 #if defined(__sparc)
1987 	struct ps_prochandle *ph_p;
1988 	td_err_e return_val;
1989 
1990 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1991 		return (return_val);
1992 	if (ps_pstop(ph_p) != PS_OK) {
1993 		ph_unlock(th_p->th_ta_p);
1994 		return (TD_DBERR);
1995 	}
1996 
1997 	if (ps_lgetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
1998 		return_val = TD_DBERR;
1999 
2000 	(void) ps_pcontinue(ph_p);
2001 	ph_unlock(th_p->th_ta_p);
2002 	return (return_val);
2003 #else	/* __sparc */
2004 	return (TD_NOXREGS);
2005 #endif	/* __sparc */
2006 }
2007 
2008 /*
2009  * Set a thread's extra state register set.
2010  */
2011 #pragma weak td_thr_setxregs = __td_thr_setxregs
2012 /* ARGSUSED */
2013 td_err_e
2014 __td_thr_setxregs(td_thrhandle_t *th_p, const void *xregset)
2015 {
2016 #if defined(__sparc)
2017 	struct ps_prochandle *ph_p;
2018 	td_err_e return_val;
2019 
2020 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2021 		return (return_val);
2022 	if (ps_pstop(ph_p) != PS_OK) {
2023 		ph_unlock(th_p->th_ta_p);
2024 		return (TD_DBERR);
2025 	}
2026 
2027 	if (ps_lsetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
2028 		return_val = TD_DBERR;
2029 
2030 	(void) ps_pcontinue(ph_p);
2031 	ph_unlock(th_p->th_ta_p);
2032 	return (return_val);
2033 #else	/* __sparc */
2034 	return (TD_NOXREGS);
2035 #endif	/* __sparc */
2036 }
2037 
2038 struct searcher {
2039 	psaddr_t	addr;
2040 	int		status;
2041 };
2042 
2043 /*
2044  * Check the struct thread address in *th_p against the first
2045  * value in "data".  If the value is found, set the second value
2046  * in "data" to 1 and return 1 to terminate iterations.
2047  * This function is used by td_thr_validate() to verify that
2048  * a thread handle is valid.
2049  */
2050 static int
2051 td_searcher(const td_thrhandle_t *th_p, void *data)
2052 {
2053 	struct searcher *searcher_data = (struct searcher *)data;
2054 
2055 	if (searcher_data->addr == th_p->th_unique) {
2056 		searcher_data->status = 1;
2057 		return (1);
2058 	}
2059 	return (0);
2060 }
2061 
2062 /*
2063  * Validate the thread handle.  Check that
2064  * a thread exists in the thread agent/process that
2065  * corresponds to thread with handle *th_p.
2066  * Currently unused by dbx.
2067  */
2068 #pragma weak td_thr_validate = __td_thr_validate
2069 td_err_e
2070 __td_thr_validate(const td_thrhandle_t *th_p)
2071 {
2072 	td_err_e return_val;
2073 	struct searcher searcher_data = {0, 0};
2074 
2075 	if (th_p == NULL)
2076 		return (TD_BADTH);
2077 	if (th_p->th_unique == NULL || th_p->th_ta_p == NULL)
2078 		return (TD_BADTH);
2079 
2080 	/*
2081 	 * LOCKING EXCEPTION - Locking is not required
2082 	 * here because no use of the thread agent is made (other
2083 	 * than the sanity check) and checking of the thread
2084 	 * agent will be done in __td_ta_thr_iter.
2085 	 */
2086 
2087 	searcher_data.addr = th_p->th_unique;
2088 	return_val = __td_ta_thr_iter(th_p->th_ta_p,
2089 		td_searcher, &searcher_data,
2090 		TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
2091 		TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
2092 
2093 	if (return_val == TD_OK && searcher_data.status == 0)
2094 		return_val = TD_NOTHR;
2095 
2096 	return (return_val);
2097 }
2098 
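/*
 * Illustrative sketch (not part of the library): a debugger that
 * caches thread handles across stops can revalidate a handle with
 * td_thr_validate() before reusing it:
 *
 *	if (td_thr_validate(&cached_th) != TD_OK) {
 *		... thread has exited; discard the cached handle ...
 *	}
 */
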
2099 /*
2100  * Get a thread's private binding to a given thread specific
2101  * data (TSD) key (see thr_getspecific(3T)).  If the thread doesn't
2102  * have a binding for a particular key, then NULL is returned.
2103  */
2104 #pragma weak td_thr_tsd = __td_thr_tsd
2105 td_err_e
2106 __td_thr_tsd(td_thrhandle_t *th_p, thread_key_t key, void **data_pp)
2107 {
2108 	struct ps_prochandle *ph_p;
2109 	td_thragent_t	*ta_p;
2110 	td_err_e	return_val;
2111 	int		maxkey;
2112 	int		nkey;
2113 	psaddr_t	tsd_paddr;
2114 
2115 	if (data_pp == NULL)
2116 		return (TD_ERR);
2117 	*data_pp = NULL;
2118 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2119 		return (return_val);
2120 	ta_p = th_p->th_ta_p;
2121 	if (ps_pstop(ph_p) != PS_OK) {
2122 		ph_unlock(ta_p);
2123 		return (TD_DBERR);
2124 	}
2125 
2126 	if (ta_p->model == PR_MODEL_NATIVE) {
2127 		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2128 		tsd_metadata_t tsdm;
2129 		tsd_t stsd;
2130 
2131 		if (ps_pdread(ph_p,
2132 		    ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
2133 		    &tsdm, sizeof (tsdm)) != PS_OK)
2134 			return_val = TD_DBERR;
2135 		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
2136 		    &tsd_paddr, sizeof (tsd_paddr)) != PS_OK)
2137 			return_val = TD_DBERR;
2138 		else if (tsd_paddr != NULL &&
2139 		    ps_pdread(ph_p, tsd_paddr, &stsd, sizeof (stsd)) != PS_OK)
2140 			return_val = TD_DBERR;
2141 		else {
2142 			maxkey = tsdm.tsdm_nused;
2143 			nkey = tsd_paddr == NULL ? TSD_NFAST : stsd.tsd_nalloc;
2144 
2145 			if (key < TSD_NFAST)
2146 				tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
2147 		}
2148 	} else {
2149 #if defined(_LP64) && defined(_SYSCALL32)
2150 		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2151 		tsd_metadata32_t tsdm;
2152 		tsd32_t stsd;
2153 		caddr32_t addr;
2154 
2155 		if (ps_pdread(ph_p,
2156 		    ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
2157 		    &tsdm, sizeof (tsdm)) != PS_OK)
2158 			return_val = TD_DBERR;
2159 		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
2160 		    &addr, sizeof (addr)) != PS_OK)
2161 			return_val = TD_DBERR;
2162 		else if (addr != NULL &&
2163 		    ps_pdread(ph_p, addr, &stsd, sizeof (stsd)) != PS_OK)
2164 			return_val = TD_DBERR;
2165 		else {
2166 			maxkey = tsdm.tsdm_nused;
2167 			nkey = addr == NULL ? TSD_NFAST : stsd.tsd_nalloc;
2168 
2169 			if (key < TSD_NFAST) {
2170 				tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
2171 			} else {
2172 				tsd_paddr = addr;
2173 			}
2174 		}
2175 #else
2176 		return_val = TD_ERR;
2177 #endif	/* _SYSCALL32 */
2178 	}
2179 
2180 	if (return_val == TD_OK && (key < 1 || key >= maxkey))
2181 		return_val = TD_NOTSD;
2182 	if (return_val != TD_OK || key >= nkey) {
2183 		/* NULL has already been stored in data_pp */
2184 		(void) ps_pcontinue(ph_p);
2185 		ph_unlock(ta_p);
2186 		return (return_val);
2187 	}
2188 
2189 	/*
2190 	 * Read the value from the thread's tsd array.
2191 	 */
2192 	if (ta_p->model == PR_MODEL_NATIVE) {
2193 		void *value;
2194 
2195 		if (ps_pdread(ph_p, tsd_paddr + key * sizeof (void *),
2196 		    &value, sizeof (value)) != PS_OK)
2197 			return_val = TD_DBERR;
2198 		else
2199 			*data_pp = value;
2200 #if defined(_LP64) && defined(_SYSCALL32)
2201 	} else {
2202 		caddr32_t value32;
2203 
2204 		if (ps_pdread(ph_p, tsd_paddr + key * sizeof (caddr32_t),
2205 		    &value32, sizeof (value32)) != PS_OK)
2206 			return_val = TD_DBERR;
2207 		else
2208 			*data_pp = (void *)(uintptr_t)value32;
2209 #endif	/* _SYSCALL32 */
2210 	}
2211 
2212 	(void) ps_pcontinue(ph_p);
2213 	ph_unlock(ta_p);
2214 	return (return_val);
2215 }
2216 
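/*
 * Illustrative sketch (not part of the library): reading one thread's
 * binding for a TSD key, where 'key' is assumed to be a thread_key_t
 * created by the target process:
 *
 *	void *value;
 *
 *	if (td_thr_tsd(th_p, key, &value) == TD_OK && value != NULL) {
 *		... 'value' is the thread's binding for 'key' ...
 *	}
 */
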
2217 /*
2218  * Get the base address of a thread's thread local storage (TLS) block
2219  * for the module (executable or shared object) identified by 'moduleid'.
2220  */
2221 #pragma weak td_thr_tlsbase = __td_thr_tlsbase
2222 td_err_e
2223 __td_thr_tlsbase(td_thrhandle_t *th_p, ulong_t moduleid, psaddr_t *base)
2224 {
2225 	struct ps_prochandle *ph_p;
2226 	td_thragent_t	*ta_p;
2227 	td_err_e	return_val;
2228 
2229 	if (base == NULL)
2230 		return (TD_ERR);
2231 	*base = NULL;
2232 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2233 		return (return_val);
2234 	ta_p = th_p->th_ta_p;
2235 	if (ps_pstop(ph_p) != PS_OK) {
2236 		ph_unlock(ta_p);
2237 		return (TD_DBERR);
2238 	}
2239 
2240 	if (ta_p->model == PR_MODEL_NATIVE) {
2241 		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2242 		tls_metadata_t tls_metadata;
2243 		TLS_modinfo tlsmod;
2244 		tls_t tls;
2245 
2246 		if (ps_pdread(ph_p,
2247 		    ta_p->uberdata_addr + offsetof(uberdata_t, tls_metadata),
2248 		    &tls_metadata, sizeof (tls_metadata)) != PS_OK)
2249 			return_val = TD_DBERR;
2250 		else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
2251 			return_val = TD_NOTLS;
2252 		else if (ps_pdread(ph_p,
2253 		    (psaddr_t)((TLS_modinfo *)
2254 		    tls_metadata.tls_modinfo.tls_data + moduleid),
2255 		    &tlsmod, sizeof (tlsmod)) != PS_OK)
2256 			return_val = TD_DBERR;
2257 		else if (tlsmod.tm_memsz == 0)
2258 			return_val = TD_NOTLS;
2259 		else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
2260 			*base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
2261 		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
2262 		    &tls, sizeof (tls)) != PS_OK)
2263 			return_val = TD_DBERR;
2264 		else if (moduleid >= tls.tls_size)
2265 			return_val = TD_TLSDEFER;
2266 		else if (ps_pdread(ph_p,
2267 		    (psaddr_t)((tls_t *)tls.tls_data + moduleid),
2268 		    &tls, sizeof (tls)) != PS_OK)
2269 			return_val = TD_DBERR;
2270 		else if (tls.tls_size == 0)
2271 			return_val = TD_TLSDEFER;
2272 		else
2273 			*base = (psaddr_t)tls.tls_data;
2274 	} else {
2275 #if defined(_LP64) && defined(_SYSCALL32)
2276 		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2277 		tls_metadata32_t tls_metadata;
2278 		TLS_modinfo32 tlsmod;
2279 		tls32_t tls;
2280 
2281 		if (ps_pdread(ph_p,
2282 		    ta_p->uberdata_addr + offsetof(uberdata32_t, tls_metadata),
2283 		    &tls_metadata, sizeof (tls_metadata)) != PS_OK)
2284 			return_val = TD_DBERR;
2285 		else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
2286 			return_val = TD_NOTLS;
2287 		else if (ps_pdread(ph_p,
2288 		    (psaddr_t)((TLS_modinfo32 *)
2289 		    (uintptr_t)tls_metadata.tls_modinfo.tls_data + moduleid),
2290 		    &tlsmod, sizeof (tlsmod)) != PS_OK)
2291 			return_val = TD_DBERR;
2292 		else if (tlsmod.tm_memsz == 0)
2293 			return_val = TD_NOTLS;
2294 		else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
2295 			*base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
2296 		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
2297 		    &tls, sizeof (tls)) != PS_OK)
2298 			return_val = TD_DBERR;
2299 		else if (moduleid >= tls.tls_size)
2300 			return_val = TD_TLSDEFER;
2301 		else if (ps_pdread(ph_p,
2302 		    (psaddr_t)((tls32_t *)(uintptr_t)tls.tls_data + moduleid),
2303 		    &tls, sizeof (tls)) != PS_OK)
2304 			return_val = TD_DBERR;
2305 		else if (tls.tls_size == 0)
2306 			return_val = TD_TLSDEFER;
2307 		else
2308 			*base = (psaddr_t)tls.tls_data;
2309 #else
2310 		return_val = TD_ERR;
2311 #endif	/* _SYSCALL32 */
2312 	}
2313 
2314 	(void) ps_pcontinue(ph_p);
2315 	ph_unlock(ta_p);
2316 	return (return_val);
2317 }
2318 
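/*
 * Illustrative sketch (not part of the library): resolving a TLS
 * variable's address for a thread.  The module id comes from the
 * run-time linker; 'moduleid' and 'sym_offset' are assumptions of
 * this sketch:
 *
 *	psaddr_t base;
 *
 *	if (td_thr_tlsbase(th_p, moduleid, &base) == TD_OK) {
 *		psaddr_t sym_addr = base + sym_offset;
 *		... read the variable at sym_addr with ps_pdread() ...
 *	}
 */
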
2319 /*
2320  * Change a thread's priority to the value specified by ti_pri.
2321  * Currently unused by dbx.
2322  */
2323 #pragma weak td_thr_setprio = __td_thr_setprio
2324 td_err_e
2325 __td_thr_setprio(td_thrhandle_t *th_p, int ti_pri)
2326 {
2327 	struct ps_prochandle *ph_p;
2328 	pri_t		priority = ti_pri;
2329 	td_err_e	return_val = TD_OK;
2330 
2331 	if (ti_pri < THREAD_MIN_PRIORITY || ti_pri > THREAD_MAX_PRIORITY)
2332 		return (TD_ERR);
2333 	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2334 		return (return_val);
2335 
2336 	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
2337 		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2338 
2339 		if (ps_pdwrite(ph_p, (psaddr_t)&ulwp->ul_pri,
2340 		    &priority, sizeof (priority)) != PS_OK)
2341 			return_val = TD_DBERR;
2342 	} else {
2343 #if defined(_LP64) && defined(_SYSCALL32)
2344 		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2345 
2346 		if (ps_pdwrite(ph_p, (psaddr_t)&ulwp->ul_pri,
2347 		    &priority, sizeof (priority)) != PS_OK)
2348 			return_val = TD_DBERR;
2349 #else
2350 		return_val = TD_ERR;
2351 #endif	/* _SYSCALL32 */
2352 	}
2353 
2354 	ph_unlock(th_p->th_ta_p);
2355 	return (return_val);
2356 }
2357 
2358 /*
2359  * This structure links td_thr_lockowner and the lowner_cb callback function.
2360  */
2361 typedef struct {
2362 	td_sync_iter_f	*owner_cb;
2363 	void		*owner_cb_arg;
2364 	td_thrhandle_t	*th_p;
2365 } lowner_cb_ctl_t;
2366 
2367 static int
2368 lowner_cb(const td_synchandle_t *sh_p, void *arg)
2369 {
2370 	lowner_cb_ctl_t *ocb = arg;
2371 	int trunc = 0;
2372 	union {
2373 		rwlock_t rwl;
2374 		mutex_t mx;
2375 	} rw_m;
2376 
2377 	if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
2378 	    &rw_m, sizeof (rw_m)) != PS_OK) {
2379 		trunc = 1;
2380 		if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
2381 		    &rw_m.mx, sizeof (rw_m.mx)) != PS_OK)
2382 			return (0);
2383 	}
2384 	if (rw_m.mx.mutex_magic == MUTEX_MAGIC &&
2385 	    rw_m.mx.mutex_owner == ocb->th_p->th_unique)
2386 		return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
2387 	if (!trunc && rw_m.rwl.magic == RWL_MAGIC) {
2388 		mutex_t *rwlock = &rw_m.rwl.mutex;
2389 		if (rwlock->mutex_owner == ocb->th_p->th_unique)
2390 			return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
2391 	}
2392 	return (0);
2393 }
2394 
2395 /*
2396  * Iterate over the set of locks owned by a specified thread.
2397  * If cb returns a non-zero value, terminate iterations.
2398  */
2399 #pragma weak td_thr_lockowner = __td_thr_lockowner
2400 td_err_e
2401 __td_thr_lockowner(const td_thrhandle_t *th_p, td_sync_iter_f *cb,
2402 	void *cb_data)
2403 {
2404 	td_thragent_t	*ta_p;
2405 	td_err_e	return_val;
2406 	lowner_cb_ctl_t	lcb;
2407 
2408 	/*
2409 	 * Just sanity checks.
2410 	 */
2411 	if (ph_lock_th((td_thrhandle_t *)th_p, &return_val) == NULL)
2412 		return (return_val);
2413 	ta_p = th_p->th_ta_p;
2414 	ph_unlock(ta_p);
2415 
2416 	lcb.owner_cb = cb;
2417 	lcb.owner_cb_arg = cb_data;
2418 	lcb.th_p = (td_thrhandle_t *)th_p;
2419 	return (__td_ta_sync_iter(ta_p, lowner_cb, &lcb));
2420 }
2421 
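/*
 * Illustrative sketch (not part of the library): counting the locks
 * a thread owns via td_thr_lockowner().  The callback has the
 * td_sync_iter_f signature and returns 0 to keep iterating:
 *
 *	static int
 *	count_one(const td_synchandle_t *sh_p, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (0);
 *	}
 *
 *	int nlocks = 0;
 *	(void) td_thr_lockowner(th_p, count_one, &nlocks);
 */
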
2422 /*
2423  * If a thread is asleep on a synchronization variable,
2424  * then get the synchronization handle.
2425  */
2426 #pragma weak td_thr_sleepinfo = __td_thr_sleepinfo
2427 td_err_e
2428 __td_thr_sleepinfo(const td_thrhandle_t *th_p, td_synchandle_t *sh_p)
2429 {
2430 	struct ps_prochandle *ph_p;
2431 	td_err_e	return_val = TD_OK;
2432 	uintptr_t	wchan;
2433 
2434 	if (sh_p == NULL)
2435 		return (TD_ERR);
2436 	if ((ph_p = ph_lock_th((td_thrhandle_t *)th_p, &return_val)) == NULL)
2437 		return (return_val);
2438 
2439 	/*
2440 	 * No need to stop the process for a simple read.
2441 	 */
2442 	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
2443 		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2444 
2445 		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2446 		    &wchan, sizeof (wchan)) != PS_OK)
2447 			return_val = TD_DBERR;
2448 	} else {
2449 #if defined(_LP64) && defined(_SYSCALL32)
2450 		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2451 		caddr32_t wchan32;
2452 
2453 		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2454 		    &wchan32, sizeof (wchan32)) != PS_OK)
2455 			return_val = TD_DBERR;
2456 		wchan = wchan32;
2457 #else
2458 		return_val = TD_ERR;
2459 #endif	/* _SYSCALL32 */
2460 	}
2461 
2462 	if (return_val != TD_OK || wchan == NULL) {
2463 		sh_p->sh_ta_p = NULL;
2464 		sh_p->sh_unique = NULL;
2465 		if (return_val == TD_OK)
2466 			return_val = TD_ERR;
2467 	} else {
2468 		sh_p->sh_ta_p = th_p->th_ta_p;
2469 		sh_p->sh_unique = (psaddr_t)wchan;
2470 	}
2471 
2472 	ph_unlock(th_p->th_ta_p);
2473 	return (return_val);
2474 }
2475 
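/*
 * Illustrative sketch (not part of the library): combining
 * td_thr_sleepinfo() with td_sync_get_info() (below) to learn what
 * a sleeping thread is blocked on:
 *
 *	td_synchandle_t sh;
 *	td_syncinfo_t si;
 *
 *	if (td_thr_sleepinfo(th_p, &sh) == TD_OK &&
 *	    td_sync_get_info(&sh, &si) == TD_OK) {
 *		... si.si_type distinguishes mutex, condvar,
 *		    semaphore and readers/writer lock ...
 *	}
 */
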
2476 /*
2477  * Which thread is running on an lwp?
2478  */
2479 #pragma weak td_ta_map_lwp2thr = __td_ta_map_lwp2thr
2480 td_err_e
2481 __td_ta_map_lwp2thr(td_thragent_t *ta_p, lwpid_t lwpid,
2482 	td_thrhandle_t *th_p)
2483 {
2484 	return (__td_ta_map_id2thr(ta_p, lwpid, th_p));
2485 }
2486 
2487 /*
2488  * Common code for td_sync_get_info() and td_sync_get_stats()
2489  */
2490 static td_err_e
2491 sync_get_info_common(const td_synchandle_t *sh_p, struct ps_prochandle *ph_p,
2492 	td_syncinfo_t *si_p)
2493 {
2494 	int trunc = 0;
2495 	td_so_un_t generic_so;
2496 
2497 	/*
2498 	 * Determine the sync. object type; a little type fudgery here.
2499 	 * First attempt to read the whole union.  If that fails, attempt
2500 	 * to read just the condvar.  A condvar is the smallest sync. object.
2501 	 */
2502 	if (ps_pdread(ph_p, sh_p->sh_unique,
2503 	    &generic_so, sizeof (generic_so)) != PS_OK) {
2504 		trunc = 1;
2505 		if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
2506 		    sizeof (generic_so.condition)) != PS_OK)
2507 			return (TD_DBERR);
2508 	}
2509 
2510 	switch (generic_so.condition.cond_magic) {
2511 	case MUTEX_MAGIC:
2512 		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2513 		    &generic_so.lock, sizeof (generic_so.lock)) != PS_OK)
2514 			return (TD_DBERR);
2515 		si_p->si_type = TD_SYNC_MUTEX;
2516 		si_p->si_shared_type = generic_so.lock.mutex_type;
2517 		(void) memcpy(si_p->si_flags, &generic_so.lock.mutex_flag,
2518 		    sizeof (generic_so.lock.mutex_flag));
2519 		si_p->si_state.mutex_locked =
2520 		    (generic_so.lock.mutex_lockw != 0);
2521 		si_p->si_size = sizeof (generic_so.lock);
2522 		si_p->si_has_waiters = generic_so.lock.mutex_waiters;
2523 		si_p->si_rcount = generic_so.lock.mutex_rcount;
2524 		si_p->si_prioceiling = generic_so.lock.mutex_ceiling;
2525 		if (si_p->si_state.mutex_locked) {
2526 			if (si_p->si_shared_type &
2527 			    (USYNC_PROCESS | USYNC_PROCESS_ROBUST))
2528 				si_p->si_ownerpid =
2529 					generic_so.lock.mutex_ownerpid;
2530 			si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2531 			si_p->si_owner.th_unique = generic_so.lock.mutex_owner;
2532 		}
2533 		break;
2534 	case COND_MAGIC:
2535 		si_p->si_type = TD_SYNC_COND;
2536 		si_p->si_shared_type = generic_so.condition.cond_type;
2537 		(void) memcpy(si_p->si_flags, generic_so.condition.flags.flag,
2538 		    sizeof (generic_so.condition.flags.flag));
2539 		si_p->si_size = sizeof (generic_so.condition);
2540 		si_p->si_has_waiters =
2541 			(generic_so.condition.cond_waiters_user |
2542 			generic_so.condition.cond_waiters_kernel)? 1 : 0;
2543 		break;
2544 	case SEMA_MAGIC:
2545 		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2546 		    &generic_so.semaphore, sizeof (generic_so.semaphore))
2547 		    != PS_OK)
2548 			return (TD_DBERR);
2549 		si_p->si_type = TD_SYNC_SEMA;
2550 		si_p->si_shared_type = generic_so.semaphore.type;
2551 		si_p->si_state.sem_count = generic_so.semaphore.count;
2552 		si_p->si_size = sizeof (generic_so.semaphore);
2553 		si_p->si_has_waiters =
2554 		    ((lwp_sema_t *)&generic_so.semaphore)->flags[7];
2555 		/* this is useless but the old interface provided it */
2556 		si_p->si_data = (psaddr_t)generic_so.semaphore.count;
2557 		break;
2558 	case RWL_MAGIC:
2559 		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2560 		    &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK)
2561 			return (TD_DBERR);
2562 		si_p->si_type = TD_SYNC_RWLOCK;
2563 		si_p->si_shared_type = generic_so.rwlock.rwlock_type;
2564 		si_p->si_size = sizeof (generic_so.rwlock);
2565 		if (generic_so.rwlock.rwlock_type == USYNC_PROCESS) {
2566 			uint32_t *rwstate =
2567 			    (uint32_t *)&generic_so.rwlock.readers;
2568 
2569 			if (*rwstate & URW_WRITE_LOCKED) {
2570 				si_p->si_state.nreaders = -1;
2571 				si_p->si_is_wlock = 1;
2572 				si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2573 				si_p->si_owner.th_unique =
2574 					generic_so.rwlock.rwlock_owner;
2575 			} else if (*rwstate & URW_READERS_MASK)
2576 				si_p->si_state.nreaders =
2577 				    *rwstate & URW_READERS_MASK;
2578 			else
2579 				si_p->si_state.nreaders = 0;
2580 			si_p->si_has_waiters = (*rwstate & URW_HAS_WAITERS);
2581 		} else {
2582 			si_p->si_state.nreaders = generic_so.rwlock.readers;
2583 			si_p->si_has_waiters =
2584 			    generic_so.rwlock.rwlock_mwaiters;
2585 			if (si_p->si_state.nreaders == -1) {
2586 				si_p->si_is_wlock = 1;
2587 				si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2588 				si_p->si_owner.th_unique =
2589 					generic_so.rwlock.rwlock_mowner;
2590 			}
2591 		}
2592 		/* this is useless but the old interface provided it */
2593 		si_p->si_data = (psaddr_t)generic_so.rwlock.readers;
2594 		break;
2595 	default:
2596 		return (TD_BADSH);
2597 	}
2598 
2599 	si_p->si_ta_p = sh_p->sh_ta_p;
2600 	si_p->si_sv_addr = sh_p->sh_unique;
2601 	return (TD_OK);
2602 }
2603 
2604 /*
2605  * Given a synchronization handle, fill in the
2606  * information for the synchronization variable into *si_p.
2607  */
2608 #pragma weak td_sync_get_info = __td_sync_get_info
2609 td_err_e
2610 __td_sync_get_info(const td_synchandle_t *sh_p, td_syncinfo_t *si_p)
2611 {
2612 	struct ps_prochandle *ph_p;
2613 	td_err_e return_val;
2614 
2615 	if (si_p == NULL)
2616 		return (TD_ERR);
2617 	(void) memset(si_p, 0, sizeof (*si_p));
2618 	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2619 		return (return_val);
2620 	if (ps_pstop(ph_p) != PS_OK) {
2621 		ph_unlock(sh_p->sh_ta_p);
2622 		return (TD_DBERR);
2623 	}
2624 
2625 	return_val = sync_get_info_common(sh_p, ph_p, si_p);
2626 
2627 	(void) ps_pcontinue(ph_p);
2628 	ph_unlock(sh_p->sh_ta_p);
2629 	return (return_val);
2630 }
2631 
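/*
 * Illustrative sketch (not part of the library): for a locked mutex,
 * the information filled in by td_sync_get_info() identifies the
 * owner and whether any threads are blocked on it:
 *
 *	td_syncinfo_t si;
 *
 *	if (td_sync_get_info(sh_p, &si) == TD_OK &&
 *	    si.si_type == TD_SYNC_MUTEX && si.si_state.mutex_locked) {
 *		... si.si_owner is a handle for the owning thread;
 *		    si.si_has_waiters flags contention ...
 *	}
 */
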
2632 static uint_t
2633 tdb_addr_hash64(uint64_t addr)
2634 {
2635 	uint64_t value60 = (addr >> 4);
2636 	uint32_t value30 = (value60 >> 30) ^ (value60 & 0x3fffffff);
2637 	return ((value30 >> 15) ^ (value30 & 0x7fff));
2638 }
2639 
2640 static uint_t
2641 tdb_addr_hash32(uint64_t addr)
2642 {
2643 	uint32_t value30 = (addr >> 2);		/* 30 bits */
2644 	return ((value30 >> 15) ^ (value30 & 0x7fff));
2645 }
2646 
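/*
 * Both hash functions above fold a synch object address down to a
 * 15-bit table index: the low address bits that are always zero due
 * to alignment are discarded, and the remaining bits are XOR-folded
 * in halves (60 -> 30 -> 15 bits for 64-bit addresses, 30 -> 15 for
 * 32-bit ones) so that every address bit influences the index.
 */
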
2647 static td_err_e
2648 read_sync_stats(td_thragent_t *ta_p, psaddr_t hash_table,
2649 	psaddr_t sync_obj_addr, tdb_sync_stats_t *sync_stats)
2650 {
2651 	psaddr_t next_desc;
2652 	uint64_t first;
2653 	uint_t ix;
2654 
2655 	/*
2656 	 * Compute the hash table index from the synch object's address.
2657 	 */
2658 	if (ta_p->model == PR_MODEL_LP64)
2659 		ix = tdb_addr_hash64(sync_obj_addr);
2660 	else
2661 		ix = tdb_addr_hash32(sync_obj_addr);
2662 
2663 	/*
2664 	 * Get the address of the first element in the linked list.
2665 	 */
2666 	if (ps_pdread(ta_p->ph_p, hash_table + ix * sizeof (uint64_t),
2667 	    &first, sizeof (first)) != PS_OK)
2668 		return (TD_DBERR);
2669 
2670 	/*
2671 	 * Search the linked list for an entry for the synch object.
2672 	 */
2673 	for (next_desc = (psaddr_t)first; next_desc != NULL;
2674 	    next_desc = (psaddr_t)sync_stats->next) {
2675 		if (ps_pdread(ta_p->ph_p, next_desc,
2676 		    sync_stats, sizeof (*sync_stats)) != PS_OK)
2677 			return (TD_DBERR);
2678 		if (sync_stats->sync_addr == sync_obj_addr)
2679 			return (TD_OK);
2680 	}
2681 
2682 	(void) memset(sync_stats, 0, sizeof (*sync_stats));
2683 	return (TD_OK);
2684 }
2685 
2686 /*
2687  * Given a synchronization handle, fill in the
2688  * statistics for the synchronization variable into *ss_p.
2689  */
2690 #pragma weak td_sync_get_stats = __td_sync_get_stats
2691 td_err_e
2692 __td_sync_get_stats(const td_synchandle_t *sh_p, td_syncstats_t *ss_p)
2693 {
2694 	struct ps_prochandle *ph_p;
2695 	td_thragent_t *ta_p;
2696 	td_err_e return_val;
2697 	register_sync_t enable;
2698 	psaddr_t hashaddr;
2699 	tdb_sync_stats_t sync_stats;
2700 	size_t ix;
2701 
2702 	if (ss_p == NULL)
2703 		return (TD_ERR);
2704 	(void) memset(ss_p, 0, sizeof (*ss_p));
2705 	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2706 		return (return_val);
2707 	ta_p = sh_p->sh_ta_p;
2708 	if (ps_pstop(ph_p) != PS_OK) {
2709 		ph_unlock(ta_p);
2710 		return (TD_DBERR);
2711 	}
2712 
2713 	if ((return_val = sync_get_info_common(sh_p, ph_p, &ss_p->ss_info))
2714 	    != TD_OK) {
2715 		if (return_val != TD_BADSH)
2716 			goto out;
2717 		/* we can correct TD_BADSH */
2718 		(void) memset(&ss_p->ss_info, 0, sizeof (ss_p->ss_info));
2719 		ss_p->ss_info.si_ta_p = sh_p->sh_ta_p;
2720 		ss_p->ss_info.si_sv_addr = sh_p->sh_unique;
2721 		/* we correct si_type and si_size below */
2722 		return_val = TD_OK;
2723 	}
2724 	if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
2725 	    &enable, sizeof (enable)) != PS_OK) {
2726 		return_val = TD_DBERR;
2727 		goto out;
2728 	}
2729 	if (enable != REGISTER_SYNC_ON)
2730 		goto out;
2731 
2732 	/*
2733 	 * Get the address of the hash table in the target process.
2734 	 */
2735 	if (ta_p->model == PR_MODEL_NATIVE) {
2736 		if (ps_pdread(ph_p, ta_p->uberdata_addr +
2737 		    offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
2738 		    &hashaddr, sizeof (hashaddr)) != PS_OK) {
2739 			return_val = TD_DBERR;
2740 			goto out;
2741 		}
2742 	} else {
2743 #if defined(_LP64) && defined(_SYSCALL32)
2744 		caddr32_t addr;
2745 
2746 		if (ps_pdread(ph_p, ta_p->uberdata_addr +
2747 		    offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
2748 		    &addr, sizeof (addr)) != PS_OK) {
2749 			return_val = TD_DBERR;
2750 			goto out;
2751 		}
2752 		hashaddr = addr;
2753 #else
2754 		return_val = TD_ERR;
2755 		goto out;
2756 #endif	/* _SYSCALL32 */
2757 	}
2758 
2759 	if (hashaddr == 0)
2760 		return_val = TD_BADSH;
2761 	else
2762 		return_val = read_sync_stats(ta_p, hashaddr,
2763 			sh_p->sh_unique, &sync_stats);
2764 	if (return_val != TD_OK)
2765 		goto out;
2766 
2767 	/*
2768 	 * We have the hash table entry.  Transfer the data to
2769 	 * the td_syncstats_t structure provided by the caller.
2770 	 */
2771 	switch (sync_stats.un.type) {
2772 	case TDB_MUTEX:
2773 	    {
2774 		td_mutex_stats_t *msp = &ss_p->ss_un.mutex;
2775 
2776 		ss_p->ss_info.si_type = TD_SYNC_MUTEX;
2777 		ss_p->ss_info.si_size = sizeof (mutex_t);
2778 		msp->mutex_lock =
2779 			sync_stats.un.mutex.mutex_lock;
2780 		msp->mutex_sleep =
2781 			sync_stats.un.mutex.mutex_sleep;
2782 		msp->mutex_sleep_time =
2783 			sync_stats.un.mutex.mutex_sleep_time;
2784 		msp->mutex_hold_time =
2785 			sync_stats.un.mutex.mutex_hold_time;
2786 		msp->mutex_try =
2787 			sync_stats.un.mutex.mutex_try;
2788 		msp->mutex_try_fail =
2789 			sync_stats.un.mutex.mutex_try_fail;
2790 		if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2791 		    (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2792 		    < ta_p->hash_size * sizeof (thr_hash_table_t))
2793 			msp->mutex_internal =
2794 				ix / sizeof (thr_hash_table_t) + 1;
2795 		break;
2796 	    }
2797 	case TDB_COND:
2798 	    {
2799 		td_cond_stats_t *csp = &ss_p->ss_un.cond;
2800 
2801 		ss_p->ss_info.si_type = TD_SYNC_COND;
2802 		ss_p->ss_info.si_size = sizeof (cond_t);
2803 		csp->cond_wait =
2804 			sync_stats.un.cond.cond_wait;
2805 		csp->cond_timedwait =
2806 			sync_stats.un.cond.cond_timedwait;
2807 		csp->cond_wait_sleep_time =
2808 			sync_stats.un.cond.cond_wait_sleep_time;
2809 		csp->cond_timedwait_sleep_time =
2810 			sync_stats.un.cond.cond_timedwait_sleep_time;
2811 		csp->cond_timedwait_timeout =
2812 			sync_stats.un.cond.cond_timedwait_timeout;
2813 		csp->cond_signal =
2814 			sync_stats.un.cond.cond_signal;
2815 		csp->cond_broadcast =
2816 			sync_stats.un.cond.cond_broadcast;
2817 		if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2818 		    (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2819 		    < ta_p->hash_size * sizeof (thr_hash_table_t))
2820 			csp->cond_internal =
2821 				ix / sizeof (thr_hash_table_t) + 1;
2822 		break;
2823 	    }
2824 	case TDB_RWLOCK:
2825 	    {
2826 		psaddr_t cond_addr;
2827 		tdb_sync_stats_t cond_stats;
2828 		td_rwlock_stats_t *rwsp = &ss_p->ss_un.rwlock;
2829 
2830 		ss_p->ss_info.si_type = TD_SYNC_RWLOCK;
2831 		ss_p->ss_info.si_size = sizeof (rwlock_t);
2832 		rwsp->rw_rdlock =
2833 			sync_stats.un.rwlock.rw_rdlock;
2834 		cond_addr = (psaddr_t)&((rwlock_t *)sh_p->sh_unique)->readercv;
2835 		if (read_sync_stats(ta_p, hashaddr, cond_addr, &cond_stats)
2836 		    == TD_OK) {
2837 			rwsp->rw_rdlock_sleep =
2838 				cond_stats.un.cond.cond_wait;
2839 			rwsp->rw_rdlock_sleep_time =
2840 				cond_stats.un.cond.cond_wait_sleep_time;
2841 		}
2842 		rwsp->rw_rdlock_try =
2843 			sync_stats.un.rwlock.rw_rdlock_try;
2844 		rwsp->rw_rdlock_try_fail =
2845 			sync_stats.un.rwlock.rw_rdlock_try_fail;
2846 		rwsp->rw_wrlock =
2847 			sync_stats.un.rwlock.rw_wrlock;
2848 		cond_addr = (psaddr_t)&((rwlock_t *)sh_p->sh_unique)->writercv;
2849 		if (read_sync_stats(ta_p, hashaddr, cond_addr, &cond_stats)
2850 		    == TD_OK) {
2851 			rwsp->rw_wrlock_sleep =
2852 				cond_stats.un.cond.cond_wait;
2853 			rwsp->rw_wrlock_sleep_time =
2854 				cond_stats.un.cond.cond_wait_sleep_time;
2855 		}
2856 		rwsp->rw_wrlock_hold_time =
2857 			sync_stats.un.rwlock.rw_wrlock_hold_time;
2858 		rwsp->rw_wrlock_try =
2859 			sync_stats.un.rwlock.rw_wrlock_try;
2860 		rwsp->rw_wrlock_try_fail =
2861 			sync_stats.un.rwlock.rw_wrlock_try_fail;
2862 		break;
2863 	    }
2864 	case TDB_SEMA:
2865 	    {
2866 		td_sema_stats_t *ssp = &ss_p->ss_un.sema;
2867 
2868 		ss_p->ss_info.si_type = TD_SYNC_SEMA;
2869 		ss_p->ss_info.si_size = sizeof (sema_t);
2870 		ssp->sema_wait =
2871 			sync_stats.un.sema.sema_wait;
2872 		ssp->sema_wait_sleep =
2873 			sync_stats.un.sema.sema_wait_sleep;
2874 		ssp->sema_wait_sleep_time =
2875 			sync_stats.un.sema.sema_wait_sleep_time;
2876 		ssp->sema_trywait =
2877 			sync_stats.un.sema.sema_trywait;
2878 		ssp->sema_trywait_fail =
2879 			sync_stats.un.sema.sema_trywait_fail;
2880 		ssp->sema_post =
2881 			sync_stats.un.sema.sema_post;
2882 		ssp->sema_max_count =
2883 			sync_stats.un.sema.sema_max_count;
2884 		ssp->sema_min_count =
2885 			sync_stats.un.sema.sema_min_count;
2886 		break;
2887 	    }
2888 	default:
2889 		return_val = TD_BADSH;
2890 		break;
2891 	}
2892 
2893 out:
2894 	(void) ps_pcontinue(ph_p);
2895 	ph_unlock(ta_p);
2896 	return (return_val);
2897 }
2898 
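/*
 * Illustrative sketch (not part of the library): statistics are
 * gathered only while synch. object tracking is enabled (see
 * td_ta_enable_stats()), so a caller enables tracking, lets the
 * target run for a while, and then samples:
 *
 *	td_syncstats_t ss;
 *
 *	if (td_sync_get_stats(sh_p, &ss) == TD_OK &&
 *	    ss.ss_info.si_type == TD_SYNC_MUTEX) {
 *		... ss.ss_un.mutex holds the contention counts ...
 *	}
 */
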
2899 /*
2900  * Change the state of a synchronization variable.
2901  *	1) mutex lock state set to value
2902  *	2) semaphore's count set to value
2903  *	3) writer's lock set to value
2904  *	4) reader's lock: number of readers set to value
2905  * Currently unused by dbx.
2906  */
2907 #pragma weak td_sync_setstate = __td_sync_setstate
2908 td_err_e
2909 __td_sync_setstate(const td_synchandle_t *sh_p, long lvalue)
2910 {
2911 	struct ps_prochandle *ph_p;
2912 	int		trunc = 0;
2913 	td_err_e	return_val;
2914 	td_so_un_t	generic_so;
2915 	int		value = (int)lvalue;
2916 
2917 	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2918 		return (return_val);
2919 	if (ps_pstop(ph_p) != PS_OK) {
2920 		ph_unlock(sh_p->sh_ta_p);
2921 		return (TD_DBERR);
2922 	}
2923 
2924 	/*
2925 	 * Read the synch. variable information.
2926 	 * First attempt to read the whole union and if that fails
2927 	 * fall back to reading only the smallest member, the condvar.
2928 	 */
2929 	if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so,
2930 	    sizeof (generic_so)) != PS_OK) {
2931 		trunc = 1;
2932 		if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
2933 		    sizeof (generic_so.condition)) != PS_OK) {
2934 			(void) ps_pcontinue(ph_p);
2935 			ph_unlock(sh_p->sh_ta_p);
2936 			return (TD_DBERR);
2937 		}
2938 	}
2939 
2940 	/*
2941 	 * Set the new value in the synch. variable: reset the value in
2942 	 * the copy read from the process and write the copy back.
2943 	 */
2944 	switch (generic_so.condition.cond_magic) {
2945 	case MUTEX_MAGIC:
2946 		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2947 		    &generic_so.lock, sizeof (generic_so.lock)) != PS_OK) {
2948 			return_val = TD_DBERR;
2949 			break;
2950 		}
2951 		generic_so.lock.mutex_lockw = (uint8_t)value;
2952 		if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.lock,
2953 		    sizeof (generic_so.lock)) != PS_OK)
2954 			return_val = TD_DBERR;
2955 		break;
2956 	case SEMA_MAGIC:
2957 		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2958 		    &generic_so.semaphore, sizeof (generic_so.semaphore))
2959 		    != PS_OK) {
2960 			return_val = TD_DBERR;
2961 			break;
2962 		}
2963 		generic_so.semaphore.count = value;
2964 		if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.semaphore,
2965 		    sizeof (generic_so.semaphore)) != PS_OK)
2966 			return_val = TD_DBERR;
2967 		break;
2968 	case COND_MAGIC:
2969 		/* Operation not supported on a condition variable */
2970 		return_val = TD_ERR;
2971 		break;
2972 	case RWL_MAGIC:
2973 		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2974 		    &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK) {
2975 			return_val = TD_DBERR;
2976 			break;
2977 		}
2978 		if (generic_so.rwlock.rwlock_type == USYNC_PROCESS) {
2979 			uint32_t *rwstate =
2980 			    (uint32_t *)&generic_so.rwlock.readers;
2981 			if (value < 0)
2982 				*rwstate = URW_WRITE_LOCKED;
2983 			else if (value > 0)
2984 				*rwstate = (value & URW_READERS_MASK);
2985 			else
2986 				*rwstate = 0;
2987 		} else
2988 			generic_so.rwlock.readers = value;
2989 
2990 		if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.rwlock,
2991 		    sizeof (generic_so.rwlock)) != PS_OK)
2992 			return_val = TD_DBERR;
2993 		break;
2994 	default:
2995 		/* Bad sync. object type */
2996 		return_val = TD_BADSH;
2997 		break;
2998 	}
2999 
3000 	(void) ps_pcontinue(ph_p);
3001 	ph_unlock(sh_p->sh_ta_p);
3002 	return (return_val);
3003 }
3004 
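/*
 * Illustrative sketch (not part of the library): forcibly clearing a
 * wedged mutex from the debugger.  Note that td_sync_setstate() only
 * rewrites the lock word in the target; it does not wake any waiters:
 *
 *	if (td_sync_setstate(sh_p, 0) != TD_OK) {
 *		... the mutex could not be cleared ...
 *	}
 */
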
3005 typedef struct {
3006 	td_thr_iter_f	*waiter_cb;
3007 	psaddr_t	sync_obj_addr;
3008 	uint16_t	sync_magic;
3009 	void		*waiter_cb_arg;
3010 	td_err_e	errcode;
3011 } waiter_cb_ctl_t;
3012 
3013 static int
3014 waiters_cb(const td_thrhandle_t *th_p, void *arg)
3015 {
3016 	td_thragent_t	*ta_p = th_p->th_ta_p;
3017 	struct ps_prochandle *ph_p = ta_p->ph_p;
3018 	waiter_cb_ctl_t	*wcb = arg;
3019 	caddr_t		wchan;
3020 
3021 	if (ta_p->model == PR_MODEL_NATIVE) {
3022 		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
3023 
3024 		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
3025 		    &wchan, sizeof (wchan)) != PS_OK) {
3026 			wcb->errcode = TD_DBERR;
3027 			return (1);
3028 		}
3029 	} else {
3030 #if defined(_LP64) && defined(_SYSCALL32)
3031 		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
3032 		caddr32_t wchan32;
3033 
3034 		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
3035 		    &wchan32, sizeof (wchan32)) != PS_OK) {
3036 			wcb->errcode = TD_DBERR;
3037 			return (1);
3038 		}
3039 		wchan = (caddr_t)(uintptr_t)wchan32;
3040 #else
3041 		wcb->errcode = TD_ERR;
3042 		return (1);
3043 #endif	/* _SYSCALL32 */
3044 	}
3045 
3046 	if (wchan == NULL)
3047 		return (0);
3048 
3049 	if (wchan == (caddr_t)wcb->sync_obj_addr)
3050 		return ((*wcb->waiter_cb)(th_p, wcb->waiter_cb_arg));
3051 
3052 	return (0);
3053 }
3054 
3055 /*
3056  * For a given synchronization variable, iterate over the
3057  * set of waiting threads.  The call back function is passed
3058  * two parameters, a pointer to a thread handle and a pointer
3059  * to extra call back data.
3060  */
3061 #pragma weak td_sync_waiters = __td_sync_waiters
3062 td_err_e
3063 __td_sync_waiters(const td_synchandle_t *sh_p, td_thr_iter_f *cb, void *cb_data)
3064 {
3065 	struct ps_prochandle *ph_p;
3066 	waiter_cb_ctl_t	wcb;
3067 	td_err_e	return_val;
3068 
3069 	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
3070 		return (return_val);
3071 	if (ps_pdread(ph_p,
3072 	    (psaddr_t)&((mutex_t *)sh_p->sh_unique)->mutex_magic,
3073 	    (caddr_t)&wcb.sync_magic, sizeof (wcb.sync_magic)) != PS_OK) {
3074 		ph_unlock(sh_p->sh_ta_p);
3075 		return (TD_DBERR);
3076 	}
3077 	ph_unlock(sh_p->sh_ta_p);
3078 
3079 	switch (wcb.sync_magic) {
3080 	case MUTEX_MAGIC:
3081 	case COND_MAGIC:
3082 	case SEMA_MAGIC:
3083 	case RWL_MAGIC:
3084 		break;
3085 	default:
3086 		return (TD_BADSH);
3087 	}
3088 
3089 	wcb.waiter_cb = cb;
3090 	wcb.sync_obj_addr = sh_p->sh_unique;
3091 	wcb.waiter_cb_arg = cb_data;
3092 	wcb.errcode = TD_OK;
3093 	return_val = __td_ta_thr_iter(sh_p->sh_ta_p, waiters_cb, &wcb,
3094 		TD_THR_SLEEP, TD_THR_LOWEST_PRIORITY,
3095 		TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
3096 
3097 	if (return_val != TD_OK)
3098 		return (return_val);
3099 
3100 	return (wcb.errcode);
3101 }
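
/*
 * Illustrative sketch (not part of the library): counting the threads
 * blocked on a synchronization variable with td_sync_waiters().  The
 * callback has the td_thr_iter_f signature and returns 0 to continue:
 *
 *	static int
 *	note_waiter(const td_thrhandle_t *th_p, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (0);
 *	}
 *
 *	int nwaiters = 0;
 *	(void) td_sync_waiters(sh_p, note_waiter, &nwaiters);
 */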
3102