xref: /illumos-gate/usr/src/cmd/mdb/common/mdb/mdb_target.c (revision bea83d026ee1bd1b2a2419e1d0232f107a5d7d9b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * MDB Target Layer
30  *
31  * The *target* is the program being inspected by the debugger.  The MDB target
32  * layer provides a set of functions that insulate common debugger code,
33  * including the MDB Module API, from the implementation details of how the
34  * debugger accesses information from a given target.  Each target exports a
35  * standard set of properties, including one or more address  spaces, one or
36  * more symbol tables, a set of load objects, and a set of threads that can be
37  * examined using the interfaces in <mdb/mdb_target.h>.  This technique has
38  * been employed successfully in other debuggers, including [1], primarily
39  * to improve portability, although the term "target" often refers to the
40  * encapsulation of architectural or operating system-specific details.  The
41  * target abstraction is useful for MDB because it allows us to easily extend
42  * the debugger to examine a variety of different program forms.  Primarily,
43  * the target functions validate input arguments and then call an appropriate
44  * function in the target ops vector, defined in <mdb/mdb_target_impl.h>.
45  * However, this interface layer provides a very high level of flexibility for
46  * separating the debugger interface from instrumentation details.  Experience
47  * has shown this kind of design can facilitate separating out debugger
48  * instrumentation into an external agent [2] and enable the development of
49  * advanced instrumentation frameworks [3].  We want MDB to be an ideal
50  * extensible framework for the development of such applications.
51  *
52  * Aside from a set of wrapper functions, the target layer also provides event
53  * management for targets that represent live executing programs.  Our model of
54  * events is also extensible, and is based upon work in [3] and [4].  We define
55  * a *software event* as a state transition in the target program (for example,
56  * the transition of the program counter to a location of interest) that is
57  * observed by the debugger or its agent.  A *software event specifier* is a
58  * description of a class of software events that is used by the debugger to
59  * instrument the target so that the corresponding software events can be
60  * observed.  In MDB, software event specifiers are represented by the
61  * mdb_sespec_t structure, defined in <mdb/mdb_target_impl.h>.  As the user,
62  * the internal debugger code, and MDB modules may all wish to observe software
63  * events and receive appropriate notification and callbacks, we do not expose
64  * software event specifiers directly as part of the user interface.  Instead,
65  * clients of the target layer request that events be observed by creating
66  * new *virtual event specifiers*.  Each virtual specifier is named by a unique
67  * non-zero integer (the VID), and is represented by a mdb_vespec_t structure.
68  * One or more virtual specifiers are then associated with each underlying
69  * software event specifier.  This design enforces the constraint that the
70  * target must only insert one set of instrumentation, regardless of how many
71  * times the target layer was asked to trace a given event.  For example, if
72  * multiple clients request a breakpoint at a particular address, the virtual
73  * specifiers will map to the same sespec, ensuring that only one breakpoint
74  * trap instruction is actually planted at the given target address.  When no
75  * virtual specifiers refer to an sespec, it is no longer needed and can be
76  * removed, along with the corresponding instrumentation.
77  *
78  * The following state transition diagram illustrates the life cycle of a
79  * software event specifier and example transitions:
80  *
81  *                                         cont/
82  *     +--------+   delete   +--------+    stop    +-------+
83  *    (|( DEAD )|) <------- (  ACTIVE  ) <------> (  ARMED  )
84  *     +--------+            +--------+            +-------+
85  *          ^   load/unload  ^        ^   failure/     |
86  *   delete |        object /          \  reset        | failure
87  *          |              v            v              |
88  *          |      +--------+          +-------+       |
89  *          +---- (   IDLE   )        (   ERR   ) <----+
90  *          |      +--------+          +-------+
91  *          |                              |
92  *          +------------------------------+
93  *
94  * The MDB execution control model is based upon the synchronous debugging
95  * model exported by Solaris proc(4).  A target program is set running or the
96  * debugger is attached to a running target.  On ISTOP (stop on event of
97  * interest), one target thread is selected as the representative.  The
98  * algorithm for selecting the representative is target-specific, but we assume
99  * that if an observed software event has occurred, the target will select the
100  * thread that triggered the state transition of interest.  The other threads
101  * are stopped in sympathy with the representative as soon as possible.  Prior
102  * to continuing the target, we plant our instrumentation, transitioning event
103  * specifiers from the ACTIVE to the ARMED state, and then back again when the
104  * target stops.  We then query each active event specifier to learn which ones
105  * are matched, and then invoke the callbacks associated with their vespecs.
106  * If an OS error occurs while attempting to arm or disarm a specifier, the
107  * specifier is transitioned to the ERROR state; we will attempt to arm it
108  * again at the next continue.  If no target process is under our control or
109  * if an event is not currently applicable (e.g. a deferred breakpoint on an
110  * object that is not yet loaded), it remains in the IDLE state.  The target
111  * implementation should intercept object load events and then transition the
112  * specifier to the ACTIVE state when the corresponding object is loaded.
113  *
114  * To simplify the debugger implementation and allow targets to easily provide
115  * new types of observable events, most of the event specifier management is
116  * done by the target layer.  Each software event specifier provides an ops
117  * vector of subroutines that the target layer can call to perform the
118  * various state transitions described above.  The target maintains two lists
119  * of mdb_sespec_t's: the t_idle list (IDLE state) and the t_active list
120  * (ACTIVE, ARMED, and ERROR states).  Each mdb_sespec_t maintains a list of
121  * associated mdb_vespec_t's.  If an sespec is IDLE or ERROR, its se_errno
122  * field will have an errno value specifying the reason for its inactivity.
123  * The vespec stores the client's callback function and private data, and the
124  * arguments used to construct the sespec.  All objects are reference counted
125  * so we can destroy an object when it is no longer needed.  The mdb_sespec_t
126  * invariants for the respective states are as follows:
127  *
128  *   IDLE: on t_idle list, se_data == NULL, se_errno != 0, se_ctor not called
129  * ACTIVE: on t_active list, se_data valid, se_errno == 0, se_ctor called
130  *  ARMED: on t_active list, se_data valid, se_errno == 0, se_ctor called
131  *  ERROR: on t_active list, se_data valid, se_errno != 0, se_ctor called
132  *
133  * Additional commentary on specific state transitions and issues involving
134  * event management can be found below near the target layer functions.
135  *
136  * References
137  *
138  * [1] John Gilmore, "Working in GDB", Technical Report, Cygnus Support,
139  *     1.84 edition, 1994.
140  *
141  * [2] David R. Hanson and Mukund Raghavachari, "A Machine-Independent
142  *     Debugger", Software--Practice and Experience, 26(11), 1277-1299(1996).
143  *
144  * [3] Michael W. Shapiro, "RDB: A System for Incremental Replay Debugging",
145  *     Technical Report CS-97-12, Department of Computer Science,
146  *     Brown University.
147  *
148  * [4] Daniel B. Price, "New Techniques for Replay Debugging", Technical
149  *     Report CS-98-05, Department of Computer Science, Brown University.
150  */
151 
152 #include <mdb/mdb_target_impl.h>
153 #include <mdb/mdb_debug.h>
154 #include <mdb/mdb_modapi.h>
155 #include <mdb/mdb_err.h>
156 #include <mdb/mdb_callb.h>
157 #include <mdb/mdb_gelf.h>
158 #include <mdb/mdb_io_impl.h>
159 #include <mdb/mdb_string.h>
160 #include <mdb/mdb_signal.h>
161 #include <mdb/mdb_frame.h>
162 #include <mdb/mdb.h>
163 
164 #include <sys/stat.h>
165 #include <sys/param.h>
166 #include <sys/signal.h>
167 #include <strings.h>
168 #include <stdlib.h>
169 #include <errno.h>
170 
171 /*
172  * Define convenience macros for referencing the set of vespec flag bits that
173  * are preserved by the target implementation, and the set of bits that
174  * determine automatic ve_hits == ve_limit behavior.
175  */
176 #define	T_IMPL_BITS	\
177 	(MDB_TGT_SPEC_INTERNAL | MDB_TGT_SPEC_SILENT | MDB_TGT_SPEC_MATCHED | \
178 	MDB_TGT_SPEC_DELETED)
179 
180 #define	T_AUTO_BITS	\
181 	(MDB_TGT_SPEC_AUTOSTOP | MDB_TGT_SPEC_AUTODEL | MDB_TGT_SPEC_AUTODIS)
182 
183 /*
184  * Define convenience macro for referencing target flag pending continue bits.
185  */
186 #define	T_CONT_BITS	\
187 	(MDB_TGT_F_STEP | MDB_TGT_F_STEP_OUT | MDB_TGT_F_STEP_BRANCH | \
188 	MDB_TGT_F_NEXT | MDB_TGT_F_CONT)
189 
mdb_tgt_t *
mdb_tgt_create(mdb_tgt_ctor_f *ctor, int flags, int argc, const char *argv[])
{
	mdb_module_t *mp;
	mdb_tgt_t *t;

	/* Reject any flag bits outside the documented MDB_TGT_F_* set. */
	if (flags & ~MDB_TGT_F_ALL) {
		(void) set_errno(EINVAL);
		return (NULL);
	}

	/*
	 * Allocate the zeroed target and append it to the global target
	 * list *before* running the constructor: the ctor may need the
	 * target to be discoverable.  On ctor failure we undo both steps.
	 */
	t = mdb_zalloc(sizeof (mdb_tgt_t), UM_SLEEP);
	mdb_list_append(&mdb.m_tgtlist, t);

	t->t_module = &mdb.m_rmod;	/* default owner: root module */
	t->t_matched = T_SE_END;	/* empty matched-sespec list */
	t->t_flags = flags;
	t->t_vepos = 1;			/* next positive vespec id */
	t->t_veneg = 1;			/* next negative (internal) vespec id */

	/*
	 * If some loaded module registered this constructor, attribute the
	 * new target to that module; otherwise it stays owned by m_rmod.
	 */
	for (mp = mdb.m_mhead; mp != NULL; mp = mp->mod_next) {
		if (ctor == mp->mod_tgt_ctor) {
			t->t_module = mp;
			break;
		}
	}

	/* Run the target-specific constructor; roll back on failure. */
	if (ctor(t, argc, argv) != 0) {
		mdb_list_delete(&mdb.m_tgtlist, t);
		mdb_free(t, sizeof (mdb_tgt_t));
		return (NULL);
	}

	mdb_dprintf(MDB_DBG_TGT, "t_create %s (%p)\n",
	    t->t_module->mod_name, (void *)t);

	/* Prime the cached status; mdb_tgt_status() copies this out. */
	(void) t->t_ops->t_status(t, &t->t_status);
	return (t);
}
229 
230 int
231 mdb_tgt_getflags(mdb_tgt_t *t)
232 {
233 	return (t->t_flags);
234 }
235 
236 int
237 mdb_tgt_setflags(mdb_tgt_t *t, int flags)
238 {
239 	if (flags & ~MDB_TGT_F_ALL)
240 		return (set_errno(EINVAL));
241 
242 	return (t->t_ops->t_setflags(t, flags));
243 }
244 
245 int
246 mdb_tgt_setcontext(mdb_tgt_t *t, void *context)
247 {
248 	return (t->t_ops->t_setcontext(t, context));
249 }
250 
251 /*ARGSUSED*/
252 static int
253 tgt_delete_vespec(mdb_tgt_t *t, void *private, int vid, void *data)
254 {
255 	(void) mdb_tgt_vespec_delete(t, vid);
256 	return (0);
257 }
258 
void
mdb_tgt_destroy(mdb_tgt_t *t)
{
	mdb_xdata_t *xdp, *nxdp;

	/*
	 * If this target is currently active, deactivate it first so no
	 * global state refers to it while it is being torn down.
	 */
	if (mdb.m_target == t) {
		mdb_dprintf(MDB_DBG_TGT, "t_deactivate %s (%p)\n",
		    t->t_module->mod_name, (void *)t);
		t->t_ops->t_deactivate(t);
		mdb.m_target = NULL;
	}

	mdb_dprintf(MDB_DBG_TGT, "t_destroy %s (%p)\n",
	    t->t_module->mod_name, (void *)t);

	/* Free each external-data descriptor registered with this target. */
	for (xdp = mdb_list_next(&t->t_xdlist); xdp != NULL; xdp = nxdp) {
		nxdp = mdb_list_next(xdp);
		mdb_list_delete(&t->t_xdlist, xdp);
		mdb_free(xdp, sizeof (mdb_xdata_t));
	}

	/*
	 * Idle every event specifier (removing instrumentation), delete all
	 * vespecs, and only then invoke the target-specific destructor.
	 */
	mdb_tgt_sespec_idle_all(t, EBUSY, TRUE);
	(void) mdb_tgt_vespec_iter(t, tgt_delete_vespec, NULL);
	t->t_ops->t_destroy(t);

	mdb_list_delete(&mdb.m_tgtlist, t);
	mdb_free(t, sizeof (mdb_tgt_t));

	/*
	 * If we destroyed the active target, promote the most recently
	 * created remaining target (tail of m_tgtlist), if any.
	 */
	if (mdb.m_target == NULL)
		mdb_tgt_activate(mdb_list_prev(&mdb.m_tgtlist));
}
290 
void
mdb_tgt_activate(mdb_tgt_t *t)
{
	mdb_tgt_t *otgt = mdb.m_target;

	/* Deactivate the previously active target, if there was one. */
	if (mdb.m_target != NULL) {
		mdb_dprintf(MDB_DBG_TGT, "t_deactivate %s (%p)\n",
		    mdb.m_target->t_module->mod_name, (void *)mdb.m_target);
		mdb.m_target->t_ops->t_deactivate(mdb.m_target);
	}

	/* t may be NULL, meaning "no target is now active". */
	if ((mdb.m_target = t) != NULL) {
		const char *v = strstr(mdb.m_root, "%V");

		mdb_dprintf(MDB_DBG_TGT, "t_activate %s (%p)\n",
		    t->t_module->mod_name, (void *)t);

		/*
		 * If the root was explicitly set with -R and contains %V,
		 * expand it like a path.  If the resulting directory is
		 * not present, then replace %V with "latest" and re-evaluate.
		 */
		if (v != NULL) {
			char old_root[MAXPATHLEN];
			const char **p;
#ifndef _KMDB
			struct stat s;
#endif
			size_t len;

			/* Expand m_root's tokens; keep the original for retry. */
			p = mdb_path_alloc(mdb.m_root, &len);
			(void) strcpy(old_root, mdb.m_root);
			(void) strncpy(mdb.m_root, p[0], MAXPATHLEN);
			mdb.m_root[MAXPATHLEN - 1] = '\0';
			mdb_path_free(p, len);

#ifndef _KMDB
			/*
			 * Expanded root does not exist: set MDB_FL_LATEST and
			 * re-expand the saved root (userland mdb only).
			 */
			if (stat(mdb.m_root, &s) == -1 && errno == ENOENT) {
				mdb.m_flags |= MDB_FL_LATEST;
				p = mdb_path_alloc(old_root, &len);
				(void) strncpy(mdb.m_root, p[0], MAXPATHLEN);
				mdb.m_root[MAXPATHLEN - 1] = '\0';
				mdb_path_free(p, len);
			}
#endif
		}

		/*
		 * Re-evaluate the macro and dmod paths now that we have the
		 * new target set and m_root figured out.
		 */
		if (otgt == NULL) {
			mdb_set_ipath(mdb.m_ipathstr);
			mdb_set_lpath(mdb.m_lpathstr);
		}

		t->t_ops->t_activate(t);
	}
}
350 
351 void
352 mdb_tgt_periodic(mdb_tgt_t *t)
353 {
354 	t->t_ops->t_periodic(t);
355 }
356 
357 const char *
358 mdb_tgt_name(mdb_tgt_t *t)
359 {
360 	return (t->t_ops->t_name(t));
361 }
362 
363 const char *
364 mdb_tgt_isa(mdb_tgt_t *t)
365 {
366 	return (t->t_ops->t_isa(t));
367 }
368 
369 const char *
370 mdb_tgt_platform(mdb_tgt_t *t)
371 {
372 	return (t->t_ops->t_platform(t));
373 }
374 
375 int
376 mdb_tgt_uname(mdb_tgt_t *t, struct utsname *utsp)
377 {
378 	return (t->t_ops->t_uname(t, utsp));
379 }
380 
381 int
382 mdb_tgt_dmodel(mdb_tgt_t *t)
383 {
384 	return (t->t_ops->t_dmodel(t));
385 }
386 
387 int
388 mdb_tgt_auxv(mdb_tgt_t *t, const auxv_t **auxvp)
389 {
390 	return (t->t_ops->t_auxv(t, auxvp));
391 }
392 
393 ssize_t
394 mdb_tgt_aread(mdb_tgt_t *t, mdb_tgt_as_t as,
395 	void *buf, size_t n, mdb_tgt_addr_t addr)
396 {
397 	if (t->t_flags & MDB_TGT_F_ASIO)
398 		return (t->t_ops->t_aread(t, as, buf, n, addr));
399 
400 	switch ((uintptr_t)as) {
401 	case (uintptr_t)MDB_TGT_AS_VIRT:
402 		return (t->t_ops->t_vread(t, buf, n, addr));
403 	case (uintptr_t)MDB_TGT_AS_PHYS:
404 		return (t->t_ops->t_pread(t, buf, n, addr));
405 	case (uintptr_t)MDB_TGT_AS_FILE:
406 		return (t->t_ops->t_fread(t, buf, n, addr));
407 	case (uintptr_t)MDB_TGT_AS_IO:
408 		return (t->t_ops->t_ioread(t, buf, n, addr));
409 	}
410 	return (t->t_ops->t_aread(t, as, buf, n, addr));
411 }
412 
413 ssize_t
414 mdb_tgt_awrite(mdb_tgt_t *t, mdb_tgt_as_t as,
415 	const void *buf, size_t n, mdb_tgt_addr_t addr)
416 {
417 	if (!(t->t_flags & MDB_TGT_F_RDWR))
418 		return (set_errno(EMDB_TGTRDONLY));
419 
420 	if (t->t_flags & MDB_TGT_F_ASIO)
421 		return (t->t_ops->t_awrite(t, as, buf, n, addr));
422 
423 	switch ((uintptr_t)as) {
424 	case (uintptr_t)MDB_TGT_AS_VIRT:
425 		return (t->t_ops->t_vwrite(t, buf, n, addr));
426 	case (uintptr_t)MDB_TGT_AS_PHYS:
427 		return (t->t_ops->t_pwrite(t, buf, n, addr));
428 	case (uintptr_t)MDB_TGT_AS_FILE:
429 		return (t->t_ops->t_fwrite(t, buf, n, addr));
430 	case (uintptr_t)MDB_TGT_AS_IO:
431 		return (t->t_ops->t_iowrite(t, buf, n, addr));
432 	}
433 	return (t->t_ops->t_awrite(t, as, buf, n, addr));
434 }
435 
436 ssize_t
437 mdb_tgt_vread(mdb_tgt_t *t, void *buf, size_t n, uintptr_t addr)
438 {
439 	return (t->t_ops->t_vread(t, buf, n, addr));
440 }
441 
442 ssize_t
443 mdb_tgt_vwrite(mdb_tgt_t *t, const void *buf, size_t n, uintptr_t addr)
444 {
445 	if (t->t_flags & MDB_TGT_F_RDWR)
446 		return (t->t_ops->t_vwrite(t, buf, n, addr));
447 
448 	return (set_errno(EMDB_TGTRDONLY));
449 }
450 
451 ssize_t
452 mdb_tgt_pread(mdb_tgt_t *t, void *buf, size_t n, physaddr_t addr)
453 {
454 	return (t->t_ops->t_pread(t, buf, n, addr));
455 }
456 
457 ssize_t
458 mdb_tgt_pwrite(mdb_tgt_t *t, const void *buf, size_t n, physaddr_t addr)
459 {
460 	if (t->t_flags & MDB_TGT_F_RDWR)
461 		return (t->t_ops->t_pwrite(t, buf, n, addr));
462 
463 	return (set_errno(EMDB_TGTRDONLY));
464 }
465 
466 ssize_t
467 mdb_tgt_fread(mdb_tgt_t *t, void *buf, size_t n, uintptr_t addr)
468 {
469 	return (t->t_ops->t_fread(t, buf, n, addr));
470 }
471 
472 ssize_t
473 mdb_tgt_fwrite(mdb_tgt_t *t, const void *buf, size_t n, uintptr_t addr)
474 {
475 	if (t->t_flags & MDB_TGT_F_RDWR)
476 		return (t->t_ops->t_fwrite(t, buf, n, addr));
477 
478 	return (set_errno(EMDB_TGTRDONLY));
479 }
480 
481 ssize_t
482 mdb_tgt_ioread(mdb_tgt_t *t, void *buf, size_t n, uintptr_t addr)
483 {
484 	return (t->t_ops->t_ioread(t, buf, n, addr));
485 }
486 
487 ssize_t
488 mdb_tgt_iowrite(mdb_tgt_t *t, const void *buf, size_t n, uintptr_t addr)
489 {
490 	if (t->t_flags & MDB_TGT_F_RDWR)
491 		return (t->t_ops->t_iowrite(t, buf, n, addr));
492 
493 	return (set_errno(EMDB_TGTRDONLY));
494 }
495 
496 int
497 mdb_tgt_vtop(mdb_tgt_t *t, mdb_tgt_as_t as, uintptr_t va, physaddr_t *pap)
498 {
499 	return (t->t_ops->t_vtop(t, as, va, pap));
500 }
501 
502 ssize_t
503 mdb_tgt_readstr(mdb_tgt_t *t, mdb_tgt_as_t as, char *buf,
504 	size_t nbytes, mdb_tgt_addr_t addr)
505 {
506 	ssize_t n, nread = mdb_tgt_aread(t, as, buf, nbytes, addr);
507 	char *p;
508 
509 	if (nread >= 0) {
510 		if ((p = memchr(buf, '\0', nread)) != NULL)
511 			nread = (size_t)(p - buf);
512 		goto done;
513 	}
514 
515 	nread = 0;
516 	p = &buf[0];
517 
518 	while (nread < nbytes && (n = mdb_tgt_aread(t, as, p, 1, addr)) == 1) {
519 		if (*p == '\0')
520 			return (nread);
521 		nread++;
522 		addr++;
523 		p++;
524 	}
525 
526 	if (nread == 0 && n == -1)
527 		return (-1); /* If we can't even read a byte, return -1 */
528 
529 done:
530 	if (nbytes != 0)
531 		buf[MIN(nread, nbytes - 1)] = '\0';
532 
533 	return (nread);
534 }
535 
536 ssize_t
537 mdb_tgt_writestr(mdb_tgt_t *t, mdb_tgt_as_t as,
538 	const char *buf, mdb_tgt_addr_t addr)
539 {
540 	ssize_t nwritten = mdb_tgt_awrite(t, as, buf, strlen(buf) + 1, addr);
541 	return (nwritten > 0 ? nwritten - 1 : nwritten);
542 }
543 
int
mdb_tgt_lookup_by_name(mdb_tgt_t *t, const char *obj,
	const char *name, GElf_Sym *symp, mdb_syminfo_t *sip)
{
	mdb_syminfo_t info;
	GElf_Sym sym;
	uint_t id;

	if (name == NULL || t == NULL)
		return (set_errno(EINVAL));

	/*
	 * When searching every object, consult the debugger's private
	 * symbol table (m_prsym) first; it takes precedence over the
	 * target's own symbol tables.
	 */
	if (obj == MDB_TGT_OBJ_EVERY &&
	    mdb_gelf_symtab_lookup_by_name(mdb.m_prsym, name, &sym, &id) == 0) {
		info.sym_table = MDB_TGT_PRVSYM;
		info.sym_id = id;
		goto found;
	}

	/* Otherwise defer the lookup to the target implementation. */
	if (t->t_ops->t_lookup_by_name(t, obj, name, &sym, &info) == 0)
		goto found;

	return (-1);

found:
	/* Both output parameters are optional. */
	if (symp != NULL)
		*symp = sym;
	if (sip != NULL)
		*sip = info;
	return (0);
}
574 
575 int
576 mdb_tgt_lookup_by_addr(mdb_tgt_t *t, uintptr_t addr, uint_t flags,
577 	char *buf, size_t len, GElf_Sym *symp, mdb_syminfo_t *sip)
578 {
579 	mdb_syminfo_t info;
580 	GElf_Sym sym;
581 
582 	if (t == NULL)
583 		return (set_errno(EINVAL));
584 
585 	if (t->t_ops->t_lookup_by_addr(t, addr, flags,
586 	    buf, len, &sym, &info) == 0) {
587 		if (symp != NULL)
588 			*symp = sym;
589 		if (sip != NULL)
590 			*sip = info;
591 		return (0);
592 	}
593 
594 	return (-1);
595 }
596 
597 /*
598  * The mdb_tgt_lookup_by_scope function is a convenience routine for code that
599  * wants to look up a scoped symbol name such as "object`symbol".  It is
600  * implemented as a simple wrapper around mdb_tgt_lookup_by_name.  Note that
601  * we split on the *last* occurrence of "`", so the object name itself may
602  * contain additional scopes whose evaluation is left to the target.  This
603  * allows targets to implement additional scopes, such as source files,
604  * function names, link map identifiers, etc.
605  */
int
mdb_tgt_lookup_by_scope(mdb_tgt_t *t, const char *s, GElf_Sym *symp,
	mdb_syminfo_t *sip)
{
	const char *object = MDB_TGT_OBJ_EVERY;
	const char *name = s;
	char buf[MDB_TGT_SYM_NAMLEN];

	if (t == NULL)
		return (set_errno(EINVAL));

	/*
	 * If the name contains a backquote scoping operator, copy it into
	 * a local buffer (silently truncating overly long names) and split
	 * on the *last* backquote, per the block comment above.
	 */
	if (strchr(name, '`') != NULL) {

		(void) strncpy(buf, s, sizeof (buf));
		buf[sizeof (buf) - 1] = '\0';
		name = buf;

		/* strrsplit NUL-terminates the object part in buf. */
		if ((s = strrsplit(buf, '`')) != NULL) {
			object = buf;
			name = s;
			/* Reject empty object ("`sym") or symbol ("obj`"). */
			if (*object == '\0')
				return (set_errno(EMDB_NOOBJ));
			if (*name == '\0')
				return (set_errno(EMDB_NOSYM));
		}
	}

	return (mdb_tgt_lookup_by_name(t, object, name, symp, sip));
}
635 
636 int
637 mdb_tgt_symbol_iter(mdb_tgt_t *t, const char *obj, uint_t which,
638 	uint_t type, mdb_tgt_sym_f *cb, void *p)
639 {
640 	if ((which != MDB_TGT_SYMTAB && which != MDB_TGT_DYNSYM) ||
641 	    (type & ~(MDB_TGT_BIND_ANY | MDB_TGT_TYPE_ANY)) != 0)
642 		return (set_errno(EINVAL));
643 
644 	return (t->t_ops->t_symbol_iter(t, obj, which, type, cb, p));
645 }
646 
647 ssize_t
648 mdb_tgt_readsym(mdb_tgt_t *t, mdb_tgt_as_t as, void *buf, size_t nbytes,
649 	const char *obj, const char *name)
650 {
651 	GElf_Sym sym;
652 
653 	if (mdb_tgt_lookup_by_name(t, obj, name, &sym, NULL) == 0)
654 		return (mdb_tgt_aread(t, as, buf, nbytes, sym.st_value));
655 
656 	return (-1);
657 }
658 
659 ssize_t
660 mdb_tgt_writesym(mdb_tgt_t *t, mdb_tgt_as_t as, const void *buf,
661 	size_t nbytes, const char *obj, const char *name)
662 {
663 	GElf_Sym sym;
664 
665 	if (mdb_tgt_lookup_by_name(t, obj, name, &sym, NULL) == 0)
666 		return (mdb_tgt_awrite(t, as, buf, nbytes, sym.st_value));
667 
668 	return (-1);
669 }
670 
671 int
672 mdb_tgt_mapping_iter(mdb_tgt_t *t, mdb_tgt_map_f *cb, void *p)
673 {
674 	return (t->t_ops->t_mapping_iter(t, cb, p));
675 }
676 
677 int
678 mdb_tgt_object_iter(mdb_tgt_t *t, mdb_tgt_map_f *cb, void *p)
679 {
680 	return (t->t_ops->t_object_iter(t, cb, p));
681 }
682 
683 const mdb_map_t *
684 mdb_tgt_addr_to_map(mdb_tgt_t *t, uintptr_t addr)
685 {
686 	return (t->t_ops->t_addr_to_map(t, addr));
687 }
688 
689 const mdb_map_t *
690 mdb_tgt_name_to_map(mdb_tgt_t *t, const char *name)
691 {
692 	return (t->t_ops->t_name_to_map(t, name));
693 }
694 
695 struct ctf_file *
696 mdb_tgt_addr_to_ctf(mdb_tgt_t *t, uintptr_t addr)
697 {
698 	return (t->t_ops->t_addr_to_ctf(t, addr));
699 }
700 
701 struct ctf_file *
702 mdb_tgt_name_to_ctf(mdb_tgt_t *t, const char *name)
703 {
704 	return (t->t_ops->t_name_to_ctf(t, name));
705 }
706 
707 /*
708  * Return the latest target status.  We just copy out our cached copy.  The
709  * status only needs to change when the target is run, stepped, or continued.
710  */
int
mdb_tgt_status(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
{
	/* Snapshot the cached flags/state for the invariant checks below. */
	uint_t dstop = (t->t_status.st_flags & MDB_TGT_DSTOP);
	uint_t istop = (t->t_status.st_flags & MDB_TGT_ISTOP);
	uint_t state = t->t_status.st_state;

	if (tsp == NULL)
		return (set_errno(EINVAL));

	/*
	 * If we're called with the address of the target's internal status,
	 * then call down to update it; otherwise copy out the saved status.
	 */
	if (tsp == &t->t_status && t->t_ops->t_status(t, &t->t_status) != 0)
		return (-1); /* errno is set for us */

	/*
	 * Assert that our state is valid before returning it.  The state must
	 * be valid, and DSTOP and ISTOP cannot be set simultaneously.  ISTOP
	 * is only valid when stopped.  DSTOP is only valid when running or
	 * stopped.  If any test fails, abort the debugger.
	 */
	if (state > MDB_TGT_LOST)
		fail("invalid target state (%u)\n", state);
	if (state != MDB_TGT_STOPPED && istop)
		fail("target state is (%u) and ISTOP is set\n", state);
	if (state != MDB_TGT_STOPPED && state != MDB_TGT_RUNNING && dstop)
		fail("target state is (%u) and DSTOP is set\n", state);
	if (istop && dstop)
		fail("target has ISTOP and DSTOP set simultaneously\n");

	/* Copy out the cached status unless the caller passed it directly. */
	if (tsp != &t->t_status)
		bcopy(&t->t_status, tsp, sizeof (mdb_tgt_status_t));

	return (0);
}
748 
749 /*
750  * For the given sespec, scan its list of vespecs for ones that are marked
751  * temporary and delete them.  We use the same method as vespec_delete below.
752  */
753 /*ARGSUSED*/
754 void
755 mdb_tgt_sespec_prune_one(mdb_tgt_t *t, mdb_sespec_t *sep)
756 {
757 	mdb_vespec_t *vep, *nvep;
758 
759 	for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
760 		nvep = mdb_list_next(vep);
761 
762 		if ((vep->ve_flags & (MDB_TGT_SPEC_DELETED |
763 		    MDB_TGT_SPEC_TEMPORARY)) == MDB_TGT_SPEC_TEMPORARY) {
764 			vep->ve_flags |= MDB_TGT_SPEC_DELETED;
765 			mdb_tgt_vespec_rele(t, vep);
766 		}
767 	}
768 }
769 
770 /*
771  * Prune each sespec on the active list of temporary vespecs.  This function
772  * is called, for example, after the target finishes a continue operation.
773  */
774 void
775 mdb_tgt_sespec_prune_all(mdb_tgt_t *t)
776 {
777 	mdb_sespec_t *sep, *nsep;
778 
779 	for (sep = mdb_list_next(&t->t_active); sep != NULL; sep = nsep) {
780 		nsep = mdb_list_next(sep);
781 		mdb_tgt_sespec_prune_one(t, sep);
782 	}
783 }
784 
785 /*
786  * Transition the given sespec to the IDLE state.  We invoke the destructor,
787  * and then move the sespec from the active list to the idle list.
788  */
void
mdb_tgt_sespec_idle_one(mdb_tgt_t *t, mdb_sespec_t *sep, int reason)
{
	ASSERT(sep->se_state != MDB_TGT_SPEC_IDLE);

	/* An ARMED sespec must have its instrumentation removed first. */
	if (sep->se_state == MDB_TGT_SPEC_ARMED)
		(void) sep->se_ops->se_disarm(t, sep);

	/* Destroy the target-private data; IDLE sespecs carry none. */
	sep->se_ops->se_dtor(t, sep);
	sep->se_data = NULL;

	/* Record in se_errno why this sespec went idle (IDLE invariant). */
	sep->se_state = MDB_TGT_SPEC_IDLE;
	sep->se_errno = reason;

	/* Move the sespec from the active list to the idle list. */
	mdb_list_delete(&t->t_active, sep);
	mdb_list_append(&t->t_idle, sep);

	/* Temporary vespecs do not survive the transition to IDLE. */
	mdb_tgt_sespec_prune_one(t, sep);
}
808 
809 /*
810  * Transition each sespec on the active list to the IDLE state.  This function
811  * is called, for example, after the target terminates execution.
812  */
void
mdb_tgt_sespec_idle_all(mdb_tgt_t *t, int reason, int clear_matched)
{
	mdb_sespec_t *sep, *nsep;
	mdb_vespec_t *vep;

	/*
	 * If requested, unwind the matched list first: clear the MATCHED
	 * flag from each associated vespec and release the hold that was
	 * placed on each matched sespec, until the T_SE_END sentinel.
	 */
	while ((sep = t->t_matched) != T_SE_END && clear_matched) {
		for (vep = mdb_list_next(&sep->se_velist); vep != NULL; ) {
			vep->ve_flags &= ~MDB_TGT_SPEC_MATCHED;
			vep = mdb_list_next(vep);
		}

		t->t_matched = sep->se_matched;
		sep->se_matched = NULL;
		mdb_tgt_sespec_rele(t, sep);
	}

	/* Now idle every sespec remaining on the active list. */
	for (sep = mdb_list_next(&t->t_active); sep != NULL; sep = nsep) {
		nsep = mdb_list_next(sep);
		mdb_tgt_sespec_idle_one(t, sep, reason);
	}
}
835 
836 /*
837  * Attempt to transition the given sespec from the IDLE to ACTIVE state.  We
838  * do this by invoking se_ctor -- if this fails, we save the reason in se_errno
839  * and return -1 with errno set.  One strange case we need to deal with here is
840  * the possibility that a given vespec is sitting on the idle list with its
841  * corresponding sespec, but it is actually a duplicate of another sespec on the
842  * active list.  This can happen if the sespec is associated with a
843  * MDB_TGT_SPEC_DISABLED vespec that was just enabled, and is now ready to be
844  * activated.  A more interesting reason this situation might arise is the case
845  * where a virtual address breakpoint is set at an address just mmap'ed by
846  * dlmopen.  Since no symbol table information is available for this mapping
847  * yet, a pre-existing deferred symbolic breakpoint may already exist for this
848  * address, but it is on the idle list.  When the symbol table is ready and the
849  * DLACTIVITY event occurs, we now discover that the virtual address obtained by
850  * evaluating the symbolic breakpoint matches the explicit virtual address of
851  * the active virtual breakpoint.  To resolve this conflict in either case, we
852  * destroy the idle sespec, and attach its list of vespecs to the existing
853  * active sespec.
854  */
int
mdb_tgt_sespec_activate_one(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	mdb_vespec_t *vep = mdb_list_next(&sep->se_velist);

	mdb_vespec_t *nvep;
	mdb_sespec_t *dup;

	ASSERT(sep->se_state == MDB_TGT_SPEC_IDLE);
	ASSERT(vep != NULL);

	if (vep->ve_flags & MDB_TGT_SPEC_DISABLED)
		return (0); /* cannot be activated while disabled bit set */

	/*
	 * First search the active list for an existing, duplicate sespec to
	 * handle the special case described above.
	 */
	for (dup = mdb_list_next(&t->t_active); dup; dup = mdb_list_next(dup)) {
		if (dup->se_ops == sep->se_ops &&
		    dup->se_ops->se_secmp(t, dup, vep->ve_args)) {
			ASSERT(dup != sep);
			break;
		}
	}

	/*
	 * If a duplicate is found, destroy the existing, idle sespec, and
	 * attach all of its vespecs to the duplicate sespec.
	 */
	if (dup != NULL) {
		for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
			mdb_dprintf(MDB_DBG_TGT, "merge [ %d ] to sespec %p\n",
			    vep->ve_id, (void *)dup);

			/* Inherit the duplicate's matched status, if any. */
			if (dup->se_matched != NULL)
				vep->ve_flags |= MDB_TGT_SPEC_MATCHED;

			/* Capture the successor before unlinking vep. */
			nvep = mdb_list_next(vep);
			vep->ve_hits = 0;

			/*
			 * Move vep from sep to dup, transferring the
			 * reference it held on sep over to dup.  The final
			 * rele on sep destroys it.
			 */
			mdb_list_delete(&sep->se_velist, vep);
			mdb_tgt_sespec_rele(t, sep);

			mdb_list_append(&dup->se_velist, vep);
			mdb_tgt_sespec_hold(t, dup);
			vep->ve_se = dup;
		}

		mdb_dprintf(MDB_DBG_TGT, "merged idle sespec %p with %p\n",
		    (void *)sep, (void *)dup);
		return (0);
	}

	/*
	 * If no duplicate is found, call the sespec's constructor.  If this
	 * is successful, move the sespec to the active list.
	 */
	if (sep->se_ops->se_ctor(t, sep, vep->ve_args) < 0) {
		/* Stay IDLE; record the failure reason per the invariants. */
		sep->se_errno = errno;
		sep->se_data = NULL;

		return (-1);
	}

	/* Reset per-vespec hit counts for the new activation. */
	for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
		nvep = mdb_list_next(vep);
		vep->ve_hits = 0;
	}
	mdb_list_delete(&t->t_idle, sep);
	mdb_list_append(&t->t_active, sep);
	sep->se_state = MDB_TGT_SPEC_ACTIVE;
	sep->se_errno = 0;

	return (0);
}
931 
932 /*
933  * Transition each sespec on the idle list to the ACTIVE state.  This function
934  * is called, for example, after the target's t_run() function returns.  If
935  * the se_ctor() function fails, the specifier is not yet applicable; it will
936  * remain on the idle list and can be activated later.
937  *
938  * Returns 1 if there weren't any unexpected activation failures; 0 if there
939  * were.
940  */
941 int
942 mdb_tgt_sespec_activate_all(mdb_tgt_t *t)
943 {
944 	mdb_sespec_t *sep, *nsep;
945 	int rc = 1;
946 
947 	for (sep = mdb_list_next(&t->t_idle); sep != NULL; sep = nsep) {
948 		nsep = mdb_list_next(sep);
949 
950 		if (mdb_tgt_sespec_activate_one(t, sep) < 0 &&
951 		    sep->se_errno != EMDB_NOOBJ)
952 			rc = 0;
953 	}
954 
955 	return (rc);
956 }
957 
958 /*
959  * Transition the given sespec to the ARMED state.  Note that we attempt to
960  * re-arm sespecs previously in the ERROR state.  If se_arm() fails the sespec
961  * transitions to the ERROR state but stays on the active list.
962  */
963 void
964 mdb_tgt_sespec_arm_one(mdb_tgt_t *t, mdb_sespec_t *sep)
965 {
966 	ASSERT(sep->se_state != MDB_TGT_SPEC_IDLE);
967 
968 	if (sep->se_state == MDB_TGT_SPEC_ARMED)
969 		return; /* do not arm sespecs more than once */
970 
971 	if (sep->se_ops->se_arm(t, sep) == -1) {
972 		sep->se_state = MDB_TGT_SPEC_ERROR;
973 		sep->se_errno = errno;
974 	} else {
975 		sep->se_state = MDB_TGT_SPEC_ARMED;
976 		sep->se_errno = 0;
977 	}
978 }
979 
980 /*
981  * Transition each sespec on the active list (except matched specs) to the
982  * ARMED state.  This function is called prior to continuing the target.
983  */
984 void
985 mdb_tgt_sespec_arm_all(mdb_tgt_t *t)
986 {
987 	mdb_sespec_t *sep, *nsep;
988 
989 	for (sep = mdb_list_next(&t->t_active); sep != NULL; sep = nsep) {
990 		nsep = mdb_list_next(sep);
991 		if (sep->se_matched == NULL)
992 			mdb_tgt_sespec_arm_one(t, sep);
993 	}
994 }
995 
996 /*
997  * Transition each sespec on the active list that is in the ARMED state to
998  * the ACTIVE state.  If se_disarm() fails, the sespec is transitioned to
999  * the ERROR state instead, but left on the active list.
1000  */
1001 static void
1002 tgt_disarm_sespecs(mdb_tgt_t *t)
1003 {
1004 	mdb_sespec_t *sep;
1005 
1006 	for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
1007 		if (sep->se_state != MDB_TGT_SPEC_ARMED)
1008 			continue; /* do not disarm if in ERROR state */
1009 
1010 		if (sep->se_ops->se_disarm(t, sep) == -1) {
1011 			sep->se_state = MDB_TGT_SPEC_ERROR;
1012 			sep->se_errno = errno;
1013 		} else {
1014 			sep->se_state = MDB_TGT_SPEC_ACTIVE;
1015 			sep->se_errno = 0;
1016 		}
1017 	}
1018 }
1019 
1020 /*
1021  * Determine if the software event that triggered the most recent stop matches
1022  * any of the active event specifiers.  If 'all' is TRUE, we consider all
1023  * sespecs in our search.   If 'all' is FALSE, we only consider ARMED sespecs.
1024  * If we successfully match an event, we add it to the t_matched list and
1025  * place an additional hold on it.
1026  */
1027 static mdb_sespec_t *
1028 tgt_match_sespecs(mdb_tgt_t *t, int all)
1029 {
1030 	mdb_sespec_t *sep;
1031 
1032 	for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
1033 		if (all == FALSE && sep->se_state != MDB_TGT_SPEC_ARMED)
1034 			continue; /* restrict search to ARMED sespecs */
1035 
1036 		if (sep->se_state != MDB_TGT_SPEC_ERROR &&
1037 		    sep->se_ops->se_match(t, sep, &t->t_status)) {
1038 			mdb_dprintf(MDB_DBG_TGT, "match se %p\n", (void *)sep);
1039 			mdb_tgt_sespec_hold(t, sep);
1040 			sep->se_matched = t->t_matched;
1041 			t->t_matched = sep;
1042 		}
1043 	}
1044 
1045 	return (t->t_matched);
1046 }
1047 
1048 /*
1049  * This function provides the low-level target continue algorithm.  We proceed
1050  * in three phases: (1) we arm the active sespecs, except the specs matched at
1051  * the time we last stopped, (2) we call se_cont() on any matched sespecs to
1052  * step over these event transitions, and then arm the corresponding sespecs,
1053  * and (3) we call the appropriate low-level continue routine.  Once the
1054  * target stops again, we determine which sespecs were matched, and invoke the
1055  * appropriate vespec callbacks and perform other vespec maintenance.
1056  */
static int
tgt_continue(mdb_tgt_t *t, mdb_tgt_status_t *tsp,
    int (*t_cont)(mdb_tgt_t *, mdb_tgt_status_t *))
{
	mdb_var_t *hitv = mdb_nv_lookup(&mdb.m_nv, "hits");
	uintptr_t pc = t->t_status.st_pc;	/* PC at the time we were called */
	int error = 0;

	mdb_sespec_t *sep, *nsep, *matched;
	mdb_vespec_t *vep, *nvep;
	uintptr_t addr;

	uint_t cbits = 0;	/* union of pending continue bits */
	uint_t ncont = 0;	/* # of callbacks that requested cont */
	uint_t n = 0;		/* # of callbacks */

	/*
	 * If the target is undead, dead, or lost, we no longer allow continue.
	 * This effectively forces the user to use ::kill or ::run after death.
	 */
	if (t->t_status.st_state == MDB_TGT_UNDEAD)
		return (set_errno(EMDB_TGTZOMB));
	if (t->t_status.st_state == MDB_TGT_DEAD)
		return (set_errno(EMDB_TGTCORE));
	if (t->t_status.st_state == MDB_TGT_LOST)
		return (set_errno(EMDB_TGTLOST));

	/*
	 * If any of single-step, step-over, or step-out is pending, it takes
	 * precedence over an explicit or pending continue, because these are
	 * all different specialized forms of continue.
	 */
	if (t->t_flags & MDB_TGT_F_STEP)
		t_cont = t->t_ops->t_step;
	else if (t->t_flags & MDB_TGT_F_NEXT)
		t_cont = t->t_ops->t_step;	/* may become t_cont below */
	else if (t->t_flags & MDB_TGT_F_STEP_BRANCH)
		t_cont = t->t_ops->t_cont;
	else if (t->t_flags & MDB_TGT_F_STEP_OUT)
		t_cont = t->t_ops->t_cont;

	/*
	 * To handle step-over, we ask the target to find the address past the
	 * next control transfer instruction.  If an address is found, we plant
	 * a temporary breakpoint there and continue; otherwise just step.
	 */
	if ((t->t_flags & MDB_TGT_F_NEXT) && !(t->t_flags & MDB_TGT_F_STEP)) {
		if (t->t_ops->t_next(t, &addr) == -1 || mdb_tgt_add_vbrkpt(t,
		    addr, MDB_TGT_SPEC_HIDDEN | MDB_TGT_SPEC_TEMPORARY,
		    no_se_f, NULL) == 0) {
			mdb_dprintf(MDB_DBG_TGT, "next falling back to step: "
			    "%s\n", mdb_strerror(errno));
		} else
			t_cont = t->t_ops->t_cont;
	}

	/*
	 * To handle step-out, we ask the target to find the return address of
	 * the current frame, plant a temporary breakpoint there, and continue.
	 */
	if (t->t_flags & MDB_TGT_F_STEP_OUT) {
		if (t->t_ops->t_step_out(t, &addr) == -1)
			return (-1); /* errno is set for us */

		if (mdb_tgt_add_vbrkpt(t, addr, MDB_TGT_SPEC_HIDDEN |
		    MDB_TGT_SPEC_TEMPORARY, no_se_f, NULL) == 0)
			return (-1); /* errno is set for us */
	}

	/*
	 * To handle step-branch, we ask the target to enable it for the coming
	 * continue.  Step-branch is incompatible with step, so don't enable it
	 * if we're going to be stepping.
	 */
	if (t->t_flags & MDB_TGT_F_STEP_BRANCH && t_cont == t->t_ops->t_cont) {
		if (t->t_ops->t_step_branch(t) == -1)
			return (-1); /* errno is set for us */
	}

	/*
	 * We're committed to continuing: block terminal-death signals and
	 * disable interrupts for the duration of the run, then clear the
	 * pending continue bits, mark the target busy, and arm the events.
	 */
	(void) mdb_signal_block(SIGHUP);
	(void) mdb_signal_block(SIGTERM);
	mdb_intr_disable();

	t->t_flags &= ~T_CONT_BITS;
	t->t_flags |= MDB_TGT_F_BUSY;
	mdb_tgt_sespec_arm_all(t);

	/*
	 * Detach the current matched list.  Note that T_SE_END, not NULL, is
	 * the empty-list sentinel for t_matched / se_matched chains.
	 */
	ASSERT(t->t_matched != NULL);
	matched = t->t_matched;
	t->t_matched = T_SE_END;

	if (mdb.m_term != NULL)
		IOP_SUSPEND(mdb.m_term);

	/*
	 * Iterate over the matched sespec list, performing autostop processing
	 * and clearing the matched bit for each associated vespec.  We then
	 * invoke each sespec's se_cont callback in order to continue past
	 * the corresponding event.  If the matched list has more than one
	 * sespec, we assume that the se_cont callbacks are non-interfering.
	 */
	for (sep = matched; sep != T_SE_END; sep = sep->se_matched) {
		for (vep = mdb_list_next(&sep->se_velist); vep != NULL; ) {
			/* Reset the hit count of autostop events at limit. */
			if ((vep->ve_flags & MDB_TGT_SPEC_AUTOSTOP) &&
			    (vep->ve_limit && vep->ve_hits == vep->ve_limit))
				vep->ve_hits = 0;

			vep->ve_flags &= ~MDB_TGT_SPEC_MATCHED;
			vep = mdb_list_next(vep);
		}

		if (sep->se_ops->se_cont(t, sep, &t->t_status) == -1) {
			error = errno ? errno : -1;
			tgt_disarm_sespecs(t);
			break;
		}

		/*
		 * If the target did not stop cleanly during se_cont(), idle
		 * all events as appropriate and stop processing the list.
		 */
		if (!(t->t_status.st_flags & MDB_TGT_ISTOP)) {
			tgt_disarm_sespecs(t);
			if (t->t_status.st_state == MDB_TGT_UNDEAD)
				mdb_tgt_sespec_idle_all(t, EMDB_TGTZOMB, TRUE);
			else if (t->t_status.st_state == MDB_TGT_LOST)
				mdb_tgt_sespec_idle_all(t, EMDB_TGTLOST, TRUE);
			break;
		}
	}

	/*
	 * Clear the se_matched field for each matched sespec, and drop the
	 * reference count since the sespec is no longer on the matched list.
	 */
	for (sep = matched; sep != T_SE_END; sep = nsep) {
		nsep = sep->se_matched;
		sep->se_matched = NULL;
		mdb_tgt_sespec_rele(t, sep);
	}

	/*
	 * If the matched list was non-empty, see if we hit another event while
	 * performing se_cont() processing.  If so, don't bother continuing any
	 * further.  If not, arm the sespecs on the old matched list by calling
	 * mdb_tgt_sespec_arm_all() again and then continue by calling t_cont.
	 */
	if (matched != T_SE_END) {
		if (error != 0 || !(t->t_status.st_flags & MDB_TGT_ISTOP))
			goto out; /* abort now if se_cont() failed */

		if ((t->t_matched = tgt_match_sespecs(t, FALSE)) != T_SE_END) {
			tgt_disarm_sespecs(t);
			goto out;
		}

		mdb_tgt_sespec_arm_all(t);
	}

	/*
	 * Skip the step if se_cont() processing above already advanced the
	 * PC; otherwise invoke the low-level continue or step routine.
	 */
	if (t_cont != t->t_ops->t_step || pc == t->t_status.st_pc) {
		if (t_cont(t, &t->t_status) != 0)
			error = errno ? errno : -1;
	}

	tgt_disarm_sespecs(t);

	if (t->t_flags & MDB_TGT_F_UNLOAD)
		longjmp(mdb.m_frame->f_pcb, MDB_ERR_QUIT);

	if (t->t_status.st_state == MDB_TGT_UNDEAD)
		mdb_tgt_sespec_idle_all(t, EMDB_TGTZOMB, TRUE);
	else if (t->t_status.st_state == MDB_TGT_LOST)
		mdb_tgt_sespec_idle_all(t, EMDB_TGTLOST, TRUE);
	else if (t->t_status.st_flags & MDB_TGT_ISTOP)
		t->t_matched = tgt_match_sespecs(t, TRUE);
out:
	if (mdb.m_term != NULL)
		IOP_RESUME(mdb.m_term);

	(void) mdb_signal_unblock(SIGTERM);
	(void) mdb_signal_unblock(SIGHUP);
	mdb_intr_enable();

	for (sep = t->t_matched; sep != T_SE_END; sep = sep->se_matched) {
		/*
		 * When we invoke a ve_callback, it may in turn request that the
		 * target continue immediately after callback processing is
		 * complete.  We only allow this to occur if *all* callbacks
		 * agree to continue.  To implement this behavior, we keep a
		 * count (ncont) of such requests, and only apply the cumulative
		 * continue bits (cbits) to the target if ncont is equal to the
		 * total number of callbacks that are invoked (n).
		 */
		for (vep = mdb_list_next(&sep->se_velist);
		    vep != NULL; vep = nvep, n++) {
			/*
			 * Place an extra hold on the current vespec and pick
			 * up the next pointer before invoking the callback: we
			 * must be prepared for the vespec to be deleted or
			 * moved to a different list by the callback.
			 */
			mdb_tgt_vespec_hold(t, vep);
			nvep = mdb_list_next(vep);

			vep->ve_flags |= MDB_TGT_SPEC_MATCHED;
			vep->ve_hits++;

			/* Export "." (dot) and "hits" for the callback. */
			mdb_nv_set_value(mdb.m_dot, t->t_status.st_pc);
			mdb_nv_set_value(hitv, vep->ve_hits);

			ASSERT((t->t_flags & T_CONT_BITS) == 0);
			vep->ve_callback(t, vep->ve_id, vep->ve_data);

			ncont += (t->t_flags & T_CONT_BITS) != 0;
			cbits |= (t->t_flags & T_CONT_BITS);
			t->t_flags &= ~T_CONT_BITS;

			/* Apply auto-delete/auto-disable when at the limit. */
			if (vep->ve_limit && vep->ve_hits == vep->ve_limit) {
				if (vep->ve_flags & MDB_TGT_SPEC_AUTODEL)
					(void) mdb_tgt_vespec_delete(t,
					    vep->ve_id);
				else if (vep->ve_flags & MDB_TGT_SPEC_AUTODIS)
					(void) mdb_tgt_vespec_disable(t,
					    vep->ve_id);
			}

			/* Autostop events below their limit keep running. */
			if (vep->ve_limit && vep->ve_hits < vep->ve_limit) {
				if (vep->ve_flags & MDB_TGT_SPEC_AUTOSTOP)
					(void) mdb_tgt_continue(t, NULL);
			}

			mdb_tgt_vespec_rele(t, vep);
		}
	}

	if (t->t_matched != T_SE_END && ncont == n)
		t->t_flags |= cbits; /* apply continues (see above) */

	mdb_tgt_sespec_prune_all(t);

	t->t_status.st_flags &= ~MDB_TGT_BUSY;
	t->t_flags &= ~MDB_TGT_F_BUSY;

	if (tsp != NULL)
		bcopy(&t->t_status, tsp, sizeof (mdb_tgt_status_t));

	if (error != 0)
		return (set_errno(error));

	return (0);
}
1304 
1305 /*
1306  * This function is the common glue that connects the high-level target layer
1307  * continue functions (e.g. step and cont below) with the low-level
1308  * tgt_continue() function above.  Since vespec callbacks may perform any
1309  * actions, including attempting to continue the target itself, we must be
1310  * prepared to be called while the target is still marked F_BUSY.  In this
1311  * case, we just set a pending bit and return.  When we return from the call
1312  * to tgt_continue() that made us busy into the tgt_request_continue() call
1313  * that is still on the stack, we will loop around and call tgt_continue()
1314  * again.  This allows vespecs to continue the target without recursion.
1315  */
static int
tgt_request_continue(mdb_tgt_t *t, mdb_tgt_status_t *tsp, uint_t tflag,
    int (*t_cont)(mdb_tgt_t *, mdb_tgt_status_t *))
{
	mdb_tgt_spec_desc_t desc;
	mdb_sespec_t *sep;
	char buf[BUFSIZ];
	int status;

	/*
	 * If a continue is already in progress, just record the requested
	 * continue bit; the outer tgt_request_continue() invocation on the
	 * stack will notice it in T_CONT_BITS and loop around (see the block
	 * comment above this function).
	 */
	if (t->t_flags & MDB_TGT_F_BUSY) {
		t->t_flags |= tflag;
		return (0);
	}

	/* Keep continuing while vespec callbacks request further continues. */
	do {
		status = tgt_continue(t, tsp, t_cont);
	} while (status == 0 && (t->t_flags & T_CONT_BITS));

	if (status == 0) {
		/*
		 * The target stopped: print a description of each matched,
		 * non-silent event specifier, then fire state-change
		 * callbacks.
		 */
		for (sep = t->t_matched; sep != T_SE_END;
		    sep = sep->se_matched) {
			mdb_vespec_t *vep;

			for (vep = mdb_list_next(&sep->se_velist); vep;
			    vep = mdb_list_next(vep)) {
				if (vep->ve_flags & MDB_TGT_SPEC_SILENT)
					continue;
				warn("%s\n", sep->se_ops->se_info(t, sep,
				    vep, &desc, buf, sizeof (buf)));
			}
		}

		mdb_callb_fire(MDB_CALLB_STCHG);
	}

	t->t_flags &= ~T_CONT_BITS;
	return (status);
}
1354 
1355 /*
1356  * Restart target execution: we rely upon the underlying target implementation
1357  * to do most of the work for us.  In particular, we assume it will properly
1358  * preserve the state of our event lists if the run fails for some reason,
1359  * and that it will reset all events to the IDLE state if the run succeeds.
1360  * If it is successful, we attempt to activate all of the idle sespecs.  The
1361  * t_run() operation is defined to leave the target stopped at the earliest
1362  * possible point in execution, and then return control to the debugger,
1363  * awaiting a step or continue operation to set it running again.
1364  */
1365 int
1366 mdb_tgt_run(mdb_tgt_t *t, int argc, const mdb_arg_t *argv)
1367 {
1368 	int i;
1369 
1370 	for (i = 0; i < argc; i++) {
1371 		if (argv->a_type != MDB_TYPE_STRING)
1372 			return (set_errno(EINVAL));
1373 	}
1374 
1375 	if (t->t_ops->t_run(t, argc, argv) == -1)
1376 		return (-1); /* errno is set for us */
1377 
1378 	t->t_flags &= ~T_CONT_BITS;
1379 	(void) mdb_tgt_sespec_activate_all(t);
1380 
1381 	if (mdb.m_term != NULL)
1382 		IOP_CTL(mdb.m_term, MDB_IOC_CTTY, NULL);
1383 
1384 	return (0);
1385 }
1386 
/*
 * Single-step the target by one instruction.  If the target is busy, the
 * MDB_TGT_F_STEP flag is recorded and applied once callbacks complete.
 */
int
mdb_tgt_step(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
{
	return (tgt_request_continue(t, tsp, MDB_TGT_F_STEP, t->t_ops->t_step));
}
1392 
/*
 * Continue the target until the current function returns (see the step-out
 * handling in tgt_continue(), which plants a breakpoint at the return site).
 */
int
mdb_tgt_step_out(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
{
	t->t_flags |= MDB_TGT_F_STEP_OUT; /* set flag even if tgt not busy */
	return (tgt_request_continue(t, tsp, 0, t->t_ops->t_cont));
}
1399 
/*
 * Continue the target until the next branch-type event (see the step-branch
 * handling in tgt_continue(), which enables it via t_step_branch()).
 */
int
mdb_tgt_step_branch(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
{
	t->t_flags |= MDB_TGT_F_STEP_BRANCH; /* set flag even if tgt not busy */
	return (tgt_request_continue(t, tsp, 0, t->t_ops->t_cont));
}
1406 
/*
 * Step over the next instruction: control-transfer instructions are run to
 * completion rather than stepped into (see the step-over handling in
 * tgt_continue(), which falls back to a plain step if t_next() fails).
 */
int
mdb_tgt_next(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
{
	t->t_flags |= MDB_TGT_F_NEXT; /* set flag even if tgt not busy */
	return (tgt_request_continue(t, tsp, 0, t->t_ops->t_step));
}
1413 
/*
 * Resume target execution.  If the target is busy, the MDB_TGT_F_CONT flag
 * is recorded and applied once callback processing completes.
 */
int
mdb_tgt_continue(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
{
	return (tgt_request_continue(t, tsp, MDB_TGT_F_CONT, t->t_ops->t_cont));
}
1419 
/*
 * Post the specified signal to the target (delegates to the active target's
 * t_signal operation).
 */
int
mdb_tgt_signal(mdb_tgt_t *t, int sig)
{
	return (t->t_ops->t_signal(t, sig));
}
1425 
1426 void *
1427 mdb_tgt_vespec_data(mdb_tgt_t *t, int vid)
1428 {
1429 	mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, vid);
1430 
1431 	if (vep == NULL) {
1432 		(void) set_errno(EMDB_NOSESPEC);
1433 		return (NULL);
1434 	}
1435 
1436 	return (vep->ve_data);
1437 }
1438 
1439 /*
1440  * Return a structured description and comment string for the given vespec.
1441  * We fill in the common information from the vespec, and then call down to
1442  * the underlying sespec to provide the comment string and modify any
1443  * event type-specific information.
1444  */
1445 char *
1446 mdb_tgt_vespec_info(mdb_tgt_t *t, int vid, mdb_tgt_spec_desc_t *sp,
1447     char *buf, size_t nbytes)
1448 {
1449 	mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, vid);
1450 
1451 	mdb_tgt_spec_desc_t desc;
1452 	mdb_sespec_t *sep;
1453 
1454 	if (vep == NULL) {
1455 		if (sp != NULL)
1456 			bzero(sp, sizeof (mdb_tgt_spec_desc_t));
1457 		(void) set_errno(EMDB_NOSESPEC);
1458 		return (NULL);
1459 	}
1460 
1461 	if (sp == NULL)
1462 		sp = &desc;
1463 
1464 	sep = vep->ve_se;
1465 
1466 	sp->spec_id = vep->ve_id;
1467 	sp->spec_flags = vep->ve_flags;
1468 	sp->spec_hits = vep->ve_hits;
1469 	sp->spec_limit = vep->ve_limit;
1470 	sp->spec_state = sep->se_state;
1471 	sp->spec_errno = sep->se_errno;
1472 	sp->spec_base = NULL;
1473 	sp->spec_size = 0;
1474 	sp->spec_data = vep->ve_data;
1475 
1476 	return (sep->se_ops->se_info(t, sep, vep, sp, buf, nbytes));
1477 }
1478 
1479 /*
1480  * Qsort callback for sorting vespecs by VID, used below.
1481  */
1482 static int
1483 tgt_vespec_compare(const mdb_vespec_t **lp, const mdb_vespec_t **rp)
1484 {
1485 	return ((*lp)->ve_id - (*rp)->ve_id);
1486 }
1487 
1488 /*
1489  * Iterate over all vespecs and call the specified callback function with the
1490  * corresponding VID and caller data pointer.  We want the callback function
1491  * to see a consistent, sorted snapshot of the vespecs, and allow the callback
1492  * to take actions such as deleting the vespec itself, so we cannot simply
1493  * iterate over the lists.  Instead, we pre-allocate an array of vespec
1494  * pointers, fill it in and place an additional hold on each vespec, and then
1495  * sort it.  After the callback has been executed on each vespec in the
1496  * sorted array, we remove our hold and free the temporary array.
1497  */
1498 int
1499 mdb_tgt_vespec_iter(mdb_tgt_t *t, mdb_tgt_vespec_f *func, void *p)
1500 {
1501 	mdb_vespec_t **veps, **vepp, **vend;
1502 	mdb_vespec_t *vep, *nvep;
1503 	mdb_sespec_t *sep;
1504 
1505 	uint_t vecnt = t->t_vecnt;
1506 
1507 	veps = mdb_alloc(sizeof (mdb_vespec_t *) * vecnt, UM_SLEEP);
1508 	vend = veps + vecnt;
1509 	vepp = veps;
1510 
1511 	for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
1512 		for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
1513 			mdb_tgt_vespec_hold(t, vep);
1514 			nvep = mdb_list_next(vep);
1515 			*vepp++ = vep;
1516 		}
1517 	}
1518 
1519 	for (sep = mdb_list_next(&t->t_idle); sep; sep = mdb_list_next(sep)) {
1520 		for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
1521 			mdb_tgt_vespec_hold(t, vep);
1522 			nvep = mdb_list_next(vep);
1523 			*vepp++ = vep;
1524 		}
1525 	}
1526 
1527 	if (vepp != vend) {
1528 		fail("target has %u vespecs on list but vecnt shows %u\n",
1529 		    (uint_t)(vepp - veps), vecnt);
1530 	}
1531 
1532 	qsort(veps, vecnt, sizeof (mdb_vespec_t *),
1533 	    (int (*)(const void *, const void *))tgt_vespec_compare);
1534 
1535 	for (vepp = veps; vepp < vend; vepp++) {
1536 		if (func(t, p, (*vepp)->ve_id, (*vepp)->ve_data) != 0)
1537 			break;
1538 	}
1539 
1540 	for (vepp = veps; vepp < vend; vepp++)
1541 		mdb_tgt_vespec_rele(t, *vepp);
1542 
1543 	mdb_free(veps, sizeof (mdb_vespec_t *) * vecnt);
1544 	return (0);
1545 }
1546 
1547 /*
1548  * Reset the vespec flags, match limit, and callback data to the specified
1549  * values.  We silently correct invalid parameters, except for the VID.
1550  * The caller is required to query the existing properties and pass back
1551  * the existing values for any properties that should not be modified.
1552  * If the callback data is modified, the caller is responsible for cleaning
1553  * up any state associated with the previous value.
1554  */
int
mdb_tgt_vespec_modify(mdb_tgt_t *t, int id, uint_t flags,
    uint_t limit, void *data)
{
	mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id);

	if (vep == NULL)
		return (set_errno(EMDB_NOSESPEC));

	/*
	 * If the value of the MDB_TGT_SPEC_DISABLED bit is changing, call the
	 * appropriate vespec function to do the enable/disable work.
	 */
	if ((flags & MDB_TGT_SPEC_DISABLED) !=
	    (vep->ve_flags & MDB_TGT_SPEC_DISABLED)) {
		if (flags & MDB_TGT_SPEC_DISABLED)
			(void) mdb_tgt_vespec_disable(t, id);
		else
			(void) mdb_tgt_vespec_enable(t, id);
	}

	/*
	 * Make sure that only one MDB_TGT_SPEC_AUTO* bit is set in the new
	 * flags value: extra bits are cleared according to order of
	 * precedence (AUTOSTOP over AUTODEL over AUTODIS).
	 */
	if (flags & MDB_TGT_SPEC_AUTOSTOP)
		flags &= ~(MDB_TGT_SPEC_AUTODEL | MDB_TGT_SPEC_AUTODIS);
	else if (flags & MDB_TGT_SPEC_AUTODEL)
		flags &= ~MDB_TGT_SPEC_AUTODIS;

	/*
	 * The TEMPORARY property always takes precedence over STICKY.
	 */
	if (flags & MDB_TGT_SPEC_TEMPORARY)
		flags &= ~MDB_TGT_SPEC_STICKY;

	/*
	 * If any MDB_TGT_SPEC_AUTO* bits are changing, reset the hit count
	 * back to zero and clear all of the old auto bits.
	 */
	if ((flags & T_AUTO_BITS) != (vep->ve_flags & T_AUTO_BITS)) {
		vep->ve_flags &= ~T_AUTO_BITS;
		vep->ve_hits = 0;
	}

	/* Preserve implementation-private bits; apply the caller's bits. */
	vep->ve_flags = (vep->ve_flags & T_IMPL_BITS) | (flags & ~T_IMPL_BITS);
	vep->ve_data = data;

	/*
	 * If any MDB_TGT_SPEC_AUTO* flags are set, make sure the limit is at
	 * least one.  If none are set, reset it back to zero.
	 */
	if (vep->ve_flags & T_AUTO_BITS)
		vep->ve_limit = MAX(limit, 1);
	else
		vep->ve_limit = 0;

	/*
	 * As a convenience, we allow the caller to specify SPEC_DELETED in
	 * the flags field as indication that the event should be deleted.
	 */
	if (flags & MDB_TGT_SPEC_DELETED)
		(void) mdb_tgt_vespec_delete(t, id);

	return (0);
}
1621 
1622 /*
1623  * Remove the user disabled bit from the specified vespec, and attempt to
1624  * activate the underlying sespec and move it to the active list if possible.
1625  */
1626 int
1627 mdb_tgt_vespec_enable(mdb_tgt_t *t, int id)
1628 {
1629 	mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id);
1630 
1631 	if (vep == NULL)
1632 		return (set_errno(EMDB_NOSESPEC));
1633 
1634 	if (vep->ve_flags & MDB_TGT_SPEC_DISABLED) {
1635 		ASSERT(mdb_list_next(vep) == NULL);
1636 		vep->ve_flags &= ~MDB_TGT_SPEC_DISABLED;
1637 		if (mdb_tgt_sespec_activate_one(t, vep->ve_se) < 0)
1638 			return (-1); /* errno is set for us */
1639 	}
1640 
1641 	return (0);
1642 }
1643 
1644 /*
1645  * Set the user disabled bit on the specified vespec, and move it to the idle
1646  * list.  If the vespec is not alone with its sespec or if it is a currently
1647  * matched event, we must always create a new idle sespec and move the vespec
1648  * there.  If the vespec was alone and active, we can simply idle the sespec.
1649  */
int
mdb_tgt_vespec_disable(mdb_tgt_t *t, int id)
{
	mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id);
	mdb_sespec_t *sep;

	if (vep == NULL)
		return (set_errno(EMDB_NOSESPEC));

	if (vep->ve_flags & MDB_TGT_SPEC_DISABLED)
		return (0); /* already disabled */

	/*
	 * If the vespec shares its sespec with other vespecs (it has list
	 * neighbors) or the sespec is currently matched, we cannot simply
	 * idle the sespec: create a fresh idle sespec and move this vespec
	 * over to it, transferring the reference count accordingly.
	 */
	if (mdb_list_prev(vep) != NULL || mdb_list_next(vep) != NULL ||
	    vep->ve_se->se_matched != NULL) {

		sep = mdb_tgt_sespec_insert(t, vep->ve_se->se_ops, &t->t_idle);

		mdb_list_delete(&vep->ve_se->se_velist, vep);
		mdb_tgt_sespec_rele(t, vep->ve_se);

		mdb_list_append(&sep->se_velist, vep);
		mdb_tgt_sespec_hold(t, sep);

		vep->ve_flags &= ~MDB_TGT_SPEC_MATCHED;
		vep->ve_se = sep;

	} else if (vep->ve_se->se_state != MDB_TGT_SPEC_IDLE)
		mdb_tgt_sespec_idle_one(t, vep->ve_se, EMDB_SPECDIS);

	vep->ve_flags |= MDB_TGT_SPEC_DISABLED;
	return (0);
}
1682 
1683 /*
1684  * Delete the given vespec.  We use the MDB_TGT_SPEC_DELETED flag to ensure that
 * multiple calls to mdb_tgt_vespec_delete do not attempt to decrement the
 * reference count on the vespec more than once.  This is because the vespec
 * may remain referenced if it is currently held by another routine (e.g.
 * vespec_iter), and so the user could attempt to delete it more than once
 * since its reference count will be >= 2 prior to the first delete call.
1690  */
int
mdb_tgt_vespec_delete(mdb_tgt_t *t, int id)
{
	mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id);

	if (vep == NULL)
		return (set_errno(EMDB_NOSESPEC));

	/* A second delete of the same vespec is reported as EBUSY. */
	if (vep->ve_flags & MDB_TGT_SPEC_DELETED)
		return (set_errno(EBUSY));

	vep->ve_flags |= MDB_TGT_SPEC_DELETED;
	mdb_tgt_vespec_rele(t, vep);
	return (0);
}
1706 
/*
 * Create a breakpoint at the given virtual address (delegates to the active
 * target's t_add_vbrkpt operation).
 */
int
mdb_tgt_add_vbrkpt(mdb_tgt_t *t, uintptr_t addr,
    int spec_flags, mdb_tgt_se_f *func, void *p)
{
	return (t->t_ops->t_add_vbrkpt(t, addr, spec_flags, func, p));
}
1713 
/*
 * Create a deferred breakpoint on the named symbol (delegates to the active
 * target's t_add_sbrkpt operation).
 */
int
mdb_tgt_add_sbrkpt(mdb_tgt_t *t, const char *symbol,
    int spec_flags, mdb_tgt_se_f *func, void *p)
{
	return (t->t_ops->t_add_sbrkpt(t, symbol, spec_flags, func, p));
}
1720 
1721 int
1722 mdb_tgt_add_pwapt(mdb_tgt_t *t, physaddr_t pa, size_t n, uint_t flags,
1723     int spec_flags, mdb_tgt_se_f *func, void *p)
1724 {
1725 	if ((flags & ~MDB_TGT_WA_RWX) || flags == 0) {
1726 		(void) set_errno(EINVAL);
1727 		return (0);
1728 	}
1729 
1730 	if (pa + n < pa) {
1731 		(void) set_errno(EMDB_WPRANGE);
1732 		return (0);
1733 	}
1734 
1735 	return (t->t_ops->t_add_pwapt(t, pa, n, flags, spec_flags, func, p));
1736 }
1737 
1738 int
1739 mdb_tgt_add_vwapt(mdb_tgt_t *t, uintptr_t va, size_t n, uint_t flags,
1740     int spec_flags, mdb_tgt_se_f *func, void *p)
1741 {
1742 	if ((flags & ~MDB_TGT_WA_RWX) || flags == 0) {
1743 		(void) set_errno(EINVAL);
1744 		return (0);
1745 	}
1746 
1747 	if (va + n < va) {
1748 		(void) set_errno(EMDB_WPRANGE);
1749 		return (0);
1750 	}
1751 
1752 	return (t->t_ops->t_add_vwapt(t, va, n, flags, spec_flags, func, p));
1753 }
1754 
1755 int
1756 mdb_tgt_add_iowapt(mdb_tgt_t *t, uintptr_t addr, size_t n, uint_t flags,
1757     int spec_flags, mdb_tgt_se_f *func, void *p)
1758 {
1759 	if ((flags & ~MDB_TGT_WA_RWX) || flags == 0) {
1760 		(void) set_errno(EINVAL);
1761 		return (0);
1762 	}
1763 
1764 	if (addr + n < addr) {
1765 		(void) set_errno(EMDB_WPRANGE);
1766 		return (0);
1767 	}
1768 
1769 	return (t->t_ops->t_add_iowapt(t, addr, n, flags, spec_flags, func, p));
1770 }
1771 
/*
 * Create an event tracing entry to the given system call (delegates to the
 * active target's t_add_sysenter operation).
 */
int
mdb_tgt_add_sysenter(mdb_tgt_t *t, int sysnum,
    int spec_flags, mdb_tgt_se_f *func, void *p)
{
	return (t->t_ops->t_add_sysenter(t, sysnum, spec_flags, func, p));
}
1778 
/*
 * Create an event tracing return from the given system call (delegates to
 * the active target's t_add_sysexit operation).
 */
int
mdb_tgt_add_sysexit(mdb_tgt_t *t, int sysnum,
    int spec_flags, mdb_tgt_se_f *func, void *p)
{
	return (t->t_ops->t_add_sysexit(t, sysnum, spec_flags, func, p));
}
1785 
/*
 * Create an event tracing delivery of the given signal (delegates to the
 * active target's t_add_signal operation).
 */
int
mdb_tgt_add_signal(mdb_tgt_t *t, int sig,
    int spec_flags, mdb_tgt_se_f *func, void *p)
{
	return (t->t_ops->t_add_signal(t, sig, spec_flags, func, p));
}
1792 
/*
 * Create an event tracing the given machine fault (delegates to the active
 * target's t_add_fault operation).
 */
int
mdb_tgt_add_fault(mdb_tgt_t *t, int flt,
    int spec_flags, mdb_tgt_se_f *func, void *p)
{
	return (t->t_ops->t_add_fault(t, flt, spec_flags, func, p));
}
1799 
/*
 * Read the named register of the given thread into *rp (delegates to the
 * active target's t_getareg operation).
 */
int
mdb_tgt_getareg(mdb_tgt_t *t, mdb_tgt_tid_t tid,
    const char *rname, mdb_tgt_reg_t *rp)
{
	return (t->t_ops->t_getareg(t, tid, rname, rp));
}
1806 
/*
 * Write value r to the named register of the given thread (delegates to the
 * active target's t_putareg operation).
 */
int
mdb_tgt_putareg(mdb_tgt_t *t, mdb_tgt_tid_t tid,
    const char *rname, mdb_tgt_reg_t r)
{
	return (t->t_ops->t_putareg(t, tid, rname, r));
}
1813 
/*
 * Iterate over the target's stack frames starting from the given register
 * set, invoking cb for each frame (delegates to t_stack_iter).
 */
int
mdb_tgt_stack_iter(mdb_tgt_t *t, const mdb_tgt_gregset_t *gregs,
    mdb_tgt_stack_f *cb, void *p)
{
	return (t->t_ops->t_stack_iter(t, gregs, cb, p));
}
1820 
1821 int
1822 mdb_tgt_xdata_iter(mdb_tgt_t *t, mdb_tgt_xdata_f *func, void *private)
1823 {
1824 	mdb_xdata_t *xdp;
1825 
1826 	for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) {
1827 		if (func(private, xdp->xd_name, xdp->xd_desc,
1828 		    xdp->xd_copy(t, NULL, 0)) != 0)
1829 			break;
1830 	}
1831 
1832 	return (0);
1833 }
1834 
1835 ssize_t
1836 mdb_tgt_getxdata(mdb_tgt_t *t, const char *name, void *buf, size_t nbytes)
1837 {
1838 	mdb_xdata_t *xdp;
1839 
1840 	for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) {
1841 		if (strcmp(xdp->xd_name, name) == 0)
1842 			return (xdp->xd_copy(t, buf, nbytes));
1843 	}
1844 
1845 	return (set_errno(ENODATA));
1846 }
1847 
/*
 * Stub target operation: always fail with EMDB_TGTNOTSUP.  Intended for
 * filling t_ops slots for operations a target does not support.
 */
long
mdb_tgt_notsup()
{
	return (set_errno(EMDB_TGTNOTSUP));
}
1853 
/*
 * Stub target operation for pointer-returning slots: sets EMDB_TGTNOTSUP
 * and returns NULL.
 */
void *
mdb_tgt_null()
{
	(void) set_errno(EMDB_TGTNOTSUP);
	return (NULL);
}
1860 
/*
 * Stub target operation: succeed and do nothing.
 */
long
mdb_tgt_nop()
{
	return (0L);
}
1866 
1867 int
1868 mdb_tgt_xdata_insert(mdb_tgt_t *t, const char *name, const char *desc,
1869 	ssize_t (*copy)(mdb_tgt_t *, void *, size_t))
1870 {
1871 	mdb_xdata_t *xdp;
1872 
1873 	for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) {
1874 		if (strcmp(xdp->xd_name, name) == 0)
1875 			return (set_errno(EMDB_XDEXISTS));
1876 	}
1877 
1878 	xdp = mdb_alloc(sizeof (mdb_xdata_t), UM_SLEEP);
1879 	mdb_list_append(&t->t_xdlist, xdp);
1880 
1881 	xdp->xd_name = name;
1882 	xdp->xd_desc = desc;
1883 	xdp->xd_copy = copy;
1884 
1885 	return (0);
1886 }
1887 
1888 int
1889 mdb_tgt_xdata_delete(mdb_tgt_t *t, const char *name)
1890 {
1891 	mdb_xdata_t *xdp;
1892 
1893 	for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) {
1894 		if (strcmp(xdp->xd_name, name) == 0) {
1895 			mdb_list_delete(&t->t_xdlist, xdp);
1896 			mdb_free(xdp, sizeof (mdb_xdata_t));
1897 			return (0);
1898 		}
1899 	}
1900 
1901 	return (set_errno(EMDB_NOXD));
1902 }
1903 
1904 int
1905 mdb_tgt_sym_match(const GElf_Sym *sym, uint_t mask)
1906 {
1907 	uchar_t s_bind = GELF_ST_BIND(sym->st_info);
1908 	uchar_t s_type = GELF_ST_TYPE(sym->st_info);
1909 
1910 	/*
1911 	 * In case you haven't already guessed, this relies on the bitmask
1912 	 * used by <mdb/mdb_target.h> and <libproc.h> for encoding symbol
1913 	 * type and binding matching the order of STB and STT constants
1914 	 * in <sys/elf.h>.  ELF can't change without breaking binary
1915 	 * compatibility, so I think this is reasonably fair game.
1916 	 */
1917 	if (s_bind < STB_NUM && s_type < STT_NUM) {
1918 		uint_t type = (1 << (s_type + 8)) | (1 << s_bind);
1919 		return ((type & ~mask) == 0);
1920 	}
1921 
1922 	return (0); /* Unknown binding or type; fail to match */
1923 }
1924 
/*
 * Export the legacy adb variables (b, d, e, m, t) into mdb's namespace
 * based on the specified ELF object file; a NULL gf resets them to zero.
 */
void
mdb_tgt_elf_export(mdb_gelf_file_t *gf)
{
	GElf_Xword d = 0, t = 0;
	GElf_Addr b = 0, e = 0;
	uint32_t m = 0;
	mdb_var_t *v;

	/*
	 * Reset legacy adb variables based on the specified ELF object file
	 * provided by the target.  We define these variables:
	 *
	 * b - the address of the data segment (first writeable Phdr)
	 * d - the size of the data segment
	 * e - the address of the entry point
	 * m - the magic number identifying the file
	 * t - the size of the text segment (first executable Phdr)
	 *
	 * NOTE(review): an earlier version of this comment described 't' as
	 * the text segment *address*, but the code exports p_memsz and
	 * declares t as GElf_Xword (a size, like d), which matches the
	 * historical adb $v semantics for 't' -- confirm against adb docs.
	 */
	if (gf != NULL) {
		const GElf_Phdr *text = NULL, *data = NULL;
		size_t i;

		e = gf->gf_ehdr.e_entry;
		/* First four e_ident bytes are the ELF magic number */
		bcopy(&gf->gf_ehdr.e_ident[EI_MAG0], &m, sizeof (m));

		/* Find the first executable and first writable load Phdrs */
		for (i = 0; i < gf->gf_npload; i++) {
			if (text == NULL && (gf->gf_phdrs[i].p_flags & PF_X))
				text = &gf->gf_phdrs[i];
			if (data == NULL && (gf->gf_phdrs[i].p_flags & PF_W))
				data = &gf->gf_phdrs[i];
		}

		if (text != NULL)
			t = text->p_memsz;
		if (data != NULL) {
			b = data->p_vaddr;
			d = data->p_memsz;
		}
	}

	/* Only update variables that already exist in mdb's namespace */
	if ((v = mdb_nv_lookup(&mdb.m_nv, "b")) != NULL)
		mdb_nv_set_value(v, b);
	if ((v = mdb_nv_lookup(&mdb.m_nv, "d")) != NULL)
		mdb_nv_set_value(v, d);
	if ((v = mdb_nv_lookup(&mdb.m_nv, "e")) != NULL)
		mdb_nv_set_value(v, e);
	if ((v = mdb_nv_lookup(&mdb.m_nv, "m")) != NULL)
		mdb_nv_set_value(v, m);
	if ((v = mdb_nv_lookup(&mdb.m_nv, "t")) != NULL)
		mdb_nv_set_value(v, t);
}
1976 
/*
 * Take an additional reference on the given sespec.  The target argument is
 * unused but kept for symmetry with mdb_tgt_sespec_rele().
 */
/*ARGSUSED*/
void
mdb_tgt_sespec_hold(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	sep->se_refs++;
	ASSERT(sep->se_refs != 0); /* catch reference-count wrap-around */
}
1984 
1985 void
1986 mdb_tgt_sespec_rele(mdb_tgt_t *t, mdb_sespec_t *sep)
1987 {
1988 	ASSERT(sep->se_refs != 0);
1989 
1990 	if (--sep->se_refs == 0) {
1991 		mdb_dprintf(MDB_DBG_TGT, "destroying sespec %p\n", (void *)sep);
1992 		ASSERT(mdb_list_next(&sep->se_velist) == NULL);
1993 
1994 		if (sep->se_state != MDB_TGT_SPEC_IDLE) {
1995 			sep->se_ops->se_dtor(t, sep);
1996 			mdb_list_delete(&t->t_active, sep);
1997 		} else
1998 			mdb_list_delete(&t->t_idle, sep);
1999 
2000 		mdb_free(sep, sizeof (mdb_sespec_t));
2001 	}
2002 }
2003 
2004 mdb_sespec_t *
2005 mdb_tgt_sespec_insert(mdb_tgt_t *t, const mdb_se_ops_t *ops, mdb_list_t *list)
2006 {
2007 	mdb_sespec_t *sep = mdb_zalloc(sizeof (mdb_sespec_t), UM_SLEEP);
2008 
2009 	if (list == &t->t_active)
2010 		sep->se_state = MDB_TGT_SPEC_ACTIVE;
2011 	else
2012 		sep->se_state = MDB_TGT_SPEC_IDLE;
2013 
2014 	mdb_list_append(list, sep);
2015 	sep->se_ops = ops;
2016 	return (sep);
2017 }
2018 
2019 mdb_sespec_t *
2020 mdb_tgt_sespec_lookup_active(mdb_tgt_t *t, const mdb_se_ops_t *ops, void *args)
2021 {
2022 	mdb_sespec_t *sep;
2023 
2024 	for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
2025 		if (sep->se_ops == ops && sep->se_ops->se_secmp(t, sep, args))
2026 			break;
2027 	}
2028 
2029 	return (sep);
2030 }
2031 
2032 mdb_sespec_t *
2033 mdb_tgt_sespec_lookup_idle(mdb_tgt_t *t, const mdb_se_ops_t *ops, void *args)
2034 {
2035 	mdb_sespec_t *sep;
2036 
2037 	for (sep = mdb_list_next(&t->t_idle); sep; sep = mdb_list_next(sep)) {
2038 		if (sep->se_ops == ops && sep->se_ops->se_vecmp(t,
2039 		    mdb_list_next(&sep->se_velist), args))
2040 			break;
2041 	}
2042 
2043 	return (sep);
2044 }
2045 
/*
 * Take an additional reference on the given vespec.  The target argument is
 * unused but kept for symmetry with mdb_tgt_vespec_rele().
 */
/*ARGSUSED*/
void
mdb_tgt_vespec_hold(mdb_tgt_t *t, mdb_vespec_t *vep)
{
	vep->ve_refs++;
	ASSERT(vep->ve_refs != 0); /* catch reference-count wrap-around */
}
2053 
/*
 * Release a reference on the given vespec.  When the final reference is
 * dropped, the vespec is unlinked from its sespec (releasing the sespec
 * reference it held), its VID may be made available for re-use, its
 * destructor is run on ve_args, and the structure is freed.
 */
void
mdb_tgt_vespec_rele(mdb_tgt_t *t, mdb_vespec_t *vep)
{
	ASSERT(vep->ve_refs != 0);

	if (--vep->ve_refs == 0) {
		/*
		 * Remove this vespec from the sespec's velist and decrement
		 * the reference count on the sespec.
		 */
		mdb_list_delete(&vep->ve_se->se_velist, vep);
		mdb_tgt_sespec_rele(t, vep->ve_se);

		/*
		 * If we are deleting the most recently assigned VID, reset
		 * t_vepos or t_veneg as appropriate to re-use that number.
		 * This could be enhanced to re-use any free number by
		 * maintaining a bitmap or hash of the allocated IDs.
		 */
		if (vep->ve_id > 0 && t->t_vepos == vep->ve_id + 1)
			t->t_vepos = vep->ve_id;
		else if (vep->ve_id < 0 && t->t_veneg == -vep->ve_id + 1)
			t->t_veneg = -vep->ve_id;

		/*
		 * Call the destructor to clean up ve_args, and then free
		 * the actual vespec structure.
		 */
		vep->ve_dtor(vep);
		mdb_free(vep, sizeof (mdb_vespec_t));

		ASSERT(t->t_vecnt != 0);
		t->t_vecnt--;
	}
}
2089 
/*
 * Create a new vespec with the given flags, callback, and argument data,
 * attach it to a matching sespec (creating one if no compatible sespec
 * exists), and return the newly assigned VID.  Visible vespecs receive
 * increasing positive IDs; internal (hidden) vespecs receive decreasing
 * negative IDs.
 */
int
mdb_tgt_vespec_insert(mdb_tgt_t *t, const mdb_se_ops_t *ops, int flags,
    mdb_tgt_se_f *func, void *data, void *args, void (*dtor)(mdb_vespec_t *))
{
	mdb_vespec_t *vep = mdb_zalloc(sizeof (mdb_vespec_t), UM_SLEEP);

	int id, mult, *seqp;
	mdb_sespec_t *sep;

	/*
	 * Make sure that only one MDB_TGT_SPEC_AUTO* bit is set in the new
	 * flags value: extra bits are cleared according to order of
	 * precedence (AUTOSTOP > AUTODEL > AUTODIS).
	 */
	if (flags & MDB_TGT_SPEC_AUTOSTOP)
		flags &= ~(MDB_TGT_SPEC_AUTODEL | MDB_TGT_SPEC_AUTODIS);
	else if (flags & MDB_TGT_SPEC_AUTODEL)
		flags &= ~MDB_TGT_SPEC_AUTODIS;

	/*
	 * The TEMPORARY property always takes precedence over STICKY.
	 */
	if (flags & MDB_TGT_SPEC_TEMPORARY)
		flags &= ~MDB_TGT_SPEC_STICKY;

	/*
	 * Find a matching sespec or create a new one on the appropriate list.
	 * We always create a new sespec if the vespec is created disabled.
	 */
	if (flags & MDB_TGT_SPEC_DISABLED)
		sep = mdb_tgt_sespec_insert(t, ops, &t->t_idle);
	else if ((sep = mdb_tgt_sespec_lookup_active(t, ops, args)) == NULL &&
	    (sep = mdb_tgt_sespec_lookup_idle(t, ops, args)) == NULL)
		sep = mdb_tgt_sespec_insert(t, ops, &t->t_active);

	/*
	 * Generate a new ID for the vespec.  Increasing positive integers are
	 * assigned to visible vespecs; decreasing negative integers are
	 * assigned to hidden vespecs.  The target saves our most recent choice.
	 */
	if (flags & MDB_TGT_SPEC_INTERNAL) {
		seqp = &t->t_veneg;
		mult = -1;
	} else {
		seqp = &t->t_vepos;
		mult = 1;
	}

	id = *seqp;

	/* Skip IDs still in use; MAX() restarts at 1 on integer wrap */
	while (mdb_tgt_vespec_lookup(t, id * mult) != NULL)
		id = MAX(id + 1, 1);

	*seqp = MAX(id + 1, 1);

	vep->ve_id = id * mult;
	vep->ve_flags = flags & ~(MDB_TGT_SPEC_MATCHED | MDB_TGT_SPEC_DELETED);
	vep->ve_se = sep;
	vep->ve_callback = func;
	vep->ve_data = data;
	vep->ve_args = args;
	vep->ve_dtor = dtor;

	mdb_list_append(&sep->se_velist, vep);
	mdb_tgt_sespec_hold(t, sep);

	mdb_tgt_vespec_hold(t, vep);
	t->t_vecnt++;

	/*
	 * If this vespec is the first reference to the sespec and it's active,
	 * then it is newly created and we should attempt to initialize it.
	 * If se_ctor fails, then move the sespec back to the idle list.
	 */
	if (sep->se_refs == 1 && sep->se_state == MDB_TGT_SPEC_ACTIVE &&
	    sep->se_ops->se_ctor(t, sep, vep->ve_args) == -1) {

		mdb_list_delete(&t->t_active, sep);
		mdb_list_append(&t->t_idle, sep);

		sep->se_state = MDB_TGT_SPEC_IDLE;
		sep->se_errno = errno;
		sep->se_data = NULL;
	}

	/*
	 * If the sespec is active and the target is currently running (because
	 * we grabbed it using PGRAB_NOSTOP), then go ahead and attempt to arm
	 * the sespec so it will take effect immediately.
	 */
	if (sep->se_state == MDB_TGT_SPEC_ACTIVE &&
	    t->t_status.st_state == MDB_TGT_RUNNING)
		mdb_tgt_sespec_arm_one(t, sep);

	mdb_dprintf(MDB_DBG_TGT, "inserted [ %d ] sep=%p refs=%u state=%d\n",
	    vep->ve_id, (void *)sep, sep->se_refs, sep->se_state);

	return (vep->ve_id);
}
2188 
2189 /*
2190  * Search the target's active, idle, and disabled lists for the vespec matching
2191  * the specified VID, and return a pointer to it, or NULL if no match is found.
2192  */
2193 mdb_vespec_t *
2194 mdb_tgt_vespec_lookup(mdb_tgt_t *t, int vid)
2195 {
2196 	mdb_sespec_t *sep;
2197 	mdb_vespec_t *vep;
2198 
2199 	if (vid == 0)
2200 		return (NULL); /* 0 is never a valid VID */
2201 
2202 	for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
2203 		for (vep = mdb_list_next(&sep->se_velist); vep;
2204 		    vep = mdb_list_next(vep)) {
2205 			if (vep->ve_id == vid)
2206 				return (vep);
2207 		}
2208 	}
2209 
2210 	for (sep = mdb_list_next(&t->t_idle); sep; sep = mdb_list_next(sep)) {
2211 		for (vep = mdb_list_next(&sep->se_velist); vep;
2212 		    vep = mdb_list_next(vep)) {
2213 			if (vep->ve_id == vid)
2214 				return (vep);
2215 		}
2216 	}
2217 
2218 	return (NULL);
2219 }
2220 
/*
 * Default vespec destructor: used when a vespec carries no ve_args state
 * that needs cleanup.
 */
/*ARGSUSED*/
void
no_ve_dtor(mdb_vespec_t *vep)
{
	/* default destructor does nothing */
}
2227 
/*
 * Default sespec event callback: ignores the event notification entirely.
 */
/*ARGSUSED*/
void
no_se_f(mdb_tgt_t *t, int vid, void *data)
{
	/* default callback does nothing */
}
2234 
/*
 * Default sespec destructor: used when a sespec's se_ctor allocates no
 * per-sespec state that needs cleanup.
 */
/*ARGSUSED*/
void
no_se_dtor(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	/* default destructor does nothing */
}
2241 
/*
 * Default sespec comparison: a sespec matches when its cached se_data
 * pointer is identical to the args pointer being searched for.
 */
/*ARGSUSED*/
int
no_se_secmp(mdb_tgt_t *t, mdb_sespec_t *sep, void *args)
{
	return (sep->se_data == args);
}
2248 
/*
 * Default vespec comparison: a vespec matches when its ve_args pointer is
 * identical to the args pointer being searched for.
 */
/*ARGSUSED*/
int
no_se_vecmp(mdb_tgt_t *t, mdb_vespec_t *vep, void *args)
{
	return (vep->ve_args == args);
}
2255 
/*
 * Default sespec arm operation: used when arming requires no target work.
 */
/*ARGSUSED*/
int
no_se_arm(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	return (0); /* return success */
}
2262 
/*
 * Default sespec disarm operation: used when disarming requires no target
 * work.
 */
/*ARGSUSED*/
int
no_se_disarm(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	return (0); /* return success */
}
2269 
2270 /*ARGSUSED*/
2271 int
2272 no_se_cont(mdb_tgt_t *t, mdb_sespec_t *sep, mdb_tgt_status_t *tsp)
2273 {
2274 	if (tsp != &t->t_status)
2275 		bcopy(&t->t_status, tsp, sizeof (mdb_tgt_status_t));
2276 
2277 	return (0); /* return success */
2278 }
2279 
2280 int
2281 mdb_tgt_register_dcmds(mdb_tgt_t *t, const mdb_dcmd_t *dcp, int flags)
2282 {
2283 	int fail = 0;
2284 
2285 	for (; dcp->dc_name != NULL; dcp++) {
2286 		if (mdb_module_add_dcmd(t->t_module, dcp, flags) == -1) {
2287 			warn("failed to add dcmd %s", dcp->dc_name);
2288 			fail++;
2289 		}
2290 	}
2291 
2292 	return (fail > 0 ? -1 : 0);
2293 }
2294 
2295 int
2296 mdb_tgt_register_walkers(mdb_tgt_t *t, const mdb_walker_t *wp, int flags)
2297 {
2298 	int fail = 0;
2299 
2300 	for (; wp->walk_name != NULL; wp++) {
2301 		if (mdb_module_add_walker(t->t_module, wp, flags) == -1) {
2302 			warn("failed to add walk %s", wp->walk_name);
2303 			fail++;
2304 		}
2305 	}
2306 
2307 	return (fail > 0 ? -1 : 0);
2308 }
2309 
2310 void
2311 mdb_tgt_register_regvars(mdb_tgt_t *t, const mdb_tgt_regdesc_t *rdp,
2312     const mdb_nv_disc_t *disc, int flags)
2313 {
2314 	for (; rdp->rd_name != NULL; rdp++) {
2315 		if (!(rdp->rd_flags & MDB_TGT_R_EXPORT))
2316 			continue; /* Don't export register as a variable */
2317 
2318 		if (rdp->rd_flags & MDB_TGT_R_RDONLY)
2319 			flags |= MDB_NV_RDONLY;
2320 
2321 		(void) mdb_nv_insert(&mdb.m_nv, rdp->rd_name, disc,
2322 		    (uintptr_t)t, MDB_NV_PERSIST | flags);
2323 	}
2324 }
2325