1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 /*
27 * MDB Target Layer
28 *
29 * The *target* is the program being inspected by the debugger. The MDB target
30 * layer provides a set of functions that insulate common debugger code,
31 * including the MDB Module API, from the implementation details of how the
32 * debugger accesses information from a given target. Each target exports a
33 * standard set of properties, including one or more address spaces, one or
34 * more symbol tables, a set of load objects, and a set of threads that can be
35 * examined using the interfaces in <mdb/mdb_target.h>. This technique has
36 * been employed successfully in other debuggers, including [1], primarily
37 * to improve portability, although the term "target" often refers to the
38 * encapsulation of architectural or operating system-specific details. The
39 * target abstraction is useful for MDB because it allows us to easily extend
40 * the debugger to examine a variety of different program forms. Primarily,
41 * the target functions validate input arguments and then call an appropriate
42 * function in the target ops vector, defined in <mdb/mdb_target_impl.h>.
43 * However, this interface layer provides a very high level of flexibility for
44 * separating the debugger interface from instrumentation details. Experience
45 * has shown this kind of design can facilitate separating out debugger
46 * instrumentation into an external agent [2] and enable the development of
47 * advanced instrumentation frameworks [3]. We want MDB to be an ideal
48 * extensible framework for the development of such applications.
49 *
50 * Aside from a set of wrapper functions, the target layer also provides event
51 * management for targets that represent live executing programs. Our model of
52 * events is also extensible, and is based upon work in [3] and [4]. We define
53 * a *software event* as a state transition in the target program (for example,
54 * the transition of the program counter to a location of interest) that is
55 * observed by the debugger or its agent. A *software event specifier* is a
56 * description of a class of software events that is used by the debugger to
57 * instrument the target so that the corresponding software events can be
58 * observed. In MDB, software event specifiers are represented by the
59 * mdb_sespec_t structure, defined in <mdb/mdb_target_impl.h>. As the user,
60 * the internal debugger code, and MDB modules may all wish to observe software
61 * events and receive appropriate notification and callbacks, we do not expose
62 * software event specifiers directly as part of the user interface. Instead,
63 * clients of the target layer request that events be observed by creating
64 * new *virtual event specifiers*. Each virtual specifier is named by a unique
65 * non-zero integer (the VID), and is represented by a mdb_vespec_t structure.
66 * One or more virtual specifiers are then associated with each underlying
67 * software event specifier. This design enforces the constraint that the
68 * target must only insert one set of instrumentation, regardless of how many
69 * times the target layer was asked to trace a given event. For example, if
70 * multiple clients request a breakpoint at a particular address, the virtual
71 * specifiers will map to the same sespec, ensuring that only one breakpoint
72 * trap instruction is actually planted at the given target address. When no
73 * virtual specifiers refer to an sespec, it is no longer needed and can be
74 * removed, along with the corresponding instrumentation.
75 *
76 * The following state transition diagram illustrates the life cycle of a
77 * software event specifier and example transitions:
78 *
79 * cont/
80 * +--------+ delete +--------+ stop +-------+
81 * (|( DEAD )|) <------- ( ACTIVE ) <------> ( ARMED )
82 * +--------+ +--------+ +-------+
83 * ^ load/unload ^ ^ failure/ |
84 * delete | object / \ reset | failure
85 * | v v |
86 * | +--------+ +-------+ |
87 * +---- ( IDLE ) ( ERR ) <----+
88 * | +--------+ +-------+
89 * | |
90 * +------------------------------+
91 *
92 * The MDB execution control model is based upon the synchronous debugging
93 * model exported by Solaris proc(4). A target program is set running or the
94 * debugger is attached to a running target. On ISTOP (stop on event of
95 * interest), one target thread is selected as the representative. The
96 * algorithm for selecting the representative is target-specific, but we assume
97 * that if an observed software event has occurred, the target will select the
98 * thread that triggered the state transition of interest. The other threads
99 * are stopped in sympathy with the representative as soon as possible. Prior
100 * to continuing the target, we plant our instrumentation, transitioning event
101 * specifiers from the ACTIVE to the ARMED state, and then back again when the
102 * target stops. We then query each active event specifier to learn which ones
103 * are matched, and then invoke the callbacks associated with their vespecs.
104 * If an OS error occurs while attempting to arm or disarm a specifier, the
105 * specifier is transitioned to the ERROR state; we will attempt to arm it
106 * again at the next continue. If no target process is under our control or
107 * if an event is not currently applicable (e.g. a deferred breakpoint on an
108 * object that is not yet loaded), it remains in the IDLE state. The target
109 * implementation should intercept object load events and then transition the
110 * specifier to the ACTIVE state when the corresponding object is loaded.
111 *
112 * To simplify the debugger implementation and allow targets to easily provide
113 * new types of observable events, most of the event specifier management is
114 * done by the target layer. Each software event specifier provides an ops
115 * vector of subroutines that the target layer can call to perform the
116 * various state transitions described above. The target maintains two lists
117 * of mdb_sespec_t's: the t_idle list (IDLE state) and the t_active list
118 * (ACTIVE, ARMED, and ERROR states). Each mdb_sespec_t maintains a list of
119 * associated mdb_vespec_t's. If an sespec is IDLE or ERROR, its se_errno
120 * field will have an errno value specifying the reason for its inactivity.
121 * The vespec stores the client's callback function and private data, and the
122 * arguments used to construct the sespec. All objects are reference counted
123 * so we can destroy an object when it is no longer needed. The mdb_sespec_t
124 * invariants for the respective states are as follows:
125 *
126 * IDLE: on t_idle list, se_data == NULL, se_errno != 0, se_ctor not called
127 * ACTIVE: on t_active list, se_data valid, se_errno == 0, se_ctor called
128 * ARMED: on t_active list, se_data valid, se_errno == 0, se_ctor called
129 * ERROR: on t_active list, se_data valid, se_errno != 0, se_ctor called
130 *
131 * Additional commentary on specific state transitions and issues involving
132 * event management can be found below near the target layer functions.
133 *
134 * References
135 *
136 * [1] John Gilmore, "Working in GDB", Technical Report, Cygnus Support,
137 * 1.84 edition, 1994.
138 *
139 * [2] David R. Hanson and Mukund Raghavachari, "A Machine-Independent
140 * Debugger", Software--Practice and Experience, 26(11), 1277-1299(1996).
141 *
142 * [3] Michael W. Shapiro, "RDB: A System for Incremental Replay Debugging",
143 * Technical Report CS-97-12, Department of Computer Science,
144 * Brown University.
145 *
146 * [4] Daniel B. Price, "New Techniques for Replay Debugging", Technical
147 * Report CS-98-05, Department of Computer Science, Brown University.
148 */
149
150 #include <mdb/mdb_target_impl.h>
151 #include <mdb/mdb_debug.h>
152 #include <mdb/mdb_modapi.h>
153 #include <mdb/mdb_err.h>
154 #include <mdb/mdb_callb.h>
155 #include <mdb/mdb_gelf.h>
156 #include <mdb/mdb_io_impl.h>
157 #include <mdb/mdb_string.h>
158 #include <mdb/mdb_signal.h>
159 #include <mdb/mdb_frame.h>
160 #include <mdb/mdb.h>
161
162 #include <sys/stat.h>
163 #include <sys/param.h>
164 #include <sys/signal.h>
165 #include <strings.h>
166 #include <stdlib.h>
167 #include <errno.h>
168
/*
 * Define convenience macros for referencing the set of vespec flag bits that
 * are preserved by the target implementation, and the set of bits that
 * determine automatic ve_hits == ve_limit behavior.
 */
#define	T_IMPL_BITS	\
	(MDB_TGT_SPEC_INTERNAL | MDB_TGT_SPEC_SILENT | MDB_TGT_SPEC_MATCHED | \
	MDB_TGT_SPEC_DELETED)

#define	T_AUTO_BITS	\
	(MDB_TGT_SPEC_AUTOSTOP | MDB_TGT_SPEC_AUTODEL | MDB_TGT_SPEC_AUTODIS)

/*
 * Define convenience macro for referencing target flag pending continue bits.
 * These are the stepping/continue requests that may be outstanding on a
 * target at any given time.
 */
#define	T_CONT_BITS	\
	(MDB_TGT_F_STEP | MDB_TGT_F_STEP_OUT | MDB_TGT_F_STEP_BRANCH | \
	MDB_TGT_F_NEXT | MDB_TGT_F_CONT)
187
/*
 * Allocate and construct a new target using the given constructor function,
 * flags, and constructor argument vector.  The target is appended to the
 * global target list before construction; if the constructor fails, it is
 * unlinked and freed, and NULL is returned with errno set by the ctor.
 * On success, the target's cached status is primed via t_status.
 */
mdb_tgt_t *
mdb_tgt_create(mdb_tgt_ctor_f *ctor, int flags, int argc, const char *argv[])
{
	mdb_module_t *mp;
	mdb_tgt_t *t;

	/* Reject any flag bits outside the documented set. */
	if (flags & ~MDB_TGT_F_ALL) {
		(void) set_errno(EINVAL);
		return (NULL);
	}

	t = mdb_zalloc(sizeof (mdb_tgt_t), UM_SLEEP);
	mdb_list_append(&mdb.m_tgtlist, t);

	t->t_module = &mdb.m_rmod;	/* default owner: the root module */
	t->t_matched = T_SE_END;	/* empty matched-sespec list */
	t->t_flags = flags;
	t->t_vepos = 1;			/* next user-visible (positive) VID */
	t->t_veneg = 1;			/* next internal (negative) VID */

	/*
	 * If the constructor was registered by a loaded dmod, attribute the
	 * target to that module rather than the root module.
	 */
	for (mp = mdb.m_mhead; mp != NULL; mp = mp->mod_next) {
		if (ctor == mp->mod_tgt_ctor) {
			t->t_module = mp;
			break;
		}
	}

	if (ctor(t, argc, argv) != 0) {
		/* Constructor failed: undo the list insertion and free. */
		mdb_list_delete(&mdb.m_tgtlist, t);
		mdb_free(t, sizeof (mdb_tgt_t));
		return (NULL);
	}

	mdb_dprintf(MDB_DBG_TGT, "t_create %s (%p)\n",
	    t->t_module->mod_name, (void *)t);

	/* Prime the status cache so mdb_tgt_status() is immediately valid. */
	(void) t->t_ops->t_status(t, &t->t_status);
	return (t);
}
227
/*
 * Return the target's current flag word (MDB_TGT_F_* bits).
 */
int
mdb_tgt_getflags(mdb_tgt_t *t)
{
	return (t->t_flags);
}
233
234 int
mdb_tgt_setflags(mdb_tgt_t * t,int flags)235 mdb_tgt_setflags(mdb_tgt_t *t, int flags)
236 {
237 if (flags & ~MDB_TGT_F_ALL)
238 return (set_errno(EINVAL));
239
240 return (t->t_ops->t_setflags(t, flags));
241 }
242
/*
 * Change the debugger context to the given target-specific context pointer
 * (for example, a process or thread handle); delegates to the ops vector.
 */
int
mdb_tgt_setcontext(mdb_tgt_t *t, void *context)
{
	return (t->t_ops->t_setcontext(t, context));
}
248
/*
 * Vespec iterator callback used by mdb_tgt_destroy(): delete each vespec by
 * its VID.  Always returns 0 so that iteration continues over all vespecs.
 */
/*ARGSUSED*/
static int
tgt_delete_vespec(mdb_tgt_t *t, void *private, int vid, void *data)
{
	(void) mdb_tgt_vespec_delete(t, vid);
	return (0);
}
256
/*
 * Tear down a target.  Order matters here: deactivate it if it is current,
 * free the export (xdata) list, idle all event specifiers (EBUSY) and delete
 * every vespec, invoke the implementation's destructor, and finally unlink
 * and free the target itself.  If the destroyed target was the current one,
 * fall back to the most recently created remaining target, if any.
 */
void
mdb_tgt_destroy(mdb_tgt_t *t)
{
	mdb_xdata_t *xdp, *nxdp;

	if (mdb.m_target == t) {
		mdb_dprintf(MDB_DBG_TGT, "t_deactivate %s (%p)\n",
		    t->t_module->mod_name, (void *)t);
		t->t_ops->t_deactivate(t);
		mdb.m_target = NULL;
	}

	mdb_dprintf(MDB_DBG_TGT, "t_destroy %s (%p)\n",
	    t->t_module->mod_name, (void *)t);

	/* Free each exported data item, grabbing the next pointer first. */
	for (xdp = mdb_list_next(&t->t_xdlist); xdp != NULL; xdp = nxdp) {
		nxdp = mdb_list_next(xdp);
		mdb_list_delete(&t->t_xdlist, xdp);
		mdb_free(xdp, sizeof (mdb_xdata_t));
	}

	mdb_tgt_sespec_idle_all(t, EBUSY, TRUE);
	(void) mdb_tgt_vespec_iter(t, tgt_delete_vespec, NULL);
	t->t_ops->t_destroy(t);

	mdb_list_delete(&mdb.m_tgtlist, t);
	mdb_free(t, sizeof (mdb_tgt_t));

	/* If we destroyed the current target, activate the previous one. */
	if (mdb.m_target == NULL)
		mdb_tgt_activate(mdb_list_prev(&mdb.m_tgtlist));
}
288
/*
 * Make the given target the current target (t may be NULL to leave the
 * debugger with no current target).  The previous current target, if any,
 * is deactivated first.  When activating, any "%V" token in m_root is
 * expanded via the path code; the macro and dmod search paths are then
 * re-evaluated if there was no previous target.
 */
void
mdb_tgt_activate(mdb_tgt_t *t)
{
	mdb_tgt_t *otgt = mdb.m_target;

	if (mdb.m_target != NULL) {
		mdb_dprintf(MDB_DBG_TGT, "t_deactivate %s (%p)\n",
		    mdb.m_target->t_module->mod_name, (void *)mdb.m_target);
		mdb.m_target->t_ops->t_deactivate(mdb.m_target);
	}

	if ((mdb.m_target = t) != NULL) {
		const char *v = strstr(mdb.m_root, "%V");

		mdb_dprintf(MDB_DBG_TGT, "t_activate %s (%p)\n",
		    t->t_module->mod_name, (void *)t);

		/*
		 * If the root was explicitly set with -R and contains %V,
		 * expand it like a path. If the resulting directory is
		 * not present, then replace %V with "latest" and re-evaluate.
		 */
		if (v != NULL) {
			char old_root[MAXPATHLEN];
			const char **p;
#ifndef _KMDB
			struct stat s;
#endif
			size_t len;

			/* First expansion attempt of the %V token. */
			p = mdb_path_alloc(mdb.m_root, &len);
			(void) strcpy(old_root, mdb.m_root);
			(void) strncpy(mdb.m_root, p[0], MAXPATHLEN);
			mdb.m_root[MAXPATHLEN - 1] = '\0';
			mdb_path_free(p, len);

#ifndef _KMDB
			/*
			 * Expanded directory missing: set MDB_FL_LATEST so
			 * %V maps to "latest", and expand the original root
			 * string again.  (No stat() available in kmdb.)
			 */
			if (stat(mdb.m_root, &s) == -1 && errno == ENOENT) {
				mdb.m_flags |= MDB_FL_LATEST;
				p = mdb_path_alloc(old_root, &len);
				(void) strncpy(mdb.m_root, p[0], MAXPATHLEN);
				mdb.m_root[MAXPATHLEN - 1] = '\0';
				mdb_path_free(p, len);
			}
#endif
		}

		/*
		 * Re-evaluate the macro and dmod paths now that we have the
		 * new target set and m_root figured out.
		 */
		if (otgt == NULL) {
			mdb_set_ipath(mdb.m_ipathstr);
			mdb_set_lpath(mdb.m_lpathstr);
		}

		t->t_ops->t_activate(t);
	}
}
348
/*
 * Give the target implementation a chance to do periodic housekeeping;
 * delegates to the ops vector.
 */
void
mdb_tgt_periodic(mdb_tgt_t *t)
{
	t->t_ops->t_periodic(t);
}
354
/*
 * Return the target implementation's name string (ops-vector wrapper).
 */
const char *
mdb_tgt_name(mdb_tgt_t *t)
{
	return (t->t_ops->t_name(t));
}
360
/*
 * Return the target's instruction-set architecture string (ops wrapper).
 */
const char *
mdb_tgt_isa(mdb_tgt_t *t)
{
	return (t->t_ops->t_isa(t));
}
366
/*
 * Return the target's platform string (ops-vector wrapper).
 */
const char *
mdb_tgt_platform(mdb_tgt_t *t)
{
	return (t->t_ops->t_platform(t));
}
372
/*
 * Fill in *utsp with the target's utsname information (ops wrapper).
 */
int
mdb_tgt_uname(mdb_tgt_t *t, struct utsname *utsp)
{
	return (t->t_ops->t_uname(t, utsp));
}
378
/*
 * Return the target's data model (ops-vector wrapper).
 */
int
mdb_tgt_dmodel(mdb_tgt_t *t)
{
	return (t->t_ops->t_dmodel(t));
}
384
/*
 * Retrieve a pointer to the target's auxiliary vector (ops wrapper).
 */
int
mdb_tgt_auxv(mdb_tgt_t *t, const auxv_t **auxvp)
{
	return (t->t_ops->t_auxv(t, auxvp));
}
390
391 ssize_t
mdb_tgt_aread(mdb_tgt_t * t,mdb_tgt_as_t as,void * buf,size_t n,mdb_tgt_addr_t addr)392 mdb_tgt_aread(mdb_tgt_t *t, mdb_tgt_as_t as,
393 void *buf, size_t n, mdb_tgt_addr_t addr)
394 {
395 if (t->t_flags & MDB_TGT_F_ASIO)
396 return (t->t_ops->t_aread(t, as, buf, n, addr));
397
398 switch ((uintptr_t)as) {
399 case (uintptr_t)MDB_TGT_AS_VIRT:
400 return (t->t_ops->t_vread(t, buf, n, addr));
401 case (uintptr_t)MDB_TGT_AS_PHYS:
402 return (t->t_ops->t_pread(t, buf, n, addr));
403 case (uintptr_t)MDB_TGT_AS_FILE:
404 return (t->t_ops->t_fread(t, buf, n, addr));
405 case (uintptr_t)MDB_TGT_AS_IO:
406 return (t->t_ops->t_ioread(t, buf, n, addr));
407 }
408 return (t->t_ops->t_aread(t, as, buf, n, addr));
409 }
410
411 ssize_t
mdb_tgt_awrite(mdb_tgt_t * t,mdb_tgt_as_t as,const void * buf,size_t n,mdb_tgt_addr_t addr)412 mdb_tgt_awrite(mdb_tgt_t *t, mdb_tgt_as_t as,
413 const void *buf, size_t n, mdb_tgt_addr_t addr)
414 {
415 if (!(t->t_flags & MDB_TGT_F_RDWR))
416 return (set_errno(EMDB_TGTRDONLY));
417
418 if (t->t_flags & MDB_TGT_F_ASIO)
419 return (t->t_ops->t_awrite(t, as, buf, n, addr));
420
421 switch ((uintptr_t)as) {
422 case (uintptr_t)MDB_TGT_AS_VIRT:
423 return (t->t_ops->t_vwrite(t, buf, n, addr));
424 case (uintptr_t)MDB_TGT_AS_PHYS:
425 return (t->t_ops->t_pwrite(t, buf, n, addr));
426 case (uintptr_t)MDB_TGT_AS_FILE:
427 return (t->t_ops->t_fwrite(t, buf, n, addr));
428 case (uintptr_t)MDB_TGT_AS_IO:
429 return (t->t_ops->t_iowrite(t, buf, n, addr));
430 }
431 return (t->t_ops->t_awrite(t, as, buf, n, addr));
432 }
433
/*
 * Read n bytes from virtual address addr into buf (ops-vector wrapper).
 */
ssize_t
mdb_tgt_vread(mdb_tgt_t *t, void *buf, size_t n, uintptr_t addr)
{
	return (t->t_ops->t_vread(t, buf, n, addr));
}
439
440 ssize_t
mdb_tgt_vwrite(mdb_tgt_t * t,const void * buf,size_t n,uintptr_t addr)441 mdb_tgt_vwrite(mdb_tgt_t *t, const void *buf, size_t n, uintptr_t addr)
442 {
443 if (t->t_flags & MDB_TGT_F_RDWR)
444 return (t->t_ops->t_vwrite(t, buf, n, addr));
445
446 return (set_errno(EMDB_TGTRDONLY));
447 }
448
/*
 * Read n bytes from physical address addr into buf (ops-vector wrapper).
 */
ssize_t
mdb_tgt_pread(mdb_tgt_t *t, void *buf, size_t n, physaddr_t addr)
{
	return (t->t_ops->t_pread(t, buf, n, addr));
}
454
455 ssize_t
mdb_tgt_pwrite(mdb_tgt_t * t,const void * buf,size_t n,physaddr_t addr)456 mdb_tgt_pwrite(mdb_tgt_t *t, const void *buf, size_t n, physaddr_t addr)
457 {
458 if (t->t_flags & MDB_TGT_F_RDWR)
459 return (t->t_ops->t_pwrite(t, buf, n, addr));
460
461 return (set_errno(EMDB_TGTRDONLY));
462 }
463
/*
 * Read n bytes at file offset addr from the object file into buf
 * (ops-vector wrapper).
 */
ssize_t
mdb_tgt_fread(mdb_tgt_t *t, void *buf, size_t n, uintptr_t addr)
{
	return (t->t_ops->t_fread(t, buf, n, addr));
}
469
470 ssize_t
mdb_tgt_fwrite(mdb_tgt_t * t,const void * buf,size_t n,uintptr_t addr)471 mdb_tgt_fwrite(mdb_tgt_t *t, const void *buf, size_t n, uintptr_t addr)
472 {
473 if (t->t_flags & MDB_TGT_F_RDWR)
474 return (t->t_ops->t_fwrite(t, buf, n, addr));
475
476 return (set_errno(EMDB_TGTRDONLY));
477 }
478
/*
 * Read n bytes from I/O-space address addr into buf (ops-vector wrapper).
 */
ssize_t
mdb_tgt_ioread(mdb_tgt_t *t, void *buf, size_t n, uintptr_t addr)
{
	return (t->t_ops->t_ioread(t, buf, n, addr));
}
484
485 ssize_t
mdb_tgt_iowrite(mdb_tgt_t * t,const void * buf,size_t n,uintptr_t addr)486 mdb_tgt_iowrite(mdb_tgt_t *t, const void *buf, size_t n, uintptr_t addr)
487 {
488 if (t->t_flags & MDB_TGT_F_RDWR)
489 return (t->t_ops->t_iowrite(t, buf, n, addr));
490
491 return (set_errno(EMDB_TGTRDONLY));
492 }
493
/*
 * Translate virtual address va in address space as to a physical address,
 * stored in *pap (ops-vector wrapper).
 */
int
mdb_tgt_vtop(mdb_tgt_t *t, mdb_tgt_as_t as, uintptr_t va, physaddr_t *pap)
{
	return (t->t_ops->t_vtop(t, as, va, pap));
}
499
500 ssize_t
mdb_tgt_readstr(mdb_tgt_t * t,mdb_tgt_as_t as,char * buf,size_t nbytes,mdb_tgt_addr_t addr)501 mdb_tgt_readstr(mdb_tgt_t *t, mdb_tgt_as_t as, char *buf,
502 size_t nbytes, mdb_tgt_addr_t addr)
503 {
504 ssize_t n, nread = mdb_tgt_aread(t, as, buf, nbytes, addr);
505 char *p;
506
507 if (nread >= 0) {
508 if ((p = memchr(buf, '\0', nread)) != NULL)
509 nread = (size_t)(p - buf);
510 goto done;
511 }
512
513 nread = 0;
514 p = &buf[0];
515
516 while (nread < nbytes && (n = mdb_tgt_aread(t, as, p, 1, addr)) == 1) {
517 if (*p == '\0')
518 return (nread);
519 nread++;
520 addr++;
521 p++;
522 }
523
524 if (nread == 0 && n == -1)
525 return (-1); /* If we can't even read a byte, return -1 */
526
527 done:
528 if (nbytes != 0)
529 buf[MIN(nread, nbytes - 1)] = '\0';
530
531 return (nread);
532 }
533
534 ssize_t
mdb_tgt_writestr(mdb_tgt_t * t,mdb_tgt_as_t as,const char * buf,mdb_tgt_addr_t addr)535 mdb_tgt_writestr(mdb_tgt_t *t, mdb_tgt_as_t as,
536 const char *buf, mdb_tgt_addr_t addr)
537 {
538 ssize_t nwritten = mdb_tgt_awrite(t, as, buf, strlen(buf) + 1, addr);
539 return (nwritten > 0 ? nwritten - 1 : nwritten);
540 }
541
542 int
mdb_tgt_lookup_by_name(mdb_tgt_t * t,const char * obj,const char * name,GElf_Sym * symp,mdb_syminfo_t * sip)543 mdb_tgt_lookup_by_name(mdb_tgt_t *t, const char *obj,
544 const char *name, GElf_Sym *symp, mdb_syminfo_t *sip)
545 {
546 mdb_syminfo_t info;
547 GElf_Sym sym;
548 uint_t id;
549
550 if (name == NULL || t == NULL)
551 return (set_errno(EINVAL));
552
553 if (obj == MDB_TGT_OBJ_EVERY &&
554 mdb_gelf_symtab_lookup_by_name(mdb.m_prsym, name, &sym, &id) == 0) {
555 info.sym_table = MDB_TGT_PRVSYM;
556 info.sym_id = id;
557 goto found;
558 }
559
560 if (t->t_ops->t_lookup_by_name(t, obj, name, &sym, &info) == 0)
561 goto found;
562
563 return (-1);
564
565 found:
566 if (symp != NULL)
567 *symp = sym;
568 if (sip != NULL)
569 *sip = info;
570 return (0);
571 }
572
/*
 * Look up the symbol corresponding to the given address.  flags selects the
 * match policy, and on success the symbol's name is copied into buf (up to
 * len bytes) while the symbol and its metadata are copied into the caller's
 * optional symp/sip buffers.  Returns 0 on success or -1 with errno set by
 * the target's lookup routine.
 */
int
mdb_tgt_lookup_by_addr(mdb_tgt_t *t, uintptr_t addr, uint_t flags,
    char *buf, size_t len, GElf_Sym *symp, mdb_syminfo_t *sip)
{
	mdb_syminfo_t info;
	GElf_Sym sym;

	if (t == NULL)
		return (set_errno(EINVAL));

	if (t->t_ops->t_lookup_by_addr(t, addr, flags,
	    buf, len, &sym, &info) == 0) {
		if (symp != NULL)
			*symp = sym;
		if (sip != NULL)
			*sip = info;
		return (0);
	}

	return (-1);
}
594
595 /*
596 * The mdb_tgt_lookup_by_scope function is a convenience routine for code that
597 * wants to look up a scoped symbol name such as "object`symbol". It is
598 * implemented as a simple wrapper around mdb_tgt_lookup_by_name. Note that
599 * we split on the *last* occurrence of "`", so the object name itself may
600 * contain additional scopes whose evaluation is left to the target. This
601 * allows targets to implement additional scopes, such as source files,
602 * function names, link map identifiers, etc.
603 */
/*
 * Look up a possibly-scoped symbol name of the form "object`symbol".  See
 * the block comment above: we split on the *last* backtick, so the object
 * portion may itself contain scopes the target evaluates.  An empty object
 * or symbol portion fails with EMDB_NOOBJ / EMDB_NOSYM respectively.
 */
int
mdb_tgt_lookup_by_scope(mdb_tgt_t *t, const char *s, GElf_Sym *symp,
    mdb_syminfo_t *sip)
{
	const char *object = MDB_TGT_OBJ_EVERY;
	const char *name = s;
	char buf[MDB_TGT_SYM_NAMLEN];

	if (t == NULL)
		return (set_errno(EINVAL));

	if (strchr(name, '`') != NULL) {

		/* Copy into a local buffer we can modify in place. */
		(void) strncpy(buf, s, sizeof (buf));
		buf[sizeof (buf) - 1] = '\0';
		name = buf;

		/* strrsplit NUL-terminates the object half in buf. */
		if ((s = strrsplit(buf, '`')) != NULL) {
			object = buf;
			name = s;
			if (*object == '\0')
				return (set_errno(EMDB_NOOBJ));
			if (*name == '\0')
				return (set_errno(EMDB_NOSYM));
		}
	}

	return (mdb_tgt_lookup_by_name(t, object, name, symp, sip));
}
633
634 int
mdb_tgt_symbol_iter(mdb_tgt_t * t,const char * obj,uint_t which,uint_t type,mdb_tgt_sym_f * cb,void * p)635 mdb_tgt_symbol_iter(mdb_tgt_t *t, const char *obj, uint_t which,
636 uint_t type, mdb_tgt_sym_f *cb, void *p)
637 {
638 if ((which != MDB_TGT_SYMTAB && which != MDB_TGT_DYNSYM) ||
639 (type & ~(MDB_TGT_BIND_ANY | MDB_TGT_TYPE_ANY)) != 0)
640 return (set_errno(EINVAL));
641
642 return (t->t_ops->t_symbol_iter(t, obj, which, type, cb, p));
643 }
644
645 ssize_t
mdb_tgt_readsym(mdb_tgt_t * t,mdb_tgt_as_t as,void * buf,size_t nbytes,const char * obj,const char * name)646 mdb_tgt_readsym(mdb_tgt_t *t, mdb_tgt_as_t as, void *buf, size_t nbytes,
647 const char *obj, const char *name)
648 {
649 GElf_Sym sym;
650
651 if (mdb_tgt_lookup_by_name(t, obj, name, &sym, NULL) == 0)
652 return (mdb_tgt_aread(t, as, buf, nbytes, sym.st_value));
653
654 return (-1);
655 }
656
657 ssize_t
mdb_tgt_writesym(mdb_tgt_t * t,mdb_tgt_as_t as,const void * buf,size_t nbytes,const char * obj,const char * name)658 mdb_tgt_writesym(mdb_tgt_t *t, mdb_tgt_as_t as, const void *buf,
659 size_t nbytes, const char *obj, const char *name)
660 {
661 GElf_Sym sym;
662
663 if (mdb_tgt_lookup_by_name(t, obj, name, &sym, NULL) == 0)
664 return (mdb_tgt_awrite(t, as, buf, nbytes, sym.st_value));
665
666 return (-1);
667 }
668
/*
 * Iterate over the target's address-space mappings, invoking cb with p for
 * each one (ops-vector wrapper).
 */
int
mdb_tgt_mapping_iter(mdb_tgt_t *t, mdb_tgt_map_f *cb, void *p)
{
	return (t->t_ops->t_mapping_iter(t, cb, p));
}
674
/*
 * Iterate over the target's load objects, invoking cb with p for each one
 * (ops-vector wrapper).
 */
int
mdb_tgt_object_iter(mdb_tgt_t *t, mdb_tgt_map_f *cb, void *p)
{
	return (t->t_ops->t_object_iter(t, cb, p));
}
680
/*
 * Return the mapping containing the given address, or NULL with errno set
 * (ops-vector wrapper).
 */
const mdb_map_t *
mdb_tgt_addr_to_map(mdb_tgt_t *t, uintptr_t addr)
{
	return (t->t_ops->t_addr_to_map(t, addr));
}
686
/*
 * Return the mapping for the object of the given name, or NULL with errno
 * set (ops-vector wrapper).
 */
const mdb_map_t *
mdb_tgt_name_to_map(mdb_tgt_t *t, const char *name)
{
	return (t->t_ops->t_name_to_map(t, name));
}
692
/*
 * Return the CTF container for the object containing the given address
 * (ops-vector wrapper).
 */
struct ctf_file *
mdb_tgt_addr_to_ctf(mdb_tgt_t *t, uintptr_t addr)
{
	return (t->t_ops->t_addr_to_ctf(t, addr));
}
698
/*
 * Return the CTF container for the object of the given name (ops wrapper).
 */
struct ctf_file *
mdb_tgt_name_to_ctf(mdb_tgt_t *t, const char *name)
{
	return (t->t_ops->t_name_to_ctf(t, name));
}
704
/*
 * Return the latest target status. We just copy out our cached copy. The
 * status only needs to change when the target is run, stepped, or continued.
 */
int
mdb_tgt_status(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
{
	/* Snapshot the flag/state bits BEFORE any t_status refresh below. */
	uint_t dstop = (t->t_status.st_flags & MDB_TGT_DSTOP);
	uint_t istop = (t->t_status.st_flags & MDB_TGT_ISTOP);
	uint_t state = t->t_status.st_state;

	if (tsp == NULL)
		return (set_errno(EINVAL));

	/*
	 * If we're called with the address of the target's internal status,
	 * then call down to update it; otherwise copy out the saved status.
	 */
	if (tsp == &t->t_status && t->t_ops->t_status(t, &t->t_status) != 0)
		return (-1); /* errno is set for us */

	/*
	 * Assert that our state is valid before returning it. The state must
	 * be valid, and DSTOP and ISTOP cannot be set simultaneously. ISTOP
	 * is only valid when stopped. DSTOP is only valid when running or
	 * stopped. If any test fails, abort the debugger.
	 */
	if (state > MDB_TGT_LOST)
		fail("invalid target state (%u)\n", state);
	if (state != MDB_TGT_STOPPED && istop)
		fail("target state is (%u) and ISTOP is set\n", state);
	if (state != MDB_TGT_STOPPED && state != MDB_TGT_RUNNING && dstop)
		fail("target state is (%u) and DSTOP is set\n", state);
	if (istop && dstop)
		fail("target has ISTOP and DSTOP set simultaneously\n");

	/* Copy out only when the caller passed an external buffer. */
	if (tsp != &t->t_status)
		bcopy(&t->t_status, tsp, sizeof (mdb_tgt_status_t));

	return (0);
}
746
/*
 * For the given sespec, scan its list of vespecs for ones that are marked
 * temporary and delete them. We use the same method as vespec_delete below.
 */
/*ARGSUSED*/
void
mdb_tgt_sespec_prune_one(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	mdb_vespec_t *vep, *nvep;

	/* Grab the next pointer first: rele may free the current vespec. */
	for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
		nvep = mdb_list_next(vep);

		/* Delete only TEMPORARY vespecs not already marked DELETED. */
		if ((vep->ve_flags & (MDB_TGT_SPEC_DELETED |
		    MDB_TGT_SPEC_TEMPORARY)) == MDB_TGT_SPEC_TEMPORARY) {
			vep->ve_flags |= MDB_TGT_SPEC_DELETED;
			mdb_tgt_vespec_rele(t, vep);
		}
	}
}
767
768 /*
769 * Prune each sespec on the active list of temporary vespecs. This function
770 * is called, for example, after the target finishes a continue operation.
771 */
772 void
mdb_tgt_sespec_prune_all(mdb_tgt_t * t)773 mdb_tgt_sespec_prune_all(mdb_tgt_t *t)
774 {
775 mdb_sespec_t *sep, *nsep;
776
777 for (sep = mdb_list_next(&t->t_active); sep != NULL; sep = nsep) {
778 nsep = mdb_list_next(sep);
779 mdb_tgt_sespec_prune_one(t, sep);
780 }
781 }
782
/*
 * Transition the given sespec to the IDLE state. We invoke the destructor,
 * and then move the sespec from the active list to the idle list.  The
 * errno-style reason for going idle is recorded in se_errno, and any
 * temporary vespecs attached to the sespec are pruned afterward.
 */
void
mdb_tgt_sespec_idle_one(mdb_tgt_t *t, mdb_sespec_t *sep, int reason)
{
	ASSERT(sep->se_state != MDB_TGT_SPEC_IDLE);

	/* ARMED sespecs must have their instrumentation removed first. */
	if (sep->se_state == MDB_TGT_SPEC_ARMED)
		(void) sep->se_ops->se_disarm(t, sep);

	sep->se_ops->se_dtor(t, sep);
	sep->se_data = NULL;

	sep->se_state = MDB_TGT_SPEC_IDLE;
	sep->se_errno = reason;

	mdb_list_delete(&t->t_active, sep);
	mdb_list_append(&t->t_idle, sep);

	mdb_tgt_sespec_prune_one(t, sep);
}
806
/*
 * Transition each sespec on the active list to the IDLE state. This function
 * is called, for example, after the target terminates execution.  If
 * clear_matched is set, the matched-sespec list is drained first: each
 * matched sespec's vespecs lose their MATCHED flag and the hold taken for
 * list membership is released.
 */
void
mdb_tgt_sespec_idle_all(mdb_tgt_t *t, int reason, int clear_matched)
{
	mdb_sespec_t *sep, *nsep;
	mdb_vespec_t *vep;

	while ((sep = t->t_matched) != T_SE_END && clear_matched) {
		for (vep = mdb_list_next(&sep->se_velist); vep != NULL; ) {
			vep->ve_flags &= ~MDB_TGT_SPEC_MATCHED;
			vep = mdb_list_next(vep);
		}

		/* Unlink from the matched list, then drop the list's hold. */
		t->t_matched = sep->se_matched;
		sep->se_matched = NULL;
		mdb_tgt_sespec_rele(t, sep);
	}

	/* Idling moves sep off t_active; grab the successor first. */
	for (sep = mdb_list_next(&t->t_active); sep != NULL; sep = nsep) {
		nsep = mdb_list_next(sep);
		mdb_tgt_sespec_idle_one(t, sep, reason);
	}
}
833
/*
 * Attempt to transition the given sespec from the IDLE to ACTIVE state. We
 * do this by invoking se_ctor -- if this fails, we save the reason in se_errno
 * and return -1 with errno set. One strange case we need to deal with here is
 * the possibility that a given vespec is sitting on the idle list with its
 * corresponding sespec, but it is actually a duplicate of another sespec on the
 * active list. This can happen if the sespec is associated with a
 * MDB_TGT_SPEC_DISABLED vespec that was just enabled, and is now ready to be
 * activated. A more interesting reason this situation might arise is the case
 * where a virtual address breakpoint is set at an address just mmap'ed by
 * dlmopen. Since no symbol table information is available for this mapping
 * yet, a pre-existing deferred symbolic breakpoint may already exist for this
 * address, but it is on the idle list. When the symbol table is ready and the
 * DLACTIVITY event occurs, we now discover that the virtual address obtained by
 * evaluating the symbolic breakpoint matches the explicit virtual address of
 * the active virtual breakpoint. To resolve this conflict in either case, we
 * destroy the idle sespec, and attach its list of vespecs to the existing
 * active sespec.
 */
int
mdb_tgt_sespec_activate_one(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	mdb_vespec_t *vep = mdb_list_next(&sep->se_velist);

	mdb_vespec_t *nvep;
	mdb_sespec_t *dup;

	ASSERT(sep->se_state == MDB_TGT_SPEC_IDLE);
	ASSERT(vep != NULL);

	if (vep->ve_flags & MDB_TGT_SPEC_DISABLED)
		return (0); /* cannot be activated while disabled bit set */

	/*
	 * First search the active list for an existing, duplicate sespec to
	 * handle the special case described above.
	 */
	for (dup = mdb_list_next(&t->t_active); dup; dup = mdb_list_next(dup)) {
		if (dup->se_ops == sep->se_ops &&
		    dup->se_ops->se_secmp(t, dup, vep->ve_args)) {
			ASSERT(dup != sep);
			break;
		}
	}

	/*
	 * If a duplicate is found, destroy the existing, idle sespec, and
	 * attach all of its vespecs to the duplicate sespec.
	 */
	if (dup != NULL) {
		for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
			mdb_dprintf(MDB_DBG_TGT, "merge [ %d ] to sespec %p\n",
			    vep->ve_id, (void *)dup);

			/* Inherit matched status from the active duplicate. */
			if (dup->se_matched != NULL)
				vep->ve_flags |= MDB_TGT_SPEC_MATCHED;

			nvep = mdb_list_next(vep);
			vep->ve_hits = 0;

			/*
			 * Move the vespec's list membership (and the hold it
			 * represents) from the idle sespec to the duplicate.
			 */
			mdb_list_delete(&sep->se_velist, vep);
			mdb_tgt_sespec_rele(t, sep);

			mdb_list_append(&dup->se_velist, vep);
			mdb_tgt_sespec_hold(t, dup);
			vep->ve_se = dup;
		}

		mdb_dprintf(MDB_DBG_TGT, "merged idle sespec %p with %p\n",
		    (void *)sep, (void *)dup);
		return (0);
	}

	/*
	 * If no duplicate is found, call the sespec's constructor. If this
	 * is successful, move the sespec to the active list.
	 */
	if (sep->se_ops->se_ctor(t, sep, vep->ve_args) < 0) {
		sep->se_errno = errno;
		sep->se_data = NULL;

		return (-1);
	}

	/* Reset hit counts for every attached vespec upon (re)activation. */
	for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
		nvep = mdb_list_next(vep);
		vep->ve_hits = 0;
	}
	mdb_list_delete(&t->t_idle, sep);
	mdb_list_append(&t->t_active, sep);
	sep->se_state = MDB_TGT_SPEC_ACTIVE;
	sep->se_errno = 0;

	return (0);
}
929
930 /*
931 * Transition each sespec on the idle list to the ACTIVE state. This function
932 * is called, for example, after the target's t_run() function returns. If
933 * the se_ctor() function fails, the specifier is not yet applicable; it will
934 * remain on the idle list and can be activated later.
935 *
936 * Returns 1 if there weren't any unexpected activation failures; 0 if there
937 * were.
938 */
939 int
mdb_tgt_sespec_activate_all(mdb_tgt_t * t)940 mdb_tgt_sespec_activate_all(mdb_tgt_t *t)
941 {
942 mdb_sespec_t *sep, *nsep;
943 int rc = 1;
944
945 for (sep = mdb_list_next(&t->t_idle); sep != NULL; sep = nsep) {
946 nsep = mdb_list_next(sep);
947
948 if (mdb_tgt_sespec_activate_one(t, sep) < 0 &&
949 sep->se_errno != EMDB_NOOBJ)
950 rc = 0;
951 }
952
953 return (rc);
954 }
955
956 /*
957 * Transition the given sespec to the ARMED state. Note that we attempt to
958 * re-arm sespecs previously in the ERROR state. If se_arm() fails the sespec
959 * transitions to the ERROR state but stays on the active list.
960 */
961 void
mdb_tgt_sespec_arm_one(mdb_tgt_t * t,mdb_sespec_t * sep)962 mdb_tgt_sespec_arm_one(mdb_tgt_t *t, mdb_sespec_t *sep)
963 {
964 ASSERT(sep->se_state != MDB_TGT_SPEC_IDLE);
965
966 if (sep->se_state == MDB_TGT_SPEC_ARMED)
967 return; /* do not arm sespecs more than once */
968
969 if (sep->se_ops->se_arm(t, sep) == -1) {
970 sep->se_state = MDB_TGT_SPEC_ERROR;
971 sep->se_errno = errno;
972 } else {
973 sep->se_state = MDB_TGT_SPEC_ARMED;
974 sep->se_errno = 0;
975 }
976 }
977
978 /*
979 * Transition each sespec on the active list (except matched specs) to the
980 * ARMED state. This function is called prior to continuing the target.
981 */
982 void
mdb_tgt_sespec_arm_all(mdb_tgt_t * t)983 mdb_tgt_sespec_arm_all(mdb_tgt_t *t)
984 {
985 mdb_sespec_t *sep, *nsep;
986
987 for (sep = mdb_list_next(&t->t_active); sep != NULL; sep = nsep) {
988 nsep = mdb_list_next(sep);
989 if (sep->se_matched == NULL)
990 mdb_tgt_sespec_arm_one(t, sep);
991 }
992 }
993
994 /*
995 * Transition each sespec on the active list that is in the ARMED state to
996 * the ACTIVE state. If se_disarm() fails, the sespec is transitioned to
997 * the ERROR state instead, but left on the active list.
998 */
999 static void
tgt_disarm_sespecs(mdb_tgt_t * t)1000 tgt_disarm_sespecs(mdb_tgt_t *t)
1001 {
1002 mdb_sespec_t *sep;
1003
1004 for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
1005 if (sep->se_state != MDB_TGT_SPEC_ARMED)
1006 continue; /* do not disarm if in ERROR state */
1007
1008 if (sep->se_ops->se_disarm(t, sep) == -1) {
1009 sep->se_state = MDB_TGT_SPEC_ERROR;
1010 sep->se_errno = errno;
1011 } else {
1012 sep->se_state = MDB_TGT_SPEC_ACTIVE;
1013 sep->se_errno = 0;
1014 }
1015 }
1016 }
1017
1018 /*
1019 * Determine if the software event that triggered the most recent stop matches
1020 * any of the active event specifiers. If 'all' is TRUE, we consider all
1021 * sespecs in our search. If 'all' is FALSE, we only consider ARMED sespecs.
1022 * If we successfully match an event, we add it to the t_matched list and
1023 * place an additional hold on it.
1024 */
1025 static mdb_sespec_t *
tgt_match_sespecs(mdb_tgt_t * t,int all)1026 tgt_match_sespecs(mdb_tgt_t *t, int all)
1027 {
1028 mdb_sespec_t *sep;
1029
1030 for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
1031 if (all == FALSE && sep->se_state != MDB_TGT_SPEC_ARMED)
1032 continue; /* restrict search to ARMED sespecs */
1033
1034 if (sep->se_state != MDB_TGT_SPEC_ERROR &&
1035 sep->se_ops->se_match(t, sep, &t->t_status)) {
1036 mdb_dprintf(MDB_DBG_TGT, "match se %p\n", (void *)sep);
1037 mdb_tgt_sespec_hold(t, sep);
1038 sep->se_matched = t->t_matched;
1039 t->t_matched = sep;
1040 }
1041 }
1042
1043 return (t->t_matched);
1044 }
1045
1046 /*
1047 * This function provides the low-level target continue algorithm. We proceed
1048 * in three phases: (1) we arm the active sespecs, except the specs matched at
1049 * the time we last stopped, (2) we call se_cont() on any matched sespecs to
1050 * step over these event transitions, and then arm the corresponding sespecs,
1051 * and (3) we call the appropriate low-level continue routine. Once the
1052 * target stops again, we determine which sespecs were matched, and invoke the
1053 * appropriate vespec callbacks and perform other vespec maintenance.
1054 */
static int
tgt_continue(mdb_tgt_t *t, mdb_tgt_status_t *tsp,
    int (*t_cont)(mdb_tgt_t *, mdb_tgt_status_t *))
{
	/* "hits" named variable; updated with ve_hits before each callback */
	mdb_var_t *hitv = mdb_nv_lookup(&mdb.m_nv, "hits");
	uintptr_t pc = t->t_status.st_pc;
	int error = 0;

	mdb_sespec_t *sep, *nsep, *matched;
	mdb_vespec_t *vep, *nvep;
	uintptr_t addr;

	uint_t cbits = 0;	/* union of pending continue bits */
	uint_t ncont = 0;	/* # of callbacks that requested cont */
	uint_t n = 0;		/* # of callbacks */

	/*
	 * If the target is undead, dead, or lost, we no longer allow continue.
	 * This effectively forces the user to use ::kill or ::run after death.
	 */
	if (t->t_status.st_state == MDB_TGT_UNDEAD)
		return (set_errno(EMDB_TGTZOMB));
	if (t->t_status.st_state == MDB_TGT_DEAD)
		return (set_errno(EMDB_TGTCORE));
	if (t->t_status.st_state == MDB_TGT_LOST)
		return (set_errno(EMDB_TGTLOST));

	/*
	 * If any of single-step, step-over, or step-out is pending, it takes
	 * precedence over an explicit or pending continue, because these are
	 * all different specialized forms of continue.
	 */
	if (t->t_flags & MDB_TGT_F_STEP)
		t_cont = t->t_ops->t_step;
	else if (t->t_flags & MDB_TGT_F_NEXT)
		t_cont = t->t_ops->t_step;
	else if (t->t_flags & MDB_TGT_F_STEP_BRANCH)
		t_cont = t->t_ops->t_cont;
	else if (t->t_flags & MDB_TGT_F_STEP_OUT)
		t_cont = t->t_ops->t_cont;

	/*
	 * To handle step-over, we ask the target to find the address past the
	 * next control transfer instruction.  If an address is found, we plant
	 * a temporary breakpoint there and continue; otherwise just step.
	 */
	if ((t->t_flags & MDB_TGT_F_NEXT) && !(t->t_flags & MDB_TGT_F_STEP)) {
		if (t->t_ops->t_next(t, &addr) == -1 || mdb_tgt_add_vbrkpt(t,
		    addr, MDB_TGT_SPEC_HIDDEN | MDB_TGT_SPEC_TEMPORARY,
		    no_se_f, NULL) == 0) {
			mdb_dprintf(MDB_DBG_TGT, "next falling back to step: "
			    "%s\n", mdb_strerror(errno));
		} else
			t_cont = t->t_ops->t_cont;
	}

	/*
	 * To handle step-out, we ask the target to find the return address of
	 * the current frame, plant a temporary breakpoint there, and continue.
	 */
	if (t->t_flags & MDB_TGT_F_STEP_OUT) {
		if (t->t_ops->t_step_out(t, &addr) == -1)
			return (-1); /* errno is set for us */

		if (mdb_tgt_add_vbrkpt(t, addr, MDB_TGT_SPEC_HIDDEN |
		    MDB_TGT_SPEC_TEMPORARY, no_se_f, NULL) == 0)
			return (-1); /* errno is set for us */
	}

	/*
	 * To handle step-branch, we ask the target to enable it for the coming
	 * continue.  Step-branch is incompatible with step, so don't enable it
	 * if we're going to be stepping.
	 */
	if (t->t_flags & MDB_TGT_F_STEP_BRANCH && t_cont == t->t_ops->t_cont) {
		if (t->t_ops->t_step_branch(t) == -1)
			return (-1); /* errno is set for us */
	}

	/*
	 * Block asynchronous termination signals and keyboard interrupts for
	 * the duration of the continue, so event processing is not disturbed.
	 */
	(void) mdb_signal_block(SIGHUP);
	(void) mdb_signal_block(SIGTERM);
	mdb_intr_disable();

	t->t_flags &= ~T_CONT_BITS;
	t->t_flags |= MDB_TGT_F_BUSY;
	mdb_tgt_sespec_arm_all(t);

	/*
	 * Take ownership of the matched list: the sentinel is T_SE_END,
	 * not NULL, which is why the loops below test against T_SE_END.
	 */
	ASSERT(t->t_matched != NULL);
	matched = t->t_matched;
	t->t_matched = T_SE_END;

	if (mdb.m_term != NULL)
		IOP_SUSPEND(mdb.m_term);

	/*
	 * Iterate over the matched sespec list, performing autostop processing
	 * and clearing the matched bit for each associated vespec.  We then
	 * invoke each sespec's se_cont callback in order to continue past
	 * the corresponding event.  If the matched list has more than one
	 * sespec, we assume that the se_cont callbacks are non-interfering.
	 */
	for (sep = matched; sep != T_SE_END; sep = sep->se_matched) {
		for (vep = mdb_list_next(&sep->se_velist); vep != NULL; ) {
			/* autostop vespecs at their limit restart counting */
			if ((vep->ve_flags & MDB_TGT_SPEC_AUTOSTOP) &&
			    (vep->ve_limit && vep->ve_hits == vep->ve_limit))
				vep->ve_hits = 0;

			vep->ve_flags &= ~MDB_TGT_SPEC_MATCHED;
			vep = mdb_list_next(vep);
		}

		if (sep->se_ops->se_cont(t, sep, &t->t_status) == -1) {
			/* se_cont may fail without setting errno; use -1 */
			error = errno ? errno : -1;
			tgt_disarm_sespecs(t);
			break;
		}

		if (!(t->t_status.st_flags & MDB_TGT_ISTOP)) {
			tgt_disarm_sespecs(t);
			if (t->t_status.st_state == MDB_TGT_UNDEAD)
				mdb_tgt_sespec_idle_all(t, EMDB_TGTZOMB, TRUE);
			else if (t->t_status.st_state == MDB_TGT_LOST)
				mdb_tgt_sespec_idle_all(t, EMDB_TGTLOST, TRUE);
			break;
		}
	}

	/*
	 * Clear the se_matched field for each matched sespec, and drop the
	 * reference count since the sespec is no longer on the matched list.
	 */
	for (sep = matched; sep != T_SE_END; sep = nsep) {
		nsep = sep->se_matched;
		sep->se_matched = NULL;
		mdb_tgt_sespec_rele(t, sep);
	}

	/*
	 * If the matched list was non-empty, see if we hit another event while
	 * performing se_cont() processing.  If so, don't bother continuing any
	 * further.  If not, arm the sespecs on the old matched list by calling
	 * mdb_tgt_sespec_arm_all() again and then continue by calling t_cont.
	 */
	if (matched != T_SE_END) {
		if (error != 0 || !(t->t_status.st_flags & MDB_TGT_ISTOP))
			goto out; /* abort now if se_cont() failed */

		if ((t->t_matched = tgt_match_sespecs(t, FALSE)) != T_SE_END) {
			tgt_disarm_sespecs(t);
			goto out;
		}

		mdb_tgt_sespec_arm_all(t);
	}

	/*
	 * NOTE(review): if stepping and the PC already moved during se_cont()
	 * processing above, the explicit step is skipped -- presumably the
	 * step has effectively happened; confirm against target back-ends.
	 */
	if (t_cont != t->t_ops->t_step || pc == t->t_status.st_pc) {
		if (t_cont(t, &t->t_status) != 0)
			error = errno ? errno : -1;
	}

	tgt_disarm_sespecs(t);

	/* A pending unload aborts the entire debugger frame via longjmp. */
	if (t->t_flags & MDB_TGT_F_UNLOAD)
		longjmp(mdb.m_frame->f_pcb, MDB_ERR_QUIT);

	if (t->t_status.st_state == MDB_TGT_UNDEAD)
		mdb_tgt_sespec_idle_all(t, EMDB_TGTZOMB, TRUE);
	else if (t->t_status.st_state == MDB_TGT_LOST)
		mdb_tgt_sespec_idle_all(t, EMDB_TGTLOST, TRUE);
	else if (t->t_status.st_flags & MDB_TGT_ISTOP)
		t->t_matched = tgt_match_sespecs(t, TRUE);
out:
	if (mdb.m_term != NULL)
		IOP_RESUME(mdb.m_term);

	(void) mdb_signal_unblock(SIGTERM);
	(void) mdb_signal_unblock(SIGHUP);
	mdb_intr_enable();

	for (sep = t->t_matched; sep != T_SE_END; sep = sep->se_matched) {
		/*
		 * When we invoke a ve_callback, it may in turn request that the
		 * target continue immediately after callback processing is
		 * complete.  We only allow this to occur if *all* callbacks
		 * agree to continue.  To implement this behavior, we keep a
		 * count (ncont) of such requests, and only apply the cumulative
		 * continue bits (cbits) to the target if ncont is equal to the
		 * total number of callbacks that are invoked (n).
		 */
		for (vep = mdb_list_next(&sep->se_velist);
		    vep != NULL; vep = nvep, n++) {
			/*
			 * Place an extra hold on the current vespec and pick
			 * up the next pointer before invoking the callback: we
			 * must be prepared for the vespec to be deleted or
			 * moved to a different list by the callback.
			 */
			mdb_tgt_vespec_hold(t, vep);
			nvep = mdb_list_next(vep);

			vep->ve_flags |= MDB_TGT_SPEC_MATCHED;
			vep->ve_hits++;

			/* expose "." (st_pc) and "hits" to the callback */
			mdb_nv_set_value(mdb.m_dot, t->t_status.st_pc);
			mdb_nv_set_value(hitv, vep->ve_hits);

			ASSERT((t->t_flags & T_CONT_BITS) == 0);
			vep->ve_callback(t, vep->ve_id, vep->ve_data);

			/* collect any continue request made by the callback */
			ncont += (t->t_flags & T_CONT_BITS) != 0;
			cbits |= (t->t_flags & T_CONT_BITS);
			t->t_flags &= ~T_CONT_BITS;

			if (vep->ve_limit && vep->ve_hits == vep->ve_limit) {
				if (vep->ve_flags & MDB_TGT_SPEC_AUTODEL)
					(void) mdb_tgt_vespec_delete(t,
					    vep->ve_id);
				else if (vep->ve_flags & MDB_TGT_SPEC_AUTODIS)
					(void) mdb_tgt_vespec_disable(t,
					    vep->ve_id);
			}

			if (vep->ve_limit && vep->ve_hits < vep->ve_limit) {
				if (vep->ve_flags & MDB_TGT_SPEC_AUTOSTOP)
					(void) mdb_tgt_continue(t, NULL);
			}

			mdb_tgt_vespec_rele(t, vep);
		}
	}

	if (t->t_matched != T_SE_END && ncont == n)
		t->t_flags |= cbits; /* apply continues (see above) */

	mdb_tgt_sespec_prune_all(t);

	t->t_status.st_flags &= ~MDB_TGT_BUSY;
	t->t_flags &= ~MDB_TGT_F_BUSY;

	if (tsp != NULL)
		bcopy(&t->t_status, tsp, sizeof (mdb_tgt_status_t));

	if (error != 0)
		return (set_errno(error));

	return (0);
}
1302
1303 /*
1304 * This function is the common glue that connects the high-level target layer
1305 * continue functions (e.g. step and cont below) with the low-level
1306 * tgt_continue() function above. Since vespec callbacks may perform any
1307 * actions, including attempting to continue the target itself, we must be
1308 * prepared to be called while the target is still marked F_BUSY. In this
1309 * case, we just set a pending bit and return. When we return from the call
1310 * to tgt_continue() that made us busy into the tgt_request_continue() call
1311 * that is still on the stack, we will loop around and call tgt_continue()
1312 * again. This allows vespecs to continue the target without recursion.
1313 */
static int
tgt_request_continue(mdb_tgt_t *t, mdb_tgt_status_t *tsp, uint_t tflag,
    int (*t_cont)(mdb_tgt_t *, mdb_tgt_status_t *))
{
	mdb_tgt_spec_desc_t desc;
	mdb_sespec_t *sep;
	char buf[BUFSIZ];
	int status;

	/*
	 * If a continue is requested while we are already busy (i.e. from
	 * within a vespec callback), just record the pending flag; the loop
	 * below in the outer invocation will pick it up.
	 */
	if (t->t_flags & MDB_TGT_F_BUSY) {
		t->t_flags |= tflag;
		return (0);
	}

	/* Keep continuing as long as callbacks leave continue bits pending. */
	do {
		status = tgt_continue(t, tsp, t_cont);
	} while (status == 0 && (t->t_flags & T_CONT_BITS));

	if (status == 0) {
		/* Report each matched, non-silent vespec to the user. */
		for (sep = t->t_matched; sep != T_SE_END;
		    sep = sep->se_matched) {
			mdb_vespec_t *vep;

			for (vep = mdb_list_next(&sep->se_velist); vep;
			    vep = mdb_list_next(vep)) {
				if (vep->ve_flags & MDB_TGT_SPEC_SILENT)
					continue;
				warn("%s\n", sep->se_ops->se_info(t, sep,
				    vep, &desc, buf, sizeof (buf)));
			}
		}

		/* Notify interested parties that target state changed. */
		mdb_callb_fire(MDB_CALLB_STCHG);
	}

	t->t_flags &= ~T_CONT_BITS;
	return (status);
}
1352
1353 /*
1354 * Restart target execution: we rely upon the underlying target implementation
1355 * to do most of the work for us. In particular, we assume it will properly
1356 * preserve the state of our event lists if the run fails for some reason,
1357 * and that it will reset all events to the IDLE state if the run succeeds.
1358 * If it is successful, we attempt to activate all of the idle sespecs. The
1359 * t_run() operation is defined to leave the target stopped at the earliest
1360 * possible point in execution, and then return control to the debugger,
1361 * awaiting a step or continue operation to set it running again.
1362 */
1363 int
mdb_tgt_run(mdb_tgt_t * t,int argc,const mdb_arg_t * argv)1364 mdb_tgt_run(mdb_tgt_t *t, int argc, const mdb_arg_t *argv)
1365 {
1366 int i;
1367
1368 for (i = 0; i < argc; i++) {
1369 if (argv->a_type != MDB_TYPE_STRING)
1370 return (set_errno(EINVAL));
1371 }
1372
1373 if (t->t_ops->t_run(t, argc, argv) == -1)
1374 return (-1); /* errno is set for us */
1375
1376 t->t_flags &= ~T_CONT_BITS;
1377 (void) mdb_tgt_sespec_activate_all(t);
1378
1379 if (mdb.m_term != NULL)
1380 IOP_CTL(mdb.m_term, MDB_IOC_CTTY, NULL);
1381
1382 return (0);
1383 }
1384
1385 int
mdb_tgt_step(mdb_tgt_t * t,mdb_tgt_status_t * tsp)1386 mdb_tgt_step(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
1387 {
1388 return (tgt_request_continue(t, tsp, MDB_TGT_F_STEP, t->t_ops->t_step));
1389 }
1390
1391 int
mdb_tgt_step_out(mdb_tgt_t * t,mdb_tgt_status_t * tsp)1392 mdb_tgt_step_out(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
1393 {
1394 t->t_flags |= MDB_TGT_F_STEP_OUT; /* set flag even if tgt not busy */
1395 return (tgt_request_continue(t, tsp, 0, t->t_ops->t_cont));
1396 }
1397
1398 int
mdb_tgt_step_branch(mdb_tgt_t * t,mdb_tgt_status_t * tsp)1399 mdb_tgt_step_branch(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
1400 {
1401 t->t_flags |= MDB_TGT_F_STEP_BRANCH; /* set flag even if tgt not busy */
1402 return (tgt_request_continue(t, tsp, 0, t->t_ops->t_cont));
1403 }
1404
1405 int
mdb_tgt_next(mdb_tgt_t * t,mdb_tgt_status_t * tsp)1406 mdb_tgt_next(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
1407 {
1408 t->t_flags |= MDB_TGT_F_NEXT; /* set flag even if tgt not busy */
1409 return (tgt_request_continue(t, tsp, 0, t->t_ops->t_step));
1410 }
1411
1412 int
mdb_tgt_continue(mdb_tgt_t * t,mdb_tgt_status_t * tsp)1413 mdb_tgt_continue(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
1414 {
1415 return (tgt_request_continue(t, tsp, MDB_TGT_F_CONT, t->t_ops->t_cont));
1416 }
1417
1418 int
mdb_tgt_signal(mdb_tgt_t * t,int sig)1419 mdb_tgt_signal(mdb_tgt_t *t, int sig)
1420 {
1421 return (t->t_ops->t_signal(t, sig));
1422 }
1423
1424 void *
mdb_tgt_vespec_data(mdb_tgt_t * t,int vid)1425 mdb_tgt_vespec_data(mdb_tgt_t *t, int vid)
1426 {
1427 mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, vid);
1428
1429 if (vep == NULL) {
1430 (void) set_errno(EMDB_NOSESPEC);
1431 return (NULL);
1432 }
1433
1434 return (vep->ve_data);
1435 }
1436
1437 /*
1438 * Return a structured description and comment string for the given vespec.
1439 * We fill in the common information from the vespec, and then call down to
1440 * the underlying sespec to provide the comment string and modify any
1441 * event type-specific information.
1442 */
1443 char *
mdb_tgt_vespec_info(mdb_tgt_t * t,int vid,mdb_tgt_spec_desc_t * sp,char * buf,size_t nbytes)1444 mdb_tgt_vespec_info(mdb_tgt_t *t, int vid, mdb_tgt_spec_desc_t *sp,
1445 char *buf, size_t nbytes)
1446 {
1447 mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, vid);
1448
1449 mdb_tgt_spec_desc_t desc;
1450 mdb_sespec_t *sep;
1451
1452 if (vep == NULL) {
1453 if (sp != NULL)
1454 bzero(sp, sizeof (mdb_tgt_spec_desc_t));
1455 (void) set_errno(EMDB_NOSESPEC);
1456 return (NULL);
1457 }
1458
1459 if (sp == NULL)
1460 sp = &desc;
1461
1462 sep = vep->ve_se;
1463
1464 sp->spec_id = vep->ve_id;
1465 sp->spec_flags = vep->ve_flags;
1466 sp->spec_hits = vep->ve_hits;
1467 sp->spec_limit = vep->ve_limit;
1468 sp->spec_state = sep->se_state;
1469 sp->spec_errno = sep->se_errno;
1470 sp->spec_base = NULL;
1471 sp->spec_size = 0;
1472 sp->spec_data = vep->ve_data;
1473
1474 return (sep->se_ops->se_info(t, sep, vep, sp, buf, nbytes));
1475 }
1476
1477 /*
1478 * Qsort callback for sorting vespecs by VID, used below.
1479 */
1480 static int
tgt_vespec_compare(const mdb_vespec_t ** lp,const mdb_vespec_t ** rp)1481 tgt_vespec_compare(const mdb_vespec_t **lp, const mdb_vespec_t **rp)
1482 {
1483 return ((*lp)->ve_id - (*rp)->ve_id);
1484 }
1485
1486 /*
1487 * Iterate over all vespecs and call the specified callback function with the
1488 * corresponding VID and caller data pointer. We want the callback function
1489 * to see a consistent, sorted snapshot of the vespecs, and allow the callback
1490 * to take actions such as deleting the vespec itself, so we cannot simply
1491 * iterate over the lists. Instead, we pre-allocate an array of vespec
1492 * pointers, fill it in and place an additional hold on each vespec, and then
1493 * sort it. After the callback has been executed on each vespec in the
1494 * sorted array, we remove our hold and free the temporary array.
1495 */
1496 int
mdb_tgt_vespec_iter(mdb_tgt_t * t,mdb_tgt_vespec_f * func,void * p)1497 mdb_tgt_vespec_iter(mdb_tgt_t *t, mdb_tgt_vespec_f *func, void *p)
1498 {
1499 mdb_vespec_t **veps, **vepp, **vend;
1500 mdb_vespec_t *vep, *nvep;
1501 mdb_sespec_t *sep;
1502
1503 uint_t vecnt = t->t_vecnt;
1504
1505 veps = mdb_alloc(sizeof (mdb_vespec_t *) * vecnt, UM_SLEEP);
1506 vend = veps + vecnt;
1507 vepp = veps;
1508
1509 for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
1510 for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
1511 mdb_tgt_vespec_hold(t, vep);
1512 nvep = mdb_list_next(vep);
1513 *vepp++ = vep;
1514 }
1515 }
1516
1517 for (sep = mdb_list_next(&t->t_idle); sep; sep = mdb_list_next(sep)) {
1518 for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
1519 mdb_tgt_vespec_hold(t, vep);
1520 nvep = mdb_list_next(vep);
1521 *vepp++ = vep;
1522 }
1523 }
1524
1525 if (vepp != vend) {
1526 fail("target has %u vespecs on list but vecnt shows %u\n",
1527 (uint_t)(vepp - veps), vecnt);
1528 }
1529
1530 qsort(veps, vecnt, sizeof (mdb_vespec_t *),
1531 (int (*)(const void *, const void *))tgt_vespec_compare);
1532
1533 for (vepp = veps; vepp < vend; vepp++) {
1534 if (func(t, p, (*vepp)->ve_id, (*vepp)->ve_data) != 0)
1535 break;
1536 }
1537
1538 for (vepp = veps; vepp < vend; vepp++)
1539 mdb_tgt_vespec_rele(t, *vepp);
1540
1541 mdb_free(veps, sizeof (mdb_vespec_t *) * vecnt);
1542 return (0);
1543 }
1544
1545 /*
1546 * Reset the vespec flags, match limit, and callback data to the specified
1547 * values. We silently correct invalid parameters, except for the VID.
1548 * The caller is required to query the existing properties and pass back
1549 * the existing values for any properties that should not be modified.
1550 * If the callback data is modified, the caller is responsible for cleaning
1551 * up any state associated with the previous value.
1552 */
int
mdb_tgt_vespec_modify(mdb_tgt_t *t, int id, uint_t flags,
    uint_t limit, void *data)
{
	mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id);

	if (vep == NULL)
		return (set_errno(EMDB_NOSESPEC));

	/*
	 * If the value of the MDB_TGT_SPEC_DISABLED bit is changing, call the
	 * appropriate vespec function to do the enable/disable work.
	 */
	if ((flags & MDB_TGT_SPEC_DISABLED) !=
	    (vep->ve_flags & MDB_TGT_SPEC_DISABLED)) {
		if (flags & MDB_TGT_SPEC_DISABLED)
			(void) mdb_tgt_vespec_disable(t, id);
		else
			(void) mdb_tgt_vespec_enable(t, id);
	}

	/*
	 * Make sure that only one MDB_TGT_SPEC_AUTO* bit is set in the new
	 * flags value: extra bits are cleared according to order of
	 * precedence (AUTOSTOP > AUTODEL > AUTODIS).
	 */
	if (flags & MDB_TGT_SPEC_AUTOSTOP)
		flags &= ~(MDB_TGT_SPEC_AUTODEL | MDB_TGT_SPEC_AUTODIS);
	else if (flags & MDB_TGT_SPEC_AUTODEL)
		flags &= ~MDB_TGT_SPEC_AUTODIS;

	/*
	 * The TEMPORARY property always takes precedence over STICKY.
	 */
	if (flags & MDB_TGT_SPEC_TEMPORARY)
		flags &= ~MDB_TGT_SPEC_STICKY;

	/*
	 * If any MDB_TGT_SPEC_AUTO* bits are changing, reset the hit count
	 * back to zero and clear all of the old auto bits.
	 */
	if ((flags & T_AUTO_BITS) != (vep->ve_flags & T_AUTO_BITS)) {
		vep->ve_flags &= ~T_AUTO_BITS;
		vep->ve_hits = 0;
	}

	/* Implementation-private bits (T_IMPL_BITS) are always preserved. */
	vep->ve_flags = (vep->ve_flags & T_IMPL_BITS) | (flags & ~T_IMPL_BITS);
	vep->ve_data = data;

	/*
	 * If any MDB_TGT_SPEC_AUTO* flags are set, make sure the limit is at
	 * least one.  If none are set, reset it back to zero.
	 */
	if (vep->ve_flags & T_AUTO_BITS)
		vep->ve_limit = MAX(limit, 1);
	else
		vep->ve_limit = 0;

	/*
	 * As a convenience, we allow the caller to specify SPEC_DELETED in
	 * the flags field as indication that the event should be deleted.
	 */
	if (flags & MDB_TGT_SPEC_DELETED)
		(void) mdb_tgt_vespec_delete(t, id);

	return (0);
}
1619
1620 /*
1621 * Remove the user disabled bit from the specified vespec, and attempt to
1622 * activate the underlying sespec and move it to the active list if possible.
1623 */
1624 int
mdb_tgt_vespec_enable(mdb_tgt_t * t,int id)1625 mdb_tgt_vespec_enable(mdb_tgt_t *t, int id)
1626 {
1627 mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id);
1628
1629 if (vep == NULL)
1630 return (set_errno(EMDB_NOSESPEC));
1631
1632 if (vep->ve_flags & MDB_TGT_SPEC_DISABLED) {
1633 ASSERT(mdb_list_next(vep) == NULL);
1634 vep->ve_flags &= ~MDB_TGT_SPEC_DISABLED;
1635 if (mdb_tgt_sespec_activate_one(t, vep->ve_se) < 0)
1636 return (-1); /* errno is set for us */
1637 }
1638
1639 return (0);
1640 }
1641
1642 /*
1643 * Set the user disabled bit on the specified vespec, and move it to the idle
1644 * list. If the vespec is not alone with its sespec or if it is a currently
1645 * matched event, we must always create a new idle sespec and move the vespec
1646 * there. If the vespec was alone and active, we can simply idle the sespec.
1647 */
int
mdb_tgt_vespec_disable(mdb_tgt_t *t, int id)
{
	mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id);
	mdb_sespec_t *sep;

	if (vep == NULL)
		return (set_errno(EMDB_NOSESPEC));

	if (vep->ve_flags & MDB_TGT_SPEC_DISABLED)
		return (0); /* already disabled */

	/*
	 * If the vespec shares its sespec with siblings, or the sespec is
	 * currently on the matched list, move the vespec to a fresh idle
	 * sespec; otherwise we can simply idle the sespec in place (below).
	 */
	if (mdb_list_prev(vep) != NULL || mdb_list_next(vep) != NULL ||
	    vep->ve_se->se_matched != NULL) {

		sep = mdb_tgt_sespec_insert(t, vep->ve_se->se_ops, &t->t_idle);

		/* Transfer the vespec's reference from its old sespec... */
		mdb_list_delete(&vep->ve_se->se_velist, vep);
		mdb_tgt_sespec_rele(t, vep->ve_se);

		/* ...to the newly created idle sespec. */
		mdb_list_append(&sep->se_velist, vep);
		mdb_tgt_sespec_hold(t, sep);

		vep->ve_flags &= ~MDB_TGT_SPEC_MATCHED;
		vep->ve_se = sep;

	} else if (vep->ve_se->se_state != MDB_TGT_SPEC_IDLE)
		mdb_tgt_sespec_idle_one(t, vep->ve_se, EMDB_SPECDIS);

	vep->ve_flags |= MDB_TGT_SPEC_DISABLED;
	return (0);
}
1680
1681 /*
1682 * Delete the given vespec. We use the MDB_TGT_SPEC_DELETED flag to ensure that
1683 * multiple calls to mdb_tgt_vespec_delete to not attempt to decrement the
1684 * reference count on the vespec more than once. This is because the vespec
1685 * may remain referenced if it is currently held by another routine (e.g.
1686 * vespec_iter), and so the user could attempt to delete it more than once
1687 * since it reference count will be >= 2 prior to the first delete call.
1688 */
1689 int
mdb_tgt_vespec_delete(mdb_tgt_t * t,int id)1690 mdb_tgt_vespec_delete(mdb_tgt_t *t, int id)
1691 {
1692 mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id);
1693
1694 if (vep == NULL)
1695 return (set_errno(EMDB_NOSESPEC));
1696
1697 if (vep->ve_flags & MDB_TGT_SPEC_DELETED)
1698 return (set_errno(EBUSY));
1699
1700 vep->ve_flags |= MDB_TGT_SPEC_DELETED;
1701 mdb_tgt_vespec_rele(t, vep);
1702 return (0);
1703 }
1704
1705 int
mdb_tgt_add_vbrkpt(mdb_tgt_t * t,uintptr_t addr,int spec_flags,mdb_tgt_se_f * func,void * p)1706 mdb_tgt_add_vbrkpt(mdb_tgt_t *t, uintptr_t addr,
1707 int spec_flags, mdb_tgt_se_f *func, void *p)
1708 {
1709 return (t->t_ops->t_add_vbrkpt(t, addr, spec_flags, func, p));
1710 }
1711
1712 int
mdb_tgt_add_sbrkpt(mdb_tgt_t * t,const char * symbol,int spec_flags,mdb_tgt_se_f * func,void * p)1713 mdb_tgt_add_sbrkpt(mdb_tgt_t *t, const char *symbol,
1714 int spec_flags, mdb_tgt_se_f *func, void *p)
1715 {
1716 return (t->t_ops->t_add_sbrkpt(t, symbol, spec_flags, func, p));
1717 }
1718
1719 int
mdb_tgt_add_pwapt(mdb_tgt_t * t,physaddr_t pa,size_t n,uint_t flags,int spec_flags,mdb_tgt_se_f * func,void * p)1720 mdb_tgt_add_pwapt(mdb_tgt_t *t, physaddr_t pa, size_t n, uint_t flags,
1721 int spec_flags, mdb_tgt_se_f *func, void *p)
1722 {
1723 if ((flags & ~MDB_TGT_WA_RWX) || flags == 0) {
1724 (void) set_errno(EINVAL);
1725 return (0);
1726 }
1727
1728 if (pa + n < pa) {
1729 (void) set_errno(EMDB_WPRANGE);
1730 return (0);
1731 }
1732
1733 return (t->t_ops->t_add_pwapt(t, pa, n, flags, spec_flags, func, p));
1734 }
1735
1736 int
mdb_tgt_add_vwapt(mdb_tgt_t * t,uintptr_t va,size_t n,uint_t flags,int spec_flags,mdb_tgt_se_f * func,void * p)1737 mdb_tgt_add_vwapt(mdb_tgt_t *t, uintptr_t va, size_t n, uint_t flags,
1738 int spec_flags, mdb_tgt_se_f *func, void *p)
1739 {
1740 if ((flags & ~MDB_TGT_WA_RWX) || flags == 0) {
1741 (void) set_errno(EINVAL);
1742 return (0);
1743 }
1744
1745 if (va + n < va) {
1746 (void) set_errno(EMDB_WPRANGE);
1747 return (0);
1748 }
1749
1750 return (t->t_ops->t_add_vwapt(t, va, n, flags, spec_flags, func, p));
1751 }
1752
1753 int
mdb_tgt_add_iowapt(mdb_tgt_t * t,uintptr_t addr,size_t n,uint_t flags,int spec_flags,mdb_tgt_se_f * func,void * p)1754 mdb_tgt_add_iowapt(mdb_tgt_t *t, uintptr_t addr, size_t n, uint_t flags,
1755 int spec_flags, mdb_tgt_se_f *func, void *p)
1756 {
1757 if ((flags & ~MDB_TGT_WA_RWX) || flags == 0) {
1758 (void) set_errno(EINVAL);
1759 return (0);
1760 }
1761
1762 if (addr + n < addr) {
1763 (void) set_errno(EMDB_WPRANGE);
1764 return (0);
1765 }
1766
1767 return (t->t_ops->t_add_iowapt(t, addr, n, flags, spec_flags, func, p));
1768 }
1769
1770 int
mdb_tgt_add_sysenter(mdb_tgt_t * t,int sysnum,int spec_flags,mdb_tgt_se_f * func,void * p)1771 mdb_tgt_add_sysenter(mdb_tgt_t *t, int sysnum,
1772 int spec_flags, mdb_tgt_se_f *func, void *p)
1773 {
1774 return (t->t_ops->t_add_sysenter(t, sysnum, spec_flags, func, p));
1775 }
1776
1777 int
mdb_tgt_add_sysexit(mdb_tgt_t * t,int sysnum,int spec_flags,mdb_tgt_se_f * func,void * p)1778 mdb_tgt_add_sysexit(mdb_tgt_t *t, int sysnum,
1779 int spec_flags, mdb_tgt_se_f *func, void *p)
1780 {
1781 return (t->t_ops->t_add_sysexit(t, sysnum, spec_flags, func, p));
1782 }
1783
1784 int
mdb_tgt_add_signal(mdb_tgt_t * t,int sig,int spec_flags,mdb_tgt_se_f * func,void * p)1785 mdb_tgt_add_signal(mdb_tgt_t *t, int sig,
1786 int spec_flags, mdb_tgt_se_f *func, void *p)
1787 {
1788 return (t->t_ops->t_add_signal(t, sig, spec_flags, func, p));
1789 }
1790
1791 int
mdb_tgt_add_fault(mdb_tgt_t * t,int flt,int spec_flags,mdb_tgt_se_f * func,void * p)1792 mdb_tgt_add_fault(mdb_tgt_t *t, int flt,
1793 int spec_flags, mdb_tgt_se_f *func, void *p)
1794 {
1795 return (t->t_ops->t_add_fault(t, flt, spec_flags, func, p));
1796 }
1797
1798 int
mdb_tgt_getareg(mdb_tgt_t * t,mdb_tgt_tid_t tid,const char * rname,mdb_tgt_reg_t * rp)1799 mdb_tgt_getareg(mdb_tgt_t *t, mdb_tgt_tid_t tid,
1800 const char *rname, mdb_tgt_reg_t *rp)
1801 {
1802 return (t->t_ops->t_getareg(t, tid, rname, rp));
1803 }
1804
1805 int
mdb_tgt_putareg(mdb_tgt_t * t,mdb_tgt_tid_t tid,const char * rname,mdb_tgt_reg_t r)1806 mdb_tgt_putareg(mdb_tgt_t *t, mdb_tgt_tid_t tid,
1807 const char *rname, mdb_tgt_reg_t r)
1808 {
1809 return (t->t_ops->t_putareg(t, tid, rname, r));
1810 }
1811
/*
 * Walk the stack described by the given register set, delegating the
 * target-specific unwind logic to the t_stack_iter op (which presumably
 * invokes cb with p once per frame -- the contract is target-defined).
 */
int
mdb_tgt_stack_iter(mdb_tgt_t *t, const mdb_tgt_gregset_t *gregs,
    mdb_tgt_stack_f *cb, void *p)
{
	return (t->t_ops->t_stack_iter(t, gregs, cb, p));
}
1818
1819 int
mdb_tgt_xdata_iter(mdb_tgt_t * t,mdb_tgt_xdata_f * func,void * private)1820 mdb_tgt_xdata_iter(mdb_tgt_t *t, mdb_tgt_xdata_f *func, void *private)
1821 {
1822 mdb_xdata_t *xdp;
1823
1824 for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) {
1825 if (func(private, xdp->xd_name, xdp->xd_desc,
1826 xdp->xd_copy(t, NULL, 0)) != 0)
1827 break;
1828 }
1829
1830 return (0);
1831 }
1832
1833 ssize_t
mdb_tgt_getxdata(mdb_tgt_t * t,const char * name,void * buf,size_t nbytes)1834 mdb_tgt_getxdata(mdb_tgt_t *t, const char *name, void *buf, size_t nbytes)
1835 {
1836 mdb_xdata_t *xdp;
1837
1838 for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) {
1839 if (strcmp(xdp->xd_name, name) == 0)
1840 return (xdp->xd_copy(t, buf, nbytes));
1841 }
1842
1843 return (set_errno(ENODATA));
1844 }
1845
/*
 * Stub for t_ops slots a target does not implement: fails with
 * EMDB_TGTNOTSUP, returning whatever set_errno() yields
 * (conventionally -1 -- see set_errno's definition).
 */
long
mdb_tgt_notsup()
{
	return (set_errno(EMDB_TGTNOTSUP));
}
1851
/*
 * Pointer-returning analogue of mdb_tgt_notsup(): sets EMDB_TGTNOTSUP
 * and returns NULL, for unimplemented t_ops slots that return a pointer.
 */
void *
mdb_tgt_null()
{
	(void) set_errno(EMDB_TGTNOTSUP);
	return (NULL);
}
1858
/*
 * Stub for t_ops slots where "do nothing" is valid: always succeeds,
 * returning 0.
 */
long
mdb_tgt_nop()
{
	return (0L);
}
1864
1865 int
mdb_tgt_xdata_insert(mdb_tgt_t * t,const char * name,const char * desc,ssize_t (* copy)(mdb_tgt_t *,void *,size_t))1866 mdb_tgt_xdata_insert(mdb_tgt_t *t, const char *name, const char *desc,
1867 ssize_t (*copy)(mdb_tgt_t *, void *, size_t))
1868 {
1869 mdb_xdata_t *xdp;
1870
1871 for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) {
1872 if (strcmp(xdp->xd_name, name) == 0)
1873 return (set_errno(EMDB_XDEXISTS));
1874 }
1875
1876 xdp = mdb_alloc(sizeof (mdb_xdata_t), UM_SLEEP);
1877 mdb_list_append(&t->t_xdlist, xdp);
1878
1879 xdp->xd_name = name;
1880 xdp->xd_desc = desc;
1881 xdp->xd_copy = copy;
1882
1883 return (0);
1884 }
1885
1886 int
mdb_tgt_xdata_delete(mdb_tgt_t * t,const char * name)1887 mdb_tgt_xdata_delete(mdb_tgt_t *t, const char *name)
1888 {
1889 mdb_xdata_t *xdp;
1890
1891 for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) {
1892 if (strcmp(xdp->xd_name, name) == 0) {
1893 mdb_list_delete(&t->t_xdlist, xdp);
1894 mdb_free(xdp, sizeof (mdb_xdata_t));
1895 return (0);
1896 }
1897 }
1898
1899 return (set_errno(EMDB_NOXD));
1900 }
1901
1902 int
mdb_tgt_sym_match(const GElf_Sym * sym,uint_t mask)1903 mdb_tgt_sym_match(const GElf_Sym *sym, uint_t mask)
1904 {
1905 #if STT_NUM != (STT_TLS + 1)
1906 #error "STT_NUM has grown. update mdb_tgt_sym_match()"
1907 #endif
1908
1909 uchar_t s_bind = GELF_ST_BIND(sym->st_info);
1910 uchar_t s_type = GELF_ST_TYPE(sym->st_info);
1911
1912 /*
1913 * In case you haven't already guessed, this relies on the bitmask
1914 * used by <mdb/mdb_target.h> and <libproc.h> for encoding symbol
1915 * type and binding matching the order of STB and STT constants
1916 * in <sys/elf.h>. Changes to ELF must maintain binary
1917 * compatibility, so I think this is reasonably fair game.
1918 */
1919 if (s_bind < STB_NUM && s_type < STT_NUM) {
1920 uint_t type = (1 << (s_type + 8)) | (1 << s_bind);
1921 return ((type & ~mask) == 0);
1922 }
1923
1924 return (0); /* Unknown binding or type; fail to match */
1925 }
1926
/*
 * Refresh the legacy adb variables (b, d, e, m, t) from the given ELF
 * object file, or reset them all to zero if gf is NULL.  Each variable
 * is updated only if it already exists in mdb.m_nv.
 */
void
mdb_tgt_elf_export(mdb_gelf_file_t *gf)
{
	GElf_Xword d = 0, t = 0;
	GElf_Addr b = 0, e = 0;
	uint32_t m = 0;
	mdb_var_t *v;

	/*
	 * Reset legacy adb variables based on the specified ELF object file
	 * provided by the target. We define these variables:
	 *
	 * b - the address of the data segment (first writeable Phdr)
	 * d - the size of the data segment
	 * e - the address of the entry point
	 * m - the magic number identifying the file
	 * t - the size of the text segment (first executable Phdr)
	 */
	if (gf != NULL) {
		const GElf_Phdr *text = NULL, *data = NULL;
		size_t i;

		e = gf->gf_ehdr.e_entry;
		bcopy(&gf->gf_ehdr.e_ident[EI_MAG0], &m, sizeof (m));

		/* Find the first executable and first writable loadable Phdrs */
		for (i = 0; i < gf->gf_npload; i++) {
			if (text == NULL && (gf->gf_phdrs[i].p_flags & PF_X))
				text = &gf->gf_phdrs[i];
			if (data == NULL && (gf->gf_phdrs[i].p_flags & PF_W))
				data = &gf->gf_phdrs[i];
		}

		/* Note: 't' is deliberately p_memsz (a size), per adb */
		if (text != NULL)
			t = text->p_memsz;
		if (data != NULL) {
			b = data->p_vaddr;
			d = data->p_memsz;
		}
	}

	if ((v = mdb_nv_lookup(&mdb.m_nv, "b")) != NULL)
		mdb_nv_set_value(v, b);
	if ((v = mdb_nv_lookup(&mdb.m_nv, "d")) != NULL)
		mdb_nv_set_value(v, d);
	if ((v = mdb_nv_lookup(&mdb.m_nv, "e")) != NULL)
		mdb_nv_set_value(v, e);
	if ((v = mdb_nv_lookup(&mdb.m_nv, "m")) != NULL)
		mdb_nv_set_value(v, m);
	if ((v = mdb_nv_lookup(&mdb.m_nv, "t")) != NULL)
		mdb_nv_set_value(v, t);
}
1978
/*
 * Take an additional reference on the given software event specifier.
 * The target argument is unused, but kept for symmetry with
 * mdb_tgt_sespec_rele().
 */
/*ARGSUSED*/
void
mdb_tgt_sespec_hold(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	sep->se_refs++;
	ASSERT(sep->se_refs != 0);	/* catch reference-count overflow */
}
1986
/*
 * Release one reference on the given software event specifier.  When the
 * final reference is dropped the sespec is destroyed: a non-idle sespec
 * is torn down via its se_dtor op and unlinked from the active list,
 * while an idle one is simply unlinked from the idle list.  The velist
 * must already be empty -- every attached vespec holds a reference, so a
 * zero refcount implies no vespecs remain.
 */
void
mdb_tgt_sespec_rele(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	ASSERT(sep->se_refs != 0);

	if (--sep->se_refs == 0) {
		mdb_dprintf(MDB_DBG_TGT, "destroying sespec %p\n", (void *)sep);
		ASSERT(mdb_list_next(&sep->se_velist) == NULL);

		if (sep->se_state != MDB_TGT_SPEC_IDLE) {
			sep->se_ops->se_dtor(t, sep);
			mdb_list_delete(&t->t_active, sep);
		} else
			mdb_list_delete(&t->t_idle, sep);

		mdb_free(sep, sizeof (mdb_sespec_t));
	}
}
2005
2006 mdb_sespec_t *
mdb_tgt_sespec_insert(mdb_tgt_t * t,const mdb_se_ops_t * ops,mdb_list_t * list)2007 mdb_tgt_sespec_insert(mdb_tgt_t *t, const mdb_se_ops_t *ops, mdb_list_t *list)
2008 {
2009 mdb_sespec_t *sep = mdb_zalloc(sizeof (mdb_sespec_t), UM_SLEEP);
2010
2011 if (list == &t->t_active)
2012 sep->se_state = MDB_TGT_SPEC_ACTIVE;
2013 else
2014 sep->se_state = MDB_TGT_SPEC_IDLE;
2015
2016 mdb_list_append(list, sep);
2017 sep->se_ops = ops;
2018 return (sep);
2019 }
2020
2021 mdb_sespec_t *
mdb_tgt_sespec_lookup_active(mdb_tgt_t * t,const mdb_se_ops_t * ops,void * args)2022 mdb_tgt_sespec_lookup_active(mdb_tgt_t *t, const mdb_se_ops_t *ops, void *args)
2023 {
2024 mdb_sespec_t *sep;
2025
2026 for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
2027 if (sep->se_ops == ops && sep->se_ops->se_secmp(t, sep, args))
2028 break;
2029 }
2030
2031 return (sep);
2032 }
2033
2034 mdb_sespec_t *
mdb_tgt_sespec_lookup_idle(mdb_tgt_t * t,const mdb_se_ops_t * ops,void * args)2035 mdb_tgt_sespec_lookup_idle(mdb_tgt_t *t, const mdb_se_ops_t *ops, void *args)
2036 {
2037 mdb_sespec_t *sep;
2038
2039 for (sep = mdb_list_next(&t->t_idle); sep; sep = mdb_list_next(sep)) {
2040 if (sep->se_ops == ops && sep->se_ops->se_vecmp(t,
2041 mdb_list_next(&sep->se_velist), args))
2042 break;
2043 }
2044
2045 return (sep);
2046 }
2047
/*
 * Take an additional reference on the given virtual event specifier.
 * The target argument is unused, but kept for symmetry with
 * mdb_tgt_vespec_rele().
 */
/*ARGSUSED*/
void
mdb_tgt_vespec_hold(mdb_tgt_t *t, mdb_vespec_t *vep)
{
	vep->ve_refs++;
	ASSERT(vep->ve_refs != 0);	/* catch reference-count overflow */
}
2055
/*
 * Release one reference on the given virtual event specifier.  On the
 * final release the vespec is unhooked from its sespec (dropping the
 * sespec reference it held), its VID is recycled when possible, its
 * ve_args are destroyed via ve_dtor, and the structure is freed.
 */
void
mdb_tgt_vespec_rele(mdb_tgt_t *t, mdb_vespec_t *vep)
{
	ASSERT(vep->ve_refs != 0);

	if (--vep->ve_refs == 0) {
		/*
		 * Remove this vespec from the sespec's velist and decrement
		 * the reference count on the sespec.
		 */
		mdb_list_delete(&vep->ve_se->se_velist, vep);
		mdb_tgt_sespec_rele(t, vep->ve_se);

		/*
		 * If we are deleting the most recently assigned VID, reset
		 * t_vepos or t_veneg as appropriate to re-use that number.
		 * This could be enhanced to re-use any free number by
		 * maintaining a bitmap or hash of the allocated IDs.
		 */
		if (vep->ve_id > 0 && t->t_vepos == vep->ve_id + 1)
			t->t_vepos = vep->ve_id;
		else if (vep->ve_id < 0 && t->t_veneg == -vep->ve_id + 1)
			t->t_veneg = -vep->ve_id;

		/*
		 * Call the destructor to clean up ve_args, and then free
		 * the actual vespec structure.
		 */
		vep->ve_dtor(vep);
		mdb_free(vep, sizeof (mdb_vespec_t));

		ASSERT(t->t_vecnt != 0);
		t->t_vecnt--;
	}
}
2091
/*
 * Create a new virtual event specifier (vespec) with the given ops vector
 * and properties, attach it to a matching or newly created software event
 * specifier (sespec), and return its VID: positive for visible vespecs,
 * negative for internal (hidden) ones.
 */
int
mdb_tgt_vespec_insert(mdb_tgt_t *t, const mdb_se_ops_t *ops, int flags,
    mdb_tgt_se_f *func, void *data, void *args, void (*dtor)(mdb_vespec_t *))
{
	mdb_vespec_t *vep = mdb_zalloc(sizeof (mdb_vespec_t), UM_SLEEP);

	int id, mult, *seqp;
	mdb_sespec_t *sep;

	/*
	 * Make sure that only one MDB_TGT_SPEC_AUTO* bit is set in the new
	 * flags value: extra bits are cleared according to order of
	 * precedence.
	 */
	if (flags & MDB_TGT_SPEC_AUTOSTOP)
		flags &= ~(MDB_TGT_SPEC_AUTODEL | MDB_TGT_SPEC_AUTODIS);
	else if (flags & MDB_TGT_SPEC_AUTODEL)
		flags &= ~MDB_TGT_SPEC_AUTODIS;

	/*
	 * The TEMPORARY property always takes precedence over STICKY.
	 */
	if (flags & MDB_TGT_SPEC_TEMPORARY)
		flags &= ~MDB_TGT_SPEC_STICKY;

	/*
	 * Find a matching sespec or create a new one on the appropriate list.
	 * We always create a new sespec if the vespec is created disabled.
	 */
	if (flags & MDB_TGT_SPEC_DISABLED)
		sep = mdb_tgt_sespec_insert(t, ops, &t->t_idle);
	else if ((sep = mdb_tgt_sespec_lookup_active(t, ops, args)) == NULL &&
	    (sep = mdb_tgt_sespec_lookup_idle(t, ops, args)) == NULL)
		sep = mdb_tgt_sespec_insert(t, ops, &t->t_active);

	/*
	 * Generate a new ID for the vespec. Increasing positive integers are
	 * assigned to visible vespecs; decreasing negative integers are
	 * assigned to hidden vespecs. The target saves our most recent choice.
	 */
	if (flags & MDB_TGT_SPEC_INTERNAL) {
		seqp = &t->t_veneg;
		mult = -1;
	} else {
		seqp = &t->t_vepos;
		mult = 1;
	}

	id = *seqp;

	/*
	 * Probe forward for the first unused ID; the MAX() keeps the
	 * sequence at 1 or above, presumably to skip 0 (never a valid VID)
	 * if the counter ever wraps.
	 */
	while (mdb_tgt_vespec_lookup(t, id * mult) != NULL)
		id = MAX(id + 1, 1);

	*seqp = MAX(id + 1, 1);

	vep->ve_id = id * mult;
	vep->ve_flags = flags & ~(MDB_TGT_SPEC_MATCHED | MDB_TGT_SPEC_DELETED);
	vep->ve_se = sep;
	vep->ve_callback = func;
	vep->ve_data = data;
	vep->ve_args = args;
	vep->ve_dtor = dtor;

	mdb_list_append(&sep->se_velist, vep);
	mdb_tgt_sespec_hold(t, sep);

	mdb_tgt_vespec_hold(t, vep);
	t->t_vecnt++;

	/*
	 * If this vespec is the first reference to the sespec and it's active,
	 * then it is newly created and we should attempt to initialize it.
	 * If se_ctor fails, then move the sespec back to the idle list.
	 */
	if (sep->se_refs == 1 && sep->se_state == MDB_TGT_SPEC_ACTIVE &&
	    sep->se_ops->se_ctor(t, sep, vep->ve_args) == -1) {

		mdb_list_delete(&t->t_active, sep);
		mdb_list_append(&t->t_idle, sep);

		sep->se_state = MDB_TGT_SPEC_IDLE;
		sep->se_errno = errno;
		sep->se_data = NULL;
	}

	/*
	 * If the sespec is active and the target is currently running (because
	 * we grabbed it using PGRAB_NOSTOP), then go ahead and attempt to arm
	 * the sespec so it will take effect immediately.
	 */
	if (sep->se_state == MDB_TGT_SPEC_ACTIVE &&
	    t->t_status.st_state == MDB_TGT_RUNNING)
		mdb_tgt_sespec_arm_one(t, sep);

	mdb_dprintf(MDB_DBG_TGT, "inserted [ %d ] sep=%p refs=%u state=%d\n",
	    vep->ve_id, (void *)sep, sep->se_refs, sep->se_state);

	return (vep->ve_id);
}
2190
2191 /*
2192 * Search the target's active, idle, and disabled lists for the vespec matching
2193 * the specified VID, and return a pointer to it, or NULL if no match is found.
2194 */
2195 mdb_vespec_t *
mdb_tgt_vespec_lookup(mdb_tgt_t * t,int vid)2196 mdb_tgt_vespec_lookup(mdb_tgt_t *t, int vid)
2197 {
2198 mdb_sespec_t *sep;
2199 mdb_vespec_t *vep;
2200
2201 if (vid == 0)
2202 return (NULL); /* 0 is never a valid VID */
2203
2204 for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
2205 for (vep = mdb_list_next(&sep->se_velist); vep;
2206 vep = mdb_list_next(vep)) {
2207 if (vep->ve_id == vid)
2208 return (vep);
2209 }
2210 }
2211
2212 for (sep = mdb_list_next(&t->t_idle); sep; sep = mdb_list_next(sep)) {
2213 for (vep = mdb_list_next(&sep->se_velist); vep;
2214 vep = mdb_list_next(vep)) {
2215 if (vep->ve_id == vid)
2216 return (vep);
2217 }
2218 }
2219
2220 return (NULL);
2221 }
2222
/*
 * Default vespec destructor, for vespecs whose ve_args require no
 * cleanup.
 */
/*ARGSUSED*/
void
no_ve_dtor(mdb_vespec_t *vep)
{
	/* default destructor does nothing */
}
2229
/*
 * Default event callback, for vespecs registered without one.
 */
/*ARGSUSED*/
void
no_se_f(mdb_tgt_t *t, int vid, void *data)
{
	/* default callback does nothing */
}
2236
/*
 * Default sespec destructor, for event types that allocate no
 * per-sespec state.
 */
/*ARGSUSED*/
void
no_se_dtor(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	/* default destructor does nothing */
}
2243
/*
 * Default sespec-comparison op: the sespec matches when its private data
 * pointer is identical to the candidate argument pointer.
 */
/*ARGSUSED*/
int
no_se_secmp(mdb_tgt_t *t, mdb_sespec_t *sep, void *args)
{
	return (sep->se_data == args);
}
2250
/*
 * Default vespec-comparison op: the vespec matches when its argument
 * pointer is identical to the candidate argument pointer.
 */
/*ARGSUSED*/
int
no_se_vecmp(mdb_tgt_t *t, mdb_vespec_t *vep, void *args)
{
	return (vep->ve_args == args);
}
2257
/*
 * Default arm op, for event types that need no target-side setup when
 * armed; always succeeds.
 */
/*ARGSUSED*/
int
no_se_arm(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	return (0); /* return success */
}
2264
/*
 * Default disarm op, for event types that need no target-side teardown
 * when disarmed; always succeeds.
 */
/*ARGSUSED*/
int
no_se_disarm(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	return (0); /* return success */
}
2271
2272 /*ARGSUSED*/
2273 int
no_se_cont(mdb_tgt_t * t,mdb_sespec_t * sep,mdb_tgt_status_t * tsp)2274 no_se_cont(mdb_tgt_t *t, mdb_sespec_t *sep, mdb_tgt_status_t *tsp)
2275 {
2276 if (tsp != &t->t_status)
2277 bcopy(&t->t_status, tsp, sizeof (mdb_tgt_status_t));
2278
2279 return (0); /* return success */
2280 }
2281
2282 int
mdb_tgt_register_dcmds(mdb_tgt_t * t,const mdb_dcmd_t * dcp,int flags)2283 mdb_tgt_register_dcmds(mdb_tgt_t *t, const mdb_dcmd_t *dcp, int flags)
2284 {
2285 int fail = 0;
2286
2287 for (; dcp->dc_name != NULL; dcp++) {
2288 if (mdb_module_add_dcmd(t->t_module, dcp, flags) == -1) {
2289 warn("failed to add dcmd %s", dcp->dc_name);
2290 fail++;
2291 }
2292 }
2293
2294 return (fail > 0 ? -1 : 0);
2295 }
2296
2297 int
mdb_tgt_register_walkers(mdb_tgt_t * t,const mdb_walker_t * wp,int flags)2298 mdb_tgt_register_walkers(mdb_tgt_t *t, const mdb_walker_t *wp, int flags)
2299 {
2300 int fail = 0;
2301
2302 for (; wp->walk_name != NULL; wp++) {
2303 if (mdb_module_add_walker(t->t_module, wp, flags) == -1) {
2304 warn("failed to add walk %s", wp->walk_name);
2305 fail++;
2306 }
2307 }
2308
2309 return (fail > 0 ? -1 : 0);
2310 }
2311
2312 void
mdb_tgt_register_regvars(mdb_tgt_t * t,const mdb_tgt_regdesc_t * rdp,const mdb_nv_disc_t * disc,int flags)2313 mdb_tgt_register_regvars(mdb_tgt_t *t, const mdb_tgt_regdesc_t *rdp,
2314 const mdb_nv_disc_t *disc, int flags)
2315 {
2316 for (; rdp->rd_name != NULL; rdp++) {
2317 if (!(rdp->rd_flags & MDB_TGT_R_EXPORT))
2318 continue; /* Don't export register as a variable */
2319
2320 if (rdp->rd_flags & MDB_TGT_R_RDONLY)
2321 flags |= MDB_NV_RDONLY;
2322
2323 (void) mdb_nv_insert(&mdb.m_nv, rdp->rd_name, disc,
2324 (uintptr_t)t, MDB_NV_PERSIST | flags);
2325 }
2326 }
2327