1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 * 25 * Copyright 2018 Joyent, Inc. 26 * Copyright 2024 Oxide Computer Company 27 */ 28 29 /* 30 * MDB Target Layer 31 * 32 * The *target* is the program being inspected by the debugger. The MDB target 33 * layer provides a set of functions that insulate common debugger code, 34 * including the MDB Module API, from the implementation details of how the 35 * debugger accesses information from a given target. Each target exports a 36 * standard set of properties, including one or more address spaces, one or 37 * more symbol tables, a set of load objects, and a set of threads that can be 38 * examined using the interfaces in <mdb/mdb_target.h>. This technique has 39 * been employed successfully in other debuggers, including [1], primarily 40 * to improve portability, although the term "target" often refers to the 41 * encapsulation of architectural or operating system-specific details. 
The 42 * target abstraction is useful for MDB because it allows us to easily extend 43 * the debugger to examine a variety of different program forms. Primarily, 44 * the target functions validate input arguments and then call an appropriate 45 * function in the target ops vector, defined in <mdb/mdb_target_impl.h>. 46 * However, this interface layer provides a very high level of flexibility for 47 * separating the debugger interface from instrumentation details. Experience 48 * has shown this kind of design can facilitate separating out debugger 49 * instrumentation into an external agent [2] and enable the development of 50 * advanced instrumentation frameworks [3]. We want MDB to be an ideal 51 * extensible framework for the development of such applications. 52 * 53 * Aside from a set of wrapper functions, the target layer also provides event 54 * management for targets that represent live executing programs. Our model of 55 * events is also extensible, and is based upon work in [3] and [4]. We define 56 * a *software event* as a state transition in the target program (for example, 57 * the transition of the program counter to a location of interest) that is 58 * observed by the debugger or its agent. A *software event specifier* is a 59 * description of a class of software events that is used by the debugger to 60 * instrument the target so that the corresponding software events can be 61 * observed. In MDB, software event specifiers are represented by the 62 * mdb_sespec_t structure, defined in <mdb/mdb_target_impl.h>. As the user, 63 * the internal debugger code, and MDB modules may all wish to observe software 64 * events and receive appropriate notification and callbacks, we do not expose 65 * software event specifiers directly as part of the user interface. Instead, 66 * clients of the target layer request that events be observed by creating 67 * new *virtual event specifiers*. 
Each virtual specifier is named by a unique 68 * non-zero integer (the VID), and is represented by a mdb_vespec_t structure. 69 * One or more virtual specifiers are then associated with each underlying 70 * software event specifier. This design enforces the constraint that the 71 * target must only insert one set of instrumentation, regardless of how many 72 * times the target layer was asked to trace a given event. For example, if 73 * multiple clients request a breakpoint at a particular address, the virtual 74 * specifiers will map to the same sespec, ensuring that only one breakpoint 75 * trap instruction is actually planted at the given target address. When no 76 * virtual specifiers refer to an sespec, it is no longer needed and can be 77 * removed, along with the corresponding instrumentation. 78 * 79 * The following state transition diagram illustrates the life cycle of a 80 * software event specifier and example transitions: 81 * 82 * cont/ 83 * +--------+ delete +--------+ stop +-------+ 84 * (|( DEAD )|) <------- ( ACTIVE ) <------> ( ARMED ) 85 * +--------+ +--------+ +-------+ 86 * ^ load/unload ^ ^ failure/ | 87 * delete | object / \ reset | failure 88 * | v v | 89 * | +--------+ +-------+ | 90 * +---- ( IDLE ) ( ERR ) <----+ 91 * | +--------+ +-------+ 92 * | | 93 * +------------------------------+ 94 * 95 * The MDB execution control model is based upon the synchronous debugging 96 * model exported by Solaris proc(5). A target program is set running or the 97 * debugger is attached to a running target. On ISTOP (stop on event of 98 * interest), one target thread is selected as the representative. The 99 * algorithm for selecting the representative is target-specific, but we assume 100 * that if an observed software event has occurred, the target will select the 101 * thread that triggered the state transition of interest. The other threads 102 * are stopped in sympathy with the representative as soon as possible. 
Prior 103 * to continuing the target, we plant our instrumentation, transitioning event 104 * specifiers from the ACTIVE to the ARMED state, and then back again when the 105 * target stops. We then query each active event specifier to learn which ones 106 * are matched, and then invoke the callbacks associated with their vespecs. 107 * If an OS error occurs while attempting to arm or disarm a specifier, the 108 * specifier is transitioned to the ERROR state; we will attempt to arm it 109 * again at the next continue. If no target process is under our control or 110 * if an event is not currently applicable (e.g. a deferred breakpoint on an 111 * object that is not yet loaded), it remains in the IDLE state. The target 112 * implementation should intercept object load events and then transition the 113 * specifier to the ACTIVE state when the corresponding object is loaded. 114 * 115 * To simplify the debugger implementation and allow targets to easily provide 116 * new types of observable events, most of the event specifier management is 117 * done by the target layer. Each software event specifier provides an ops 118 * vector of subroutines that the target layer can call to perform the 119 * various state transitions described above. The target maintains two lists 120 * of mdb_sespec_t's: the t_idle list (IDLE state) and the t_active list 121 * (ACTIVE, ARMED, and ERROR states). Each mdb_sespec_t maintains a list of 122 * associated mdb_vespec_t's. If an sespec is IDLE or ERROR, its se_errno 123 * field will have an errno value specifying the reason for its inactivity. 124 * The vespec stores the client's callback function and private data, and the 125 * arguments used to construct the sespec. All objects are reference counted 126 * so we can destroy an object when it is no longer needed. 
The mdb_sespec_t 127 * invariants for the respective states are as follows: 128 * 129 * IDLE: on t_idle list, se_data == NULL, se_errno != 0, se_ctor not called 130 * ACTIVE: on t_active list, se_data valid, se_errno == 0, se_ctor called 131 * ARMED: on t_active list, se_data valid, se_errno == 0, se_ctor called 132 * ERROR: on t_active list, se_data valid, se_errno != 0, se_ctor called 133 * 134 * Additional commentary on specific state transitions and issues involving 135 * event management can be found below near the target layer functions. 136 * 137 * References 138 * 139 * [1] John Gilmore, "Working in GDB", Technical Report, Cygnus Support, 140 * 1.84 edition, 1994. 141 * 142 * [2] David R. Hanson and Mukund Raghavachari, "A Machine-Independent 143 * Debugger", Software--Practice and Experience, 26(11), 1277-1299(1996). 144 * 145 * [3] Michael W. Shapiro, "RDB: A System for Incremental Replay Debugging", 146 * Technical Report CS-97-12, Department of Computer Science, 147 * Brown University. 148 * 149 * [4] Daniel B. Price, "New Techniques for Replay Debugging", Technical 150 * Report CS-98-05, Department of Computer Science, Brown University. 151 */ 152 153 #include <mdb/mdb_target_impl.h> 154 #include <mdb/mdb_debug.h> 155 #include <mdb/mdb_modapi.h> 156 #include <mdb/mdb_err.h> 157 #include <mdb/mdb_callb.h> 158 #include <mdb/mdb_gelf.h> 159 #include <mdb/mdb_io_impl.h> 160 #include <mdb/mdb_string.h> 161 #include <mdb/mdb_signal.h> 162 #include <mdb/mdb_frame.h> 163 #include <mdb/mdb.h> 164 165 #include <sys/stat.h> 166 #include <sys/param.h> 167 #include <sys/signal.h> 168 #include <strings.h> 169 #include <stdlib.h> 170 #include <errno.h> 171 172 /* 173 * Define convenience macros for referencing the set of vespec flag bits that 174 * are preserved by the target implementation, and the set of bits that 175 * determine automatic ve_hits == ve_limit behavior. 
 */
#define	T_IMPL_BITS	\
	(MDB_TGT_SPEC_INTERNAL | MDB_TGT_SPEC_SILENT | MDB_TGT_SPEC_MATCHED | \
	MDB_TGT_SPEC_DELETED)

#define	T_AUTO_BITS	\
	(MDB_TGT_SPEC_AUTOSTOP | MDB_TGT_SPEC_AUTODEL | MDB_TGT_SPEC_AUTODIS)

/*
 * Define convenience macro for referencing target flag pending continue bits.
 */
#define	T_CONT_BITS	\
	(MDB_TGT_F_STEP | MDB_TGT_F_STEP_OUT | MDB_TGT_F_NEXT | MDB_TGT_F_CONT)

/*
 * Construct a new target and append it to the global target list.  The
 * target is associated with the module that registered the given constructor
 * (defaulting to the root module), the constructor is invoked to perform
 * implementation-specific initialization, and the target's initial status is
 * cached in t_status.  Returns NULL with errno set if the flags are invalid
 * or the constructor fails; on constructor failure the target is unlinked
 * and freed before returning.
 */
mdb_tgt_t *
mdb_tgt_create(mdb_tgt_ctor_f *ctor, int flags, int argc, const char *argv[])
{
	mdb_module_t *mp;
	mdb_tgt_t *t;

	if (flags & ~MDB_TGT_F_ALL) {
		(void) set_errno(EINVAL);
		return (NULL);
	}

	t = mdb_zalloc(sizeof (mdb_tgt_t), UM_SLEEP);
	mdb_list_append(&mdb.m_tgtlist, t);

	t->t_module = &mdb.m_rmod;
	t->t_matched = T_SE_END;
	t->t_flags = flags;
	t->t_vepos = 1;
	t->t_veneg = 1;

	/*
	 * Find the module that registered this target constructor, if any,
	 * so the target can be attributed to it in debug output.
	 */
	for (mp = mdb.m_mhead; mp != NULL; mp = mp->mod_next) {
		if (ctor == mp->mod_tgt_ctor) {
			t->t_module = mp;
			break;
		}
	}

	if (ctor(t, argc, argv) != 0) {
		mdb_list_delete(&mdb.m_tgtlist, t);
		mdb_free(t, sizeof (mdb_tgt_t));
		return (NULL);
	}

	mdb_dprintf(MDB_DBG_TGT, "t_create %s (%p)\n",
	    t->t_module->mod_name, (void *)t);

	(void) t->t_ops->t_status(t, &t->t_status);
	return (t);
}

/*
 * Return the current set of target flags (MDB_TGT_F_* bits).
 */
int
mdb_tgt_getflags(mdb_tgt_t *t)
{
	return (t->t_flags);
}

/*
 * Request a change to the target flags.  The flag bits are validated here
 * and the actual update is delegated to the target implementation.
 */
int
mdb_tgt_setflags(mdb_tgt_t *t, int flags)
{
	if (flags & ~MDB_TGT_F_ALL)
		return (set_errno(EINVAL));

	return (t->t_ops->t_setflags(t, flags));
}

/*
 * Change the target's current context (interpretation of the context pointer
 * is target-specific).
 */
int
mdb_tgt_setcontext(mdb_tgt_t *t, void *context)
{
	return (t->t_ops->t_setcontext(t, context));
}

/*
 * Callback for mdb_tgt_vespec_iter used by mdb_tgt_destroy to delete every
 * remaining vespec before the target itself is torn down.
 */
/*ARGSUSED*/
static int
tgt_delete_vespec(mdb_tgt_t *t, void *private, int vid, void *data)
{
	(void) mdb_tgt_vespec_delete(t, vid);
	return (0);
}

/*
 * Tear down a target: deactivate it if it is the current target, free its
 * external data list, idle and delete all event specifiers, invoke the
 * implementation's destructor, and unlink and free the target structure.
 * If this was the current target, the most recently created remaining
 * target (if any) is activated in its place.
 */
void
mdb_tgt_destroy(mdb_tgt_t *t)
{
	mdb_xdata_t *xdp, *nxdp;

	if (mdb.m_target == t) {
		mdb_dprintf(MDB_DBG_TGT, "t_deactivate %s (%p)\n",
		    t->t_module->mod_name, (void *)t);
		t->t_ops->t_deactivate(t);
		mdb.m_target = NULL;
	}

	mdb_dprintf(MDB_DBG_TGT, "t_destroy %s (%p)\n",
	    t->t_module->mod_name, (void *)t);

	for (xdp = mdb_list_next(&t->t_xdlist); xdp != NULL; xdp = nxdp) {
		nxdp = mdb_list_next(xdp);
		mdb_list_delete(&t->t_xdlist, xdp);
		mdb_free(xdp, sizeof (mdb_xdata_t));
	}

	mdb_tgt_sespec_idle_all(t, EBUSY, TRUE);
	(void) mdb_tgt_vespec_iter(t, tgt_delete_vespec, NULL);
	t->t_ops->t_destroy(t);

	mdb_list_delete(&mdb.m_tgtlist, t);
	mdb_free(t, sizeof (mdb_tgt_t));

	if (mdb.m_target == NULL)
		mdb_tgt_activate(mdb_list_prev(&mdb.m_tgtlist));
}

/*
 * Make the given target the current target.  Any previously current target
 * is deactivated first.  Passing t == NULL simply deactivates the current
 * target.  When a target becomes current we may also need to re-expand
 * mdb.m_root (see below) and re-evaluate the macro and dmod paths.
 */
void
mdb_tgt_activate(mdb_tgt_t *t)
{
	mdb_tgt_t *otgt = mdb.m_target;

	if (mdb.m_target != NULL) {
		mdb_dprintf(MDB_DBG_TGT, "t_deactivate %s (%p)\n",
		    mdb.m_target->t_module->mod_name, (void *)mdb.m_target);
		mdb.m_target->t_ops->t_deactivate(mdb.m_target);
	}

	if ((mdb.m_target = t) != NULL) {
		const char *v = strstr(mdb.m_root, "%V");

		mdb_dprintf(MDB_DBG_TGT, "t_activate %s (%p)\n",
		    t->t_module->mod_name, (void *)t);

		/*
		 * If the root was explicitly set with -R and contains %V,
		 * expand it like a path.  If the resulting directory is
		 * not present, then replace %V with "latest" and re-evaluate.
		 */
		if (v != NULL) {
			char old_root[MAXPATHLEN];
			const char **p;
#ifndef _KMDB
			struct stat s;
#endif
			size_t len;

			p = mdb_path_alloc(mdb.m_root, &len);
			(void) strcpy(old_root, mdb.m_root);
			(void) strncpy(mdb.m_root, p[0], MAXPATHLEN);
			mdb.m_root[MAXPATHLEN - 1] = '\0';
			mdb_path_free(p, len);

#ifndef _KMDB
			if (stat(mdb.m_root, &s) == -1 && errno == ENOENT) {
				mdb.m_flags |= MDB_FL_LATEST;
				p = mdb_path_alloc(old_root, &len);
				(void) strncpy(mdb.m_root, p[0], MAXPATHLEN);
				mdb.m_root[MAXPATHLEN - 1] = '\0';
				mdb_path_free(p, len);
			}
#endif
		}

		/*
		 * Re-evaluate the macro and dmod paths now that we have the
		 * new target set and m_root figured out.
		 */
		if (otgt == NULL) {
			mdb_set_ipath(mdb.m_ipathstr);
			mdb_set_lpath(mdb.m_lpathstr);
		}

		t->t_ops->t_activate(t);
	}
}

/*
 * The functions below are thin wrappers that validate nothing themselves
 * and simply forward to the corresponding entry point in the target ops
 * vector.
 */
void
mdb_tgt_periodic(mdb_tgt_t *t)
{
	t->t_ops->t_periodic(t);
}

const char *
mdb_tgt_name(mdb_tgt_t *t)
{
	return (t->t_ops->t_name(t));
}

const char *
mdb_tgt_isa(mdb_tgt_t *t)
{
	return (t->t_ops->t_isa(t));
}

const char *
mdb_tgt_platform(mdb_tgt_t *t)
{
	return (t->t_ops->t_platform(t));
}

int
mdb_tgt_uname(mdb_tgt_t *t, struct utsname *utsp)
{
	return (t->t_ops->t_uname(t, utsp));
}

int
mdb_tgt_dmodel(mdb_tgt_t *t)
{
	return (t->t_ops->t_dmodel(t));
}

int
mdb_tgt_auxv(mdb_tgt_t *t, const auxv_t **auxvp)
{
	return (t->t_ops->t_auxv(t, auxvp));
}

/*
 * Read from the given address space.  If the target does its own address
 * space I/O (MDB_TGT_F_ASIO), or the address space is not one of the
 * well-known constants, dispatch to t_aread; otherwise route the request to
 * the appropriate virtual, physical, file, or I/O-space read routine.
 */
ssize_t
mdb_tgt_aread(mdb_tgt_t *t, mdb_tgt_as_t as,
    void *buf, size_t n, mdb_tgt_addr_t addr)
{
	if (t->t_flags & MDB_TGT_F_ASIO)
		return (t->t_ops->t_aread(t, as, buf, n, addr));

	switch ((uintptr_t)as) {
	case (uintptr_t)MDB_TGT_AS_VIRT:
	case (uintptr_t)MDB_TGT_AS_VIRT_I:
	case (uintptr_t)MDB_TGT_AS_VIRT_S:
		return (t->t_ops->t_vread(t, buf, n, addr));
	case (uintptr_t)MDB_TGT_AS_PHYS:
		return (t->t_ops->t_pread(t, buf, n, addr));
	case (uintptr_t)MDB_TGT_AS_FILE:
		return (t->t_ops->t_fread(t, buf, n, addr));
	case (uintptr_t)MDB_TGT_AS_IO:
		return (t->t_ops->t_ioread(t, buf, n, addr));
	}
	return (t->t_ops->t_aread(t, as, buf, n, addr));
}

/*
 * Write to the given address space, using the same dispatch rules as
 * mdb_tgt_aread.  Writes of any kind are refused with EMDB_TGTRDONLY unless
 * the target was opened for writing (MDB_TGT_F_RDWR).
 */
ssize_t
mdb_tgt_awrite(mdb_tgt_t *t, mdb_tgt_as_t as,
    const void *buf, size_t n, mdb_tgt_addr_t addr)
{
	if (!(t->t_flags & MDB_TGT_F_RDWR))
		return (set_errno(EMDB_TGTRDONLY));

	if (t->t_flags & MDB_TGT_F_ASIO)
		return (t->t_ops->t_awrite(t, as, buf, n, addr));

	switch ((uintptr_t)as) {
	case (uintptr_t)MDB_TGT_AS_VIRT:
	case (uintptr_t)MDB_TGT_AS_VIRT_I:
	case (uintptr_t)MDB_TGT_AS_VIRT_S:
		return (t->t_ops->t_vwrite(t, buf, n, addr));
	case (uintptr_t)MDB_TGT_AS_PHYS:
		return (t->t_ops->t_pwrite(t, buf, n, addr));
	case (uintptr_t)MDB_TGT_AS_FILE:
		return (t->t_ops->t_fwrite(t, buf, n, addr));
	case (uintptr_t)MDB_TGT_AS_IO:
		return (t->t_ops->t_iowrite(t, buf, n, addr));
	}
	return (t->t_ops->t_awrite(t, as, buf, n, addr));
}

/*
 * Fixed address space read/write wrappers.  Each write-side wrapper checks
 * MDB_TGT_F_RDWR and fails with EMDB_TGTRDONLY for read-only targets.
 */
ssize_t
mdb_tgt_vread(mdb_tgt_t *t, void *buf, size_t n, uintptr_t addr)
{
	return (t->t_ops->t_vread(t, buf, n, addr));
}

ssize_t
mdb_tgt_vwrite(mdb_tgt_t *t, const void *buf, size_t n, uintptr_t addr)
{
	if (t->t_flags & MDB_TGT_F_RDWR)
		return (t->t_ops->t_vwrite(t, buf, n, addr));

	return (set_errno(EMDB_TGTRDONLY));
}

ssize_t
mdb_tgt_pread(mdb_tgt_t *t, void *buf, size_t n, physaddr_t addr)
{
	return (t->t_ops->t_pread(t, buf, n, addr));
}

ssize_t
mdb_tgt_pwrite(mdb_tgt_t *t, const void *buf, size_t n, physaddr_t addr)
{
	if (t->t_flags & MDB_TGT_F_RDWR)
		return (t->t_ops->t_pwrite(t, buf, n, addr));

	return (set_errno(EMDB_TGTRDONLY));
}

ssize_t
mdb_tgt_fread(mdb_tgt_t *t, void *buf, size_t n, uintptr_t addr)
{
	return (t->t_ops->t_fread(t, buf, n, addr));
}

ssize_t
mdb_tgt_fwrite(mdb_tgt_t *t, const void *buf, size_t n, uintptr_t addr)
{
	if (t->t_flags & MDB_TGT_F_RDWR)
		return (t->t_ops->t_fwrite(t, buf, n, addr));

	return (set_errno(EMDB_TGTRDONLY));
}

ssize_t
mdb_tgt_ioread(mdb_tgt_t *t, void *buf, size_t n, uintptr_t addr)
{
	return (t->t_ops->t_ioread(t, buf, n, addr));
}

ssize_t
mdb_tgt_iowrite(mdb_tgt_t *t, const void *buf, size_t n, uintptr_t addr)
{
	if (t->t_flags & MDB_TGT_F_RDWR)
		return (t->t_ops->t_iowrite(t, buf, n, addr));

	return (set_errno(EMDB_TGTRDONLY));
}

/*
 * Translate a virtual address in the given address space to a physical
 * address; the translation itself is target-specific.
 */
int
mdb_tgt_vtop(mdb_tgt_t *t, mdb_tgt_as_t as, uintptr_t va, physaddr_t *pap)
{
	return (t->t_ops->t_vtop(t, as, va, pap));
}

/*
 * Read a NUL-terminated string from the given address space.  We first try
 * to read up to nbytes in a single bulk operation and locate the terminator
 * in the result.  If the bulk read fails (for example, when the tail of the
 * range is not readable), fall back to reading one byte at a time until we
 * see the terminator, fill the buffer, or hit a read error.  If nbytes is
 * non-zero the buffer is always NUL-terminated on return.  Returns the
 * number of bytes placed in the buffer before the terminator, or -1 if not
 * even one byte could be read.
 */
ssize_t
mdb_tgt_readstr(mdb_tgt_t *t, mdb_tgt_as_t as, char *buf,
    size_t nbytes, mdb_tgt_addr_t addr)
{
	ssize_t n = -1, nread = mdb_tgt_aread(t, as, buf, nbytes, addr);
	char *p;

	if (nread >= 0) {
		if ((p = memchr(buf, '\0', nread)) != NULL)
			nread = (size_t)(p - buf);
		goto done;
	}

	nread = 0;
	p = &buf[0];

	while (nread < nbytes && (n = mdb_tgt_aread(t, as, p, 1, addr)) == 1) {
		if (*p == '\0')
			return (nread);
		nread++;
		addr++;
		p++;
	}

	if (nread == 0 && n == -1)
		return (-1); /* If we can't even read a byte, return -1 */

done:
	if (nbytes != 0)
		buf[MIN(nread, nbytes - 1)] = '\0';

	return (nread);
}

/*
 * Write the given NUL-terminated string, including its terminator, to the
 * given address space.  Returns the number of characters written excluding
 * the terminator, or the error result of the underlying write.
 */
ssize_t
mdb_tgt_writestr(mdb_tgt_t *t, mdb_tgt_as_t as,
    const char *buf, mdb_tgt_addr_t addr)
{
	ssize_t nwritten = mdb_tgt_awrite(t, as, buf, strlen(buf) + 1, addr);
	return (nwritten > 0 ? nwritten - 1 : nwritten);
}

/*
 * Look up a symbol by name.  When the object is MDB_TGT_OBJ_EVERY, the
 * debugger's private symbol table is consulted first; otherwise (or on a
 * miss there) the lookup is delegated to the target implementation.  The
 * symbol and its symbol-table information are copied out only on success,
 * and only into the pointers the caller actually supplied.
 */
int
mdb_tgt_lookup_by_name(mdb_tgt_t *t, const char *obj,
    const char *name, GElf_Sym *symp, mdb_syminfo_t *sip)
{
	mdb_syminfo_t info;
	GElf_Sym sym;
	uint_t id;

	if (name == NULL || t == NULL)
		return (set_errno(EINVAL));

	if (obj == MDB_TGT_OBJ_EVERY &&
	    mdb_gelf_symtab_lookup_by_name(mdb.m_prsym, name, &sym, &id) == 0) {
		info.sym_table = MDB_TGT_PRVSYM;
		info.sym_id = id;
		goto found;
	}

	if (t->t_ops->t_lookup_by_name(t, obj, name, &sym, &info) == 0)
		goto found;

	return (-1);

found:
	if (symp != NULL)
		*symp = sym;
	if (sip != NULL)
		*sip = info;
	return (0);
}

/*
 * Look up the symbol (if any) corresponding to the given address.  The
 * lookup is delegated entirely to the target; results are copied out only
 * on success and only into the pointers the caller supplied.
 */
int
mdb_tgt_lookup_by_addr(mdb_tgt_t *t, uintptr_t addr, uint_t flags,
    char *buf, size_t len, GElf_Sym *symp, mdb_syminfo_t *sip)
{
	mdb_syminfo_t info;
	GElf_Sym sym;

	if (t == NULL)
		return (set_errno(EINVAL));

	if (t->t_ops->t_lookup_by_addr(t, addr, flags,
	    buf, len, &sym, &info) == 0) {
		if (symp != NULL)
			*symp = sym;
		if (sip != NULL)
			*sip = info;
		return (0);
	}

	return (-1);
}

/*
 * The mdb_tgt_lookup_by_scope function is a convenience routine for code that
 * wants to look up a scoped symbol name such as "object`symbol".  It is
 * implemented as a simple wrapper around mdb_tgt_lookup_by_name.  Note that
 * we split on the *last* occurrence of "`", so the object name itself may
 * contain additional scopes whose evaluation is left to the target.  This
 * allows targets to implement additional scopes, such as source files,
 * function names, link map identifiers, etc.
 */
int
mdb_tgt_lookup_by_scope(mdb_tgt_t *t, const char *s, GElf_Sym *symp,
    mdb_syminfo_t *sip)
{
	const char *object = MDB_TGT_OBJ_EVERY;
	const char *name = s;
	char buf[MDB_TGT_SYM_NAMLEN];

	if (t == NULL)
		return (set_errno(EINVAL));

	if (strchr(name, '`') != NULL) {

		(void) strncpy(buf, s, sizeof (buf));
		buf[sizeof (buf) - 1] = '\0';
		name = buf;

		/* split at the last backquote; reject empty halves */
		if ((s = strrsplit(buf, '`')) != NULL) {
			object = buf;
			name = s;
			if (*object == '\0')
				return (set_errno(EMDB_NOOBJ));
			if (*name == '\0')
				return (set_errno(EMDB_NOSYM));
		}
	}

	return (mdb_tgt_lookup_by_name(t, object, name, symp, sip));
}

/*
 * Iterate over the symbols in the given object's symbol table, invoking the
 * callback for each one.  The table selector and the bind/type filter bits
 * are validated here; iteration itself is target-specific.
 */
int
mdb_tgt_symbol_iter(mdb_tgt_t *t, const char *obj, uint_t which,
    uint_t type, mdb_tgt_sym_f *cb, void *p)
{
	if ((which != MDB_TGT_SYMTAB && which != MDB_TGT_DYNSYM) ||
	    (type & ~(MDB_TGT_BIND_ANY | MDB_TGT_TYPE_ANY)) != 0)
		return (set_errno(EINVAL));

	return (t->t_ops->t_symbol_iter(t, obj, which, type, cb, p));
}

/*
 * Convenience routine: look up a symbol by name and read nbytes starting at
 * its value from the given address space.  Returns -1 if the lookup fails.
 */
ssize_t
mdb_tgt_readsym(mdb_tgt_t *t, mdb_tgt_as_t as, void *buf, size_t nbytes,
    const char *obj, const char *name)
{
	GElf_Sym sym;

	if (mdb_tgt_lookup_by_name(t, obj, name, &sym, NULL) == 0)
		return (mdb_tgt_aread(t, as, buf, nbytes, sym.st_value));

	return (-1);
}

/*
 * Convenience routine: look up a symbol by name and write nbytes starting at
 * its value in the given address space.  Returns -1 if the lookup fails.
 */
ssize_t
mdb_tgt_writesym(mdb_tgt_t *t, mdb_tgt_as_t as, const void *buf,
    size_t nbytes, const char *obj, const char *name)
{
	GElf_Sym sym;

	if (mdb_tgt_lookup_by_name(t, obj, name, &sym, NULL) == 0)
		return (mdb_tgt_awrite(t, as, buf, nbytes, sym.st_value));

	return (-1);
}

/*
 * Wrappers forwarding directly to the target's mapping, object, and CTF
 * lookup routines.
 */
int
mdb_tgt_mapping_iter(mdb_tgt_t *t, mdb_tgt_map_f *cb, void *p)
{
	return (t->t_ops->t_mapping_iter(t, cb, p));
}

int
mdb_tgt_object_iter(mdb_tgt_t *t, mdb_tgt_map_f *cb, void *p)
{
	return (t->t_ops->t_object_iter(t, cb, p));
}

const mdb_map_t *
mdb_tgt_addr_to_map(mdb_tgt_t *t, uintptr_t addr)
{
	return (t->t_ops->t_addr_to_map(t, addr));
}

const mdb_map_t *
mdb_tgt_name_to_map(mdb_tgt_t *t, const char *name)
{
	return (t->t_ops->t_name_to_map(t, name));
}

struct ctf_file *
mdb_tgt_addr_to_ctf(mdb_tgt_t *t, uintptr_t addr)
{
	return (t->t_ops->t_addr_to_ctf(t, addr));
}

struct ctf_file *
mdb_tgt_name_to_ctf(mdb_tgt_t *t, const char *name)
{
	return (t->t_ops->t_name_to_ctf(t, name));
}

/*
 * Return the latest target status.  We just copy out our cached copy.  The
 * status only needs to change when the target is run, stepped, or continued.
 */
int
mdb_tgt_status(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
{
	uint_t dstop = (t->t_status.st_flags & MDB_TGT_DSTOP);
	uint_t istop = (t->t_status.st_flags & MDB_TGT_ISTOP);
	uint_t state = t->t_status.st_state;

	if (tsp == NULL)
		return (set_errno(EINVAL));

	/*
	 * If we're called with the address of the target's internal status,
	 * then call down to update it; otherwise copy out the saved status.
	 */
	if (tsp == &t->t_status && t->t_ops->t_status(t, &t->t_status) != 0)
		return (-1); /* errno is set for us */

	/*
	 * Assert that our state is valid before returning it.  The state must
	 * be valid, and DSTOP and ISTOP cannot be set simultaneously.  ISTOP
	 * is only valid when stopped.  DSTOP is only valid when running or
	 * stopped.  If any test fails, abort the debugger.
	 */
	if (state > MDB_TGT_LOST)
		fail("invalid target state (%u)\n", state);
	if (state != MDB_TGT_STOPPED && istop)
		fail("target state is (%u) and ISTOP is set\n", state);
	if (state != MDB_TGT_STOPPED && state != MDB_TGT_RUNNING && dstop)
		fail("target state is (%u) and DSTOP is set\n", state);
	if (istop && dstop)
		fail("target has ISTOP and DSTOP set simultaneously\n");

	if (tsp != &t->t_status)
		bcopy(&t->t_status, tsp, sizeof (mdb_tgt_status_t));

	return (0);
}

/*
 * For the given sespec, scan its list of vespecs for ones that are marked
 * temporary and delete them.  We use the same method as vespec_delete below.
 */
/*ARGSUSED*/
void
mdb_tgt_sespec_prune_one(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	mdb_vespec_t *vep, *nvep;

	for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
		nvep = mdb_list_next(vep);

		/* only prune TEMPORARY vespecs not already marked DELETED */
		if ((vep->ve_flags & (MDB_TGT_SPEC_DELETED |
		    MDB_TGT_SPEC_TEMPORARY)) == MDB_TGT_SPEC_TEMPORARY) {
			vep->ve_flags |= MDB_TGT_SPEC_DELETED;
			mdb_tgt_vespec_rele(t, vep);
		}
	}
}

/*
 * Prune each sespec on the active list of temporary vespecs.  This function
 * is called, for example, after the target finishes a continue operation.
 */
void
mdb_tgt_sespec_prune_all(mdb_tgt_t *t)
{
	mdb_sespec_t *sep, *nsep;

	for (sep = mdb_list_next(&t->t_active); sep != NULL; sep = nsep) {
		nsep = mdb_list_next(sep);
		mdb_tgt_sespec_prune_one(t, sep);
	}
}

/*
 * Transition the given sespec to the IDLE state.  We invoke the destructor,
 * and then move the sespec from the active list to the idle list.
 */
void
mdb_tgt_sespec_idle_one(mdb_tgt_t *t, mdb_sespec_t *sep, int reason)
{
	ASSERT(sep->se_state != MDB_TGT_SPEC_IDLE);

	/* disarm first so the instrumentation is removed before destruction */
	if (sep->se_state == MDB_TGT_SPEC_ARMED)
		(void) sep->se_ops->se_disarm(t, sep);

	sep->se_ops->se_dtor(t, sep);
	sep->se_data = NULL;

	sep->se_state = MDB_TGT_SPEC_IDLE;
	sep->se_errno = reason;

	mdb_list_delete(&t->t_active, sep);
	mdb_list_append(&t->t_idle, sep);

	mdb_tgt_sespec_prune_one(t, sep);
}

/*
 * Transition each sespec on the active list to the IDLE state.  This function
 * is called, for example, after the target terminates execution.  If
 * clear_matched is set, we also drain the t_matched list first: each matched
 * sespec has the MATCHED flag cleared from its vespecs and the hold taken at
 * match time is released.
 */
void
mdb_tgt_sespec_idle_all(mdb_tgt_t *t, int reason, int clear_matched)
{
	mdb_sespec_t *sep, *nsep;
	mdb_vespec_t *vep;

	while ((sep = t->t_matched) != T_SE_END && clear_matched) {
		for (vep = mdb_list_next(&sep->se_velist); vep != NULL; ) {
			vep->ve_flags &= ~MDB_TGT_SPEC_MATCHED;
			vep = mdb_list_next(vep);
		}

		t->t_matched = sep->se_matched;
		sep->se_matched = NULL;
		mdb_tgt_sespec_rele(t, sep);
	}

	for (sep = mdb_list_next(&t->t_active); sep != NULL; sep = nsep) {
		nsep = mdb_list_next(sep);
		mdb_tgt_sespec_idle_one(t, sep, reason);
	}
}

/*
 * Attempt to transition the given sespec from the IDLE to ACTIVE state.  We
 * do this by invoking se_ctor -- if this fails, we save the reason in se_errno
 * and return -1 with errno set.  One strange case we need to deal with here is
 * the possibility that a given vespec is sitting on the idle list with its
 * corresponding sespec, but it is actually a duplicate of another sespec on the
 * active list.  This can happen if the sespec is associated with a
 * MDB_TGT_SPEC_DISABLED vespec that was just enabled, and is now ready to be
 * activated.  A more interesting reason this situation might arise is the case
 * where a virtual address breakpoint is set at an address just mmap'ed by
 * dlmopen.  Since no symbol table information is available for this mapping
 * yet, a pre-existing deferred symbolic breakpoint may already exist for this
 * address, but it is on the idle list.  When the symbol table is ready and the
 * DLACTIVITY event occurs, we now discover that the virtual address obtained by
 * evaluating the symbolic breakpoint matches the explicit virtual address of
 * the active virtual breakpoint.  To resolve this conflict in either case, we
 * destroy the idle sespec, and attach its list of vespecs to the existing
 * active sespec.
 */
int
mdb_tgt_sespec_activate_one(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	mdb_vespec_t *vep = mdb_list_next(&sep->se_velist);

	mdb_vespec_t *nvep;
	mdb_sespec_t *dup;

	ASSERT(sep->se_state == MDB_TGT_SPEC_IDLE);
	ASSERT(vep != NULL);

	if (vep->ve_flags & MDB_TGT_SPEC_DISABLED)
		return (0); /* cannot be activated while disabled bit set */

	/*
	 * First search the active list for an existing, duplicate sespec to
	 * handle the special case described above.
	 */
	for (dup = mdb_list_next(&t->t_active); dup; dup = mdb_list_next(dup)) {
		if (dup->se_ops == sep->se_ops &&
		    dup->se_ops->se_secmp(t, dup, vep->ve_args)) {
			ASSERT(dup != sep);
			break;
		}
	}

	/*
	 * If a duplicate is found, destroy the existing, idle sespec, and
	 * attach all of its vespecs to the duplicate sespec.
	 */
	if (dup != NULL) {
		for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
			mdb_dprintf(MDB_DBG_TGT, "merge [ %d ] to sespec %p\n",
			    vep->ve_id, (void *)dup);

			if (dup->se_matched != NULL)
				vep->ve_flags |= MDB_TGT_SPEC_MATCHED;

			nvep = mdb_list_next(vep);
			vep->ve_hits = 0;

			/* move vep's reference from sep to dup */
			mdb_list_delete(&sep->se_velist, vep);
			mdb_tgt_sespec_rele(t, sep);

			mdb_list_append(&dup->se_velist, vep);
			mdb_tgt_sespec_hold(t, dup);
			vep->ve_se = dup;
		}

		mdb_dprintf(MDB_DBG_TGT, "merged idle sespec %p with %p\n",
		    (void *)sep, (void *)dup);
		return (0);
	}

	/*
	 * If no duplicate is found, call the sespec's constructor.  If this
	 * is successful, move the sespec to the active list.
	 */
	if (sep->se_ops->se_ctor(t, sep, vep->ve_args) < 0) {
		sep->se_errno = errno;
		sep->se_data = NULL;

		return (-1);
	}

	for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
		nvep = mdb_list_next(vep);
		vep->ve_hits = 0;
	}
	mdb_list_delete(&t->t_idle, sep);
	mdb_list_append(&t->t_active, sep);
	sep->se_state = MDB_TGT_SPEC_ACTIVE;
	sep->se_errno = 0;

	return (0);
}

/*
 * Transition each sespec on the idle list to the ACTIVE state.  This function
 * is called, for example, after the target's t_run() function returns.  If
 * the se_ctor() function fails, the specifier is not yet applicable; it will
 * remain on the idle list and can be activated later.
 *
 * Returns 1 if there weren't any unexpected activation failures; 0 if there
 * were.
 */
int
mdb_tgt_sespec_activate_all(mdb_tgt_t *t)
{
	mdb_sespec_t *sep, *nsep;
	int rc = 1;

	for (sep = mdb_list_next(&t->t_idle); sep != NULL; sep = nsep) {
		nsep = mdb_list_next(sep);

		/* EMDB_NOOBJ means "not applicable yet", not a failure */
		if (mdb_tgt_sespec_activate_one(t, sep) < 0 &&
		    sep->se_errno != EMDB_NOOBJ)
			rc = 0;
	}

	return (rc);
}

/*
 * Transition the given sespec to the ARMED state.  Note that we attempt to
 * re-arm sespecs previously in the ERROR state.  If se_arm() fails the sespec
 * transitions to the ERROR state but stays on the active list.
 */
void
mdb_tgt_sespec_arm_one(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	ASSERT(sep->se_state != MDB_TGT_SPEC_IDLE);

	if (sep->se_state == MDB_TGT_SPEC_ARMED)
		return; /* do not arm sespecs more than once */

	if (sep->se_ops->se_arm(t, sep) == -1) {
		sep->se_state = MDB_TGT_SPEC_ERROR;
		sep->se_errno = errno;
	} else {
		sep->se_state = MDB_TGT_SPEC_ARMED;
		sep->se_errno = 0;
	}
}

/*
 * Transition each sespec on the active list (except matched specs) to the
 * ARMED state.  This function is called prior to continuing the target.
 */
void
mdb_tgt_sespec_arm_all(mdb_tgt_t *t)
{
	mdb_sespec_t *sep, *nsep;

	for (sep = mdb_list_next(&t->t_active); sep != NULL; sep = nsep) {
		nsep = mdb_list_next(sep);
		if (sep->se_matched == NULL)
			mdb_tgt_sespec_arm_one(t, sep);
	}
}

/*
 * Transition each sespec on the active list that is in the ARMED state to
 * the ACTIVE state.  If se_disarm() fails, the sespec is transitioned to
 * the ERROR state instead, but left on the active list.
1004 */ 1005 static void 1006 tgt_disarm_sespecs(mdb_tgt_t *t) 1007 { 1008 mdb_sespec_t *sep; 1009 1010 for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) { 1011 if (sep->se_state != MDB_TGT_SPEC_ARMED) 1012 continue; /* do not disarm if in ERROR state */ 1013 1014 if (sep->se_ops->se_disarm(t, sep) == -1) { 1015 sep->se_state = MDB_TGT_SPEC_ERROR; 1016 sep->se_errno = errno; 1017 } else { 1018 sep->se_state = MDB_TGT_SPEC_ACTIVE; 1019 sep->se_errno = 0; 1020 } 1021 } 1022 } 1023 1024 /* 1025 * Determine if the software event that triggered the most recent stop matches 1026 * any of the active event specifiers. If 'all' is TRUE, we consider all 1027 * sespecs in our search. If 'all' is FALSE, we only consider ARMED sespecs. 1028 * If we successfully match an event, we add it to the t_matched list and 1029 * place an additional hold on it. 1030 */ 1031 static mdb_sespec_t * 1032 tgt_match_sespecs(mdb_tgt_t *t, int all) 1033 { 1034 mdb_sespec_t *sep; 1035 1036 for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) { 1037 if (all == FALSE && sep->se_state != MDB_TGT_SPEC_ARMED) 1038 continue; /* restrict search to ARMED sespecs */ 1039 1040 if (sep->se_state != MDB_TGT_SPEC_ERROR && 1041 sep->se_ops->se_match(t, sep, &t->t_status)) { 1042 mdb_dprintf(MDB_DBG_TGT, "match se %p\n", (void *)sep); 1043 mdb_tgt_sespec_hold(t, sep); 1044 sep->se_matched = t->t_matched; 1045 t->t_matched = sep; 1046 } 1047 } 1048 1049 return (t->t_matched); 1050 } 1051 1052 /* 1053 * This function provides the low-level target continue algorithm. We proceed 1054 * in three phases: (1) we arm the active sespecs, except the specs matched at 1055 * the time we last stopped, (2) we call se_cont() on any matched sespecs to 1056 * step over these event transitions, and then arm the corresponding sespecs, 1057 * and (3) we call the appropriate low-level continue routine. 
Once the
 * target stops again, we determine which sespecs were matched, and invoke the
 * appropriate vespec callbacks and perform other vespec maintenance.
 */
static int
tgt_continue(mdb_tgt_t *t, mdb_tgt_status_t *tsp,
    int (*t_cont)(mdb_tgt_t *, mdb_tgt_status_t *))
{
	mdb_var_t *hitv = mdb_nv_lookup(&mdb.m_nv, "hits");
	uintptr_t pc = t->t_status.st_pc;	/* PC before continuing */
	int error = 0;

	mdb_sespec_t *sep, *nsep, *matched;
	mdb_vespec_t *vep, *nvep;
	uintptr_t addr;

	uint_t cbits = 0;	/* union of pending continue bits */
	uint_t ncont = 0;	/* # of callbacks that requested cont */
	uint_t n = 0;		/* # of callbacks */

	/*
	 * If the target is undead, dead, or lost, we no longer allow continue.
	 * This effectively forces the user to use ::kill or ::run after death.
	 */
	if (t->t_status.st_state == MDB_TGT_UNDEAD)
		return (set_errno(EMDB_TGTZOMB));
	if (t->t_status.st_state == MDB_TGT_DEAD)
		return (set_errno(EMDB_TGTCORE));
	if (t->t_status.st_state == MDB_TGT_LOST)
		return (set_errno(EMDB_TGTLOST));

	/*
	 * If any of single-step, step-over, or step-out is pending, it takes
	 * precedence over an explicit or pending continue, because these are
	 * all different specialized forms of continue.
	 */
	if (t->t_flags & MDB_TGT_F_STEP)
		t_cont = t->t_ops->t_step;
	else if (t->t_flags & MDB_TGT_F_NEXT)
		t_cont = t->t_ops->t_step;
	else if (t->t_flags & MDB_TGT_F_STEP_OUT)
		t_cont = t->t_ops->t_cont;

	/*
	 * To handle step-over, we ask the target to find the address past the
	 * next control transfer instruction.  If an address is found, we plant
	 * a temporary breakpoint there and continue; otherwise just step.
	 */
	if ((t->t_flags & MDB_TGT_F_NEXT) && !(t->t_flags & MDB_TGT_F_STEP)) {
		if (t->t_ops->t_next(t, &addr) == -1 || mdb_tgt_add_vbrkpt(t,
		    addr, MDB_TGT_SPEC_HIDDEN | MDB_TGT_SPEC_TEMPORARY,
		    no_se_f, NULL) == 0) {
			mdb_dprintf(MDB_DBG_TGT, "next falling back to step: "
			    "%s\n", mdb_strerror(errno));
		} else
			t_cont = t->t_ops->t_cont;
	}

	/*
	 * To handle step-out, we ask the target to find the return address of
	 * the current frame, plant a temporary breakpoint there, and continue.
	 */
	if (t->t_flags & MDB_TGT_F_STEP_OUT) {
		if (t->t_ops->t_step_out(t, &addr) == -1)
			return (-1); /* errno is set for us */

		if (mdb_tgt_add_vbrkpt(t, addr, MDB_TGT_SPEC_HIDDEN |
		    MDB_TGT_SPEC_TEMPORARY, no_se_f, NULL) == 0)
			return (-1); /* errno is set for us */
	}

	/*
	 * Block SIGHUP/SIGTERM and defer keyboard interrupts while the
	 * target runs, so that the event bookkeeping below cannot be
	 * interrupted partway through; they are unblocked at 'out'.
	 */
	(void) mdb_signal_block(SIGHUP);
	(void) mdb_signal_block(SIGTERM);
	mdb_intr_disable();

	t->t_flags &= ~T_CONT_BITS;
	t->t_flags |= MDB_TGT_F_BUSY;
	mdb_tgt_sespec_arm_all(t);

	/* Take over the matched list; t_matched uses T_SE_END as terminator */
	ASSERT(t->t_matched != NULL);
	matched = t->t_matched;
	t->t_matched = T_SE_END;

	if (mdb.m_term != NULL)
		IOP_SUSPEND(mdb.m_term);

	/*
	 * Iterate over the matched sespec list, performing autostop processing
	 * and clearing the matched bit for each associated vespec.  We then
	 * invoke each sespec's se_cont callback in order to continue past
	 * the corresponding event.  If the matched list has more than one
	 * sespec, we assume that the se_cont callbacks are non-interfering.
	 */
	for (sep = matched; sep != T_SE_END; sep = sep->se_matched) {
		for (vep = mdb_list_next(&sep->se_velist); vep != NULL; ) {
			/* Autostop vespecs at their limit restart at zero */
			if ((vep->ve_flags & MDB_TGT_SPEC_AUTOSTOP) &&
			    (vep->ve_limit && vep->ve_hits == vep->ve_limit))
				vep->ve_hits = 0;

			vep->ve_flags &= ~MDB_TGT_SPEC_MATCHED;
			vep = mdb_list_next(vep);
		}

		if (sep->se_ops->se_cont(t, sep, &t->t_status) == -1) {
			error = errno ? errno : -1;
			tgt_disarm_sespecs(t);
			break;
		}

		if (!(t->t_status.st_flags & MDB_TGT_ISTOP)) {
			tgt_disarm_sespecs(t);
			if (t->t_status.st_state == MDB_TGT_UNDEAD)
				mdb_tgt_sespec_idle_all(t, EMDB_TGTZOMB, TRUE);
			else if (t->t_status.st_state == MDB_TGT_LOST)
				mdb_tgt_sespec_idle_all(t, EMDB_TGTLOST, TRUE);
			break;
		}
	}

	/*
	 * Clear the se_matched field for each matched sespec, and drop the
	 * reference count since the sespec is no longer on the matched list.
	 */
	for (sep = matched; sep != T_SE_END; sep = nsep) {
		nsep = sep->se_matched;
		sep->se_matched = NULL;
		mdb_tgt_sespec_rele(t, sep);
	}

	/*
	 * If the matched list was non-empty, see if we hit another event while
	 * performing se_cont() processing.  If so, don't bother continuing any
	 * further.  If not, arm the sespecs on the old matched list by calling
	 * mdb_tgt_sespec_arm_all() again and then continue by calling t_cont.
	 */
	if (matched != T_SE_END) {
		if (error != 0 || !(t->t_status.st_flags & MDB_TGT_ISTOP))
			goto out; /* abort now if se_cont() failed */

		if ((t->t_matched = tgt_match_sespecs(t, FALSE)) != T_SE_END) {
			tgt_disarm_sespecs(t);
			goto out;
		}

		mdb_tgt_sespec_arm_all(t);
	}

	/*
	 * If we are single-stepping and an se_cont() above already advanced
	 * the PC, the step is effectively complete; otherwise invoke the
	 * low-level continue routine selected above.
	 */
	if (t_cont != t->t_ops->t_step || pc == t->t_status.st_pc) {
		if (t_cont(t, &t->t_status) != 0)
			error = errno ? errno : -1;
	}

	tgt_disarm_sespecs(t);

	if (t->t_flags & MDB_TGT_F_UNLOAD)
		longjmp(mdb.m_frame->f_pcb, MDB_ERR_QUIT);

	if (t->t_status.st_state == MDB_TGT_UNDEAD)
		mdb_tgt_sespec_idle_all(t, EMDB_TGTZOMB, TRUE);
	else if (t->t_status.st_state == MDB_TGT_LOST)
		mdb_tgt_sespec_idle_all(t, EMDB_TGTLOST, TRUE);
	else if (t->t_status.st_flags & MDB_TGT_ISTOP)
		t->t_matched = tgt_match_sespecs(t, TRUE);
out:
	if (mdb.m_term != NULL)
		IOP_RESUME(mdb.m_term);

	(void) mdb_signal_unblock(SIGTERM);
	(void) mdb_signal_unblock(SIGHUP);
	mdb_intr_enable();

	for (sep = t->t_matched; sep != T_SE_END; sep = sep->se_matched) {
		/*
		 * When we invoke a ve_callback, it may in turn request that the
		 * target continue immediately after callback processing is
		 * complete.  We only allow this to occur if *all* callbacks
		 * agree to continue.  To implement this behavior, we keep a
		 * count (ncont) of such requests, and only apply the cumulative
		 * continue bits (cbits) to the target if ncont is equal to the
		 * total number of callbacks that are invoked (n).
		 */
		for (vep = mdb_list_next(&sep->se_velist);
		    vep != NULL; vep = nvep, n++) {
			/*
			 * Place an extra hold on the current vespec and pick
			 * up the next pointer before invoking the callback: we
			 * must be prepared for the vespec to be deleted or
			 * moved to a different list by the callback.
			 */
			mdb_tgt_vespec_hold(t, vep);
			nvep = mdb_list_next(vep);

			vep->ve_flags |= MDB_TGT_SPEC_MATCHED;
			vep->ve_hits++;

			/* Expose '.' (dot) and 'hits' to the callback/user */
			mdb_nv_set_value(mdb.m_dot, t->t_status.st_pc);
			mdb_nv_set_value(hitv, vep->ve_hits);

			ASSERT((t->t_flags & T_CONT_BITS) == 0);
			vep->ve_callback(t, vep->ve_id, vep->ve_data);

			/* Harvest any continue request left by the callback */
			ncont += (t->t_flags & T_CONT_BITS) != 0;
			cbits |= (t->t_flags & T_CONT_BITS);
			t->t_flags &= ~T_CONT_BITS;

			if (vep->ve_limit && vep->ve_hits == vep->ve_limit) {
				if (vep->ve_flags & MDB_TGT_SPEC_AUTODEL)
					(void) mdb_tgt_vespec_delete(t,
					    vep->ve_id);
				else if (vep->ve_flags & MDB_TGT_SPEC_AUTODIS)
					(void) mdb_tgt_vespec_disable(t,
					    vep->ve_id);
			}

			if (vep->ve_limit && vep->ve_hits < vep->ve_limit) {
				if (vep->ve_flags & MDB_TGT_SPEC_AUTOSTOP)
					(void) mdb_tgt_continue(t, NULL);
			}

			mdb_tgt_vespec_rele(t, vep);
		}
	}

	if (t->t_matched != T_SE_END && ncont == n)
		t->t_flags |= cbits; /* apply continues (see above) */

	mdb_tgt_sespec_prune_all(t);

	t->t_status.st_flags &= ~MDB_TGT_BUSY;
	t->t_flags &= ~MDB_TGT_F_BUSY;

	if (tsp != NULL)
		bcopy(&t->t_status, tsp, sizeof (mdb_tgt_status_t));

	if (error != 0)
		return (set_errno(error));

	return (0);
}

/*
 * This function is the common glue that connects the high-level target layer
 * continue functions (e.g. step and cont below) with the low-level
 * tgt_continue() function above.  Since vespec callbacks may perform any
 * actions, including attempting to continue the target itself, we must be
 * prepared to be called while the target is still marked F_BUSY.  In this
 * case, we just set a pending bit and return.
When we return from the call
 * to tgt_continue() that made us busy into the tgt_request_continue() call
 * that is still on the stack, we will loop around and call tgt_continue()
 * again.  This allows vespecs to continue the target without recursion.
 */
static int
tgt_request_continue(mdb_tgt_t *t, mdb_tgt_status_t *tsp, uint_t tflag,
    int (*t_cont)(mdb_tgt_t *, mdb_tgt_status_t *))
{
	mdb_tgt_spec_desc_t desc;
	mdb_sespec_t *sep;
	char buf[BUFSIZ];
	int rc;

	/*
	 * A continue is already in progress: just record the request as a
	 * pending continue bit and let the outer invocation act on it.
	 */
	if (t->t_flags & MDB_TGT_F_BUSY) {
		t->t_flags |= tflag;
		return (0);
	}

	/*
	 * Keep continuing for as long as the previous continue succeeded
	 * and some vespec callback left a pending continue bit behind.
	 */
	for (;;) {
		rc = tgt_continue(t, tsp, t_cont);
		if (rc != 0 || !(t->t_flags & T_CONT_BITS))
			break;
	}

	if (rc == 0) {
		/*
		 * Report each matched, non-silent vespec to the user and
		 * then fire the state-change callbacks.
		 */
		for (sep = t->t_matched; sep != T_SE_END;
		    sep = sep->se_matched) {
			mdb_vespec_t *vep;

			for (vep = mdb_list_next(&sep->se_velist); vep != NULL;
			    vep = mdb_list_next(vep)) {
				if (!(vep->ve_flags & MDB_TGT_SPEC_SILENT)) {
					warn("%s\n", sep->se_ops->se_info(t,
					    sep, vep, &desc, buf,
					    sizeof (buf)));
				}
			}
		}

		mdb_callb_fire(MDB_CALLB_STCHG);
	}

	t->t_flags &= ~T_CONT_BITS;
	return (rc);
}

/*
 * Restart target execution: we rely upon the underlying target implementation
 * to do most of the work for us.  In particular, we assume it will properly
 * preserve the state of our event lists if the run fails for some reason,
 * and that it will reset all events to the IDLE state if the run succeeds.
 * If it is successful, we attempt to activate all of the idle sespecs.  The
 * t_run() operation is defined to leave the target stopped at the earliest
 * possible point in execution, and then return control to the debugger,
 * awaiting a step or continue operation to set it running again.
1356 */ 1357 int 1358 mdb_tgt_run(mdb_tgt_t *t, int argc, const mdb_arg_t *argv) 1359 { 1360 int i; 1361 1362 for (i = 0; i < argc; i++) { 1363 if (argv->a_type != MDB_TYPE_STRING) 1364 return (set_errno(EINVAL)); 1365 } 1366 1367 if (t->t_ops->t_run(t, argc, argv) == -1) 1368 return (-1); /* errno is set for us */ 1369 1370 t->t_flags &= ~T_CONT_BITS; 1371 (void) mdb_tgt_sespec_activate_all(t); 1372 1373 if (mdb.m_term != NULL) 1374 IOP_CTL(mdb.m_term, MDB_IOC_CTTY, NULL); 1375 1376 return (0); 1377 } 1378 1379 int 1380 mdb_tgt_step(mdb_tgt_t *t, mdb_tgt_status_t *tsp) 1381 { 1382 return (tgt_request_continue(t, tsp, MDB_TGT_F_STEP, t->t_ops->t_step)); 1383 } 1384 1385 int 1386 mdb_tgt_step_out(mdb_tgt_t *t, mdb_tgt_status_t *tsp) 1387 { 1388 t->t_flags |= MDB_TGT_F_STEP_OUT; /* set flag even if tgt not busy */ 1389 return (tgt_request_continue(t, tsp, 0, t->t_ops->t_cont)); 1390 } 1391 1392 int 1393 mdb_tgt_next(mdb_tgt_t *t, mdb_tgt_status_t *tsp) 1394 { 1395 t->t_flags |= MDB_TGT_F_NEXT; /* set flag even if tgt not busy */ 1396 return (tgt_request_continue(t, tsp, 0, t->t_ops->t_step)); 1397 } 1398 1399 int 1400 mdb_tgt_continue(mdb_tgt_t *t, mdb_tgt_status_t *tsp) 1401 { 1402 return (tgt_request_continue(t, tsp, MDB_TGT_F_CONT, t->t_ops->t_cont)); 1403 } 1404 1405 int 1406 mdb_tgt_signal(mdb_tgt_t *t, int sig) 1407 { 1408 return (t->t_ops->t_signal(t, sig)); 1409 } 1410 1411 void * 1412 mdb_tgt_vespec_data(mdb_tgt_t *t, int vid) 1413 { 1414 mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, vid); 1415 1416 if (vep == NULL) { 1417 (void) set_errno(EMDB_NOSESPEC); 1418 return (NULL); 1419 } 1420 1421 return (vep->ve_data); 1422 } 1423 1424 /* 1425 * Return a structured description and comment string for the given vespec. 1426 * We fill in the common information from the vespec, and then call down to 1427 * the underlying sespec to provide the comment string and modify any 1428 * event type-specific information. 
1429 */ 1430 char * 1431 mdb_tgt_vespec_info(mdb_tgt_t *t, int vid, mdb_tgt_spec_desc_t *sp, 1432 char *buf, size_t nbytes) 1433 { 1434 mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, vid); 1435 1436 mdb_tgt_spec_desc_t desc; 1437 mdb_sespec_t *sep; 1438 1439 if (vep == NULL) { 1440 if (sp != NULL) 1441 bzero(sp, sizeof (mdb_tgt_spec_desc_t)); 1442 (void) set_errno(EMDB_NOSESPEC); 1443 return (NULL); 1444 } 1445 1446 if (sp == NULL) 1447 sp = &desc; 1448 1449 sep = vep->ve_se; 1450 1451 sp->spec_id = vep->ve_id; 1452 sp->spec_flags = vep->ve_flags; 1453 sp->spec_hits = vep->ve_hits; 1454 sp->spec_limit = vep->ve_limit; 1455 sp->spec_state = sep->se_state; 1456 sp->spec_errno = sep->se_errno; 1457 sp->spec_base = 0; 1458 sp->spec_size = 0; 1459 sp->spec_data = vep->ve_data; 1460 1461 return (sep->se_ops->se_info(t, sep, vep, sp, buf, nbytes)); 1462 } 1463 1464 /* 1465 * Qsort callback for sorting vespecs by VID, used below. 1466 */ 1467 static int 1468 tgt_vespec_compare(const mdb_vespec_t **lp, const mdb_vespec_t **rp) 1469 { 1470 return ((*lp)->ve_id - (*rp)->ve_id); 1471 } 1472 1473 /* 1474 * Iterate over all vespecs and call the specified callback function with the 1475 * corresponding VID and caller data pointer. We want the callback function 1476 * to see a consistent, sorted snapshot of the vespecs, and allow the callback 1477 * to take actions such as deleting the vespec itself, so we cannot simply 1478 * iterate over the lists. Instead, we pre-allocate an array of vespec 1479 * pointers, fill it in and place an additional hold on each vespec, and then 1480 * sort it. After the callback has been executed on each vespec in the 1481 * sorted array, we remove our hold and free the temporary array. 
1482 */ 1483 int 1484 mdb_tgt_vespec_iter(mdb_tgt_t *t, mdb_tgt_vespec_f *func, void *p) 1485 { 1486 mdb_vespec_t **veps, **vepp, **vend; 1487 mdb_vespec_t *vep, *nvep; 1488 mdb_sespec_t *sep; 1489 1490 uint_t vecnt = t->t_vecnt; 1491 1492 veps = mdb_alloc(sizeof (mdb_vespec_t *) * vecnt, UM_SLEEP); 1493 vend = veps + vecnt; 1494 vepp = veps; 1495 1496 for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) { 1497 for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) { 1498 mdb_tgt_vespec_hold(t, vep); 1499 nvep = mdb_list_next(vep); 1500 *vepp++ = vep; 1501 } 1502 } 1503 1504 for (sep = mdb_list_next(&t->t_idle); sep; sep = mdb_list_next(sep)) { 1505 for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) { 1506 mdb_tgt_vespec_hold(t, vep); 1507 nvep = mdb_list_next(vep); 1508 *vepp++ = vep; 1509 } 1510 } 1511 1512 if (vepp != vend) { 1513 fail("target has %u vespecs on list but vecnt shows %u\n", 1514 (uint_t)(vepp - veps), vecnt); 1515 } 1516 1517 qsort(veps, vecnt, sizeof (mdb_vespec_t *), 1518 (int (*)(const void *, const void *))tgt_vespec_compare); 1519 1520 for (vepp = veps; vepp < vend; vepp++) { 1521 if (func(t, p, (*vepp)->ve_id, (*vepp)->ve_data) != 0) 1522 break; 1523 } 1524 1525 for (vepp = veps; vepp < vend; vepp++) 1526 mdb_tgt_vespec_rele(t, *vepp); 1527 1528 mdb_free(veps, sizeof (mdb_vespec_t *) * vecnt); 1529 return (0); 1530 } 1531 1532 /* 1533 * Reset the vespec flags, match limit, and callback data to the specified 1534 * values. We silently correct invalid parameters, except for the VID. 1535 * The caller is required to query the existing properties and pass back 1536 * the existing values for any properties that should not be modified. 1537 * If the callback data is modified, the caller is responsible for cleaning 1538 * up any state associated with the previous value. 
1539 */ 1540 int 1541 mdb_tgt_vespec_modify(mdb_tgt_t *t, int id, uint_t flags, 1542 uint_t limit, void *data) 1543 { 1544 mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id); 1545 1546 if (vep == NULL) 1547 return (set_errno(EMDB_NOSESPEC)); 1548 1549 /* 1550 * If the value of the MDB_TGT_SPEC_DISABLED bit is changing, call the 1551 * appropriate vespec function to do the enable/disable work. 1552 */ 1553 if ((flags & MDB_TGT_SPEC_DISABLED) != 1554 (vep->ve_flags & MDB_TGT_SPEC_DISABLED)) { 1555 if (flags & MDB_TGT_SPEC_DISABLED) 1556 (void) mdb_tgt_vespec_disable(t, id); 1557 else 1558 (void) mdb_tgt_vespec_enable(t, id); 1559 } 1560 1561 /* 1562 * Make that only one MDB_TGT_SPEC_AUTO* bit is set in the new flags 1563 * value: extra bits are cleared according to order of precedence. 1564 */ 1565 if (flags & MDB_TGT_SPEC_AUTOSTOP) 1566 flags &= ~(MDB_TGT_SPEC_AUTODEL | MDB_TGT_SPEC_AUTODIS); 1567 else if (flags & MDB_TGT_SPEC_AUTODEL) 1568 flags &= ~MDB_TGT_SPEC_AUTODIS; 1569 1570 /* 1571 * The TEMPORARY property always takes precedence over STICKY. 1572 */ 1573 if (flags & MDB_TGT_SPEC_TEMPORARY) 1574 flags &= ~MDB_TGT_SPEC_STICKY; 1575 1576 /* 1577 * If any MDB_TGT_SPEC_AUTO* bits are changing, reset the hit count 1578 * back to zero and clear all of the old auto bits. 1579 */ 1580 if ((flags & T_AUTO_BITS) != (vep->ve_flags & T_AUTO_BITS)) { 1581 vep->ve_flags &= ~T_AUTO_BITS; 1582 vep->ve_hits = 0; 1583 } 1584 1585 vep->ve_flags = (vep->ve_flags & T_IMPL_BITS) | (flags & ~T_IMPL_BITS); 1586 vep->ve_data = data; 1587 1588 /* 1589 * If any MDB_TGT_SPEC_AUTO* flags are set, make sure the limit is at 1590 * least one. If none are set, reset it back to zero. 1591 */ 1592 if (vep->ve_flags & T_AUTO_BITS) 1593 vep->ve_limit = MAX(limit, 1); 1594 else 1595 vep->ve_limit = 0; 1596 1597 /* 1598 * As a convenience, we allow the caller to specify SPEC_DELETED in 1599 * the flags field as indication that the event should be deleted. 
1600 */ 1601 if (flags & MDB_TGT_SPEC_DELETED) 1602 (void) mdb_tgt_vespec_delete(t, id); 1603 1604 return (0); 1605 } 1606 1607 /* 1608 * Remove the user disabled bit from the specified vespec, and attempt to 1609 * activate the underlying sespec and move it to the active list if possible. 1610 */ 1611 int 1612 mdb_tgt_vespec_enable(mdb_tgt_t *t, int id) 1613 { 1614 mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id); 1615 1616 if (vep == NULL) 1617 return (set_errno(EMDB_NOSESPEC)); 1618 1619 if (vep->ve_flags & MDB_TGT_SPEC_DISABLED) { 1620 ASSERT(mdb_list_next(vep) == NULL); 1621 vep->ve_flags &= ~MDB_TGT_SPEC_DISABLED; 1622 if (mdb_tgt_sespec_activate_one(t, vep->ve_se) < 0) 1623 return (-1); /* errno is set for us */ 1624 } 1625 1626 return (0); 1627 } 1628 1629 /* 1630 * Set the user disabled bit on the specified vespec, and move it to the idle 1631 * list. If the vespec is not alone with its sespec or if it is a currently 1632 * matched event, we must always create a new idle sespec and move the vespec 1633 * there. If the vespec was alone and active, we can simply idle the sespec. 
1634 */ 1635 int 1636 mdb_tgt_vespec_disable(mdb_tgt_t *t, int id) 1637 { 1638 mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id); 1639 mdb_sespec_t *sep; 1640 1641 if (vep == NULL) 1642 return (set_errno(EMDB_NOSESPEC)); 1643 1644 if (vep->ve_flags & MDB_TGT_SPEC_DISABLED) 1645 return (0); /* already disabled */ 1646 1647 if (mdb_list_prev(vep) != NULL || mdb_list_next(vep) != NULL || 1648 vep->ve_se->se_matched != NULL) { 1649 1650 sep = mdb_tgt_sespec_insert(t, vep->ve_se->se_ops, &t->t_idle); 1651 1652 mdb_list_delete(&vep->ve_se->se_velist, vep); 1653 mdb_tgt_sespec_rele(t, vep->ve_se); 1654 1655 mdb_list_append(&sep->se_velist, vep); 1656 mdb_tgt_sespec_hold(t, sep); 1657 1658 vep->ve_flags &= ~MDB_TGT_SPEC_MATCHED; 1659 vep->ve_se = sep; 1660 1661 } else if (vep->ve_se->se_state != MDB_TGT_SPEC_IDLE) 1662 mdb_tgt_sespec_idle_one(t, vep->ve_se, EMDB_SPECDIS); 1663 1664 vep->ve_flags |= MDB_TGT_SPEC_DISABLED; 1665 return (0); 1666 } 1667 1668 /* 1669 * Delete the given vespec. We use the MDB_TGT_SPEC_DELETED flag to ensure that 1670 * multiple calls to mdb_tgt_vespec_delete to not attempt to decrement the 1671 * reference count on the vespec more than once. This is because the vespec 1672 * may remain referenced if it is currently held by another routine (e.g. 1673 * vespec_iter), and so the user could attempt to delete it more than once 1674 * since it reference count will be >= 2 prior to the first delete call. 
1675 */ 1676 int 1677 mdb_tgt_vespec_delete(mdb_tgt_t *t, int id) 1678 { 1679 mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id); 1680 1681 if (vep == NULL) 1682 return (set_errno(EMDB_NOSESPEC)); 1683 1684 if (vep->ve_flags & MDB_TGT_SPEC_DELETED) 1685 return (set_errno(EBUSY)); 1686 1687 vep->ve_flags |= MDB_TGT_SPEC_DELETED; 1688 mdb_tgt_vespec_rele(t, vep); 1689 return (0); 1690 } 1691 1692 int 1693 mdb_tgt_add_vbrkpt(mdb_tgt_t *t, uintptr_t addr, 1694 int spec_flags, mdb_tgt_se_f *func, void *p) 1695 { 1696 return (t->t_ops->t_add_vbrkpt(t, addr, spec_flags, func, p)); 1697 } 1698 1699 int 1700 mdb_tgt_add_sbrkpt(mdb_tgt_t *t, const char *symbol, 1701 int spec_flags, mdb_tgt_se_f *func, void *p) 1702 { 1703 return (t->t_ops->t_add_sbrkpt(t, symbol, spec_flags, func, p)); 1704 } 1705 1706 int 1707 mdb_tgt_add_pwapt(mdb_tgt_t *t, physaddr_t pa, size_t n, uint_t flags, 1708 int spec_flags, mdb_tgt_se_f *func, void *p) 1709 { 1710 if ((flags & ~MDB_TGT_WA_RWX) || flags == 0) { 1711 (void) set_errno(EINVAL); 1712 return (0); 1713 } 1714 1715 if (pa + n < pa) { 1716 (void) set_errno(EMDB_WPRANGE); 1717 return (0); 1718 } 1719 1720 return (t->t_ops->t_add_pwapt(t, pa, n, flags, spec_flags, func, p)); 1721 } 1722 1723 int 1724 mdb_tgt_add_vwapt(mdb_tgt_t *t, uintptr_t va, size_t n, uint_t flags, 1725 int spec_flags, mdb_tgt_se_f *func, void *p) 1726 { 1727 if ((flags & ~MDB_TGT_WA_RWX) || flags == 0) { 1728 (void) set_errno(EINVAL); 1729 return (0); 1730 } 1731 1732 if (va + n < va) { 1733 (void) set_errno(EMDB_WPRANGE); 1734 return (0); 1735 } 1736 1737 return (t->t_ops->t_add_vwapt(t, va, n, flags, spec_flags, func, p)); 1738 } 1739 1740 int 1741 mdb_tgt_add_iowapt(mdb_tgt_t *t, uintptr_t addr, size_t n, uint_t flags, 1742 int spec_flags, mdb_tgt_se_f *func, void *p) 1743 { 1744 if ((flags & ~MDB_TGT_WA_RWX) || flags == 0) { 1745 (void) set_errno(EINVAL); 1746 return (0); 1747 } 1748 1749 if (addr + n < addr) { 1750 (void) set_errno(EMDB_WPRANGE); 1751 return (0); 
1752 } 1753 1754 return (t->t_ops->t_add_iowapt(t, addr, n, flags, spec_flags, func, p)); 1755 } 1756 1757 int 1758 mdb_tgt_add_sysenter(mdb_tgt_t *t, int sysnum, 1759 int spec_flags, mdb_tgt_se_f *func, void *p) 1760 { 1761 return (t->t_ops->t_add_sysenter(t, sysnum, spec_flags, func, p)); 1762 } 1763 1764 int 1765 mdb_tgt_add_sysexit(mdb_tgt_t *t, int sysnum, 1766 int spec_flags, mdb_tgt_se_f *func, void *p) 1767 { 1768 return (t->t_ops->t_add_sysexit(t, sysnum, spec_flags, func, p)); 1769 } 1770 1771 int 1772 mdb_tgt_add_signal(mdb_tgt_t *t, int sig, 1773 int spec_flags, mdb_tgt_se_f *func, void *p) 1774 { 1775 return (t->t_ops->t_add_signal(t, sig, spec_flags, func, p)); 1776 } 1777 1778 int 1779 mdb_tgt_add_fault(mdb_tgt_t *t, int flt, 1780 int spec_flags, mdb_tgt_se_f *func, void *p) 1781 { 1782 return (t->t_ops->t_add_fault(t, flt, spec_flags, func, p)); 1783 } 1784 1785 int 1786 mdb_tgt_getareg(mdb_tgt_t *t, mdb_tgt_tid_t tid, 1787 const char *rname, mdb_tgt_reg_t *rp) 1788 { 1789 return (t->t_ops->t_getareg(t, tid, rname, rp)); 1790 } 1791 1792 int 1793 mdb_tgt_putareg(mdb_tgt_t *t, mdb_tgt_tid_t tid, 1794 const char *rname, mdb_tgt_reg_t r) 1795 { 1796 return (t->t_ops->t_putareg(t, tid, rname, r)); 1797 } 1798 1799 int 1800 mdb_tgt_thread_name(mdb_tgt_t *t, mdb_tgt_tid_t tid, char *buf, size_t bufsize) 1801 { 1802 return (t->t_ops->t_thread_name(t, tid, buf, bufsize)); 1803 } 1804 1805 int 1806 mdb_tgt_stack_iter(mdb_tgt_t *t, const mdb_tgt_gregset_t *gregs, 1807 mdb_tgt_stack_f *cb, void *p) 1808 { 1809 return (t->t_ops->t_stack_iter(t, gregs, cb, p)); 1810 } 1811 1812 int 1813 mdb_tgt_xdata_iter(mdb_tgt_t *t, mdb_tgt_xdata_f *func, void *private) 1814 { 1815 mdb_xdata_t *xdp; 1816 1817 for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) { 1818 if (func(private, xdp->xd_name, xdp->xd_desc, 1819 xdp->xd_copy(t, NULL, 0)) != 0) 1820 break; 1821 } 1822 1823 return (0); 1824 } 1825 1826 ssize_t 1827 mdb_tgt_getxdata(mdb_tgt_t *t, const 
char *name, void *buf, size_t nbytes) 1828 { 1829 mdb_xdata_t *xdp; 1830 1831 for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) { 1832 if (strcmp(xdp->xd_name, name) == 0) 1833 return (xdp->xd_copy(t, buf, nbytes)); 1834 } 1835 1836 return (set_errno(ENODATA)); 1837 } 1838 1839 long 1840 mdb_tgt_notsup() 1841 { 1842 return (set_errno(EMDB_TGTNOTSUP)); 1843 } 1844 1845 void * 1846 mdb_tgt_null() 1847 { 1848 (void) set_errno(EMDB_TGTNOTSUP); 1849 return (NULL); 1850 } 1851 1852 long 1853 mdb_tgt_nop() 1854 { 1855 return (0L); 1856 } 1857 1858 int 1859 mdb_tgt_xdata_insert(mdb_tgt_t *t, const char *name, const char *desc, 1860 ssize_t (*copy)(mdb_tgt_t *, void *, size_t)) 1861 { 1862 mdb_xdata_t *xdp; 1863 1864 for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) { 1865 if (strcmp(xdp->xd_name, name) == 0) 1866 return (set_errno(EMDB_XDEXISTS)); 1867 } 1868 1869 xdp = mdb_alloc(sizeof (mdb_xdata_t), UM_SLEEP); 1870 mdb_list_append(&t->t_xdlist, xdp); 1871 1872 xdp->xd_name = name; 1873 xdp->xd_desc = desc; 1874 xdp->xd_copy = copy; 1875 1876 return (0); 1877 } 1878 1879 int 1880 mdb_tgt_xdata_delete(mdb_tgt_t *t, const char *name) 1881 { 1882 mdb_xdata_t *xdp; 1883 1884 for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) { 1885 if (strcmp(xdp->xd_name, name) == 0) { 1886 mdb_list_delete(&t->t_xdlist, xdp); 1887 mdb_free(xdp, sizeof (mdb_xdata_t)); 1888 return (0); 1889 } 1890 } 1891 1892 return (set_errno(EMDB_NOXD)); 1893 } 1894 1895 int 1896 mdb_tgt_sym_match(const GElf_Sym *sym, uint_t mask) 1897 { 1898 #if STT_NUM != (STT_TLS + 1) 1899 #error "STT_NUM has grown. 
update mdb_tgt_sym_match()" 1900 #endif 1901 1902 uchar_t s_bind = GELF_ST_BIND(sym->st_info); 1903 uchar_t s_type = GELF_ST_TYPE(sym->st_info); 1904 1905 /* 1906 * In case you haven't already guessed, this relies on the bitmask 1907 * used by <mdb/mdb_target.h> and <libproc.h> for encoding symbol 1908 * type and binding matching the order of STB and STT constants 1909 * in <sys/elf.h>. Changes to ELF must maintain binary 1910 * compatibility, so I think this is reasonably fair game. 1911 */ 1912 if (s_bind < STB_NUM && s_type < STT_NUM) { 1913 uint_t type = (1 << (s_type + 8)) | (1 << s_bind); 1914 return ((type & ~mask) == 0); 1915 } 1916 1917 return (0); /* Unknown binding or type; fail to match */ 1918 } 1919 1920 void 1921 mdb_tgt_elf_export(mdb_gelf_file_t *gf) 1922 { 1923 GElf_Xword d = 0, t = 0; 1924 GElf_Addr b = 0, e = 0; 1925 uint32_t m = 0; 1926 mdb_var_t *v; 1927 1928 /* 1929 * Reset legacy adb variables based on the specified ELF object file 1930 * provided by the target. We define these variables: 1931 * 1932 * b - the address of the data segment (first writeable Phdr) 1933 * d - the size of the data segment 1934 * e - the address of the entry point 1935 * m - the magic number identifying the file 1936 * t - the address of the text segment (first executable Phdr) 1937 */ 1938 if (gf != NULL) { 1939 const GElf_Phdr *text = NULL, *data = NULL; 1940 size_t i; 1941 1942 e = gf->gf_ehdr.e_entry; 1943 bcopy(&gf->gf_ehdr.e_ident[EI_MAG0], &m, sizeof (m)); 1944 1945 for (i = 0; i < gf->gf_npload; i++) { 1946 if (text == NULL && (gf->gf_phdrs[i].p_flags & PF_X)) 1947 text = &gf->gf_phdrs[i]; 1948 if (data == NULL && (gf->gf_phdrs[i].p_flags & PF_W)) 1949 data = &gf->gf_phdrs[i]; 1950 } 1951 1952 if (text != NULL) 1953 t = text->p_memsz; 1954 if (data != NULL) { 1955 b = data->p_vaddr; 1956 d = data->p_memsz; 1957 } 1958 } 1959 1960 if ((v = mdb_nv_lookup(&mdb.m_nv, "b")) != NULL) 1961 mdb_nv_set_value(v, b); 1962 if ((v = mdb_nv_lookup(&mdb.m_nv, "d")) != 
NULL) 1963 mdb_nv_set_value(v, d); 1964 if ((v = mdb_nv_lookup(&mdb.m_nv, "e")) != NULL) 1965 mdb_nv_set_value(v, e); 1966 if ((v = mdb_nv_lookup(&mdb.m_nv, "m")) != NULL) 1967 mdb_nv_set_value(v, m); 1968 if ((v = mdb_nv_lookup(&mdb.m_nv, "t")) != NULL) 1969 mdb_nv_set_value(v, t); 1970 } 1971 1972 /*ARGSUSED*/ 1973 void 1974 mdb_tgt_sespec_hold(mdb_tgt_t *t, mdb_sespec_t *sep) 1975 { 1976 sep->se_refs++; 1977 ASSERT(sep->se_refs != 0); 1978 } 1979 1980 void 1981 mdb_tgt_sespec_rele(mdb_tgt_t *t, mdb_sespec_t *sep) 1982 { 1983 ASSERT(sep->se_refs != 0); 1984 1985 if (--sep->se_refs == 0) { 1986 mdb_dprintf(MDB_DBG_TGT, "destroying sespec %p\n", (void *)sep); 1987 ASSERT(mdb_list_next(&sep->se_velist) == NULL); 1988 1989 if (sep->se_state != MDB_TGT_SPEC_IDLE) { 1990 sep->se_ops->se_dtor(t, sep); 1991 mdb_list_delete(&t->t_active, sep); 1992 } else 1993 mdb_list_delete(&t->t_idle, sep); 1994 1995 mdb_free(sep, sizeof (mdb_sespec_t)); 1996 } 1997 } 1998 1999 mdb_sespec_t * 2000 mdb_tgt_sespec_insert(mdb_tgt_t *t, const mdb_se_ops_t *ops, mdb_list_t *list) 2001 { 2002 mdb_sespec_t *sep = mdb_zalloc(sizeof (mdb_sespec_t), UM_SLEEP); 2003 2004 if (list == &t->t_active) 2005 sep->se_state = MDB_TGT_SPEC_ACTIVE; 2006 else 2007 sep->se_state = MDB_TGT_SPEC_IDLE; 2008 2009 mdb_list_append(list, sep); 2010 sep->se_ops = ops; 2011 return (sep); 2012 } 2013 2014 mdb_sespec_t * 2015 mdb_tgt_sespec_lookup_active(mdb_tgt_t *t, const mdb_se_ops_t *ops, void *args) 2016 { 2017 mdb_sespec_t *sep; 2018 2019 for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) { 2020 if (sep->se_ops == ops && sep->se_ops->se_secmp(t, sep, args)) 2021 break; 2022 } 2023 2024 return (sep); 2025 } 2026 2027 mdb_sespec_t * 2028 mdb_tgt_sespec_lookup_idle(mdb_tgt_t *t, const mdb_se_ops_t *ops, void *args) 2029 { 2030 mdb_sespec_t *sep; 2031 2032 for (sep = mdb_list_next(&t->t_idle); sep; sep = mdb_list_next(sep)) { 2033 if (sep->se_ops == ops && sep->se_ops->se_vecmp(t, 2034 
mdb_list_next(&sep->se_velist), args)) 2035 break; 2036 } 2037 2038 return (sep); 2039 } 2040 2041 /*ARGSUSED*/ 2042 void 2043 mdb_tgt_vespec_hold(mdb_tgt_t *t, mdb_vespec_t *vep) 2044 { 2045 vep->ve_refs++; 2046 ASSERT(vep->ve_refs != 0); 2047 } 2048 2049 void 2050 mdb_tgt_vespec_rele(mdb_tgt_t *t, mdb_vespec_t *vep) 2051 { 2052 ASSERT(vep->ve_refs != 0); 2053 2054 if (--vep->ve_refs == 0) { 2055 /* 2056 * Remove this vespec from the sespec's velist and decrement 2057 * the reference count on the sespec. 2058 */ 2059 mdb_list_delete(&vep->ve_se->se_velist, vep); 2060 mdb_tgt_sespec_rele(t, vep->ve_se); 2061 2062 /* 2063 * If we are deleting the most recently assigned VID, reset 2064 * t_vepos or t_veneg as appropriate to re-use that number. 2065 * This could be enhanced to re-use any free number by 2066 * maintaining a bitmap or hash of the allocated IDs. 2067 */ 2068 if (vep->ve_id > 0 && t->t_vepos == vep->ve_id + 1) 2069 t->t_vepos = vep->ve_id; 2070 else if (vep->ve_id < 0 && t->t_veneg == -vep->ve_id + 1) 2071 t->t_veneg = -vep->ve_id; 2072 2073 /* 2074 * Call the destructor to clean up ve_args, and then free 2075 * the actual vespec structure. 2076 */ 2077 vep->ve_dtor(vep); 2078 mdb_free(vep, sizeof (mdb_vespec_t)); 2079 2080 ASSERT(t->t_vecnt != 0); 2081 t->t_vecnt--; 2082 } 2083 } 2084 2085 int 2086 mdb_tgt_vespec_insert(mdb_tgt_t *t, const mdb_se_ops_t *ops, int flags, 2087 mdb_tgt_se_f *func, void *data, void *args, void (*dtor)(mdb_vespec_t *)) 2088 { 2089 mdb_vespec_t *vep = mdb_zalloc(sizeof (mdb_vespec_t), UM_SLEEP); 2090 2091 int id, mult, *seqp; 2092 mdb_sespec_t *sep; 2093 2094 /* 2095 * Make that only one MDB_TGT_SPEC_AUTO* bit is set in the new flags 2096 * value: extra bits are cleared according to order of precedence. 
2097 */ 2098 if (flags & MDB_TGT_SPEC_AUTOSTOP) 2099 flags &= ~(MDB_TGT_SPEC_AUTODEL | MDB_TGT_SPEC_AUTODIS); 2100 else if (flags & MDB_TGT_SPEC_AUTODEL) 2101 flags &= ~MDB_TGT_SPEC_AUTODIS; 2102 2103 /* 2104 * The TEMPORARY property always takes precedence over STICKY. 2105 */ 2106 if (flags & MDB_TGT_SPEC_TEMPORARY) 2107 flags &= ~MDB_TGT_SPEC_STICKY; 2108 2109 /* 2110 * Find a matching sespec or create a new one on the appropriate list. 2111 * We always create a new sespec if the vespec is created disabled. 2112 */ 2113 if (flags & MDB_TGT_SPEC_DISABLED) 2114 sep = mdb_tgt_sespec_insert(t, ops, &t->t_idle); 2115 else if ((sep = mdb_tgt_sespec_lookup_active(t, ops, args)) == NULL && 2116 (sep = mdb_tgt_sespec_lookup_idle(t, ops, args)) == NULL) 2117 sep = mdb_tgt_sespec_insert(t, ops, &t->t_active); 2118 2119 /* 2120 * Generate a new ID for the vespec. Increasing positive integers are 2121 * assigned to visible vespecs; decreasing negative integers are 2122 * assigned to hidden vespecs. The target saves our most recent choice. 2123 */ 2124 if (flags & MDB_TGT_SPEC_INTERNAL) { 2125 seqp = &t->t_veneg; 2126 mult = -1; 2127 } else { 2128 seqp = &t->t_vepos; 2129 mult = 1; 2130 } 2131 2132 id = *seqp; 2133 2134 while (mdb_tgt_vespec_lookup(t, id * mult) != NULL) 2135 id = MAX(id + 1, 1); 2136 2137 *seqp = MAX(id + 1, 1); 2138 2139 vep->ve_id = id * mult; 2140 vep->ve_flags = flags & ~(MDB_TGT_SPEC_MATCHED | MDB_TGT_SPEC_DELETED); 2141 vep->ve_se = sep; 2142 vep->ve_callback = func; 2143 vep->ve_data = data; 2144 vep->ve_args = args; 2145 vep->ve_dtor = dtor; 2146 2147 mdb_list_append(&sep->se_velist, vep); 2148 mdb_tgt_sespec_hold(t, sep); 2149 2150 mdb_tgt_vespec_hold(t, vep); 2151 t->t_vecnt++; 2152 2153 /* 2154 * If this vespec is the first reference to the sespec and it's active, 2155 * then it is newly created and we should attempt to initialize it. 2156 * If se_ctor fails, then move the sespec back to the idle list. 
2157 */ 2158 if (sep->se_refs == 1 && sep->se_state == MDB_TGT_SPEC_ACTIVE && 2159 sep->se_ops->se_ctor(t, sep, vep->ve_args) == -1) { 2160 2161 mdb_list_delete(&t->t_active, sep); 2162 mdb_list_append(&t->t_idle, sep); 2163 2164 sep->se_state = MDB_TGT_SPEC_IDLE; 2165 sep->se_errno = errno; 2166 sep->se_data = NULL; 2167 } 2168 2169 /* 2170 * If the sespec is active and the target is currently running (because 2171 * we grabbed it using PGRAB_NOSTOP), then go ahead and attempt to arm 2172 * the sespec so it will take effect immediately. 2173 */ 2174 if (sep->se_state == MDB_TGT_SPEC_ACTIVE && 2175 t->t_status.st_state == MDB_TGT_RUNNING) 2176 mdb_tgt_sespec_arm_one(t, sep); 2177 2178 mdb_dprintf(MDB_DBG_TGT, "inserted [ %d ] sep=%p refs=%u state=%d\n", 2179 vep->ve_id, (void *)sep, sep->se_refs, sep->se_state); 2180 2181 return (vep->ve_id); 2182 } 2183 2184 /* 2185 * Search the target's active, idle, and disabled lists for the vespec matching 2186 * the specified VID, and return a pointer to it, or NULL if no match is found. 
2187 */ 2188 mdb_vespec_t * 2189 mdb_tgt_vespec_lookup(mdb_tgt_t *t, int vid) 2190 { 2191 mdb_sespec_t *sep; 2192 mdb_vespec_t *vep; 2193 2194 if (vid == 0) 2195 return (NULL); /* 0 is never a valid VID */ 2196 2197 for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) { 2198 for (vep = mdb_list_next(&sep->se_velist); vep; 2199 vep = mdb_list_next(vep)) { 2200 if (vep->ve_id == vid) 2201 return (vep); 2202 } 2203 } 2204 2205 for (sep = mdb_list_next(&t->t_idle); sep; sep = mdb_list_next(sep)) { 2206 for (vep = mdb_list_next(&sep->se_velist); vep; 2207 vep = mdb_list_next(vep)) { 2208 if (vep->ve_id == vid) 2209 return (vep); 2210 } 2211 } 2212 2213 return (NULL); 2214 } 2215 2216 /*ARGSUSED*/ 2217 void 2218 no_ve_dtor(mdb_vespec_t *vep) 2219 { 2220 /* default destructor does nothing */ 2221 } 2222 2223 /*ARGSUSED*/ 2224 void 2225 no_se_f(mdb_tgt_t *t, int vid, void *data) 2226 { 2227 /* default callback does nothing */ 2228 } 2229 2230 /*ARGSUSED*/ 2231 void 2232 no_se_dtor(mdb_tgt_t *t, mdb_sespec_t *sep) 2233 { 2234 /* default destructor does nothing */ 2235 } 2236 2237 /*ARGSUSED*/ 2238 int 2239 no_se_secmp(mdb_tgt_t *t, mdb_sespec_t *sep, void *args) 2240 { 2241 return (sep->se_data == args); 2242 } 2243 2244 /*ARGSUSED*/ 2245 int 2246 no_se_vecmp(mdb_tgt_t *t, mdb_vespec_t *vep, void *args) 2247 { 2248 return (vep->ve_args == args); 2249 } 2250 2251 /*ARGSUSED*/ 2252 int 2253 no_se_arm(mdb_tgt_t *t, mdb_sespec_t *sep) 2254 { 2255 return (0); /* return success */ 2256 } 2257 2258 /*ARGSUSED*/ 2259 int 2260 no_se_disarm(mdb_tgt_t *t, mdb_sespec_t *sep) 2261 { 2262 return (0); /* return success */ 2263 } 2264 2265 /*ARGSUSED*/ 2266 int 2267 no_se_cont(mdb_tgt_t *t, mdb_sespec_t *sep, mdb_tgt_status_t *tsp) 2268 { 2269 if (tsp != &t->t_status) 2270 bcopy(&t->t_status, tsp, sizeof (mdb_tgt_status_t)); 2271 2272 return (0); /* return success */ 2273 } 2274 2275 int 2276 mdb_tgt_register_dcmds(mdb_tgt_t *t, const mdb_dcmd_t *dcp, int flags) 2277 { 
2278 int fail = 0; 2279 2280 for (; dcp->dc_name != NULL; dcp++) { 2281 if (mdb_module_add_dcmd(t->t_module, dcp, flags) == -1) { 2282 warn("failed to add dcmd %s", dcp->dc_name); 2283 fail++; 2284 } 2285 } 2286 2287 return (fail > 0 ? -1 : 0); 2288 } 2289 2290 int 2291 mdb_tgt_register_walkers(mdb_tgt_t *t, const mdb_walker_t *wp, int flags) 2292 { 2293 int fail = 0; 2294 2295 for (; wp->walk_name != NULL; wp++) { 2296 if (mdb_module_add_walker(t->t_module, wp, flags) == -1) { 2297 warn("failed to add walk %s", wp->walk_name); 2298 fail++; 2299 } 2300 } 2301 2302 return (fail > 0 ? -1 : 0); 2303 } 2304 2305 void 2306 mdb_tgt_register_regvars(mdb_tgt_t *t, const mdb_tgt_regdesc_t *rdp, 2307 const mdb_nv_disc_t *disc, int flags) 2308 { 2309 for (; rdp->rd_name != NULL; rdp++) { 2310 if (!(rdp->rd_flags & MDB_TGT_R_EXPORT)) 2311 continue; /* Don't export register as a variable */ 2312 2313 if (rdp->rd_flags & MDB_TGT_R_RDONLY) 2314 flags |= MDB_NV_RDONLY; 2315 2316 (void) mdb_nv_insert(&mdb.m_nv, rdp->rd_name, disc, 2317 (uintptr_t)t, MDB_NV_PERSIST | flags); 2318 } 2319 } 2320