/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DTrace - Dynamic Tracing for Solaris
 *
 * This is the implementation of the Solaris Dynamic Tracing framework
 * (DTrace).  The user-visible interface to DTrace is described at length in
 * the "Solaris Dynamic Tracing Guide".  The interfaces between the libdtrace
 * library, the in-kernel DTrace framework, and the DTrace providers are
 * described in the block comments in the <sys/dtrace.h> header file.  The
 * internal architecture of DTrace is described in the block comments in the
 * <sys/dtrace_impl.h> header file.  The comments contained within the DTrace
 * implementation very much assume mastery of all of these sources; if one has
 * an unanswered question about the implementation, one should consult them
 * first.
 *
 * The functions here are ordered roughly as follows:
 *
 *   - Probe context functions
 *   - Probe hashing functions
 *   - Non-probe context utility functions
 *   - Matching functions
 *   - Provider-to-Framework API functions
 *   - Probe management functions
 *   - DIF object functions
 *   - Format functions
 *   - Predicate functions
 *   - ECB functions
 *   - Buffer functions
 *   - Enabling functions
 *   - DOF functions
 *   - Anonymous enabling functions
 *   - Consumer state functions
 *   - Helper functions
 *   - Hook functions
 *   - Driver cookbook functions
 *
 * Each group of functions begins with a block comment labelled the "DTrace
 * [Group] Functions", allowing one to find each block by searching forward
 * on capital-f functions.
 */
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/strsubr.h>
#include <sys/sysmacros.h>
#include <sys/dtrace_impl.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#include <sys/ctf_api.h>
#include <sys/panic.h>
#include <sys/priv_impl.h>
#include <sys/policy.h>
#include <sys/cred_impl.h>
#include <sys/procfs_isa.h>
#include <sys/taskq.h>
#include <sys/mkdev.h>
#include <sys/kdi.h>
#include <sys/zone.h>

/*
 * DTrace Tunable Variables
 *
 * The following variables may be tuned by adding a line to /etc/system that
 * includes both the name of the DTrace module ("dtrace") and the name of the
 * variable.  For example:
 *
 *	set dtrace:dtrace_destructive_disallow = 1
 *
 * In general, the only variables that one should be tuning this way are
 * those that affect system-wide DTrace behavior, and for which the default
 * behavior is undesirable.  Most of these variables are tunable on a
 * per-consumer basis using DTrace options, and need not be tuned on a
 * system-wide basis.  When tuning these variables, avoid pathological
 * values; while some attempt is made to verify the integrity of these
 * variables, they are not considered part of the supported interface to
 * DTrace, and they are therefore not checked comprehensively.  Further,
 * these variables should not be tuned dynamically via "mdb -kw" or other
 * means; they should only be tuned via /etc/system.
 */
int		dtrace_destructive_disallow = 0;
dtrace_optval_t	dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t		dtrace_difo_maxsize = (256 * 1024);
dtrace_optval_t	dtrace_dof_maxsize = (256 * 1024);
size_t		dtrace_global_maxsize = (16 * 1024);
size_t		dtrace_actions_max = (16 * 1024);
size_t		dtrace_retain_max = 1024;
dtrace_optval_t	dtrace_helper_actions_max = 32;
dtrace_optval_t	dtrace_helper_providers_max = 32;
dtrace_optval_t	dtrace_dstate_defsize = (1 * 1024 * 1024);
size_t		dtrace_strsize_default = 256;
dtrace_optval_t	dtrace_cleanrate_default = 9900990;		/* 101 hz */
dtrace_optval_t	dtrace_cleanrate_min = 200000;			/* 5000 hz */
dtrace_optval_t	dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;	/* 1/minute */
dtrace_optval_t	dtrace_aggrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_max = (hrtime_t)10 * NANOSEC;	/* 6/minute */
dtrace_optval_t	dtrace_switchrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_nspec_default = 1;
dtrace_optval_t	dtrace_specsize_default = 32 * 1024;
dtrace_optval_t	dtrace_stackframes_default = 20;
dtrace_optval_t	dtrace_ustackframes_default = 20;
dtrace_optval_t	dtrace_jstackframes_default = 50;
dtrace_optval_t	dtrace_jstackstrsize_default = 512;
int		dtrace_msgdsize_max = 128;
hrtime_t	dtrace_chill_max = 500 * (NANOSEC / MILLISEC);	/* 500 ms */
hrtime_t	dtrace_chill_interval = NANOSEC;		/* 1000 ms */
int		dtrace_devdepth_max = 32;
int		dtrace_err_verbose;
hrtime_t	dtrace_deadman_interval = NANOSEC;
hrtime_t	dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
hrtime_t	dtrace_deadman_user = (hrtime_t)30 * NANOSEC;

/*
 * DTrace External Variables
 *
 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
 * available to DTrace consumers via the backtick (`) syntax.  One of these,
 * dtrace_zero, is made deliberately so:  it is provided as a source of
 * well-known, zero-filled memory.  While this variable is not documented,
 * it is used by some translators as an implementation detail.
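 *
 * For example, a D consumer could read one of these variables (say, the
 * dtrace_helptrace_bufsize tunable declared below) via the backtick syntax;
 * a hypothetical one-liner, purely for illustration:
 *
 *	dtrace -qn 'BEGIN { trace(`dtrace_helptrace_bufsize); exit(0); }'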
 */
const char	dtrace_zero[256] = { 0 };	/* zero-filled memory */

/*
 * DTrace Internal Variables
 */
static dev_info_t	*dtrace_devi;		/* device info */
static vmem_t		*dtrace_arena;		/* probe ID arena */
static vmem_t		*dtrace_minor;		/* minor number arena */
static taskq_t		*dtrace_taskq;		/* task queue */
static dtrace_probe_t	**dtrace_probes;	/* array of all probes */
static int		dtrace_nprobes;		/* number of probes */
static dtrace_provider_t *dtrace_provider;	/* provider list */
static dtrace_meta_t	*dtrace_meta_pid;	/* user-land meta provider */
static int		dtrace_opens;		/* number of opens */
static void		*dtrace_softstate;	/* softstate pointer */
static dtrace_hash_t	*dtrace_bymod;		/* probes hashed by module */
static dtrace_hash_t	*dtrace_byfunc;		/* probes hashed by function */
static dtrace_hash_t	*dtrace_byname;		/* probes hashed by name */
static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
static int		dtrace_toxranges;	/* number of toxic ranges */
static int		dtrace_toxranges_max;	/* size of toxic range array */
static dtrace_anon_t	dtrace_anon;		/* anonymous enabling */
static kmem_cache_t	*dtrace_state_cache;	/* cache for dynamic state */
static uint64_t		dtrace_vtime_references; /* number of vtimestamp refs */
static kthread_t	*dtrace_panicked;	/* panicking thread */
static dtrace_ecb_t	*dtrace_ecb_create_cache; /* cached created ECB */
static int		dtrace_double_errors;	/* ERRORs inducing error */
static dtrace_genid_t	dtrace_probegen;	/* current probe generation */
static dtrace_helpers_t *dtrace_deferred_pid;	/* deferred helper list */
static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
static dtrace_state_t	*dtrace_state;		/* temporary variable */
static int		dtrace_error;		/* temporary variable */

/*
 * DTrace Locking
 * DTrace is protected by three (relatively coarse-grained) locks:
 *
 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
 *     including enabling state, probes, ECBs, consumer state, helper state,
 *     etc.  Importantly, dtrace_lock is _not_ required when in probe context;
 *     probe context is lock-free -- synchronization is handled via the
 *     dtrace_sync() cross call mechanism.
 *
 * (2) dtrace_provider_lock is required when manipulating provider state, or
 *     when provider state must be held constant.
 *
 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
 *     when meta provider state must be held constant.
 *
 * The lock ordering between these three locks is dtrace_meta_lock before
 * dtrace_provider_lock before dtrace_lock.  (In particular, there are
 * several places where dtrace_provider_lock is held by the framework as it
 * calls into the providers -- which then call back into the framework,
 * grabbing dtrace_lock.)
 *
 * There are two other locks in the mix:  mod_lock and cpu_lock.  cpu_lock
 * continues its historical role as a coarse-grained lock; it is acquired
 * before both dtrace_provider_lock and dtrace_lock.  mod_lock is slightly
 * stranger:  it must be acquired _between_ dtrace_provider_lock and
 * dtrace_lock.
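 *
 * To illustrate the resulting order, a purely hypothetical path that needed
 * all five locks would acquire them as follows (and release them in the
 * opposite order; the relative ordering of cpu_lock and dtrace_meta_lock is
 * not constrained by the rules above):
 *
 *	mutex_enter(&cpu_lock);
 *	mutex_enter(&dtrace_meta_lock);
 *	mutex_enter(&dtrace_provider_lock);
 *	mutex_enter(&mod_lock);
 *	mutex_enter(&dtrace_lock);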
 */
static kmutex_t		dtrace_lock;		/* probe state lock */
static kmutex_t		dtrace_provider_lock;	/* provider state lock */
static kmutex_t		dtrace_meta_lock;	/* meta-provider state lock */

/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t	dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static dtrace_pops_t	dtrace_provider_ops = {
	(void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop,
	(void (*)(void *, struct modctl *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	NULL,
	NULL,
	NULL,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 */
uint32_t dtrace_helptrace_next = 0;
uint32_t dtrace_helptrace_nlocals;
char	*dtrace_helptrace_buffer;
int	dtrace_helptrace_bufsize = 512 * 1024;

#ifdef DEBUG
int	dtrace_helptrace_enabled = 1;
#else
int	dtrace_helptrace_enabled = 0;
#endif

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table.  This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation.  The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#ifdef DEBUG
static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
static const char *dtrace_errlast;
static kthread_t *dtrace_errthread;
static kmutex_t dtrace_errlock;
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation.  There is no real structure to this cpp
 * mishmash -- but is there ever?
 */
#define	DTRACE_HASHSTR(hash, probe)	\
	dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))

#define	DTRACE_HASHNEXT(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)

#define	DTRACE_HASHPREV(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)

#define	DTRACE_HASHEQ(hash, lhs, rhs)	\
	(strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
	    *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)

#define	DTRACE_AGGHASHSIZE_SLEW		17

/*
 * The key for a thread-local variable consists of the lower 61 bits of the
 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
 * equal to a variable identifier.  This is necessary (but not sufficient) to
 * assure that global associative arrays never collide with thread-local
 * variables.  To guarantee that they cannot collide, we must also define the
 * order for keying dynamic variables.  That order is:
 *
 *   [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
 *
 * Because the variable-key and the tls-key are in orthogonal spaces, there
 * is no way for a global variable key signature to match a thread-local key
 * signature.
 */
#define	DTRACE_TLS_THRKEY(where) { \
	uint_t intr = 0; \
	uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}

#define	DTRACE_STORE(type, tomax, offset, what) \
	*((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);

#ifndef __i386
#define	DTRACE_ALIGNCHECK(addr, size, flags)				\
	if (addr & (size - 1)) {					\
		*flags |= CPU_DTRACE_BADALIGN;				\
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr;	\
		return (0);						\
	}
#else
#define	DTRACE_ALIGNCHECK(addr, size, flags)
#endif

#define	DTRACE_LOADFUNC(bits)						\
/*CSTYLED*/								\
uint##bits##_t								\
dtrace_load##bits(uintptr_t addr)					\
{									\
	size_t size = bits / NBBY;					\
	/*CSTYLED*/							\
	uint##bits##_t rval;						\
	int i;								\
	volatile uint16_t *flags = (volatile uint16_t *)		\
	    &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;			\
									\
	DTRACE_ALIGNCHECK(addr, size, flags);				\
									\
	for (i = 0; i < dtrace_toxranges; i++) {			\
		if (addr >= dtrace_toxrange[i].dtt_limit)		\
			continue;					\
									\
		if (addr + size <= dtrace_toxrange[i].dtt_base)		\
			continue;					\
									\
		/*							\
		 * This address falls within a toxic region; return 0.	\
		 */							\
		*flags |= CPU_DTRACE_BADADDR;				\
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr;	\
		return (0);						\
	}								\
									\
	*flags |= CPU_DTRACE_NOFAULT;					\
	/*CSTYLED*/							\
	rval = *((volatile uint##bits##_t *)addr);			\
	*flags &= ~CPU_DTRACE_NOFAULT;					\
									\
	return (rval);							\
}

#ifdef _LP64
#define	dtrace_loadptr	dtrace_load64
#else
#define	dtrace_loadptr	dtrace_load32
#endif

#define	DTRACE_MATCH_NEXT	0
#define	DTRACE_MATCH_DONE	1
#define	DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
#define	DTRACE_STATE_ALIGN	64

static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
static void dtrace_enabling_provide(dtrace_provider_t *);
static int dtrace_enabling_match(dtrace_enabling_t *, int *);
static void dtrace_enabling_matchall(void);
static dtrace_state_t *dtrace_anon_grab(void);
static uint64_t dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
static void dtrace_buffer_drop(dtrace_buffer_t *);
static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context.  Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.)  If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note:  not called from probe context."
 */
void
dtrace_panic(const char *format, ...)
{
	va_list alist;

	va_start(alist, format);
	dtrace_vpanic(format, alist);
	va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

	/*
	 * We just need something here that even the most clever compiler
	 * cannot optimize away.
	 */
	return (a[(uintptr_t)f]);
}

/*
 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
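 *
 * A sketch of how probe-context code would use the generated functions (and
 * the dtrace_loadptr alias defined above) in place of raw dereferences:
 *
 *	uint32_t val = dtrace_load32(addr);
 *	uintptr_t ptr = dtrace_loadptr(addr);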
 */
DTRACE_LOADFUNC(8)
DTRACE_LOADFUNC(16)
DTRACE_LOADFUNC(32)
DTRACE_LOADFUNC(64)

static int
dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
{
	if (dest < mstate->dtms_scratch_base)
		return (0);

	if (dest + size < dest)
		return (0);

	if (dest + size > mstate->dtms_scratch_ptr)
		return (0);

	return (1);
}

static int
dtrace_canstore_statvar(uint64_t addr, size_t sz,
    dtrace_statvar_t **svars, int nsvars)
{
	int i;

	for (i = 0; i < nsvars; i++) {
		dtrace_statvar_t *svar = svars[i];

		if (svar == NULL || svar->dtsv_size == 0)
			continue;

		if (addr - svar->dtsv_data < svar->dtsv_size &&
		    addr + sz <= svar->dtsv_data + svar->dtsv_size)
			return (1);
	}

	return (0);
}

/*
 * Check to see if the address is within a memory region to which a store may
 * be issued.  This includes the DTrace scratch areas, and any DTrace variable
 * region.  The caller of dtrace_canstore() is responsible for performing any
 * alignment checks that are needed before stores are actually executed.
 */
static int
dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	uintptr_t a;
	size_t s;

	/*
	 * First, check to see if the address is in scratch space...
	 */
	a = mstate->dtms_scratch_base;
	s = mstate->dtms_scratch_size;

	if (addr - a < s && addr + sz <= a + s)
		return (1);

	/*
	 * Now check to see if it's a dynamic variable.  This check will pick
	 * up both thread-local variables and any global dynamically-allocated
	 * variables.
	 */
	a = (uintptr_t)vstate->dtvs_dynvars.dtds_base;
	s = vstate->dtvs_dynvars.dtds_size;
	if (addr - a < s && addr + sz <= a + s)
		return (1);

	/*
	 * Finally, check the static local and global variables.  These checks
	 * take the longest, so we perform them last.
	 */
	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_locals, vstate->dtvs_nlocals))
		return (1);

	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_globals, vstate->dtvs_nglobals))
		return (1);

	return (0);
}

/*
 * Compare two strings using safe loads.
 */
static int
dtrace_strncmp(char *s1, char *s2, size_t limit)
{
	uint8_t c1, c2;
	volatile uint16_t *flags;

	if (s1 == s2 || limit == 0)
		return (0);

	flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;

	do {
		if (s1 == NULL) {
			c1 = '\0';
		} else {
			c1 = dtrace_load8((uintptr_t)s1++);
		}

		if (s2 == NULL) {
			c2 = '\0';
		} else {
			c2 = dtrace_load8((uintptr_t)s2++);
		}

		if (c1 != c2)
			return (c1 - c2);
	} while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));

	return (0);
}

/*
 * Compute strlen(s) for a string using safe memory accesses.  The additional
 * len parameter is used to specify a maximum length to ensure completion.
 */
static size_t
dtrace_strlen(const char *s, size_t lim)
{
	uint_t len;

	for (len = 0; len != lim; len++) {
		if (dtrace_load8((uintptr_t)s++) == '\0')
			break;
	}

	return (len);
}

/*
 * Check if an address falls within a toxic region.
 */
static int
dtrace_istoxic(uintptr_t kaddr, size_t size)
{
	uintptr_t taddr, tsize;
	int i;

	for (i = 0; i < dtrace_toxranges; i++) {
		taddr = dtrace_toxrange[i].dtt_base;
		tsize = dtrace_toxrange[i].dtt_limit - taddr;

		if (kaddr - taddr < tsize) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = kaddr;
			return (1);
		}

		if (taddr - kaddr < size) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = taddr;
			return (1);
		}
	}

	return (0);
}

/*
 * Copy src to dst using safe memory accesses.  The src is assumed to be
 * unsafe memory specified by the DIF program.  The dst is assumed to be
 * safe memory that we can store to directly because it is managed by
 * DTrace.  As with standard bcopy, overlapping copies are handled properly.
 */
static void
dtrace_bcopy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst;
		const uint8_t *s2 = src;

		if (s1 <= s2) {
			do {
				*s1++ = dtrace_load8((uintptr_t)s2++);
			} while (--len != 0);
		} else {
			s2 += len;
			s1 += len;

			do {
				*--s1 = dtrace_load8((uintptr_t)--s2);
			} while (--len != 0);
		}
	}
}

/*
 * Copy src to dst using safe memory accesses, up to either the specified
 * length, or the point that a nul byte is encountered.  The src is assumed
 * to be unsafe memory specified by the DIF program.  The dst is assumed to
 * be safe memory that we can store to directly because it is managed by
 * DTrace.  Unlike dtrace_bcopy(), overlapping regions are not handled.
 */
static void
dtrace_strcpy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst, c;
		const uint8_t *s2 = src;

		do {
			*s1++ = c = dtrace_load8((uintptr_t)s2++);
		} while (--len != 0 && c != '\0');
	}
}

/*
 * Copy src to dst, deriving the size and type from the specified (BYREF)
 * variable type.  The src is assumed to be unsafe memory specified by the
 * DIF program.  The dst is assumed to be DTrace variable memory that is of
 * the specified type; we assume that we can store to it directly.
 */
static void
dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
{
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	if (type->dtdt_kind == DIF_TYPE_STRING) {
		dtrace_strcpy(src, dst, type->dtdt_size);
	} else {
		dtrace_bcopy(src, dst, type->dtdt_size);
	}
}

/*
 * Compare s1 to s2 using safe memory accesses.  The s1 data is assumed to be
 * unsafe memory specified by the DIF program.  The s2 data is assumed to be
 * safe memory that we can access directly because it is managed by DTrace.
 */
static int
dtrace_bcmp(const void *s1, const void *s2, size_t len)
{
	volatile uint16_t *flags;

	flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;

	if (s1 == s2)
		return (0);

	if (s1 == NULL || s2 == NULL)
		return (1);

	if (s1 != s2 && len != 0) {
		const uint8_t *ps1 = s1;
		const uint8_t *ps2 = s2;

		do {
			if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
				return (1);
		} while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
	}
	return (0);
}

/*
 * Zero the specified region using a simple byte-by-byte loop.  Note that
 * this is for safe DTrace-managed memory only.
 */
static void
dtrace_bzero(void *dst, size_t len)
{
	uchar_t *cp;

	for (cp = dst; len != 0; len--)
		*cp++ = 0;
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify the credentials of the process that enabled the invoking ECB.
 */
static int
dtrace_priv_proc_common(dtrace_state_t *state)
{
	uid_t uid = state->dts_cred.dcr_uid;
	gid_t gid = state->dts_cred.dcr_gid;
	cred_t *cr;
	proc_t *proc;

	if ((cr = CRED()) != NULL &&
	    uid == cr->cr_uid &&
	    uid == cr->cr_ruid &&
	    uid == cr->cr_suid &&
	    gid == cr->cr_gid &&
	    gid == cr->cr_rgid &&
	    gid == cr->cr_sgid &&
	    (proc = ttoproc(curthread)) != NULL &&
	    !(proc->p_flag & SNOCD))
		return (1);

	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc_destructive(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_DESTRUCTIVE)
		return (1);

	return (dtrace_priv_proc_common(state));
}

static int
dtrace_priv_proc_control(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
		return (1);

	return (dtrace_priv_proc_common(state));
}

static int
dtrace_priv_proc(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
		return (1);

	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_kernel(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
		return (1);

	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

static int
dtrace_priv_kernel_destructive(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
		return (1);

	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

/*
 * Note:  not called from probe context.  This function is called
 * asynchronously (and at a regular interval) from outside of probe context
 * to clean the dirty dynamic variable lists on all CPUs.  Dynamic variable
 * cleaning is explained in detail in <sys/dtrace_impl.h>.
 */
void
dtrace_dynvar_clean(dtrace_dstate_t *dstate)
{
	dtrace_dynvar_t *dirty;
	dtrace_dstate_percpu_t *dcpu;
	int i, work = 0;

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		ASSERT(dcpu->dtdsc_rinsing == NULL);

		/*
		 * If the dirty list is NULL, there is no dirty work to do.
		 */
		if (dcpu->dtdsc_dirty == NULL)
			continue;

		/*
		 * If the clean list is non-NULL, then we're not going to do
		 * any work for this CPU -- it means that there has not been
		 * a dtrace_dynvar() allocation on this CPU (or from this
		 * CPU) since the last time we cleaned house.
		 */
		if (dcpu->dtdsc_clean != NULL)
			continue;

		work = 1;

		/*
		 * Atomically move the dirty list aside.
		 */
		do {
			dirty = dcpu->dtdsc_dirty;

			/*
			 * Before we zap the dirty list, set the rinsing list.
			 * (This allows for a potential assertion in
			 * dtrace_dynvar():  if a free dynamic variable appears
			 * on a hash chain, either the dirty list or the
			 * rinsing list for some CPU must be non-NULL.)
			 */
			dcpu->dtdsc_rinsing = dirty;
			dtrace_membar_producer();
		} while (dtrace_casptr(&dcpu->dtdsc_dirty,
		    dirty, NULL) != dirty);
	}

	if (!work) {
		/*
		 * We have no work to do; we can simply return.
		 */
		return;
	}

	dtrace_sync();

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		if (dcpu->dtdsc_rinsing == NULL)
			continue;

		/*
		 * We are now guaranteed that no hash chain contains a pointer
		 * into this dirty list; we can make it clean.
		 */
		ASSERT(dcpu->dtdsc_clean == NULL);
		dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
		dcpu->dtdsc_rinsing = NULL;
	}

	/*
	 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
	 * sure that all CPUs have seen all of the dtdsc_clean pointers.
	 * This prevents a race whereby a CPU incorrectly decides that the
	 * state should be something other than DTRACE_DSTATE_CLEAN after
	 * dtrace_dynvar_clean() has completed.
	 */
	dtrace_sync();

	dstate->dtds_state = DTRACE_DSTATE_CLEAN;
}

/*
 * Depending on the value of the op parameter, this function looks up,
 * allocates or deallocates an arbitrarily-keyed dynamic variable.  If an
 * allocation is requested, this function will return a pointer to a
 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
 * variable can be allocated.  If NULL is returned, the appropriate counter
 * will be incremented.
 */
dtrace_dynvar_t *
dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
    dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op)
{
	uint64_t hashval = 1;
	dtrace_dynhash_t *hash = dstate->dtds_hash;
	dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
	processorid_t me = CPU->cpu_id, cpu = me;
	dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
	size_t bucket, ksize;
	size_t chunksize = dstate->dtds_chunksize;
	uintptr_t kdata, lock, nstate;
	uint_t i;

	ASSERT(nkeys != 0);

	/*
	 * Hash the key.  As with aggregations, we use Jenkins' "One-at-a-time"
	 * algorithm.  For the by-value portions, we perform the algorithm in
	 * 16-bit chunks (as opposed to 8-bit chunks).  This speeds things up a
	 * bit, and seems to have only a minute effect on distribution.  For
	 * the by-reference data, we perform "One-at-a-time" iterating (safely)
	 * over each referenced byte.  It's painful to do this, but it's much
	 * better than pathological hash distribution.  The efficacy of the
	 * hashing algorithm (and a comparison with other algorithms) may be
	 * found by running the ::dtrace_dynstat MDB dcmd.
	 */
	for (i = 0; i < nkeys; i++) {
		if (key[i].dttk_size == 0) {
			uint64_t val = key[i].dttk_value;

			hashval += (val >> 48) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 32) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 16) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += val & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);
		} else {
			/*
			 * This is incredibly painful, but it beats the hell
			 * out of the alternative.
			 */
			uint64_t j, size = key[i].dttk_size;
			uintptr_t base = (uintptr_t)key[i].dttk_value;

			for (j = 0; j < size; j++) {
				hashval += dtrace_load8(base + j);
				hashval += (hashval << 10);
				hashval ^= (hashval >> 6);
			}
		}
	}

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	/*
	 * There is a remote chance (ideally, 1 in 2^32) that our hashval
	 * comes out to be 0.  We rely on a zero hashval denoting a free
	 * element; if this actually happens, we set the hashval to 1.
	 */
	if (hashval == 0)
		hashval = 1;

	/*
	 * Yes, it's painful to do a divide here.  If the cycle count becomes
	 * important here, tricks can be pulled to reduce it.  (However, it's
	 * critical that hash collisions be kept to an absolute minimum;
	 * they're much more painful than a divide.)  It's better to have a
	 * solution that generates few collisions and still keeps things
	 * relatively simple.
	 */
	bucket = hashval % dstate->dtds_hashsize;

	if (op == DTRACE_DYNVAR_DEALLOC) {
		volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;

		for (;;) {
			while ((lock = *lockp) & 1)
				continue;

			if (dtrace_casptr((void *)lockp,
			    (void *)lock, (void *)(lock + 1)) == (void *)lock)
				break;
		}

		dtrace_membar_producer();
	}

top:
	prev = NULL;
	lock = hash[bucket].dtdh_lock;

	dtrace_membar_consumer();

	start = hash[bucket].dtdh_chain;
	ASSERT(start == NULL || start->dtdv_hashval != 0 ||
	    op != DTRACE_DYNVAR_DEALLOC);

	for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
		dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
		dtrace_key_t *dkey = &dtuple->dtt_key[0];

		if (dvar->dtdv_hashval != hashval) {
			if (dvar->dtdv_hashval == 0) {
				/*
				 * We've gone off the rails.  Somewhere
				 * along the line, one of the members of this
				 * hash chain was deleted.  We could assert
				 * that either the dirty list or the rinsing
				 * list is non-NULL.  (The dtrace_sync() in
				 * dtrace_dynvar_clean() would validate this
				 * assertion.)
				 */
				ASSERT(op != DTRACE_DYNVAR_DEALLOC);
				goto top;
			}

			goto next;
		}

		if (dtuple->dtt_nkeys != nkeys)
			goto next;

		for (i = 0; i < nkeys; i++, dkey++) {
			if (dkey->dttk_size != key[i].dttk_size)
				goto next;	/* size or type mismatch */

			if (dkey->dttk_size != 0) {
				if (dtrace_bcmp(
				    (void *)(uintptr_t)key[i].dttk_value,
				    (void *)(uintptr_t)dkey->dttk_value,
				    dkey->dttk_size))
					goto next;
			} else {
				if (dkey->dttk_value != key[i].dttk_value)
					goto next;
			}
		}

		if (op != DTRACE_DYNVAR_DEALLOC)
			return (dvar);

		ASSERT(dvar->dtdv_next == NULL ||
		    dvar->dtdv_next->dtdv_hashval != 0);

		if (prev != NULL) {
			ASSERT(hash[bucket].dtdh_chain != dvar);
			ASSERT(start != dvar);
			ASSERT(prev->dtdv_next == dvar);
			prev->dtdv_next = dvar->dtdv_next;
		} else {
			if (dtrace_casptr(&hash[bucket].dtdh_chain,
			    start, dvar->dtdv_next) != start) {
				/*
				 * We have failed to atomically swing the
				 * hash table head pointer, presumably because
				 * of a conflicting allocation on another CPU.
				 * We need to reread the hash chain and try
				 * again.
				 */
				goto top;
			}
		}

		dtrace_membar_producer();

		/*
		 * Now clear the hash value to indicate that it's free.
		 */
		ASSERT(hash[bucket].dtdh_chain != dvar);
		dvar->dtdv_hashval = 0;

		dtrace_membar_producer();

		/*
		 * Set the next pointer to point at the dirty list, and
		 * atomically swing the dirty pointer to the newly freed dvar.
		 */
		do {
			next = dcpu->dtdsc_dirty;
			dvar->dtdv_next = next;
		} while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);

		/*
		 * Finally, unlock this hash bucket.
		 */
		ASSERT(hash[bucket].dtdh_lock == lock);
		ASSERT(lock & 1);
		hash[bucket].dtdh_lock++;

		return (NULL);
next:
		prev = dvar;
		continue;
	}

	if (op != DTRACE_DYNVAR_ALLOC) {
		/*
		 * If we are not to allocate a new variable, we want to
		 * return NULL now.  Before we return, check that the value
		 * of the lock word hasn't changed.  If it has, we may have
		 * seen an inconsistent snapshot.
		 */
		if (op == DTRACE_DYNVAR_NOALLOC) {
			if (hash[bucket].dtdh_lock != lock)
				goto top;
		} else {
			ASSERT(op == DTRACE_DYNVAR_DEALLOC);
			ASSERT(hash[bucket].dtdh_lock == lock);
			ASSERT(lock & 1);
			hash[bucket].dtdh_lock++;
		}

		return (NULL);
	}

	/*
	 * We need to allocate a new dynamic variable.  The size we need is
	 * the size of dtrace_dynvar plus the size of nkeys dtrace_key_t's
	 * plus the size of any auxiliary key data (rounded up to 8-byte
	 * alignment) plus the size of any referred-to data (dsize).  We then
	 * round the final size up to the chunksize for allocation.
	 */
	for (ksize = 0, i = 0; i < nkeys; i++)
		ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));

	/*
	 * This should be pretty much impossible, but could happen if, say,
	 * strange DIF specified the tuple.  Ideally, this should be an
	 * assertion and not an error condition -- but that requires that the
	 * chunksize calculation in dtrace_difo_chunksize() be absolutely
	 * bullet-proof.  (That is, it must not be able to be fooled by
	 * malicious DIF.)  Given the lack of backwards branches in DIF,
	 * solving this would presumably not amount to solving the Halting
	 * Problem -- but it still seems awfully hard.
	 */
	if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
	    ksize + dsize > chunksize) {
		dcpu->dtdsc_drops++;
		return (NULL);
	}

	nstate = DTRACE_DSTATE_EMPTY;

	do {
retry:
		free = dcpu->dtdsc_free;

		if (free == NULL) {
			dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
			void *rval;

			if (clean == NULL) {
				/*
				 * We're out of dynamic variable space on
				 * this CPU.  Unless we have tried all CPUs,
				 * we'll try to allocate from a different
				 * CPU.
				 */
				switch (dstate->dtds_state) {
				case DTRACE_DSTATE_CLEAN: {
					void *sp = &dstate->dtds_state;

					if (++cpu >= NCPU)
						cpu = 0;

					if (dcpu->dtdsc_dirty != NULL &&
					    nstate == DTRACE_DSTATE_EMPTY)
						nstate = DTRACE_DSTATE_DIRTY;

					if (dcpu->dtdsc_rinsing != NULL)
						nstate = DTRACE_DSTATE_RINSING;

					dcpu = &dstate->dtds_percpu[cpu];

					if (cpu != me)
						goto retry;

					(void) dtrace_cas32(sp,
					    DTRACE_DSTATE_CLEAN, nstate);

					/*
					 * To increment the correct bean
					 * counter, take another lap.
					 */
					goto retry;
				}

				case DTRACE_DSTATE_DIRTY:
					dcpu->dtdsc_dirty_drops++;
					break;

				case DTRACE_DSTATE_RINSING:
					dcpu->dtdsc_rinsing_drops++;
					break;

				case DTRACE_DSTATE_EMPTY:
					dcpu->dtdsc_drops++;
					break;
				}

				DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
				return (NULL);
			}

			/*
			 * The clean list appears to be non-empty.  We want to
			 * move the clean list to the free list; we start by
			 * moving the clean pointer aside.
			 */
			if (dtrace_casptr(&dcpu->dtdsc_clean,
			    clean, NULL) != clean) {
				/*
				 * We are in one of two situations:
				 *
				 *  (a)	The clean list was switched to the
				 *	free list by another CPU.
				 *
				 *  (b)	The clean list was added to by the
				 *	cleansing cyclic.
				 *
				 * In either of these situations, we can
				 * just reattempt the free list allocation.
				 */
				goto retry;
			}

			ASSERT(clean->dtdv_hashval == 0);

			/*
			 * Now we'll move the clean list to the free list.
			 * It's impossible for this to fail:  the only way
			 * the free list can be updated is through this
			 * code path, and only one CPU can own the clean list.
			 * Thus, it would only be possible for this to fail if
			 * this code were racing with dtrace_dynvar_clean().
			 * (That is, if dtrace_dynvar_clean() updated the clean
			 * list, and we ended up racing to update the free
			 * list.)  This race is prevented by the dtrace_sync()
			 * in dtrace_dynvar_clean() -- which flushes the
			 * owners of the clean lists out before resetting
			 * the clean lists.
			 */
			rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
			ASSERT(rval == NULL);
			goto retry;
		}

		dvar = free;
		new_free = dvar->dtdv_next;
	} while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);

	/*
	 * We have now allocated a new chunk.  We copy the tuple keys into the
	 * tuple array and copy any referenced key data into the data space
	 * following the tuple array.  As we do this, we relocate dttk_value
	 * in the final tuple to point to the key data address in the chunk.
	 */
	kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
	dvar->dtdv_data = (void *)(kdata + ksize);
	dvar->dtdv_tuple.dtt_nkeys = nkeys;

	for (i = 0; i < nkeys; i++) {
		dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
		size_t kesize = key[i].dttk_size;

		if (kesize != 0) {
			dtrace_bcopy(
			    (const void *)(uintptr_t)key[i].dttk_value,
			    (void *)kdata, kesize);
			dkey->dttk_value = kdata;
			kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
		} else {
			dkey->dttk_value = key[i].dttk_value;
		}

		dkey->dttk_size = kesize;
	}

	ASSERT(dvar->dtdv_hashval == 0);
	dvar->dtdv_hashval = hashval;
	dvar->dtdv_next = start;

	if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
		return (dvar);

	/*
	 * The cas has failed.  Either another CPU is adding an element to
	 * this hash chain, or another CPU is deleting an element from this
	 * hash chain.  The simplest way to deal with both of these cases
	 * (though not necessarily the most efficient) is to free our
	 * allocated block and tail-call ourselves.  Note that the free is
	 * to the dirty list and _not_ to the free list.  This is to prevent
	 * races with allocators, above.
	 */
	dvar->dtdv_hashval = 0;

	dtrace_membar_producer();

	do {
		free = dcpu->dtdsc_dirty;
		dvar->dtdv_next = free;
	} while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);

	return (dtrace_dynvar(dstate, nkeys, key, dsize, op));
}

static void
dtrace_aggregate_min(uint64_t *oval, uint64_t nval)
{
	if (nval < *oval)
		*oval = nval;
}

static void
dtrace_aggregate_max(uint64_t *oval, uint64_t nval)
{
	if (nval > *oval)
		*oval = nval;
}

static void
dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval)
{
	int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
	int64_t val = (int64_t)nval;

	if (val < 0) {
		for (i = 0; i < zero; i++) {
			if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
				quanta[i]++;
				return;
			}
		}
	} else {
		for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
			if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
				quanta[i - 1]++;
				return;
			}
		}

		quanta[DTRACE_QUANTIZE_NBUCKETS - 1]++;
		return;
	}

	ASSERT(0);
}

static void
dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval)
{
	uint64_t arg = *lquanta++;
	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
	int32_t val = (int32_t)nval, level;

	ASSERT(step != 0);
	ASSERT(levels != 0);

	if (val < base) {
		/*
		 * This is an underflow.
		 */
		lquanta[0]++;
		return;
	}

	level = (val - base) / step;

	if (level < levels) {
		lquanta[level + 1]++;
		return;
	}

	/*
	 * This is an overflow.
	 */
	lquanta[levels + 1]++;
}

static void
dtrace_aggregate_avg(uint64_t *data, uint64_t nval)
{
	data[0]++;
	data[1] += nval;
}

/*ARGSUSED*/
static void
dtrace_aggregate_count(uint64_t *oval, uint64_t nval)
{
	*oval = *oval + 1;
}

/*ARGSUSED*/
static void
dtrace_aggregate_sum(uint64_t *oval, uint64_t nval)
{
	*oval += nval;
}

/*
 * Aggregate given the tuple in the principal data buffer, and the aggregating
 * action denoted by the specified dtrace_aggregation_t.  The aggregation
 * buffer is specified as the buf parameter.  This routine does not return
 * failure; if there is no space in the aggregation buffer, the data will be
 * dropped, and a corresponding counter incremented.
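 *
 * For orientation:  every firing of an aggregating D clause lands here.
 * For example, a clause like
 *
 *	syscall:::entry { @counts[execname] = count(); }
 *
 * arrives with the execname tuple in the principal buffer and with
 * dtrace_aggregate_count() (above) as the aggregating action.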
 */
static void
dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
    intptr_t offset, dtrace_buffer_t *buf, uint64_t arg)
{
	dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
	uint32_t i, ndx, size, fsize;
	uint32_t align = sizeof (uint64_t) - 1;
	dtrace_aggbuffer_t *agb;
	dtrace_aggkey_t *key;
	uint32_t hashval = 0;
	caddr_t tomax, data, kdata;
	dtrace_actkind_t action;
	uintptr_t offs;

	if (buf == NULL)
		return;

	action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
	size = rec->dtrd_offset - agg->dtag_base;
	fsize = size + rec->dtrd_size;

	ASSERT(dbuf->dtb_tomax != NULL);
	data = dbuf->dtb_tomax + offset + agg->dtag_base;

	if ((tomax = buf->dtb_tomax) == NULL) {
		dtrace_buffer_drop(buf);
		return;
	}

	/*
	 * The metastructure is always at the bottom of the buffer.
	 */
	agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
	    sizeof (dtrace_aggbuffer_t));

	if (buf->dtb_offset == 0) {
		/*
		 * We just kludge up approximately 1/8th of the size to be
		 * buckets.  If this guess ends up being routinely
		 * off-the-mark, we may need to dynamically readjust this
		 * based on past performance.
		 */
		uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);

		if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
		    (uintptr_t)tomax || hashsize == 0) {
			/*
			 * We've been given a ludicrously small buffer;
			 * increment our drop count and leave.
			 */
			dtrace_buffer_drop(buf);
			return;
		}

		/*
		 * And now, a pathetic attempt to try to get an odd (or
		 * perchance, a prime) hash size for better hash distribution.
		 */
		if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
			hashsize -= DTRACE_AGGHASHSIZE_SLEW;

		agb->dtagb_hashsize = hashsize;
		agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
		    agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
		agb->dtagb_free = (uintptr_t)agb->dtagb_hash;

		for (i = 0; i < agb->dtagb_hashsize; i++)
			agb->dtagb_hash[i] = NULL;
	}

	/*
	 * Calculate the hash value based on the key.  Note that we _don't_
	 * include the aggid in the hashing (but we will store it as part of
	 * the key).  The hashing algorithm is Bob Jenkins' "One-at-a-time"
	 * algorithm: a simple, quick algorithm that has no known funnels, and
	 * gets good distribution in practice.  The efficacy of the hashing
	 * algorithm (and a comparison with other algorithms) may be found by
	 * running the ::dtrace_aggstat MDB dcmd.
	 */
	for (i = sizeof (dtrace_aggid_t); i < size; i++) {
		hashval += data[i];
		hashval += (hashval << 10);
		hashval ^= (hashval >> 6);
	}

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	/*
	 * Yes, the divide here is expensive.  If the cycle count here becomes
	 * prohibitive, we can do tricks to eliminate it.
	 */
	ndx = hashval % agb->dtagb_hashsize;

	for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
		ASSERT((caddr_t)key >= tomax);
		ASSERT((caddr_t)key < tomax + buf->dtb_size);

		if (hashval != key->dtak_hashval || key->dtak_size != size)
			continue;

		kdata = key->dtak_data;
		ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);

		for (i = sizeof (dtrace_aggid_t); i < size; i++) {
			if (kdata[i] != data[i])
				goto next;
		}

		if (action != key->dtak_action) {
			/*
			 * We are aggregating on the same value in the same
			 * aggregation with two different aggregating actions.
			 * (This should have been picked up in the compiler,
			 * so we may be dealing with errant or devious DIF.)
			 * This is an error condition; we indicate as much,
			 * and return.
			 */
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return;
		}

		/*
		 * This is a hit:  we need to apply the aggregator to
		 * the value at this key.
		 */
		agg->dtag_aggregate((uint64_t *)(kdata + size), arg);
		return;
next:
		continue;
	}

	/*
	 * We didn't find it.  We need to allocate some zero-filled space,
	 * link it into the hash table appropriately, and apply the aggregator
	 * to the (zero-filled) value.
	 */
	offs = buf->dtb_offset;
	while (offs & (align - 1))
		offs += sizeof (uint32_t);

	/*
	 * If we don't have enough room to both allocate a new key _and_
	 * its associated data, increment the drop count and return.
	 */
	if ((uintptr_t)tomax + offs + fsize >
	    agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
		dtrace_buffer_drop(buf);
		return;
	}

	/*CONSTCOND*/
	ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
	key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
	agb->dtagb_free -= sizeof (dtrace_aggkey_t);

	key->dtak_data = kdata = tomax + offs;
	buf->dtb_offset = offs + fsize;

	/*
	 * Now copy the data across.
	 */
	*((dtrace_aggid_t *)kdata) = agg->dtag_id;

	for (i = sizeof (dtrace_aggid_t); i < size; i++)
		kdata[i] = data[i];

	for (i = size; i < fsize; i++)
		kdata[i] = 0;

	key->dtak_hashval = hashval;
	key->dtak_size = size;
	key->dtak_action = action;
	key->dtak_next = agb->dtagb_hash[ndx];
	agb->dtagb_hash[ndx] = key;

	/*
	 * Finally, apply the aggregator.
	 */
	*((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
	agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), arg);
}

/*
 * Given consumer state, this routine finds a speculation in the INACTIVE
 * state and transitions it into the ACTIVE state.  If there is no speculation
 * in the INACTIVE state, 0 is returned.  In this case, no error counter is
 * incremented -- it is up to the caller to take appropriate action.
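 *
 * The value returned here is what the D speculation() subroutine yields to
 * a clause such as (a sketch):
 *
 *	syscall::open:entry { self->spec = speculation(); }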
 */
static int
dtrace_speculation(dtrace_state_t *state)
{
	int i = 0;
	dtrace_speculation_state_t current;
	uint32_t *stat = &state->dts_speculations_unavail, count;

	while (i < state->dts_nspeculations) {
		dtrace_speculation_t *spec = &state->dts_speculations[i];

		current = spec->dtsp_state;

		if (current != DTRACESPEC_INACTIVE) {
			if (current == DTRACESPEC_COMMITTINGMANY ||
			    current == DTRACESPEC_COMMITTING ||
			    current == DTRACESPEC_DISCARDING)
				stat = &state->dts_speculations_busy;
			i++;
			continue;
		}

		if (dtrace_cas32((uint32_t *)&spec->dtsp_state,
		    current, DTRACESPEC_ACTIVE) == current)
			return (i + 1);
	}

	/*
	 * We couldn't find a speculation.  If we found as much as a single
	 * busy speculation buffer, we'll attribute this failure as "busy"
	 * instead of "unavail".
	 */
	do {
		count = *stat;
	} while (dtrace_cas32(stat, count, count + 1) != count);

	return (0);
}

/*
 * This routine commits an active speculation.  If the specified speculation
 * is not in a valid state to perform a commit(), this routine will silently
 * do nothing.  The state of the specified speculation is transitioned
 * according to the state transition diagram outlined in <sys/dtrace_impl.h>.
 */
static void
dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
    dtrace_specid_t which)
{
	dtrace_speculation_t *spec;
	dtrace_buffer_t *src, *dest;
	uintptr_t daddr, saddr, dlimit;
	dtrace_speculation_state_t current, new;
	intptr_t offs;

	if (which == 0)
		return;

	if (which > state->dts_nspeculations) {
		cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
		return;
	}

	spec = &state->dts_speculations[which - 1];
	src = &spec->dtsp_buffer[cpu];
	dest = &state->dts_buffer[cpu];

	do {
		current = spec->dtsp_state;

		if (current == DTRACESPEC_COMMITTINGMANY)
			break;

		switch (current) {
		case DTRACESPEC_INACTIVE:
		case DTRACESPEC_DISCARDING:
			return;

		case DTRACESPEC_COMMITTING:
			/*
			 * This is only possible if we are (a) commit()'ing
			 * without having done a prior speculate() on this CPU
			 * and (b) racing with another commit() on a different
			 * CPU.  There's nothing to do -- we just assert that
			 * our offset is 0.
			 */
			ASSERT(src->dtb_offset == 0);
			return;

		case DTRACESPEC_ACTIVE:
			new = DTRACESPEC_COMMITTING;
			break;

		case DTRACESPEC_ACTIVEONE:
			/*
			 * This speculation is active on one CPU.  If our
			 * buffer offset is non-zero, we know that the one CPU
			 * must be us.  Otherwise, we are committing on a
			 * different CPU from the speculate(), and we must
			 * rely on being asynchronously cleaned.
			 */
			if (src->dtb_offset != 0) {
				new = DTRACESPEC_COMMITTING;
				break;
			}
			/*FALLTHROUGH*/

		case DTRACESPEC_ACTIVEMANY:
			new = DTRACESPEC_COMMITTINGMANY;
			break;

		default:
			ASSERT(0);
		}
	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
	    current, new) != current);

	/*
	 * We have set the state to indicate that we are committing this
	 * speculation.  Now reserve the necessary space in the destination
	 * buffer.
	 */
	if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
	    sizeof (uint64_t), state, NULL)) < 0) {
		dtrace_buffer_drop(dest);
		goto out;
	}

	/*
	 * We have the space; copy the buffer across.  (Note that this is a
	 * highly suboptimal bcopy(); in the unlikely event that this becomes
	 * a serious performance issue, a high-performance DTrace-specific
	 * bcopy() should obviously be invented.)
	 */
	daddr = (uintptr_t)dest->dtb_tomax + offs;
	dlimit = daddr + src->dtb_offset;
	saddr = (uintptr_t)src->dtb_tomax;

	/*
	 * First, the aligned portion.
	 */
	while (dlimit - daddr >= sizeof (uint64_t)) {
		*((uint64_t *)daddr) = *((uint64_t *)saddr);

		daddr += sizeof (uint64_t);
		saddr += sizeof (uint64_t);
	}

	/*
	 * Now any left-over bit...
	 */
	while (dlimit - daddr)
		*((uint8_t *)daddr++) = *((uint8_t *)saddr++);

	/*
	 * Finally, commit the reserved space in the destination buffer.
	 */
	dest->dtb_offset = offs + src->dtb_offset;

out:
	/*
	 * If we're lucky enough to be the only active CPU on this speculation
	 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
	 */
	if (current == DTRACESPEC_ACTIVE ||
	    (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
		uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
		    DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);

		ASSERT(rval == DTRACESPEC_COMMITTING);
	}

	src->dtb_offset = 0;
	src->dtb_xamot_drops += src->dtb_drops;
	src->dtb_drops = 0;
}

/*
 * This routine discards an active speculation.  If the specified speculation
 * is not in a valid state to perform a discard(), this routine will silently
 * do nothing.  The state of the specified speculation is transitioned
 * according to the state transition diagram outlined in <sys/dtrace_impl.h>.
 */
static void
dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu,
    dtrace_specid_t which)
{
	dtrace_speculation_t *spec;
	dtrace_speculation_state_t current, new;
	dtrace_buffer_t *buf;

	if (which == 0)
		return;

	if (which > state->dts_nspeculations) {
		cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
		return;
	}

	spec = &state->dts_speculations[which - 1];
	buf = &spec->dtsp_buffer[cpu];

	do {
		current = spec->dtsp_state;

		switch (current) {
		case DTRACESPEC_INACTIVE:
		case DTRACESPEC_COMMITTINGMANY:
		case DTRACESPEC_COMMITTING:
		case DTRACESPEC_DISCARDING:
			return;

		case DTRACESPEC_ACTIVE:
		case DTRACESPEC_ACTIVEMANY:
			new = DTRACESPEC_DISCARDING;
			break;

		case DTRACESPEC_ACTIVEONE:
			if (buf->dtb_offset != 0) {
				new = DTRACESPEC_INACTIVE;
			} else {
				new = DTRACESPEC_DISCARDING;
			}
			break;

		default:
			ASSERT(0);
		}
	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
	    current, new) != current);

	buf->dtb_offset = 0;
	buf->dtb_drops = 0;
}

/*
 * Note:  not called from probe context.  This function is called
 * asynchronously from cross call context to clean any speculations that
 * are in the COMMITTINGMANY or DISCARDING states.  These speculations may
 * not be transitioned back to the INACTIVE state until all CPUs have
 * cleaned the speculation.
1859 */ 1860 static void 1861 dtrace_speculation_clean_here(dtrace_state_t *state) 1862 { 1863 dtrace_icookie_t cookie; 1864 processorid_t cpu = CPU->cpu_id; 1865 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 1866 dtrace_specid_t i; 1867 1868 cookie = dtrace_interrupt_disable(); 1869 1870 if (dest->dtb_tomax == NULL) { 1871 dtrace_interrupt_enable(cookie); 1872 return; 1873 } 1874 1875 for (i = 0; i < state->dts_nspeculations; i++) { 1876 dtrace_speculation_t *spec = &state->dts_speculations[i]; 1877 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 1878 1879 if (src->dtb_tomax == NULL) 1880 continue; 1881 1882 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 1883 src->dtb_offset = 0; 1884 continue; 1885 } 1886 1887 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 1888 continue; 1889 1890 if (src->dtb_offset == 0) 1891 continue; 1892 1893 dtrace_speculation_commit(state, cpu, i + 1); 1894 } 1895 1896 dtrace_interrupt_enable(cookie); 1897 } 1898 1899 /* 1900 * Note: not called from probe context. This function is called 1901 * asynchronously (and at a regular interval) to clean any speculations that 1902 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there 1903 * is work to be done, it cross calls all CPUs to perform that work; 1904 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to 1905 * the INACTIVE state until they have been cleaned by all CPUs. 1906 */ 1907 static void 1908 dtrace_speculation_clean(dtrace_state_t *state) 1909 { 1910 int work = 0, rv; 1911 dtrace_specid_t i; 1912 1913 for (i = 0; i < state->dts_nspeculations; i++) { 1914 dtrace_speculation_t *spec = &state->dts_speculations[i]; 1915 1916 ASSERT(!spec->dtsp_cleaning); 1917 1918 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 1919 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 1920 continue; 1921 1922 work++; 1923 spec->dtsp_cleaning = 1; 1924 } 1925 1926 if (!work) 1927 return; 1928 1929 dtrace_xcall(DTRACE_CPUALL, 1930 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 1931 1932 /* 1933 * We now know that all CPUs have committed or discarded their 1934 * speculation buffers, as appropriate. We can now set the state 1935 * to inactive. 1936 */ 1937 for (i = 0; i < state->dts_nspeculations; i++) { 1938 dtrace_speculation_t *spec = &state->dts_speculations[i]; 1939 dtrace_speculation_state_t current, new; 1940 1941 if (!spec->dtsp_cleaning) 1942 continue; 1943 1944 current = spec->dtsp_state; 1945 ASSERT(current == DTRACESPEC_DISCARDING || 1946 current == DTRACESPEC_COMMITTINGMANY); 1947 1948 new = DTRACESPEC_INACTIVE; 1949 1950 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 1951 ASSERT(rv == current); 1952 spec->dtsp_cleaning = 0; 1953 } 1954 } 1955 1956 /* 1957 * Called as part of a speculate() to get the speculative buffer associated 1958 * with a given speculation. Returns NULL if the specified speculation is not 1959 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 1960 * the active CPU is not the specified CPU -- the speculation will be 1961 * atomically transitioned into the ACTIVEMANY state.
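 *
 * (Editorial aside: the "is the one CPU us?" test below is simply a
 * check of our own per-CPU speculative buffer --
 *
 *	if (buf->dtb_offset != 0)
 *		return (buf);	we have speculated here already
 *	new = DTRACESPEC_ACTIVEMANY;	another CPU was first
 *
 * -- because only the CPU that performed the original speculate()
 * can see a non-zero offset in its own buffer.)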
1962 */ 1963 static dtrace_buffer_t * 1964 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 1965 dtrace_specid_t which) 1966 { 1967 dtrace_speculation_t *spec; 1968 dtrace_speculation_state_t current, new; 1969 dtrace_buffer_t *buf; 1970 1971 if (which == 0) 1972 return (NULL); 1973 1974 if (which > state->dts_nspeculations) { 1975 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 1976 return (NULL); 1977 } 1978 1979 spec = &state->dts_speculations[which - 1]; 1980 buf = &spec->dtsp_buffer[cpuid]; 1981 1982 do { 1983 current = spec->dtsp_state; 1984 1985 switch (current) { 1986 case DTRACESPEC_INACTIVE: 1987 case DTRACESPEC_COMMITTINGMANY: 1988 case DTRACESPEC_DISCARDING: 1989 return (NULL); 1990 1991 case DTRACESPEC_COMMITTING: 1992 ASSERT(buf->dtb_offset == 0); 1993 return (NULL); 1994 1995 case DTRACESPEC_ACTIVEONE: 1996 /* 1997 * This speculation is currently active on one CPU. 1998 * Check the offset in the buffer; if it's non-zero, 1999 * that CPU must be us (and we leave the state alone). 2000 * If it's zero, assume that we're starting on a new 2001 * CPU -- and change the state to indicate that the 2002 * speculation is active on more than one CPU. 2003 */ 2004 if (buf->dtb_offset != 0) 2005 return (buf); 2006 2007 new = DTRACESPEC_ACTIVEMANY; 2008 break; 2009 2010 case DTRACESPEC_ACTIVEMANY: 2011 return (buf); 2012 2013 case DTRACESPEC_ACTIVE: 2014 new = DTRACESPEC_ACTIVEONE; 2015 break; 2016 2017 default: 2018 ASSERT(0); 2019 } 2020 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2021 current, new) != current); 2022 2023 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 2024 return (buf); 2025 } 2026 2027 /* 2028 * This function implements the DIF emulator's variable lookups. The emulator 2029 * passes a reserved variable identifier and optional built-in array index. 2030 */ 2031 static uint64_t 2032 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 2033 uint64_t i) 2034 { 2035 /* 2036 * If we're accessing one of the uncached arguments, we'll turn this 2037 * into a reference in the args array. 2038 */ 2039 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 2040 i = v - DIF_VAR_ARG0; 2041 v = DIF_VAR_ARGS; 2042 } 2043 2044 switch (v) { 2045 case DIF_VAR_ARGS: 2046 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 2047 if (i >= sizeof (mstate->dtms_arg) / 2048 sizeof (mstate->dtms_arg[0])) { 2049 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2050 dtrace_provider_t *pv; 2051 uint64_t val; 2052 2053 pv = mstate->dtms_probe->dtpr_provider; 2054 if (pv->dtpv_pops.dtps_getargval != NULL) 2055 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 2056 mstate->dtms_probe->dtpr_id, 2057 mstate->dtms_probe->dtpr_arg, i, aframes); 2058 else 2059 val = dtrace_getarg(i, aframes); 2060 2061 /* 2062 * This is regrettably required to keep the compiler 2063 * from tail-optimizing the call to dtrace_getarg(). 2064 * The condition always evaluates to true, but the 2065 * compiler has no way of figuring that out a priori. 2066 * (None of this would be necessary if the compiler 2067 * could be relied upon to _always_ tail-optimize 2068 * the call to dtrace_getarg() -- but it can't.) 
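 *
 * (Editorial aside: reduced to its essence, the idiom is
 *
 *	val = dtrace_getarg(i, aframes);
 *	if (condition_that_is_always_true)
 *		return (val);
 *	ASSERT(0);
 *
 * The test that the compiler cannot evaluate a priori keeps this
 * frame on the stack, preserving the frame count upon which
 * dtrace_getarg() depends.)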
2069 */ 2070 if (mstate->dtms_probe != NULL) 2071 return (val); 2072 2073 ASSERT(0); 2074 } 2075 2076 return (mstate->dtms_arg[i]); 2077 2078 case DIF_VAR_UREGS: { 2079 klwp_t *lwp; 2080 2081 if (!dtrace_priv_proc(state)) 2082 return (0); 2083 2084 if ((lwp = curthread->t_lwp) == NULL) { 2085 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2086 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = NULL; 2087 return (0); 2088 } 2089 2090 return (dtrace_getreg(lwp->lwp_regs, i)); 2091 } 2092 2093 case DIF_VAR_CURTHREAD: 2094 if (!dtrace_priv_kernel(state)) 2095 return (0); 2096 return ((uint64_t)(uintptr_t)curthread); 2097 2098 case DIF_VAR_TIMESTAMP: 2099 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 2100 mstate->dtms_timestamp = dtrace_gethrtime(); 2101 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 2102 } 2103 return (mstate->dtms_timestamp); 2104 2105 case DIF_VAR_VTIMESTAMP: 2106 ASSERT(dtrace_vtime_references != 0); 2107 return (curthread->t_dtrace_vtime); 2108 2109 case DIF_VAR_WALLTIMESTAMP: 2110 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 2111 mstate->dtms_walltimestamp = dtrace_gethrestime(); 2112 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 2113 } 2114 return (mstate->dtms_walltimestamp); 2115 2116 case DIF_VAR_IPL: 2117 if (!dtrace_priv_kernel(state)) 2118 return (0); 2119 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 2120 mstate->dtms_ipl = dtrace_getipl(); 2121 mstate->dtms_present |= DTRACE_MSTATE_IPL; 2122 } 2123 return (mstate->dtms_ipl); 2124 2125 case DIF_VAR_EPID: 2126 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 2127 return (mstate->dtms_epid); 2128 2129 case DIF_VAR_ID: 2130 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2131 return (mstate->dtms_probe->dtpr_id); 2132 2133 case DIF_VAR_STACKDEPTH: 2134 if (!dtrace_priv_kernel(state)) 2135 return (0); 2136 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 2137 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2138 2139 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 2140 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 2141 } 2142 return (mstate->dtms_stackdepth); 2143 2144 case DIF_VAR_CALLER: 2145 if (!dtrace_priv_kernel(state)) 2146 return (0); 2147 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 2148 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2149 2150 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 2151 /* 2152 * If this is an unanchored probe, we are 2153 * required to go through the slow path: 2154 * dtrace_caller() only guarantees correct 2155 * results for anchored probes. 2156 */ 2157 pc_t caller[2]; 2158 2159 dtrace_getpcstack(caller, 2, aframes, 2160 (uint32_t *)mstate->dtms_arg[0]); 2161 mstate->dtms_caller = caller[1]; 2162 } else if ((mstate->dtms_caller = 2163 dtrace_caller(aframes)) == -1) { 2164 /* 2165 * We have failed to do this the quick way; 2166 * we must resort to the slower approach of 2167 * calling dtrace_getpcstack(). 
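 *
 * (Editorial aside: like timestamp and stackdepth above,
 * caller is computed at most once per probe firing and
 * memoized in the mstate:
 *
 *	if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) {
 *		mstate->dtms_caller = ...;
 *		mstate->dtms_present |= DTRACE_MSTATE_CALLER;
 *	}
 *	return (mstate->dtms_caller);
 *
 * Repeated references during a single firing therefore see a
 * consistent value.)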
2168 */ 2169 pc_t caller; 2170 2171 dtrace_getpcstack(&caller, 1, aframes, NULL); 2172 mstate->dtms_caller = caller; 2173 } 2174 2175 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 2176 } 2177 return (mstate->dtms_caller); 2178 2179 case DIF_VAR_PROBEPROV: 2180 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2181 return ((uint64_t)(uintptr_t) 2182 mstate->dtms_probe->dtpr_provider->dtpv_name); 2183 2184 case DIF_VAR_PROBEMOD: 2185 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2186 return ((uint64_t)(uintptr_t) 2187 mstate->dtms_probe->dtpr_mod); 2188 2189 case DIF_VAR_PROBEFUNC: 2190 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2191 return ((uint64_t)(uintptr_t) 2192 mstate->dtms_probe->dtpr_func); 2193 2194 case DIF_VAR_PROBENAME: 2195 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2196 return ((uint64_t)(uintptr_t) 2197 mstate->dtms_probe->dtpr_name); 2198 2199 case DIF_VAR_PID: 2200 if (!dtrace_priv_proc(state)) 2201 return (0); 2202 2203 /* 2204 * Note that we are assuming that an unanchored probe is 2205 * always due to a high-level interrupt. (And we're assuming 2206 * that there is only a single high level interrupt.) 2207 */ 2208 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2209 return (pid0.pid_id); 2210 2211 /* 2212 * It is always safe to dereference one's own t_procp pointer: 2213 * it always points to a valid, allocated proc structure. 2214 * Further, it is always safe to dereference the p_pidp member 2215 * of one's own proc structure. (These are truisms because 2216 * threads and processes don't clean up their own state -- 2217 * they leave that task to whoever reaps them.) 2218 */ 2219 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 2220 2221 case DIF_VAR_TID: 2222 /* 2223 * See comment in DIF_VAR_PID. 2224 */ 2225 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2226 return (0); 2227 2228 return ((uint64_t)curthread->t_tid); 2229 2230 case DIF_VAR_EXECNAME: 2231 if (!dtrace_priv_proc(state)) 2232 return (0); 2233 2234 /* 2235 * See comment in DIF_VAR_PID. 2236 */ 2237 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2238 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 2239 2240 /* 2241 * It is always safe to dereference one's own t_procp pointer: 2242 * it always points to a valid, allocated proc structure. 2243 * (This is true because threads don't clean up their own 2244 * state -- they leave that task to whoever reaps them.) 2245 */ 2246 return ((uint64_t)(uintptr_t) 2247 curthread->t_procp->p_user.u_comm); 2248 2249 case DIF_VAR_ZONENAME: 2250 if (!dtrace_priv_proc(state)) 2251 return (0); 2252 2253 /* 2254 * See comment in DIF_VAR_PID. 2255 */ 2256 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2257 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 2258 2259 /* 2260 * It is always safe to dereference one's own t_procp pointer: 2261 * it always points to a valid, allocated proc structure. 2262 * (This is true because threads don't clean up their own 2263 * state -- they leave that task to whoever reaps them.) 2264 */ 2265 return ((uint64_t)(uintptr_t) 2266 curthread->t_procp->p_zone->zone_name); 2267 2268 default: 2269 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2270 return (0); 2271 } 2272 } 2273 2274 /* 2275 * Emulate the execution of DIF subroutines invoked by the call opcode. 2276 * Notice that we don't bother validating the proper number of arguments or 2277 * their types in the tuple stack.
This isn't needed because all argument 2278 * interpretation is safe because of our load safety -- the worst that can 2279 * happen is that a bogus program can obtain bogus results. 2280 */ 2281 static void 2282 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 2283 dtrace_key_t *tupregs, int nargs, 2284 dtrace_mstate_t *mstate, dtrace_state_t *state) 2285 { 2286 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 2287 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 2288 2289 union { 2290 mutex_impl_t mi; 2291 uint64_t mx; 2292 } m; 2293 2294 union { 2295 krwlock_t ri; 2296 uintptr_t rw; 2297 } r; 2298 2299 switch (subr) { 2300 case DIF_SUBR_RAND: 2301 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 2302 break; 2303 2304 case DIF_SUBR_MUTEX_OWNED: 2305 m.mx = dtrace_load64(tupregs[0].dttk_value); 2306 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 2307 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 2308 else 2309 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 2310 break; 2311 2312 case DIF_SUBR_MUTEX_OWNER: 2313 m.mx = dtrace_load64(tupregs[0].dttk_value); 2314 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 2315 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 2316 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 2317 else 2318 regs[rd] = 0; 2319 break; 2320 2321 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 2322 m.mx = dtrace_load64(tupregs[0].dttk_value); 2323 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 2324 break; 2325 2326 case DIF_SUBR_MUTEX_TYPE_SPIN: 2327 m.mx = dtrace_load64(tupregs[0].dttk_value); 2328 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 2329 break; 2330 2331 case DIF_SUBR_RW_READ_HELD: { 2332 uintptr_t tmp; 2333 2334 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2335 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 2336 break; 2337 } 2338 2339 case DIF_SUBR_RW_WRITE_HELD: 2340 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2341 regs[rd] = _RW_WRITE_HELD(&r.ri); 2342 break; 2343 2344 case DIF_SUBR_RW_ISWRITER: 2345 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2346 regs[rd] = _RW_ISWRITER(&r.ri); 2347 break; 2348 2349 case DIF_SUBR_BCOPY: { 2350 /* 2351 * We need to be sure that the destination is in the scratch 2352 * region -- no other region is allowed. 2353 */ 2354 uintptr_t src = tupregs[0].dttk_value; 2355 uintptr_t dest = tupregs[1].dttk_value; 2356 size_t size = tupregs[2].dttk_value; 2357 2358 if (!dtrace_inscratch(dest, size, mstate)) { 2359 *flags |= CPU_DTRACE_BADADDR; 2360 *illval = regs[rd]; 2361 break; 2362 } 2363 2364 dtrace_bcopy((void *)src, (void *)dest, size); 2365 break; 2366 } 2367 2368 case DIF_SUBR_ALLOCA: 2369 case DIF_SUBR_COPYIN: { 2370 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 2371 uint64_t size = 2372 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 2373 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 2374 2375 /* 2376 * This action doesn't require any credential checks since 2377 * probes will not activate in user contexts to which the 2378 * enabling user does not have permissions. 
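 *
 * (Editorial aside: this is the scratch-allocation idiom used by all
 * allocating subroutines, in sketch form --
 *
 *	dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
 *	if (the needed space would pass scratch_base + scratch_size) {
 *		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
 *		regs[rd] = NULL;
 *	} else {
 *		mstate->dtms_scratch_ptr += scratch_size;
 *	}
 *
 * Scratch is private to the current probe firing, so the bump
 * allocation requires no locking.)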
2379 */ 2380 if (mstate->dtms_scratch_ptr + scratch_size > 2381 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2382 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2383 regs[rd] = NULL; 2384 break; 2385 } 2386 2387 if (subr == DIF_SUBR_COPYIN) { 2388 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2389 dtrace_copyin(tupregs[0].dttk_value, dest, size); 2390 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2391 } 2392 2393 mstate->dtms_scratch_ptr += scratch_size; 2394 regs[rd] = dest; 2395 break; 2396 } 2397 2398 case DIF_SUBR_COPYINTO: { 2399 uint64_t size = tupregs[1].dttk_value; 2400 uintptr_t dest = tupregs[2].dttk_value; 2401 2402 /* 2403 * This action doesn't require any credential checks since 2404 * probes will not activate in user contexts to which the 2405 * enabling user does not have permissions. 2406 */ 2407 if (!dtrace_inscratch(dest, size, mstate)) { 2408 *flags |= CPU_DTRACE_BADADDR; 2409 *illval = regs[rd]; 2410 break; 2411 } 2412 2413 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2414 dtrace_copyin(tupregs[0].dttk_value, dest, size); 2415 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2416 break; 2417 } 2418 2419 case DIF_SUBR_COPYINSTR: { 2420 uintptr_t dest = mstate->dtms_scratch_ptr; 2421 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2422 2423 if (nargs > 1 && tupregs[1].dttk_value < size) 2424 size = tupregs[1].dttk_value + 1; 2425 2426 /* 2427 * This action doesn't require any credential checks since 2428 * probes will not activate in user contexts to which the 2429 * enabling user does not have permissions. 2430 */ 2431 if (mstate->dtms_scratch_ptr + size > 2432 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2433 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2434 regs[rd] = NULL; 2435 break; 2436 } 2437 2438 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2439 dtrace_copyinstr(tupregs[0].dttk_value, dest, size); 2440 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2441 2442 ((char *)dest)[size - 1] = '\0'; 2443 mstate->dtms_scratch_ptr += size; 2444 regs[rd] = dest; 2445 break; 2446 } 2447 2448 case DIF_SUBR_MSGSIZE: 2449 case DIF_SUBR_MSGDSIZE: { 2450 uintptr_t baddr = tupregs[0].dttk_value, daddr; 2451 uintptr_t wptr, rptr; 2452 size_t count = 0; 2453 int cont = 0; 2454 2455 while (baddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 2456 wptr = dtrace_loadptr(baddr + 2457 offsetof(mblk_t, b_wptr)); 2458 2459 rptr = dtrace_loadptr(baddr + 2460 offsetof(mblk_t, b_rptr)); 2461 2462 if (wptr < rptr) { 2463 *flags |= CPU_DTRACE_BADADDR; 2464 *illval = tupregs[0].dttk_value; 2465 break; 2466 } 2467 2468 daddr = dtrace_loadptr(baddr + 2469 offsetof(mblk_t, b_datap)); 2470 2471 baddr = dtrace_loadptr(baddr + 2472 offsetof(mblk_t, b_cont)); 2473 2474 /* 2475 * We want to protect against denial-of-service here, 2476 * so we're only going to search the list for 2477 * dtrace_msgdsize_max mblks.
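 *
 * (Editorial aside: the guard below is the standard bounded-walk
 * defense for following an untrusted pointer chain --
 *
 *	if (cont++ > dtrace_msgdsize_max) {
 *		*flags |= CPU_DTRACE_ILLOP;
 *		break;
 *	}
 *
 * -- which terminates the loop even if the b_cont links form a
 * cycle.)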
2478 */ 2479 if (cont++ > dtrace_msgdsize_max) { 2480 *flags |= CPU_DTRACE_ILLOP; 2481 break; 2482 } 2483 2484 if (subr == DIF_SUBR_MSGDSIZE) { 2485 if (dtrace_load8(daddr + 2486 offsetof(dblk_t, db_type)) != M_DATA) 2487 continue; 2488 } 2489 2490 count += wptr - rptr; 2491 } 2492 2493 if (!(*flags & CPU_DTRACE_FAULT)) 2494 regs[rd] = count; 2495 2496 break; 2497 } 2498 2499 case DIF_SUBR_PROGENYOF: { 2500 pid_t pid = tupregs[0].dttk_value; 2501 proc_t *p; 2502 int rval = 0; 2503 2504 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2505 2506 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 2507 if (p->p_pidp->pid_id == pid) { 2508 rval = 1; 2509 break; 2510 } 2511 } 2512 2513 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2514 2515 regs[rd] = rval; 2516 break; 2517 } 2518 2519 case DIF_SUBR_SPECULATION: 2520 regs[rd] = dtrace_speculation(state); 2521 break; 2522 2523 case DIF_SUBR_COPYOUT: { 2524 uintptr_t kaddr = tupregs[0].dttk_value; 2525 uintptr_t uaddr = tupregs[1].dttk_value; 2526 uint64_t size = tupregs[2].dttk_value; 2527 2528 if (!dtrace_destructive_disallow && 2529 dtrace_priv_proc_control(state) && 2530 !dtrace_istoxic(kaddr, size)) { 2531 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2532 dtrace_copyout(kaddr, uaddr, size); 2533 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2534 } 2535 break; 2536 } 2537 2538 case DIF_SUBR_COPYOUTSTR: { 2539 uintptr_t kaddr = tupregs[0].dttk_value; 2540 uintptr_t uaddr = tupregs[1].dttk_value; 2541 uint64_t size = tupregs[2].dttk_value; 2542 2543 if (!dtrace_destructive_disallow && 2544 dtrace_priv_proc_control(state) && 2545 !dtrace_istoxic(kaddr, size)) { 2546 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2547 dtrace_copyoutstr(kaddr, uaddr, size); 2548 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2549 } 2550 break; 2551 } 2552 2553 case DIF_SUBR_STRLEN: 2554 regs[rd] = dtrace_strlen((char *)(uintptr_t) 2555 tupregs[0].dttk_value, 2556 state->dts_options[DTRACEOPT_STRSIZE]); 2557 break; 2558 2559 case DIF_SUBR_STRCHR: 2560 case DIF_SUBR_STRRCHR: { 2561 /* 2562 * We're going to iterate over the string looking for the 2563 * specified character. We will iterate until we have reached 2564 * the string length or we have found the character. If this 2565 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 2566 * of the specified character instead of the first. 2567 */ 2568 uintptr_t addr = tupregs[0].dttk_value; 2569 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 2570 char c, target = (char)tupregs[1].dttk_value; 2571 2572 for (regs[rd] = NULL; addr < limit; addr++) { 2573 if ((c = dtrace_load8(addr)) == target) { 2574 regs[rd] = addr; 2575 2576 if (subr == DIF_SUBR_STRCHR) 2577 break; 2578 } 2579 2580 if (c == '\0') 2581 break; 2582 } 2583 2584 break; 2585 } 2586 2587 case DIF_SUBR_STRSTR: 2588 case DIF_SUBR_INDEX: 2589 case DIF_SUBR_RINDEX: { 2590 /* 2591 * We're going to iterate over the string looking for the 2592 * specified string. We will iterate until we have reached 2593 * the string length or we have found the string. (Yes, this 2594 * is done in the most naive way possible -- but considering 2595 * that the string we're searching for is likely to be 2596 * relatively short, the complexity of Rabin-Karp or similar 2597 * hardly seems merited.) 
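 *
 * (Editorial aside: the scan below is the textbook quadratic
 * search --
 *
 *	for (; addr != limit; addr += inc)
 *		if (dtrace_strncmp(addr, substr, sublen) == 0)
 *			break;
 *
 * -- with both string lengths bounded by the strsize option, which
 * keeps the worst case small in practice.)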
2598 */ 2599 char *addr = (char *)tupregs[0].dttk_value; 2600 char *substr = (char *)tupregs[1].dttk_value; 2601 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2602 size_t len = dtrace_strlen(addr, size); 2603 size_t sublen = dtrace_strlen(substr, size); 2604 char *limit = addr + len, *orig = addr; 2605 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 2606 int inc = 1; 2607 2608 regs[rd] = notfound; 2609 2610 /* 2611 * strstr() and index()/rindex() have similar semantics if 2612 * both strings are the empty string: strstr() returns a 2613 * pointer to the (empty) string, and index() and rindex() 2614 * both return index 0 (regardless of any position argument). 2615 */ 2616 if (sublen == 0 && len == 0) { 2617 if (subr == DIF_SUBR_STRSTR) 2618 regs[rd] = (uintptr_t)addr; 2619 else 2620 regs[rd] = 0; 2621 break; 2622 } 2623 2624 if (subr != DIF_SUBR_STRSTR) { 2625 if (subr == DIF_SUBR_RINDEX) { 2626 limit = orig - 1; 2627 addr += len; 2628 inc = -1; 2629 } 2630 2631 /* 2632 * Both index() and rindex() take an optional position 2633 * argument that denotes the starting position. 2634 */ 2635 if (nargs == 3) { 2636 int64_t pos = (int64_t)tupregs[2].dttk_value; 2637 2638 /* 2639 * If the position argument to index() is 2640 * negative, Perl implicitly clamps it at 2641 * zero. This semantic is a little surprising 2642 * given the special meaning of negative 2643 * positions to similar Perl functions like 2644 * substr(), but it appears to reflect a 2645 * notion that index() can start from a 2646 * negative index and increment its way up to 2647 * the string. Given this notion, Perl's 2648 * rindex() is at least self-consistent in 2649 * that it implicitly clamps positions greater 2650 * than the string length to be the string 2651 * length. Where Perl completely loses 2652 * coherence, however, is when the specified 2653 * substring is the empty string (""). In 2654 * this case, even if the position is 2655 * negative, rindex() returns 0 -- and even if 2656 * the position is greater than the length, 2657 * index() returns the string length. These 2658 * semantics violate the notion that index() 2659 * should never return a value less than the 2660 * specified position and that rindex() should 2661 * never return a value greater than the 2662 * specified position. (One assumes that 2663 * these semantics are artifacts of Perl's 2664 * implementation and not the results of 2665 * deliberate design -- it beggars belief that 2666 * even Larry Wall could desire such oddness.) 2667 * While in the abstract one would wish for 2668 * consistent position semantics across 2669 * substr(), index() and rindex() -- or at the 2670 * very least self-consistent position 2671 * semantics for index() and rindex() -- we 2672 * instead opt to keep with the extant Perl 2673 * semantics, in all their broken glory. (Do 2674 * we have more desire to maintain Perl's 2675 * semantics than Perl does? Probably.)
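 *
 * (To make the emulated semantics concrete, the clamping implemented
 * below works out to the following; this summary is editorial, not
 * part of the original comment:
 *
 *	index(),  pos < 0:	pos is clamped to 0
 *	index(),  pos >= len:	returns len if the substring is
 *				empty, -1 otherwise
 *	rindex(), pos < 0:	returns 0 if the substring is
 *				empty, -1 otherwise
 *	rindex(), pos > len:	pos is clamped to len)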
2676 */ 2677 if (subr == DIF_SUBR_RINDEX) { 2678 if (pos < 0) { 2679 if (sublen == 0) 2680 regs[rd] = 0; 2681 break; 2682 } 2683 2684 if (pos > len) 2685 pos = len; 2686 } else { 2687 if (pos < 0) 2688 pos = 0; 2689 2690 if (pos >= len) { 2691 if (sublen == 0) 2692 regs[rd] = len; 2693 break; 2694 } 2695 } 2696 2697 addr = orig + pos; 2698 } 2699 } 2700 2701 for (regs[rd] = notfound; addr != limit; addr += inc) { 2702 if (dtrace_strncmp(addr, substr, sublen) == 0) { 2703 if (subr != DIF_SUBR_STRSTR) { 2704 /* 2705 * As D index() and rindex() are 2706 * modeled on Perl (and not on awk), 2707 * we return a zero-based (and not a 2708 * one-based) index. (For you Perl 2709 * weenies: no, we're not going to add 2710 * $[ -- and shouldn't you be at a con 2711 * or something?) 2712 */ 2713 regs[rd] = (uintptr_t)(addr - orig); 2714 break; 2715 } 2716 2717 ASSERT(subr == DIF_SUBR_STRSTR); 2718 regs[rd] = (uintptr_t)addr; 2719 break; 2720 } 2721 } 2722 2723 break; 2724 } 2725 2726 case DIF_SUBR_STRTOK: { 2727 uintptr_t addr = tupregs[0].dttk_value; 2728 uintptr_t tokaddr = tupregs[1].dttk_value; 2729 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2730 uintptr_t limit, toklimit = tokaddr + size; 2731 uint8_t c, tokmap[32]; /* 256 / 8 */ 2732 char *dest = (char *)mstate->dtms_scratch_ptr; 2733 int i; 2734 2735 if (mstate->dtms_scratch_ptr + size > 2736 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2737 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2738 regs[rd] = NULL; 2739 break; 2740 } 2741 2742 if (addr == NULL) { 2743 /* 2744 * If the address specified is NULL, we use our saved 2745 * strtok pointer from the mstate. Note that this 2746 * means that the saved strtok pointer is _only_ 2747 * valid within multiple enablings of the same probe -- 2748 * it behaves like an implicit clause-local variable. 2749 */ 2750 addr = mstate->dtms_strtok; 2751 } 2752 2753 /* 2754 * First, zero the token map, and then process the token 2755 * string -- setting a bit in the map for every character 2756 * found in the token string. 2757 */ 2758 for (i = 0; i < sizeof (tokmap); i++) 2759 tokmap[i] = 0; 2760 2761 for (; tokaddr < toklimit; tokaddr++) { 2762 if ((c = dtrace_load8(tokaddr)) == '\0') 2763 break; 2764 2765 ASSERT((c >> 3) < sizeof (tokmap)); 2766 tokmap[c >> 3] |= (1 << (c & 0x7)); 2767 } 2768 2769 for (limit = addr + size; addr < limit; addr++) { 2770 /* 2771 * We're looking for a character that is _not_ contained 2772 * in the token string. 2773 */ 2774 if ((c = dtrace_load8(addr)) == '\0') 2775 break; 2776 2777 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 2778 break; 2779 } 2780 2781 if (c == '\0') { 2782 /* 2783 * We reached the end of the string without finding 2784 * any character that was not in the token string. 2785 * We return NULL in this case, and we set the saved 2786 * address to NULL as well. 2787 */ 2788 regs[rd] = NULL; 2789 mstate->dtms_strtok = NULL; 2790 break; 2791 } 2792 2793 /* 2794 * From here on, we're copying into the destination string. 
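 *
 * (Editorial aside: tokmap is a 256-bit membership set -- one bit per
 * possible byte value -- driven by the usual shift/mask pair:
 *
 *	tokmap[c >> 3] |= (1 << (c & 0x7));	insert c
 *	tokmap[c >> 3] &  (1 << (c & 0x7))	test for c
 *
 * This makes each delimiter test O(1) regardless of the length of
 * the token string.)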
2795 */ 2796 for (i = 0; addr < limit && i < size - 1; addr++) { 2797 if ((c = dtrace_load8(addr)) == '\0') 2798 break; 2799 2800 if (tokmap[c >> 3] & (1 << (c & 0x7))) 2801 break; 2802 2803 ASSERT(i < size); 2804 dest[i++] = c; 2805 } 2806 2807 ASSERT(i < size); 2808 dest[i] = '\0'; 2809 regs[rd] = (uintptr_t)dest; 2810 mstate->dtms_scratch_ptr += size; 2811 mstate->dtms_strtok = addr; 2812 break; 2813 } 2814 2815 case DIF_SUBR_SUBSTR: { 2816 uintptr_t s = tupregs[0].dttk_value; 2817 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2818 char *d = (char *)mstate->dtms_scratch_ptr; 2819 int64_t index = (int64_t)tupregs[1].dttk_value; 2820 int64_t remaining = (int64_t)tupregs[2].dttk_value; 2821 size_t len = dtrace_strlen((char *)s, size); 2822 int64_t i = 0; 2823 2824 if (nargs <= 2) 2825 remaining = (int64_t)size; 2826 2827 if (mstate->dtms_scratch_ptr + size > 2828 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2829 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2830 regs[rd] = NULL; 2831 break; 2832 } 2833 2834 if (index < 0) { 2835 index += len; 2836 2837 if (index < 0 && index + remaining > 0) { 2838 remaining += index; 2839 index = 0; 2840 } 2841 } 2842 2843 if (index >= len || index < 0) 2844 index = len; 2845 2846 for (d[0] = '\0'; remaining > 0; remaining--) { 2847 if ((d[i++] = dtrace_load8(s++ + index)) == '\0') 2848 break; 2849 2850 if (i == size) { 2851 d[i - 1] = '\0'; 2852 break; 2853 } 2854 } 2855 2856 mstate->dtms_scratch_ptr += size; 2857 regs[rd] = (uintptr_t)d; 2858 break; 2859 } 2860 2861 case DIF_SUBR_GETMAJOR: 2862 #ifdef _LP64 2863 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 2864 #else 2865 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 2866 #endif 2867 break; 2868 2869 case DIF_SUBR_GETMINOR: 2870 #ifdef _LP64 2871 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 2872 #else 2873 regs[rd] = tupregs[0].dttk_value & MAXMIN; 2874 #endif 2875 break; 2876 2877 case DIF_SUBR_DDI_PATHNAME: { 2878 /* 2879 * This one is a galactic mess. We are going to roughly 2880 * emulate ddi_pathname(), but it's made more complicated 2881 * by the fact that we (a) want to include the minor name and 2882 * (b) must proceed iteratively instead of recursively. 2883 */ 2884 uintptr_t dest = mstate->dtms_scratch_ptr; 2885 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2886 char *start = (char *)dest, *end = start + size - 1; 2887 uintptr_t daddr = tupregs[0].dttk_value; 2888 int64_t minor = (int64_t)tupregs[1].dttk_value; 2889 char *s; 2890 int i, len, depth = 0; 2891 2892 if (size == 0 || mstate->dtms_scratch_ptr + size > 2893 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2894 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2895 regs[rd] = NULL; 2896 break; 2897 } 2898 2899 *end = '\0'; 2900 2901 /* 2902 * We want to have a name for the minor. In order to do this, 2903 * we need to walk the minor list from the devinfo. We want 2904 * to be sure that we don't infinitely walk a circular list, 2905 * so we check for circularity by sending a scout pointer 2906 * ahead two elements for every element that we iterate over; 2907 * if the list is circular, these will ultimately point to the 2908 * same element. You may recognize this little trick as the 2909 * answer to a stupid interview question -- one that always 2910 * seems to be asked by those who had to have it laboriously 2911 * explained to them, and who can't even concisely describe 2912 * the conditions under which one would be forced to resort to 2913 * this technique. 
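 * (For the record, the trick in sketch form, with all load safety
 * elided:
 *
 *	slow = head; fast = slow->next;
 *	while (slow != NULL) {
 *		slow = slow->next;
 *		fast = fast->next->next;	two steps per iteration
 *		if (fast == slow)
 *			return (cycle);
 *	}
 *
 * Because the fast pointer gains one element per iteration, the two
 * pointers must eventually coincide if a cycle exists.)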
Needless to say, those conditions are 2914 * found here -- and probably only here. Is this the only 2915 * use of this infamous trick in shipping, production code? 2916 * If it isn't, it probably should be... 2917 */ 2918 if (minor != -1) { 2919 uintptr_t maddr = dtrace_loadptr(daddr + 2920 offsetof(struct dev_info, devi_minor)); 2921 2922 uintptr_t next = offsetof(struct ddi_minor_data, next); 2923 uintptr_t name = offsetof(struct ddi_minor_data, 2924 d_minor) + offsetof(struct ddi_minor, name); 2925 uintptr_t dev = offsetof(struct ddi_minor_data, 2926 d_minor) + offsetof(struct ddi_minor, dev); 2927 uintptr_t scout; 2928 2929 if (maddr != NULL) 2930 scout = dtrace_loadptr(maddr + next); 2931 2932 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 2933 uint64_t m; 2934 #ifdef _LP64 2935 m = dtrace_load64(maddr + dev) & MAXMIN64; 2936 #else 2937 m = dtrace_load32(maddr + dev) & MAXMIN; 2938 #endif 2939 if (m != minor) { 2940 maddr = dtrace_loadptr(maddr + next); 2941 2942 if (scout == NULL) 2943 continue; 2944 2945 scout = dtrace_loadptr(scout + next); 2946 2947 if (scout == NULL) 2948 continue; 2949 2950 scout = dtrace_loadptr(scout + next); 2951 2952 if (scout == NULL) 2953 continue; 2954 2955 if (scout == maddr) { 2956 *flags |= CPU_DTRACE_ILLOP; 2957 break; 2958 } 2959 2960 continue; 2961 } 2962 2963 /* 2964 * We have the minor data. Now we need to 2965 * copy the minor's name into the end of the 2966 * pathname. 2967 */ 2968 s = (char *)dtrace_loadptr(maddr + name); 2969 len = dtrace_strlen(s, size); 2970 2971 if (*flags & CPU_DTRACE_FAULT) 2972 break; 2973 2974 if (len != 0) { 2975 if ((end -= (len + 1)) < start) 2976 break; 2977 2978 *end = ':'; 2979 } 2980 2981 for (i = 1; i <= len; i++) 2982 end[i] = dtrace_load8((uintptr_t)s++); 2983 break; 2984 } 2985 } 2986 2987 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 2988 ddi_node_state_t devi_state; 2989 2990 devi_state = dtrace_load32(daddr + 2991 offsetof(struct dev_info, devi_node_state)); 2992 2993 if (*flags & CPU_DTRACE_FAULT) 2994 break; 2995 2996 if (devi_state >= DS_INITIALIZED) { 2997 s = (char *)dtrace_loadptr(daddr + 2998 offsetof(struct dev_info, devi_addr)); 2999 len = dtrace_strlen(s, size); 3000 3001 if (*flags & CPU_DTRACE_FAULT) 3002 break; 3003 3004 if (len != 0) { 3005 if ((end -= (len + 1)) < start) 3006 break; 3007 3008 *end = '@'; 3009 } 3010 3011 for (i = 1; i <= len; i++) 3012 end[i] = dtrace_load8((uintptr_t)s++); 3013 } 3014 3015 /* 3016 * Now for the node name... 3017 */ 3018 s = (char *)dtrace_loadptr(daddr + 3019 offsetof(struct dev_info, devi_node_name)); 3020 3021 daddr = dtrace_loadptr(daddr + 3022 offsetof(struct dev_info, devi_parent)); 3023 3024 /* 3025 * If our parent is NULL (that is, if we're the root 3026 * node), we're going to use the special path 3027 * "devices".
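 *
 * (Editorial aside: because components are prepended as the walk
 * ascends toward the root, the buffer fills right-to-left and the
 * finished string takes the familiar form
 *
 *	/devices/node@addr/node@addr:minor
 *
 * with end left pointing at its first character.)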
3028 */ 3029 if (daddr == NULL) 3030 s = "devices"; 3031 3032 len = dtrace_strlen(s, size); 3033 if (*flags & CPU_DTRACE_FAULT) 3034 break; 3035 3036 if ((end -= (len + 1)) < start) 3037 break; 3038 3039 for (i = 1; i <= len; i++) 3040 end[i] = dtrace_load8((uintptr_t)s++); 3041 *end = '/'; 3042 3043 if (depth++ > dtrace_devdepth_max) { 3044 *flags |= CPU_DTRACE_ILLOP; 3045 break; 3046 } 3047 } 3048 3049 if (end < start) 3050 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3051 3052 if (daddr == NULL) { 3053 regs[rd] = (uintptr_t)end; 3054 mstate->dtms_scratch_ptr += size; 3055 } 3056 3057 break; 3058 } 3059 3060 case DIF_SUBR_STRJOIN: { 3061 char *d = (char *)mstate->dtms_scratch_ptr; 3062 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3063 uintptr_t s1 = tupregs[0].dttk_value; 3064 uintptr_t s2 = tupregs[1].dttk_value; 3065 int i = 0; 3066 3067 if (mstate->dtms_scratch_ptr + size > 3068 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3069 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3070 regs[rd] = NULL; 3071 break; 3072 } 3073 3074 for (;;) { 3075 if (i >= size) { 3076 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3077 regs[rd] = NULL; 3078 break; 3079 } 3080 3081 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 3082 i--; 3083 break; 3084 } 3085 } 3086 3087 for (;;) { 3088 if (i >= size) { 3089 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3090 regs[rd] = NULL; 3091 break; 3092 } 3093 3094 if ((d[i++] = dtrace_load8(s2++)) == '\0') 3095 break; 3096 } 3097 3098 if (i < size) { 3099 mstate->dtms_scratch_ptr += i; 3100 regs[rd] = (uintptr_t)d; 3101 } 3102 3103 break; 3104 } 3105 3106 case DIF_SUBR_LLTOSTR: { 3107 int64_t i = (int64_t)tupregs[0].dttk_value; 3108 int64_t val = i < 0 ? i * -1 : i; 3109 uint64_t size = 22; /* enough room for 2^64 in decimal */ 3110 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 3111 3112 if (mstate->dtms_scratch_ptr + size > 3113 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3114 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3115 regs[rd] = NULL; 3116 break; 3117 } 3118 3119 for (*end-- = '\0'; val; val /= 10) 3120 *end-- = '0' + (val % 10); 3121 3122 if (i == 0) 3123 *end-- = '0'; 3124 3125 if (i < 0) 3126 *end-- = '-'; 3127 3128 regs[rd] = (uintptr_t)end + 1; 3129 mstate->dtms_scratch_ptr += size; 3130 break; 3131 } 3132 3133 case DIF_SUBR_DIRNAME: 3134 case DIF_SUBR_BASENAME: { 3135 char *dest = (char *)mstate->dtms_scratch_ptr; 3136 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3137 uintptr_t src = tupregs[0].dttk_value; 3138 int i, j, len = dtrace_strlen((char *)src, size); 3139 int lastbase = -1, firstbase = -1, lastdir = -1; 3140 int start, end; 3141 3142 if (mstate->dtms_scratch_ptr + size > 3143 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3144 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3145 regs[rd] = NULL; 3146 break; 3147 } 3148 3149 /* 3150 * The basename and dirname for a zero-length string are 3151 * defined to be "." 3152 */ 3153 if (len == 0) { 3154 len = 1; 3155 src = (uintptr_t)"."; 3156 } 3157 3158 /* 3159 * Start from the back of the string, moving back toward the 3160 * front until we see a character that isn't a slash. That 3161 * character is the last character in the basename. 3162 */ 3163 for (i = len - 1; i >= 0; i--) { 3164 if (dtrace_load8(src + i) != '/') 3165 break; 3166 } 3167 3168 if (i >= 0) 3169 lastbase = i; 3170 3171 /* 3172 * Starting from the last character in the basename, move 3173 * towards the front until we find a slash.
The character 3174 * that we processed immediately before that is the first 3175 * character in the basename. 3176 */ 3177 for (; i >= 0; i--) { 3178 if (dtrace_load8(src + i) == '/') 3179 break; 3180 } 3181 3182 if (i >= 0) 3183 firstbase = i + 1; 3184 3185 /* 3186 * Now keep going until we find a non-slash character. That 3187 * character is the last character in the dirname. 3188 */ 3189 for (; i >= 0; i--) { 3190 if (dtrace_load8(src + i) != '/') 3191 break; 3192 } 3193 3194 if (i >= 0) 3195 lastdir = i; 3196 3197 ASSERT(!(lastbase == -1 && firstbase != -1)); 3198 ASSERT(!(firstbase == -1 && lastdir != -1)); 3199 3200 if (lastbase == -1) { 3201 /* 3202 * We didn't find a non-slash character. We know that 3203 * the length is non-zero, so the whole string must be 3204 * slashes. In either the dirname or the basename 3205 * case, we return '/'. 3206 */ 3207 ASSERT(firstbase == -1); 3208 firstbase = lastbase = lastdir = 0; 3209 } 3210 3211 if (firstbase == -1) { 3212 /* 3213 * The entire string consists only of a basename 3214 * component. If we're looking for dirname, we need 3215 * to change our string to be just "."; if we're 3216 * looking for a basename, we'll just set the first 3217 * character of the basename to be 0. 3218 */ 3219 if (subr == DIF_SUBR_DIRNAME) { 3220 ASSERT(lastdir == -1); 3221 src = (uintptr_t)"."; 3222 lastdir = 0; 3223 } else { 3224 firstbase = 0; 3225 } 3226 } 3227 3228 if (subr == DIF_SUBR_DIRNAME) { 3229 if (lastdir == -1) { 3230 /* 3231 * We know that we have a slash in the name -- 3232 * or lastdir would be set to 0, above. And 3233 * because lastdir is -1, we know that this 3234 * slash must be the first character. (That 3235 * is, the full string must be of the form 3236 * "/basename".) In this case, the last 3237 * character of the directory name is 0. 3238 */ 3239 lastdir = 0; 3240 } 3241 3242 start = 0; 3243 end = lastdir; 3244 } else { 3245 ASSERT(subr == DIF_SUBR_BASENAME); 3246 ASSERT(firstbase != -1 && lastbase != -1); 3247 start = firstbase; 3248 end = lastbase; 3249 } 3250 3251 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 3252 dest[j] = dtrace_load8(src + i); 3253 3254 dest[j] = '\0'; 3255 regs[rd] = (uintptr_t)dest; 3256 mstate->dtms_scratch_ptr += size; 3257 break; 3258 } 3259 3260 case DIF_SUBR_CLEANPATH: { 3261 char *dest = (char *)mstate->dtms_scratch_ptr, c; 3262 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3263 uintptr_t src = tupregs[0].dttk_value; 3264 int i = 0, j = 0; 3265 3266 if (mstate->dtms_scratch_ptr + size > 3267 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3268 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3269 regs[rd] = NULL; 3270 break; 3271 } 3272 3273 /* 3274 * Move forward, loading each character. 3275 */ 3276 do { 3277 c = dtrace_load8(src + i++); 3278 next: 3279 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 3280 break; 3281 3282 if (c != '/') { 3283 dest[j++] = c; 3284 continue; 3285 } 3286 3287 c = dtrace_load8(src + i++); 3288 3289 if (c == '/') { 3290 /* 3291 * We have two slashes -- we can just advance 3292 * to the next character. 3293 */ 3294 goto next; 3295 } 3296 3297 if (c != '.') { 3298 /* 3299 * This is not "." and it's not ".." -- we can 3300 * just store the "/" and this character and 3301 * drive on. 3302 */ 3303 dest[j++] = '/'; 3304 dest[j++] = c; 3305 continue; 3306 } 3307 3308 c = dtrace_load8(src + i++); 3309 3310 if (c == '/') { 3311 /* 3312 * This is a "/./" component. 
We're not going 3313 * to store anything in the destination buffer; 3314 * we're just going to go to the next component. 3315 */ 3316 goto next; 3317 } 3318 3319 if (c != '.') { 3320 /* 3321 * This is not ".." -- we can just store the 3322 * "/." and this character and continue 3323 * processing. 3324 */ 3325 dest[j++] = '/'; 3326 dest[j++] = '.'; 3327 dest[j++] = c; 3328 continue; 3329 } 3330 3331 c = dtrace_load8(src + i++); 3332 3333 if (c != '/' && c != '\0') { 3334 /* 3335 * This is not ".." -- it's "..[mumble]". 3336 * We'll store the "/.." and this character 3337 * and continue processing. 3338 */ 3339 dest[j++] = '/'; 3340 dest[j++] = '.'; 3341 dest[j++] = '.'; 3342 dest[j++] = c; 3343 continue; 3344 } 3345 3346 /* 3347 * This is "/../" or "/..\0". We need to back up 3348 * our destination pointer until we find a "/". 3349 */ 3350 i--; 3351 while (j != 0 && dest[--j] != '/') 3352 continue; 3353 3354 if (c == '\0') 3355 dest[++j] = '/'; 3356 } while (c != '\0'); 3357 3358 dest[j] = '\0'; 3359 regs[rd] = (uintptr_t)dest; 3360 mstate->dtms_scratch_ptr += size; 3361 break; 3362 } 3363 } 3364 } 3365 3366 /* 3367 * Emulate the execution of DTrace IR instructions specified by the given 3368 * DIF object. This function is deliberately void of assertions as all of 3369 * the necessary checks are handled by a call to dtrace_difo_validate(). 3370 */ 3371 static uint64_t 3372 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 3373 dtrace_vstate_t *vstate, dtrace_state_t *state) 3374 { 3375 const dif_instr_t *text = difo->dtdo_buf; 3376 const uint_t textlen = difo->dtdo_len; 3377 const char *strtab = difo->dtdo_strtab; 3378 const uint64_t *inttab = difo->dtdo_inttab; 3379 3380 uint64_t rval = 0; 3381 dtrace_statvar_t *svar; 3382 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 3383 dtrace_difv_t *v; 3384 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 3385 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 3386 3387 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 3388 uint64_t regs[DIF_DIR_NREGS]; 3389 3390 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 3391 int64_t cc_r; 3392 uint_t pc = 0, id, opc; 3393 uint8_t ttop = 0; 3394 dif_instr_t instr; 3395 uint_t r1, r2, rd; 3396 3397 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 3398 3399 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 3400 opc = pc; 3401 3402 instr = text[pc++]; 3403 r1 = DIF_INSTR_R1(instr); 3404 r2 = DIF_INSTR_R2(instr); 3405 rd = DIF_INSTR_RD(instr); 3406 3407 switch (DIF_INSTR_OP(instr)) { 3408 case DIF_OP_OR: 3409 regs[rd] = regs[r1] | regs[r2]; 3410 break; 3411 case DIF_OP_XOR: 3412 regs[rd] = regs[r1] ^ regs[r2]; 3413 break; 3414 case DIF_OP_AND: 3415 regs[rd] = regs[r1] & regs[r2]; 3416 break; 3417 case DIF_OP_SLL: 3418 regs[rd] = regs[r1] << regs[r2]; 3419 break; 3420 case DIF_OP_SRL: 3421 regs[rd] = regs[r1] >> regs[r2]; 3422 break; 3423 case DIF_OP_SUB: 3424 regs[rd] = regs[r1] - regs[r2]; 3425 break; 3426 case DIF_OP_ADD: 3427 regs[rd] = regs[r1] + regs[r2]; 3428 break; 3429 case DIF_OP_MUL: 3430 regs[rd] = regs[r1] * regs[r2]; 3431 break; 3432 case DIF_OP_SDIV: 3433 if (regs[r2] == 0) { 3434 regs[rd] = 0; 3435 *flags |= CPU_DTRACE_DIVZERO; 3436 } else { 3437 regs[rd] = (int64_t)regs[r1] / 3438 (int64_t)regs[r2]; 3439 } 3440 break; 3441 3442 case DIF_OP_UDIV: 3443 if (regs[r2] == 0) { 3444 regs[rd] = 0; 3445 *flags |= CPU_DTRACE_DIVZERO; 3446 } else { 3447 regs[rd] = regs[r1] / regs[r2]; 3448 } 3449 break; 3450 3451 case DIF_OP_SREM: 
3452 if (regs[r2] == 0) { 3453 regs[rd] = 0; 3454 *flags |= CPU_DTRACE_DIVZERO; 3455 } else { 3456 regs[rd] = (int64_t)regs[r1] % 3457 (int64_t)regs[r2]; 3458 } 3459 break; 3460 3461 case DIF_OP_UREM: 3462 if (regs[r2] == 0) { 3463 regs[rd] = 0; 3464 *flags |= CPU_DTRACE_DIVZERO; 3465 } else { 3466 regs[rd] = regs[r1] % regs[r2]; 3467 } 3468 break; 3469 3470 case DIF_OP_NOT: 3471 regs[rd] = ~regs[r1]; 3472 break; 3473 case DIF_OP_MOV: 3474 regs[rd] = regs[r1]; 3475 break; 3476 case DIF_OP_CMP: 3477 cc_r = regs[r1] - regs[r2]; 3478 cc_n = cc_r < 0; 3479 cc_z = cc_r == 0; 3480 cc_v = 0; 3481 cc_c = regs[r1] < regs[r2]; 3482 break; 3483 case DIF_OP_TST: 3484 cc_n = cc_v = cc_c = 0; 3485 cc_z = regs[r1] == 0; 3486 break; 3487 case DIF_OP_BA: 3488 pc = DIF_INSTR_LABEL(instr); 3489 break; 3490 case DIF_OP_BE: 3491 if (cc_z) 3492 pc = DIF_INSTR_LABEL(instr); 3493 break; 3494 case DIF_OP_BNE: 3495 if (cc_z == 0) 3496 pc = DIF_INSTR_LABEL(instr); 3497 break; 3498 case DIF_OP_BG: 3499 if ((cc_z | (cc_n ^ cc_v)) == 0) 3500 pc = DIF_INSTR_LABEL(instr); 3501 break; 3502 case DIF_OP_BGU: 3503 if ((cc_c | cc_z) == 0) 3504 pc = DIF_INSTR_LABEL(instr); 3505 break; 3506 case DIF_OP_BGE: 3507 if ((cc_n ^ cc_v) == 0) 3508 pc = DIF_INSTR_LABEL(instr); 3509 break; 3510 case DIF_OP_BGEU: 3511 if (cc_c == 0) 3512 pc = DIF_INSTR_LABEL(instr); 3513 break; 3514 case DIF_OP_BL: 3515 if (cc_n ^ cc_v) 3516 pc = DIF_INSTR_LABEL(instr); 3517 break; 3518 case DIF_OP_BLU: 3519 if (cc_c) 3520 pc = DIF_INSTR_LABEL(instr); 3521 break; 3522 case DIF_OP_BLE: 3523 if (cc_z | (cc_n ^ cc_v)) 3524 pc = DIF_INSTR_LABEL(instr); 3525 break; 3526 case DIF_OP_BLEU: 3527 if (cc_c | cc_z) 3528 pc = DIF_INSTR_LABEL(instr); 3529 break; 3530 case DIF_OP_RLDSB: 3531 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 3532 *flags |= CPU_DTRACE_KPRIV; 3533 *illval = regs[r1]; 3534 break; 3535 } 3536 /*FALLTHROUGH*/ 3537 case DIF_OP_LDSB: 3538 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 3539 break; 3540 case DIF_OP_RLDSH: 3541 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 3542 *flags |= CPU_DTRACE_KPRIV; 3543 *illval = regs[r1]; 3544 break; 3545 } 3546 /*FALLTHROUGH*/ 3547 case DIF_OP_LDSH: 3548 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 3549 break; 3550 case DIF_OP_RLDSW: 3551 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 3552 *flags |= CPU_DTRACE_KPRIV; 3553 *illval = regs[r1]; 3554 break; 3555 } 3556 /*FALLTHROUGH*/ 3557 case DIF_OP_LDSW: 3558 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 3559 break; 3560 case DIF_OP_RLDUB: 3561 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 3562 *flags |= CPU_DTRACE_KPRIV; 3563 *illval = regs[r1]; 3564 break; 3565 } 3566 /*FALLTHROUGH*/ 3567 case DIF_OP_LDUB: 3568 regs[rd] = dtrace_load8(regs[r1]); 3569 break; 3570 case DIF_OP_RLDUH: 3571 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 3572 *flags |= CPU_DTRACE_KPRIV; 3573 *illval = regs[r1]; 3574 break; 3575 } 3576 /*FALLTHROUGH*/ 3577 case DIF_OP_LDUH: 3578 regs[rd] = dtrace_load16(regs[r1]); 3579 break; 3580 case DIF_OP_RLDUW: 3581 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 3582 *flags |= CPU_DTRACE_KPRIV; 3583 *illval = regs[r1]; 3584 break; 3585 } 3586 /*FALLTHROUGH*/ 3587 case DIF_OP_LDUW: 3588 regs[rd] = dtrace_load32(regs[r1]); 3589 break; 3590 case DIF_OP_RLDX: 3591 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) { 3592 *flags |= CPU_DTRACE_KPRIV; 3593 *illval = regs[r1]; 3594 break; 3595 } 3596 /*FALLTHROUGH*/ 3597 case DIF_OP_LDX: 3598 regs[rd] = dtrace_load64(regs[r1]); 3599 break; 3600 case DIF_OP_ULDSB: 
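			/*
			 * (Editorial note: the ULD* variants below are the
			 * user-space analogues of the LD* loads above; they
			 * fetch through the dtrace_fuword*() routines, which
			 * perform the user access with fault protection in
			 * place.)
			 */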
3601 regs[rd] = (int8_t) 3602 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 3603 break; 3604 case DIF_OP_ULDSH: 3605 regs[rd] = (int16_t) 3606 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 3607 break; 3608 case DIF_OP_ULDSW: 3609 regs[rd] = (int32_t) 3610 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 3611 break; 3612 case DIF_OP_ULDUB: 3613 regs[rd] = 3614 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 3615 break; 3616 case DIF_OP_ULDUH: 3617 regs[rd] = 3618 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 3619 break; 3620 case DIF_OP_ULDUW: 3621 regs[rd] = 3622 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 3623 break; 3624 case DIF_OP_ULDX: 3625 regs[rd] = 3626 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 3627 break; 3628 case DIF_OP_RET: 3629 rval = regs[rd]; 3630 break; 3631 case DIF_OP_NOP: 3632 break; 3633 case DIF_OP_SETX: 3634 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 3635 break; 3636 case DIF_OP_SETS: 3637 regs[rd] = (uint64_t)(uintptr_t) 3638 (strtab + DIF_INSTR_STRING(instr)); 3639 break; 3640 case DIF_OP_SCMP: 3641 cc_r = dtrace_strncmp((char *)(uintptr_t)regs[r1], 3642 (char *)(uintptr_t)regs[r2], 3643 state->dts_options[DTRACEOPT_STRSIZE]); 3644 3645 cc_n = cc_r < 0; 3646 cc_z = cc_r == 0; 3647 cc_v = cc_c = 0; 3648 break; 3649 case DIF_OP_LDGA: 3650 regs[rd] = dtrace_dif_variable(mstate, state, 3651 r1, regs[r2]); 3652 break; 3653 case DIF_OP_LDGS: 3654 id = DIF_INSTR_VAR(instr); 3655 3656 if (id >= DIF_VAR_OTHER_UBASE) { 3657 uintptr_t a; 3658 3659 id -= DIF_VAR_OTHER_UBASE; 3660 svar = vstate->dtvs_globals[id]; 3661 ASSERT(svar != NULL); 3662 v = &svar->dtsv_var; 3663 3664 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 3665 regs[rd] = svar->dtsv_data; 3666 break; 3667 } 3668 3669 a = (uintptr_t)svar->dtsv_data; 3670 3671 if (*(uint8_t *)a == UINT8_MAX) { 3672 /* 3673 * If the 0th byte is set to UINT8_MAX 3674 * then this is to be treated as a 3675 * reference to a NULL variable. 3676 */ 3677 regs[rd] = NULL; 3678 } else { 3679 regs[rd] = a + sizeof (uint64_t); 3680 } 3681 3682 break; 3683 } 3684 3685 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 3686 break; 3687 3688 case DIF_OP_STGS: 3689 id = DIF_INSTR_VAR(instr); 3690 3691 ASSERT(id >= DIF_VAR_OTHER_UBASE); 3692 id -= DIF_VAR_OTHER_UBASE; 3693 3694 svar = vstate->dtvs_globals[id]; 3695 ASSERT(svar != NULL); 3696 v = &svar->dtsv_var; 3697 3698 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 3699 uintptr_t a = (uintptr_t)svar->dtsv_data; 3700 3701 ASSERT(a != NULL); 3702 ASSERT(svar->dtsv_size != 0); 3703 3704 if (regs[rd] == NULL) { 3705 *(uint8_t *)a = UINT8_MAX; 3706 break; 3707 } else { 3708 *(uint8_t *)a = 0; 3709 a += sizeof (uint64_t); 3710 } 3711 3712 dtrace_vcopy((void *)(uintptr_t)regs[rd], 3713 (void *)a, &v->dtdv_type); 3714 break; 3715 } 3716 3717 svar->dtsv_data = regs[rd]; 3718 break; 3719 3720 case DIF_OP_LDTA: 3721 /* 3722 * There are no DTrace built-in thread-local arrays at 3723 * present. This opcode is saved for future work. 3724 */ 3725 *flags |= CPU_DTRACE_ILLOP; 3726 regs[rd] = 0; 3727 break; 3728 3729 case DIF_OP_LDLS: 3730 id = DIF_INSTR_VAR(instr); 3731 3732 if (id < DIF_VAR_OTHER_UBASE) { 3733 /* 3734 * For now, this has no meaning. 
3735 */ 3736 regs[rd] = 0; 3737 break; 3738 } 3739 3740 id -= DIF_VAR_OTHER_UBASE; 3741 3742 ASSERT(id < vstate->dtvs_nlocals); 3743 ASSERT(vstate->dtvs_locals != NULL); 3744 3745 svar = vstate->dtvs_locals[id]; 3746 ASSERT(svar != NULL); 3747 v = &svar->dtsv_var; 3748 3749 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 3750 uintptr_t a = (uintptr_t)svar->dtsv_data; 3751 size_t sz = v->dtdv_type.dtdt_size; 3752 3753 sz += sizeof (uint64_t); 3754 ASSERT(svar->dtsv_size == NCPU * sz); 3755 a += CPU->cpu_id * sz; 3756 3757 if (*(uint8_t *)a == UINT8_MAX) { 3758 /* 3759 * If the 0th byte is set to UINT8_MAX 3760 * then this is to be treated as a 3761 * reference to a NULL variable. 3762 */ 3763 regs[rd] = NULL; 3764 } else { 3765 regs[rd] = a + sizeof (uint64_t); 3766 } 3767 3768 break; 3769 } 3770 3771 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 3772 regs[rd] = ((uint64_t *)svar->dtsv_data)[CPU->cpu_id]; 3773 break; 3774 3775 case DIF_OP_STLS: 3776 id = DIF_INSTR_VAR(instr); 3777 3778 ASSERT(id >= DIF_VAR_OTHER_UBASE); 3779 id -= DIF_VAR_OTHER_UBASE; 3780 ASSERT(id < vstate->dtvs_nlocals); 3781 3782 ASSERT(vstate->dtvs_locals != NULL); 3783 svar = vstate->dtvs_locals[id]; 3784 ASSERT(svar != NULL); 3785 v = &svar->dtsv_var; 3786 3787 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 3788 uintptr_t a = (uintptr_t)svar->dtsv_data; 3789 size_t sz = v->dtdv_type.dtdt_size; 3790 3791 sz += sizeof (uint64_t); 3792 ASSERT(svar->dtsv_size == NCPU * sz); 3793 a += CPU->cpu_id * sz; 3794 3795 if (regs[rd] == NULL) { 3796 *(uint8_t *)a = UINT8_MAX; 3797 break; 3798 } else { 3799 *(uint8_t *)a = 0; 3800 a += sizeof (uint64_t); 3801 } 3802 3803 dtrace_vcopy((void *)(uintptr_t)regs[rd], 3804 (void *)a, &v->dtdv_type); 3805 break; 3806 } 3807 3808 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 3809 ((uint64_t *)svar->dtsv_data)[CPU->cpu_id] = regs[rd]; 3810 break; 3811 3812 case DIF_OP_LDTS: { 3813 dtrace_dynvar_t *dvar; 3814 dtrace_key_t *key; 3815 3816 id = DIF_INSTR_VAR(instr); 3817 ASSERT(id >= DIF_VAR_OTHER_UBASE); 3818 id -= DIF_VAR_OTHER_UBASE; 3819 v = &vstate->dtvs_tlocals[id]; 3820 3821 key = &tupregs[DIF_DTR_NREGS]; 3822 key[0].dttk_value = (uint64_t)id; 3823 key[0].dttk_size = 0; 3824 DTRACE_TLS_THRKEY(key[1].dttk_value); 3825 key[1].dttk_size = 0; 3826 3827 dvar = dtrace_dynvar(dstate, 2, key, 3828 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC); 3829 3830 if (dvar == NULL) { 3831 regs[rd] = 0; 3832 break; 3833 } 3834 3835 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 3836 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 3837 } else { 3838 regs[rd] = *((uint64_t *)dvar->dtdv_data); 3839 } 3840 3841 break; 3842 } 3843 3844 case DIF_OP_STTS: { 3845 dtrace_dynvar_t *dvar; 3846 dtrace_key_t *key; 3847 3848 id = DIF_INSTR_VAR(instr); 3849 ASSERT(id >= DIF_VAR_OTHER_UBASE); 3850 id -= DIF_VAR_OTHER_UBASE; 3851 3852 key = &tupregs[DIF_DTR_NREGS]; 3853 key[0].dttk_value = (uint64_t)id; 3854 key[0].dttk_size = 0; 3855 DTRACE_TLS_THRKEY(key[1].dttk_value); 3856 key[1].dttk_size = 0; 3857 v = &vstate->dtvs_tlocals[id]; 3858 3859 dvar = dtrace_dynvar(dstate, 2, key, 3860 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 3861 v->dtdv_type.dtdt_size : sizeof (uint64_t), 3862 regs[rd] ? DTRACE_DYNVAR_ALLOC : 3863 DTRACE_DYNVAR_DEALLOC); 3864 3865 /* 3866 * Given that we're storing to thread-local data, 3867 * we need to flush our predicate cache. 
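 *
 * (Editorial aside: thread-local variables are addressed by the
 * two-element tuple key constructed above --
 *
 *	key[0] = the variable id
 *	key[1] = a per-thread key from DTRACE_TLS_THRKEY()
 *
 * -- so the same dynamic-variable hash serves both these operations
 * and the associative-array loads and stores handled below.)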
3868 */ 3869 curthread->t_predcache = NULL; 3870 3871 if (dvar == NULL) 3872 break; 3873 3874 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 3875 dtrace_vcopy((void *)(uintptr_t)regs[rd], 3876 dvar->dtdv_data, &v->dtdv_type); 3877 } else { 3878 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 3879 } 3880 3881 break; 3882 } 3883 3884 case DIF_OP_SRA: 3885 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 3886 break; 3887 3888 case DIF_OP_CALL: 3889 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 3890 regs, tupregs, ttop, mstate, state); 3891 break; 3892 3893 case DIF_OP_PUSHTR: 3894 if (ttop == DIF_DTR_NREGS) { 3895 *flags |= CPU_DTRACE_TUPOFLOW; 3896 break; 3897 } 3898 3899 if (r1 == DIF_TYPE_STRING) { 3900 /* 3901 * If this is a string type and the size is 0, 3902 * we'll use the system-wide default string 3903 * size. Note that we are _not_ looking at 3904 * the value of the DTRACEOPT_STRSIZE option; 3905 * had this been set, we would expect to have 3906 * a non-zero size value in the "pushtr". 3907 */ 3908 tupregs[ttop].dttk_size = 3909 dtrace_strlen((char *)(uintptr_t)regs[rd], 3910 regs[r2] ? regs[r2] : 3911 dtrace_strsize_default) + 1; 3912 } else { 3913 tupregs[ttop].dttk_size = regs[r2]; 3914 } 3915 3916 tupregs[ttop++].dttk_value = regs[rd]; 3917 break; 3918 3919 case DIF_OP_PUSHTV: 3920 if (ttop == DIF_DTR_NREGS) { 3921 *flags |= CPU_DTRACE_TUPOFLOW; 3922 break; 3923 } 3924 3925 tupregs[ttop].dttk_value = regs[rd]; 3926 tupregs[ttop++].dttk_size = 0; 3927 break; 3928 3929 case DIF_OP_POPTS: 3930 if (ttop != 0) 3931 ttop--; 3932 break; 3933 3934 case DIF_OP_FLUSHTS: 3935 ttop = 0; 3936 break; 3937 3938 case DIF_OP_LDGAA: 3939 case DIF_OP_LDTAA: { 3940 dtrace_dynvar_t *dvar; 3941 dtrace_key_t *key = tupregs; 3942 uint_t nkeys = ttop; 3943 3944 id = DIF_INSTR_VAR(instr); 3945 ASSERT(id >= DIF_VAR_OTHER_UBASE); 3946 id -= DIF_VAR_OTHER_UBASE; 3947 3948 key[nkeys].dttk_value = (uint64_t)id; 3949 key[nkeys++].dttk_size = 0; 3950 3951 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 3952 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 3953 key[nkeys++].dttk_size = 0; 3954 v = &vstate->dtvs_tlocals[id]; 3955 } else { 3956 v = &vstate->dtvs_globals[id]->dtsv_var; 3957 } 3958 3959 dvar = dtrace_dynvar(dstate, nkeys, key, 3960 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 3961 v->dtdv_type.dtdt_size : sizeof (uint64_t), 3962 DTRACE_DYNVAR_NOALLOC); 3963 3964 if (dvar == NULL) { 3965 regs[rd] = 0; 3966 break; 3967 } 3968 3969 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 3970 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 3971 } else { 3972 regs[rd] = *((uint64_t *)dvar->dtdv_data); 3973 } 3974 3975 break; 3976 } 3977 3978 case DIF_OP_STGAA: 3979 case DIF_OP_STTAA: { 3980 dtrace_dynvar_t *dvar; 3981 dtrace_key_t *key = tupregs; 3982 uint_t nkeys = ttop; 3983 3984 id = DIF_INSTR_VAR(instr); 3985 ASSERT(id >= DIF_VAR_OTHER_UBASE); 3986 id -= DIF_VAR_OTHER_UBASE; 3987 3988 key[nkeys].dttk_value = (uint64_t)id; 3989 key[nkeys++].dttk_size = 0; 3990 3991 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 3992 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 3993 key[nkeys++].dttk_size = 0; 3994 v = &vstate->dtvs_tlocals[id]; 3995 } else { 3996 v = &vstate->dtvs_globals[id]->dtsv_var; 3997 } 3998 3999 dvar = dtrace_dynvar(dstate, nkeys, key, 4000 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 4001 v->dtdv_type.dtdt_size : sizeof (uint64_t), 4002 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 4003 DTRACE_DYNVAR_DEALLOC); 4004 4005 if (dvar == NULL) 4006 break; 4007 4008 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4009 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4010 dvar->dtdv_data, &v->dtdv_type); 4011 } else { 4012 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 4013 } 4014 4015 break; 4016 } 4017 4018 case DIF_OP_ALLOCS: { 4019 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 4020 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 4021 4022 if (mstate->dtms_scratch_ptr + size > 4023 mstate->dtms_scratch_base + 4024 mstate->dtms_scratch_size) { 4025 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4026 regs[rd] = NULL; 4027 } else { 4028 dtrace_bzero((void *) 4029 mstate->dtms_scratch_ptr, size); 4030 mstate->dtms_scratch_ptr += size; 4031 regs[rd] = ptr; 4032 } 4033 break; 4034 } 4035 4036 case DIF_OP_COPYS: 4037 if (!dtrace_canstore(regs[rd], regs[r2], 4038 mstate, vstate)) { 4039 *flags |= CPU_DTRACE_BADADDR; 4040 *illval = regs[rd]; 4041 break; 4042 } 4043 4044 dtrace_bcopy((void *)(uintptr_t)regs[r1], 4045 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 4046 break; 4047 4048 case DIF_OP_STB: 4049 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 4050 *flags |= CPU_DTRACE_BADADDR; 4051 *illval = regs[rd]; 4052 break; 4053 } 4054 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 4055 break; 4056 4057 case DIF_OP_STH: 4058 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 4059 *flags |= CPU_DTRACE_BADADDR; 4060 *illval = regs[rd]; 4061 break; 4062 } 4063 if (regs[rd] & 1) { 4064 *flags |= CPU_DTRACE_BADALIGN; 4065 *illval = regs[rd]; 4066 break; 4067 } 4068 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 4069 break; 4070 4071 case DIF_OP_STW: 4072 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 4073 *flags |= CPU_DTRACE_BADADDR; 4074 *illval = regs[rd]; 4075 break; 4076 } 4077 if (regs[rd] & 3) { 4078 *flags |= CPU_DTRACE_BADALIGN; 4079 *illval = regs[rd]; 4080 break; 4081 } 4082 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 4083 break; 4084 4085 case DIF_OP_STX: 4086 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 4087 *flags |= CPU_DTRACE_BADADDR; 4088 *illval = regs[rd]; 4089 break; 4090 } 4091 if (regs[rd] & 7) { 4092 *flags |= CPU_DTRACE_BADALIGN; 4093 *illval = regs[rd]; 4094 break; 4095 } 4096 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 4097 break; 4098 } 4099 } 4100 4101 if (!(*flags & CPU_DTRACE_FAULT)) 4102 return (rval); 4103 4104 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 4105 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 4106 4107 return (0); 4108 } 4109 4110 static void 4111 dtrace_action_breakpoint(dtrace_ecb_t *ecb) 4112 { 4113 dtrace_probe_t *probe = ecb->dte_probe; 4114 dtrace_provider_t *prov = probe->dtpr_provider; 4115 char c[DTRACE_FULLNAMELEN + 80], *str; 4116 char *msg = "dtrace: breakpoint action at probe "; 4117 char *ecbmsg = " (ecb "; 4118 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 4119 uintptr_t val = (uintptr_t)ecb; 4120 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 4121 4122 if (dtrace_destructive_disallow) 4123 return; 4124 4125 /* 4126 * It's impossible to be taking action on the NULL probe. 4127 */ 4128 ASSERT(probe != NULL); 4129 4130 /* 4131 * This is a poor man's (destitute man's?) sprintf(): we want to 4132 * print the provider name, module name, function name and name of 4133 * the probe, along with the hex address of the ECB with the breakpoint 4134 * action -- all of which we must place in the character buffer by 4135 * hand. 
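 *
 * For a 64-bit pointer, the hex conversion below is conceptually
 * just the usual nibble-at-a-time loop (the actual code also
 * suppresses leading zeroes):
 *
 *     for (shift = 60; shift >= 0; shift -= 4)
 *             c[i++] = "0123456789abcdef"[(val >> shift) & 0xf];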
4136 */ 4137 while (*msg != '\0') 4138 c[i++] = *msg++; 4139 4140 for (str = prov->dtpv_name; *str != '\0'; str++) 4141 c[i++] = *str; 4142 c[i++] = ':'; 4143 4144 for (str = probe->dtpr_mod; *str != '\0'; str++) 4145 c[i++] = *str; 4146 c[i++] = ':'; 4147 4148 for (str = probe->dtpr_func; *str != '\0'; str++) 4149 c[i++] = *str; 4150 c[i++] = ':'; 4151 4152 for (str = probe->dtpr_name; *str != '\0'; str++) 4153 c[i++] = *str; 4154 4155 while (*ecbmsg != '\0') 4156 c[i++] = *ecbmsg++; 4157 4158 while (shift >= 0) { 4159 mask = (uintptr_t)0xf << shift; 4160 4161 if (val >= ((uintptr_t)1 << shift)) 4162 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 4163 shift -= 4; 4164 } 4165 4166 c[i++] = ')'; 4167 c[i] = '\0'; 4168 4169 debug_enter(c); 4170 } 4171 4172 static void 4173 dtrace_action_panic(dtrace_ecb_t *ecb) 4174 { 4175 dtrace_probe_t *probe = ecb->dte_probe; 4176 4177 /* 4178 * It's impossible to be taking action on the NULL probe. 4179 */ 4180 ASSERT(probe != NULL); 4181 4182 if (dtrace_destructive_disallow) 4183 return; 4184 4185 if (dtrace_panicked != NULL) 4186 return; 4187 4188 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 4189 return; 4190 4191 /* 4192 * We won the right to panic. (We want to be sure that only one 4193 * thread calls panic() from dtrace_probe(), and that panic() is 4194 * called exactly once.) 4195 */ 4196 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 4197 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 4198 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 4199 } 4200 4201 static void 4202 dtrace_action_raise(uint64_t sig) 4203 { 4204 if (dtrace_destructive_disallow) 4205 return; 4206 4207 if (sig >= NSIG) { 4208 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4209 return; 4210 } 4211 4212 /* 4213 * raise() has a queue depth of 1 -- we ignore all subsequent 4214 * invocations of the raise() action. 4215 */ 4216 if (curthread->t_dtrace_sig == 0) 4217 curthread->t_dtrace_sig = (uint8_t)sig; 4218 4219 curthread->t_sig_check = 1; 4220 aston(curthread); 4221 } 4222 4223 static void 4224 dtrace_action_stop(void) 4225 { 4226 if (dtrace_destructive_disallow) 4227 return; 4228 4229 if (!curthread->t_dtrace_stop) { 4230 curthread->t_dtrace_stop = 1; 4231 curthread->t_sig_check = 1; 4232 aston(curthread); 4233 } 4234 } 4235 4236 static void 4237 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 4238 { 4239 hrtime_t now; 4240 volatile uint16_t *flags; 4241 cpu_t *cpu = CPU; 4242 4243 if (dtrace_destructive_disallow) 4244 return; 4245 4246 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 4247 4248 now = dtrace_gethrtime(); 4249 4250 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 4251 /* 4252 * We need to advance the mark to the current time. 4253 */ 4254 cpu->cpu_dtrace_chillmark = now; 4255 cpu->cpu_dtrace_chilled = 0; 4256 } 4257 4258 /* 4259 * Now check to see if the requested chill time would take us over 4260 * the maximum amount of time allowed in the chill interval. (Or 4261 * worse, if the calculation itself induces overflow.) 4262 */ 4263 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 4264 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 4265 *flags |= CPU_DTRACE_ILLOP; 4266 return; 4267 } 4268 4269 while (dtrace_gethrtime() - now < val) 4270 continue; 4271 4272 /* 4273 * Normally, we assure that the value of the variable "timestamp" does 4274 * not change within an ECB. The presence of chill() represents an 4275 * exception to this rule, however. 
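 *
 * Because blocking is never permissible in probe context, chill()
 * burns the requested interval with the busy-wait seen above; the
 * equivalent idiom in ordinary code would read something like:
 *
 *     hrtime_t start = gethrtime();
 *
 *     while (gethrtime() - start < delay)
 *             continue;
 *
 * Real time demonstrably advances across such a wait, so the
 * cached timestamp is explicitly invalidated below.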
4276 */
4277 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP;
4278 cpu->cpu_dtrace_chilled += val;
4279 }
4280
4281 static void
4282 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state,
4283 uint64_t *buf, uint64_t arg)
4284 {
4285 int nframes = DTRACE_USTACK_NFRAMES(arg);
4286 int strsize = DTRACE_USTACK_STRSIZE(arg);
4287 uint64_t *pcs = &buf[1], *fps;
4288 char *str = (char *)&pcs[nframes];
4289 int size, offs = 0, i, j;
4290 uintptr_t old = mstate->dtms_scratch_ptr, saved;
4291 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
4292 char *sym;
4293
4294 /*
4295 * Should be taking a faster path if string space has not been
4296 * allocated.
4297 */
4298 ASSERT(strsize != 0);
4299
4300 /*
4301 * We will first allocate some temporary space for the frame pointers.
4302 */
4303 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
4304 size = (uintptr_t)fps - mstate->dtms_scratch_ptr +
4305 (nframes * sizeof (uint64_t));
4306
4307 if (mstate->dtms_scratch_ptr + size >
4308 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
4309 /*
4310 * Not enough room for our frame pointers -- need to indicate
4311 * that we ran out of scratch space.
4312 */
4313 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4314 return;
4315 }
4316
4317 mstate->dtms_scratch_ptr += size;
4318 saved = mstate->dtms_scratch_ptr;
4319
4320 /*
4321 * Now get a stack with both program counters and frame pointers.
4322 */
4323 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4324 dtrace_getufpstack(buf, fps, nframes + 1);
4325 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4326
4327 /*
4328 * If that faulted, we're cooked.
4329 */
4330 if (*flags & CPU_DTRACE_FAULT)
4331 goto out;
4332
4333 /*
4334 * Now we want to walk up the stack, calling the USTACK helper. For
4335 * each iteration, we restore the scratch pointer.
4336 */
4337 for (i = 0; i < nframes; i++) {
4338 mstate->dtms_scratch_ptr = saved;
4339
4340 if (offs >= strsize)
4341 break;
4342
4343 sym = (char *)(uintptr_t)dtrace_helper(
4344 DTRACE_HELPER_ACTION_USTACK,
4345 mstate, state, pcs[i], fps[i]);
4346
4347 /*
4348 * If we faulted while running the helper, we're going to
4349 * clear the fault and null out the corresponding string.
4350 */
4351 if (*flags & CPU_DTRACE_FAULT) {
4352 *flags &= ~CPU_DTRACE_FAULT;
4353 str[offs++] = '\0';
4354 continue;
4355 }
4356
4357 if (sym == NULL) {
4358 str[offs++] = '\0';
4359 continue;
4360 }
4361
4362 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4363
4364 /*
4365 * Now copy in the string that the helper returned to us.
4366 */
4367 for (j = 0; offs + j < strsize; j++) {
4368 if ((str[offs + j] = sym[j]) == '\0')
4369 break;
4370 }
4371
4372 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4373
4374 /*
4375 * If we didn't have room for all of the last string, break
4376 * out -- the loop at the end will take care of zeroing the
4377 * remainder of the string table.
4378 */
4379 if (offs + j >= strsize)
4380 break;
4381
4382 offs += j + 1;
4383 }
4384
4385 while (offs < strsize)
4386 str[offs++] = '\0';
4387
4388 out:
4389 mstate->dtms_scratch_ptr = old;
4390 }
4391
4392 /*
4393 * If you're looking for the epicenter of DTrace, you just found it. This
4394 * is the function called by the provider to fire a probe -- from which all
4395 * subsequent probe-context DTrace activity emanates.
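 *
 * A provider fires a probe by calling this function with the probe
 * ID that it obtained from dtrace_probe_create() (or from
 * dtrace_probe_lookup()) and up to five arguments. As a sketch --
 * the structure and member names here are hypothetical -- a
 * provider might do:
 *
 *     dtrace_probe(pp->pp_id, (uintptr_t)arg0, 0, 0, 0, 0);
 *
 * where pp_id is whatever probe ID the provider stashed at
 * probe-creation time.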
4396 */ 4397 void 4398 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 4399 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 4400 { 4401 processorid_t cpuid; 4402 dtrace_icookie_t cookie; 4403 dtrace_probe_t *probe; 4404 dtrace_mstate_t mstate; 4405 dtrace_ecb_t *ecb; 4406 dtrace_action_t *act; 4407 intptr_t offs; 4408 size_t size; 4409 int vtime, onintr; 4410 volatile uint16_t *flags; 4411 hrtime_t now; 4412 4413 /* 4414 * Kick out immediately if this CPU is still being born (in which case 4415 * curthread will be set to -1) 4416 */ 4417 if ((uintptr_t)curthread & 1) 4418 return; 4419 4420 cookie = dtrace_interrupt_disable(); 4421 probe = dtrace_probes[id - 1]; 4422 cpuid = CPU->cpu_id; 4423 onintr = CPU_ON_INTR(CPU); 4424 4425 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 4426 probe->dtpr_predcache == curthread->t_predcache) { 4427 /* 4428 * We have hit in the predicate cache; we know that 4429 * this predicate would evaluate to be false. 4430 */ 4431 dtrace_interrupt_enable(cookie); 4432 return; 4433 } 4434 4435 if (panic_quiesce) { 4436 /* 4437 * We don't trace anything if we're panicking. 4438 */ 4439 dtrace_interrupt_enable(cookie); 4440 return; 4441 } 4442 4443 now = dtrace_gethrtime(); 4444 vtime = dtrace_vtime_references != 0; 4445 4446 if (vtime && curthread->t_dtrace_start) 4447 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 4448 4449 mstate.dtms_probe = probe; 4450 mstate.dtms_arg[0] = arg0; 4451 mstate.dtms_arg[1] = arg1; 4452 mstate.dtms_arg[2] = arg2; 4453 mstate.dtms_arg[3] = arg3; 4454 mstate.dtms_arg[4] = arg4; 4455 4456 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 4457 4458 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 4459 dtrace_predicate_t *pred = ecb->dte_predicate; 4460 dtrace_state_t *state = ecb->dte_state; 4461 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 4462 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 4463 dtrace_vstate_t *vstate = &state->dts_vstate; 4464 dtrace_provider_t *prov = probe->dtpr_provider; 4465 int committed = 0; 4466 caddr_t tomax; 4467 4468 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 4469 *flags &= ~CPU_DTRACE_ERROR; 4470 4471 if (prov == dtrace_provider) { 4472 /* 4473 * If dtrace itself is the provider of this probe, 4474 * we're only going to continue processing the ECB if 4475 * arg0 (the dtrace_state_t) is equal to the ECB's 4476 * creating state. (This prevents disjoint consumers 4477 * from seeing one another's metaprobes.) 4478 */ 4479 if (arg0 != (uint64_t)(uintptr_t)state) 4480 continue; 4481 } 4482 4483 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 4484 /* 4485 * We're not currently active. If our provider isn't 4486 * the dtrace pseudo provider, we're not interested. 4487 */ 4488 if (prov != dtrace_provider) 4489 continue; 4490 4491 /* 4492 * Now we must further check if we are in the BEGIN 4493 * probe. If we are, we will only continue processing 4494 * if we're still in WARMUP -- if one BEGIN enabling 4495 * has invoked the exit() action, we don't want to 4496 * evaluate subsequent BEGIN enablings. 
4497 */
4498 if (probe->dtpr_id == dtrace_probeid_begin &&
4499 state->dts_activity != DTRACE_ACTIVITY_WARMUP) {
4500 ASSERT(state->dts_activity ==
4501 DTRACE_ACTIVITY_DRAINING);
4502 continue;
4503 }
4504 }
4505
4506 if (ecb->dte_cond) {
4507 /*
4508 * If the dte_cond bits indicate that this
4509 * consumer is only allowed to see user-mode firings
4510 * of this probe, call the provider's dtps_usermode()
4511 * entry point to check that the probe was fired
4512 * while in a user context. Skip this ECB if that's
4513 * not the case.
4514 */
4515 if ((ecb->dte_cond & DTRACE_COND_USERMODE) &&
4516 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg,
4517 probe->dtpr_id, probe->dtpr_arg) == 0)
4518 continue;
4519
4520 /*
4521 * This is more subtle than it looks. We have to be
4522 * absolutely certain that CRED() isn't going to
4523 * change out from under us, so it's only legitimate to
4524 * examine that structure if we're in constrained
4525 * situations. Currently, the only time we'll make this
4526 * check is if a non-super-user has enabled the
4527 * profile or syscall providers -- providers that
4528 * allow visibility of all processes. For the
4529 * profile case, the check above will ensure that
4530 * we're examining a user context.
4531 */
4532 if (ecb->dte_cond & DTRACE_COND_OWNER) {
4533 uid_t uid = ecb->dte_state->dts_cred.dcr_uid;
4534 gid_t gid = ecb->dte_state->dts_cred.dcr_gid;
4535 cred_t *cr;
4536 proc_t *proc;
4537
4538 if ((cr = CRED()) == NULL ||
4539 uid != cr->cr_uid ||
4540 uid != cr->cr_ruid ||
4541 uid != cr->cr_suid ||
4542 gid != cr->cr_gid ||
4543 gid != cr->cr_rgid ||
4544 gid != cr->cr_sgid ||
4545 (proc = ttoproc(curthread)) == NULL ||
4546 (proc->p_flag & SNOCD))
4547 continue;
4548
4549 }
4550 }
4551
4552 if (now - state->dts_alive > dtrace_deadman_timeout) {
4553 /*
4554 * We seem to be dead. Unless we (a) have kernel
4555 * destructive permissions, (b) have explicitly enabled
4556 * destructive actions, and (c) destructive actions have
4557 * not been disabled, we're going to transition into
4558 * the KILLED state, from which no further processing
4559 * on this state will be performed.
4560 */
4561 if (!dtrace_priv_kernel_destructive(state) ||
4562 !state->dts_cred.dcr_destructive ||
4563 dtrace_destructive_disallow) {
4564 void *activity = &state->dts_activity;
4565 dtrace_activity_t current;
4566
4567 do {
4568 current = state->dts_activity;
4569 } while (dtrace_cas32(activity, current,
4570 DTRACE_ACTIVITY_KILLED) != current);
4571
4572 continue;
4573 }
4574 }
4575
4576 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
4577 ecb->dte_alignment, state, &mstate)) < 0)
4578 continue;
4579
4580 tomax = buf->dtb_tomax;
4581 ASSERT(tomax != NULL);
4582
4583 if (ecb->dte_size != 0)
4584 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid);
4585
4586 mstate.dtms_epid = ecb->dte_epid;
4587 mstate.dtms_present |= DTRACE_MSTATE_EPID;
4588
4589 if (pred != NULL) {
4590 dtrace_difo_t *dp = pred->dtp_difo;
4591 int rval;
4592
4593 rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
4594
4595 if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
4596 dtrace_cacheid_t cid = probe->dtpr_predcache;
4597
4598 if (cid != DTRACE_CACHEIDNONE && !onintr) {
4599 /*
4600 * Update the predicate cache...
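 *
 * That is, this predicate just evaluated to false for this
 * thread, so we stamp the thread with the predicate's cache ID
 * and the next firing can take the early return at the top of
 * dtrace_probe() without re-emulating the DIF. In sketch form,
 * the two sites that manage the stamp are:
 *
 *     curthread->t_predcache = cid;     (here: cache the verdict)
 *     curthread->t_predcache = NULL;    (DIF_OP_STTS: invalidate)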
4601 */ 4602 ASSERT(cid == pred->dtp_cacheid); 4603 curthread->t_predcache = cid; 4604 } 4605 4606 continue; 4607 } 4608 } 4609 4610 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 4611 act != NULL; act = act->dta_next) { 4612 uint64_t val; 4613 size_t valoffs; 4614 dtrace_difo_t *dp; 4615 dtrace_recdesc_t *rec = &act->dta_rec; 4616 4617 size = rec->dtrd_size; 4618 valoffs = offs + rec->dtrd_offset; 4619 4620 if (DTRACEACT_ISAGG(act->dta_kind)) { 4621 uint64_t v = 0xbad; 4622 dtrace_aggregation_t *agg; 4623 4624 agg = (dtrace_aggregation_t *)act; 4625 4626 if ((dp = act->dta_difo) != NULL) 4627 v = dtrace_dif_emulate(dp, 4628 &mstate, vstate, state); 4629 4630 if (*flags & CPU_DTRACE_ERROR) 4631 continue; 4632 4633 dtrace_aggregate(agg, buf, offs, aggbuf, v); 4634 continue; 4635 } 4636 4637 switch (act->dta_kind) { 4638 case DTRACEACT_STOP: 4639 if (dtrace_priv_proc_destructive(state)) 4640 dtrace_action_stop(); 4641 continue; 4642 4643 case DTRACEACT_BREAKPOINT: 4644 if (dtrace_priv_kernel_destructive(state)) 4645 dtrace_action_breakpoint(ecb); 4646 continue; 4647 4648 case DTRACEACT_PANIC: 4649 if (dtrace_priv_kernel_destructive(state)) 4650 dtrace_action_panic(ecb); 4651 continue; 4652 4653 case DTRACEACT_STACK: 4654 if (!dtrace_priv_kernel(state)) 4655 continue; 4656 4657 dtrace_getpcstack((pc_t *)(tomax + valoffs), 4658 size / sizeof (pc_t), probe->dtpr_aframes, 4659 DTRACE_ANCHORED(probe) ? NULL : 4660 (uint32_t *)arg0); 4661 4662 continue; 4663 4664 case DTRACEACT_JSTACK: 4665 case DTRACEACT_USTACK: 4666 if (!dtrace_priv_proc(state)) 4667 continue; 4668 4669 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 4670 curproc->p_dtrace_helpers != NULL) { 4671 /* 4672 * This is the slow path -- we have 4673 * allocated string space, and we're 4674 * getting the stack of a process that 4675 * has helpers. Call into a separate 4676 * routine to perform this processing. 4677 */ 4678 dtrace_action_ustack(&mstate, state, 4679 (uint64_t *)(tomax + valoffs), 4680 rec->dtrd_arg); 4681 continue; 4682 } 4683 4684 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4685 dtrace_getupcstack((uint64_t *) 4686 (tomax + valoffs), 4687 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 4688 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4689 continue; 4690 4691 default: 4692 break; 4693 } 4694 4695 dp = act->dta_difo; 4696 ASSERT(dp != NULL); 4697 4698 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 4699 4700 if (*flags & CPU_DTRACE_ERROR) 4701 continue; 4702 4703 switch (act->dta_kind) { 4704 case DTRACEACT_SPECULATE: 4705 ASSERT(buf == &state->dts_buffer[cpuid]); 4706 buf = dtrace_speculation_buffer(state, 4707 cpuid, val); 4708 4709 if (buf == NULL) { 4710 *flags |= CPU_DTRACE_DROP; 4711 continue; 4712 } 4713 4714 offs = dtrace_buffer_reserve(buf, 4715 ecb->dte_needed, ecb->dte_alignment, 4716 state, NULL); 4717 4718 if (offs < 0) { 4719 *flags |= CPU_DTRACE_DROP; 4720 continue; 4721 } 4722 4723 tomax = buf->dtb_tomax; 4724 ASSERT(tomax != NULL); 4725 4726 if (ecb->dte_size != 0) 4727 DTRACE_STORE(uint32_t, tomax, offs, 4728 ecb->dte_epid); 4729 continue; 4730 4731 case DTRACEACT_CHILL: 4732 if (dtrace_priv_kernel_destructive(state)) 4733 dtrace_action_chill(&mstate, val); 4734 continue; 4735 4736 case DTRACEACT_RAISE: 4737 if (dtrace_priv_proc_destructive(state)) 4738 dtrace_action_raise(val); 4739 continue; 4740 4741 case DTRACEACT_COMMIT: 4742 ASSERT(!committed); 4743 4744 /* 4745 * We need to commit our buffer state. 
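 *
 * If a speculate() action ran earlier in this ECB, "buf" now
 * points at a speculative buffer rather than at the principal
 * buffer. Committing therefore means finishing the current
 * record wherever it lives, pointing back at the principal
 * buffer, and letting the speculation code copy the data over --
 * which is exactly the sequence below:
 *
 *     buf->dtb_offset = offs + ecb->dte_size;
 *     buf = &state->dts_buffer[cpuid];
 *     dtrace_speculation_commit(state, cpuid, val);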
4746 */ 4747 if (ecb->dte_size) 4748 buf->dtb_offset = offs + ecb->dte_size; 4749 buf = &state->dts_buffer[cpuid]; 4750 dtrace_speculation_commit(state, cpuid, val); 4751 committed = 1; 4752 continue; 4753 4754 case DTRACEACT_DISCARD: 4755 dtrace_speculation_discard(state, cpuid, val); 4756 continue; 4757 4758 case DTRACEACT_DIFEXPR: 4759 case DTRACEACT_LIBACT: 4760 case DTRACEACT_PRINTF: 4761 case DTRACEACT_PRINTA: 4762 case DTRACEACT_SYSTEM: 4763 case DTRACEACT_FREOPEN: 4764 break; 4765 4766 case DTRACEACT_EXIT: { 4767 /* 4768 * For the exit action, we are going to attempt 4769 * to atomically set our activity to be 4770 * draining. If this fails (either because 4771 * another CPU has beat us to the exit action, 4772 * or because our current activity is something 4773 * other than ACTIVE or WARMUP), we will 4774 * continue. This assures that the exit action 4775 * can be successfully recorded at most once 4776 * when we're in the ACTIVE state. If we're 4777 * encountering the exit() action while in 4778 * COOLDOWN, however, we want to honor the new 4779 * status code. (We know that we're the only 4780 * thread in COOLDOWN, so there is no race.) 4781 */ 4782 void *activity = &state->dts_activity; 4783 dtrace_activity_t current = state->dts_activity; 4784 4785 if (current == DTRACE_ACTIVITY_COOLDOWN) 4786 break; 4787 4788 if (current != DTRACE_ACTIVITY_WARMUP) 4789 current = DTRACE_ACTIVITY_ACTIVE; 4790 4791 if (dtrace_cas32(activity, current, 4792 DTRACE_ACTIVITY_DRAINING) != current) { 4793 *flags |= CPU_DTRACE_DROP; 4794 continue; 4795 } 4796 4797 break; 4798 } 4799 4800 default: 4801 ASSERT(0); 4802 } 4803 4804 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 4805 uintptr_t end = valoffs + size; 4806 4807 /* 4808 * If this is a string, we're going to only 4809 * load until we find the zero byte -- after 4810 * which we'll store zero bytes. 4811 */ 4812 if (dp->dtdo_rtype.dtdt_kind == 4813 DIF_TYPE_STRING) { 4814 char c = '\0' + 1; 4815 size_t s; 4816 4817 for (s = 0; s < size; s++) { 4818 if (c != '\0') 4819 c = dtrace_load8(val++); 4820 4821 DTRACE_STORE(uint8_t, tomax, 4822 valoffs++, c); 4823 } 4824 4825 continue; 4826 } 4827 4828 while (valoffs < end) { 4829 DTRACE_STORE(uint8_t, tomax, valoffs++, 4830 dtrace_load8(val++)); 4831 } 4832 4833 continue; 4834 } 4835 4836 switch (size) { 4837 case 0: 4838 break; 4839 4840 case sizeof (uint8_t): 4841 DTRACE_STORE(uint8_t, tomax, valoffs, val); 4842 break; 4843 case sizeof (uint16_t): 4844 DTRACE_STORE(uint16_t, tomax, valoffs, val); 4845 break; 4846 case sizeof (uint32_t): 4847 DTRACE_STORE(uint32_t, tomax, valoffs, val); 4848 break; 4849 case sizeof (uint64_t): 4850 DTRACE_STORE(uint64_t, tomax, valoffs, val); 4851 break; 4852 default: 4853 /* 4854 * Any other size should have been returned by 4855 * reference, not by value. 4856 */ 4857 ASSERT(0); 4858 break; 4859 } 4860 } 4861 4862 if (*flags & CPU_DTRACE_DROP) 4863 continue; 4864 4865 if (*flags & CPU_DTRACE_FAULT) { 4866 int ndx; 4867 dtrace_action_t *err; 4868 4869 buf->dtb_errors++; 4870 4871 if (probe->dtpr_id == dtrace_probeid_error) { 4872 /* 4873 * There's nothing we can do -- we had an 4874 * error on the error probe. 4875 */ 4876 dtrace_double_errors++; 4877 continue; 4878 } 4879 4880 if (vtime) { 4881 /* 4882 * Before recursing on dtrace_probe(), we 4883 * need to explicitly clear out our start 4884 * time to prevent it from being accumulated 4885 * into t_dtrace_vtime. 
4886 */
4887 curthread->t_dtrace_start = 0;
4888 }
4889
4890 /*
4891 * Iterate over the actions to figure out which action
4892 * we were processing when we experienced the error.
4893 * Note that act points _past_ the faulting action; if
4894 * act is ecb->dte_action, the fault was in the
4895 * predicate; if it's ecb->dte_action->dta_next it's
4896 * in action #1, and so on.
4897 */
4898 for (err = ecb->dte_action, ndx = 0;
4899 err != act; err = err->dta_next, ndx++)
4900 continue;
4901
4902 dtrace_probe_error(state, ecb->dte_epid, ndx,
4903 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ?
4904 mstate.dtms_fltoffs : -1,
4905 (*flags & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR :
4906 (*flags & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP :
4907 (*flags & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO :
4908 (*flags & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV :
4909 (*flags & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV :
4910 (*flags & CPU_DTRACE_TUPOFLOW) ?
4911 DTRACEFLT_TUPOFLOW :
4912 (*flags & CPU_DTRACE_BADALIGN) ?
4913 DTRACEFLT_BADALIGN :
4914 (*flags & CPU_DTRACE_NOSCRATCH) ?
4915 DTRACEFLT_NOSCRATCH : DTRACEFLT_UNKNOWN,
4916 cpu_core[cpuid].cpuc_dtrace_illval);
4917
4918 continue;
4919 }
4920
4921 if (!committed)
4922 buf->dtb_offset = offs + ecb->dte_size;
4923 }
4924
4925 if (vtime)
4926 curthread->t_dtrace_start = dtrace_gethrtime();
4927
4928 dtrace_interrupt_enable(cookie);
4929 }
4930
4931 /*
4932 * DTrace Probe Hashing Functions
4933 *
4934 * The functions in this section (and indeed, the functions in remaining
4935 * sections) are not _called_ from probe context. (Any exceptions to this are
4936 * marked with a "Note:".) Rather, they are called from elsewhere in the
4937 * DTrace framework to look up probes in, add probes to, and remove probes from
4938 * the DTrace probe hashes. (Each probe is hashed by each element of the
4939 * probe tuple -- allowing for fast lookups, regardless of what was
4940 * specified.)
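 *
 * For example, to visit every probe in a given module, one fills
 * in only that element of a template probe and walks the chain
 * that the lookup returns -- the same pattern dtrace_match()
 * uses below ("genunix" is merely an example):
 *
 *     template.dtpr_mod = "genunix";
 *
 *     for (probe = dtrace_hash_lookup(dtrace_bymod, &template);
 *         probe != NULL;
 *         probe = *(DTRACE_HASHNEXT(dtrace_bymod, probe)))
 *             nmatched++;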
4941 */ 4942 static uint_t 4943 dtrace_hash_str(char *p) 4944 { 4945 unsigned int g; 4946 uint_t hval = 0; 4947 4948 while (*p) { 4949 hval = (hval << 4) + *p++; 4950 if ((g = (hval & 0xf0000000)) != 0) 4951 hval ^= g >> 24; 4952 hval &= ~g; 4953 } 4954 return (hval); 4955 } 4956 4957 static dtrace_hash_t * 4958 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 4959 { 4960 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 4961 4962 hash->dth_stroffs = stroffs; 4963 hash->dth_nextoffs = nextoffs; 4964 hash->dth_prevoffs = prevoffs; 4965 4966 hash->dth_size = 1; 4967 hash->dth_mask = hash->dth_size - 1; 4968 4969 hash->dth_tab = kmem_zalloc(hash->dth_size * 4970 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 4971 4972 return (hash); 4973 } 4974 4975 static void 4976 dtrace_hash_destroy(dtrace_hash_t *hash) 4977 { 4978 #ifdef DEBUG 4979 int i; 4980 4981 for (i = 0; i < hash->dth_size; i++) 4982 ASSERT(hash->dth_tab[i] == NULL); 4983 #endif 4984 4985 kmem_free(hash->dth_tab, 4986 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 4987 kmem_free(hash, sizeof (dtrace_hash_t)); 4988 } 4989 4990 static void 4991 dtrace_hash_resize(dtrace_hash_t *hash) 4992 { 4993 int size = hash->dth_size, i, ndx; 4994 int new_size = hash->dth_size << 1; 4995 int new_mask = new_size - 1; 4996 dtrace_hashbucket_t **new_tab, *bucket, *next; 4997 4998 ASSERT((new_size & new_mask) == 0); 4999 5000 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 5001 5002 for (i = 0; i < size; i++) { 5003 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 5004 dtrace_probe_t *probe = bucket->dthb_chain; 5005 5006 ASSERT(probe != NULL); 5007 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 5008 5009 next = bucket->dthb_next; 5010 bucket->dthb_next = new_tab[ndx]; 5011 new_tab[ndx] = bucket; 5012 } 5013 } 5014 5015 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 5016 hash->dth_tab = new_tab; 5017 hash->dth_size = new_size; 5018 hash->dth_mask = new_mask; 5019 } 5020 5021 static void 5022 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 5023 { 5024 int hashval = DTRACE_HASHSTR(hash, new); 5025 int ndx = hashval & hash->dth_mask; 5026 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5027 dtrace_probe_t **nextp, **prevp; 5028 5029 for (; bucket != NULL; bucket = bucket->dthb_next) { 5030 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 5031 goto add; 5032 } 5033 5034 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 5035 dtrace_hash_resize(hash); 5036 dtrace_hash_add(hash, new); 5037 return; 5038 } 5039 5040 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 5041 bucket->dthb_next = hash->dth_tab[ndx]; 5042 hash->dth_tab[ndx] = bucket; 5043 hash->dth_nbuckets++; 5044 5045 add: 5046 nextp = DTRACE_HASHNEXT(hash, new); 5047 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 5048 *nextp = bucket->dthb_chain; 5049 5050 if (bucket->dthb_chain != NULL) { 5051 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 5052 ASSERT(*prevp == NULL); 5053 *prevp = new; 5054 } 5055 5056 bucket->dthb_chain = new; 5057 bucket->dthb_len++; 5058 } 5059 5060 static dtrace_probe_t * 5061 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 5062 { 5063 int hashval = DTRACE_HASHSTR(hash, template); 5064 int ndx = hashval & hash->dth_mask; 5065 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5066 5067 for (; bucket != NULL; bucket = bucket->dthb_next) { 5068 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 5069 return 
(bucket->dthb_chain);
5070 }
5071
5072 return (NULL);
5073 }
5074
5075 static int
5076 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template)
5077 {
5078 int hashval = DTRACE_HASHSTR(hash, template);
5079 int ndx = hashval & hash->dth_mask;
5080 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
5081
5082 for (; bucket != NULL; bucket = bucket->dthb_next) {
5083 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
5084 return (bucket->dthb_len);
5085 }
5086
5087 return (0);
5088 }
5089
5090 static void
5091 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe)
5092 {
5093 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask;
5094 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
5095
5096 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe);
5097 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe);
5098
5099 /*
5100 * Find the bucket that we're removing this probe from.
5101 */
5102 for (; bucket != NULL; bucket = bucket->dthb_next) {
5103 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe))
5104 break;
5105 }
5106
5107 ASSERT(bucket != NULL);
5108
5109 if (*prevp == NULL) {
5110 if (*nextp == NULL) {
5111 /*
5112 * The removed probe was the only probe on this
5113 * bucket; we need to remove the bucket.
5114 */
5115 dtrace_hashbucket_t *b = hash->dth_tab[ndx];
5116
5117 ASSERT(bucket->dthb_chain == probe);
5118 ASSERT(b != NULL);
5119
5120 if (b == bucket) {
5121 hash->dth_tab[ndx] = bucket->dthb_next;
5122 } else {
5123 while (b->dthb_next != bucket)
5124 b = b->dthb_next;
5125 b->dthb_next = bucket->dthb_next;
5126 }
5127
5128 ASSERT(hash->dth_nbuckets > 0);
5129 hash->dth_nbuckets--;
5130 kmem_free(bucket, sizeof (dtrace_hashbucket_t));
5131 return;
5132 }
5133
5134 bucket->dthb_chain = *nextp;
5135 } else {
5136 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp;
5137 }
5138
5139 if (*nextp != NULL)
5140 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp;
5141 }
5142
5143 /*
5144 * DTrace Utility Functions
5145 *
5146 * These are random utility functions that are _not_ called from probe context.
5147 */
5148 static int
5149 dtrace_badattr(const dtrace_attribute_t *a)
5150 {
5151 return (a->dtat_name > DTRACE_STABILITY_MAX ||
5152 a->dtat_data > DTRACE_STABILITY_MAX ||
5153 a->dtat_class > DTRACE_CLASS_MAX);
5154 }
5155
5156 /*
5157 * Return a duplicate copy of a string. If the specified string is NULL,
5158 * this function returns a zero-length string.
5159 */
5160 static char *
5161 dtrace_strdup(const char *str)
5162 {
5163 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP);
5164
5165 if (str != NULL)
5166 (void) strcpy(new, str);
5167
5168 return (new);
5169 }
5170
5171 #define DTRACE_ISALPHA(c) \
5172 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))
5173
5174 static int
5175 dtrace_badname(const char *s)
5176 {
5177 char c;
5178
5179 if (s == NULL || (c = *s++) == '\0')
5180 return (0);
5181
5182 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.')
5183 return (1);
5184
5185 while ((c = *s++) != '\0') {
5186 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') &&
5187 c != '-' && c != '_' && c != '.'
&& c != '`') 5188 return (1); 5189 } 5190 5191 return (0); 5192 } 5193 5194 static void 5195 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp) 5196 { 5197 uint32_t priv; 5198 5199 *uidp = crgetuid(cr); 5200 if (PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 5201 priv = DTRACE_PRIV_ALL; 5202 } else { 5203 priv = 0; 5204 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 5205 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 5206 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 5207 priv |= DTRACE_PRIV_USER; 5208 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 5209 priv |= DTRACE_PRIV_PROC; 5210 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 5211 priv |= DTRACE_PRIV_OWNER; 5212 } 5213 5214 *privp = priv; 5215 } 5216 5217 #ifdef DTRACE_ERRDEBUG 5218 static void 5219 dtrace_errdebug(const char *str) 5220 { 5221 int hval = dtrace_hash_str((char *)str) % DTRACE_ERRHASHSZ; 5222 int occupied = 0; 5223 5224 mutex_enter(&dtrace_errlock); 5225 dtrace_errlast = str; 5226 dtrace_errthread = curthread; 5227 5228 while (occupied++ < DTRACE_ERRHASHSZ) { 5229 if (dtrace_errhash[hval].dter_msg == str) { 5230 dtrace_errhash[hval].dter_count++; 5231 goto out; 5232 } 5233 5234 if (dtrace_errhash[hval].dter_msg != NULL) { 5235 hval = (hval + 1) % DTRACE_ERRHASHSZ; 5236 continue; 5237 } 5238 5239 dtrace_errhash[hval].dter_msg = str; 5240 dtrace_errhash[hval].dter_count = 1; 5241 goto out; 5242 } 5243 5244 panic("dtrace: undersized error hash"); 5245 out: 5246 mutex_exit(&dtrace_errlock); 5247 } 5248 #endif 5249 5250 /* 5251 * DTrace Matching Functions 5252 * 5253 * These functions are used to match groups of probes, given some elements of 5254 * a probe tuple, or some globbed expressions for elements of a probe tuple. 5255 */ 5256 static int 5257 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid) 5258 { 5259 if (priv != DTRACE_PRIV_ALL) { 5260 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 5261 uint32_t match = priv & ppriv; 5262 5263 /* 5264 * No PRIV_DTRACE_* privileges... 5265 */ 5266 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 5267 DTRACE_PRIV_KERNEL)) == 0) 5268 return (0); 5269 5270 /* 5271 * No matching bits, but there were bits to match... 5272 */ 5273 if (match == 0 && ppriv != 0) 5274 return (0); 5275 5276 /* 5277 * Need to have permissions to the process, but don't... 5278 */ 5279 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 5280 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) 5281 return (0); 5282 } 5283 5284 return (1); 5285 } 5286 5287 /* 5288 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 5289 * consists of input pattern strings and an ops-vector to evaluate them. 5290 * This function returns >0 for match, 0 for no match, and <0 for error. 
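 *
 * The ops-vector itself is assembled by dtrace_probekey(), with
 * each element's match function chosen by dtrace_probekey_func()
 * from the pattern's spelling alone; for example:
 *
 *     "read"      -> dtrace_match_string  (literal comparison)
 *     "read*"     -> dtrace_match_glob    (glob character present)
 *     "" or NULL  -> dtrace_match_nul     (matches everything)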
5291 */ 5292 static int 5293 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 5294 uint32_t priv, uid_t uid) 5295 { 5296 dtrace_provider_t *pvp = prp->dtpr_provider; 5297 int rv; 5298 5299 if (pvp->dtpv_defunct) 5300 return (0); 5301 5302 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 5303 return (rv); 5304 5305 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 5306 return (rv); 5307 5308 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 5309 return (rv); 5310 5311 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 5312 return (rv); 5313 5314 if (dtrace_match_priv(prp, priv, uid) == 0) 5315 return (0); 5316 5317 return (rv); 5318 } 5319 5320 /* 5321 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 5322 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 5323 * libc's version, the kernel version only applies to 8-bit ASCII strings. 5324 * In addition, all of the recursion cases except for '*' matching have been 5325 * unwound. For '*', we still implement recursive evaluation, but a depth 5326 * counter is maintained and matching is aborted if we recurse too deep. 5327 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 5328 */ 5329 static int 5330 dtrace_match_glob(const char *s, const char *p, int depth) 5331 { 5332 const char *olds; 5333 char s1, c; 5334 int gs; 5335 5336 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 5337 return (-1); 5338 5339 if (s == NULL) 5340 s = ""; /* treat NULL as empty string */ 5341 5342 top: 5343 olds = s; 5344 s1 = *s++; 5345 5346 if (p == NULL) 5347 return (0); 5348 5349 if ((c = *p++) == '\0') 5350 return (s1 == '\0'); 5351 5352 switch (c) { 5353 case '[': { 5354 int ok = 0, notflag = 0; 5355 char lc = '\0'; 5356 5357 if (s1 == '\0') 5358 return (0); 5359 5360 if (*p == '!') { 5361 notflag = 1; 5362 p++; 5363 } 5364 5365 if ((c = *p++) == '\0') 5366 return (0); 5367 5368 do { 5369 if (c == '-' && lc != '\0' && *p != ']') { 5370 if ((c = *p++) == '\0') 5371 return (0); 5372 if (c == '\\' && (c = *p++) == '\0') 5373 return (0); 5374 5375 if (notflag) { 5376 if (s1 < lc || s1 > c) 5377 ok++; 5378 else 5379 return (0); 5380 } else if (lc <= s1 && s1 <= c) 5381 ok++; 5382 5383 } else if (c == '\\' && (c = *p++) == '\0') 5384 return (0); 5385 5386 lc = c; /* save left-hand 'c' for next iteration */ 5387 5388 if (notflag) { 5389 if (s1 != c) 5390 ok++; 5391 else 5392 return (0); 5393 } else if (s1 == c) 5394 ok++; 5395 5396 if ((c = *p++) == '\0') 5397 return (0); 5398 5399 } while (c != ']'); 5400 5401 if (ok) 5402 goto top; 5403 5404 return (0); 5405 } 5406 5407 case '\\': 5408 if ((c = *p++) == '\0') 5409 return (0); 5410 /*FALLTHRU*/ 5411 5412 default: 5413 if (c != s1) 5414 return (0); 5415 /*FALLTHRU*/ 5416 5417 case '?': 5418 if (s1 != '\0') 5419 goto top; 5420 return (0); 5421 5422 case '*': 5423 while (*p == '*') 5424 p++; /* consecutive *'s are identical to a single one */ 5425 5426 if (*p == '\0') 5427 return (1); 5428 5429 for (s = olds; *s != '\0'; s++) { 5430 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 5431 return (gs); 5432 } 5433 5434 return (0); 5435 } 5436 } 5437 5438 /*ARGSUSED*/ 5439 static int 5440 dtrace_match_string(const char *s, const char *p, int depth) 5441 { 5442 return (s != NULL && strcmp(s, p) == 0); 5443 } 5444 5445 /*ARGSUSED*/ 5446 static int 5447 dtrace_match_nul(const char *s, const char *p, int depth) 5448 { 5449 return (1); /* always match the empty 
pattern */ 5450 } 5451 5452 /*ARGSUSED*/ 5453 static int 5454 dtrace_match_nonzero(const char *s, const char *p, int depth) 5455 { 5456 return (s != NULL && s[0] != '\0'); 5457 } 5458 5459 static int 5460 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 5461 int (*matched)(dtrace_probe_t *, void *), void *arg) 5462 { 5463 dtrace_probe_t template, *probe; 5464 dtrace_hash_t *hash = NULL; 5465 int len, best = INT_MAX, nmatched = 0; 5466 dtrace_id_t i; 5467 5468 ASSERT(MUTEX_HELD(&dtrace_lock)); 5469 5470 /* 5471 * If the probe ID is specified in the key, just lookup by ID and 5472 * invoke the match callback once if a matching probe is found. 5473 */ 5474 if (pkp->dtpk_id != DTRACE_IDNONE) { 5475 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 5476 dtrace_match_probe(probe, pkp, priv, uid) > 0) { 5477 (void) (*matched)(probe, arg); 5478 nmatched++; 5479 } 5480 return (nmatched); 5481 } 5482 5483 template.dtpr_mod = (char *)pkp->dtpk_mod; 5484 template.dtpr_func = (char *)pkp->dtpk_func; 5485 template.dtpr_name = (char *)pkp->dtpk_name; 5486 5487 /* 5488 * We want to find the most distinct of the module name, function 5489 * name, and name. So for each one that is not a glob pattern or 5490 * empty string, we perform a lookup in the corresponding hash and 5491 * use the hash table with the fewest collisions to do our search. 5492 */ 5493 if (pkp->dtpk_mmatch == &dtrace_match_string && 5494 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 5495 best = len; 5496 hash = dtrace_bymod; 5497 } 5498 5499 if (pkp->dtpk_fmatch == &dtrace_match_string && 5500 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 5501 best = len; 5502 hash = dtrace_byfunc; 5503 } 5504 5505 if (pkp->dtpk_nmatch == &dtrace_match_string && 5506 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 5507 best = len; 5508 hash = dtrace_byname; 5509 } 5510 5511 /* 5512 * If we did not select a hash table, iterate over every probe and 5513 * invoke our callback for each one that matches our input probe key. 5514 */ 5515 if (hash == NULL) { 5516 for (i = 0; i < dtrace_nprobes; i++) { 5517 if ((probe = dtrace_probes[i]) == NULL || 5518 dtrace_match_probe(probe, pkp, priv, uid) <= 0) 5519 continue; 5520 5521 nmatched++; 5522 5523 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 5524 break; 5525 } 5526 5527 return (nmatched); 5528 } 5529 5530 /* 5531 * If we selected a hash table, iterate over each probe of the same key 5532 * name and invoke the callback for every probe that matches the other 5533 * attributes of our input probe key. 5534 */ 5535 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 5536 probe = *(DTRACE_HASHNEXT(hash, probe))) { 5537 5538 if (dtrace_match_probe(probe, pkp, priv, uid) <= 0) 5539 continue; 5540 5541 nmatched++; 5542 5543 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 5544 break; 5545 } 5546 5547 return (nmatched); 5548 } 5549 5550 /* 5551 * Return the function pointer dtrace_probecmp() should use to compare the 5552 * specified pattern with a string. For NULL or empty patterns, we select 5553 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 5554 * For non-empty non-glob strings, we use dtrace_match_string(). 5555 */ 5556 static dtrace_probekey_f * 5557 dtrace_probekey_func(const char *p) 5558 { 5559 char c; 5560 5561 if (p == NULL || *p == '\0') 5562 return (&dtrace_match_nul); 5563 5564 while ((c = *p++) != '\0') { 5565 if (c == '[' || c == '?' 
|| c == '*' || c == '\\') 5566 return (&dtrace_match_glob); 5567 } 5568 5569 return (&dtrace_match_string); 5570 } 5571 5572 /* 5573 * Build a probe comparison key for use with dtrace_match_probe() from the 5574 * given probe description. By convention, a null key only matches anchored 5575 * probes: if each field is the empty string, reset dtpk_fmatch to 5576 * dtrace_match_nonzero(). 5577 */ 5578 static void 5579 dtrace_probekey(const dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 5580 { 5581 pkp->dtpk_prov = pdp->dtpd_provider; 5582 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 5583 5584 pkp->dtpk_mod = pdp->dtpd_mod; 5585 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 5586 5587 pkp->dtpk_func = pdp->dtpd_func; 5588 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 5589 5590 pkp->dtpk_name = pdp->dtpd_name; 5591 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 5592 5593 pkp->dtpk_id = pdp->dtpd_id; 5594 5595 if (pkp->dtpk_id == DTRACE_IDNONE && 5596 pkp->dtpk_pmatch == &dtrace_match_nul && 5597 pkp->dtpk_mmatch == &dtrace_match_nul && 5598 pkp->dtpk_fmatch == &dtrace_match_nul && 5599 pkp->dtpk_nmatch == &dtrace_match_nul) 5600 pkp->dtpk_fmatch = &dtrace_match_nonzero; 5601 } 5602 5603 /* 5604 * DTrace Provider-to-Framework API Functions 5605 * 5606 * These functions implement much of the Provider-to-Framework API, as 5607 * described in <sys/dtrace.h>. The parts of the API not in this section are 5608 * the functions in the API for probe management (found below), and 5609 * dtrace_probe() itself (found above). 5610 */ 5611 5612 /* 5613 * Register the calling provider with the DTrace framework. This should 5614 * generally be called by DTrace providers in their attach(9E) entry point. 5615 */ 5616 int 5617 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 5618 uid_t uid, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 5619 { 5620 dtrace_provider_t *provider; 5621 5622 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 5623 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 5624 "arguments", name ? 
name : "<NULL>"); 5625 return (EINVAL); 5626 } 5627 5628 if (name[0] == '\0' || dtrace_badname(name)) { 5629 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 5630 "provider name", name); 5631 return (EINVAL); 5632 } 5633 5634 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 5635 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 5636 pops->dtps_destroy == NULL || 5637 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 5638 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 5639 "provider ops", name); 5640 return (EINVAL); 5641 } 5642 5643 if (dtrace_badattr(&pap->dtpa_provider) || 5644 dtrace_badattr(&pap->dtpa_mod) || 5645 dtrace_badattr(&pap->dtpa_func) || 5646 dtrace_badattr(&pap->dtpa_name) || 5647 dtrace_badattr(&pap->dtpa_args)) { 5648 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 5649 "provider attributes", name); 5650 return (EINVAL); 5651 } 5652 5653 if (priv & ~DTRACE_PRIV_ALL) { 5654 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 5655 "privilege attributes", name); 5656 return (EINVAL); 5657 } 5658 5659 if ((priv & DTRACE_PRIV_KERNEL) && 5660 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 5661 pops->dtps_usermode == NULL) { 5662 cmn_err(CE_WARN, "failed to register provider '%s': need " 5663 "dtps_usermode() op for given privilege attributes", name); 5664 return (EINVAL); 5665 } 5666 5667 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 5668 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 5669 (void) strcpy(provider->dtpv_name, name); 5670 5671 provider->dtpv_attr = *pap; 5672 provider->dtpv_priv.dtpp_flags = priv; 5673 provider->dtpv_priv.dtpp_uid = uid; 5674 provider->dtpv_pops = *pops; 5675 5676 if (pops->dtps_provide == NULL) { 5677 ASSERT(pops->dtps_provide_module != NULL); 5678 provider->dtpv_pops.dtps_provide = 5679 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop; 5680 } 5681 5682 if (pops->dtps_provide_module == NULL) { 5683 ASSERT(pops->dtps_provide != NULL); 5684 provider->dtpv_pops.dtps_provide_module = 5685 (void (*)(void *, struct modctl *))dtrace_nullop; 5686 } 5687 5688 if (pops->dtps_suspend == NULL) { 5689 ASSERT(pops->dtps_resume == NULL); 5690 provider->dtpv_pops.dtps_suspend = 5691 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 5692 provider->dtpv_pops.dtps_resume = 5693 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 5694 } 5695 5696 provider->dtpv_arg = arg; 5697 *idp = (dtrace_provider_id_t)provider; 5698 5699 if (pops == &dtrace_provider_ops) { 5700 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 5701 ASSERT(MUTEX_HELD(&dtrace_lock)); 5702 ASSERT(dtrace_anon.dta_enabling == NULL); 5703 5704 /* 5705 * We make sure that the DTrace provider is at the head of 5706 * the provider chain. 5707 */ 5708 provider->dtpv_next = dtrace_provider; 5709 dtrace_provider = provider; 5710 return (0); 5711 } 5712 5713 mutex_enter(&dtrace_provider_lock); 5714 mutex_enter(&dtrace_lock); 5715 5716 /* 5717 * If there is at least one provider registered, we'll add this 5718 * provider after the first provider. 5719 */ 5720 if (dtrace_provider != NULL) { 5721 provider->dtpv_next = dtrace_provider->dtpv_next; 5722 dtrace_provider->dtpv_next = provider; 5723 } else { 5724 dtrace_provider = provider; 5725 } 5726 5727 if (dtrace_retained != NULL) { 5728 dtrace_enabling_provide(provider); 5729 5730 /* 5731 * Now we need to call dtrace_enabling_matchall() -- which 5732 * will acquire cpu_lock and dtrace_lock. 
We therefore need 5733 * to drop all of our locks before calling into it... 5734 */ 5735 mutex_exit(&dtrace_lock); 5736 mutex_exit(&dtrace_provider_lock); 5737 dtrace_enabling_matchall(); 5738 5739 return (0); 5740 } 5741 5742 mutex_exit(&dtrace_lock); 5743 mutex_exit(&dtrace_provider_lock); 5744 5745 return (0); 5746 } 5747 5748 /* 5749 * Unregister the specified provider from the DTrace framework. This should 5750 * generally be called by DTrace providers in their detach(9E) entry point. 5751 */ 5752 int 5753 dtrace_unregister(dtrace_provider_id_t id) 5754 { 5755 dtrace_provider_t *old = (dtrace_provider_t *)id; 5756 dtrace_provider_t *prev = NULL; 5757 int i, self = 0; 5758 dtrace_probe_t *probe, *first = NULL; 5759 5760 if (old->dtpv_pops.dtps_enable == 5761 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 5762 /* 5763 * If DTrace itself is the provider, we're called with locks 5764 * already held. 5765 */ 5766 ASSERT(old == dtrace_provider); 5767 ASSERT(dtrace_devi != NULL); 5768 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 5769 ASSERT(MUTEX_HELD(&dtrace_lock)); 5770 self = 1; 5771 5772 if (dtrace_provider->dtpv_next != NULL) { 5773 /* 5774 * There's another provider here; return failure. 5775 */ 5776 return (EBUSY); 5777 } 5778 } else { 5779 mutex_enter(&dtrace_provider_lock); 5780 mutex_enter(&mod_lock); 5781 mutex_enter(&dtrace_lock); 5782 } 5783 5784 /* 5785 * If anyone has /dev/dtrace open, or if there are anonymous enabled 5786 * probes, we refuse to let providers slither away, unless this 5787 * provider has already been explicitly invalidated. 5788 */ 5789 if (!old->dtpv_defunct && 5790 (dtrace_opens || (dtrace_anon.dta_state != NULL && 5791 dtrace_anon.dta_state->dts_necbs > 0))) { 5792 if (!self) { 5793 mutex_exit(&dtrace_lock); 5794 mutex_exit(&mod_lock); 5795 mutex_exit(&dtrace_provider_lock); 5796 } 5797 return (EBUSY); 5798 } 5799 5800 /* 5801 * Attempt to destroy the probes associated with this provider. 5802 */ 5803 for (i = 0; i < dtrace_nprobes; i++) { 5804 if ((probe = dtrace_probes[i]) == NULL) 5805 continue; 5806 5807 if (probe->dtpr_provider != old) 5808 continue; 5809 5810 if (probe->dtpr_ecb == NULL) 5811 continue; 5812 5813 /* 5814 * We have at least one ECB; we can't remove this provider. 5815 */ 5816 if (!self) { 5817 mutex_exit(&dtrace_lock); 5818 mutex_exit(&mod_lock); 5819 mutex_exit(&dtrace_provider_lock); 5820 } 5821 return (EBUSY); 5822 } 5823 5824 /* 5825 * All of the probes for this provider are disabled; we can safely 5826 * remove all of them from their hash chains and from the probe array. 5827 */ 5828 for (i = 0; i < dtrace_nprobes; i++) { 5829 if ((probe = dtrace_probes[i]) == NULL) 5830 continue; 5831 5832 if (probe->dtpr_provider != old) 5833 continue; 5834 5835 dtrace_probes[i] = NULL; 5836 5837 dtrace_hash_remove(dtrace_bymod, probe); 5838 dtrace_hash_remove(dtrace_byfunc, probe); 5839 dtrace_hash_remove(dtrace_byname, probe); 5840 5841 if (first == NULL) { 5842 first = probe; 5843 probe->dtpr_nextmod = NULL; 5844 } else { 5845 probe->dtpr_nextmod = first; 5846 first = probe; 5847 } 5848 } 5849 5850 /* 5851 * The provider's probes have been removed from the hash chains and 5852 * from the probe array. Now issue a dtrace_sync() to be sure that 5853 * everyone has cleared out from any probe array processing. 
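 *
 * This is the usual unpublish-then-wait pattern: probe context
 * runs with interrupts disabled and never blocks, so once every
 * CPU has been observed outside of probe context, nothing can
 * still hold a reference to the removed entries. Only then are
 * the calls to dtps_destroy() and the frees below safe:
 *
 *     dtrace_probes[i] = NULL;    (unpublish the probe)
 *     dtrace_sync();              (wait out probe context)
 *     kmem_free(probe, sizeof (dtrace_probe_t));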
5854 */ 5855 dtrace_sync(); 5856 5857 for (probe = first; probe != NULL; probe = first) { 5858 first = probe->dtpr_nextmod; 5859 5860 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 5861 probe->dtpr_arg); 5862 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 5863 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 5864 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 5865 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 5866 kmem_free(probe, sizeof (dtrace_probe_t)); 5867 } 5868 5869 if ((prev = dtrace_provider) == old) { 5870 ASSERT(self || dtrace_devi == NULL); 5871 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 5872 dtrace_provider = old->dtpv_next; 5873 } else { 5874 while (prev != NULL && prev->dtpv_next != old) 5875 prev = prev->dtpv_next; 5876 5877 if (prev == NULL) { 5878 panic("attempt to unregister non-existent " 5879 "dtrace provider %p\n", (void *)id); 5880 } 5881 5882 prev->dtpv_next = old->dtpv_next; 5883 } 5884 5885 if (!self) { 5886 mutex_exit(&dtrace_lock); 5887 mutex_exit(&mod_lock); 5888 mutex_exit(&dtrace_provider_lock); 5889 } 5890 5891 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 5892 kmem_free(old, sizeof (dtrace_provider_t)); 5893 5894 return (0); 5895 } 5896 5897 /* 5898 * Invalidate the specified provider. All subsequent probe lookups for the 5899 * specified provider will fail, but its probes will not be removed. 5900 */ 5901 void 5902 dtrace_invalidate(dtrace_provider_id_t id) 5903 { 5904 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 5905 5906 ASSERT(pvp->dtpv_pops.dtps_enable != 5907 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 5908 5909 mutex_enter(&dtrace_provider_lock); 5910 mutex_enter(&dtrace_lock); 5911 5912 pvp->dtpv_defunct = 1; 5913 5914 mutex_exit(&dtrace_lock); 5915 mutex_exit(&dtrace_provider_lock); 5916 } 5917 5918 /* 5919 * Indicate whether or not DTrace has attached. 5920 */ 5921 int 5922 dtrace_attached(void) 5923 { 5924 /* 5925 * dtrace_provider will be non-NULL iff the DTrace driver has 5926 * attached. (It's non-NULL because DTrace is always itself a 5927 * provider.) 5928 */ 5929 return (dtrace_provider != NULL); 5930 } 5931 5932 /* 5933 * Remove all the unenabled probes for the given provider. This function is 5934 * not unlike dtrace_unregister(), except that it doesn't remove the provider 5935 * -- just as many of its associated probes as it can. 5936 */ 5937 int 5938 dtrace_condense(dtrace_provider_id_t id) 5939 { 5940 dtrace_provider_t *prov = (dtrace_provider_t *)id; 5941 int i; 5942 dtrace_probe_t *probe; 5943 5944 /* 5945 * Make sure this isn't the dtrace provider itself. 5946 */ 5947 ASSERT(prov->dtpv_pops.dtps_enable != 5948 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 5949 5950 mutex_enter(&dtrace_provider_lock); 5951 mutex_enter(&dtrace_lock); 5952 5953 /* 5954 * Attempt to destroy the probes associated with this provider. 
5955 */ 5956 for (i = 0; i < dtrace_nprobes; i++) { 5957 if ((probe = dtrace_probes[i]) == NULL) 5958 continue; 5959 5960 if (probe->dtpr_provider != prov) 5961 continue; 5962 5963 if (probe->dtpr_ecb != NULL) 5964 continue; 5965 5966 dtrace_probes[i] = NULL; 5967 5968 dtrace_hash_remove(dtrace_bymod, probe); 5969 dtrace_hash_remove(dtrace_byfunc, probe); 5970 dtrace_hash_remove(dtrace_byname, probe); 5971 5972 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 5973 probe->dtpr_arg); 5974 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 5975 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 5976 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 5977 kmem_free(probe, sizeof (dtrace_probe_t)); 5978 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 5979 } 5980 5981 mutex_exit(&dtrace_lock); 5982 mutex_exit(&dtrace_provider_lock); 5983 5984 return (0); 5985 } 5986 5987 /* 5988 * DTrace Probe Management Functions 5989 * 5990 * The functions in this section perform the DTrace probe management, 5991 * including functions to create probes, look-up probes, and call into the 5992 * providers to request that probes be provided. Some of these functions are 5993 * in the Provider-to-Framework API; these functions can be identified by the 5994 * fact that they are not declared "static". 5995 */ 5996 5997 /* 5998 * Create a probe with the specified module name, function name, and name. 5999 */ 6000 dtrace_id_t 6001 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 6002 const char *func, const char *name, int aframes, void *arg) 6003 { 6004 dtrace_probe_t *probe, **probes; 6005 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 6006 dtrace_id_t id; 6007 6008 if (provider == dtrace_provider) { 6009 ASSERT(MUTEX_HELD(&dtrace_lock)); 6010 } else { 6011 mutex_enter(&dtrace_lock); 6012 } 6013 6014 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 6015 VM_BESTFIT | VM_SLEEP); 6016 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 6017 6018 probe->dtpr_id = id; 6019 probe->dtpr_gen = dtrace_probegen++; 6020 probe->dtpr_mod = dtrace_strdup(mod); 6021 probe->dtpr_func = dtrace_strdup(func); 6022 probe->dtpr_name = dtrace_strdup(name); 6023 probe->dtpr_arg = arg; 6024 probe->dtpr_aframes = aframes; 6025 probe->dtpr_provider = provider; 6026 6027 dtrace_hash_add(dtrace_bymod, probe); 6028 dtrace_hash_add(dtrace_byfunc, probe); 6029 dtrace_hash_add(dtrace_byname, probe); 6030 6031 if (id - 1 >= dtrace_nprobes) { 6032 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 6033 size_t nsize = osize << 1; 6034 6035 if (nsize == 0) { 6036 ASSERT(osize == 0); 6037 ASSERT(dtrace_probes == NULL); 6038 nsize = sizeof (dtrace_probe_t *); 6039 } 6040 6041 probes = kmem_zalloc(nsize, KM_SLEEP); 6042 6043 if (dtrace_probes == NULL) { 6044 ASSERT(osize == 0); 6045 dtrace_probes = probes; 6046 dtrace_nprobes = 1; 6047 } else { 6048 dtrace_probe_t **oprobes = dtrace_probes; 6049 6050 bcopy(oprobes, probes, osize); 6051 dtrace_membar_producer(); 6052 dtrace_probes = probes; 6053 6054 dtrace_sync(); 6055 6056 /* 6057 * All CPUs are now seeing the new probes array; we can 6058 * safely free the old array. 
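 *
 * Note the ordering that makes this resize safe against the
 * lock-free readers in probe context:
 *
 *     bcopy(oprobes, probes, osize);  (fill the new array)
 *     dtrace_membar_producer();       (contents before pointer)
 *     dtrace_probes = probes;         (publish the new array)
 *     dtrace_sync();                  (no CPU still sees oprobes)
 *
 * Without the producer barrier, a CPU could load the new pointer
 * and yet see stale, uncopied array contents.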
6059 */ 6060 kmem_free(oprobes, osize); 6061 dtrace_nprobes <<= 1; 6062 } 6063 6064 ASSERT(id - 1 < dtrace_nprobes); 6065 } 6066 6067 ASSERT(dtrace_probes[id - 1] == NULL); 6068 dtrace_probes[id - 1] = probe; 6069 6070 if (provider != dtrace_provider) 6071 mutex_exit(&dtrace_lock); 6072 6073 return (id); 6074 } 6075 6076 static dtrace_probe_t * 6077 dtrace_probe_lookup_id(dtrace_id_t id) 6078 { 6079 ASSERT(MUTEX_HELD(&dtrace_lock)); 6080 6081 if (id == 0 || id > dtrace_nprobes) 6082 return (NULL); 6083 6084 return (dtrace_probes[id - 1]); 6085 } 6086 6087 static int 6088 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 6089 { 6090 *((dtrace_id_t *)arg) = probe->dtpr_id; 6091 6092 return (DTRACE_MATCH_DONE); 6093 } 6094 6095 /* 6096 * Look up a probe based on provider and one or more of module name, function 6097 * name and probe name. 6098 */ 6099 dtrace_id_t 6100 dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod, 6101 const char *func, const char *name) 6102 { 6103 dtrace_probekey_t pkey; 6104 dtrace_id_t id; 6105 int match; 6106 6107 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 6108 pkey.dtpk_pmatch = &dtrace_match_string; 6109 pkey.dtpk_mod = mod; 6110 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 6111 pkey.dtpk_func = func; 6112 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 6113 pkey.dtpk_name = name; 6114 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul; 6115 pkey.dtpk_id = DTRACE_IDNONE; 6116 6117 mutex_enter(&dtrace_lock); 6118 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 6119 dtrace_probe_lookup_match, &id); 6120 mutex_exit(&dtrace_lock); 6121 6122 ASSERT(match == 1 || match == 0); 6123 return (match ? id : 0); 6124 } 6125 6126 /* 6127 * Returns the probe argument associated with the specified probe. 6128 */ 6129 void * 6130 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) 6131 { 6132 dtrace_probe_t *probe; 6133 void *rval = NULL; 6134 6135 mutex_enter(&dtrace_lock); 6136 6137 if ((probe = dtrace_probe_lookup_id(pid)) != NULL && 6138 probe->dtpr_provider == (dtrace_provider_t *)id) 6139 rval = probe->dtpr_arg; 6140 6141 mutex_exit(&dtrace_lock); 6142 6143 return (rval); 6144 } 6145 6146 /* 6147 * Copy a probe into a probe description. 6148 */ 6149 static void 6150 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) 6151 { 6152 bzero(pdp, sizeof (dtrace_probedesc_t)); 6153 pdp->dtpd_id = prp->dtpr_id; 6154 6155 (void) strncpy(pdp->dtpd_provider, 6156 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); 6157 6158 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); 6159 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); 6160 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); 6161 } 6162 6163 /* 6164 * Called to indicate that a probe -- or probes -- should be provided by a 6165 * specified provider. If the specified description is NULL, the provider will 6166 * be told to provide all of its probes. (This is done whenever a new 6167 * consumer comes along, or whenever a retained enabling is to be matched.) If 6168 * the specified description is non-NULL, the provider is given the 6169 * opportunity to dynamically provide the specified probe, allowing providers 6170 * to support the creation of probes on-the-fly. (So-called _autocreated_ 6171 * probes.)
If the provider is NULL, the operations will be applied to all 6172 * providers; if the provider is non-NULL the operations will only be applied 6173 * to the specified provider. The dtrace_provider_lock must be held, and the 6174 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 6175 * will need to grab the dtrace_lock when it reenters the framework through 6176 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 6177 */ 6178 static void 6179 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 6180 { 6181 struct modctl *ctl; 6182 int all = 0; 6183 6184 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 6185 6186 if (prv == NULL) { 6187 all = 1; 6188 prv = dtrace_provider; 6189 } 6190 6191 do { 6192 /* 6193 * First, call the blanket provide operation. 6194 */ 6195 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 6196 6197 /* 6198 * Now call the per-module provide operation. We will grab 6199 * mod_lock to prevent the list from being modified. Note 6200 * that this also prevents the mod_busy bits from changing. 6201 * (mod_busy can only be changed with mod_lock held.) 6202 */ 6203 mutex_enter(&mod_lock); 6204 6205 ctl = &modules; 6206 do { 6207 if (ctl->mod_busy || ctl->mod_mp == NULL) 6208 continue; 6209 6210 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 6211 6212 } while ((ctl = ctl->mod_next) != &modules); 6213 6214 mutex_exit(&mod_lock); 6215 } while (all && (prv = prv->dtpv_next) != NULL); 6216 } 6217 6218 /* 6219 * Iterate over each probe, and call the Framework-to-Provider API function 6220 * denoted by offs. 6221 */ 6222 static void 6223 dtrace_probe_foreach(uintptr_t offs) 6224 { 6225 dtrace_provider_t *prov; 6226 void (*func)(void *, dtrace_id_t, void *); 6227 dtrace_probe_t *probe; 6228 dtrace_icookie_t cookie; 6229 int i; 6230 6231 /* 6232 * We disable interrupts to walk through the probe array. This is 6233 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 6234 * won't see stale data. 6235 */ 6236 cookie = dtrace_interrupt_disable(); 6237 6238 for (i = 0; i < dtrace_nprobes; i++) { 6239 if ((probe = dtrace_probes[i]) == NULL) 6240 continue; 6241 6242 if (probe->dtpr_ecb == NULL) { 6243 /* 6244 * This probe isn't enabled -- don't call the function. 6245 */ 6246 continue; 6247 } 6248 6249 prov = probe->dtpr_provider; 6250 func = *((void(**)(void *, dtrace_id_t, void *)) 6251 ((uintptr_t)&prov->dtpv_pops + offs)); 6252 6253 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 6254 } 6255 6256 dtrace_interrupt_enable(cookie); 6257 } 6258 6259 static int 6260 dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 6261 { 6262 dtrace_probekey_t pkey; 6263 uint32_t priv; 6264 uid_t uid; 6265 6266 ASSERT(MUTEX_HELD(&dtrace_lock)); 6267 dtrace_ecb_create_cache = NULL; 6268 6269 if (desc == NULL) { 6270 /* 6271 * If we're passed a NULL description, we're being asked to 6272 * create an ECB with a NULL probe. 
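 * (An aside on dtrace_probe_foreach(), above: callers pass the byte
 * offset of the desired entry point within dtrace_pops_t, e.g.
 *
 *	dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
 *
 * to invoke that operation on every enabled probe; this is the shape of
 * the framework's suspend and resume paths.)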
6273 */ 6274 (void) dtrace_ecb_create_enable(NULL, enab); 6275 return (0); 6276 } 6277 6278 dtrace_probekey(desc, &pkey); 6279 dtrace_cred2priv(CRED(), &priv, &uid); 6280 6281 return (dtrace_match(&pkey, priv, uid, dtrace_ecb_create_enable, enab)); 6282 } 6283 6284 /* 6285 * DTrace Helper Provider Functions 6286 */ 6287 static void 6288 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 6289 { 6290 attr->dtat_name = DOF_ATTR_NAME(dofattr); 6291 attr->dtat_data = DOF_ATTR_DATA(dofattr); 6292 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 6293 } 6294 6295 static void 6296 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 6297 const dof_provider_t *dofprov, char *strtab) 6298 { 6299 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 6300 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 6301 dofprov->dofpv_provattr); 6302 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 6303 dofprov->dofpv_modattr); 6304 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 6305 dofprov->dofpv_funcattr); 6306 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 6307 dofprov->dofpv_nameattr); 6308 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 6309 dofprov->dofpv_argsattr); 6310 } 6311 6312 static void 6313 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 6314 { 6315 dof_hdr_t *dof = (dof_hdr_t *)dhp->dofhp_dof; 6316 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 6317 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec; 6318 dof_provider_t *provider; 6319 dof_probe_t *probe; 6320 uint32_t *off; 6321 uint8_t *arg; 6322 char *strtab; 6323 uint_t i, nprobes; 6324 dtrace_helper_provdesc_t dhpv; 6325 dtrace_helper_probedesc_t dhpb; 6326 dtrace_meta_t *meta = dtrace_meta_pid; 6327 dtrace_mops_t *mops = &meta->dtm_mops; 6328 void *parg; 6329 6330 provider = (dof_provider_t *)(daddr + sec->dofs_offset); 6331 str_sec = (dof_sec_t *)(daddr + dof->dofh_secoff + 6332 provider->dofpv_strtab * dof->dofh_secsize); 6333 prb_sec = (dof_sec_t *)(daddr + dof->dofh_secoff + 6334 provider->dofpv_probes * dof->dofh_secsize); 6335 arg_sec = (dof_sec_t *)(daddr + dof->dofh_secoff + 6336 provider->dofpv_prargs * dof->dofh_secsize); 6337 off_sec = (dof_sec_t *)(daddr + dof->dofh_secoff + 6338 provider->dofpv_proffs * dof->dofh_secsize); 6339 6340 strtab = (char *)(daddr + str_sec->dofs_offset); 6341 off = (uint32_t *)(daddr + off_sec->dofs_offset); 6342 arg = (uint8_t *)(daddr + arg_sec->dofs_offset); 6343 6344 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 6345 6346 /* 6347 * Create the provider. 6348 */ 6349 dtrace_dofprov2hprov(&dhpv, provider, strtab); 6350 6351 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 6352 return; 6353 6354 meta->dtm_count++; 6355 6356 /* 6357 * Create the probes. 
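 * Each dof_probe_t in the loop below is located with the same
 * arithmetic used for the section headers above.  A hypothetical helper
 * capturing that recurring idiom (illustrative only; no such function
 * exists in this file) would be:
 *
 *	static dof_sec_t *
 *	dof_sect_by_index(dof_hdr_t *dof, uint_t i)
 *	{
 *		return ((dof_sec_t *)((uintptr_t)dof +
 *		    dof->dofh_secoff + i * dof->dofh_secsize));
 *	}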
6358 */ 6359 for (i = 0; i < nprobes; i++) { 6360 probe = (dof_probe_t *)(daddr + prb_sec->dofs_offset + 6361 i * prb_sec->dofs_entsize); 6362 6363 dhpb.dthpb_mod = dhp->dofhp_mod; 6364 dhpb.dthpb_func = strtab + probe->dofpr_func; 6365 dhpb.dthpb_name = strtab + probe->dofpr_name; 6366 dhpb.dthpb_base = probe->dofpr_addr; 6367 dhpb.dthpb_offs = off + probe->dofpr_offidx; 6368 dhpb.dthpb_noffs = probe->dofpr_noffs; 6369 dhpb.dthpb_args = arg + probe->dofpr_argidx; 6370 dhpb.dthpb_nargc = probe->dofpr_nargc; 6371 dhpb.dthpb_xargc = probe->dofpr_xargc; 6372 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 6373 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 6374 6375 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 6376 } 6377 } 6378 6379 static void 6380 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 6381 { 6382 dof_hdr_t *dof = (dof_hdr_t *)dhp->dofhp_dof; 6383 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 6384 int i; 6385 6386 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 6387 6388 for (i = 0; i < dof->dofh_secnum; i++) { 6389 dof_sec_t *sec = (dof_sec_t *)(daddr + dof->dofh_secoff + 6390 i * dof->dofh_secsize); 6391 6392 if (sec->dofs_type != DOF_SECT_PROVIDER) 6393 continue; 6394 6395 dtrace_helper_provide_one(dhp, sec, pid); 6396 } 6397 } 6398 6399 static void 6400 dtrace_helper_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 6401 { 6402 dof_hdr_t *dof = (dof_hdr_t *)dhp->dofhp_dof; 6403 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 6404 dof_sec_t *str_sec; 6405 dof_provider_t *provider; 6406 char *strtab; 6407 dtrace_helper_provdesc_t dhpv; 6408 dtrace_meta_t *meta = dtrace_meta_pid; 6409 dtrace_mops_t *mops = &meta->dtm_mops; 6410 6411 provider = (dof_provider_t *)(daddr + sec->dofs_offset); 6412 str_sec = (dof_sec_t *)(daddr + dof->dofh_secoff + 6413 provider->dofpv_strtab * dof->dofh_secsize); 6414 6415 strtab = (char *)(daddr + str_sec->dofs_offset); 6416 6417 /* 6418 * Build the provider description so that the provider can be removed. 6419 */ 6420 dtrace_dofprov2hprov(&dhpv, provider, strtab); 6421 6422 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 6423 6424 meta->dtm_count--; 6425 } 6426 6427 static void 6428 dtrace_helper_remove(dof_helper_t *dhp, pid_t pid) 6429 { 6430 dof_hdr_t *dof = (dof_hdr_t *)dhp->dofhp_dof; 6431 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 6432 int i; 6433 6434 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 6435 6436 for (i = 0; i < dof->dofh_secnum; i++) { 6437 dof_sec_t *sec = (dof_sec_t *)(daddr + dof->dofh_secoff + 6438 i * dof->dofh_secsize); 6439 6440 if (sec->dofs_type != DOF_SECT_PROVIDER) 6441 continue; 6442 6443 dtrace_helper_remove_one(dhp, sec, pid); 6444 } 6445 } 6446 6447 /* 6448 * DTrace Meta Provider-to-Framework API Functions 6449 * 6450 * These functions implement the Meta Provider-to-Framework API, as described 6451 * in <sys/dtrace.h>. 6452 */ 6453 int 6454 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 6455 dtrace_meta_provider_id_t *idp) 6456 { 6457 dtrace_meta_t *meta; 6458 dtrace_helpers_t *help, *next; 6459 int i; 6460 6461 *idp = DTRACE_METAPROVNONE; 6462 6463 /* 6464 * We strictly don't need the name, but we hold onto it for 6465 * debuggability. All hail error queues!
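 *
 * For reference, a registration by a user-land meta-provider consumer
 * looks roughly like this (a sketch modeled on the fasttrap provider;
 * names and member order are illustrative):
 *
 *	static dtrace_mops_t fasttrap_mops = {
 *		fasttrap_meta_create_probe,	<- dtms_create_probe
 *		fasttrap_meta_provide_pid,	<- dtms_provide_pid
 *		fasttrap_meta_remove_pid	<- dtms_remove_pid
 *	};
 *	dtrace_meta_provider_id_t fasttrap_meta_id;
 *
 *	err = dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
 *	    &fasttrap_meta_id);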
6466 */ 6467 if (name == NULL) { 6468 cmn_err(CE_WARN, "failed to register meta-provider: " 6469 "invalid name"); 6470 return (EINVAL); 6471 } 6472 6473 if (mops == NULL || 6474 mops->dtms_create_probe == NULL || 6475 mops->dtms_provide_pid == NULL || 6476 mops->dtms_remove_pid == NULL) { 6477 cmn_err(CE_WARN, "failed to register meta-provider %s: " 6478 "invalid ops", name); 6479 return (EINVAL); 6480 } 6481 6482 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 6483 meta->dtm_mops = *mops; 6484 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 6485 (void) strcpy(meta->dtm_name, name); 6486 meta->dtm_arg = arg; 6487 6488 mutex_enter(&dtrace_meta_lock); 6489 mutex_enter(&dtrace_lock); 6490 6491 if (dtrace_meta_pid != NULL) { 6492 mutex_exit(&dtrace_lock); 6493 mutex_exit(&dtrace_meta_lock); 6494 cmn_err(CE_WARN, "failed to register meta-provider %s: " 6495 "user-land meta-provider exists", name); 6496 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 6497 kmem_free(meta, sizeof (dtrace_meta_t)); 6498 return (EINVAL); 6499 } 6500 6501 dtrace_meta_pid = meta; 6502 *idp = (dtrace_meta_provider_id_t)meta; 6503 6504 /* 6505 * If there are providers and probes ready to go, pass them 6506 * off to the new meta provider now. 6507 */ 6508 6509 help = dtrace_deferred_pid; 6510 dtrace_deferred_pid = NULL; 6511 6512 mutex_exit(&dtrace_lock); 6513 6514 while (help != NULL) { 6515 for (i = 0; i < help->dthps_nprovs; i++) { 6516 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 6517 help->dthps_pid); 6518 } 6519 6520 next = help->dthps_next; 6521 help->dthps_next = NULL; 6522 help->dthps_prev = NULL; 6523 help = next; 6524 } 6525 6526 mutex_exit(&dtrace_meta_lock); 6527 6528 return (0); 6529 } 6530 6531 int 6532 dtrace_meta_unregister(dtrace_meta_provider_id_t id) 6533 { 6534 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 6535 6536 mutex_enter(&dtrace_meta_lock); 6537 mutex_enter(&dtrace_lock); 6538 6539 if (old == dtrace_meta_pid) { 6540 pp = &dtrace_meta_pid; 6541 } else { 6542 panic("attempt to unregister non-existent " 6543 "dtrace meta-provider %p\n", (void *)old); 6544 } 6545 6546 if (old->dtm_count != 0) { 6547 mutex_exit(&dtrace_lock); 6548 mutex_exit(&dtrace_meta_lock); 6549 return (EBUSY); 6550 } 6551 6552 *pp = NULL; 6553 6554 mutex_exit(&dtrace_lock); 6555 mutex_exit(&dtrace_meta_lock); 6556 6557 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 6558 kmem_free(old, sizeof (dtrace_meta_t)); 6559 6560 return (0); 6561 } 6562 6563 6564 /* 6565 * DTrace DIF Object Functions 6566 */ 6567 static int 6568 dtrace_difo_err(uint_t pc, const char *format, ...) 6569 { 6570 if (dtrace_err_verbose) { 6571 va_list alist; 6572 6573 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 6574 va_start(alist, format); 6575 (void) vuprintf(format, alist); 6576 va_end(alist); 6577 } 6578 6579 #ifdef DTRACE_ERRDEBUG 6580 dtrace_errdebug(format); 6581 #endif 6582 return (1); 6583 } 6584 6585 /* 6586 * Validate a DTrace DIF object by checking the IR instructions. The following 6587 * rules are currently enforced by dtrace_difo_validate(): 6588 * 6589 * 1. Each instruction must have a valid opcode 6590 * 2. Each register, string, variable, or subroutine reference must be valid 6591 * 3. No instruction can modify register %r0 (must be zero) 6592 * 4. All instruction reserved bits must be set to zero 6593 * 5. The last instruction must be a "ret" instruction 6594 * 6.
All branch targets must reference a valid instruction _after_ the branch 6595 */ 6596 static int 6597 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 6598 cred_t *cr) 6599 { 6600 int err = 0, i; 6601 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 6602 int kcheck; 6603 uint_t pc; 6604 6605 kcheck = cr == NULL || 6606 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE) == 0; 6607 6608 dp->dtdo_destructive = 0; 6609 6610 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 6611 dif_instr_t instr = dp->dtdo_buf[pc]; 6612 6613 uint_t r1 = DIF_INSTR_R1(instr); 6614 uint_t r2 = DIF_INSTR_R2(instr); 6615 uint_t rd = DIF_INSTR_RD(instr); 6616 uint_t rs = DIF_INSTR_RS(instr); 6617 uint_t label = DIF_INSTR_LABEL(instr); 6618 uint_t v = DIF_INSTR_VAR(instr); 6619 uint_t subr = DIF_INSTR_SUBR(instr); 6620 uint_t type = DIF_INSTR_TYPE(instr); 6621 uint_t op = DIF_INSTR_OP(instr); 6622 6623 switch (op) { 6624 case DIF_OP_OR: 6625 case DIF_OP_XOR: 6626 case DIF_OP_AND: 6627 case DIF_OP_SLL: 6628 case DIF_OP_SRL: 6629 case DIF_OP_SRA: 6630 case DIF_OP_SUB: 6631 case DIF_OP_ADD: 6632 case DIF_OP_MUL: 6633 case DIF_OP_SDIV: 6634 case DIF_OP_UDIV: 6635 case DIF_OP_SREM: 6636 case DIF_OP_UREM: 6637 case DIF_OP_COPYS: 6638 if (r1 >= nregs) 6639 err += efunc(pc, "invalid register %u\n", r1); 6640 if (r2 >= nregs) 6641 err += efunc(pc, "invalid register %u\n", r2); 6642 if (rd >= nregs) 6643 err += efunc(pc, "invalid register %u\n", rd); 6644 if (rd == 0) 6645 err += efunc(pc, "cannot write to %r0\n"); 6646 break; 6647 case DIF_OP_NOT: 6648 case DIF_OP_MOV: 6649 case DIF_OP_ALLOCS: 6650 if (r1 >= nregs) 6651 err += efunc(pc, "invalid register %u\n", r1); 6652 if (r2 != 0) 6653 err += efunc(pc, "non-zero reserved bits\n"); 6654 if (rd >= nregs) 6655 err += efunc(pc, "invalid register %u\n", rd); 6656 if (rd == 0) 6657 err += efunc(pc, "cannot write to %r0\n"); 6658 break; 6659 case DIF_OP_LDSB: 6660 case DIF_OP_LDSH: 6661 case DIF_OP_LDSW: 6662 case DIF_OP_LDUB: 6663 case DIF_OP_LDUH: 6664 case DIF_OP_LDUW: 6665 case DIF_OP_LDX: 6666 if (r1 >= nregs) 6667 err += efunc(pc, "invalid register %u\n", r1); 6668 if (r2 != 0) 6669 err += efunc(pc, "non-zero reserved bits\n"); 6670 if (rd >= nregs) 6671 err += efunc(pc, "invalid register %u\n", rd); 6672 if (rd == 0) 6673 err += efunc(pc, "cannot write to %r0\n"); 6674 if (kcheck) 6675 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 6676 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 6677 break; 6678 case DIF_OP_RLDSB: 6679 case DIF_OP_RLDSH: 6680 case DIF_OP_RLDSW: 6681 case DIF_OP_RLDUB: 6682 case DIF_OP_RLDUH: 6683 case DIF_OP_RLDUW: 6684 case DIF_OP_RLDX: 6685 if (r1 >= nregs) 6686 err += efunc(pc, "invalid register %u\n", r1); 6687 if (r2 != 0) 6688 err += efunc(pc, "non-zero reserved bits\n"); 6689 if (rd >= nregs) 6690 err += efunc(pc, "invalid register %u\n", rd); 6691 if (rd == 0) 6692 err += efunc(pc, "cannot write to %r0\n"); 6693 break; 6694 case DIF_OP_ULDSB: 6695 case DIF_OP_ULDSH: 6696 case DIF_OP_ULDSW: 6697 case DIF_OP_ULDUB: 6698 case DIF_OP_ULDUH: 6699 case DIF_OP_ULDUW: 6700 case DIF_OP_ULDX: 6701 if (r1 >= nregs) 6702 err += efunc(pc, "invalid register %u\n", r1); 6703 if (r2 != 0) 6704 err += efunc(pc, "non-zero reserved bits\n"); 6705 if (rd >= nregs) 6706 err += efunc(pc, "invalid register %u\n", rd); 6707 if (rd == 0) 6708 err += efunc(pc, "cannot write to %r0\n"); 6709 break; 6710 case DIF_OP_STB: 6711 case DIF_OP_STH: 6712 case DIF_OP_STW: 6713 case DIF_OP_STX: 6714 if (r1 >= nregs) 6715 err += efunc(pc, "invalid register 
%u\n", r1); 6716 if (r2 != 0) 6717 err += efunc(pc, "non-zero reserved bits\n"); 6718 if (rd >= nregs) 6719 err += efunc(pc, "invalid register %u\n", rd); 6720 if (rd == 0) 6721 err += efunc(pc, "cannot write to 0 address\n"); 6722 break; 6723 case DIF_OP_CMP: 6724 case DIF_OP_SCMP: 6725 if (r1 >= nregs) 6726 err += efunc(pc, "invalid register %u\n", r1); 6727 if (r2 >= nregs) 6728 err += efunc(pc, "invalid register %u\n", r2); 6729 if (rd != 0) 6730 err += efunc(pc, "non-zero reserved bits\n"); 6731 break; 6732 case DIF_OP_TST: 6733 if (r1 >= nregs) 6734 err += efunc(pc, "invalid register %u\n", r1); 6735 if (r2 != 0 || rd != 0) 6736 err += efunc(pc, "non-zero reserved bits\n"); 6737 break; 6738 case DIF_OP_BA: 6739 case DIF_OP_BE: 6740 case DIF_OP_BNE: 6741 case DIF_OP_BG: 6742 case DIF_OP_BGU: 6743 case DIF_OP_BGE: 6744 case DIF_OP_BGEU: 6745 case DIF_OP_BL: 6746 case DIF_OP_BLU: 6747 case DIF_OP_BLE: 6748 case DIF_OP_BLEU: 6749 if (label >= dp->dtdo_len) { 6750 err += efunc(pc, "invalid branch target %u\n", 6751 label); 6752 } 6753 if (label <= pc) { 6754 err += efunc(pc, "backward branch to %u\n", 6755 label); 6756 } 6757 break; 6758 case DIF_OP_RET: 6759 if (r1 != 0 || r2 != 0) 6760 err += efunc(pc, "non-zero reserved bits\n"); 6761 if (rd >= nregs) 6762 err += efunc(pc, "invalid register %u\n", rd); 6763 break; 6764 case DIF_OP_NOP: 6765 case DIF_OP_POPTS: 6766 case DIF_OP_FLUSHTS: 6767 if (r1 != 0 || r2 != 0 || rd != 0) 6768 err += efunc(pc, "non-zero reserved bits\n"); 6769 break; 6770 case DIF_OP_SETX: 6771 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 6772 err += efunc(pc, "invalid integer ref %u\n", 6773 DIF_INSTR_INTEGER(instr)); 6774 } 6775 if (rd >= nregs) 6776 err += efunc(pc, "invalid register %u\n", rd); 6777 if (rd == 0) 6778 err += efunc(pc, "cannot write to %r0\n"); 6779 break; 6780 case DIF_OP_SETS: 6781 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 6782 err += efunc(pc, "invalid string ref %u\n", 6783 DIF_INSTR_STRING(instr)); 6784 } 6785 if (rd >= nregs) 6786 err += efunc(pc, "invalid register %u\n", rd); 6787 if (rd == 0) 6788 err += efunc(pc, "cannot write to %r0\n"); 6789 break; 6790 case DIF_OP_LDGA: 6791 case DIF_OP_LDTA: 6792 if (r1 > DIF_VAR_ARRAY_MAX) 6793 err += efunc(pc, "invalid array %u\n", r1); 6794 if (r2 >= nregs) 6795 err += efunc(pc, "invalid register %u\n", r2); 6796 if (rd >= nregs) 6797 err += efunc(pc, "invalid register %u\n", rd); 6798 if (rd == 0) 6799 err += efunc(pc, "cannot write to %r0\n"); 6800 break; 6801 case DIF_OP_LDGS: 6802 case DIF_OP_LDTS: 6803 case DIF_OP_LDLS: 6804 case DIF_OP_LDGAA: 6805 case DIF_OP_LDTAA: 6806 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 6807 err += efunc(pc, "invalid variable %u\n", v); 6808 if (rd >= nregs) 6809 err += efunc(pc, "invalid register %u\n", rd); 6810 if (rd == 0) 6811 err += efunc(pc, "cannot write to %r0\n"); 6812 break; 6813 case DIF_OP_STGS: 6814 case DIF_OP_STTS: 6815 case DIF_OP_STLS: 6816 case DIF_OP_STGAA: 6817 case DIF_OP_STTAA: 6818 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 6819 err += efunc(pc, "invalid variable %u\n", v); 6820 if (rs >= nregs) 6821 err += efunc(pc, "invalid register %u\n", rs); 6822 break; 6823 case DIF_OP_CALL: 6824 if (subr > DIF_SUBR_MAX) 6825 err += efunc(pc, "invalid subr %u\n", subr); 6826 if (rd >= nregs) 6827 err += efunc(pc, "invalid register %u\n", rd); 6828 if (rd == 0) 6829 err += efunc(pc, "cannot write to %r0\n"); 6830 6831 if (subr == DIF_SUBR_COPYOUT || 6832 subr == DIF_SUBR_COPYOUTSTR) { 6833 dp->dtdo_destructive = 1; 6834 }
6835 break; 6836 case DIF_OP_PUSHTR: 6837 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 6838 err += efunc(pc, "invalid ref type %u\n", type); 6839 if (r2 >= nregs) 6840 err += efunc(pc, "invalid register %u\n", r2); 6841 if (rs >= nregs) 6842 err += efunc(pc, "invalid register %u\n", rs); 6843 break; 6844 case DIF_OP_PUSHTV: 6845 if (type != DIF_TYPE_CTF) 6846 err += efunc(pc, "invalid val type %u\n", type); 6847 if (r2 >= nregs) 6848 err += efunc(pc, "invalid register %u\n", r2); 6849 if (rs >= nregs) 6850 err += efunc(pc, "invalid register %u\n", rs); 6851 break; 6852 default: 6853 err += efunc(pc, "invalid opcode %u\n", 6854 DIF_INSTR_OP(instr)); 6855 } 6856 } 6857 6858 if (dp->dtdo_len != 0 && 6859 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 6860 err += efunc(dp->dtdo_len - 1, 6861 "expected 'ret' as last DIF instruction\n"); 6862 } 6863 6864 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 6865 /* 6866 * If we're not returning by reference, the size must be either 6867 * 0 or the size of one of the base types. 6868 */ 6869 switch (dp->dtdo_rtype.dtdt_size) { 6870 case 0: 6871 case sizeof (uint8_t): 6872 case sizeof (uint16_t): 6873 case sizeof (uint32_t): 6874 case sizeof (uint64_t): 6875 break; 6876 6877 default: 6878 err += efunc(dp->dtdo_len - 1, "bad return size"); 6879 } 6880 } 6881 6882 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 6883 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 6884 dtrace_diftype_t *vt, *et; 6885 uint_t id, ndx; 6886 6887 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 6888 v->dtdv_scope != DIFV_SCOPE_THREAD && 6889 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 6890 err += efunc(i, "unrecognized variable scope %d\n", 6891 v->dtdv_scope); 6892 break; 6893 } 6894 6895 if (v->dtdv_kind != DIFV_KIND_ARRAY && 6896 v->dtdv_kind != DIFV_KIND_SCALAR) { 6897 err += efunc(i, "unrecognized variable type %d\n", 6898 v->dtdv_kind); 6899 break; 6900 } 6901 6902 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 6903 err += efunc(i, "%d exceeds variable id limit\n", id); 6904 break; 6905 } 6906 6907 if (id < DIF_VAR_OTHER_UBASE) 6908 continue; 6909 6910 /* 6911 * For user-defined variables, we need to check that this 6912 * definition is identical to any previous definition that we 6913 * encountered. 
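 * For example (illustrative D, not a test of the compiler): if one
 * enabling defines the global "x" with "x = timestamp" (a scalar), a
 * second DIFO whose variable table carries the same variable ID as a
 * by-ref string would be rejected by the kind/flags/size checks below,
 * since both definitions would address the same backing storage.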
6914 */ 6915 ndx = id - DIF_VAR_OTHER_UBASE; 6916 6917 switch (v->dtdv_scope) { 6918 case DIFV_SCOPE_GLOBAL: 6919 if (ndx < vstate->dtvs_nglobals) { 6920 dtrace_statvar_t *svar; 6921 6922 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 6923 existing = &svar->dtsv_var; 6924 } 6925 6926 break; 6927 6928 case DIFV_SCOPE_THREAD: 6929 if (ndx < vstate->dtvs_ntlocals) 6930 existing = &vstate->dtvs_tlocals[ndx]; 6931 break; 6932 6933 case DIFV_SCOPE_LOCAL: 6934 if (ndx < vstate->dtvs_nlocals) { 6935 dtrace_statvar_t *svar; 6936 6937 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 6938 existing = &svar->dtsv_var; 6939 } 6940 6941 break; 6942 } 6943 6944 vt = &v->dtdv_type; 6945 6946 if (vt->dtdt_flags & DIF_TF_BYREF) { 6947 if (vt->dtdt_size == 0) { 6948 err += efunc(i, "zero-sized variable\n"); 6949 break; 6950 } 6951 6952 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL && 6953 vt->dtdt_size > dtrace_global_maxsize) { 6954 err += efunc(i, "oversized by-ref global\n"); 6955 break; 6956 } 6957 } 6958 6959 if (existing == NULL || existing->dtdv_id == 0) 6960 continue; 6961 6962 ASSERT(existing->dtdv_id == v->dtdv_id); 6963 ASSERT(existing->dtdv_scope == v->dtdv_scope); 6964 6965 if (existing->dtdv_kind != v->dtdv_kind) 6966 err += efunc(i, "%d changed variable kind\n", id); 6967 6968 et = &existing->dtdv_type; 6969 6970 if (vt->dtdt_flags != et->dtdt_flags) { 6971 err += efunc(i, "%d changed variable type flags\n", id); 6972 break; 6973 } 6974 6975 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 6976 err += efunc(i, "%d changed variable type size\n", id); 6977 break; 6978 } 6979 } 6980 6981 return (err); 6982 } 6983 6984 /* 6985 * Validate a DTrace DIF object that is to be used as a helper. Helpers 6986 * are much more constrained than normal DIFOs. Specifically, they may 6987 * not: 6988 * 6989 * 1. Make calls to subroutines other than alloca(), bcopy(), copyin(), copyinto() or copyinstr(). 6990 * 2. Access DTrace variables other than the args[] array, and the 6991 * curthread, pid, tid, execname and zonename variables. 6992 * 3. Have thread-local variables. 6993 * 4. Have dynamic variables. 6994 */ 6995 static int 6996 dtrace_difo_validate_helper(dtrace_difo_t *dp) 6997 { 6998 int (*efunc)(uint_t pc, const char *, ...)
= dtrace_difo_err; 6999 int err = 0; 7000 uint_t pc; 7001 7002 for (pc = 0; pc < dp->dtdo_len; pc++) { 7003 dif_instr_t instr = dp->dtdo_buf[pc]; 7004 7005 uint_t v = DIF_INSTR_VAR(instr); 7006 uint_t subr = DIF_INSTR_SUBR(instr); 7007 uint_t op = DIF_INSTR_OP(instr); 7008 7009 switch (op) { 7010 case DIF_OP_OR: 7011 case DIF_OP_XOR: 7012 case DIF_OP_AND: 7013 case DIF_OP_SLL: 7014 case DIF_OP_SRL: 7015 case DIF_OP_SRA: 7016 case DIF_OP_SUB: 7017 case DIF_OP_ADD: 7018 case DIF_OP_MUL: 7019 case DIF_OP_SDIV: 7020 case DIF_OP_UDIV: 7021 case DIF_OP_SREM: 7022 case DIF_OP_UREM: 7023 case DIF_OP_COPYS: 7024 case DIF_OP_NOT: 7025 case DIF_OP_MOV: 7026 case DIF_OP_RLDSB: 7027 case DIF_OP_RLDSH: 7028 case DIF_OP_RLDSW: 7029 case DIF_OP_RLDUB: 7030 case DIF_OP_RLDUH: 7031 case DIF_OP_RLDUW: 7032 case DIF_OP_RLDX: 7033 case DIF_OP_ULDSB: 7034 case DIF_OP_ULDSH: 7035 case DIF_OP_ULDSW: 7036 case DIF_OP_ULDUB: 7037 case DIF_OP_ULDUH: 7038 case DIF_OP_ULDUW: 7039 case DIF_OP_ULDX: 7040 case DIF_OP_STB: 7041 case DIF_OP_STH: 7042 case DIF_OP_STW: 7043 case DIF_OP_STX: 7044 case DIF_OP_ALLOCS: 7045 case DIF_OP_CMP: 7046 case DIF_OP_SCMP: 7047 case DIF_OP_TST: 7048 case DIF_OP_BA: 7049 case DIF_OP_BE: 7050 case DIF_OP_BNE: 7051 case DIF_OP_BG: 7052 case DIF_OP_BGU: 7053 case DIF_OP_BGE: 7054 case DIF_OP_BGEU: 7055 case DIF_OP_BL: 7056 case DIF_OP_BLU: 7057 case DIF_OP_BLE: 7058 case DIF_OP_BLEU: 7059 case DIF_OP_RET: 7060 case DIF_OP_NOP: 7061 case DIF_OP_POPTS: 7062 case DIF_OP_FLUSHTS: 7063 case DIF_OP_SETX: 7064 case DIF_OP_SETS: 7065 case DIF_OP_LDGA: 7066 case DIF_OP_LDLS: 7067 case DIF_OP_STGS: 7068 case DIF_OP_STLS: 7069 case DIF_OP_PUSHTR: 7070 case DIF_OP_PUSHTV: 7071 break; 7072 7073 case DIF_OP_LDGS: 7074 if (v >= DIF_VAR_OTHER_UBASE) 7075 break; 7076 7077 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 7078 break; 7079 7080 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 7081 v == DIF_VAR_TID || v == DIF_VAR_EXECNAME || 7082 v == DIF_VAR_ZONENAME) 7083 break; 7084 7085 err += efunc(pc, "illegal variable %u\n", v); 7086 break; 7087 7088 case DIF_OP_LDTA: 7089 case DIF_OP_LDTS: 7090 case DIF_OP_LDGAA: 7091 case DIF_OP_LDTAA: 7092 err += efunc(pc, "illegal dynamic variable load\n"); 7093 break; 7094 7095 case DIF_OP_STTS: 7096 case DIF_OP_STGAA: 7097 case DIF_OP_STTAA: 7098 err += efunc(pc, "illegal dynamic variable store\n"); 7099 break; 7100 7101 case DIF_OP_CALL: 7102 if (subr == DIF_SUBR_ALLOCA || 7103 subr == DIF_SUBR_BCOPY || 7104 subr == DIF_SUBR_COPYIN || 7105 subr == DIF_SUBR_COPYINTO || 7106 subr == DIF_SUBR_COPYINSTR) 7107 break; 7108 7109 err += efunc(pc, "invalid subr %u\n", subr); 7110 break; 7111 7112 default: 7113 err += efunc(pc, "invalid opcode %u\n", 7114 DIF_INSTR_OP(instr)); 7115 } 7116 } 7117 7118 return (err); 7119 } 7120 7121 /* 7122 * Returns 1 if the expression in the DIF object can be cached on a per-thread 7123 * basis; 0 if not. 7124 */ 7125 static int 7126 dtrace_difo_cacheable(dtrace_difo_t *dp) 7127 { 7128 int i; 7129 7130 if (dp == NULL) 7131 return (0); 7132 7133 for (i = 0; i < dp->dtdo_varlen; i++) { 7134 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7135 7136 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 7137 continue; 7138 7139 switch (v->dtdv_id) { 7140 case DIF_VAR_CURTHREAD: 7141 case DIF_VAR_PID: 7142 case DIF_VAR_TID: 7143 case DIF_VAR_EXECNAME: 7144 case DIF_VAR_ZONENAME: 7145 break; 7146 7147 default: 7148 return (0); 7149 } 7150 } 7151 7152 /* 7153 * This DIF object may be cacheable. 
Now we need to look for any 7154 * load variant instructions, or any stores to thread-local variables. 7155 */ 7156 for (i = 0; i < dp->dtdo_len; i++) { 7157 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 7158 7159 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 7160 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 7161 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 7162 (op == DIF_OP_STTS)) 7163 return (0); 7164 } 7165 7166 return (1); 7167 } 7168 7169 static void 7170 dtrace_difo_hold(dtrace_difo_t *dp) 7171 { 7172 int i; 7173 7174 ASSERT(MUTEX_HELD(&dtrace_lock)); 7175 7176 dp->dtdo_refcnt++; 7177 ASSERT(dp->dtdo_refcnt != 0); 7178 7179 /* 7180 * We need to check this DIF object for references to the variable 7181 * DIF_VAR_VTIMESTAMP. 7182 */ 7183 for (i = 0; i < dp->dtdo_varlen; i++) { 7184 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7185 7186 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 7187 continue; 7188 7189 if (dtrace_vtime_references++ == 0) 7190 dtrace_vtime_enable(); 7191 } 7192 } 7193 7194 /* 7195 * This routine calculates the dynamic variable chunksize for a given DIF 7196 * object. The calculation is not fool-proof, and can probably be tricked by 7197 * malicious DIF -- but it works for all compiler-generated DIF. Because this 7198 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 7199 * if a dynamic variable size exceeds the chunksize. 7200 */ 7201 static void 7202 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 7203 { 7204 uint64_t sval; 7205 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 7206 const dif_instr_t *text = dp->dtdo_buf; 7207 uint_t pc, srd = 0; 7208 uint_t ttop = 0; 7209 size_t size, ksize; 7210 uint_t id, i; 7211 7212 for (pc = 0; pc < dp->dtdo_len; pc++) { 7213 dif_instr_t instr = text[pc]; 7214 uint_t op = DIF_INSTR_OP(instr); 7215 uint_t rd = DIF_INSTR_RD(instr); 7216 uint_t r1 = DIF_INSTR_R1(instr); 7217 uint_t nkeys = 0; 7218 uchar_t scope; 7219 7220 dtrace_key_t *key = tupregs; 7221 7222 switch (op) { 7223 case DIF_OP_SETX: 7224 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 7225 srd = rd; 7226 continue; 7227 7228 case DIF_OP_STTS: 7229 key = &tupregs[DIF_DTR_NREGS]; 7230 key[0].dttk_size = 0; 7231 key[1].dttk_size = 0; 7232 nkeys = 2; 7233 scope = DIFV_SCOPE_THREAD; 7234 break; 7235 7236 case DIF_OP_STGAA: 7237 case DIF_OP_STTAA: 7238 nkeys = ttop; 7239 7240 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 7241 key[nkeys++].dttk_size = 0; 7242 7243 key[nkeys++].dttk_size = 0; 7244 7245 if (op == DIF_OP_STTAA) { 7246 scope = DIFV_SCOPE_THREAD; 7247 } else { 7248 scope = DIFV_SCOPE_GLOBAL; 7249 } 7250 7251 break; 7252 7253 case DIF_OP_PUSHTR: 7254 if (ttop == DIF_DTR_NREGS) 7255 return; 7256 7257 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 7258 /* 7259 * If the register for the size of the "pushtr" 7260 * is %r0 (or the value is 0) and the type is 7261 * a string, we'll use the system-wide default 7262 * string size. 
7263 */ 7264 tupregs[ttop++].dttk_size = 7265 dtrace_strsize_default; 7266 } else { 7267 if (srd == 0) 7268 return; 7269 7270 tupregs[ttop++].dttk_size = sval; 7271 } 7272 7273 break; 7274 7275 case DIF_OP_PUSHTV: 7276 if (ttop == DIF_DTR_NREGS) 7277 return; 7278 7279 tupregs[ttop++].dttk_size = 0; 7280 break; 7281 7282 case DIF_OP_FLUSHTS: 7283 ttop = 0; 7284 break; 7285 7286 case DIF_OP_POPTS: 7287 if (ttop != 0) 7288 ttop--; 7289 break; 7290 } 7291 7292 sval = 0; 7293 srd = 0; 7294 7295 if (nkeys == 0) 7296 continue; 7297 7298 /* 7299 * We have a dynamic variable allocation; calculate its size. 7300 */ 7301 for (ksize = 0, i = 0; i < nkeys; i++) 7302 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 7303 7304 size = sizeof (dtrace_dynvar_t); 7305 size += sizeof (dtrace_key_t) * (nkeys - 1); 7306 size += ksize; 7307 7308 /* 7309 * Now we need to determine the size of the stored data. 7310 */ 7311 id = DIF_INSTR_VAR(instr); 7312 7313 for (i = 0; i < dp->dtdo_varlen; i++) { 7314 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7315 7316 if (v->dtdv_id == id && v->dtdv_scope == scope) { 7317 size += v->dtdv_type.dtdt_size; 7318 break; 7319 } 7320 } 7321 7322 if (i == dp->dtdo_varlen) 7323 return; 7324 7325 /* 7326 * We have the size. If this is larger than the chunk size 7327 * for our dynamic variable state, reset the chunk size. 7328 */ 7329 size = P2ROUNDUP(size, sizeof (uint64_t)); 7330 7331 if (size > vstate->dtvs_dynvars.dtds_chunksize) 7332 vstate->dtvs_dynvars.dtds_chunksize = size; 7333 } 7334 } 7335 7336 static void 7337 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 7338 { 7339 int i, oldsvars, osz, nsz, otlocals, ntlocals; 7340 uint_t id; 7341 7342 ASSERT(MUTEX_HELD(&dtrace_lock)); 7343 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 7344 7345 for (i = 0; i < dp->dtdo_varlen; i++) { 7346 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7347 dtrace_statvar_t *svar, ***svarp; 7348 size_t dsize = 0; 7349 uint8_t scope = v->dtdv_scope; 7350 int *np; 7351 7352 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 7353 continue; 7354 7355 id -= DIF_VAR_OTHER_UBASE; 7356 7357 switch (scope) { 7358 case DIFV_SCOPE_THREAD: 7359 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 7360 dtrace_difv_t *tlocals; 7361 7362 if ((ntlocals = (otlocals << 1)) == 0) 7363 ntlocals = 1; 7364 7365 osz = otlocals * sizeof (dtrace_difv_t); 7366 nsz = ntlocals * sizeof (dtrace_difv_t); 7367 7368 tlocals = kmem_zalloc(nsz, KM_SLEEP); 7369 7370 if (osz != 0) { 7371 bcopy(vstate->dtvs_tlocals, 7372 tlocals, osz); 7373 kmem_free(vstate->dtvs_tlocals, osz); 7374 } 7375 7376 vstate->dtvs_tlocals = tlocals; 7377 vstate->dtvs_ntlocals = ntlocals; 7378 } 7379 7380 vstate->dtvs_tlocals[id] = *v; 7381 continue; 7382 7383 case DIFV_SCOPE_LOCAL: 7384 np = &vstate->dtvs_nlocals; 7385 svarp = &vstate->dtvs_locals; 7386 7387 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 7388 dsize = NCPU * (v->dtdv_type.dtdt_size + 7389 sizeof (uint64_t)); 7390 else 7391 dsize = NCPU * sizeof (uint64_t); 7392 7393 break; 7394 7395 case DIFV_SCOPE_GLOBAL: 7396 np = &vstate->dtvs_nglobals; 7397 svarp = &vstate->dtvs_globals; 7398 7399 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 7400 dsize = v->dtdv_type.dtdt_size + 7401 sizeof (uint64_t); 7402 7403 break; 7404 7405 default: 7406 ASSERT(0); 7407 } 7408 7409 while (id >= (oldsvars = *np)) { 7410 dtrace_statvar_t **statics; 7411 int newsvars, oldsize, newsize; 7412 7413 if ((newsvars = (oldsvars << 1)) == 0) 7414 newsvars = 1; 7415 7416 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 7417 
newsize = newsvars * sizeof (dtrace_statvar_t *); 7418 7419 statics = kmem_zalloc(newsize, KM_SLEEP); 7420 7421 if (oldsize != 0) { 7422 bcopy(*svarp, statics, oldsize); 7423 kmem_free(*svarp, oldsize); 7424 } 7425 7426 *svarp = statics; 7427 *np = newsvars; 7428 } 7429 7430 if ((svar = (*svarp)[id]) == NULL) { 7431 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 7432 svar->dtsv_var = *v; 7433 7434 if ((svar->dtsv_size = dsize) != 0) { 7435 svar->dtsv_data = (uint64_t)(uintptr_t) 7436 kmem_zalloc(dsize, KM_SLEEP); 7437 } 7438 7439 (*svarp)[id] = svar; 7440 } 7441 7442 svar->dtsv_refcnt++; 7443 } 7444 7445 dtrace_difo_chunksize(dp, vstate); 7446 dtrace_difo_hold(dp); 7447 } 7448 7449 static dtrace_difo_t * 7450 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 7451 { 7452 dtrace_difo_t *new; 7453 size_t sz; 7454 7455 ASSERT(dp->dtdo_buf != NULL); 7456 ASSERT(dp->dtdo_refcnt != 0); 7457 7458 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 7459 7460 ASSERT(dp->dtdo_buf != NULL); 7461 sz = dp->dtdo_len * sizeof (dif_instr_t); 7462 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 7463 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 7464 new->dtdo_len = dp->dtdo_len; 7465 7466 if (dp->dtdo_strtab != NULL) { 7467 ASSERT(dp->dtdo_strlen != 0); 7468 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 7469 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 7470 new->dtdo_strlen = dp->dtdo_strlen; 7471 } 7472 7473 if (dp->dtdo_inttab != NULL) { 7474 ASSERT(dp->dtdo_intlen != 0); 7475 sz = dp->dtdo_intlen * sizeof (uint64_t); 7476 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 7477 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 7478 new->dtdo_intlen = dp->dtdo_intlen; 7479 } 7480 7481 if (dp->dtdo_vartab != NULL) { 7482 ASSERT(dp->dtdo_varlen != 0); 7483 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 7484 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 7485 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 7486 new->dtdo_varlen = dp->dtdo_varlen; 7487 } 7488 7489 dtrace_difo_init(new, vstate); 7490 return (new); 7491 } 7492 7493 static void 7494 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 7495 { 7496 int i; 7497 7498 ASSERT(dp->dtdo_refcnt == 0); 7499 7500 for (i = 0; i < dp->dtdo_varlen; i++) { 7501 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7502 dtrace_statvar_t *svar, **svarp; 7503 uint_t id; 7504 uint8_t scope = v->dtdv_scope; 7505 int *np; 7506 7507 switch (scope) { 7508 case DIFV_SCOPE_THREAD: 7509 continue; 7510 7511 case DIFV_SCOPE_LOCAL: 7512 np = &vstate->dtvs_nlocals; 7513 svarp = vstate->dtvs_locals; 7514 break; 7515 7516 case DIFV_SCOPE_GLOBAL: 7517 np = &vstate->dtvs_nglobals; 7518 svarp = vstate->dtvs_globals; 7519 break; 7520 7521 default: 7522 ASSERT(0); 7523 } 7524 7525 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 7526 continue; 7527 7528 id -= DIF_VAR_OTHER_UBASE; 7529 ASSERT(id < *np); 7530 7531 svar = svarp[id]; 7532 ASSERT(svar != NULL); 7533 ASSERT(svar->dtsv_refcnt > 0); 7534 7535 if (--svar->dtsv_refcnt > 0) 7536 continue; 7537 7538 if (svar->dtsv_size != 0) { 7539 ASSERT(svar->dtsv_data != NULL); 7540 kmem_free((void *)(uintptr_t)svar->dtsv_data, 7541 svar->dtsv_size); 7542 } 7543 7544 kmem_free(svar, sizeof (dtrace_statvar_t)); 7545 svarp[id] = NULL; 7546 } 7547 7548 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 7549 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 7550 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 7551 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 7552 7553 
kmem_free(dp, sizeof (dtrace_difo_t)); 7554 } 7555 7556 static void 7557 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 7558 { 7559 int i; 7560 7561 ASSERT(MUTEX_HELD(&dtrace_lock)); 7562 ASSERT(dp->dtdo_refcnt != 0); 7563 7564 for (i = 0; i < dp->dtdo_varlen; i++) { 7565 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7566 7567 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 7568 continue; 7569 7570 ASSERT(dtrace_vtime_references > 0); 7571 if (--dtrace_vtime_references == 0) 7572 dtrace_vtime_disable(); 7573 } 7574 7575 if (--dp->dtdo_refcnt == 0) 7576 dtrace_difo_destroy(dp, vstate); 7577 } 7578 7579 /* 7580 * DTrace Format Functions 7581 */ 7582 static uint16_t 7583 dtrace_format_add(dtrace_state_t *state, char *str) 7584 { 7585 char *fmt, **new; 7586 uint16_t ndx, len = strlen(str) + 1; 7587 7588 fmt = kmem_zalloc(len, KM_SLEEP); 7589 bcopy(str, fmt, len); 7590 7591 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 7592 if (state->dts_formats[ndx] == NULL) { 7593 state->dts_formats[ndx] = fmt; 7594 return (ndx + 1); 7595 } 7596 } 7597 7598 if (state->dts_nformats == USHRT_MAX) { 7599 /* 7600 * This is only likely if a denial-of-service attack is being 7601 * attempted. As such, it's okay to fail silently here. 7602 */ 7603 kmem_free(fmt, len); 7604 return (0); 7605 } 7606 7607 /* 7608 * For simplicity, we always resize the formats array to be exactly the 7609 * number of formats. 7610 */ 7611 ndx = state->dts_nformats++; 7612 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 7613 7614 if (state->dts_formats != NULL) { 7615 ASSERT(ndx != 0); 7616 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 7617 kmem_free(state->dts_formats, ndx * sizeof (char *)); 7618 } 7619 7620 state->dts_formats = new; 7621 state->dts_formats[ndx] = fmt; 7622 7623 return (ndx + 1); 7624 } 7625 7626 static void 7627 dtrace_format_remove(dtrace_state_t *state, uint16_t format) 7628 { 7629 char *fmt; 7630 7631 ASSERT(state->dts_formats != NULL); 7632 ASSERT(format <= state->dts_nformats); 7633 ASSERT(state->dts_formats[format - 1] != NULL); 7634 7635 fmt = state->dts_formats[format - 1]; 7636 kmem_free(fmt, strlen(fmt) + 1); 7637 state->dts_formats[format - 1] = NULL; 7638 } 7639 7640 static void 7641 dtrace_format_destroy(dtrace_state_t *state) 7642 { 7643 int i; 7644 7645 if (state->dts_nformats == 0) { 7646 ASSERT(state->dts_formats == NULL); 7647 return; 7648 } 7649 7650 ASSERT(state->dts_formats != NULL); 7651 7652 for (i = 0; i < state->dts_nformats; i++) { 7653 char *fmt = state->dts_formats[i]; 7654 7655 if (fmt == NULL) 7656 continue; 7657 7658 kmem_free(fmt, strlen(fmt) + 1); 7659 } 7660 7661 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 7662 state->dts_nformats = 0; 7663 state->dts_formats = NULL; 7664 } 7665 7666 /* 7667 * DTrace Predicate Functions 7668 */ 7669 static dtrace_predicate_t * 7670 dtrace_predicate_create(dtrace_difo_t *dp) 7671 { 7672 dtrace_predicate_t *pred; 7673 7674 ASSERT(MUTEX_HELD(&dtrace_lock)); 7675 ASSERT(dp->dtdo_refcnt != 0); 7676 7677 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 7678 pred->dtp_difo = dp; 7679 pred->dtp_refcnt = 1; 7680 7681 if (!dtrace_difo_cacheable(dp)) 7682 return (pred); 7683 7684 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 7685 /* 7686 * This is only theoretically possible -- we have had 2^32 7687 * cacheable predicates on this machine. We cannot allow any 7688 * more predicates to become cacheable: as unlikely as it is, 7689 * there may be a thread caching a (now stale) predicate cache 7690 * ID. 
(N.B.: the temptation is being successfully resisted to 7691 * have this cmn_err() "Holy shit -- we executed this code!") 7692 */ 7693 return (pred); 7694 } 7695 7696 pred->dtp_cacheid = dtrace_predcache_id++; 7697 7698 return (pred); 7699 } 7700 7701 static void 7702 dtrace_predicate_hold(dtrace_predicate_t *pred) 7703 { 7704 ASSERT(MUTEX_HELD(&dtrace_lock)); 7705 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 7706 ASSERT(pred->dtp_refcnt > 0); 7707 7708 pred->dtp_refcnt++; 7709 } 7710 7711 static void 7712 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 7713 { 7714 dtrace_difo_t *dp = pred->dtp_difo; 7715 7716 ASSERT(MUTEX_HELD(&dtrace_lock)); 7717 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 7718 ASSERT(pred->dtp_refcnt > 0); 7719 7720 if (--pred->dtp_refcnt == 0) { 7721 dtrace_difo_release(pred->dtp_difo, vstate); 7722 kmem_free(pred, sizeof (dtrace_predicate_t)); 7723 } 7724 } 7725 7726 /* 7727 * DTrace Action Description Functions 7728 */ 7729 static dtrace_actdesc_t * 7730 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 7731 uint64_t uarg, uint64_t arg) 7732 { 7733 dtrace_actdesc_t *act; 7734 7735 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 7736 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 7737 7738 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 7739 act->dtad_kind = kind; 7740 act->dtad_ntuple = ntuple; 7741 act->dtad_uarg = uarg; 7742 act->dtad_arg = arg; 7743 act->dtad_refcnt = 1; 7744 7745 return (act); 7746 } 7747 7748 static void 7749 dtrace_actdesc_hold(dtrace_actdesc_t *act) 7750 { 7751 ASSERT(act->dtad_refcnt >= 1); 7752 act->dtad_refcnt++; 7753 } 7754 7755 static void 7756 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 7757 { 7758 dtrace_actkind_t kind = act->dtad_kind; 7759 dtrace_difo_t *dp; 7760 7761 ASSERT(act->dtad_refcnt >= 1); 7762 7763 if (--act->dtad_refcnt != 0) 7764 return; 7765 7766 if ((dp = act->dtad_difo) != NULL) 7767 dtrace_difo_release(dp, vstate); 7768 7769 if (DTRACEACT_ISPRINTFLIKE(kind)) { 7770 char *str = (char *)(uintptr_t)act->dtad_arg; 7771 7772 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 7773 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 7774 7775 if (str != NULL) 7776 kmem_free(str, strlen(str) + 1); 7777 } 7778 7779 kmem_free(act, sizeof (dtrace_actdesc_t)); 7780 } 7781 7782 /* 7783 * DTrace ECB Functions 7784 */ 7785 static dtrace_ecb_t * 7786 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 7787 { 7788 dtrace_ecb_t *ecb; 7789 dtrace_epid_t epid; 7790 7791 ASSERT(MUTEX_HELD(&dtrace_lock)); 7792 7793 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 7794 ecb->dte_predicate = NULL; 7795 ecb->dte_probe = probe; 7796 7797 /* 7798 * The default size is the size of the default action: recording 7799 * the epid. 
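 * That is, every record in the principal buffer leads with the enabled
 * probe ID (4 bytes, assuming sizeof (dtrace_epid_t) == 4), which is
 * what consumers use to recover the record's layout:
 *
 *	+--------+---------------------------------+
 *	|  epid  |  action data, if any ...        |
 *	+--------+---------------------------------+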
7800 */ 7801 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 7802 ecb->dte_alignment = sizeof (dtrace_epid_t); 7803 7804 epid = state->dts_epid++; 7805 7806 if (epid - 1 >= state->dts_necbs) { 7807 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 7808 int necbs = state->dts_necbs << 1; 7809 7810 ASSERT(epid == state->dts_necbs + 1); 7811 7812 if (necbs == 0) { 7813 ASSERT(oecbs == NULL); 7814 necbs = 1; 7815 } 7816 7817 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 7818 7819 if (oecbs != NULL) 7820 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 7821 7822 dtrace_membar_producer(); 7823 state->dts_ecbs = ecbs; 7824 7825 if (oecbs != NULL) { 7826 /* 7827 * If this state is active, we must dtrace_sync() 7828 * before we can free the old dts_ecbs array: we're 7829 * coming in hot, and there may be active ring 7830 * buffer processing (which indexes into the dts_ecbs 7831 * array) on another CPU. 7832 */ 7833 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 7834 dtrace_sync(); 7835 7836 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 7837 } 7838 7839 dtrace_membar_producer(); 7840 state->dts_necbs = necbs; 7841 } 7842 7843 ecb->dte_state = state; 7844 7845 ASSERT(state->dts_ecbs[epid - 1] == NULL); 7846 dtrace_membar_producer(); 7847 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 7848 7849 return (ecb); 7850 } 7851 7852 static void 7853 dtrace_ecb_enable(dtrace_ecb_t *ecb) 7854 { 7855 dtrace_probe_t *probe = ecb->dte_probe; 7856 7857 ASSERT(MUTEX_HELD(&cpu_lock)); 7858 ASSERT(MUTEX_HELD(&dtrace_lock)); 7859 ASSERT(ecb->dte_next == NULL); 7860 7861 if (probe == NULL) { 7862 /* 7863 * This is the NULL probe -- there's nothing to do. 7864 */ 7865 return; 7866 } 7867 7868 if (probe->dtpr_ecb == NULL) { 7869 dtrace_provider_t *prov = probe->dtpr_provider; 7870 7871 /* 7872 * We're the first ECB on this probe. 7873 */ 7874 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 7875 7876 if (ecb->dte_predicate != NULL) 7877 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 7878 7879 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 7880 probe->dtpr_id, probe->dtpr_arg); 7881 } else { 7882 /* 7883 * This probe is already active. Swing the last pointer to 7884 * point to the new ECB, and issue a dtrace_sync() to assure 7885 * that all CPUs have seen the change. 7886 */ 7887 ASSERT(probe->dtpr_ecb_last != NULL); 7888 probe->dtpr_ecb_last->dte_next = ecb; 7889 probe->dtpr_ecb_last = ecb; 7890 probe->dtpr_predcache = 0; 7891 7892 dtrace_sync(); 7893 } 7894 } 7895 7896 static void 7897 dtrace_ecb_resize(dtrace_ecb_t *ecb) 7898 { 7899 uint32_t maxalign = sizeof (dtrace_epid_t); 7900 uint32_t align = sizeof (uint8_t), offs, diff; 7901 dtrace_action_t *act; 7902 int wastuple = 0; 7903 uint32_t aggbase = UINT32_MAX; 7904 dtrace_state_t *state = ecb->dte_state; 7905 7906 /* 7907 * If we record anything, we always record the epid. (And we always 7908 * record it first.) 7909 */ 7910 offs = sizeof (dtrace_epid_t); 7911 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 7912 7913 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 7914 dtrace_recdesc_t *rec = &act->dta_rec; 7915 7916 if ((align = rec->dtrd_alignment) > maxalign) 7917 maxalign = align; 7918 7919 if (!wastuple && act->dta_intuple) { 7920 /* 7921 * This is the first record in a tuple. Align the 7922 * offset to be at offset 4 in an 8-byte aligned 7923 * block. 
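 * Concretely (assuming sizeof (dtrace_aggid_t) == 4, as the above
 * implies): if offs is 8, diff below becomes (8 + 4) & 7 == 4, so offs
 * is advanced by 8 - 4 to 12; the aggregation ID then sits at
 * aggbase == 8 -- 8-byte aligned -- with the first tuple member at
 * offset 12.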
7924 */ 7925 diff = offs + sizeof (dtrace_aggid_t); 7926 7927 if (diff = (diff & (sizeof (uint64_t) - 1))) 7928 offs += sizeof (uint64_t) - diff; 7929 7930 aggbase = offs - sizeof (dtrace_aggid_t); 7931 ASSERT(!(aggbase & (sizeof (uint64_t) - 1))); 7932 } 7933 7934 /*LINTED*/ 7935 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) { 7936 /* 7937 * The current offset is not properly aligned; align it. 7938 */ 7939 offs += align - diff; 7940 } 7941 7942 rec->dtrd_offset = offs; 7943 7944 if (offs + rec->dtrd_size > ecb->dte_needed) { 7945 ecb->dte_needed = offs + rec->dtrd_size; 7946 7947 if (ecb->dte_needed > state->dts_needed) 7948 state->dts_needed = ecb->dte_needed; 7949 } 7950 7951 if (DTRACEACT_ISAGG(act->dta_kind)) { 7952 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 7953 dtrace_action_t *first = agg->dtag_first, *prev; 7954 7955 ASSERT(rec->dtrd_size != 0 && first != NULL); 7956 ASSERT(wastuple); 7957 ASSERT(aggbase != UINT32_MAX); 7958 7959 agg->dtag_base = aggbase; 7960 7961 while ((prev = first->dta_prev) != NULL && 7962 DTRACEACT_ISAGG(prev->dta_kind)) { 7963 agg = (dtrace_aggregation_t *)prev; 7964 first = agg->dtag_first; 7965 } 7966 7967 if (prev != NULL) { 7968 offs = prev->dta_rec.dtrd_offset + 7969 prev->dta_rec.dtrd_size; 7970 } else { 7971 offs = sizeof (dtrace_epid_t); 7972 } 7973 wastuple = 0; 7974 } else { 7975 if (!act->dta_intuple) 7976 ecb->dte_size = offs + rec->dtrd_size; 7977 7978 offs += rec->dtrd_size; 7979 } 7980 7981 wastuple = act->dta_intuple; 7982 } 7983 7984 if ((act = ecb->dte_action) != NULL && 7985 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 7986 ecb->dte_size == sizeof (dtrace_epid_t)) { 7987 /* 7988 * If the size is still sizeof (dtrace_epid_t), then all 7989 * actions store no data; set the size to 0. 7990 */ 7991 ecb->dte_alignment = maxalign; 7992 ecb->dte_size = 0; 7993 7994 /* 7995 * If the needed space is still sizeof (dtrace_epid_t), then 7996 * all actions need no additional space; set the needed 7997 * size to 0. 7998 */ 7999 if (ecb->dte_needed == sizeof (dtrace_epid_t)) 8000 ecb->dte_needed = 0; 8001 8002 return; 8003 } 8004 8005 /* 8006 * Set our alignment, and make sure that the dte_size and dte_needed 8007 * are aligned to the size of an EPID. 
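 * The statements below are the usual power-of-two round-up,
 *
 *	size = (size + (sizeof (dtrace_epid_t) - 1)) &
 *	    ~(sizeof (dtrace_epid_t) - 1);
 *
 * so, for example, a 13-byte dte_size becomes 16 when the EPID is
 * 4 bytes wide.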
8008 */ 8009 ecb->dte_alignment = maxalign; 8010 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) & 8011 ~(sizeof (dtrace_epid_t) - 1); 8012 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) & 8013 ~(sizeof (dtrace_epid_t) - 1); 8014 ASSERT(ecb->dte_size <= ecb->dte_needed); 8015 } 8016 8017 static dtrace_action_t * 8018 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 8019 { 8020 dtrace_aggregation_t *agg; 8021 size_t size = sizeof (uint64_t); 8022 int ntuple = desc->dtad_ntuple; 8023 dtrace_action_t *act; 8024 dtrace_recdesc_t *frec; 8025 dtrace_aggid_t aggid; 8026 dtrace_state_t *state = ecb->dte_state; 8027 8028 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 8029 agg->dtag_ecb = ecb; 8030 8031 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 8032 8033 switch (desc->dtad_kind) { 8034 case DTRACEAGG_MIN: 8035 agg->dtag_initial = UINT64_MAX; 8036 agg->dtag_aggregate = dtrace_aggregate_min; 8037 break; 8038 8039 case DTRACEAGG_MAX: 8040 agg->dtag_aggregate = dtrace_aggregate_max; 8041 break; 8042 8043 case DTRACEAGG_COUNT: 8044 agg->dtag_aggregate = dtrace_aggregate_count; 8045 break; 8046 8047 case DTRACEAGG_QUANTIZE: 8048 agg->dtag_aggregate = dtrace_aggregate_quantize; 8049 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 8050 sizeof (uint64_t); 8051 break; 8052 8053 case DTRACEAGG_LQUANTIZE: { 8054 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 8055 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 8056 8057 agg->dtag_initial = desc->dtad_arg; 8058 agg->dtag_aggregate = dtrace_aggregate_lquantize; 8059 8060 if (step == 0 || levels == 0) 8061 goto err; 8062 8063 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 8064 break; 8065 } 8066 8067 case DTRACEAGG_AVG: 8068 agg->dtag_aggregate = dtrace_aggregate_avg; 8069 size = sizeof (uint64_t) * 2; 8070 break; 8071 8072 case DTRACEAGG_SUM: 8073 agg->dtag_aggregate = dtrace_aggregate_sum; 8074 break; 8075 8076 default: 8077 goto err; 8078 } 8079 8080 agg->dtag_action.dta_rec.dtrd_size = size; 8081 8082 if (ntuple == 0) 8083 goto err; 8084 8085 /* 8086 * We must make sure that we have enough actions for the n-tuple. 8087 */ 8088 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 8089 if (DTRACEACT_ISAGG(act->dta_kind)) 8090 break; 8091 8092 if (--ntuple == 0) { 8093 /* 8094 * This is the action with which our n-tuple begins. 8095 */ 8096 agg->dtag_first = act; 8097 goto success; 8098 } 8099 } 8100 8101 /* 8102 * This n-tuple is short by ntuple elements. Return failure. 8103 */ 8104 ASSERT(ntuple != 0); 8105 err: 8106 kmem_free(agg, sizeof (dtrace_aggregation_t)); 8107 return (NULL); 8108 8109 success: 8110 /* 8111 * We need to allocate an id for this aggregation. 
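 * As with probe IDs, a vmem arena is used here purely as an ID
 * allocator: each ID is a one-unit, best-fit allocation from
 * dts_aggid_arena, and is handed back in
 * dtrace_ecb_aggregation_destroy():
 *
 *	vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1);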
8112 */ 8113 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 8114 VM_BESTFIT | VM_SLEEP); 8115 8116 if (aggid - 1 >= state->dts_naggregations) { 8117 dtrace_aggregation_t **oaggs = state->dts_aggregations; 8118 dtrace_aggregation_t **aggs; 8119 int naggs = state->dts_naggregations << 1; 8120 int onaggs = state->dts_naggregations; 8121 8122 ASSERT(aggid == state->dts_naggregations + 1); 8123 8124 if (naggs == 0) { 8125 ASSERT(oaggs == NULL); 8126 naggs = 1; 8127 } 8128 8129 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 8130 8131 if (oaggs != NULL) { 8132 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 8133 kmem_free(oaggs, onaggs * sizeof (*aggs)); 8134 } 8135 8136 state->dts_aggregations = aggs; 8137 state->dts_naggregations = naggs; 8138 } 8139 8140 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 8141 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 8142 8143 frec = &agg->dtag_first->dta_rec; 8144 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 8145 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 8146 8147 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 8148 ASSERT(!act->dta_intuple); 8149 act->dta_intuple = 1; 8150 } 8151 8152 return (&agg->dtag_action); 8153 } 8154 8155 static void 8156 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 8157 { 8158 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 8159 dtrace_state_t *state = ecb->dte_state; 8160 dtrace_aggid_t aggid = agg->dtag_id; 8161 8162 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 8163 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 8164 8165 ASSERT(state->dts_aggregations[aggid - 1] == agg); 8166 state->dts_aggregations[aggid - 1] = NULL; 8167 8168 kmem_free(agg, sizeof (dtrace_aggregation_t)); 8169 } 8170 8171 static int 8172 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 8173 { 8174 dtrace_action_t *action, *last; 8175 dtrace_difo_t *dp = desc->dtad_difo; 8176 uint32_t size = 0, align = sizeof (uint8_t), mask; 8177 uint16_t format = 0; 8178 dtrace_recdesc_t *rec; 8179 dtrace_state_t *state = ecb->dte_state; 8180 dtrace_optval_t *opt = state->dts_options, nframes, strsize; 8181 uint64_t arg = desc->dtad_arg; 8182 8183 ASSERT(MUTEX_HELD(&dtrace_lock)); 8184 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 8185 8186 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 8187 /* 8188 * If this is an aggregating action, there must be neither 8189 * a speculate nor a commit on the action chain. 8190 */ 8191 dtrace_action_t *act; 8192 8193 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 8194 if (act->dta_kind == DTRACEACT_COMMIT) 8195 return (EINVAL); 8196 8197 if (act->dta_kind == DTRACEACT_SPECULATE) 8198 return (EINVAL); 8199 } 8200 8201 action = dtrace_ecb_aggregation_create(ecb, desc); 8202 8203 if (action == NULL) 8204 return (EINVAL); 8205 } else { 8206 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 8207 (desc->dtad_kind == DTRACEACT_DIFEXPR && 8208 dp != NULL && dp->dtdo_destructive)) { 8209 state->dts_destructive = 1; 8210 } 8211 8212 switch (desc->dtad_kind) { 8213 case DTRACEACT_PRINTF: 8214 case DTRACEACT_PRINTA: 8215 case DTRACEACT_SYSTEM: 8216 case DTRACEACT_FREOPEN: 8217 /* 8218 * We know that our arg is a string -- turn it into a 8219 * format. 
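 * Format indices are handed out 1-based by dtrace_format_add() --
 *
 *	format = dtrace_format_add(state, "cpu %d\n");	<- returns ndx + 1
 *
 * (the format string here is illustrative) -- leaving 0 free to mean
 * "no format", which is exactly the encoding chosen below for a
 * printa() invoked without a format string.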
8220 */ 8221 if (arg == NULL) { 8222 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA); 8223 format = 0; 8224 } else { 8225 ASSERT(arg != NULL); 8226 ASSERT(arg > KERNELBASE); 8227 format = dtrace_format_add(state, 8228 (char *)(uintptr_t)arg); 8229 } 8230 8231 /*FALLTHROUGH*/ 8232 case DTRACEACT_LIBACT: 8233 case DTRACEACT_DIFEXPR: 8234 if (dp == NULL) 8235 return (EINVAL); 8236 8237 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 8238 break; 8239 8240 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 8241 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 8242 return (EINVAL); 8243 8244 size = opt[DTRACEOPT_STRSIZE]; 8245 } 8246 8247 break; 8248 8249 case DTRACEACT_STACK: 8250 if ((nframes = arg) == 0) { 8251 nframes = opt[DTRACEOPT_STACKFRAMES]; 8252 ASSERT(nframes > 0); 8253 arg = nframes; 8254 } 8255 8256 size = nframes * sizeof (pc_t); 8257 break; 8258 8259 case DTRACEACT_JSTACK: 8260 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 8261 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 8262 8263 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 8264 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 8265 8266 arg = DTRACE_USTACK_ARG(nframes, strsize); 8267 8268 /*FALLTHROUGH*/ 8269 case DTRACEACT_USTACK: 8270 if (desc->dtad_kind != DTRACEACT_JSTACK && 8271 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 8272 strsize = DTRACE_USTACK_STRSIZE(arg); 8273 nframes = opt[DTRACEOPT_USTACKFRAMES]; 8274 ASSERT(nframes > 0); 8275 arg = DTRACE_USTACK_ARG(nframes, strsize); 8276 } 8277 8278 /* 8279 * Save a slot for the pid. 8280 */ 8281 size = (nframes + 1) * sizeof (uint64_t); 8282 size += DTRACE_USTACK_STRSIZE(arg); 8283 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 8284 8285 break; 8286 8287 case DTRACEACT_STOP: 8288 case DTRACEACT_BREAKPOINT: 8289 case DTRACEACT_PANIC: 8290 break; 8291 8292 case DTRACEACT_CHILL: 8293 case DTRACEACT_DISCARD: 8294 case DTRACEACT_RAISE: 8295 if (dp == NULL) 8296 return (EINVAL); 8297 break; 8298 8299 case DTRACEACT_EXIT: 8300 if (dp == NULL || 8301 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 8302 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 8303 return (EINVAL); 8304 break; 8305 8306 case DTRACEACT_SPECULATE: 8307 8308 if (ecb->dte_action != NULL && ecb->dte_size != 0) 8309 return (EINVAL); 8310 8311 if (dp == NULL) 8312 return (EINVAL); 8313 8314 state->dts_speculates = 1; 8315 break; 8316 8317 case DTRACEACT_COMMIT: { 8318 dtrace_action_t *act = ecb->dte_action; 8319 8320 for (; act != NULL; act = act->dta_next) { 8321 if (act->dta_kind == DTRACEACT_COMMIT) 8322 return (EINVAL); 8323 } 8324 8325 if (dp == NULL) 8326 return (EINVAL); 8327 break; 8328 } 8329 8330 default: 8331 return (EINVAL); 8332 } 8333 8334 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 8335 /* 8336 * If this is a data-storing action or a speculate, 8337 * we must be sure that there isn't a commit on the 8338 * action chain. 
8339 */ 8340 dtrace_action_t *act = ecb->dte_action; 8341 8342 for (; act != NULL; act = act->dta_next) { 8343 if (act->dta_kind == DTRACEACT_COMMIT) 8344 return (EINVAL); 8345 } 8346 } 8347 8348 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 8349 action->dta_rec.dtrd_size = size; 8350 } 8351 8352 action->dta_refcnt = 1; 8353 rec = &action->dta_rec; 8354 size = rec->dtrd_size; 8355 8356 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 8357 if (!(size & mask)) { 8358 align = mask + 1; 8359 break; 8360 } 8361 } 8362 8363 action->dta_kind = desc->dtad_kind; 8364 8365 if ((action->dta_difo = dp) != NULL) 8366 dtrace_difo_hold(dp); 8367 8368 rec->dtrd_action = action->dta_kind; 8369 rec->dtrd_arg = arg; 8370 8371 if (ecb->dte_state == dtrace_anon.dta_state) { 8372 /* 8373 * If this is an anonymous enabling, explicitly clear the uarg. 8374 */ 8375 rec->dtrd_uarg = 0; 8376 } else { 8377 rec->dtrd_uarg = desc->dtad_uarg; 8378 } 8379 8380 rec->dtrd_alignment = (uint16_t)align; 8381 rec->dtrd_format = format; 8382 8383 if ((last = ecb->dte_action_last) != NULL) { 8384 ASSERT(ecb->dte_action != NULL); 8385 action->dta_prev = last; 8386 last->dta_next = action; 8387 } else { 8388 ASSERT(ecb->dte_action == NULL); 8389 ecb->dte_action = action; 8390 } 8391 8392 ecb->dte_action_last = action; 8393 8394 return (0); 8395 } 8396 8397 static void 8398 dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 8399 { 8400 dtrace_action_t *act = ecb->dte_action, *next; 8401 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 8402 dtrace_difo_t *dp; 8403 uint16_t format; 8404 8405 if (act != NULL && act->dta_refcnt > 1) { 8406 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 8407 act->dta_refcnt--; 8408 } else { 8409 for (; act != NULL; act = next) { 8410 next = act->dta_next; 8411 ASSERT(next != NULL || act == ecb->dte_action_last); 8412 ASSERT(act->dta_refcnt == 1); 8413 8414 if ((format = act->dta_rec.dtrd_format) != 0) 8415 dtrace_format_remove(ecb->dte_state, format); 8416 8417 if ((dp = act->dta_difo) != NULL) 8418 dtrace_difo_release(dp, vstate); 8419 8420 if (DTRACEACT_ISAGG(act->dta_kind)) { 8421 dtrace_ecb_aggregation_destroy(ecb, act); 8422 } else { 8423 kmem_free(act, sizeof (dtrace_action_t)); 8424 } 8425 } 8426 } 8427 8428 ecb->dte_action = NULL; 8429 ecb->dte_action_last = NULL; 8430 ecb->dte_size = sizeof (dtrace_epid_t); 8431 } 8432 8433 static void 8434 dtrace_ecb_disable(dtrace_ecb_t *ecb) 8435 { 8436 /* 8437 * We disable the ECB by removing it from its probe. 8438 */ 8439 dtrace_ecb_t *pecb, *prev = NULL; 8440 dtrace_probe_t *probe = ecb->dte_probe; 8441 8442 ASSERT(MUTEX_HELD(&dtrace_lock)); 8443 8444 if (probe == NULL) { 8445 /* 8446 * This is the NULL probe; there is nothing to disable. 8447 */ 8448 return; 8449 } 8450 8451 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 8452 if (pecb == ecb) 8453 break; 8454 prev = pecb; 8455 } 8456 8457 ASSERT(pecb != NULL); 8458 8459 if (prev == NULL) { 8460 probe->dtpr_ecb = ecb->dte_next; 8461 } else { 8462 prev->dte_next = ecb->dte_next; 8463 } 8464 8465 if (ecb == probe->dtpr_ecb_last) { 8466 ASSERT(ecb->dte_next == NULL); 8467 probe->dtpr_ecb_last = prev; 8468 } 8469 8470 /* 8471 * The ECB has been disconnected from the probe; now sync to assure 8472 * that all CPUs have seen the change before returning. 
8473 */ 8474 dtrace_sync(); 8475 8476 if (probe->dtpr_ecb == NULL) { 8477 /* 8478 * That was the last ECB on the probe; clear the predicate 8479 * cache ID for the probe, disable it and sync one more time 8480 * to assure that we'll never hit it again. 8481 */ 8482 dtrace_provider_t *prov = probe->dtpr_provider; 8483 8484 ASSERT(ecb->dte_next == NULL); 8485 ASSERT(probe->dtpr_ecb_last == NULL); 8486 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 8487 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 8488 probe->dtpr_id, probe->dtpr_arg); 8489 dtrace_sync(); 8490 } else { 8491 /* 8492 * There is at least one ECB remaining on the probe. If there 8493 * is _exactly_ one, set the probe's predicate cache ID to be 8494 * the predicate cache ID of the remaining ECB. 8495 */ 8496 ASSERT(probe->dtpr_ecb_last != NULL); 8497 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 8498 8499 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 8500 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 8501 8502 ASSERT(probe->dtpr_ecb->dte_next == NULL); 8503 8504 if (p != NULL) 8505 probe->dtpr_predcache = p->dtp_cacheid; 8506 } 8507 8508 ecb->dte_next = NULL; 8509 } 8510 } 8511 8512 static void 8513 dtrace_ecb_destroy(dtrace_ecb_t *ecb) 8514 { 8515 dtrace_state_t *state = ecb->dte_state; 8516 dtrace_vstate_t *vstate = &state->dts_vstate; 8517 dtrace_predicate_t *pred; 8518 dtrace_epid_t epid = ecb->dte_epid; 8519 8520 ASSERT(MUTEX_HELD(&dtrace_lock)); 8521 ASSERT(ecb->dte_next == NULL); 8522 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 8523 8524 if ((pred = ecb->dte_predicate) != NULL) 8525 dtrace_predicate_release(pred, vstate); 8526 8527 dtrace_ecb_action_remove(ecb); 8528 8529 ASSERT(state->dts_ecbs[epid - 1] == ecb); 8530 state->dts_ecbs[epid - 1] = NULL; 8531 8532 kmem_free(ecb, sizeof (dtrace_ecb_t)); 8533 } 8534 8535 static dtrace_ecb_t * 8536 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 8537 dtrace_enabling_t *enab) 8538 { 8539 dtrace_ecb_t *ecb; 8540 dtrace_predicate_t *pred; 8541 dtrace_actdesc_t *act; 8542 dtrace_provider_t *prov; 8543 dtrace_ecbdesc_t *desc = enab->dten_current; 8544 8545 ASSERT(MUTEX_HELD(&dtrace_lock)); 8546 ASSERT(state != NULL); 8547 8548 ecb = dtrace_ecb_add(state, probe); 8549 ecb->dte_uarg = desc->dted_uarg; 8550 8551 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 8552 dtrace_predicate_hold(pred); 8553 ecb->dte_predicate = pred; 8554 } 8555 8556 if (probe != NULL) { 8557 /* 8558 * If the provider shows more leg than the consumer is old 8559 * enough to see, we need to enable the appropriate implicit 8560 * predicate bits to prevent the ecb from activating at 8561 * revealing times. 8562 */ 8563 prov = probe->dtpr_provider; 8564 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 8565 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 8566 ecb->dte_cond |= DTRACE_COND_OWNER; 8567 8568 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 8569 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 8570 ecb->dte_cond |= DTRACE_COND_USERMODE; 8571 } 8572 8573 if (dtrace_ecb_create_cache != NULL) { 8574 /* 8575 * If we have a cached ecb, we'll use its action list instead 8576 * of creating our own (saving both time and space). 
8577 */ 8578 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 8579 dtrace_action_t *act = cached->dte_action; 8580 8581 if (act != NULL) { 8582 ASSERT(act->dta_refcnt > 0); 8583 act->dta_refcnt++; 8584 ecb->dte_action = act; 8585 ecb->dte_action_last = cached->dte_action_last; 8586 ecb->dte_needed = cached->dte_needed; 8587 ecb->dte_size = cached->dte_size; 8588 ecb->dte_alignment = cached->dte_alignment; 8589 } 8590 8591 return (ecb); 8592 } 8593 8594 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 8595 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 8596 dtrace_ecb_destroy(ecb); 8597 return (NULL); 8598 } 8599 } 8600 8601 dtrace_ecb_resize(ecb); 8602 8603 return (dtrace_ecb_create_cache = ecb); 8604 } 8605 8606 static int 8607 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 8608 { 8609 dtrace_ecb_t *ecb; 8610 dtrace_enabling_t *enab = arg; 8611 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 8612 8613 ASSERT(state != NULL); 8614 8615 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 8616 /* 8617 * This probe was created in a generation for which this 8618 * enabling has previously created ECBs; we don't want to 8619 * enable it again, so just kick out. 8620 */ 8621 return (DTRACE_MATCH_NEXT); 8622 } 8623 8624 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 8625 return (DTRACE_MATCH_DONE); 8626 8627 dtrace_ecb_enable(ecb); 8628 return (DTRACE_MATCH_NEXT); 8629 } 8630 8631 static dtrace_ecb_t * 8632 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 8633 { 8634 dtrace_ecb_t *ecb; 8635 8636 ASSERT(MUTEX_HELD(&dtrace_lock)); 8637 8638 if (id == 0 || id > state->dts_necbs) 8639 return (NULL); 8640 8641 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 8642 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 8643 8644 return (state->dts_ecbs[id - 1]); 8645 } 8646 8647 static dtrace_aggregation_t * 8648 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 8649 { 8650 dtrace_aggregation_t *agg; 8651 8652 ASSERT(MUTEX_HELD(&dtrace_lock)); 8653 8654 if (id == 0 || id > state->dts_naggregations) 8655 return (NULL); 8656 8657 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 8658 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 8659 agg->dtag_id == id); 8660 8661 return (state->dts_aggregations[id - 1]); 8662 } 8663 8664 /* 8665 * DTrace Buffer Functions 8666 * 8667 * The following functions manipulate DTrace buffers. Most of these functions 8668 * are called in the context of establishing or processing consumer state; 8669 * exceptions are explicitly noted. 8670 */ 8671 8672 /* 8673 * Note: called from cross call context. This function switches the two 8674 * buffers on a given CPU. The atomicity of this operation is assured by 8675 * disabling interrupts while the actual switch takes place; the disabling of 8676 * interrupts serializes the execution with any execution of dtrace_probe() on 8677 * the same CPU. 
8678 */ 8679 static void 8680 dtrace_buffer_switch(dtrace_buffer_t *buf) 8681 { 8682 caddr_t tomax = buf->dtb_tomax; 8683 caddr_t xamot = buf->dtb_xamot; 8684 dtrace_icookie_t cookie; 8685 8686 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 8687 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 8688 8689 cookie = dtrace_interrupt_disable(); 8690 buf->dtb_tomax = xamot; 8691 buf->dtb_xamot = tomax; 8692 buf->dtb_xamot_drops = buf->dtb_drops; 8693 buf->dtb_xamot_offset = buf->dtb_offset; 8694 buf->dtb_xamot_errors = buf->dtb_errors; 8695 buf->dtb_xamot_flags = buf->dtb_flags; 8696 buf->dtb_offset = 0; 8697 buf->dtb_drops = 0; 8698 buf->dtb_errors = 0; 8699 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 8700 dtrace_interrupt_enable(cookie); 8701 } 8702 8703 /* 8704 * Note: called from cross call context. This function activates a buffer 8705 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 8706 * is guaranteed by the disabling of interrupts. 8707 */ 8708 static void 8709 dtrace_buffer_activate(dtrace_state_t *state) 8710 { 8711 dtrace_buffer_t *buf; 8712 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 8713 8714 buf = &state->dts_buffer[CPU->cpu_id]; 8715 8716 if (buf->dtb_tomax != NULL) { 8717 /* 8718 * We might like to assert that the buffer is marked inactive, 8719 * but this isn't necessarily true: the buffer for the CPU 8720 * that processes the BEGIN probe has its buffer activated 8721 * manually. In this case, we take the (harmless) action 8722 * re-clearing the bit INACTIVE bit. 8723 */ 8724 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 8725 } 8726 8727 dtrace_interrupt_enable(cookie); 8728 } 8729 8730 static int 8731 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 8732 processorid_t cpu) 8733 { 8734 cpu_t *cp; 8735 dtrace_buffer_t *buf; 8736 8737 ASSERT(MUTEX_HELD(&cpu_lock)); 8738 ASSERT(MUTEX_HELD(&dtrace_lock)); 8739 8740 if (crgetuid(CRED()) != 0 && size > dtrace_nonroot_maxsize) 8741 return (EFBIG); 8742 8743 cp = cpu_list; 8744 8745 do { 8746 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 8747 continue; 8748 8749 buf = &bufs[cp->cpu_id]; 8750 8751 /* 8752 * If there is already a buffer allocated for this CPU, it 8753 * is only possible that this is a DR event. In this case, 8754 * the buffer size must match our specified size. 8755 */ 8756 if (buf->dtb_tomax != NULL) { 8757 ASSERT(buf->dtb_size == size); 8758 continue; 8759 } 8760 8761 ASSERT(buf->dtb_xamot == NULL); 8762 8763 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 8764 goto err; 8765 8766 buf->dtb_size = size; 8767 buf->dtb_flags = flags; 8768 buf->dtb_offset = 0; 8769 buf->dtb_drops = 0; 8770 8771 if (flags & DTRACEBUF_NOSWITCH) 8772 continue; 8773 8774 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 8775 goto err; 8776 } while ((cp = cp->cpu_next) != cpu_list); 8777 8778 return (0); 8779 8780 err: 8781 cp = cpu_list; 8782 8783 do { 8784 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 8785 continue; 8786 8787 buf = &bufs[cp->cpu_id]; 8788 8789 if (buf->dtb_xamot != NULL) { 8790 ASSERT(buf->dtb_tomax != NULL); 8791 ASSERT(buf->dtb_size == size); 8792 kmem_free(buf->dtb_xamot, size); 8793 } 8794 8795 if (buf->dtb_tomax != NULL) { 8796 ASSERT(buf->dtb_size == size); 8797 kmem_free(buf->dtb_tomax, size); 8798 } 8799 8800 buf->dtb_tomax = NULL; 8801 buf->dtb_xamot = NULL; 8802 buf->dtb_size = 0; 8803 } while ((cp = cp->cpu_next) != cpu_list); 8804 8805 return (ENOMEM); 8806 } 8807 8808 /* 8809 * Note: called from probe context. 
This function just increments the drop 8810 * count on a buffer. It has been made a function to allow for the 8811 * possibility of understanding the source of mysterious drop counts. (A 8812 * problem for which one may be particularly disappointed that DTrace cannot 8813 * be used to understand DTrace.) 8814 */ 8815 static void 8816 dtrace_buffer_drop(dtrace_buffer_t *buf) 8817 { 8818 buf->dtb_drops++; 8819 } 8820 8821 /* 8822 * Note: called from probe context. This function is called to reserve space 8823 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 8824 * mstate. Returns the new offset in the buffer, or a negative value if an 8825 * error has occurred. 8826 */ 8827 static intptr_t 8828 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 8829 dtrace_state_t *state, dtrace_mstate_t *mstate) 8830 { 8831 intptr_t offs = buf->dtb_offset, soffs; 8832 intptr_t woffs; 8833 caddr_t tomax; 8834 size_t total; 8835 8836 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 8837 return (-1); 8838 8839 if ((tomax = buf->dtb_tomax) == NULL) { 8840 dtrace_buffer_drop(buf); 8841 return (-1); 8842 } 8843 8844 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 8845 while (offs & (align - 1)) { 8846 /* 8847 * Assert that our alignment is off by a number which 8848 * is itself sizeof (uint32_t) aligned. 8849 */ 8850 ASSERT(!((align - (offs & (align - 1))) & 8851 (sizeof (uint32_t) - 1))); 8852 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 8853 offs += sizeof (uint32_t); 8854 } 8855 8856 if ((soffs = offs + needed) > buf->dtb_size) { 8857 dtrace_buffer_drop(buf); 8858 return (-1); 8859 } 8860 8861 if (mstate == NULL) 8862 return (offs); 8863 8864 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 8865 mstate->dtms_scratch_size = buf->dtb_size - soffs; 8866 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 8867 8868 return (offs); 8869 } 8870 8871 if (buf->dtb_flags & DTRACEBUF_FILL) { 8872 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 8873 (buf->dtb_flags & DTRACEBUF_FULL)) 8874 return (-1); 8875 goto out; 8876 } 8877 8878 total = needed + (offs & (align - 1)); 8879 8880 /* 8881 * For a ring buffer, life is quite a bit more complicated. Before 8882 * we can store any padding, we need to adjust our wrapping offset. 8883 * (If we've never before wrapped or we're not about to, no adjustment 8884 * is required.) 8885 */ 8886 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 8887 offs + total > buf->dtb_size) { 8888 woffs = buf->dtb_xamot_offset; 8889 8890 if (offs + total > buf->dtb_size) { 8891 /* 8892 * We can't fit in the end of the buffer. First, a 8893 * sanity check that we can fit in the buffer at all. 8894 */ 8895 if (total > buf->dtb_size) { 8896 dtrace_buffer_drop(buf); 8897 return (-1); 8898 } 8899 8900 /* 8901 * We're going to be storing at the top of the buffer, 8902 * so now we need to deal with the wrapped offset. We 8903 * only reset our wrapped offset to 0 if it is 8904 * currently greater than the current offset. If it 8905 * is less than the current offset, it is because a 8906 * previous allocation induced a wrap -- but the 8907 * allocation didn't subsequently take the space due 8908 * to an error or false predicate evaluation. In this 8909 * case, we'll just leave the wrapped offset alone: if 8910 * the wrapped offset hasn't been advanced far enough 8911 * for this allocation, it will be adjusted in the 8912 * lower loop. 
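 *
 * A worked example of the case analysis below (all numbers are
 * hypothetical): consider a 64-byte ring buffer that has previously
 * wrapped, with offs = 56, woffs = 16 and a 24-byte reservation.  Since
 * offs + total (80) exceeds dtb_size, we will store at the top: woffs
 * (16) is less than offs (56) and is therefore left alone, the tail of
 * the buffer from offset 56 onward is zeroed, offs becomes 0, and old
 * records starting at woffs are consumed until at least 24 bytes are
 * free.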
8913 */ 8914 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 8915 if (woffs >= offs) 8916 woffs = 0; 8917 } else { 8918 woffs = 0; 8919 } 8920 8921 /* 8922 * Now we know that we're going to be storing to the 8923 * top of the buffer and that there is room for us 8924 * there. We need to clear the buffer from the current 8925 * offset to the end (there may be old gunk there). 8926 */ 8927 while (offs < buf->dtb_size) 8928 tomax[offs++] = 0; 8929 8930 /* 8931 * We need to set our offset to zero. And because we 8932 * are wrapping, we need to set the bit indicating as 8933 * much. We can also adjust our needed space back 8934 * down to the space required by the ECB -- we know 8935 * that the top of the buffer is aligned. 8936 */ 8937 offs = 0; 8938 total = needed; 8939 buf->dtb_flags |= DTRACEBUF_WRAPPED; 8940 } else { 8941 /* 8942 * There is room for us in the buffer, so we simply 8943 * need to check the wrapped offset. 8944 */ 8945 if (woffs < offs) { 8946 /* 8947 * The wrapped offset is less than the offset. 8948 * This can happen if we allocated buffer space 8949 * that induced a wrap, but then we didn't 8950 * subsequently take the space due to an error 8951 * or false predicate evaluation. This is 8952 * okay; we know that _this_ allocation isn't 8953 * going to induce a wrap. We still can't 8954 * reset the wrapped offset to be zero, 8955 * however: the space may have been trashed in 8956 * the previous failed probe attempt. But at 8957 * least the wrapped offset doesn't need to 8958 * be adjusted at all... 8959 */ 8960 goto out; 8961 } 8962 } 8963 8964 while (offs + total > woffs) { 8965 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 8966 size_t size; 8967 8968 if (epid == DTRACE_EPIDNONE) { 8969 size = sizeof (uint32_t); 8970 } else { 8971 ASSERT(epid <= state->dts_necbs); 8972 ASSERT(state->dts_ecbs[epid - 1] != NULL); 8973 8974 size = state->dts_ecbs[epid - 1]->dte_size; 8975 } 8976 8977 ASSERT(woffs + size <= buf->dtb_size); 8978 ASSERT(size != 0); 8979 8980 if (woffs + size == buf->dtb_size) { 8981 /* 8982 * We've reached the end of the buffer; we want 8983 * to set the wrapped offset to 0 and break 8984 * out. However, if the offs is 0, then we're 8985 * in a strange edge-condition: the amount of 8986 * space that we want to reserve plus the size 8987 * of the record that we're overwriting is 8988 * greater than the size of the buffer. This 8989 * is problematic because if we reserve the 8990 * space but subsequently don't consume it (due 8991 * to a failed predicate or error) the wrapped 8992 * offset will be 0 -- yet the EPID at offset 0 8993 * will not be committed. This situation is 8994 * relatively easy to deal with: if we're in 8995 * this case, the buffer is indistinguishable 8996 * from one that hasn't wrapped; we need only 8997 * finish the job by clearing the wrapped bit, 8998 * explicitly setting the offset to be 0, and 8999 * zero'ing out the old data in the buffer. 9000 */ 9001 if (offs == 0) { 9002 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 9003 buf->dtb_offset = 0; 9004 woffs = total; 9005 9006 while (woffs < buf->dtb_size) 9007 tomax[woffs++] = 0; 9008 } 9009 9010 woffs = 0; 9011 break; 9012 } 9013 9014 woffs += size; 9015 } 9016 9017 /* 9018 * We have a wrapped offset. It may be that the wrapped offset 9019 * has become zero -- that's okay. 9020 */ 9021 buf->dtb_xamot_offset = woffs; 9022 } 9023 9024 out: 9025 /* 9026 * Now we can plow the buffer with any necessary padding. 
9027 */ 9028 while (offs & (align - 1)) { 9029 /* 9030 * Assert that our alignment is off by a number which 9031 * is itself sizeof (uint32_t) aligned. 9032 */ 9033 ASSERT(!((align - (offs & (align - 1))) & 9034 (sizeof (uint32_t) - 1))); 9035 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 9036 offs += sizeof (uint32_t); 9037 } 9038 9039 if (buf->dtb_flags & DTRACEBUF_FILL) { 9040 if (offs + needed > buf->dtb_size - state->dts_reserve) { 9041 buf->dtb_flags |= DTRACEBUF_FULL; 9042 return (-1); 9043 } 9044 } 9045 9046 if (mstate == NULL) 9047 return (offs); 9048 9049 /* 9050 * For ring buffers and fill buffers, the scratch space is always 9051 * the inactive buffer. 9052 */ 9053 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 9054 mstate->dtms_scratch_size = buf->dtb_size; 9055 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 9056 9057 return (offs); 9058 } 9059 9060 static void 9061 dtrace_buffer_polish(dtrace_buffer_t *buf) 9062 { 9063 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 9064 ASSERT(MUTEX_HELD(&dtrace_lock)); 9065 9066 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 9067 return; 9068 9069 /* 9070 * We need to polish the ring buffer. There are three cases: 9071 * 9072 * - The first (and presumably most common) is that there is no gap 9073 * between the buffer offset and the wrapped offset. In this case, 9074 * there is nothing in the buffer that isn't valid data; we can 9075 * mark the buffer as polished and return. 9076 * 9077 * - The second (less common than the first but still more common 9078 * than the third) is that there is a gap between the buffer offset 9079 * and the wrapped offset, and the wrapped offset is larger than the 9080 * buffer offset. This can happen because of an alignment issue, or 9081 * can happen because of a call to dtrace_buffer_reserve() that 9082 * didn't subsequently consume the buffer space. In this case, 9083 * we need to zero the data from the buffer offset to the wrapped 9084 * offset. 9085 * 9086 * - The third (and least common) is that there is a gap between the 9087 * buffer offset and the wrapped offset, but the wrapped offset is 9088 * _less_ than the buffer offset. This can only happen because a 9089 * call to dtrace_buffer_reserve() induced a wrap, but the space 9090 * was not subsequently consumed. In this case, we need to zero the 9091 * space from the offset to the end of the buffer _and_ from the 9092 * top of the buffer to the wrapped offset. 
9093 */ 9094 if (buf->dtb_offset < buf->dtb_xamot_offset) { 9095 bzero(buf->dtb_tomax + buf->dtb_offset, 9096 buf->dtb_xamot_offset - buf->dtb_offset); 9097 } 9098 9099 if (buf->dtb_offset > buf->dtb_xamot_offset) { 9100 bzero(buf->dtb_tomax + buf->dtb_offset, 9101 buf->dtb_size - buf->dtb_offset); 9102 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 9103 } 9104 } 9105 9106 static void 9107 dtrace_buffer_free(dtrace_buffer_t *bufs) 9108 { 9109 int i; 9110 9111 for (i = 0; i < NCPU; i++) { 9112 dtrace_buffer_t *buf = &bufs[i]; 9113 9114 if (buf->dtb_tomax == NULL) { 9115 ASSERT(buf->dtb_xamot == NULL); 9116 ASSERT(buf->dtb_size == 0); 9117 continue; 9118 } 9119 9120 if (buf->dtb_xamot != NULL) { 9121 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 9122 kmem_free(buf->dtb_xamot, buf->dtb_size); 9123 } 9124 9125 kmem_free(buf->dtb_tomax, buf->dtb_size); 9126 buf->dtb_size = 0; 9127 buf->dtb_tomax = NULL; 9128 buf->dtb_xamot = NULL; 9129 } 9130 } 9131 9132 /* 9133 * DTrace Enabling Functions 9134 */ 9135 static dtrace_enabling_t * 9136 dtrace_enabling_create(dtrace_vstate_t *vstate) 9137 { 9138 dtrace_enabling_t *enab; 9139 9140 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 9141 enab->dten_vstate = vstate; 9142 9143 return (enab); 9144 } 9145 9146 static void 9147 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 9148 { 9149 dtrace_ecbdesc_t **ndesc; 9150 size_t osize, nsize; 9151 9152 /* 9153 * We can't add to enablings after we've enabled them, or after we've 9154 * retained them. 9155 */ 9156 ASSERT(enab->dten_probegen == 0); 9157 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 9158 9159 if (enab->dten_ndesc < enab->dten_maxdesc) { 9160 enab->dten_desc[enab->dten_ndesc++] = ecb; 9161 return; 9162 } 9163 9164 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 9165 9166 if (enab->dten_maxdesc == 0) { 9167 enab->dten_maxdesc = 1; 9168 } else { 9169 enab->dten_maxdesc <<= 1; 9170 } 9171 9172 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 9173 9174 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 9175 ndesc = kmem_zalloc(nsize, KM_SLEEP); 9176 bcopy(enab->dten_desc, ndesc, osize); 9177 kmem_free(enab->dten_desc, osize); 9178 9179 enab->dten_desc = ndesc; 9180 enab->dten_desc[enab->dten_ndesc++] = ecb; 9181 } 9182 9183 static void 9184 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 9185 dtrace_probedesc_t *pd) 9186 { 9187 dtrace_ecbdesc_t *new; 9188 dtrace_predicate_t *pred; 9189 dtrace_actdesc_t *act; 9190 9191 /* 9192 * We're going to create a new ECB description that matches the 9193 * specified ECB in every way, but has the specified probe description. 
9194 */ 9195 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 9196 9197 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 9198 dtrace_predicate_hold(pred); 9199 9200 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 9201 dtrace_actdesc_hold(act); 9202 9203 new->dted_action = ecb->dted_action; 9204 new->dted_pred = ecb->dted_pred; 9205 new->dted_probe = *pd; 9206 new->dted_uarg = ecb->dted_uarg; 9207 9208 dtrace_enabling_add(enab, new); 9209 } 9210 9211 static void 9212 dtrace_enabling_dump(dtrace_enabling_t *enab) 9213 { 9214 int i; 9215 9216 for (i = 0; i < enab->dten_ndesc; i++) { 9217 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 9218 9219 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 9220 desc->dtpd_provider, desc->dtpd_mod, 9221 desc->dtpd_func, desc->dtpd_name); 9222 } 9223 } 9224 9225 static void 9226 dtrace_enabling_destroy(dtrace_enabling_t *enab) 9227 { 9228 int i; 9229 dtrace_ecbdesc_t *ep; 9230 dtrace_vstate_t *vstate = enab->dten_vstate; 9231 9232 ASSERT(MUTEX_HELD(&dtrace_lock)); 9233 9234 for (i = 0; i < enab->dten_ndesc; i++) { 9235 dtrace_actdesc_t *act, *next; 9236 dtrace_predicate_t *pred; 9237 9238 ep = enab->dten_desc[i]; 9239 9240 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 9241 dtrace_predicate_release(pred, vstate); 9242 9243 for (act = ep->dted_action; act != NULL; act = next) { 9244 next = act->dtad_next; 9245 dtrace_actdesc_release(act, vstate); 9246 } 9247 9248 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 9249 } 9250 9251 kmem_free(enab->dten_desc, 9252 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 9253 9254 /* 9255 * If this was a retained enabling, decrement the dts_nretained count 9256 * and take it off of the dtrace_retained list. 9257 */ 9258 if (enab->dten_prev != NULL || enab->dten_next != NULL || 9259 dtrace_retained == enab) { 9260 ASSERT(enab->dten_vstate->dtvs_state != NULL); 9261 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 9262 enab->dten_vstate->dtvs_state->dts_nretained--; 9263 } 9264 9265 if (enab->dten_prev == NULL) { 9266 if (dtrace_retained == enab) { 9267 dtrace_retained = enab->dten_next; 9268 9269 if (dtrace_retained != NULL) 9270 dtrace_retained->dten_prev = NULL; 9271 } 9272 } else { 9273 ASSERT(enab != dtrace_retained); 9274 ASSERT(dtrace_retained != NULL); 9275 enab->dten_prev->dten_next = enab->dten_next; 9276 } 9277 9278 if (enab->dten_next != NULL) { 9279 ASSERT(dtrace_retained != NULL); 9280 enab->dten_next->dten_prev = enab->dten_prev; 9281 } 9282 9283 kmem_free(enab, sizeof (dtrace_enabling_t)); 9284 } 9285 9286 static int 9287 dtrace_enabling_retain(dtrace_enabling_t *enab) 9288 { 9289 dtrace_state_t *state; 9290 9291 ASSERT(MUTEX_HELD(&dtrace_lock)); 9292 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 9293 ASSERT(enab->dten_vstate != NULL); 9294 9295 state = enab->dten_vstate->dtvs_state; 9296 ASSERT(state != NULL); 9297 9298 /* 9299 * We only allow each state to retain dtrace_retain_max enablings. 
9300 */ 9301 if (state->dts_nretained >= dtrace_retain_max) 9302 return (ENOSPC); 9303 9304 state->dts_nretained++; 9305 9306 if (dtrace_retained == NULL) { 9307 dtrace_retained = enab; 9308 return (0); 9309 } 9310 9311 enab->dten_next = dtrace_retained; 9312 dtrace_retained->dten_prev = enab; 9313 dtrace_retained = enab; 9314 9315 return (0); 9316 } 9317 9318 static int 9319 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 9320 dtrace_probedesc_t *create) 9321 { 9322 dtrace_enabling_t *new, *enab; 9323 int found = 0, err = ENOENT; 9324 9325 ASSERT(MUTEX_HELD(&dtrace_lock)); 9326 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 9327 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 9328 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 9329 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 9330 9331 new = dtrace_enabling_create(&state->dts_vstate); 9332 9333 /* 9334 * Iterate over all retained enablings, looking for enablings that 9335 * match the specified state. 9336 */ 9337 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 9338 int i; 9339 9340 /* 9341 * dtvs_state can only be NULL for helper enablings -- and 9342 * helper enablings can't be retained. 9343 */ 9344 ASSERT(enab->dten_vstate->dtvs_state != NULL); 9345 9346 if (enab->dten_vstate->dtvs_state != state) 9347 continue; 9348 9349 /* 9350 * Now iterate over each probe description; we're looking for 9351 * an exact match to the specified probe description. 9352 */ 9353 for (i = 0; i < enab->dten_ndesc; i++) { 9354 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 9355 dtrace_probedesc_t *pd = &ep->dted_probe; 9356 9357 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 9358 continue; 9359 9360 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 9361 continue; 9362 9363 if (strcmp(pd->dtpd_func, match->dtpd_func)) 9364 continue; 9365 9366 if (strcmp(pd->dtpd_name, match->dtpd_name)) 9367 continue; 9368 9369 /* 9370 * We have a winning probe! Add it to our growing 9371 * enabling. 9372 */ 9373 found = 1; 9374 dtrace_enabling_addlike(new, ep, create); 9375 } 9376 } 9377 9378 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 9379 dtrace_enabling_destroy(new); 9380 return (err); 9381 } 9382 9383 return (0); 9384 } 9385 9386 static void 9387 dtrace_enabling_retract(dtrace_state_t *state) 9388 { 9389 dtrace_enabling_t *enab, *next; 9390 9391 ASSERT(MUTEX_HELD(&dtrace_lock)); 9392 9393 /* 9394 * Iterate over all retained enablings, destroy the enablings retained 9395 * for the specified state. 9396 */ 9397 for (enab = dtrace_retained; enab != NULL; enab = next) { 9398 next = enab->dten_next; 9399 9400 /* 9401 * dtvs_state can only be NULL for helper enablings -- and 9402 * helper enablings can't be retained. 
9403 */ 9404 ASSERT(enab->dten_vstate->dtvs_state != NULL); 9405 9406 if (enab->dten_vstate->dtvs_state == state) { 9407 ASSERT(state->dts_nretained > 0); 9408 dtrace_enabling_destroy(enab); 9409 } 9410 } 9411 9412 ASSERT(state->dts_nretained == 0); 9413 } 9414 9415 static int 9416 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 9417 { 9418 int i = 0; 9419 int matched = 0; 9420 9421 ASSERT(MUTEX_HELD(&cpu_lock)); 9422 ASSERT(MUTEX_HELD(&dtrace_lock)); 9423 9424 for (i = 0; i < enab->dten_ndesc; i++) { 9425 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 9426 9427 enab->dten_current = ep; 9428 enab->dten_error = 0; 9429 9430 matched += dtrace_probe_enable(&ep->dted_probe, enab); 9431 9432 if (enab->dten_error != 0) { 9433 /* 9434 * If we get an error half-way through enabling the 9435 * probes, we kick out -- perhaps with some number of 9436 * them enabled. Leaving enabled probes enabled may 9437 * be slightly confusing for user-level, but we expect 9438 * that no one will attempt to actually drive on in 9439 * the face of such errors. If this is an anonymous 9440 * enabling (indicated with a NULL nmatched pointer), 9441 * we cmn_err() a message. We aren't expecting to 9442 * get such an error -- such as it can exist at all, 9443 * it would be a result of corrupted DOF in the driver 9444 * properties. 9445 */ 9446 if (nmatched == NULL) { 9447 cmn_err(CE_WARN, "dtrace_enabling_match() " 9448 "error on %p: %d", (void *)ep, 9449 enab->dten_error); 9450 } 9451 9452 return (enab->dten_error); 9453 } 9454 } 9455 9456 enab->dten_probegen = dtrace_probegen; 9457 if (nmatched != NULL) 9458 *nmatched = matched; 9459 9460 return (0); 9461 } 9462 9463 static void 9464 dtrace_enabling_matchall(void) 9465 { 9466 dtrace_enabling_t *enab; 9467 9468 mutex_enter(&cpu_lock); 9469 mutex_enter(&dtrace_lock); 9470 9471 /* 9472 * Because we can be called after dtrace_detach() has been called, we 9473 * cannot assert that there are retained enablings. We can safely 9474 * load from dtrace_retained, however: the taskq_destroy() at the 9475 * end of dtrace_detach() will block pending our completion. 9476 */ 9477 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) 9478 (void) dtrace_enabling_match(enab, NULL); 9479 9480 mutex_exit(&dtrace_lock); 9481 mutex_exit(&cpu_lock); 9482 } 9483 9484 static int 9485 dtrace_enabling_matchstate(dtrace_state_t *state, int *nmatched) 9486 { 9487 dtrace_enabling_t *enab; 9488 int matched, total = 0, err; 9489 9490 ASSERT(MUTEX_HELD(&cpu_lock)); 9491 ASSERT(MUTEX_HELD(&dtrace_lock)); 9492 9493 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 9494 ASSERT(enab->dten_vstate->dtvs_state != NULL); 9495 9496 if (enab->dten_vstate->dtvs_state != state) 9497 continue; 9498 9499 if ((err = dtrace_enabling_match(enab, &matched)) != 0) 9500 return (err); 9501 9502 total += matched; 9503 } 9504 9505 if (nmatched != NULL) 9506 *nmatched = total; 9507 9508 return (0); 9509 } 9510 9511 /* 9512 * If an enabling is to be enabled without having matched probes (that is, if 9513 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 9514 * enabling must be _primed_ by creating an ECB for every ECB description. 9515 * This must be done to assure that we know the number of speculations, the 9516 * number of aggregations, the minimum buffer size needed, etc. before we 9517 * transition out of DTRACE_ACTIVITY_INACTIVE. 
To do this without actually 9518 * enabling any probes, we create ECBs for every ECB description, but with a 9519 * NULL probe -- which is exactly what this function does. 9520 */ 9521 static void 9522 dtrace_enabling_prime(dtrace_state_t *state) 9523 { 9524 dtrace_enabling_t *enab; 9525 int i; 9526 9527 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 9528 ASSERT(enab->dten_vstate->dtvs_state != NULL); 9529 9530 if (enab->dten_vstate->dtvs_state != state) 9531 continue; 9532 9533 /* 9534 * We don't want to prime an enabling more than once, lest 9535 * we allow a malicious user to induce resource exhaustion. 9536 * (The ECBs that result from priming an enabling aren't 9537 * leaked -- but they also aren't deallocated until the 9538 * consumer state is destroyed.) 9539 */ 9540 if (enab->dten_primed) 9541 continue; 9542 9543 for (i = 0; i < enab->dten_ndesc; i++) { 9544 enab->dten_current = enab->dten_desc[i]; 9545 (void) dtrace_probe_enable(NULL, enab); 9546 } 9547 9548 enab->dten_primed = 1; 9549 } 9550 } 9551 9552 /* 9553 * Called to indicate that probes should be provided due to retained 9554 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 9555 * must take an initial lap through the enabling calling the dtps_provide() 9556 * entry point explicitly to allow for autocreated probes. 9557 */ 9558 static void 9559 dtrace_enabling_provide(dtrace_provider_t *prv) 9560 { 9561 int i, all = 0; 9562 dtrace_probedesc_t desc; 9563 9564 ASSERT(MUTEX_HELD(&dtrace_lock)); 9565 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 9566 9567 if (prv == NULL) { 9568 all = 1; 9569 prv = dtrace_provider; 9570 } 9571 9572 do { 9573 dtrace_enabling_t *enab = dtrace_retained; 9574 void *parg = prv->dtpv_arg; 9575 9576 for (; enab != NULL; enab = enab->dten_next) { 9577 for (i = 0; i < enab->dten_ndesc; i++) { 9578 desc = enab->dten_desc[i]->dted_probe; 9579 mutex_exit(&dtrace_lock); 9580 prv->dtpv_pops.dtps_provide(parg, &desc); 9581 mutex_enter(&dtrace_lock); 9582 } 9583 } 9584 } while (all && (prv = prv->dtpv_next) != NULL); 9585 9586 mutex_exit(&dtrace_lock); 9587 dtrace_probe_provide(NULL, all ? NULL : prv); 9588 mutex_enter(&dtrace_lock); 9589 } 9590 9591 /* 9592 * DTrace DOF Functions 9593 */ 9594 /*ARGSUSED*/ 9595 static void 9596 dtrace_dof_error(dof_hdr_t *dof, const char *str) 9597 { 9598 if (dtrace_err_verbose) 9599 cmn_err(CE_WARN, "failed to process DOF: %s", str); 9600 9601 #ifdef DTRACE_ERRDEBUG 9602 dtrace_errdebug(str); 9603 #endif 9604 } 9605 9606 /* 9607 * Create DOF out of a currently enabled state. Right now, we only create 9608 * DOF containing the run-time options -- but this could be expanded to create 9609 * complete DOF representing the enabled state.
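 *
 * The layout of the DOF constructed below is thus minimal:
 *
 *	dof_hdr_t			the file header
 *	dof_sec_t			a single DOF_SECT_OPTDESC section
 *	dof_optdesc_t[DTRACEOPT_MAX]	one descriptor per option
 *
 * with the option descriptors rounded up to a uint64_t boundary after
 * the section header.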
9610 */ 9611 static dof_hdr_t * 9612 dtrace_dof_create(dtrace_state_t *state) 9613 { 9614 dof_hdr_t *dof; 9615 dof_sec_t *sec; 9616 dof_optdesc_t *opt; 9617 int i, len = sizeof (dof_hdr_t) + 9618 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 9619 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 9620 9621 ASSERT(MUTEX_HELD(&dtrace_lock)); 9622 9623 dof = kmem_zalloc(len, KM_SLEEP); 9624 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 9625 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 9626 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 9627 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 9628 9629 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 9630 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 9631 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION_1; 9632 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 9633 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 9634 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 9635 9636 dof->dofh_flags = 0; 9637 dof->dofh_hdrsize = sizeof (dof_hdr_t); 9638 dof->dofh_secsize = sizeof (dof_sec_t); 9639 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 9640 dof->dofh_secoff = sizeof (dof_hdr_t); 9641 dof->dofh_loadsz = len; 9642 dof->dofh_filesz = len; 9643 dof->dofh_pad = 0; 9644 9645 /* 9646 * Fill in the option section header... 9647 */ 9648 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 9649 sec->dofs_type = DOF_SECT_OPTDESC; 9650 sec->dofs_align = sizeof (uint64_t); 9651 sec->dofs_flags = DOF_SECF_LOAD; 9652 sec->dofs_entsize = sizeof (dof_optdesc_t); 9653 9654 opt = (dof_optdesc_t *)((uintptr_t)sec + 9655 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 9656 9657 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 9658 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 9659 9660 for (i = 0; i < DTRACEOPT_MAX; i++) { 9661 opt[i].dofo_option = i; 9662 opt[i].dofo_strtab = DOF_SECIDX_NONE; 9663 opt[i].dofo_value = state->dts_options[i]; 9664 } 9665 9666 return (dof); 9667 } 9668 9669 static dof_hdr_t * 9670 dtrace_dof_copyin(uintptr_t uarg, int *errp) 9671 { 9672 dof_hdr_t hdr, *dof; 9673 9674 ASSERT(!MUTEX_HELD(&dtrace_lock)); 9675 9676 /* 9677 * First, we're going to copyin() the sizeof (dof_hdr_t). 9678 */ 9679 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 9680 dtrace_dof_error(NULL, "failed to copyin DOF header"); 9681 *errp = EFAULT; 9682 return (NULL); 9683 } 9684 9685 /* 9686 * Now we'll allocate the entire DOF and copy it in -- provided 9687 * that the length isn't outrageous. 9688 */ 9689 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 9690 dtrace_dof_error(&hdr, "load size exceeds maximum"); 9691 *errp = E2BIG; 9692 return (NULL); 9693 } 9694 9695 if (hdr.dofh_loadsz < sizeof (hdr)) { 9696 dtrace_dof_error(&hdr, "invalid load size"); 9697 *errp = EINVAL; 9698 return (NULL); 9699 } 9700 9701 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 9702 9703 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) { 9704 kmem_free(dof, hdr.dofh_loadsz); 9705 *errp = EFAULT; 9706 return (NULL); 9707 } 9708 9709 return (dof); 9710 } 9711 9712 static dof_hdr_t * 9713 dtrace_dof_property(const char *name) 9714 { 9715 uchar_t *buf; 9716 uint64_t loadsz; 9717 unsigned int len, i; 9718 dof_hdr_t *dof; 9719 9720 /* 9721 * Unfortunately, array of values in .conf files are always (and 9722 * only) interpreted to be integer arrays. We must read our DOF 9723 * as an integer array, and then squeeze it into a byte array. 
9724 */ 9725 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 9726 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 9727 return (NULL); 9728 9729 for (i = 0; i < len; i++) 9730 buf[i] = (uchar_t)(((int *)buf)[i]); 9731 9732 if (len < sizeof (dof_hdr_t)) { 9733 ddi_prop_free(buf); 9734 dtrace_dof_error(NULL, "truncated header"); 9735 return (NULL); 9736 } 9737 9738 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 9739 ddi_prop_free(buf); 9740 dtrace_dof_error(NULL, "truncated DOF"); 9741 return (NULL); 9742 } 9743 9744 if (loadsz >= dtrace_dof_maxsize) { 9745 ddi_prop_free(buf); 9746 dtrace_dof_error(NULL, "oversized DOF"); 9747 return (NULL); 9748 } 9749 9750 dof = kmem_alloc(loadsz, KM_SLEEP); 9751 bcopy(buf, dof, loadsz); 9752 ddi_prop_free(buf); 9753 9754 return (dof); 9755 } 9756 9757 static void 9758 dtrace_dof_destroy(dof_hdr_t *dof) 9759 { 9760 kmem_free(dof, dof->dofh_loadsz); 9761 } 9762 9763 /* 9764 * Return the dof_sec_t pointer corresponding to a given section index. If the 9765 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 9766 * a type other than DOF_SECT_NONE is specified, the header is checked against 9767 * this type and NULL is returned if the types do not match. 9768 */ 9769 static dof_sec_t * 9770 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 9771 { 9772 dof_sec_t *sec = (dof_sec_t *) 9773 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 9774 9775 if (i >= dof->dofh_secnum) { 9776 dtrace_dof_error(dof, "referenced section index is invalid"); 9777 return (NULL); 9778 } 9779 9780 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 9781 dtrace_dof_error(dof, "referenced section is not loadable"); 9782 return (NULL); 9783 } 9784 9785 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 9786 dtrace_dof_error(dof, "referenced section is the wrong type"); 9787 return (NULL); 9788 } 9789 9790 return (sec); 9791 } 9792 9793 static dtrace_probedesc_t * 9794 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 9795 { 9796 dof_probedesc_t *probe; 9797 dof_sec_t *strtab; 9798 uintptr_t daddr = (uintptr_t)dof; 9799 uintptr_t str; 9800 size_t size; 9801 9802 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 9803 dtrace_dof_error(dof, "invalid probe section"); 9804 return (NULL); 9805 } 9806 9807 if (sec->dofs_align != sizeof (dof_secidx_t)) { 9808 dtrace_dof_error(dof, "bad alignment in probe description"); 9809 return (NULL); 9810 } 9811 9812 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 9813 dtrace_dof_error(dof, "truncated probe description"); 9814 return (NULL); 9815 } 9816 9817 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 9818 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 9819 9820 if (strtab == NULL) 9821 return (NULL); 9822 9823 str = daddr + strtab->dofs_offset; 9824 size = strtab->dofs_size; 9825 9826 if (probe->dofp_provider >= strtab->dofs_size) { 9827 dtrace_dof_error(dof, "corrupt probe provider"); 9828 return (NULL); 9829 } 9830 9831 (void) strncpy(desc->dtpd_provider, 9832 (char *)(str + probe->dofp_provider), 9833 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 9834 9835 if (probe->dofp_mod >= strtab->dofs_size) { 9836 dtrace_dof_error(dof, "corrupt probe module"); 9837 return (NULL); 9838 } 9839 9840 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 9841 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 9842 9843 if (probe->dofp_func >= strtab->dofs_size) { 9844 
dtrace_dof_error(dof, "corrupt probe function"); 9845 return (NULL); 9846 } 9847 9848 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 9849 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 9850 9851 if (probe->dofp_name >= strtab->dofs_size) { 9852 dtrace_dof_error(dof, "corrupt probe name"); 9853 return (NULL); 9854 } 9855 9856 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 9857 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 9858 9859 return (desc); 9860 } 9861 9862 static dtrace_difo_t * 9863 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 9864 cred_t *cr) 9865 { 9866 dtrace_difo_t *dp; 9867 size_t ttl = 0; 9868 dof_difohdr_t *dofd; 9869 uintptr_t daddr = (uintptr_t)dof; 9870 size_t max = dtrace_difo_maxsize; 9871 int i, l, n; 9872 9873 static const struct { 9874 int section; 9875 int bufoffs; 9876 int lenoffs; 9877 int entsize; 9878 int align; 9879 const char *msg; 9880 } difo[] = { 9881 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 9882 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 9883 sizeof (dif_instr_t), "multiple DIF sections" }, 9884 9885 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 9886 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 9887 sizeof (uint64_t), "multiple integer tables" }, 9888 9889 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 9890 offsetof(dtrace_difo_t, dtdo_strlen), 0, 9891 sizeof (char), "multiple string tables" }, 9892 9893 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 9894 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 9895 sizeof (uint_t), "multiple variable tables" }, 9896 9897 { DOF_SECT_NONE, 0, 0, 0, NULL } 9898 }; 9899 9900 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 9901 dtrace_dof_error(dof, "invalid DIFO header section"); 9902 return (NULL); 9903 } 9904 9905 if (sec->dofs_align != sizeof (dof_secidx_t)) { 9906 dtrace_dof_error(dof, "bad alignment in DIFO header"); 9907 return (NULL); 9908 } 9909 9910 if (sec->dofs_size < sizeof (dof_difohdr_t) || 9911 sec->dofs_size % sizeof (dof_secidx_t)) { 9912 dtrace_dof_error(dof, "bad size in DIFO header"); 9913 return (NULL); 9914 } 9915 9916 dofd = (dof_difohdr_t *)(daddr + sec->dofs_offset); 9917 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 9918 9919 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 9920 dp->dtdo_rtype = dofd->dofd_rtype; 9921 9922 for (l = 0; l < n; l++) { 9923 dof_sec_t *subsec; 9924 void **bufp; 9925 uint32_t *lenp; 9926 9927 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 9928 dofd->dofd_links[l])) == NULL) 9929 goto err; /* invalid section link */ 9930 9931 if (ttl + subsec->dofs_size > max) { 9932 dtrace_dof_error(dof, "exceeds maximum size"); 9933 goto err; 9934 } 9935 9936 ttl += subsec->dofs_size; 9937 9938 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 9939 if (subsec->dofs_type != difo[i].section) 9940 continue; 9941 9942 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 9943 dtrace_dof_error(dof, "section not loaded"); 9944 goto err; 9945 } 9946 9947 if (subsec->dofs_align != difo[i].align) { 9948 dtrace_dof_error(dof, "bad alignment"); 9949 goto err; 9950 } 9951 9952 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 9953 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 9954 9955 if (*bufp != NULL) { 9956 dtrace_dof_error(dof, difo[i].msg); 9957 goto err; 9958 } 9959 9960 if (difo[i].entsize != subsec->dofs_entsize) { 9961 dtrace_dof_error(dof, "entry size mismatch"); 9962 goto err; 9963 } 9964 9965 if 
(subsec->dofs_entsize != 0 && 9966 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 9967 dtrace_dof_error(dof, "corrupt entry size"); 9968 goto err; 9969 } 9970 9971 *lenp = subsec->dofs_size; 9972 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 9973 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 9974 *bufp, subsec->dofs_size); 9975 9976 if (subsec->dofs_entsize != 0) 9977 *lenp /= subsec->dofs_entsize; 9978 9979 break; 9980 } 9981 9982 /* 9983 * If we encounter a loadable DIFO sub-section that is not 9984 * known to us, assume this is a broken program and fail. 9985 */ 9986 if (difo[i].section == DOF_SECT_NONE && 9987 (subsec->dofs_flags & DOF_SECF_LOAD)) { 9988 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 9989 goto err; 9990 } 9991 } 9992 9993 if (dp->dtdo_buf == NULL) { 9994 /* 9995 * We can't have a DIF object without DIF text. 9996 */ 9997 dtrace_dof_error(dof, "missing DIF text"); 9998 goto err; 9999 } 10000 10001 /* 10002 * Before we validate the DIF object, run through the variable table 10003 * looking for the strings -- if any of their sizes are zero, we'll set 10004 * their size to be the system-wide default string size. Note that 10005 * this should _not_ happen if the "strsize" option has been set -- 10006 * in this case, the compiler should have set the size to reflect the 10007 * setting of the option. 10008 */ 10009 for (i = 0; i < dp->dtdo_varlen; i++) { 10010 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10011 dtrace_diftype_t *t = &v->dtdv_type; 10012 10013 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 10014 continue; 10015 10016 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 10017 t->dtdt_size = dtrace_strsize_default; 10018 } 10019 10020 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 10021 goto err; 10022 10023 dtrace_difo_init(dp, vstate); 10024 return (dp); 10025 10026 err: 10027 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 10028 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 10029 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 10030 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 10031 10032 kmem_free(dp, sizeof (dtrace_difo_t)); 10033 return (NULL); 10034 } 10035 10036 static dtrace_predicate_t * 10037 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 10038 cred_t *cr) 10039 { 10040 dtrace_difo_t *dp; 10041 10042 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 10043 return (NULL); 10044 10045 return (dtrace_predicate_create(dp)); 10046 } 10047 10048 static dtrace_actdesc_t * 10049 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 10050 cred_t *cr) 10051 { 10052 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 10053 dof_actdesc_t *desc; 10054 dof_sec_t *difosec; 10055 size_t offs; 10056 uintptr_t daddr = (uintptr_t)dof; 10057 uint64_t arg; 10058 dtrace_actkind_t kind; 10059 10060 if (sec->dofs_type != DOF_SECT_ACTDESC) { 10061 dtrace_dof_error(dof, "invalid action section"); 10062 return (NULL); 10063 } 10064 10065 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 10066 dtrace_dof_error(dof, "truncated action description"); 10067 return (NULL); 10068 } 10069 10070 if (sec->dofs_align != sizeof (uint64_t)) { 10071 dtrace_dof_error(dof, "bad alignment in action description"); 10072 return (NULL); 10073 } 10074 10075 if (sec->dofs_size < sec->dofs_entsize) { 10076 dtrace_dof_error(dof, "section entry size exceeds total size"); 10077 return (NULL); 10078 } 10079 10080 if (sec->dofs_entsize !=
sizeof (dof_actdesc_t)) { 10081 dtrace_dof_error(dof, "bad entry size in action description"); 10082 return (NULL); 10083 } 10084 10085 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 10086 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 10087 return (NULL); 10088 } 10089 10090 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 10091 desc = (dof_actdesc_t *)(daddr + 10092 (uintptr_t)sec->dofs_offset + offs); 10093 kind = (dtrace_actkind_t)desc->dofa_kind; 10094 10095 if (DTRACEACT_ISPRINTFLIKE(kind) && 10096 (kind != DTRACEACT_PRINTA || 10097 desc->dofa_strtab != DOF_SECIDX_NONE)) { 10098 dof_sec_t *strtab; 10099 char *str, *fmt; 10100 uint64_t i; 10101 10102 /* 10103 * printf()-like actions must have a format string. 10104 */ 10105 if ((strtab = dtrace_dof_sect(dof, 10106 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 10107 goto err; 10108 10109 str = (char *)((uintptr_t)dof + 10110 (uintptr_t)strtab->dofs_offset); 10111 10112 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 10113 if (str[i] == '\0') 10114 break; 10115 } 10116 10117 if (i >= strtab->dofs_size) { 10118 dtrace_dof_error(dof, "bogus format string"); 10119 goto err; 10120 } 10121 10122 if (i == desc->dofa_arg) { 10123 dtrace_dof_error(dof, "empty format string"); 10124 goto err; 10125 } 10126 10127 i -= desc->dofa_arg; 10128 fmt = kmem_alloc(i + 1, KM_SLEEP); 10129 bcopy(&str[desc->dofa_arg], fmt, i + 1); 10130 arg = (uint64_t)(uintptr_t)fmt; 10131 } else { 10132 if (kind == DTRACEACT_PRINTA) { 10133 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 10134 arg = 0; 10135 } else { 10136 arg = desc->dofa_arg; 10137 } 10138 } 10139 10140 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 10141 desc->dofa_uarg, arg); 10142 10143 if (last != NULL) { 10144 last->dtad_next = act; 10145 } else { 10146 first = act; 10147 } 10148 10149 last = act; 10150 10151 if (desc->dofa_difo == DOF_SECIDX_NONE) 10152 continue; 10153 10154 if ((difosec = dtrace_dof_sect(dof, 10155 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 10156 goto err; 10157 10158 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 10159 10160 if (act->dtad_difo == NULL) 10161 goto err; 10162 } 10163 10164 ASSERT(first != NULL); 10165 return (first); 10166 10167 err: 10168 for (act = first; act != NULL; act = next) { 10169 next = act->dtad_next; 10170 dtrace_actdesc_release(act, vstate); 10171 } 10172 10173 return (NULL); 10174 } 10175 10176 static dtrace_ecbdesc_t * 10177 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 10178 cred_t *cr) 10179 { 10180 dtrace_ecbdesc_t *ep; 10181 dof_ecbdesc_t *ecb; 10182 dtrace_probedesc_t *desc; 10183 dtrace_predicate_t *pred = NULL; 10184 10185 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 10186 dtrace_dof_error(dof, "truncated ECB description"); 10187 return (NULL); 10188 } 10189 10190 if (sec->dofs_align != sizeof (uint64_t)) { 10191 dtrace_dof_error(dof, "bad alignment in ECB description"); 10192 return (NULL); 10193 } 10194 10195 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 10196 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 10197 10198 if (sec == NULL) 10199 return (NULL); 10200 10201 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 10202 ep->dted_uarg = ecb->dofe_uarg; 10203 desc = &ep->dted_probe; 10204 10205 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 10206 goto err; 10207 10208 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 10209 if ((sec = dtrace_dof_sect(dof, 10210 DOF_SECT_DIFOHDR, ecb->dofe_pred)) 
== NULL) 10211 goto err; 10212 10213 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 10214 goto err; 10215 10216 ep->dted_pred.dtpdd_predicate = pred; 10217 } 10218 10219 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 10220 if ((sec = dtrace_dof_sect(dof, 10221 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 10222 goto err; 10223 10224 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 10225 10226 if (ep->dted_action == NULL) 10227 goto err; 10228 } 10229 10230 return (ep); 10231 10232 err: 10233 if (pred != NULL) 10234 dtrace_predicate_release(pred, vstate); 10235 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 10236 return (NULL); 10237 } 10238 10239 /* 10240 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 10241 * specified DOF. At present, this amounts to simply adding 'ubase' to the 10242 * site of any user SETX relocations to account for load object base address. 10243 * In the future, if we need other relocations, this function can be extended. 10244 */ 10245 static int 10246 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 10247 { 10248 uintptr_t daddr = (uintptr_t)dof; 10249 dof_relohdr_t *dofr = (dof_relohdr_t *)(daddr + sec->dofs_offset); 10250 dof_sec_t *ss, *rs, *ts; 10251 dof_relodesc_t *r; 10252 uint_t i, n; 10253 10254 if (sec->dofs_size < sizeof (dof_relohdr_t) || 10255 sec->dofs_align != sizeof (dof_secidx_t)) { 10256 dtrace_dof_error(dof, "invalid relocation header"); 10257 return (-1); 10258 } 10259 10260 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 10261 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 10262 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 10263 10264 if (ss == NULL || rs == NULL || ts == NULL) 10265 return (-1); /* dtrace_dof_error() has been called already */ 10266 10267 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 10268 rs->dofs_align != sizeof (uint64_t)) { 10269 dtrace_dof_error(dof, "invalid relocation section"); 10270 return (-1); 10271 } 10272 10273 r = (dof_relodesc_t *)(daddr + rs->dofs_offset); 10274 n = rs->dofs_size / rs->dofs_entsize; 10275 10276 for (i = 0; i < n; i++) { 10277 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 10278 10279 switch (r->dofr_type) { 10280 case DOF_RELO_NONE: 10281 break; 10282 case DOF_RELO_SETX: 10283 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 10284 sizeof (uint64_t) > ts->dofs_size) { 10285 dtrace_dof_error(dof, "bad relocation offset"); 10286 return (-1); 10287 } 10288 10289 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 10290 dtrace_dof_error(dof, "misaligned setx relo"); 10291 return (-1); 10292 } 10293 10294 *(uint64_t *)taddr += ubase; 10295 break; 10296 default: 10297 dtrace_dof_error(dof, "invalid relocation type"); 10298 return (-1); 10299 } 10300 10301 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 10302 } 10303 10304 return (0); 10305 } 10306 10307 /* 10308 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 10309 * header: it should be at the front of a memory region that is at least 10310 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 10311 * size. It need not be validated in any other way. 
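 *
 * For example, a caller pulling DOF in from user space would be expected
 * to honor this contract in two steps -- a hedged sketch of the pattern
 * only, not a copy of any particular caller in this file:
 *
 *	dof_hdr_t hdr;
 *	(void) copyin(uarg, &hdr, sizeof (dof_hdr_t));
 *	if (hdr.dofh_loadsz < sizeof (dof_hdr_t))
 *		return (EINVAL);	-- loadsz must at least cover the header
 *	dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);
 *	(void) copyin(uarg, dof, hdr.dofh_loadsz);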
10312 */ 10313 static int 10314 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 10315 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 10316 { 10317 uint64_t len = dof->dofh_loadsz, seclen; 10318 uintptr_t daddr = (uintptr_t)dof; 10319 dtrace_ecbdesc_t *ep; 10320 dtrace_enabling_t *enab; 10321 uint_t i; 10322 10323 ASSERT(MUTEX_HELD(&dtrace_lock)); 10324 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 10325 10326 /* 10327 * Check the DOF header identification bytes. In addition to checking 10328 * valid settings, we also verify that unused bits/bytes are zeroed so 10329 * we can use them later without fear of regressing existing binaries. 10330 */ 10331 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 10332 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 10333 dtrace_dof_error(dof, "DOF magic string mismatch"); 10334 return (-1); 10335 } 10336 10337 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 10338 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 10339 dtrace_dof_error(dof, "DOF has invalid data model"); 10340 return (-1); 10341 } 10342 10343 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 10344 dtrace_dof_error(dof, "DOF encoding mismatch"); 10345 return (-1); 10346 } 10347 10348 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 10349 dtrace_dof_error(dof, "DOF version mismatch"); 10350 return (-1); 10351 } 10352 10353 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 10354 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 10355 return (-1); 10356 } 10357 10358 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 10359 dtrace_dof_error(dof, "DOF uses too many integer registers"); 10360 return (-1); 10361 } 10362 10363 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 10364 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 10365 return (-1); 10366 } 10367 10368 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 10369 if (dof->dofh_ident[i] != 0) { 10370 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 10371 return (-1); 10372 } 10373 } 10374 10375 if (dof->dofh_flags & ~DOF_FL_VALID) { 10376 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 10377 return (-1); 10378 } 10379 10380 if (dof->dofh_secsize == 0) { 10381 dtrace_dof_error(dof, "zero section header size"); 10382 return (-1); 10383 } 10384 10385 /* 10386 * Check that the section headers don't exceed the amount of DOF 10387 * data. Note that we cast the section size and number of sections 10388 * to uint64_t's to prevent possible overflow in the multiplication. 10389 */ 10390 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 10391 10392 if (dof->dofh_secoff > len || seclen > len || 10393 dof->dofh_secoff + seclen > len) { 10394 dtrace_dof_error(dof, "truncated section headers"); 10395 return (-1); 10396 } 10397 10398 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 10399 dtrace_dof_error(dof, "misaligned section headers"); 10400 return (-1); 10401 } 10402 10403 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 10404 dtrace_dof_error(dof, "misaligned section size"); 10405 return (-1); 10406 } 10407 10408 /* 10409 * Take an initial pass through the section headers to be sure that 10410 * the headers don't have stray offsets. If the 'noprobes' flag is 10411 * set, do not permit sections relating to providers, probes, or args. 
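 *
 * (The alignment checks in this pass rely on the usual power-of-two
 * test, (a & (a - 1)) == 0: for example, 8 & 7 == 0, while a bogus
 * alignment of 6 yields 6 & 5 == 4 and is rejected.)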
10412 */ 10413 for (i = 0; i < dof->dofh_secnum; i++) { 10414 dof_sec_t *sec = (dof_sec_t *)(daddr + 10415 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 10416 10417 if (noprobes) { 10418 switch (sec->dofs_type) { 10419 case DOF_SECT_PROVIDER: 10420 case DOF_SECT_PROBES: 10421 case DOF_SECT_PRARGS: 10422 case DOF_SECT_PROFFS: 10423 dtrace_dof_error(dof, "illegal sections " 10424 "for enabling"); 10425 return (-1); 10426 } 10427 } 10428 10429 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 10430 continue; /* just ignore non-loadable sections */ 10431 10432 if (sec->dofs_align & (sec->dofs_align - 1)) { 10433 dtrace_dof_error(dof, "bad section alignment"); 10434 return (-1); 10435 } 10436 10437 if (sec->dofs_offset & (sec->dofs_align - 1)) { 10438 dtrace_dof_error(dof, "misaligned section"); 10439 return (-1); 10440 } 10441 10442 if (sec->dofs_offset > len || sec->dofs_size > len || 10443 sec->dofs_offset + sec->dofs_size > len) { 10444 dtrace_dof_error(dof, "corrupt section header"); 10445 return (-1); 10446 } 10447 10448 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 10449 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 10450 dtrace_dof_error(dof, "non-terminating string table"); 10451 return (-1); 10452 } 10453 } 10454 10455 /* 10456 * Take a second pass through the sections and locate and perform any 10457 * relocations that are present. We do this after the first pass to 10458 * be sure that all sections have had their headers validated. 10459 */ 10460 for (i = 0; i < dof->dofh_secnum; i++) { 10461 dof_sec_t *sec = (dof_sec_t *)(daddr + 10462 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 10463 10464 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 10465 continue; /* skip sections that are not loadable */ 10466 10467 switch (sec->dofs_type) { 10468 case DOF_SECT_URELHDR: 10469 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 10470 return (-1); 10471 break; 10472 } 10473 } 10474 10475 if ((enab = *enabp) == NULL) 10476 enab = *enabp = dtrace_enabling_create(vstate); 10477 10478 for (i = 0; i < dof->dofh_secnum; i++) { 10479 dof_sec_t *sec = (dof_sec_t *)(daddr + 10480 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 10481 10482 if (sec->dofs_type != DOF_SECT_ECBDESC) 10483 continue; 10484 10485 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 10486 dtrace_enabling_destroy(enab); 10487 *enabp = NULL; 10488 return (-1); 10489 } 10490 10491 dtrace_enabling_add(enab, ep); 10492 } 10493 10494 return (0); 10495 } 10496 10497 /* 10498 * Process DOF for any options. This routine assumes that the DOF has been 10499 * at least processed by dtrace_dof_slurp(). 
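 *
 * Each entry in a DOF_SECT_OPTDESC section is a dof_optdesc_t. As a
 * purely illustrative example (hypothetical values, not emitted by any
 * code here), a consumer asking for a 16m principal buffer might encode:
 *
 *	dofo_option = DTRACEOPT_BUFSIZE
 *	dofo_strtab = DOF_SECIDX_NONE
 *	dofo_value  = 16 * 1024 * 1024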
10500 */ 10501 static int 10502 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 10503 { 10504 int i, rval; 10505 uint32_t entsize; 10506 size_t offs; 10507 dof_optdesc_t *desc; 10508 10509 for (i = 0; i < dof->dofh_secnum; i++) { 10510 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 10511 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 10512 10513 if (sec->dofs_type != DOF_SECT_OPTDESC) 10514 continue; 10515 10516 if (sec->dofs_align != sizeof (uint64_t)) { 10517 dtrace_dof_error(dof, "bad alignment in " 10518 "option description"); 10519 return (EINVAL); 10520 } 10521 10522 if ((entsize = sec->dofs_entsize) == 0) { 10523 dtrace_dof_error(dof, "zeroed option entry size"); 10524 return (EINVAL); 10525 } 10526 10527 if (entsize < sizeof (dof_optdesc_t)) { 10528 dtrace_dof_error(dof, "bad option entry size"); 10529 return (EINVAL); 10530 } 10531 10532 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 10533 desc = (dof_optdesc_t *)((uintptr_t)dof + 10534 (uintptr_t)sec->dofs_offset + offs); 10535 10536 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 10537 dtrace_dof_error(dof, "non-zero option string"); 10538 return (EINVAL); 10539 } 10540 10541 if (desc->dofo_value == DTRACEOPT_UNSET) { 10542 dtrace_dof_error(dof, "unset option"); 10543 return (EINVAL); 10544 } 10545 10546 if ((rval = dtrace_state_option(state, 10547 desc->dofo_option, desc->dofo_value)) != 0) { 10548 dtrace_dof_error(dof, "rejected option"); 10549 return (rval); 10550 } 10551 } 10552 } 10553 10554 return (0); 10555 } 10556 10557 /* 10558 * DTrace Consumer State Functions 10559 */ 10560 int 10561 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 10562 { 10563 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 10564 void *base; 10565 uintptr_t limit; 10566 dtrace_dynvar_t *dvar, *next, *start; 10567 int i; 10568 10569 ASSERT(MUTEX_HELD(&dtrace_lock)); 10570 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 10571 10572 bzero(dstate, sizeof (dtrace_dstate_t)); 10573 10574 if ((dstate->dtds_chunksize = chunksize) == 0) 10575 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 10576 10577 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 10578 size = min; 10579 10580 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10581 return (ENOMEM); 10582 10583 dstate->dtds_size = size; 10584 dstate->dtds_base = base; 10585 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 10586 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 10587 10588 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 10589 10590 if (hashsize != 1 && (hashsize & 1)) 10591 hashsize--; 10592 10593 dstate->dtds_hashsize = hashsize; 10594 dstate->dtds_hash = dstate->dtds_base; 10595 10596 /* 10597 * Divide the free list evenly among all NCPU CPUs, rounding each 10598 * CPU's share down to a multiple of the chunk size. 10599 */ 10600 start = (dtrace_dynvar_t *) 10601 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 10602 limit = (uintptr_t)base + size; 10603 10604 maxper = (limit - (uintptr_t)start) / NCPU; 10605 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 10606 10607 for (i = 0; i < NCPU; i++) { 10608 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 10609 10610 /* 10611 * If we don't even have enough chunks to make it once through 10612 * NCPUs, we're just going to allocate everything to the first 10613 * CPU. And if we're on the last CPU, we're going to allocate 10614 * whatever is left over.
In either case, we set the limit to 10615 * be the limit of the dynamic variable space. 10616 */ 10617 if (maxper == 0 || i == NCPU - 1) { 10618 limit = (uintptr_t)base + size; 10619 start = NULL; 10620 } else { 10621 limit = (uintptr_t)start + maxper; 10622 start = (dtrace_dynvar_t *)limit; 10623 } 10624 10625 ASSERT(limit <= (uintptr_t)base + size); 10626 10627 for (;;) { 10628 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 10629 dstate->dtds_chunksize); 10630 10631 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 10632 break; 10633 10634 dvar->dtdv_next = next; 10635 dvar = next; 10636 } 10637 10638 if (maxper == 0) 10639 break; 10640 } 10641 10642 return (0); 10643 } 10644 10645 void 10646 dtrace_dstate_fini(dtrace_dstate_t *dstate) 10647 { 10648 ASSERT(MUTEX_HELD(&cpu_lock)); 10649 10650 if (dstate->dtds_base == NULL) 10651 return; 10652 10653 kmem_free(dstate->dtds_base, dstate->dtds_size); 10654 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 10655 } 10656 10657 static void 10658 dtrace_vstate_fini(dtrace_vstate_t *vstate) 10659 { 10660 /* 10661 * Logical XOR, where are you? 10662 */ 10663 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 10664 10665 if (vstate->dtvs_nglobals > 0) { 10666 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 10667 sizeof (dtrace_statvar_t *)); 10668 } 10669 10670 if (vstate->dtvs_ntlocals > 0) { 10671 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 10672 sizeof (dtrace_difv_t)); 10673 } 10674 10675 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 10676 10677 if (vstate->dtvs_nlocals > 0) { 10678 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 10679 sizeof (dtrace_statvar_t *)); 10680 } 10681 } 10682 10683 static void 10684 dtrace_state_clean(dtrace_state_t *state) 10685 { 10686 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 10687 return; 10688 10689 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 10690 dtrace_speculation_clean(state); 10691 } 10692 10693 static void 10694 dtrace_state_deadman(dtrace_state_t *state) 10695 { 10696 hrtime_t now; 10697 10698 dtrace_sync(); 10699 10700 now = dtrace_gethrtime(); 10701 10702 if (state != dtrace_anon.dta_state && 10703 now - state->dts_laststatus >= dtrace_deadman_user) 10704 return; 10705 10706 /* 10707 * We must be sure that dts_alive never appears to be less than the 10708 * value upon entry to dtrace_state_deadman(), and because we lack a 10709 * dtrace_cas64(), we cannot store to it atomically. We thus instead 10710 * store INT64_MAX to it, followed by a memory barrier, followed by 10711 * the new value. This assures that dts_alive never appears to be 10712 * less than its true value, regardless of the order in which the 10713 * stores to the underlying storage are issued. 
10714 */ 10715 state->dts_alive = INT64_MAX; 10716 dtrace_membar_producer(); 10717 state->dts_alive = now; 10718 } 10719 10720 dtrace_state_t * 10721 dtrace_state_create(dev_t *devp, cred_t *cr) 10722 { 10723 minor_t minor; 10724 major_t major; 10725 char c[30]; 10726 dtrace_state_t *state; 10727 dtrace_optval_t *opt; 10728 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 10729 10730 ASSERT(MUTEX_HELD(&dtrace_lock)); 10731 ASSERT(MUTEX_HELD(&cpu_lock)); 10732 10733 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 10734 VM_BESTFIT | VM_SLEEP); 10735 10736 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 10737 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 10738 return (NULL); 10739 } 10740 10741 state = ddi_get_soft_state(dtrace_softstate, minor); 10742 state->dts_epid = DTRACE_EPIDNONE + 1; 10743 10744 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor); 10745 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 10746 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 10747 10748 if (devp != NULL) { 10749 major = getemajor(*devp); 10750 } else { 10751 major = ddi_driver_major(dtrace_devi); 10752 } 10753 10754 state->dts_dev = makedevice(major, minor); 10755 10756 if (devp != NULL) 10757 *devp = state->dts_dev; 10758 10759 /* 10760 * We allocate NCPU buffers. On the one hand, this can be quite 10761 * a bit of memory per instance (nearly 36K on a Starcat). On the 10762 * other hand, it saves an additional memory reference in the probe 10763 * path. 10764 */ 10765 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 10766 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 10767 state->dts_cleaner = CYCLIC_NONE; 10768 state->dts_deadman = CYCLIC_NONE; 10769 state->dts_vstate.dtvs_state = state; 10770 10771 for (i = 0; i < DTRACEOPT_MAX; i++) 10772 state->dts_options[i] = DTRACEOPT_UNSET; 10773 10774 /* 10775 * Set the default options. 10776 */ 10777 opt = state->dts_options; 10778 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 10779 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 10780 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 10781 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 10782 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 10783 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 10784 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 10785 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 10786 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 10787 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 10788 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 10789 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 10790 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 10791 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 10792 10793 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 10794 10795 /* 10796 * Set up the credentials for this instantiation. 
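 *
 * To summarize the cases handled below: all privileges yield full
 * visibility and full action rights; dtrace_proc or dtrace_user yields
 * process-level actions; dtrace_user plus proc_owner additionally yields
 * visibility into -- and destructive actions upon -- all processes; and
 * dtrace_kernel adds kernel visibility and kernel actions.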
10797 */ 10798 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 10799 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 10800 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 10801 } else { 10802 state->dts_cred.dcr_uid = crgetuid(cr); 10803 state->dts_cred.dcr_gid = crgetgid(cr); 10804 10805 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 10806 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 10807 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 10808 } 10809 10810 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) && 10811 PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 10812 state->dts_cred.dcr_visible |= DTRACE_CRV_ALLPROC; 10813 state->dts_cred.dcr_action |= 10814 DTRACE_CRA_PROC_DESTRUCTIVE; 10815 } 10816 10817 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 10818 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 10819 DTRACE_CRV_ALLPROC; 10820 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 10821 DTRACE_CRA_PROC; 10822 10823 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 10824 state->dts_cred.dcr_action |= 10825 DTRACE_CRA_PROC_DESTRUCTIVE; 10826 } 10827 } 10828 10829 return (state); 10830 } 10831 10832 static int 10833 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 10834 { 10835 dtrace_optval_t *opt = state->dts_options, size; 10836 processorid_t cpu; 10837 int flags = 0, rval; 10838 10839 ASSERT(MUTEX_HELD(&dtrace_lock)); 10840 ASSERT(MUTEX_HELD(&cpu_lock)); 10841 ASSERT(which < DTRACEOPT_MAX); 10842 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 10843 (state == dtrace_anon.dta_state && 10844 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 10845 10846 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 10847 return (0); 10848 10849 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 10850 cpu = opt[DTRACEOPT_CPU]; 10851 10852 if (which == DTRACEOPT_SPECSIZE) 10853 flags |= DTRACEBUF_NOSWITCH; 10854 10855 if (which == DTRACEOPT_BUFSIZE) { 10856 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 10857 flags |= DTRACEBUF_RING; 10858 10859 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 10860 flags |= DTRACEBUF_FILL; 10861 10862 flags |= DTRACEBUF_INACTIVE; 10863 } 10864 10865 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) { 10866 /* 10867 * The size must be 8-byte aligned. If the size is not 8-byte 10868 * aligned, drop it down by the difference. 10869 */ 10870 if (size & (sizeof (uint64_t) - 1)) 10871 size -= size & (sizeof (uint64_t) - 1); 10872 10873 if (size < state->dts_reserve) { 10874 /* 10875 * Buffers must always be large enough to accommodate 10876 * their prereserved space. We return E2BIG instead 10877 * of ENOMEM in this case to allow user-level 10878 * software to differentiate the cases.
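 *
 * As a worked example of this descent (hypothetical sizes; the halving
 * occurs only under the "auto" buffer resize policy): a 4m request that
 * cannot be satisfied is retried at 2m, then 1m, and so on. The descent
 * ends in E2BIG if the size falls below the reserve, and in ENOMEM if
 * every candidate size down to sizeof (uint64_t) fails to allocate.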
10879 */ 10880 return (E2BIG); 10881 } 10882 10883 rval = dtrace_buffer_alloc(buf, size, flags, cpu); 10884 10885 if (rval != ENOMEM) { 10886 opt[which] = size; 10887 return (rval); 10888 } 10889 10890 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 10891 return (rval); 10892 } 10893 10894 return (ENOMEM); 10895 } 10896 10897 static int 10898 dtrace_state_buffers(dtrace_state_t *state) 10899 { 10900 dtrace_speculation_t *spec = state->dts_speculations; 10901 int rval, i; 10902 10903 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 10904 DTRACEOPT_BUFSIZE)) != 0) 10905 return (rval); 10906 10907 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 10908 DTRACEOPT_AGGSIZE)) != 0) 10909 return (rval); 10910 10911 for (i = 0; i < state->dts_nspeculations; i++) { 10912 if ((rval = dtrace_state_buffer(state, 10913 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 10914 return (rval); 10915 } 10916 10917 return (0); 10918 } 10919 10920 static void 10921 dtrace_state_prereserve(dtrace_state_t *state) 10922 { 10923 dtrace_ecb_t *ecb; 10924 dtrace_probe_t *probe; 10925 10926 state->dts_reserve = 0; 10927 10928 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 10929 return; 10930 10931 /* 10932 * If our buffer policy is a "fill" buffer policy, we need to set the 10933 * prereserved space to be the space required by the END probes. 10934 */ 10935 probe = dtrace_probes[dtrace_probeid_end - 1]; 10936 ASSERT(probe != NULL); 10937 10938 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 10939 if (ecb->dte_state != state) 10940 continue; 10941 10942 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 10943 } 10944 } 10945 10946 static int 10947 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 10948 { 10949 dtrace_optval_t *opt = state->dts_options, sz, nspec; 10950 dtrace_speculation_t *spec; 10951 dtrace_buffer_t *buf; 10952 cyc_handler_t hdlr; 10953 cyc_time_t when; 10954 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 10955 dtrace_icookie_t cookie; 10956 10957 mutex_enter(&cpu_lock); 10958 mutex_enter(&dtrace_lock); 10959 10960 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 10961 rval = EBUSY; 10962 goto out; 10963 } 10964 10965 /* 10966 * Before we can perform any checks, we must prime all of the 10967 * retained enablings that correspond to this state. 10968 */ 10969 dtrace_enabling_prime(state); 10970 10971 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 10972 rval = EACCES; 10973 goto out; 10974 } 10975 10976 dtrace_state_prereserve(state); 10977 10978 /* 10979 * What we want to do now is try to allocate our speculations. 10980 * We do not automatically resize the number of speculations; if 10981 * this fails, we will fail the operation.
10982 */ 10983 nspec = opt[DTRACEOPT_NSPEC]; 10984 ASSERT(nspec != DTRACEOPT_UNSET); 10985 10986 if (nspec > INT_MAX) { 10987 rval = ENOMEM; 10988 goto out; 10989 } 10990 10991 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP); 10992 10993 if (spec == NULL) { 10994 rval = ENOMEM; 10995 goto out; 10996 } 10997 10998 state->dts_speculations = spec; 10999 state->dts_nspeculations = (int)nspec; 11000 11001 for (i = 0; i < nspec; i++) { 11002 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) { 11003 rval = ENOMEM; 11004 goto err; 11005 } 11006 11007 spec[i].dtsp_buffer = buf; 11008 } 11009 11010 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 11011 if (dtrace_anon.dta_state == NULL) { 11012 rval = ENOENT; 11013 goto out; 11014 } 11015 11016 if (state->dts_necbs != 0) { 11017 rval = EALREADY; 11018 goto out; 11019 } 11020 11021 state->dts_anon = dtrace_anon_grab(); 11022 ASSERT(state->dts_anon != NULL); 11023 11024 *cpu = dtrace_anon.dta_beganon; 11025 11026 /* 11027 * If the anonymous state is active (as it almost certainly 11028 * is if the anonymous enabling ultimately matched anything), 11029 * we don't allow any further option processing -- but we 11030 * don't return failure. 11031 */ 11032 state = state->dts_anon; 11033 11034 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 11035 goto out; 11036 } 11037 11038 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 11039 opt[DTRACEOPT_AGGSIZE] != 0) { 11040 if (state->dts_aggregations == NULL) { 11041 /* 11042 * We're not going to create an aggregation buffer 11043 * because we don't have any ECBs that contain 11044 * aggregations -- set this option to 0. 11045 */ 11046 opt[DTRACEOPT_AGGSIZE] = 0; 11047 } else { 11048 /* 11049 * If we have an aggregation buffer, we must also have 11050 * a buffer to use as scratch. 11051 */ 11052 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 11053 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 11054 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 11055 } 11056 } 11057 } 11058 11059 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 11060 opt[DTRACEOPT_SPECSIZE] != 0) { 11061 if (!state->dts_speculates) { 11062 /* 11063 * We're not going to create speculation buffers 11064 * because we don't have any ECBs that actually 11065 * speculate -- set the speculation size to 0. 11066 */ 11067 opt[DTRACEOPT_SPECSIZE] = 0; 11068 } 11069 } 11070 11071 /* 11072 * The bare minimum size for any buffer that we're actually going to 11073 * do anything to is sizeof (uint64_t). 11074 */ 11075 sz = sizeof (uint64_t); 11076 11077 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 11078 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 11079 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 11080 /* 11081 * A buffer size has been explicitly set to 0 (or to a size 11082 * that will be adjusted to 0) and we need the space -- we 11083 * need to return failure. We return ENOSPC to differentiate 11084 * it from failing to allocate a buffer due to failure to meet 11085 * the reserve (for which we return E2BIG). 
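 *
 * The buffer sizing errors thus partition as follows: ENOSPC -- a
 * needed buffer was forced to a size of zero; E2BIG -- the size fell
 * below the fill-policy reserve; ENOMEM -- allocation failed outright
 * at every candidate size.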
11086 */ 11087 rval = ENOSPC; 11088 goto out; 11089 } 11090 11091 if ((rval = dtrace_state_buffers(state)) != 0) 11092 goto err; 11093 11094 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 11095 sz = dtrace_dstate_defsize; 11096 11097 do { 11098 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 11099 11100 if (rval == 0) 11101 break; 11102 11103 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 11104 goto err; 11105 } while (sz >>= 1); 11106 11107 opt[DTRACEOPT_DYNVARSIZE] = sz; 11108 11109 if (rval != 0) 11110 goto err; 11111 11112 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 11113 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 11114 11115 if (opt[DTRACEOPT_CLEANRATE] == 0) 11116 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 11117 11118 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 11119 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 11120 11121 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 11122 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 11123 11124 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 11125 hdlr.cyh_arg = state; 11126 hdlr.cyh_level = CY_LOW_LEVEL; 11127 11128 when.cyt_when = 0; 11129 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 11130 11131 state->dts_cleaner = cyclic_add(&hdlr, &when); 11132 11133 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 11134 hdlr.cyh_arg = state; 11135 hdlr.cyh_level = CY_LOW_LEVEL; 11136 11137 when.cyt_when = 0; 11138 when.cyt_interval = dtrace_deadman_interval; 11139 11140 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 11141 state->dts_deadman = cyclic_add(&hdlr, &when); 11142 11143 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 11144 11145 /* 11146 * Now it's time to actually fire the BEGIN probe. We need to disable 11147 * interrupts here both to record the CPU on which we fired the BEGIN 11148 * probe (the data from this CPU will be processed first at user 11149 * level) and to manually activate the buffer for this CPU. 11150 */ 11151 cookie = dtrace_interrupt_disable(); 11152 *cpu = CPU->cpu_id; 11153 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 11154 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 11155 11156 dtrace_probe(dtrace_probeid_begin, 11157 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 11158 dtrace_interrupt_enable(cookie); 11159 /* 11160 * We may have had an exit action from a BEGIN probe; only change our 11161 * state to ACTIVE if we're still in WARMUP. 11162 */ 11163 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 11164 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 11165 11166 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 11167 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 11168 11169 /* 11170 * Regardless of whether we're now in ACTIVE or DRAINING, we want 11171 * each CPU to transition its principal buffer out of the 11172 * INACTIVE state. Doing this assures that no CPU will suddenly begin 11173 * processing an ECB halfway down a probe's ECB chain; all CPUs will 11174 * atomically transition from processing none of a state's ECBs to 11175 * processing all of them.
11176 */ 11177 dtrace_xcall(DTRACE_CPUALL, 11178 (dtrace_xcall_t)dtrace_buffer_activate, state); 11179 goto out; 11180 11181 err: 11182 dtrace_buffer_free(state->dts_buffer); 11183 dtrace_buffer_free(state->dts_aggbuffer); 11184 11185 if ((nspec = state->dts_nspeculations) == 0) { 11186 ASSERT(state->dts_speculations == NULL); 11187 goto out; 11188 } 11189 11190 spec = state->dts_speculations; 11191 ASSERT(spec != NULL); 11192 11193 for (i = 0; i < state->dts_nspeculations; i++) { 11194 if ((buf = spec[i].dtsp_buffer) == NULL) 11195 break; 11196 11197 dtrace_buffer_free(buf); 11198 kmem_free(buf, bufsize); 11199 } 11200 11201 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 11202 state->dts_nspeculations = 0; 11203 state->dts_speculations = NULL; 11204 11205 out: 11206 mutex_exit(&dtrace_lock); 11207 mutex_exit(&cpu_lock); 11208 11209 return (rval); 11210 } 11211 11212 static int 11213 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 11214 { 11215 dtrace_icookie_t cookie; 11216 11217 ASSERT(MUTEX_HELD(&dtrace_lock)); 11218 11219 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 11220 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 11221 return (EINVAL); 11222 11223 /* 11224 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 11225 * to be sure that every CPU has seen it. See below for the details 11226 * on why this is done. 11227 */ 11228 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 11229 dtrace_sync(); 11230 11231 /* 11232 * By this point, it is impossible for any CPU to be still processing 11233 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 11234 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 11235 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 11236 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 11237 * iff we're in the END probe. 11238 */ 11239 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 11240 dtrace_sync(); 11241 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 11242 11243 /* 11244 * Finally, we can release the reserve and call the END probe. We 11245 * disable interrupts across calling the END probe to allow us to 11246 * return the CPU on which we actually called the END probe. This 11247 * allows user-land to be sure that this CPU's principal buffer is 11248 * processed last. 
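 *
 * The complete shutdown ordering is thus: DRAINING; dtrace_sync();
 * COOLDOWN; dtrace_sync(); the END probe with interrupts disabled;
 * STOPPED; and a final dtrace_sync() before we return.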
11249 */ 11250 state->dts_reserve = 0; 11251 11252 cookie = dtrace_interrupt_disable(); 11253 *cpu = CPU->cpu_id; 11254 dtrace_probe(dtrace_probeid_end, 11255 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 11256 dtrace_interrupt_enable(cookie); 11257 11258 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 11259 dtrace_sync(); 11260 11261 return (0); 11262 } 11263 11264 static int 11265 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 11266 dtrace_optval_t val) 11267 { 11268 ASSERT(MUTEX_HELD(&dtrace_lock)); 11269 11270 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 11271 return (EBUSY); 11272 11273 if (option >= DTRACEOPT_MAX) 11274 return (EINVAL); 11275 11276 if (option != DTRACEOPT_CPU && val < 0) 11277 return (EINVAL); 11278 11279 switch (option) { 11280 case DTRACEOPT_DESTRUCTIVE: 11281 if (dtrace_destructive_disallow) 11282 return (EACCES); 11283 11284 state->dts_cred.dcr_destructive = 1; 11285 break; 11286 11287 case DTRACEOPT_BUFSIZE: 11288 case DTRACEOPT_DYNVARSIZE: 11289 case DTRACEOPT_AGGSIZE: 11290 case DTRACEOPT_SPECSIZE: 11291 case DTRACEOPT_STRSIZE: 11292 if (val < 0) 11293 return (EINVAL); 11294 11295 if (val >= LONG_MAX) { 11296 /* 11297 * If this is an otherwise negative value, set it to 11298 * the highest multiple of 128m less than LONG_MAX. 11299 * Technically, we're adjusting the size without 11300 * regard to the buffer resizing policy, but in fact, 11301 * this has no effect -- if we set the buffer size to 11302 * ~LONG_MAX and the buffer policy is ultimately set to 11303 * be "manual", the buffer allocation is guaranteed to 11304 * fail, if only because the allocation requires two 11305 * buffers. (We set the size to the highest 11306 * multiple of 128m because it ensures that the size 11307 * will remain a multiple of a megabyte when 11308 * repeatedly halved -- all the way down to 15m.) 11309 */ 11310 val = LONG_MAX - (1 << 27) + 1; 11311 } 11312 } 11313 11314 state->dts_options[option] = val; 11315 11316 return (0); 11317 } 11318 11319 static void 11320 dtrace_state_destroy(dtrace_state_t *state) 11321 { 11322 dtrace_ecb_t *ecb; 11323 dtrace_vstate_t *vstate = &state->dts_vstate; 11324 minor_t minor = getminor(state->dts_dev); 11325 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 11326 dtrace_speculation_t *spec = state->dts_speculations; 11327 int nspec = state->dts_nspeculations; 11328 uint32_t match; 11329 11330 ASSERT(MUTEX_HELD(&dtrace_lock)); 11331 ASSERT(MUTEX_HELD(&cpu_lock)); 11332 11333 /* 11334 * First, retract any retained enablings for this state. 11335 */ 11336 dtrace_enabling_retract(state); 11337 ASSERT(state->dts_nretained == 0); 11338 11339 /* 11340 * Now we need to disable and destroy any enabled probes. Because any 11341 * DTRACE_PRIV_KERNEL probes may actually be slowing our progress 11342 * (especially if they're all enabled), we take two passes through 11343 * the ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, 11344 * and in the second we disable whatever is left over.
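 *
 * That is, the first iteration runs with 'match' set to
 * DTRACE_PRIV_KERNEL and skips any ECB whose provider lacks that
 * privilege flag; the second iteration runs with a 'match' of zero and
 * disables and destroys whatever remains.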
11345 */ 11346 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 11347 for (i = 0; i < state->dts_necbs; i++) { 11348 if ((ecb = state->dts_ecbs[i]) == NULL) 11349 continue; 11350 11351 if (match && ecb->dte_probe != NULL) { 11352 dtrace_probe_t *probe = ecb->dte_probe; 11353 dtrace_provider_t *prov = probe->dtpr_provider; 11354 11355 if (!(prov->dtpv_priv.dtpp_flags & match)) 11356 continue; 11357 } 11358 11359 dtrace_ecb_disable(ecb); 11360 dtrace_ecb_destroy(ecb); 11361 } 11362 11363 if (!match) 11364 break; 11365 } 11366 11367 /* 11368 * Before we free the buffers, perform one more sync to assure that 11369 * every CPU is out of probe context. 11370 */ 11371 dtrace_sync(); 11372 11373 dtrace_buffer_free(state->dts_buffer); 11374 dtrace_buffer_free(state->dts_aggbuffer); 11375 11376 for (i = 0; i < nspec; i++) 11377 dtrace_buffer_free(spec[i].dtsp_buffer); 11378 11379 if (state->dts_cleaner != CYCLIC_NONE) 11380 cyclic_remove(state->dts_cleaner); 11381 11382 if (state->dts_deadman != CYCLIC_NONE) 11383 cyclic_remove(state->dts_deadman); 11384 11385 dtrace_dstate_fini(&vstate->dtvs_dynvars); 11386 dtrace_vstate_fini(vstate); 11387 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 11388 11389 if (state->dts_aggregations != NULL) { 11390 #ifdef DEBUG 11391 for (i = 0; i < state->dts_naggregations; i++) 11392 ASSERT(state->dts_aggregations[i] == NULL); 11393 #endif 11394 ASSERT(state->dts_naggregations > 0); 11395 kmem_free(state->dts_aggregations, 11396 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 11397 } 11398 11399 kmem_free(state->dts_buffer, bufsize); 11400 kmem_free(state->dts_aggbuffer, bufsize); 11401 11402 for (i = 0; i < nspec; i++) 11403 kmem_free(spec[i].dtsp_buffer, bufsize); 11404 11405 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 11406 11407 dtrace_format_destroy(state); 11408 11409 vmem_destroy(state->dts_aggid_arena); 11410 ddi_soft_state_free(dtrace_softstate, minor); 11411 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 11412 } 11413 11414 /* 11415 * DTrace Anonymous Enabling Functions 11416 */ 11417 static dtrace_state_t * 11418 dtrace_anon_grab(void) 11419 { 11420 dtrace_state_t *state; 11421 11422 ASSERT(MUTEX_HELD(&dtrace_lock)); 11423 11424 if ((state = dtrace_anon.dta_state) == NULL) { 11425 ASSERT(dtrace_anon.dta_enabling == NULL); 11426 return (NULL); 11427 } 11428 11429 ASSERT(dtrace_anon.dta_enabling != NULL); 11430 ASSERT(dtrace_retained != NULL); 11431 11432 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 11433 dtrace_anon.dta_enabling = NULL; 11434 dtrace_anon.dta_state = NULL; 11435 11436 return (state); 11437 } 11438 11439 static void 11440 dtrace_anon_property(void) 11441 { 11442 int i, rv; 11443 dtrace_state_t *state; 11444 dof_hdr_t *dof; 11445 char c[32]; /* enough for "dof-data-" + digits */ 11446 11447 ASSERT(MUTEX_HELD(&dtrace_lock)); 11448 ASSERT(MUTEX_HELD(&cpu_lock)); 11449 11450 for (i = 0; ; i++) { 11451 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 11452 11453 dtrace_err_verbose = 1; 11454 11455 if ((dof = dtrace_dof_property(c)) == NULL) { 11456 dtrace_err_verbose = 0; 11457 break; 11458 } 11459 11460 /* 11461 * We want to create anonymous state, so we need to transition 11462 * the kernel debugger to indicate that DTrace is active. If 11463 * this fails (e.g. because the debugger has modified text in 11464 * some way), we won't continue with the processing. 
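 *
 * (The DOF itself arrives via successive driver properties named
 * "dof-data-0", "dof-data-1", and so on -- see the loop above, which
 * ends at the first missing property.)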
11465 */ 11466 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 11467 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 11468 "enabling ignored."); 11469 dtrace_dof_destroy(dof); 11470 break; 11471 } 11472 11473 /* 11474 * If we haven't allocated an anonymous state, we'll do so now. 11475 */ 11476 if ((state = dtrace_anon.dta_state) == NULL) { 11477 state = dtrace_state_create(NULL, NULL); 11478 dtrace_anon.dta_state = state; 11479 11480 if (state == NULL) { 11481 /* 11482 * This basically shouldn't happen: the only 11483 * failure mode from dtrace_state_create() is a 11484 * failure of ddi_soft_state_zalloc() that 11485 * itself should never happen. Still, the 11486 * interface allows for a failure mode, and 11487 * we want to fail as gracefully as possible: 11488 * we'll emit an error message and cease 11489 * processing anonymous state in this case. 11490 */ 11491 cmn_err(CE_WARN, "failed to create " 11492 "anonymous state"); 11493 dtrace_dof_destroy(dof); 11494 break; 11495 } 11496 } 11497 11498 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 11499 &dtrace_anon.dta_enabling, 0, B_TRUE); 11500 11501 if (rv == 0) 11502 rv = dtrace_dof_options(dof, state); 11503 11504 dtrace_err_verbose = 0; 11505 dtrace_dof_destroy(dof); 11506 11507 if (rv != 0) { 11508 /* 11509 * This is malformed DOF; chuck any anonymous state 11510 * that we created. 11511 */ 11512 ASSERT(dtrace_anon.dta_enabling == NULL); 11513 dtrace_state_destroy(state); 11514 dtrace_anon.dta_state = NULL; 11515 break; 11516 } 11517 11518 ASSERT(dtrace_anon.dta_enabling != NULL); 11519 } 11520 11521 if (dtrace_anon.dta_enabling != NULL) { 11522 int rval; 11523 11524 /* 11525 * dtrace_enabling_retain() can only fail because we are 11526 * trying to retain more enablings than are allowed -- but 11527 * we only have one anonymous enabling, and we are guaranteed 11528 * to be allowed at least one retained enabling; we assert 11529 * that dtrace_enabling_retain() returns success. 11530 */ 11531 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 11532 ASSERT(rval == 0); 11533 11534 dtrace_enabling_dump(dtrace_anon.dta_enabling); 11535 } 11536 } 11537 11538 /* 11539 * DTrace Helper Functions 11540 */ 11541 static void 11542 dtrace_helper_trace(dtrace_helper_action_t *helper, dtrace_vstate_t *vstate, 11543 int where) 11544 { 11545 uint32_t size, next, nnext, i; 11546 dtrace_helptrace_t *ent; 11547 11548 if (!dtrace_helptrace_enabled) 11549 return; 11550 11551 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 11552 11553 /* 11554 * What would a tracing framework be without its own tracing 11555 * framework? (Well, a hell of a lot simpler, for starters...) 11556 */ 11557 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 11558 sizeof (uint64_t) - sizeof (uint64_t); 11559 11560 /* 11561 * Iterate until we can allocate a slot in the trace buffer. 11562 */ 11563 do { 11564 next = dtrace_helptrace_next; 11565 11566 if (next + size < dtrace_helptrace_bufsize) { 11567 nnext = next + size; 11568 } else { 11569 nnext = size; 11570 } 11571 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 11572 11573 /* 11574 * We have our slot; fill it in. 
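 * (If nnext is exactly 'size', either 'next' was zero to begin with or
 * the cursor wrapped; in both cases our slot begins at offset 0, hence
 * the reset of 'next' below.)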
11575 */ 11576 if (nnext == size) 11577 next = 0; 11578 11579 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 11580 ent->dtht_helper = helper; 11581 ent->dtht_where = where; 11582 ent->dtht_nlocals = vstate->dtvs_nlocals; 11583 11584 for (i = 0; i < vstate->dtvs_nlocals; i++) { 11585 dtrace_statvar_t *svar; 11586 11587 if ((svar = vstate->dtvs_locals[i]) == NULL) 11588 continue; 11589 11590 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 11591 ent->dtht_locals[i] = 11592 ((uint64_t *)svar->dtsv_data)[CPU->cpu_id]; 11593 } 11594 } 11595 11596 static uint64_t 11597 dtrace_helper(int which, dtrace_mstate_t *mstate, 11598 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 11599 { 11600 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 11601 uint64_t sarg0 = mstate->dtms_arg[0]; 11602 uint64_t sarg1 = mstate->dtms_arg[1]; 11603 uint64_t rval = 0; 11604 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 11605 dtrace_helper_action_t *helper; 11606 dtrace_vstate_t *vstate; 11607 dtrace_difo_t *pred; 11608 int i, trace = dtrace_helptrace_enabled; 11609 11610 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 11611 11612 if (helpers == NULL) 11613 return (0); 11614 11615 if ((helper = helpers->dthps_actions[which]) == NULL) 11616 return (0); 11617 11618 vstate = &helpers->dthps_vstate; 11619 mstate->dtms_arg[0] = arg0; 11620 mstate->dtms_arg[1] = arg1; 11621 11622 /* 11623 * Now iterate over each helper. If its predicate evaluates to 'true', 11624 * we'll call the corresponding actions. Note that the below calls 11625 * to dtrace_dif_emulate() may set faults in machine state. This is 11626 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 11627 * the stored DIF offset with its own (which is the desired behavior). 11628 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 11629 * from machine state; this is okay, too. 11630 */ 11631 for (; helper != NULL; helper = helper->dthp_next) { 11632 if ((pred = helper->dthp_predicate) != NULL) { 11633 if (trace) 11634 dtrace_helper_trace(helper, vstate, 0); 11635 11636 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 11637 goto next; 11638 11639 if (*flags & CPU_DTRACE_FAULT) 11640 goto err; 11641 } 11642 11643 for (i = 0; i < helper->dthp_nactions; i++) { 11644 if (trace) 11645 dtrace_helper_trace(helper, vstate, i + 1); 11646 11647 rval = dtrace_dif_emulate(helper->dthp_actions[i], 11648 mstate, vstate, state); 11649 11650 if (*flags & CPU_DTRACE_FAULT) 11651 goto err; 11652 } 11653 11654 next: 11655 if (trace) 11656 dtrace_helper_trace(helper, vstate, 11657 DTRACE_HELPTRACE_NEXT); 11658 } 11659 11660 if (trace) 11661 dtrace_helper_trace(helper, vstate, DTRACE_HELPTRACE_DONE); 11662 11663 /* 11664 * Restore the args that we saved upon entry. 11665 */ 11666 mstate->dtms_arg[0] = sarg0; 11667 mstate->dtms_arg[1] = sarg1; 11668 11669 return (rval); 11670 11671 err: 11672 if (trace) 11673 dtrace_helper_trace(helper, vstate, DTRACE_HELPTRACE_ERR); 11674 11675 /* 11676 * Restore the args that we saved upon entry.
11677 */ 11678 mstate->dtms_arg[0] = sarg0; 11679 mstate->dtms_arg[1] = sarg1; 11680 11681 return (0); 11682 } 11683 11684 static void 11685 dtrace_helper_destroy(dtrace_helper_action_t *helper, dtrace_vstate_t *vstate) 11686 { 11687 int i; 11688 11689 if (helper->dthp_predicate != NULL) 11690 dtrace_difo_release(helper->dthp_predicate, vstate); 11691 11692 for (i = 0; i < helper->dthp_nactions; i++) { 11693 ASSERT(helper->dthp_actions[i] != NULL); 11694 dtrace_difo_release(helper->dthp_actions[i], vstate); 11695 } 11696 11697 kmem_free(helper->dthp_actions, 11698 helper->dthp_nactions * sizeof (dtrace_difo_t *)); 11699 kmem_free(helper, sizeof (dtrace_helper_action_t)); 11700 } 11701 11702 static int 11703 dtrace_helper_destroygen(int gen) 11704 { 11705 dtrace_helpers_t *help = curproc->p_dtrace_helpers; 11706 dtrace_vstate_t *vstate; 11707 int i; 11708 11709 ASSERT(MUTEX_HELD(&dtrace_lock)); 11710 11711 if (help == NULL || gen > help->dthps_generation) 11712 return (EINVAL); 11713 11714 vstate = &help->dthps_vstate; 11715 11716 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 11717 dtrace_helper_action_t *last = NULL, *h, *next; 11718 11719 for (h = help->dthps_actions[i]; h != NULL; h = next) { 11720 next = h->dthp_next; 11721 11722 if (h->dthp_generation == gen) { 11723 if (last != NULL) { 11724 last->dthp_next = next; 11725 } else { 11726 help->dthps_actions[i] = next; 11727 } 11728 11729 dtrace_helper_destroy(h, vstate); 11730 } else { 11731 last = h; 11732 } 11733 } 11734 } 11735 11736 return (0); 11737 } 11738 11739 static int 11740 dtrace_helper_validate(dtrace_helper_action_t *helper) 11741 { 11742 int err = 0, i; 11743 dtrace_difo_t *dp; 11744 11745 if ((dp = helper->dthp_predicate) != NULL) 11746 err += dtrace_difo_validate_helper(dp); 11747 11748 for (i = 0; i < helper->dthp_nactions; i++) 11749 err += dtrace_difo_validate_helper(helper->dthp_actions[i]); 11750 11751 return (err == 0); 11752 } 11753 11754 static int 11755 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 11756 { 11757 dtrace_helpers_t *help; 11758 dtrace_helper_action_t *helper, *last; 11759 dtrace_actdesc_t *act; 11760 dtrace_vstate_t *vstate; 11761 dtrace_predicate_t *pred; 11762 int count = 0, nactions = 0, i; 11763 11764 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 11765 return (EINVAL); 11766 11767 help = curproc->p_dtrace_helpers; 11768 last = help->dthps_actions[which]; 11769 vstate = &help->dthps_vstate; 11770 11771 for (count = 0; last != NULL; last = last->dthp_next) { 11772 count++; 11773 if (last->dthp_next == NULL) 11774 break; 11775 } 11776 11777 /* 11778 * If we already have dtrace_helper_actions_max helper actions for this 11779 * helper action type, we'll refuse to add a new one.
11780 */ 11781 if (count >= dtrace_helper_actions_max) 11782 return (ENOSPC); 11783 11784 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 11785 helper->dthp_generation = help->dthps_generation; 11786 11787 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 11788 ASSERT(pred->dtp_difo != NULL); 11789 dtrace_difo_hold(pred->dtp_difo); 11790 helper->dthp_predicate = pred->dtp_difo; 11791 } 11792 11793 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 11794 if (act->dtad_kind != DTRACEACT_DIFEXPR) 11795 goto err; 11796 11797 if (act->dtad_difo == NULL) 11798 goto err; 11799 11800 nactions++; 11801 } 11802 11803 helper->dthp_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 11804 (helper->dthp_nactions = nactions), KM_SLEEP); 11805 11806 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 11807 dtrace_difo_hold(act->dtad_difo); 11808 helper->dthp_actions[i++] = act->dtad_difo; 11809 } 11810 11811 if (!dtrace_helper_validate(helper)) 11812 goto err; 11813 11814 if (last == NULL) { 11815 help->dthps_actions[which] = helper; 11816 } else { 11817 last->dthp_next = helper; 11818 } 11819 11820 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 11821 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 11822 dtrace_helptrace_next = 0; 11823 } 11824 11825 return (0); 11826 err: 11827 dtrace_helper_destroy(helper, vstate); 11828 return (EINVAL); 11829 } 11830 11831 static void 11832 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 11833 dof_helper_t *dofhp) 11834 { 11835 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 11836 11837 mutex_enter(&dtrace_meta_lock); 11838 mutex_enter(&dtrace_lock); 11839 11840 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 11841 /* 11842 * If the dtrace module is loaded but not attached, or if 11843 * there isn't a meta provider registered to deal with 11844 * these provider descriptions, we need to postpone creating 11845 * the actual providers until later. 11846 */ 11847 11848 if (help->dthps_next == NULL && help->dthps_prev == NULL && 11849 dtrace_deferred_pid != help) { 11850 help->dthps_pid = p->p_pid; 11851 help->dthps_next = dtrace_deferred_pid; 11852 help->dthps_prev = NULL; 11853 if (dtrace_deferred_pid != NULL) 11854 dtrace_deferred_pid->dthps_prev = help; 11855 dtrace_deferred_pid = help; 11856 } 11857 11858 mutex_exit(&dtrace_lock); 11859 11860 } else if (dofhp != NULL) { 11861 /* 11862 * If the dtrace module is loaded and we have a particular 11863 * helper provider description, pass that off to the 11864 * meta provider. 11865 */ 11866 11867 mutex_exit(&dtrace_lock); 11868 11869 dtrace_helper_provide(dofhp, p->p_pid); 11870 11871 } else { 11872 /* 11873 * Otherwise, just pass all the helper provider descriptions 11874 * off to the meta provider. 11875 */ 11876 11877 int i; 11878 mutex_exit(&dtrace_lock); 11879 11880 for (i = 0; i < help->dthps_nprovs; i++) { 11881 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 11882 p->p_pid); 11883 } 11884 } 11885 11886 mutex_exit(&dtrace_meta_lock); 11887 } 11888 11889 static int 11890 dtrace_helper_provider_add(dof_helper_t *dofhp) 11891 { 11892 dtrace_helpers_t *help; 11893 dtrace_helper_provider_t *hprov, **tmp_provs; 11894 uint_t tmp_nprovs, i; 11895 11896 help = curproc->p_dtrace_helpers; 11897 ASSERT(help != NULL); 11898 11899 /* 11900 * If we already have dtrace_helper_providers_max helper providers, 11901 * we refuse to add a new one.
11902 */ 11903 if (help->dthps_nprovs >= dtrace_helper_providers_max) 11904 return (ENOSPC); 11905 11906 /* 11907 * Check to make sure this isn't a duplicate. 11908 */ 11909 for (i = 0; i < help->dthps_nprovs; i++) { 11910 if (dofhp->dofhp_addr == 11911 help->dthps_provs[i]->dthp_prov.dofhp_addr) 11912 return (EALREADY); 11913 } 11914 11915 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 11916 hprov->dthp_prov = *dofhp; 11917 hprov->dthp_ref = 1; 11918 11919 tmp_nprovs = help->dthps_nprovs; 11920 tmp_provs = help->dthps_provs; 11921 help->dthps_nprovs++; 11922 help->dthps_provs = kmem_zalloc(help->dthps_nprovs * 11923 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 11924 11925 help->dthps_provs[tmp_nprovs] = hprov; 11926 if (tmp_provs != NULL) { 11927 bcopy(tmp_provs, help->dthps_provs, tmp_nprovs * 11928 sizeof (dtrace_helper_provider_t *)); 11929 kmem_free(tmp_provs, tmp_nprovs * 11930 sizeof (dtrace_helper_provider_t *)); 11931 } 11932 11933 return (0); 11934 } 11935 11936 static void 11937 dtrace_helper_provider_remove(dtrace_helper_provider_t *hprov) 11938 { 11939 mutex_enter(&dtrace_lock); 11940 11941 if (--hprov->dthp_ref == 0) { 11942 mutex_exit(&dtrace_lock); 11943 dtrace_dof_destroy((dof_hdr_t *)hprov->dthp_prov.dofhp_dof); 11944 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 11945 } else { 11946 mutex_exit(&dtrace_lock); 11947 } 11948 } 11949 11950 static int 11951 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 11952 { 11953 uintptr_t daddr = (uintptr_t)dof; 11954 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec; 11955 dof_provider_t *provider; 11956 dof_probe_t *probe; 11957 uint8_t *arg; 11958 char *strtab, *typestr; 11959 dof_stridx_t typeidx; 11960 size_t typesz; 11961 uint_t nprobes, j, k; 11962 11963 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 11964 11965 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 11966 dtrace_dof_error(dof, "misaligned section offset"); 11967 return (-1); 11968 } 11969 11970 provider = (dof_provider_t *)(daddr + sec->dofs_offset); 11971 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 11972 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 11973 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 11974 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 11975 11976 if (str_sec == NULL || prb_sec == NULL || 11977 arg_sec == NULL || off_sec == NULL) 11978 return (-1); 11979 11980 strtab = (char *)(daddr + str_sec->dofs_offset); 11981 11982 if (provider->dofpv_name >= str_sec->dofs_size || 11983 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 11984 dtrace_dof_error(dof, "invalid provider name"); 11985 return (-1); 11986 } 11987 11988 if (prb_sec->dofs_entsize == 0 || 11989 prb_sec->dofs_entsize > prb_sec->dofs_size) { 11990 dtrace_dof_error(dof, "invalid entry size"); 11991 return (-1); 11992 } 11993 11994 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 11995 dtrace_dof_error(dof, "misaligned entry size"); 11996 return (-1); 11997 } 11998 11999 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 12000 dtrace_dof_error(dof, "invalid entry size"); 12001 return (-1); 12002 } 12003 12004 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 12005 dtrace_dof_error(dof, "misaligned section offset"); 12006 return (-1); 12007 } 12008 12009 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 12010 dtrace_dof_error(dof, "invalid entry size"); 12011 return (-1); 12012 } 12013 12014 arg = (uint8_t *)(daddr + 
arg_sec->dofs_offset); 12015 12016 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 12017 12018 /* 12019 * Take a pass through the probes to check for errors. 12020 */ 12021 for (j = 0; j < nprobes; j++) { 12022 probe = (dof_probe_t *)(daddr + prb_sec->dofs_offset + 12023 j * prb_sec->dofs_entsize); 12024 12025 if (probe->dofpr_func >= str_sec->dofs_size) { 12026 dtrace_dof_error(dof, "invalid function name"); 12027 return (-1); 12028 } 12029 12030 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 12031 dtrace_dof_error(dof, "function name too long"); 12032 return (-1); 12033 } 12034 12035 if (probe->dofpr_name >= str_sec->dofs_size || 12036 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 12037 dtrace_dof_error(dof, "invalid probe name"); 12038 return (-1); 12039 } 12040 12041 12042 if (probe->dofpr_offidx + probe->dofpr_noffs < 12043 probe->dofpr_offidx || 12044 (probe->dofpr_offidx + probe->dofpr_noffs) * 12045 off_sec->dofs_entsize > off_sec->dofs_size) { 12046 dtrace_dof_error(dof, "invalid probe offset"); 12047 return (-1); 12048 } 12049 12050 if (probe->dofpr_argidx + probe->dofpr_xargc < 12051 probe->dofpr_argidx || 12052 (probe->dofpr_argidx + probe->dofpr_xargc) * 12053 arg_sec->dofs_entsize > arg_sec->dofs_size) { 12054 dtrace_dof_error(dof, "invalid args"); 12055 return (-1); 12056 } 12057 12058 typeidx = probe->dofpr_nargv; 12059 typestr = strtab + probe->dofpr_nargv; 12060 for (k = 0; k < probe->dofpr_nargc; k++) { 12061 if (typeidx >= str_sec->dofs_size) { 12062 dtrace_dof_error(dof, "bad " 12063 "native argument type"); 12064 return (-1); 12065 } 12066 12067 typesz = strlen(typestr) + 1; 12068 if (typesz > DTRACE_ARGTYPELEN) { 12069 dtrace_dof_error(dof, "native " 12070 "argument type too long"); 12071 return (-1); 12072 } 12073 typeidx += typesz; 12074 typestr += typesz; 12075 } 12076 12077 typeidx = probe->dofpr_xargv; 12078 typestr = strtab + probe->dofpr_xargv; 12079 for (k = 0; k < probe->dofpr_xargc; k++) { 12080 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 12081 dtrace_dof_error(dof, "bad " 12082 "native argument index"); 12083 return (-1); 12084 } 12085 12086 if (typeidx >= str_sec->dofs_size) { 12087 dtrace_dof_error(dof, "bad " 12088 "translated argument type"); 12089 return (-1); 12090 } 12091 12092 typesz = strlen(typestr) + 1; 12093 if (typesz > DTRACE_ARGTYPELEN) { 12094 dtrace_dof_error(dof, "translated argument " 12095 "type too long"); 12096 return (-1); 12097 } 12098 12099 typeidx += typesz; 12100 typestr += typesz; 12101 } 12102 } 12103 12104 return (0); 12105 } 12106 12107 static int 12108 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 12109 { 12110 dtrace_helpers_t *help; 12111 dtrace_vstate_t *vstate; 12112 dtrace_enabling_t *enab = NULL; 12113 int i, gen, rv, nhelpers = 0, destroy = 1; 12114 12115 ASSERT(MUTEX_HELD(&dtrace_lock)); 12116 12117 if ((help = curproc->p_dtrace_helpers) == NULL) 12118 help = dtrace_helpers_create(curproc); 12119 12120 vstate = &help->dthps_vstate; 12121 12122 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 12123 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 12124 dtrace_dof_destroy(dof); 12125 return (rv); 12126 } 12127 12128 /* 12129 * Now we need to walk through the ECB descriptions in the enabling. 
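 *
 * Only descriptions of the form dtrace:helper:ustack: are meaningful
 * here; anything else in the enabling is skipped, and an "unmatched
 * helpers" error is reported below if any descriptions were skipped.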
	/*
	 * Now we need to walk through the ECB descriptions in the enabling.
	 */
	for (i = 0; i < enab->dten_ndesc; i++) {
		dtrace_ecbdesc_t *ep = enab->dten_desc[i];
		dtrace_probedesc_t *desc = &ep->dted_probe;

		if (strcmp(desc->dtpd_provider, "dtrace") != 0)
			continue;

		if (strcmp(desc->dtpd_mod, "helper") != 0)
			continue;

		if (strcmp(desc->dtpd_func, "ustack") != 0)
			continue;

		if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
		    ep)) != 0) {
			/*
			 * Adding this helper action failed -- we are now going
			 * to rip out the entire generation and return failure.
			 */
			(void) dtrace_helper_destroygen(help->dthps_generation);
			dtrace_enabling_destroy(enab);
			dtrace_dof_destroy(dof);
			dtrace_error = rv;
			return (-1);
		}

		nhelpers++;
	}

	if (nhelpers < enab->dten_ndesc)
		dtrace_dof_error(dof, "unmatched helpers");

	if (dhp != NULL) {
		uintptr_t daddr = (uintptr_t)dof;
		int err = 0;

		/*
		 * Look for helper probes.
		 */
		for (i = 0; i < dof->dofh_secnum; i++) {
			dof_sec_t *sec = (dof_sec_t *)(daddr +
			    dof->dofh_secoff + i * dof->dofh_secsize);

			if (sec->dofs_type != DOF_SECT_PROVIDER)
				continue;

			if (dtrace_helper_provider_validate(dof, sec) != 0) {
				err = 1;
				break;
			}
		}

		dhp->dofhp_dof = (uint64_t)dof;
		if (err == 0 && dtrace_helper_provider_add(dhp) == 0)
			destroy = 0;
		else
			dhp = NULL;
	}

	gen = help->dthps_generation++;
	dtrace_enabling_destroy(enab);

	if (dhp != NULL) {
		mutex_exit(&dtrace_lock);
		dtrace_helper_provider_register(curproc, help, dhp);
		mutex_enter(&dtrace_lock);
	}

	if (destroy)
		dtrace_dof_destroy(dof);

	return (gen);
}

static dtrace_helpers_t *
dtrace_helpers_create(proc_t *p)
{
	dtrace_helpers_t *help;

	ASSERT(MUTEX_HELD(&dtrace_lock));
	ASSERT(p->p_dtrace_helpers == NULL);

	help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
	help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
	    DTRACE_NHELPER_ACTIONS, KM_SLEEP);

	p->p_dtrace_helpers = help;
	dtrace_opens++;

	return (help);
}

static void
dtrace_helpers_destroy(void)
{
	dtrace_helpers_t *help;
	dtrace_vstate_t *vstate;
	proc_t *p = curproc;
	int i;

	mutex_enter(&dtrace_lock);

	ASSERT(p->p_dtrace_helpers != NULL);
	ASSERT(dtrace_opens > 0);

	help = p->p_dtrace_helpers;
	vstate = &help->dthps_vstate;

	/*
	 * We're now going to lose the help from this process.
	 */
	p->p_dtrace_helpers = NULL;
	dtrace_sync();

	/*
	 * Destroy the helper actions.
	 */
	for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
		dtrace_helper_action_t *h, *next;

		for (h = help->dthps_actions[i]; h != NULL; h = next) {
			next = h->dthp_next;
			dtrace_helper_destroy(h, vstate);
		}
	}

	mutex_exit(&dtrace_lock);

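	/*
	 * The helper providers, unlike the actions just destroyed, are
	 * reference counted:  they may be shared with a child that
	 * inherited them at fork time (see dtrace_helpers_duplicate(),
	 * below).  Each is truly destroyed only when
	 * dtrace_helper_provider_remove() drops its last reference.
	 */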
	/*
	 * Destroy the helper providers.
	 */
	if (help->dthps_nprovs > 0) {
		mutex_enter(&dtrace_meta_lock);
		if (dtrace_meta_pid != NULL) {
			ASSERT(dtrace_deferred_pid == NULL);

			for (i = 0; i < help->dthps_nprovs; i++) {
				dtrace_helper_remove(
				    &help->dthps_provs[i]->dthp_prov, p->p_pid);
			}
		} else {
			mutex_enter(&dtrace_lock);
			ASSERT(dtrace_deferred_pid != NULL);

			/*
			 * Remove the helper from the deferred list.
			 */
			if (help->dthps_next != NULL)
				help->dthps_next->dthps_prev = help->dthps_prev;
			if (help->dthps_prev != NULL)
				help->dthps_prev->dthps_next = help->dthps_next;
			if (dtrace_deferred_pid == help) {
				dtrace_deferred_pid = help->dthps_next;
				ASSERT(help->dthps_prev == NULL);
			}

			mutex_exit(&dtrace_lock);
		}

		mutex_exit(&dtrace_meta_lock);

		for (i = 0; i < help->dthps_nprovs; i++) {
			dtrace_helper_provider_remove(help->dthps_provs[i]);
		}

		kmem_free(help->dthps_provs, help->dthps_nprovs *
		    sizeof (dtrace_helper_provider_t *));
	}

	mutex_enter(&dtrace_lock);

	dtrace_vstate_fini(&help->dthps_vstate);
	kmem_free(help->dthps_actions,
	    sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS);
	kmem_free(help, sizeof (dtrace_helpers_t));

	if (--dtrace_opens == 0)
		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);

	mutex_exit(&dtrace_lock);
}

static void
dtrace_helpers_duplicate(proc_t *from, proc_t *to)
{
	dtrace_helpers_t *help, *newhelp;
	dtrace_helper_action_t *helper, *new, *last;
	dtrace_difo_t *dp;
	dtrace_vstate_t *vstate;
	int i, j, sz, hasprovs = 0;

	mutex_enter(&dtrace_lock);
	ASSERT(from->p_dtrace_helpers != NULL);
	ASSERT(dtrace_opens > 0);

	help = from->p_dtrace_helpers;
	newhelp = dtrace_helpers_create(to);
	ASSERT(to->p_dtrace_helpers != NULL);

	newhelp->dthps_generation = help->dthps_generation;
	vstate = &newhelp->dthps_vstate;

	/*
	 * Duplicate the helper actions.
	 */
	for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
		if ((helper = help->dthps_actions[i]) == NULL)
			continue;

		for (last = NULL; helper != NULL; helper = helper->dthp_next) {
			new = kmem_zalloc(sizeof (dtrace_helper_action_t),
			    KM_SLEEP);
			new->dthp_generation = helper->dthp_generation;

			if ((dp = helper->dthp_predicate) != NULL) {
				dp = dtrace_difo_duplicate(dp, vstate);
				new->dthp_predicate = dp;
			}

			new->dthp_nactions = helper->dthp_nactions;
			sz = sizeof (dtrace_difo_t *) * new->dthp_nactions;
			new->dthp_actions = kmem_alloc(sz, KM_SLEEP);

			for (j = 0; j < new->dthp_nactions; j++) {
				dtrace_difo_t *dp = helper->dthp_actions[j];

				ASSERT(dp != NULL);
				dp = dtrace_difo_duplicate(dp, vstate);
				new->dthp_actions[j] = dp;
			}

			if (last != NULL) {
				last->dthp_next = new;
			} else {
				newhelp->dthps_actions[i] = new;
			}

			last = new;
		}
	}

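	/*
	 * Note the asymmetry with the helper actions above:  the actions
	 * are deeply copied (each DIF object is duplicated into the new
	 * vstate), while the providers below are shared with the child by
	 * reference -- only dthp_ref is bumped.
	 */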
	/*
	 * Duplicate the helper providers and register them with the
	 * DTrace framework.
	 */
	if (help->dthps_nprovs > 0) {
		newhelp->dthps_nprovs = help->dthps_nprovs;
		newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
		    sizeof (dtrace_helper_provider_t *), KM_SLEEP);
		for (i = 0; i < newhelp->dthps_nprovs; i++) {
			newhelp->dthps_provs[i] = help->dthps_provs[i];
			newhelp->dthps_provs[i]->dthp_ref++;
		}

		hasprovs = 1;
	}

	mutex_exit(&dtrace_lock);

	if (hasprovs)
		dtrace_helper_provider_register(to, newhelp, NULL);
}

/*
 * DTrace Hook Functions
 */
static void
dtrace_module_loaded(struct modctl *ctl)
{
	dtrace_provider_t *prv;

	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&mod_lock);

	ASSERT(ctl->mod_busy);

	/*
	 * We're going to call each provider's per-module provide operation,
	 * specifying only this module.
	 */
	for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
		prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);

	mutex_exit(&mod_lock);
	mutex_exit(&dtrace_provider_lock);

	/*
	 * If we have any retained enablings, we need to match against them.
	 * Enabling probes requires that cpu_lock be held, and we cannot hold
	 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
	 * module.  (In particular, this happens when loading scheduling
	 * classes.)  So if we have any retained enablings, we need to dispatch
	 * our task queue to do the match for us.
	 */
	mutex_enter(&dtrace_lock);

	if (dtrace_retained == NULL) {
		mutex_exit(&dtrace_lock);
		return;
	}

	(void) taskq_dispatch(dtrace_taskq,
	    (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);

	mutex_exit(&dtrace_lock);

	/*
	 * And now, for a little heuristic sleaze:  in general, we want to
	 * match modules as soon as they load.  However, we cannot guarantee
	 * this, because it would lead us to the lock ordering violation
	 * outlined above.  The common case, of course, is that cpu_lock is
	 * _not_ held -- so we delay here for a clock tick, hoping that that's
	 * long enough for the task queue to do its work.  If it's not, it's
	 * not a serious problem -- it just means that the module that we
	 * just loaded may not be immediately instrumentable.
	 */
	delay(1);
}

static void
dtrace_module_unloaded(struct modctl *ctl)
{
	dtrace_probe_t template, *probe, *first, *next;
	dtrace_provider_t *prov;

	template.dtpr_mod = ctl->mod_modname;

	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&mod_lock);
	mutex_enter(&dtrace_lock);

	if (dtrace_bymod == NULL) {
		/*
		 * The DTrace module is loaded (obviously) but not attached;
		 * we don't have any work to do.
		 */
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&mod_lock);
		mutex_exit(&dtrace_lock);
		return;
	}

	for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template);
	    probe != NULL; probe = probe->dtpr_nextmod) {
		if (probe->dtpr_ecb != NULL) {
			mutex_exit(&dtrace_provider_lock);
			mutex_exit(&mod_lock);
			mutex_exit(&dtrace_lock);

			/*
			 * This shouldn't _actually_ be possible -- we're
			 * unloading a module that has an enabled probe in it.
			 * (It's normally up to the provider to make sure that
			 * this can't happen.)  However, because dtps_enable()
			 * doesn't have a failure mode, there can be an
			 * enable/unload race.  Upshot:  we don't want to
			 * assert, but we're not going to disable the
			 * probe, either.
			 */
			if (dtrace_err_verbose) {
				cmn_err(CE_WARN, "unloaded module '%s' had "
				    "enabled probes", ctl->mod_modname);
			}

			return;
		}
	}

	probe = first;

	for (first = NULL; probe != NULL; probe = next) {
		ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe);

		dtrace_probes[probe->dtpr_id - 1] = NULL;

		next = probe->dtpr_nextmod;
		dtrace_hash_remove(dtrace_bymod, probe);
		dtrace_hash_remove(dtrace_byfunc, probe);
		dtrace_hash_remove(dtrace_byname, probe);

		if (first == NULL) {
			first = probe;
			probe->dtpr_nextmod = NULL;
		} else {
			probe->dtpr_nextmod = first;
			first = probe;
		}
	}

	/*
	 * We've removed all of the module's probes from the hash chains and
	 * from the probe array.  Now issue a dtrace_sync() to be sure that
	 * everyone has cleared out from any probe array processing.
	 */
	dtrace_sync();

	for (probe = first; probe != NULL; probe = first) {
		first = probe->dtpr_nextmod;
		prov = probe->dtpr_provider;
		prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
		    probe->dtpr_arg);
		kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
		kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
		kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
		vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1);
		kmem_free(probe, sizeof (dtrace_probe_t));
	}

	mutex_exit(&dtrace_lock);
	mutex_exit(&mod_lock);
	mutex_exit(&dtrace_provider_lock);
}

void
dtrace_suspend(void)
{
	dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
}

void
dtrace_resume(void)
{
	dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume));
}

static int
dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	mutex_enter(&dtrace_lock);

	switch (what) {
	case CPU_CONFIG: {
		dtrace_state_t *state;
		dtrace_optval_t *opt, rs, c;

		/*
		 * For now, we only allocate a new buffer for anonymous state.
		 */
		if ((state = dtrace_anon.dta_state) == NULL)
			break;

		if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
			break;

		opt = state->dts_options;
		c = opt[DTRACEOPT_CPU];

		if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu)
			break;

		/*
		 * Regardless of what the actual policy is, we're going to
		 * temporarily set our resize policy to be manual.  We're
		 * also going to temporarily set our CPU option to denote
		 * the newly configured CPU.
		 */
		rs = opt[DTRACEOPT_BUFRESIZE];
		opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL;
		opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu;

		(void) dtrace_state_buffers(state);

		opt[DTRACEOPT_BUFRESIZE] = rs;
		opt[DTRACEOPT_CPU] = c;

		break;
	}

	case CPU_UNCONFIG:
		/*
		 * We don't free the buffer in the CPU_UNCONFIG case.
		 * (The buffer will be freed when the consumer exits.)
		 */
		break;

	default:
		break;
	}

	mutex_exit(&dtrace_lock);
	return (0);
}

static void
dtrace_cpu_setup_initial(processorid_t cpu)
{
	(void) dtrace_cpu_setup(CPU_CONFIG, cpu);
}

static void
dtrace_toxrange_add(uintptr_t base, uintptr_t limit)
{
	if (dtrace_toxranges >= dtrace_toxranges_max) {
		int osize, nsize;
		dtrace_toxrange_t *range;

		osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);

		if (osize == 0) {
			ASSERT(dtrace_toxrange == NULL);
			ASSERT(dtrace_toxranges_max == 0);
			dtrace_toxranges_max = 1;
		} else {
			dtrace_toxranges_max <<= 1;
		}

		nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
		range = kmem_zalloc(nsize, KM_SLEEP);

		if (dtrace_toxrange != NULL) {
			ASSERT(osize != 0);
			bcopy(dtrace_toxrange, range, osize);
			kmem_free(dtrace_toxrange, osize);
		}

		dtrace_toxrange = range;
	}

	ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == NULL);
	ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == NULL);

	dtrace_toxrange[dtrace_toxranges].dtt_base = base;
	dtrace_toxrange[dtrace_toxranges].dtt_limit = limit;
	dtrace_toxranges++;
}

/*
 * DTrace Driver Cookbook Functions
 */
/*ARGSUSED*/
static int
dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	dtrace_provider_id_t id;
	dtrace_state_t *state = NULL;
	dtrace_enabling_t *enab;

	mutex_enter(&cpu_lock);
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);

	if (ddi_soft_state_init(&dtrace_softstate, sizeof (dtrace_state_t) +
	    NCPU * sizeof (dtrace_buffer_t), 0) != 0) {
		cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state");
		mutex_exit(&cpu_lock);
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		return (DDI_FAILURE);
	}

	if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR,
	    DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE ||
	    ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR,
	    DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) {
		cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes");
		ddi_remove_minor_node(devi, NULL);
		ddi_soft_state_fini(&dtrace_softstate);
		mutex_exit(&cpu_lock);
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		return (DDI_FAILURE);
	}

	ddi_report_dev(devi);
	dtrace_devi = devi;

	dtrace_modload = dtrace_module_loaded;
	dtrace_modunload = dtrace_module_unloaded;
	dtrace_cpu_init = dtrace_cpu_setup_initial;
	dtrace_helpers_cleanup = dtrace_helpers_destroy;
	dtrace_helpers_fork = dtrace_helpers_duplicate;
	dtrace_cpustart_init = dtrace_suspend;
	dtrace_cpustart_fini = dtrace_resume;
	dtrace_debugger_init = dtrace_suspend;
	dtrace_debugger_fini = dtrace_resume;
	dtrace_kreloc_init = dtrace_suspend;
	dtrace_kreloc_fini = dtrace_resume;

	register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);

	ASSERT(MUTEX_HELD(&cpu_lock));

	dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1,
	    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
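	/*
	 * Note that dtrace_arena (above) and dtrace_minor (below) are not
	 * arenas of memory:  VMC_IDENTIFIER denotes that the "addresses"
	 * they hand out are opaque integers -- probe identifiers and clone
	 * minor numbers, respectively.
	 */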
vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 12718 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 12719 VM_SLEEP | VMC_IDENTIFIER); 12720 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 12721 1, INT_MAX, 0); 12722 12723 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 12724 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 12725 NULL, NULL, NULL, NULL, NULL, 0); 12726 12727 ASSERT(MUTEX_HELD(&cpu_lock)); 12728 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 12729 offsetof(dtrace_probe_t, dtpr_nextmod), 12730 offsetof(dtrace_probe_t, dtpr_prevmod)); 12731 12732 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 12733 offsetof(dtrace_probe_t, dtpr_nextfunc), 12734 offsetof(dtrace_probe_t, dtpr_prevfunc)); 12735 12736 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 12737 offsetof(dtrace_probe_t, dtpr_nextname), 12738 offsetof(dtrace_probe_t, dtpr_prevname)); 12739 12740 if (dtrace_retain_max < 1) { 12741 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 12742 "setting to 1", dtrace_retain_max); 12743 dtrace_retain_max = 1; 12744 } 12745 12746 /* 12747 * Now discover our toxic ranges. 12748 */ 12749 dtrace_toxic_ranges(dtrace_toxrange_add); 12750 12751 /* 12752 * Before we register ourselves as a provider to our own framework, 12753 * we would like to assert that dtrace_provider is NULL -- but that's 12754 * not true if we were loaded as a dependency of a DTrace provider. 12755 * Once we've registered, we can assert that dtrace_provider is our 12756 * pseudo provider. 12757 */ 12758 (void) dtrace_register("dtrace", &dtrace_provider_attr, 12759 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 12760 12761 ASSERT(dtrace_provider != NULL); 12762 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 12763 12764 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 12765 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 12766 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 12767 dtrace_provider, NULL, NULL, "END", 0, NULL); 12768 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 12769 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 12770 12771 dtrace_anon_property(); 12772 mutex_exit(&cpu_lock); 12773 12774 /* 12775 * If DTrace helper tracing is enabled, we need to allocate the 12776 * trace buffer and initialize the values. 12777 */ 12778 if (dtrace_helptrace_enabled) { 12779 ASSERT(dtrace_helptrace_buffer == NULL); 12780 dtrace_helptrace_buffer = 12781 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 12782 dtrace_helptrace_next = 0; 12783 } 12784 12785 /* 12786 * If there are already providers, we must ask them to provide their 12787 * probes, and then match any anonymous enabling against them. Note 12788 * that there should be no other retained enablings at this time: 12789 * the only retained enablings at this time should be the anonymous 12790 * enabling. 12791 */ 12792 if (dtrace_anon.dta_enabling != NULL) { 12793 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 12794 12795 dtrace_enabling_provide(NULL); 12796 state = dtrace_anon.dta_state; 12797 12798 /* 12799 * We couldn't hold cpu_lock across the above call to 12800 * dtrace_enabling_provide(), but we must hold it to actually 12801 * enable the probes. We have to drop all of our locks, pick 12802 * up cpu_lock, and regain our locks before matching the 12803 * retained anonymous enabling. 
		state = dtrace_anon.dta_state;

		/*
		 * We couldn't hold cpu_lock across the above call to
		 * dtrace_enabling_provide(), but we must hold it to actually
		 * enable the probes.  We have to drop all of our locks, pick
		 * up cpu_lock, and regain our locks before matching the
		 * retained anonymous enabling.
		 */
		mutex_exit(&dtrace_lock);
		mutex_exit(&dtrace_provider_lock);

		mutex_enter(&cpu_lock);
		mutex_enter(&dtrace_provider_lock);
		mutex_enter(&dtrace_lock);

		if ((enab = dtrace_anon.dta_enabling) != NULL)
			(void) dtrace_enabling_match(enab, NULL);

		mutex_exit(&cpu_lock);
	}

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	if (state != NULL) {
		/*
		 * If we created any anonymous state, set it going now.
		 */
		(void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
{
	dtrace_state_t *state;
	uint32_t priv;
	uid_t uid;

	if (getminor(*devp) == DTRACEMNRN_HELPER)
		return (0);

	/*
	 * If this wasn't an open with the "helper" minor, then it must be
	 * the "dtrace" minor.
	 */
	ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE);

	/*
	 * If no DTRACE_PRIV_* bits are set in the credential, then the
	 * caller lacks sufficient permission to do anything with DTrace.
	 */
	dtrace_cred2priv(cred_p, &priv, &uid);
	if (priv == DTRACE_PRIV_NONE)
		return (EACCES);

	/*
	 * Ask all providers to provide all their probes.
	 */
	mutex_enter(&dtrace_provider_lock);
	dtrace_probe_provide(NULL, NULL);
	mutex_exit(&dtrace_provider_lock);

	mutex_enter(&cpu_lock);
	mutex_enter(&dtrace_lock);
	dtrace_opens++;
	dtrace_membar_producer();

	/*
	 * If the kernel debugger is active (that is, if the kernel debugger
	 * modified text in some way), we won't allow the open.
	 */
	if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
		dtrace_opens--;
		mutex_exit(&cpu_lock);
		mutex_exit(&dtrace_lock);
		return (EBUSY);
	}

	state = dtrace_state_create(devp, cred_p);
	mutex_exit(&cpu_lock);

	if (state == NULL) {
		if (--dtrace_opens == 0)
			(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
		mutex_exit(&dtrace_lock);
		return (EAGAIN);
	}

	mutex_exit(&dtrace_lock);

	return (0);
}

/*ARGSUSED*/
static int
dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
{
	minor_t minor = getminor(dev);
	dtrace_state_t *state;

	if (minor == DTRACEMNRN_HELPER)
		return (0);

	state = ddi_get_soft_state(dtrace_softstate, minor);

	mutex_enter(&cpu_lock);
	mutex_enter(&dtrace_lock);

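	/*
	 * Note that dts_anon is only set if this consumer adopted the
	 * anonymous state; in that case there are two states to tear
	 * down here.
	 */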
	if (state->dts_anon) {
		/*
		 * There is anonymous state.  Destroy that first.
		 */
		ASSERT(dtrace_anon.dta_state == NULL);
		dtrace_state_destroy(state->dts_anon);
	}

	dtrace_state_destroy(state);
	ASSERT(dtrace_opens > 0);
	if (--dtrace_opens == 0)
		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);

	mutex_exit(&dtrace_lock);
	mutex_exit(&cpu_lock);

	return (0);
}

/*ARGSUSED*/
static int
dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv)
{
	int rval;
	dof_helper_t help, *dhp = NULL;

	switch (cmd) {
	case DTRACEHIOC_ADDDOF:
		if (copyin((void *)arg, &help, sizeof (help)) != 0) {
			dtrace_dof_error(NULL, "failed to copyin DOF helper");
			return (EFAULT);
		}

		dhp = &help;
		arg = (intptr_t)help.dofhp_dof;
		/*FALLTHROUGH*/

	case DTRACEHIOC_ADD: {
		dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval);

		if (dof == NULL)
			return (rval);

		mutex_enter(&dtrace_lock);
		dtrace_error = 0;

		/*
		 * dtrace_helper_slurp() takes responsibility for the dof --
		 * it may free it now or it may save it and free it later.
		 */
		if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) {
			*rv = rval;
			rval = 0;
		} else {
			rval = EINVAL;
		}

		mutex_exit(&dtrace_lock);
		return (rval);
	}

	case DTRACEHIOC_REMOVE: {
		mutex_enter(&dtrace_lock);
		rval = dtrace_helper_destroygen(arg);
		mutex_exit(&dtrace_lock);

		return (rval);
	}

	default:
		break;
	}

	return (ENOTTY);
}

/*ARGSUSED*/
static int
dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
{
	minor_t minor = getminor(dev);
	dtrace_state_t *state;
	int rval;

	if (minor == DTRACEMNRN_HELPER)
		return (dtrace_ioctl_helper(cmd, arg, rv));

	state = ddi_get_soft_state(dtrace_softstate, minor);

	if (state->dts_anon) {
		ASSERT(dtrace_anon.dta_state == NULL);
		state = state->dts_anon;
	}

	switch (cmd) {
	case DTRACEIOC_PROVIDER: {
		dtrace_providerdesc_t pvd;
		dtrace_provider_t *pvp;

		if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0)
			return (EFAULT);

		pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0';
		mutex_enter(&dtrace_provider_lock);

		for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) {
			if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0)
				break;
		}

		mutex_exit(&dtrace_provider_lock);

		if (pvp == NULL)
			return (ESRCH);

		bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t));
		bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t));
		if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_EPROBE: {
		dtrace_eprobedesc_t epdesc;
		dtrace_ecb_t *ecb;
		dtrace_action_t *act;
		void *buf;
		size_t size;
		uintptr_t dest;
		int nrecs;

		if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);

		if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) {
			mutex_exit(&dtrace_lock);
			return (EINVAL);
		}

		if (ecb->dte_probe == NULL) {
			mutex_exit(&dtrace_lock);
			return (EINVAL);
		}

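		/*
		 * The usual two-pass pattern follows:  first count the
		 * records (skipping aggregations and tuple members, which
		 * are not exposed here), then copy the surviving record
		 * descriptions out below.
		 */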
		epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id;
		epdesc.dtepd_uarg = ecb->dte_uarg;
		epdesc.dtepd_size = ecb->dte_size;

		nrecs = epdesc.dtepd_nrecs;
		epdesc.dtepd_nrecs = 0;
		for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
			if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
				continue;

			epdesc.dtepd_nrecs++;
		}

		/*
		 * Now that we have the size, we need to allocate a temporary
		 * buffer in which to store the complete description.  We need
		 * the temporary buffer to be able to drop dtrace_lock()
		 * across the copyout(), below.
		 */
		size = sizeof (dtrace_eprobedesc_t) +
		    (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t));

		buf = kmem_alloc(size, KM_SLEEP);
		dest = (uintptr_t)buf;

		bcopy(&epdesc, (void *)dest, sizeof (epdesc));
		dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]);

		for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
			if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
				continue;

			if (nrecs-- == 0)
				break;

			bcopy(&act->dta_rec, (void *)dest,
			    sizeof (dtrace_recdesc_t));
			dest += sizeof (dtrace_recdesc_t);
		}

		mutex_exit(&dtrace_lock);

		if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
			kmem_free(buf, size);
			return (EFAULT);
		}

		kmem_free(buf, size);
		return (0);
	}

	case DTRACEIOC_AGGDESC: {
		dtrace_aggdesc_t aggdesc;
		dtrace_action_t *act;
		dtrace_aggregation_t *agg;
		int nrecs;
		uint32_t offs;
		dtrace_recdesc_t *lrec;
		void *buf;
		size_t size;
		uintptr_t dest;

		if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);

		if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
			mutex_exit(&dtrace_lock);
			return (EINVAL);
		}

		aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;

		nrecs = aggdesc.dtagd_nrecs;
		aggdesc.dtagd_nrecs = 0;

		offs = agg->dtag_base;
		lrec = &agg->dtag_action.dta_rec;
		aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;

		for (act = agg->dtag_first; ; act = act->dta_next) {
			ASSERT(act->dta_intuple ||
			    DTRACEACT_ISAGG(act->dta_kind));
			aggdesc.dtagd_nrecs++;

			if (act == &agg->dtag_action)
				break;
		}

		/*
		 * Now that we have the size, we need to allocate a temporary
		 * buffer in which to store the complete description.  We need
		 * the temporary buffer to be able to drop dtrace_lock()
		 * across the copyout(), below.
		 */
		size = sizeof (dtrace_aggdesc_t) +
		    (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));

		buf = kmem_alloc(size, KM_SLEEP);
		dest = (uintptr_t)buf;

		bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
		dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);

		for (act = agg->dtag_first; ; act = act->dta_next) {
			dtrace_recdesc_t rec = act->dta_rec;

			if (nrecs-- == 0)
				break;

			rec.dtrd_offset -= offs;
			bcopy(&rec, (void *)dest, sizeof (rec));
			dest += sizeof (dtrace_recdesc_t);

			if (act == &agg->dtag_action)
				break;
		}

		mutex_exit(&dtrace_lock);

		if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
			kmem_free(buf, size);
			return (EFAULT);
		}

		kmem_free(buf, size);
		return (0);
	}

	case DTRACEIOC_ENABLE: {
		dof_hdr_t *dof;
		dtrace_enabling_t *enab = NULL;
		dtrace_vstate_t *vstate;
		int err = 0;

		*rv = 0;

		/*
		 * If a NULL argument has been passed, we take this as our
		 * cue to reevaluate our enablings.
		 */
		if (arg == NULL) {
			mutex_enter(&cpu_lock);
			mutex_enter(&dtrace_lock);
			err = dtrace_enabling_matchstate(state, rv);
			mutex_exit(&dtrace_lock);
			mutex_exit(&cpu_lock);

			return (err);
		}

		if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
			return (rval);

		mutex_enter(&cpu_lock);
		mutex_enter(&dtrace_lock);
		vstate = &state->dts_vstate;

		if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&cpu_lock);
			dtrace_dof_destroy(dof);
			return (EBUSY);
		}

		if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&cpu_lock);
			dtrace_dof_destroy(dof);
			return (EINVAL);
		}

		if ((rval = dtrace_dof_options(dof, state)) != 0) {
			dtrace_enabling_destroy(enab);
			mutex_exit(&dtrace_lock);
			mutex_exit(&cpu_lock);
			dtrace_dof_destroy(dof);
			return (rval);
		}

		if ((err = dtrace_enabling_match(enab, rv)) == 0) {
			err = dtrace_enabling_retain(enab);
		} else {
			dtrace_enabling_destroy(enab);
		}

		mutex_exit(&cpu_lock);
		mutex_exit(&dtrace_lock);
		dtrace_dof_destroy(dof);

		return (err);
	}

	case DTRACEIOC_REPLICATE: {
		dtrace_repldesc_t desc;
		dtrace_probedesc_t *match = &desc.dtrpd_match;
		dtrace_probedesc_t *create = &desc.dtrpd_create;
		int err;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
		match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
		match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
		match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';

		create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
		create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
		create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
		create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';

		mutex_enter(&dtrace_lock);
		err = dtrace_enabling_replicate(state, match, create);
		mutex_exit(&dtrace_lock);

		return (err);
	}

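	/*
	 * DTRACEIOC_PROBES and DTRACEIOC_PROBEMATCH effectively act as
	 * iterators:  the consumer passes in a probe description whose
	 * dtpd_id serves as the cursor, and is handed back the description
	 * of the next probe at or above that identifier (matching the
	 * description, in the DTRACEIOC_PROBEMATCH case).
	 */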
	case DTRACEIOC_PROBEMATCH:
	case DTRACEIOC_PROBES: {
		dtrace_probe_t *probe = NULL;
		dtrace_probedesc_t desc;
		dtrace_probekey_t pkey;
		dtrace_id_t i;
		int m = 0;
		uint32_t priv;
		uid_t uid;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
		desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
		desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
		desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';

		/*
		 * Before we attempt to match this probe, we want to give
		 * all providers the opportunity to provide it.
		 */
		if (desc.dtpd_id == DTRACE_IDNONE) {
			mutex_enter(&dtrace_provider_lock);
			dtrace_probe_provide(&desc, NULL);
			mutex_exit(&dtrace_provider_lock);
			desc.dtpd_id++;
		}

		if (cmd == DTRACEIOC_PROBEMATCH) {
			dtrace_probekey(&desc, &pkey);
			pkey.dtpk_id = DTRACE_IDNONE;
		}

		uid = crgetuid(cr);
		dtrace_cred2priv(cr, &priv, &uid);

		mutex_enter(&dtrace_lock);

		if (cmd == DTRACEIOC_PROBEMATCH) {
			for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
				if ((probe = dtrace_probes[i - 1]) != NULL &&
				    (m = dtrace_match_probe(probe, &pkey,
				    priv, uid)) != 0)
					break;
			}

			if (m < 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}

		} else {
			for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
				if ((probe = dtrace_probes[i - 1]) != NULL &&
				    dtrace_match_priv(probe, priv, uid))
					break;
			}
		}

		if (probe == NULL) {
			mutex_exit(&dtrace_lock);
			return (ESRCH);
		}

		dtrace_probe_description(probe, &desc);
		mutex_exit(&dtrace_lock);

		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_PROBEARG: {
		dtrace_argdesc_t desc;
		dtrace_probe_t *probe;
		dtrace_provider_t *prov;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		if (desc.dtargd_id == DTRACE_IDNONE)
			return (EINVAL);

		if (desc.dtargd_ndx == DTRACE_ARGNONE)
			return (EINVAL);

		mutex_enter(&dtrace_provider_lock);
		mutex_enter(&mod_lock);
		mutex_enter(&dtrace_lock);

		if (desc.dtargd_id > dtrace_nprobes) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&mod_lock);
			mutex_exit(&dtrace_provider_lock);
			return (EINVAL);
		}

		if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&mod_lock);
			mutex_exit(&dtrace_provider_lock);
			return (EINVAL);
		}

		mutex_exit(&dtrace_lock);

		prov = probe->dtpr_provider;

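		/*
		 * Note that dtrace_lock has been dropped, but mod_lock and
		 * dtrace_provider_lock are retained across the call into the
		 * provider:  those suffice to keep the probe and its provider
		 * from disappearing, and dtps_getargdesc() may need to do
		 * work (e.g. CTF lookups against a module) that is better
		 * done without dtrace_lock held.
		 */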
		if (prov->dtpv_pops.dtps_getargdesc == NULL) {
			/*
			 * There isn't any typed information for this probe.
			 * Set the argument number to DTRACE_ARGNONE.
			 */
			desc.dtargd_ndx = DTRACE_ARGNONE;
		} else {
			desc.dtargd_native[0] = '\0';
			desc.dtargd_xlate[0] = '\0';
			desc.dtargd_mapping = desc.dtargd_ndx;

			prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
			    probe->dtpr_id, probe->dtpr_arg, &desc);
		}

		mutex_exit(&mod_lock);
		mutex_exit(&dtrace_provider_lock);

		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_GO: {
		processorid_t cpuid;
		rval = dtrace_state_go(state, &cpuid);

		if (rval != 0)
			return (rval);

		if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_STOP: {
		processorid_t cpuid;

		mutex_enter(&dtrace_lock);
		rval = dtrace_state_stop(state, &cpuid);
		mutex_exit(&dtrace_lock);

		if (rval != 0)
			return (rval);

		if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_DOFGET: {
		dof_hdr_t hdr, *dof;
		uint64_t len;

		if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);
		dof = dtrace_dof_create(state);
		mutex_exit(&dtrace_lock);

		len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
		rval = copyout(dof, (void *)arg, len);
		dtrace_dof_destroy(dof);

		return (rval == 0 ? 0 : EFAULT);
	}

	case DTRACEIOC_AGGSNAP:
	case DTRACEIOC_BUFSNAP: {
		dtrace_bufdesc_t desc;
		caddr_t cached;
		dtrace_buffer_t *buf;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
			return (EINVAL);

		mutex_enter(&dtrace_lock);

		if (cmd == DTRACEIOC_BUFSNAP) {
			buf = &state->dts_buffer[desc.dtbd_cpu];
		} else {
			buf = &state->dts_aggbuffer[desc.dtbd_cpu];
		}

		if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
			size_t sz = buf->dtb_offset;

			if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
				mutex_exit(&dtrace_lock);
				return (EBUSY);
			}

			/*
			 * If this buffer has already been consumed, we're
			 * going to indicate that there's nothing left here
			 * to consume.
			 */
			if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
				mutex_exit(&dtrace_lock);

				desc.dtbd_size = 0;
				desc.dtbd_drops = 0;
				desc.dtbd_errors = 0;
				desc.dtbd_oldest = 0;
				sz = sizeof (desc);

				if (copyout(&desc, (void *)arg, sz) != 0)
					return (EFAULT);

				return (0);
			}

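			/*
			 * (Ring and fill buffers are never switched; they may
			 * only be snapshotted once tracing has stopped, per
			 * the DTRACE_ACTIVITY_STOPPED check above.)
			 */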
			/*
			 * If this is a ring buffer that has wrapped, we want
			 * to copy the whole thing out.
			 */
			if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
				dtrace_buffer_polish(buf);
				sz = buf->dtb_size;
			}

			if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
				mutex_exit(&dtrace_lock);
				return (EFAULT);
			}

			desc.dtbd_size = sz;
			desc.dtbd_drops = buf->dtb_drops;
			desc.dtbd_errors = buf->dtb_errors;
			desc.dtbd_oldest = buf->dtb_xamot_offset;

			mutex_exit(&dtrace_lock);

			if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
				return (EFAULT);

			buf->dtb_flags |= DTRACEBUF_CONSUMED;

			return (0);
		}

		if (buf->dtb_tomax == NULL) {
			ASSERT(buf->dtb_xamot == NULL);
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		cached = buf->dtb_tomax;
		ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));

		dtrace_xcall(desc.dtbd_cpu,
		    (dtrace_xcall_t)dtrace_buffer_switch, buf);

		state->dts_errors += buf->dtb_xamot_errors;

		/*
		 * If the buffers did not actually switch, then the cross call
		 * did not take place -- presumably because the given CPU is
		 * not in the ready set.  If this is the case, we'll return
		 * ENOENT.
		 */
		if (buf->dtb_tomax == cached) {
			ASSERT(buf->dtb_xamot != cached);
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		ASSERT(cached == buf->dtb_xamot);

		/*
		 * We have our snapshot; now copy it out.
		 */
		if (copyout(buf->dtb_xamot, desc.dtbd_data,
		    buf->dtb_xamot_offset) != 0) {
			mutex_exit(&dtrace_lock);
			return (EFAULT);
		}

		desc.dtbd_size = buf->dtb_xamot_offset;
		desc.dtbd_drops = buf->dtb_xamot_drops;
		desc.dtbd_errors = buf->dtb_xamot_errors;
		desc.dtbd_oldest = 0;

		mutex_exit(&dtrace_lock);

		/*
		 * Finally, copy out the buffer description.
		 */
		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_CONF: {
		dtrace_conf_t conf;

		bzero(&conf, sizeof (conf));
		conf.dtc_difversion = DIF_VERSION;
		conf.dtc_difintregs = DIF_DIR_NREGS;
		conf.dtc_diftupregs = DIF_DTR_NREGS;
		conf.dtc_ctfmodel = CTF_MODEL_NATIVE;

		if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_STATUS: {
		dtrace_status_t stat;
		dtrace_dstate_t *dstate;
		int i, j;
		uint64_t nerrs;

		/*
		 * See the comment in dtrace_state_deadman() for the reason
		 * for setting dts_laststatus to INT64_MAX before setting
		 * it to the correct value.
		 */
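		/*
		 * (Briefly:  dts_laststatus is read without locks by the
		 * deadman; storing INT64_MAX first assures that a torn
		 * 64-bit write on a 32-bit platform can never be mistaken
		 * for a stale -- and therefore expired -- status time.)
		 */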
		state->dts_laststatus = INT64_MAX;
		dtrace_membar_producer();
		state->dts_laststatus = dtrace_gethrtime();

		bzero(&stat, sizeof (stat));

		mutex_enter(&dtrace_lock);

		if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
			stat.dtst_exiting = 1;

		nerrs = state->dts_errors;
		dstate = &state->dts_vstate.dtvs_dynvars;

		for (i = 0; i < NCPU; i++) {
			dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];

			stat.dtst_dyndrops += dcpu->dtdsc_drops;
			stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
			stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;

			if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
				stat.dtst_filled++;

			nerrs += state->dts_buffer[i].dtb_errors;

			for (j = 0; j < state->dts_nspeculations; j++) {
				dtrace_speculation_t *spec;
				dtrace_buffer_t *buf;

				spec = &state->dts_speculations[j];
				buf = &spec->dtsp_buffer[i];
				stat.dtst_specdrops += buf->dtb_xamot_drops;
			}
		}

		stat.dtst_specdrops_busy = state->dts_speculations_busy;
		stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
		stat.dtst_killed =
		    (state->dts_activity == DTRACE_ACTIVITY_KILLED);
		stat.dtst_errors = nerrs;

		mutex_exit(&dtrace_lock);

		if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_FORMAT: {
		dtrace_fmtdesc_t fmt;
		char *str;
		int len;

		if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);

		if (fmt.dtfd_format == 0 ||
		    fmt.dtfd_format > state->dts_nformats) {
			mutex_exit(&dtrace_lock);
			return (EINVAL);
		}

		/*
		 * Format strings are allocated contiguously and they are
		 * never freed; if a format index is less than the number
		 * of formats, we can assert that the format map is non-NULL
		 * and that the format for the specified index is non-NULL.
		 */
		ASSERT(state->dts_formats != NULL);
		str = state->dts_formats[fmt.dtfd_format - 1];
		ASSERT(str != NULL);

		len = strlen(str) + 1;

		if (len > fmt.dtfd_length) {
			fmt.dtfd_length = len;

			if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		} else {
			if (copyout(str, fmt.dtfd_string, len) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		}

		mutex_exit(&dtrace_lock);
		return (0);
	}

	default:
		break;
	}

	return (ENOTTY);
}

/*ARGSUSED*/
static int
dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	dtrace_state_t *state;

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	mutex_enter(&cpu_lock);
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);

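	/*
	 * Detach can fail for two reasons, checked in turn below:  there
	 * are open consumers (processes with helpers count as opens), or
	 * our own pseudo-provider cannot be unregistered -- as is the case
	 * when other providers are still registered.
	 */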
	if (dtrace_opens > 0) {
		/*
		 * This is only possible because of DTrace helpers attached
		 * to a process -- they count as a DTrace open.  If the locking
		 * weren't such a mess, we could assert that p_dtrace_helpers
		 * is non-NULL for some process.
		 */
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	dtrace_provider = NULL;

	if ((state = dtrace_anon_grab()) != NULL) {
		/*
		 * If there were ECBs on this state, the provider should
		 * have not been allowed to detach; assert that there is
		 * none.
		 */
		ASSERT(state->dts_necbs == 0);
		dtrace_state_destroy(state);

		/*
		 * If we're being detached with anonymous state, we need to
		 * indicate to the kernel debugger that DTrace is now inactive.
		 */
		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
	}

	bzero(&dtrace_anon, sizeof (dtrace_anon_t));
	unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
	dtrace_cpu_init = NULL;
	dtrace_helpers_cleanup = NULL;
	dtrace_helpers_fork = NULL;
	dtrace_cpustart_init = NULL;
	dtrace_cpustart_fini = NULL;
	dtrace_debugger_init = NULL;
	dtrace_debugger_fini = NULL;
	dtrace_kreloc_init = NULL;
	dtrace_kreloc_fini = NULL;
	dtrace_modload = NULL;
	dtrace_modunload = NULL;

	mutex_exit(&cpu_lock);

	if (dtrace_helptrace_enabled) {
		kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
		dtrace_helptrace_buffer = NULL;
	}

	kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
	dtrace_probes = NULL;
	dtrace_nprobes = 0;

	dtrace_hash_destroy(dtrace_bymod);
	dtrace_hash_destroy(dtrace_byfunc);
	dtrace_hash_destroy(dtrace_byname);
	dtrace_bymod = NULL;
	dtrace_byfunc = NULL;
	dtrace_byname = NULL;

	kmem_cache_destroy(dtrace_state_cache);
	vmem_destroy(dtrace_minor);
	vmem_destroy(dtrace_arena);

	if (dtrace_toxrange != NULL) {
		kmem_free(dtrace_toxrange,
		    dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
		dtrace_toxrange = NULL;
		dtrace_toxranges = 0;
		dtrace_toxranges_max = 0;
	}

	ddi_remove_minor_node(dtrace_devi, NULL);
	dtrace_devi = NULL;

	ddi_soft_state_fini(&dtrace_softstate);

	ASSERT(dtrace_vtime_references == 0);
	ASSERT(dtrace_opens == 0);
	ASSERT(dtrace_retained == NULL);

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	/*
	 * We don't destroy the task queue until after we have dropped our
	 * locks (taskq_destroy() may block on running tasks).  To prevent
	 * attempting to do work after we have effectively detached but before
	 * the task queue has been destroyed, all tasks dispatched via the
	 * task queue must check that DTrace is still attached before
	 * performing any operation.
	 */
	taskq_destroy(dtrace_taskq);
	dtrace_taskq = NULL;

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)dtrace_devi;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}

static struct cb_ops dtrace_cb_ops = {
	dtrace_open,		/* open */
	dtrace_close,		/* close */
	nulldev,		/* strategy */
	nulldev,		/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	dtrace_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_MP		/* Driver compatibility flag */
};

static struct dev_ops dtrace_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	dtrace_info,		/* get_dev_info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	dtrace_attach,		/* attach */
	dtrace_detach,		/* detach */
	nodev,			/* reset */
	&dtrace_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	nodev			/* dev power */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* module type (this is a pseudo driver) */
	"Dynamic Tracing",	/* name of module */
	&dtrace_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}