/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DTrace - Dynamic Tracing for Solaris
 *
 * This is the implementation of the Solaris Dynamic Tracing framework
 * (DTrace).  The user-visible interface to DTrace is described at length in
 * the "Solaris Dynamic Tracing Guide".  The interfaces between the libdtrace
 * library, the in-kernel DTrace framework, and the DTrace providers are
 * described in the block comments in the <sys/dtrace.h> header file.  The
 * internal architecture of DTrace is described in the block comments in the
 * <sys/dtrace_impl.h> header file.  The comments contained within the DTrace
 * implementation very much assume mastery of all of these sources; if one has
 * an unanswered question about the implementation, one should consult them
 * first.
 *
 * The functions here are ordered roughly as follows:
 *
 *   - Probe context functions
 *   - Probe hashing functions
 *   - Non-probe context utility functions
 *   - Matching functions
 *   - Provider-to-Framework API functions
 *   - Probe management functions
 *   - DIF object functions
 *   - Format functions
 *   - Predicate functions
 *   - ECB functions
 *   - Buffer functions
 *   - Enabling functions
 *   - DOF functions
 *   - Anonymous enabling functions
 *   - Consumer state functions
 *   - Helper functions
 *   - Hook functions
 *   - Driver cookbook functions
 *
 * Each group of functions begins with a block comment labelled the "DTrace
 * [Group] Functions", allowing one to find each block by searching forward
 * on capital-f functions.
 */
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/strsubr.h>
#include <sys/sysmacros.h>
#include <sys/dtrace_impl.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#include <sys/ctf_api.h>
#include <sys/panic.h>
#include <sys/priv_impl.h>
#include <sys/policy.h>
#include <sys/cred_impl.h>
#include <sys/procfs_isa.h>
#include <sys/taskq.h>
#include <sys/mkdev.h>
#include <sys/kdi.h>
#include <sys/zone.h>

/*
 * DTrace Tunable Variables
 *
 * The following variables may be tuned by adding a line to /etc/system that
 * includes both the name of the DTrace module ("dtrace") and the name of the
 * variable.
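 * Such a line takes the general form "set <module>:<variable> = <value>".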
 * For example:
 *
 *	set dtrace:dtrace_destructive_disallow = 1
 *
 * In general, the only variables that one should be tuning this way are those
 * that affect system-wide DTrace behavior, and for which the default behavior
 * is undesirable.  Most of these variables are tunable on a per-consumer
 * basis using DTrace options, and need not be tuned on a system-wide basis.
 * When tuning these variables, avoid pathological values; while some attempt
 * is made to verify the integrity of these variables, they are not considered
 * part of the supported interface to DTrace, and they are therefore not
 * checked comprehensively.  Further, these variables should not be tuned
 * dynamically via "mdb -kw" or other means; they should only be tuned via
 * /etc/system.
 */
int		dtrace_destructive_disallow = 0;
dtrace_optval_t	dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t		dtrace_difo_maxsize = (256 * 1024);
dtrace_optval_t	dtrace_dof_maxsize = (256 * 1024);
size_t		dtrace_global_maxsize = (16 * 1024);
size_t		dtrace_actions_max = (16 * 1024);
size_t		dtrace_retain_max = 1024;
dtrace_optval_t	dtrace_helper_actions_max = 32;
dtrace_optval_t	dtrace_helper_providers_max = 32;
dtrace_optval_t	dtrace_dstate_defsize = (1 * 1024 * 1024);
size_t		dtrace_strsize_default = 256;
dtrace_optval_t	dtrace_cleanrate_default = 9900990;		/* 101 hz */
dtrace_optval_t	dtrace_cleanrate_min = 200000;			/* 5000 hz */
dtrace_optval_t	dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;	/* 1/minute */
dtrace_optval_t	dtrace_aggrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_max = (hrtime_t)10 * NANOSEC;	/* 6/minute */
dtrace_optval_t	dtrace_switchrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_nspec_default = 1;
dtrace_optval_t	dtrace_specsize_default = 32 * 1024;
dtrace_optval_t	dtrace_stackframes_default = 20;
dtrace_optval_t	dtrace_ustackframes_default = 20;
dtrace_optval_t	dtrace_jstackframes_default = 50;
dtrace_optval_t	dtrace_jstackstrsize_default = 512;
int		dtrace_msgdsize_max = 128;
hrtime_t	dtrace_chill_max = 500 * (NANOSEC / MILLISEC);	/* 500 ms */
hrtime_t	dtrace_chill_interval = NANOSEC;		/* 1000 ms */
int		dtrace_devdepth_max = 32;
int		dtrace_err_verbose;
hrtime_t	dtrace_deadman_interval = NANOSEC;
hrtime_t	dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
hrtime_t	dtrace_deadman_user = (hrtime_t)30 * NANOSEC;

/*
 * DTrace External Variables
 *
 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
 * available to DTrace consumers via the backtick (`) syntax.  One of these,
 * dtrace_zero, is made deliberately so: it is provided as a source of
 * well-known, zero-filled memory.  While this variable is not documented,
 * it is used by some translators as an implementation detail.
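 * (For example, a D consumer could refer to it as `dtrace_zero using that
 * same backtick syntax.)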
 */
const char	dtrace_zero[256] = { 0 };	/* zero-filled memory */

/*
 * DTrace Internal Variables
 */
static dev_info_t	*dtrace_devi;		/* device info */
static vmem_t		*dtrace_arena;		/* probe ID arena */
static vmem_t		*dtrace_minor;		/* minor number arena */
static taskq_t		*dtrace_taskq;		/* task queue */
static dtrace_probe_t	**dtrace_probes;	/* array of all probes */
static int		dtrace_nprobes;		/* number of probes */
static dtrace_provider_t *dtrace_provider;	/* provider list */
static dtrace_meta_t	*dtrace_meta_pid;	/* user-land meta provider */
static int		dtrace_opens;		/* number of opens */
static int		dtrace_helpers;		/* number of helpers */
static void		*dtrace_softstate;	/* softstate pointer */
static dtrace_hash_t	*dtrace_bymod;		/* probes hashed by module */
static dtrace_hash_t	*dtrace_byfunc;		/* probes hashed by function */
static dtrace_hash_t	*dtrace_byname;		/* probes hashed by name */
static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
static int		dtrace_toxranges;	/* number of toxic ranges */
static int		dtrace_toxranges_max;	/* size of toxic range array */
static dtrace_anon_t	dtrace_anon;		/* anonymous enabling */
static kmem_cache_t	*dtrace_state_cache;	/* cache for dynamic state */
static uint64_t		dtrace_vtime_references; /* number of vtimestamp refs */
static kthread_t	*dtrace_panicked;	/* panicking thread */
static dtrace_ecb_t	*dtrace_ecb_create_cache; /* cached created ECB */
static dtrace_genid_t	dtrace_probegen;	/* current probe generation */
static dtrace_helpers_t *dtrace_deferred_pid;	/* deferred helper list */
static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
static dtrace_dynvar_t	dtrace_dynhash_sink;	/* end of dynamic hash chains */

/*
 * DTrace Locking
 * DTrace is protected by three (relatively coarse-grained) locks:
 *
 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
 *     including enabling state, probes, ECBs, consumer state, helper state,
 *     etc.  Importantly, dtrace_lock is _not_ required when in probe context;
 *     probe context is lock-free -- synchronization is handled via the
 *     dtrace_sync() cross call mechanism.
 *
 * (2) dtrace_provider_lock is required when manipulating provider state, or
 *     when provider state must be held constant.
 *
 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
 *     when meta provider state must be held constant.
 *
 * The lock ordering between these three locks is dtrace_meta_lock before
 * dtrace_provider_lock before dtrace_lock.  (In particular, there are
 * several places where dtrace_provider_lock is held by the framework as it
 * calls into the providers -- which then call back into the framework,
 * grabbing dtrace_lock.)
 *
 * There are two other locks in the mix: mod_lock and cpu_lock.  With respect
 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
 * role as a coarse-grained lock; it is acquired before both of these locks.
 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must
 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
 * acquired _between_ dtrace_provider_lock and dtrace_lock.
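 *
 * (Taken together, then, a purely illustrative caller that somehow needed
 * all five of these locks would acquire them in the order dtrace_meta_lock,
 * cpu_lock, dtrace_provider_lock, mod_lock and finally dtrace_lock.)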
 */
static kmutex_t		dtrace_lock;		/* probe state lock */
static kmutex_t		dtrace_provider_lock;	/* provider state lock */
static kmutex_t		dtrace_meta_lock;	/* meta-provider state lock */

/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t	dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static dtrace_pops_t	dtrace_provider_ops = {
	(void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop,
	(void (*)(void *, struct modctl *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	NULL,
	NULL,
	NULL,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 */
uint32_t dtrace_helptrace_next = 0;
uint32_t dtrace_helptrace_nlocals;
char	*dtrace_helptrace_buffer;
int	dtrace_helptrace_bufsize = 512 * 1024;

#ifdef DEBUG
int	dtrace_helptrace_enabled = 1;
#else
int	dtrace_helptrace_enabled = 0;
#endif

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table.  This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation.  The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#ifdef DEBUG
static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
static const char *dtrace_errlast;
static kthread_t *dtrace_errthread;
static kmutex_t dtrace_errlock;
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation.  There is no real structure to this cpp
 * mishmash -- but is there ever?
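 * (By way of a worked example, DT_BSWAP_16(0x1234) below evaluates to
 * 0x3412.)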
 */
#define	DTRACE_HASHSTR(hash, probe)	\
	dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))

#define	DTRACE_HASHNEXT(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)

#define	DTRACE_HASHPREV(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)

#define	DTRACE_HASHEQ(hash, lhs, rhs)	\
	(strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
	    *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)

#define	DTRACE_AGGHASHSIZE_SLEW		17

/*
 * The key for a thread-local variable consists of the lower 61 bits of the
 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
 * equal to a variable identifier.  This is necessary (but not sufficient) to
 * assure that global associative arrays never collide with thread-local
 * variables.  To guarantee that they cannot collide, we must also define the
 * order for keying dynamic variables.  That order is:
 *
 *	[ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
 *
 * Because the variable-key and the tls-key are in orthogonal spaces, there is
 * no way for a global variable key signature to match a thread-local key
 * signature.
 */
#define	DTRACE_TLS_THRKEY(where) { \
	uint_t intr = 0; \
	uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}

#define	DT_BSWAP_8(x)	((x) & 0xff)
#define	DT_BSWAP_16(x)	((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
#define	DT_BSWAP_32(x)	((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
#define	DT_BSWAP_64(x)	((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))

#define	DTRACE_STORE(type, tomax, offset, what) \
	*((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);

#ifndef __i386
#define	DTRACE_ALIGNCHECK(addr, size, flags)				\
	if (addr & (size - 1)) {					\
		*flags |= CPU_DTRACE_BADALIGN;				\
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr;	\
		return (0);						\
	}
#else
#define	DTRACE_ALIGNCHECK(addr, size, flags)
#endif

#define	DTRACE_LOADFUNC(bits)						\
/*CSTYLED*/								\
uint##bits##_t								\
dtrace_load##bits(uintptr_t addr)					\
{									\
	size_t size = bits / NBBY;					\
	/*CSTYLED*/							\
	uint##bits##_t rval;						\
	int i;								\
	volatile uint16_t *flags = (volatile uint16_t *)		\
	    &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;			\
									\
	DTRACE_ALIGNCHECK(addr, size, flags);				\
									\
	for (i = 0; i < dtrace_toxranges; i++) {			\
		if (addr >= dtrace_toxrange[i].dtt_limit)		\
			continue;					\
									\
		if (addr + size <= dtrace_toxrange[i].dtt_base)		\
			continue;					\
									\
		/*							\
		 * This address falls within a toxic region; return 0.	\
		 */							\
		*flags |= CPU_DTRACE_BADADDR;				\
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr;	\
		return (0);						\
	}								\
									\
	*flags |= CPU_DTRACE_NOFAULT;					\
	/*CSTYLED*/							\
	rval = *((volatile uint##bits##_t *)addr);			\
	*flags &= ~CPU_DTRACE_NOFAULT;					\
									\
	return (rval);							\
}

#ifdef _LP64
#define	dtrace_loadptr	dtrace_load64
#else
#define	dtrace_loadptr	dtrace_load32
#endif

#define	DTRACE_DYNHASH_FREE	0
#define	DTRACE_DYNHASH_SINK	1
#define	DTRACE_DYNHASH_VALID	2

#define	DTRACE_MATCH_NEXT	0
#define	DTRACE_MATCH_DONE	1
#define	DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
#define	DTRACE_STATE_ALIGN	64

#define	DTRACE_FLAGS2FLT(flags)						\
	(((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR :		\
	((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP :		\
	((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO :		\
	((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV :		\
	((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV :		\
	((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW :		\
	((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN :		\
	((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH :	\
	DTRACEFLT_UNKNOWN)

#define	DTRACEACT_ISSTRING(act)						\
	((act)->dta_kind == DTRACEACT_DIFEXPR &&			\
	(act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)

static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
static void dtrace_enabling_provide(dtrace_provider_t *);
static int dtrace_enabling_match(dtrace_enabling_t *, int *);
static void dtrace_enabling_matchall(void);
static dtrace_state_t *dtrace_anon_grab(void);
static uint64_t dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
static void dtrace_buffer_drop(dtrace_buffer_t *);
static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context.  Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.)  If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note: not called from probe context."
 */
void
dtrace_panic(const char *format, ...)
457 { 458 va_list alist; 459 460 va_start(alist, format); 461 dtrace_vpanic(format, alist); 462 va_end(alist); 463 } 464 465 int 466 dtrace_assfail(const char *a, const char *f, int l) 467 { 468 dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l); 469 470 /* 471 * We just need something here that even the most clever compiler 472 * cannot optimize away. 473 */ 474 return (a[(uintptr_t)f]); 475 } 476 477 /* 478 * Atomically increment a specified error counter from probe context. 479 */ 480 static void 481 dtrace_error(uint32_t *counter) 482 { 483 /* 484 * Most counters stored to in probe context are per-CPU counters. 485 * However, there are some error conditions that are sufficiently 486 * arcane that they don't merit per-CPU storage. If these counters 487 * are incremented concurrently on different CPUs, scalability will be 488 * adversely affected -- but we don't expect them to be white-hot in a 489 * correctly constructed enabling... 490 */ 491 uint32_t oval, nval; 492 493 do { 494 oval = *counter; 495 496 if ((nval = oval + 1) == 0) { 497 /* 498 * If the counter would wrap, set it to 1 -- assuring 499 * that the counter is never zero when we have seen 500 * errors. (The counter must be 32-bits because we 501 * aren't guaranteed a 64-bit compare&swap operation.) 502 * To save this code both the infamy of being fingered 503 * by a priggish news story and the indignity of being 504 * the target of a neo-puritan witch trial, we're 505 * carefully avoiding any colorful description of the 506 * likelihood of this condition -- but suffice it to 507 * say that it is only slightly more likely than the 508 * overflow of predicate cache IDs, as discussed in 509 * dtrace_predicate_create(). 510 */ 511 nval = 1; 512 } 513 } while (dtrace_cas32(counter, oval, nval) != oval); 514 } 515 516 /* 517 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a 518 * uint8_t, a uint16_t, a uint32_t and a uint64_t. 519 */ 520 DTRACE_LOADFUNC(8) 521 DTRACE_LOADFUNC(16) 522 DTRACE_LOADFUNC(32) 523 DTRACE_LOADFUNC(64) 524 525 static int 526 dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate) 527 { 528 if (dest < mstate->dtms_scratch_base) 529 return (0); 530 531 if (dest + size < dest) 532 return (0); 533 534 if (dest + size > mstate->dtms_scratch_ptr) 535 return (0); 536 537 return (1); 538 } 539 540 static int 541 dtrace_canstore_statvar(uint64_t addr, size_t sz, 542 dtrace_statvar_t **svars, int nsvars) 543 { 544 int i; 545 546 for (i = 0; i < nsvars; i++) { 547 dtrace_statvar_t *svar = svars[i]; 548 549 if (svar == NULL || svar->dtsv_size == 0) 550 continue; 551 552 if (addr - svar->dtsv_data < svar->dtsv_size && 553 addr + sz <= svar->dtsv_data + svar->dtsv_size) 554 return (1); 555 } 556 557 return (0); 558 } 559 560 /* 561 * Check to see if the address is within a memory region to which a store may 562 * be issued. This includes the DTrace scratch areas, and any DTrace variable 563 * region. The caller of dtrace_canstore() is responsible for performing any 564 * alignment checks that are needed before stores are actually executed. 565 */ 566 static int 567 dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 568 dtrace_vstate_t *vstate) 569 { 570 uintptr_t a; 571 size_t s; 572 573 /* 574 * First, check to see if the address is in scratch space... 
	 */
	a = mstate->dtms_scratch_base;
	s = mstate->dtms_scratch_size;

	if (addr - a < s && addr + sz <= a + s)
		return (1);

	/*
	 * Now check to see if it's a dynamic variable.  This check will pick
	 * up both thread-local variables and any global dynamically-allocated
	 * variables.
	 */
	a = (uintptr_t)vstate->dtvs_dynvars.dtds_base;
	s = vstate->dtvs_dynvars.dtds_size;
	if (addr - a < s && addr + sz <= a + s)
		return (1);

	/*
	 * Finally, check the static local and global variables.  These checks
	 * take the longest, so we perform them last.
	 */
	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_locals, vstate->dtvs_nlocals))
		return (1);

	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_globals, vstate->dtvs_nglobals))
		return (1);

	return (0);
}

/*
 * Compare two strings using safe loads.
 */
static int
dtrace_strncmp(char *s1, char *s2, size_t limit)
{
	uint8_t c1, c2;
	volatile uint16_t *flags;

	if (s1 == s2 || limit == 0)
		return (0);

	flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;

	do {
		if (s1 == NULL) {
			c1 = '\0';
		} else {
			c1 = dtrace_load8((uintptr_t)s1++);
		}

		if (s2 == NULL) {
			c2 = '\0';
		} else {
			c2 = dtrace_load8((uintptr_t)s2++);
		}

		if (c1 != c2)
			return (c1 - c2);
	} while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));

	return (0);
}

/*
 * Compute strlen(s) for a string using safe memory accesses.  The additional
 * len parameter is used to specify a maximum length to ensure completion.
 */
static size_t
dtrace_strlen(const char *s, size_t lim)
{
	uint_t len;

	for (len = 0; len != lim; len++) {
		if (dtrace_load8((uintptr_t)s++) == '\0')
			break;
	}

	return (len);
}

/*
 * Check if an address falls within a toxic region.
 */
static int
dtrace_istoxic(uintptr_t kaddr, size_t size)
{
	uintptr_t taddr, tsize;
	int i;

	for (i = 0; i < dtrace_toxranges; i++) {
		taddr = dtrace_toxrange[i].dtt_base;
		tsize = dtrace_toxrange[i].dtt_limit - taddr;

		if (kaddr - taddr < tsize) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = kaddr;
			return (1);
		}

		if (taddr - kaddr < size) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = taddr;
			return (1);
		}
	}

	return (0);
}

/*
 * Copy src to dst using safe memory accesses.  The src is assumed to be unsafe
 * memory specified by the DIF program.  The dst is assumed to be safe memory
 * that we can store to directly because it is managed by DTrace.  As with
 * standard bcopy, overlapping copies are handled properly.
 */
static void
dtrace_bcopy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst;
		const uint8_t *s2 = src;

		if (s1 <= s2) {
			do {
				*s1++ = dtrace_load8((uintptr_t)s2++);
			} while (--len != 0);
		} else {
			s2 += len;
			s1 += len;

			do {
				*--s1 = dtrace_load8((uintptr_t)--s2);
			} while (--len != 0);
		}
	}
}

/*
 * Copy src to dst using safe memory accesses, up to either the specified
 * length, or the point that a nul byte is encountered.  The src is assumed to
 * be unsafe memory specified by the DIF program.  The dst is assumed to be
 * safe memory that we can store to directly because it is managed by DTrace.
 * Unlike dtrace_bcopy(), overlapping regions are not handled.
 */
static void
dtrace_strcpy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst, c;
		const uint8_t *s2 = src;

		do {
			*s1++ = c = dtrace_load8((uintptr_t)s2++);
		} while (--len != 0 && c != '\0');
	}
}

/*
 * Copy src to dst, deriving the size and type from the specified (BYREF)
 * variable type.  The src is assumed to be unsafe memory specified by the DIF
 * program.  The dst is assumed to be DTrace variable memory that is of the
 * specified type; we assume that we can store to directly.
 */
static void
dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
{
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	if (type->dtdt_kind == DIF_TYPE_STRING) {
		dtrace_strcpy(src, dst, type->dtdt_size);
	} else {
		dtrace_bcopy(src, dst, type->dtdt_size);
	}
}

/*
 * Compare s1 to s2 using safe memory accesses.  The s1 data is assumed to be
 * unsafe memory specified by the DIF program.  The s2 data is assumed to be
 * safe memory that we can access directly because it is managed by DTrace.
 */
static int
dtrace_bcmp(const void *s1, const void *s2, size_t len)
{
	volatile uint16_t *flags;

	flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;

	if (s1 == s2)
		return (0);

	if (s1 == NULL || s2 == NULL)
		return (1);

	if (s1 != s2 && len != 0) {
		const uint8_t *ps1 = s1;
		const uint8_t *ps2 = s2;

		do {
			if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
				return (1);
		} while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
	}
	return (0);
}

/*
 * Zero the specified region using a simple byte-by-byte loop.  Note that this
 * is for safe DTrace-managed memory only.
 */
static void
dtrace_bzero(void *dst, size_t len)
{
	uchar_t *cp;

	for (cp = dst; len != 0; len--)
		*cp++ = 0;
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the user credentials of the process that enabled the
 * invoking ECB match the target credentials.
 */
static int
dtrace_priv_proc_common_user(dtrace_state_t *state)
{
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_uid == cr->cr_uid &&
	    s_cr->cr_uid == cr->cr_ruid &&
	    s_cr->cr_uid == cr->cr_suid &&
	    s_cr->cr_gid == cr->cr_gid &&
	    s_cr->cr_gid == cr->cr_rgid &&
	    s_cr->cr_gid == cr->cr_sgid)
		return (1);

	return (0);
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the zone of the process that enabled the invoking ECB
 * matches the target credentials.
 */
static int
dtrace_priv_proc_common_zone(dtrace_state_t *state)
{
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_zone == cr->cr_zone)
		return (1);

	return (0);
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the process has not setuid or changed credentials.
 */
static int
dtrace_priv_proc_common_nocd()
{
	proc_t *proc;

	if ((proc = ttoproc(curthread)) != NULL &&
	    !(proc->p_flag & SNOCD))
		return (1);

	return (0);
}

static int
dtrace_priv_proc_destructive(dtrace_state_t *state)
{
	int action = state->dts_cred.dcr_action;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
	    dtrace_priv_proc_common_zone(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
	    dtrace_priv_proc_common_user(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
	    dtrace_priv_proc_common_nocd() == 0)
		goto bad;

	return (1);

bad:
	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc_control(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
		return (1);

	if (dtrace_priv_proc_common_zone(state) &&
	    dtrace_priv_proc_common_user(state) &&
	    dtrace_priv_proc_common_nocd())
		return (1);

	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
		return (1);

	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_kernel(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
		return (1);

	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

static int
dtrace_priv_kernel_destructive(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
		return (1);

	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

/*
 * Note: not called from probe context.  This function is called
 * asynchronously (and at a regular interval) from outside of probe context to
 * clean the dirty dynamic variable lists on all CPUs.  Dynamic variable
 * cleaning is explained in detail in <sys/dtrace_impl.h>.
 */
void
dtrace_dynvar_clean(dtrace_dstate_t *dstate)
{
	dtrace_dynvar_t *dirty;
	dtrace_dstate_percpu_t *dcpu;
	int i, work = 0;

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		ASSERT(dcpu->dtdsc_rinsing == NULL);

		/*
		 * If the dirty list is NULL, there is no dirty work to do.
		 */
		if (dcpu->dtdsc_dirty == NULL)
			continue;

		/*
		 * If the clean list is non-NULL, then we're not going to do
		 * any work for this CPU -- it means that there has not been
		 * a dtrace_dynvar() allocation on this CPU (or from this CPU)
		 * since the last time we cleaned house.
		 */
		if (dcpu->dtdsc_clean != NULL)
			continue;

		work = 1;

		/*
		 * Atomically move the dirty list aside.
		 */
		do {
			dirty = dcpu->dtdsc_dirty;

			/*
			 * Before we zap the dirty list, set the rinsing list.
			 * (This allows for a potential assertion in
			 * dtrace_dynvar(): if a free dynamic variable appears
			 * on a hash chain, either the dirty list or the
			 * rinsing list for some CPU must be non-NULL.)
			 */
			dcpu->dtdsc_rinsing = dirty;
			dtrace_membar_producer();
		} while (dtrace_casptr(&dcpu->dtdsc_dirty,
		    dirty, NULL) != dirty);
	}

	if (!work) {
		/*
		 * We have no work to do; we can simply return.
		 */
		return;
	}

	dtrace_sync();

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		if (dcpu->dtdsc_rinsing == NULL)
			continue;

		/*
		 * We are now guaranteed that no hash chain contains a pointer
		 * into this dirty list; we can make it clean.
		 */
		ASSERT(dcpu->dtdsc_clean == NULL);
		dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
		dcpu->dtdsc_rinsing = NULL;
	}

	/*
	 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
	 * sure that all CPUs have seen all of the dtdsc_clean pointers.
	 * This prevents a race whereby a CPU incorrectly decides that
	 * the state should be something other than DTRACE_DSTATE_CLEAN
	 * after dtrace_dynvar_clean() has completed.
	 */
	dtrace_sync();

	dstate->dtds_state = DTRACE_DSTATE_CLEAN;
}

/*
 * Depending on the value of the op parameter, this function looks up,
 * allocates, or deallocates an arbitrarily-keyed dynamic variable.  If an
 * allocation is requested, this function will return a pointer to a
 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
 * variable can be allocated.  If NULL is returned, the appropriate counter
 * will be incremented.
 */
dtrace_dynvar_t *
dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
    dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op)
{
	uint64_t hashval = DTRACE_DYNHASH_VALID;
	dtrace_dynhash_t *hash = dstate->dtds_hash;
	dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
	processorid_t me = CPU->cpu_id, cpu = me;
	dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
	size_t bucket, ksize;
	size_t chunksize = dstate->dtds_chunksize;
	uintptr_t kdata, lock, nstate;
	uint_t i;

	ASSERT(nkeys != 0);

	/*
	 * Hash the key.  As with aggregations, we use Jenkins' "One-at-a-time"
	 * algorithm.  For the by-value portions, we perform the algorithm in
	 * 16-bit chunks (as opposed to 8-bit chunks).  This speeds things up a
	 * bit, and seems to have only a minute effect on distribution.  For
	 * the by-reference data, we perform "One-at-a-time" iterating (safely)
	 * over each referenced byte.  It's painful to do this, but it's much
	 * better than pathological hash distribution.  The efficacy of the
	 * hashing algorithm (and a comparison with other algorithms) may be
	 * found by running the ::dtrace_dynstat MDB dcmd.
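	 * (Concretely, the 16-bit chunking means that a single 64-bit
	 * by-value key is folded in four mixing steps below rather than
	 * eight byte-sized ones.)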
	 */
	for (i = 0; i < nkeys; i++) {
		if (key[i].dttk_size == 0) {
			uint64_t val = key[i].dttk_value;

			hashval += (val >> 48) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 32) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 16) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += val & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);
		} else {
			/*
			 * This is incredibly painful, but it beats the hell
			 * out of the alternative.
			 */
			uint64_t j, size = key[i].dttk_size;
			uintptr_t base = (uintptr_t)key[i].dttk_value;

			for (j = 0; j < size; j++) {
				hashval += dtrace_load8(base + j);
				hashval += (hashval << 10);
				hashval ^= (hashval >> 6);
			}
		}
	}

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	/*
	 * There is a remote chance (ideally, 1 in 2^31) that our hashval
	 * comes out to be one of our two sentinel hash values.  If this
	 * actually happens, we set the hashval to be a value known to be a
	 * non-sentinel value.
	 */
	if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
		hashval = DTRACE_DYNHASH_VALID;

	/*
	 * Yes, it's painful to do a divide here.  If the cycle count becomes
	 * important here, tricks can be pulled to reduce it.  (However, it's
	 * critical that hash collisions be kept to an absolute minimum;
	 * they're much more painful than a divide.)  It's better to have a
	 * solution that generates few collisions and still keeps things
	 * relatively simple.
	 */
	bucket = hashval % dstate->dtds_hashsize;

	if (op == DTRACE_DYNVAR_DEALLOC) {
		volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;

		for (;;) {
			while ((lock = *lockp) & 1)
				continue;

			if (dtrace_casptr((void *)lockp,
			    (void *)lock, (void *)(lock + 1)) == (void *)lock)
				break;
		}

		dtrace_membar_producer();
	}

top:
	prev = NULL;
	lock = hash[bucket].dtdh_lock;

	dtrace_membar_consumer();

	start = hash[bucket].dtdh_chain;
	ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
	    start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
	    op != DTRACE_DYNVAR_DEALLOC));

	for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
		dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
		dtrace_key_t *dkey = &dtuple->dtt_key[0];

		if (dvar->dtdv_hashval != hashval) {
			if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
				/*
				 * We've reached the sink, and therefore the
				 * end of the hash chain; we can kick out of
				 * the loop knowing that we have seen a valid
				 * snapshot of state.
				 */
				ASSERT(dvar->dtdv_next == NULL);
				ASSERT(dvar == &dtrace_dynhash_sink);
				break;
			}

			if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
				/*
				 * We've gone off the rails: somewhere along
				 * the line, one of the members of this hash
				 * chain was deleted.  Note that we could also
				 * detect this by simply letting this loop run
				 * to completion, as we would eventually hit
				 * the end of the dirty list.  However, we
				 * want to avoid running the length of the
				 * dirty list unnecessarily (it might be quite
				 * long), so we catch this as early as
				 * possible by detecting the hash marker.  In
				 * this case, we simply set dvar to NULL and
				 * break; the conditional after the loop will
				 * send us back to top.
				 */
				dvar = NULL;
				break;
			}

			goto next;
		}

		if (dtuple->dtt_nkeys != nkeys)
			goto next;

		for (i = 0; i < nkeys; i++, dkey++) {
			if (dkey->dttk_size != key[i].dttk_size)
				goto next; /* size or type mismatch */

			if (dkey->dttk_size != 0) {
				if (dtrace_bcmp(
				    (void *)(uintptr_t)key[i].dttk_value,
				    (void *)(uintptr_t)dkey->dttk_value,
				    dkey->dttk_size))
					goto next;
			} else {
				if (dkey->dttk_value != key[i].dttk_value)
					goto next;
			}
		}

		if (op != DTRACE_DYNVAR_DEALLOC)
			return (dvar);

		ASSERT(dvar->dtdv_next == NULL ||
		    dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);

		if (prev != NULL) {
			ASSERT(hash[bucket].dtdh_chain != dvar);
			ASSERT(start != dvar);
			ASSERT(prev->dtdv_next == dvar);
			prev->dtdv_next = dvar->dtdv_next;
		} else {
			if (dtrace_casptr(&hash[bucket].dtdh_chain,
			    start, dvar->dtdv_next) != start) {
				/*
				 * We have failed to atomically swing the
				 * hash table head pointer, presumably because
				 * of a conflicting allocation on another CPU.
				 * We need to reread the hash chain and try
				 * again.
				 */
				goto top;
			}
		}

		dtrace_membar_producer();

		/*
		 * Now set the hash value to indicate that it's free.
		 */
		ASSERT(hash[bucket].dtdh_chain != dvar);
		dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;

		dtrace_membar_producer();

		/*
		 * Set the next pointer to point at the dirty list, and
		 * atomically swing the dirty pointer to the newly freed dvar.
		 */
		do {
			next = dcpu->dtdsc_dirty;
			dvar->dtdv_next = next;
		} while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);

		/*
		 * Finally, unlock this hash bucket.
		 */
		ASSERT(hash[bucket].dtdh_lock == lock);
		ASSERT(lock & 1);
		hash[bucket].dtdh_lock++;

		return (NULL);
next:
		prev = dvar;
		continue;
	}

	if (dvar == NULL) {
		/*
		 * If dvar is NULL, it is because we went off the rails:
		 * one of the elements that we traversed in the hash chain
		 * was deleted while we were traversing it.  In this case,
		 * we assert that we aren't doing a dealloc (deallocs lock
		 * the hash bucket to prevent themselves from racing with
		 * one another), and retry the hash chain traversal.
		 */
		ASSERT(op != DTRACE_DYNVAR_DEALLOC);
		goto top;
	}

	if (op != DTRACE_DYNVAR_ALLOC) {
		/*
		 * If we are not to allocate a new variable, we want to
		 * return NULL now.  Before we return, check that the value
		 * of the lock word hasn't changed.  If it has, we may have
		 * seen an inconsistent snapshot.
		 */
		if (op == DTRACE_DYNVAR_NOALLOC) {
			if (hash[bucket].dtdh_lock != lock)
				goto top;
		} else {
			ASSERT(op == DTRACE_DYNVAR_DEALLOC);
			ASSERT(hash[bucket].dtdh_lock == lock);
			ASSERT(lock & 1);
			hash[bucket].dtdh_lock++;
		}

		return (NULL);
	}

	/*
	 * We need to allocate a new dynamic variable.  The size we need is the
	 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
	 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
	 * the size of any referred-to data (dsize).  We then round the final
	 * size up to the chunksize for allocation.
	 */
	for (ksize = 0, i = 0; i < nkeys; i++)
		ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));

	/*
	 * This should be pretty much impossible, but could happen if, say,
	 * strange DIF specified the tuple.  Ideally, this should be an
	 * assertion and not an error condition -- but that requires that the
	 * chunksize calculation in dtrace_difo_chunksize() be absolutely
	 * bullet-proof.  (That is, it must not be able to be fooled by
	 * malicious DIF.)  Given the lack of backwards branches in DIF,
	 * solving this would presumably not amount to solving the Halting
	 * Problem -- but it still seems awfully hard.
	 */
	if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
	    ksize + dsize > chunksize) {
		dcpu->dtdsc_drops++;
		return (NULL);
	}

	nstate = DTRACE_DSTATE_EMPTY;

	do {
retry:
		free = dcpu->dtdsc_free;

		if (free == NULL) {
			dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
			void *rval;

			if (clean == NULL) {
				/*
				 * We're out of dynamic variable space on
				 * this CPU.  Unless we have tried all CPUs,
				 * we'll try to allocate from a different
				 * CPU.
				 */
				switch (dstate->dtds_state) {
				case DTRACE_DSTATE_CLEAN: {
					void *sp = &dstate->dtds_state;

					if (++cpu >= NCPU)
						cpu = 0;

					if (dcpu->dtdsc_dirty != NULL &&
					    nstate == DTRACE_DSTATE_EMPTY)
						nstate = DTRACE_DSTATE_DIRTY;

					if (dcpu->dtdsc_rinsing != NULL)
						nstate = DTRACE_DSTATE_RINSING;

					dcpu = &dstate->dtds_percpu[cpu];

					if (cpu != me)
						goto retry;

					(void) dtrace_cas32(sp,
					    DTRACE_DSTATE_CLEAN, nstate);

					/*
					 * To increment the correct bean
					 * counter, take another lap.
					 */
					goto retry;
				}

				case DTRACE_DSTATE_DIRTY:
					dcpu->dtdsc_dirty_drops++;
					break;

				case DTRACE_DSTATE_RINSING:
					dcpu->dtdsc_rinsing_drops++;
					break;

				case DTRACE_DSTATE_EMPTY:
					dcpu->dtdsc_drops++;
					break;
				}

				DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
				return (NULL);
			}

			/*
			 * The clean list appears to be non-empty.  We want to
			 * move the clean list to the free list; we start by
			 * moving the clean pointer aside.
			 */
			if (dtrace_casptr(&dcpu->dtdsc_clean,
			    clean, NULL) != clean) {
				/*
				 * We are in one of two situations:
				 *
				 *  (a)	The clean list was switched to the
				 *	free list by another CPU.
				 *
				 *  (b)	The clean list was added to by the
				 *	cleansing cyclic.
				 *
				 * In either of these situations, we can
				 * just reattempt the free list allocation.
				 */
				goto retry;
			}

			ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);

			/*
			 * Now we'll move the clean list to the free list.
			 * It's impossible for this to fail: the only way
			 * the free list can be updated is through this
			 * code path, and only one CPU can own the clean list.
			 * Thus, it would only be possible for this to fail if
			 * this code were racing with dtrace_dynvar_clean().
			 * (That is, if dtrace_dynvar_clean() updated the clean
			 * list, and we ended up racing to update the free
			 * list.)  This race is prevented by the dtrace_sync()
			 * in dtrace_dynvar_clean() -- which flushes the
			 * owners of the clean lists out before resetting
			 * the clean lists.
			 */
			rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
			ASSERT(rval == NULL);
			goto retry;
		}

		dvar = free;
		new_free = dvar->dtdv_next;
	} while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);

	/*
	 * We have now allocated a new chunk.  We copy the tuple keys into the
	 * tuple array and copy any referenced key data into the data space
	 * following the tuple array.  As we do this, we relocate dttk_value
	 * in the final tuple to point to the key data address in the chunk.
	 */
	kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
	dvar->dtdv_data = (void *)(kdata + ksize);
	dvar->dtdv_tuple.dtt_nkeys = nkeys;

	for (i = 0; i < nkeys; i++) {
		dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
		size_t kesize = key[i].dttk_size;

		if (kesize != 0) {
			dtrace_bcopy(
			    (const void *)(uintptr_t)key[i].dttk_value,
			    (void *)kdata, kesize);
			dkey->dttk_value = kdata;
			kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
		} else {
			dkey->dttk_value = key[i].dttk_value;
		}

		dkey->dttk_size = kesize;
	}

	ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
	dvar->dtdv_hashval = hashval;
	dvar->dtdv_next = start;

	if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
		return (dvar);

	/*
	 * The cas has failed.  Either another CPU is adding an element to
	 * this hash chain, or another CPU is deleting an element from this
	 * hash chain.  The simplest way to deal with both of these cases
	 * (though not necessarily the most efficient) is to free our
	 * allocated block and tail-call ourselves.  Note that the free is
	 * to the dirty list and _not_ to the free list.  This is to prevent
	 * races with allocators, above.
	 */
	dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;

	dtrace_membar_producer();

	do {
		free = dcpu->dtdsc_dirty;
		dvar->dtdv_next = free;
	} while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);

	return (dtrace_dynvar(dstate, nkeys, key, dsize, op));
}

/*ARGSUSED*/
static void
dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
{
	if (nval < *oval)
		*oval = nval;
}

/*ARGSUSED*/
static void
dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
{
	if (nval > *oval)
		*oval = nval;
}

static void
dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
{
	int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
	int64_t val = (int64_t)nval;

	if (val < 0) {
		for (i = 0; i < zero; i++) {
			if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
				quanta[i] += incr;
				return;
			}
		}
	} else {
		for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
			if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
				quanta[i - 1] += incr;
				return;
			}
		}

		quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
		return;
	}

	ASSERT(0);
}

static void
dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
{
	uint64_t arg = *lquanta++;
	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
	int32_t val = (int32_t)nval, level;

	ASSERT(step != 0);
	ASSERT(levels != 0);

	if (val < base) {
		/*
		 * This is an underflow.
		 */
		lquanta[0] += incr;
		return;
	}

	level = (val - base) / step;

	if (level < levels) {
		lquanta[level + 1] += incr;
		return;
	}

	/*
	 * This is an overflow.
	 */
	lquanta[levels + 1] += incr;
}

/*ARGSUSED*/
static void
dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
{
	data[0]++;
	data[1] += nval;
}

/*ARGSUSED*/
static void
dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
{
	*oval = *oval + 1;
}

/*ARGSUSED*/
static void
dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
{
	*oval += nval;
}

/*
 * Aggregate given the tuple in the principal data buffer, and the aggregating
 * action denoted by the specified dtrace_aggregation_t.  The aggregation
 * buffer is specified as the buf parameter.  This routine does not return
 * failure; if there is no space in the aggregation buffer, the data will be
 * dropped, and a corresponding counter incremented.
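 * (Note that every aggregating action -- count(), sum(), min(), max(),
 * avg(), quantize() and lquantize() -- funnels through this routine, with
 * the dtrace_aggregate_*() functions above supplying the per-action math.)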
 */
static void
dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
    intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
{
	dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
	uint32_t i, ndx, size, fsize;
	uint32_t align = sizeof (uint64_t) - 1;
	dtrace_aggbuffer_t *agb;
	dtrace_aggkey_t *key;
	uint32_t hashval = 0, limit, isstr;
	caddr_t tomax, data, kdata;
	dtrace_actkind_t action;
	dtrace_action_t *act;
	uintptr_t offs;

	if (buf == NULL)
		return;

	if (!agg->dtag_hasarg) {
		/*
		 * Currently, only quantize() and lquantize() take additional
		 * arguments, and they have the same semantics: an increment
		 * value that defaults to 1 when not present.  If additional
		 * aggregating actions take arguments, the setting of the
		 * default argument value will presumably have to become more
		 * sophisticated...
		 */
		arg = 1;
	}

	action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
	size = rec->dtrd_offset - agg->dtag_base;
	fsize = size + rec->dtrd_size;

	ASSERT(dbuf->dtb_tomax != NULL);
	data = dbuf->dtb_tomax + offset + agg->dtag_base;

	if ((tomax = buf->dtb_tomax) == NULL) {
		dtrace_buffer_drop(buf);
		return;
	}

	/*
	 * The metastructure is always at the bottom of the buffer.
	 */
	agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
	    sizeof (dtrace_aggbuffer_t));

	if (buf->dtb_offset == 0) {
		/*
		 * We just kludge up approximately 1/8th of the size to be
		 * buckets.  If this guess ends up being routinely
		 * off-the-mark, we may need to dynamically readjust this
		 * based on past performance.
		 */
		uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);

		if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
		    (uintptr_t)tomax || hashsize == 0) {
			/*
			 * We've been given a ludicrously small buffer;
			 * increment our drop count and leave.
			 */
			dtrace_buffer_drop(buf);
			return;
		}

		/*
		 * And now, a pathetic attempt to try to get an odd (or
		 * perchance, a prime) hash size for better hash distribution.
		 */
		if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
			hashsize -= DTRACE_AGGHASHSIZE_SLEW;

		agb->dtagb_hashsize = hashsize;
		agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
		    agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
		agb->dtagb_free = (uintptr_t)agb->dtagb_hash;

		for (i = 0; i < agb->dtagb_hashsize; i++)
			agb->dtagb_hash[i] = NULL;
	}

	ASSERT(agg->dtag_first != NULL);
	ASSERT(agg->dtag_first->dta_intuple);

	/*
	 * Calculate the hash value based on the key.  Note that we _don't_
	 * include the aggid in the hashing (but we will store it as part of
	 * the key).  The hashing algorithm is Bob Jenkins' "One-at-a-time"
	 * algorithm: a simple, quick algorithm that has no known funnels, and
	 * gets good distribution in practice.  The efficacy of the hashing
	 * algorithm (and a comparison with other algorithms) may be found by
	 * running the ::dtrace_aggstat MDB dcmd.
	 */
	for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
		i = act->dta_rec.dtrd_offset - agg->dtag_base;
		limit = i + act->dta_rec.dtrd_size;
		ASSERT(limit <= size);
		isstr = DTRACEACT_ISSTRING(act);

		for (; i < limit; i++) {
			hashval += data[i];
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			if (isstr && data[i] == '\0')
				break;
		}
	}

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	/*
	 * Yes, the divide here is expensive -- but it's generally the least
	 * of the performance issues given the amount of data that we iterate
	 * over to compute hash values, compare data, etc.
	 */
	ndx = hashval % agb->dtagb_hashsize;

	for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
		ASSERT((caddr_t)key >= tomax);
		ASSERT((caddr_t)key < tomax + buf->dtb_size);

		if (hashval != key->dtak_hashval || key->dtak_size != size)
			continue;

		kdata = key->dtak_data;
		ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);

		for (act = agg->dtag_first; act->dta_intuple;
		    act = act->dta_next) {
			i = act->dta_rec.dtrd_offset - agg->dtag_base;
			limit = i + act->dta_rec.dtrd_size;
			ASSERT(limit <= size);
			isstr = DTRACEACT_ISSTRING(act);

			for (; i < limit; i++) {
				if (kdata[i] != data[i])
					goto next;

				if (isstr && data[i] == '\0')
					break;
			}
		}

		if (action != key->dtak_action) {
			/*
			 * We are aggregating on the same value in the same
			 * aggregation with two different aggregating actions.
			 * (This should have been picked up in the compiler,
			 * so we may be dealing with errant or devious DIF.)
			 * This is an error condition; we indicate as much,
			 * and return.
			 */
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return;
		}

		/*
		 * This is a hit: we need to apply the aggregator to
		 * the value at this key.
		 */
		agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg);
		return;
next:
		continue;
	}

	/*
	 * We didn't find it.  We need to allocate some zero-filled space,
	 * link it into the hash table appropriately, and apply the aggregator
	 * to the (zero-filled) value.
	 */
	offs = buf->dtb_offset;
	while (offs & (align - 1))
		offs += sizeof (uint32_t);

	/*
	 * If we don't have enough room to both allocate a new key _and_
	 * its associated data, increment the drop count and return.
	 */
	if ((uintptr_t)tomax + offs + fsize >
	    agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
		dtrace_buffer_drop(buf);
		return;
	}

	/*CONSTCOND*/
	ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
	key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
	agb->dtagb_free -= sizeof (dtrace_aggkey_t);

	key->dtak_data = kdata = tomax + offs;
	buf->dtb_offset = offs + fsize;

	/*
	 * Now copy the data across.
	 */
	*((dtrace_aggid_t *)kdata) = agg->dtag_id;

	for (i = sizeof (dtrace_aggid_t); i < size; i++)
		kdata[i] = data[i];

	/*
	 * Because strings are not zeroed out by default, we need to iterate
	 * looking for actions that store strings, and we need to explicitly
	 * pad these strings out with zeroes.
1802 */ 1803 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 1804 int nul; 1805 1806 if (!DTRACEACT_ISSTRING(act)) 1807 continue; 1808 1809 i = act->dta_rec.dtrd_offset - agg->dtag_base; 1810 limit = i + act->dta_rec.dtrd_size; 1811 ASSERT(limit <= size); 1812 1813 for (nul = 0; i < limit; i++) { 1814 if (nul) { 1815 kdata[i] = '\0'; 1816 continue; 1817 } 1818 1819 if (data[i] != '\0') 1820 continue; 1821 1822 nul = 1; 1823 } 1824 } 1825 1826 for (i = size; i < fsize; i++) 1827 kdata[i] = 0; 1828 1829 key->dtak_hashval = hashval; 1830 key->dtak_size = size; 1831 key->dtak_action = action; 1832 key->dtak_next = agb->dtagb_hash[ndx]; 1833 agb->dtagb_hash[ndx] = key; 1834 1835 /* 1836 * Finally, apply the aggregator. 1837 */ 1838 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 1839 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 1840 } 1841 1842 /* 1843 * Given consumer state, this routine finds a speculation in the INACTIVE 1844 * state and transitions it into the ACTIVE state. If there is no speculation 1845 * in the INACTIVE state, 0 is returned. In this case, no error counter is 1846 * incremented -- it is up to the caller to take appropriate action. 1847 */ 1848 static int 1849 dtrace_speculation(dtrace_state_t *state) 1850 { 1851 int i = 0; 1852 dtrace_speculation_state_t current; 1853 uint32_t *stat = &state->dts_speculations_unavail, count; 1854 1855 while (i < state->dts_nspeculations) { 1856 dtrace_speculation_t *spec = &state->dts_speculations[i]; 1857 1858 current = spec->dtsp_state; 1859 1860 if (current != DTRACESPEC_INACTIVE) { 1861 if (current == DTRACESPEC_COMMITTINGMANY || 1862 current == DTRACESPEC_COMMITTING || 1863 current == DTRACESPEC_DISCARDING) 1864 stat = &state->dts_speculations_busy; 1865 i++; 1866 continue; 1867 } 1868 1869 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 1870 current, DTRACESPEC_ACTIVE) == current) 1871 return (i + 1); 1872 } 1873 1874 /* 1875 * We couldn't find a speculation. If we found as much as a single 1876 * busy speculation buffer, we'll attribute this failure as "busy" 1877 * instead of "unavail". 1878 */ 1879 do { 1880 count = *stat; 1881 } while (dtrace_cas32(stat, count, count + 1) != count); 1882 1883 return (0); 1884 } 1885 1886 /* 1887 * This routine commits an active speculation. If the specified speculation 1888 * is not in a valid state to perform a commit(), this routine will silently do 1889 * nothing. 
 * The state of the specified speculation is transitioned according to the
 * state transition diagram outlined in <sys/dtrace_impl.h>.
 */
static void
dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
    dtrace_specid_t which)
{
    dtrace_speculation_t *spec;
    dtrace_buffer_t *src, *dest;
    uintptr_t daddr, saddr, dlimit;
    dtrace_speculation_state_t current, new;
    intptr_t offs;

    if (which == 0)
        return;

    if (which > state->dts_nspeculations) {
        cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
        return;
    }

    spec = &state->dts_speculations[which - 1];
    src = &spec->dtsp_buffer[cpu];
    dest = &state->dts_buffer[cpu];

    do {
        current = spec->dtsp_state;

        if (current == DTRACESPEC_COMMITTINGMANY)
            break;

        switch (current) {
        case DTRACESPEC_INACTIVE:
        case DTRACESPEC_DISCARDING:
            return;

        case DTRACESPEC_COMMITTING:
            /*
             * This is only possible if we are (a) commit()'ing
             * without having done a prior speculate() on this CPU
             * and (b) racing with another commit() on a different
             * CPU.  There's nothing to do -- we just assert that
             * our offset is 0.
             */
            ASSERT(src->dtb_offset == 0);
            return;

        case DTRACESPEC_ACTIVE:
            new = DTRACESPEC_COMMITTING;
            break;

        case DTRACESPEC_ACTIVEONE:
            /*
             * This speculation is active on one CPU.  If our
             * buffer offset is non-zero, we know that the one CPU
             * must be us.  Otherwise, we are committing on a
             * different CPU from the speculate(), and we must
             * rely on being asynchronously cleaned.
             */
            if (src->dtb_offset != 0) {
                new = DTRACESPEC_COMMITTING;
                break;
            }
            /*FALLTHROUGH*/

        case DTRACESPEC_ACTIVEMANY:
            new = DTRACESPEC_COMMITTINGMANY;
            break;

        default:
            ASSERT(0);
        }
    } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
        current, new) != current);

    /*
     * We have set the state to indicate that we are committing this
     * speculation.  Now reserve the necessary space in the destination
     * buffer.
     */
    if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
        sizeof (uint64_t), state, NULL)) < 0) {
        dtrace_buffer_drop(dest);
        goto out;
    }

    /*
     * We have the space; copy the buffer across.  (Note that this is a
     * highly suboptimal bcopy(); in the unlikely event that this becomes
     * a serious performance issue, a high-performance DTrace-specific
     * bcopy() should obviously be invented.)
     */
    daddr = (uintptr_t)dest->dtb_tomax + offs;
    dlimit = daddr + src->dtb_offset;
    saddr = (uintptr_t)src->dtb_tomax;

    /*
     * First, the aligned portion.
     */
    while (dlimit - daddr >= sizeof (uint64_t)) {
        *((uint64_t *)daddr) = *((uint64_t *)saddr);

        daddr += sizeof (uint64_t);
        saddr += sizeof (uint64_t);
    }

    /*
     * Now any left-over bit...
     */
    while (dlimit - daddr)
        *((uint8_t *)daddr++) = *((uint8_t *)saddr++);

    /*
     * Finally, commit the reserved space in the destination buffer.
     */
    dest->dtb_offset = offs + src->dtb_offset;

out:
    /*
     * If we're lucky enough to be the only active CPU on this speculation
     * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
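/*
 * Illustrative sketch (an assumption, not part of the framework): the
 * compare-and-swap retry pattern that every speculation state transition
 * above and below relies on.  The new state is recomputed from the
 * observed state on each trip, and dtrace_cas32() -- which returns the
 * prior value -- only succeeds if no other CPU moved the state in the
 * meantime.  The function name and the two-state parameterization are
 * invented for illustration.
 */
#if 0
static void
example_spec_transition(volatile uint32_t *statep, uint32_t busy,
    uint32_t idle)
{
    uint32_t current, new;

    do {
        current = *statep;
        new = (current == busy) ? idle : current;
    } while (dtrace_cas32((uint32_t *)statep, current, new) != current);
}
#endif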
2010 */ 2011 if (current == DTRACESPEC_ACTIVE || 2012 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2013 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2014 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2015 2016 ASSERT(rval == DTRACESPEC_COMMITTING); 2017 } 2018 2019 src->dtb_offset = 0; 2020 src->dtb_xamot_drops += src->dtb_drops; 2021 src->dtb_drops = 0; 2022 } 2023 2024 /* 2025 * This routine discards an active speculation. If the specified speculation 2026 * is not in a valid state to perform a discard(), this routine will silently 2027 * do nothing. The state of the specified speculation is transitioned 2028 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 2029 */ 2030 static void 2031 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2032 dtrace_specid_t which) 2033 { 2034 dtrace_speculation_t *spec; 2035 dtrace_speculation_state_t current, new; 2036 dtrace_buffer_t *buf; 2037 2038 if (which == 0) 2039 return; 2040 2041 if (which > state->dts_nspeculations) { 2042 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2043 return; 2044 } 2045 2046 spec = &state->dts_speculations[which - 1]; 2047 buf = &spec->dtsp_buffer[cpu]; 2048 2049 do { 2050 current = spec->dtsp_state; 2051 2052 switch (current) { 2053 case DTRACESPEC_INACTIVE: 2054 case DTRACESPEC_COMMITTINGMANY: 2055 case DTRACESPEC_COMMITTING: 2056 case DTRACESPEC_DISCARDING: 2057 return; 2058 2059 case DTRACESPEC_ACTIVE: 2060 case DTRACESPEC_ACTIVEMANY: 2061 new = DTRACESPEC_DISCARDING; 2062 break; 2063 2064 case DTRACESPEC_ACTIVEONE: 2065 if (buf->dtb_offset != 0) { 2066 new = DTRACESPEC_INACTIVE; 2067 } else { 2068 new = DTRACESPEC_DISCARDING; 2069 } 2070 break; 2071 2072 default: 2073 ASSERT(0); 2074 } 2075 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2076 current, new) != current); 2077 2078 buf->dtb_offset = 0; 2079 buf->dtb_drops = 0; 2080 } 2081 2082 /* 2083 * Note: not called from probe context. This function is called 2084 * asynchronously from cross call context to clean any speculations that are 2085 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 2086 * transitioned back to the INACTIVE state until all CPUs have cleaned the 2087 * speculation. 2088 */ 2089 static void 2090 dtrace_speculation_clean_here(dtrace_state_t *state) 2091 { 2092 dtrace_icookie_t cookie; 2093 processorid_t cpu = CPU->cpu_id; 2094 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 2095 dtrace_specid_t i; 2096 2097 cookie = dtrace_interrupt_disable(); 2098 2099 if (dest->dtb_tomax == NULL) { 2100 dtrace_interrupt_enable(cookie); 2101 return; 2102 } 2103 2104 for (i = 0; i < state->dts_nspeculations; i++) { 2105 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2106 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 2107 2108 if (src->dtb_tomax == NULL) 2109 continue; 2110 2111 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 2112 src->dtb_offset = 0; 2113 continue; 2114 } 2115 2116 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2117 continue; 2118 2119 if (src->dtb_offset == 0) 2120 continue; 2121 2122 dtrace_speculation_commit(state, cpu, i + 1); 2123 } 2124 2125 dtrace_interrupt_enable(cookie); 2126 } 2127 2128 /* 2129 * Note: not called from probe context. This function is called 2130 * asynchronously (and at a regular interval) to clean any speculations that 2131 * are in the COMMITTINGMANY or DISCARDING states. 
 * If it discovers that there is work to be done, it cross calls all CPUs
 * to perform that work; COMMITTINGMANY and DISCARDING speculations may not
 * be transitioned back to the INACTIVE state until they have been cleaned
 * by all CPUs.
 */
static void
dtrace_speculation_clean(dtrace_state_t *state)
{
    int work = 0, rv;
    dtrace_specid_t i;

    for (i = 0; i < state->dts_nspeculations; i++) {
        dtrace_speculation_t *spec = &state->dts_speculations[i];

        ASSERT(!spec->dtsp_cleaning);

        if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
            spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
            continue;

        work++;
        spec->dtsp_cleaning = 1;
    }

    if (!work)
        return;

    dtrace_xcall(DTRACE_CPUALL,
        (dtrace_xcall_t)dtrace_speculation_clean_here, state);

    /*
     * We now know that all CPUs have committed or discarded their
     * speculation buffers, as appropriate.  We can now set the state
     * to inactive.
     */
    for (i = 0; i < state->dts_nspeculations; i++) {
        dtrace_speculation_t *spec = &state->dts_speculations[i];
        dtrace_speculation_state_t current, new;

        if (!spec->dtsp_cleaning)
            continue;

        current = spec->dtsp_state;
        ASSERT(current == DTRACESPEC_DISCARDING ||
            current == DTRACESPEC_COMMITTINGMANY);

        new = DTRACESPEC_INACTIVE;

        rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new);
        ASSERT(rv == current);
        spec->dtsp_cleaning = 0;
    }
}

/*
 * Called as part of a speculate() to get the speculative buffer associated
 * with a given speculation.  Returns NULL if the specified speculation is not
 * in an ACTIVE state.  If the speculation is in the ACTIVEONE state -- and
 * the active CPU is not the specified CPU -- the speculation will be
 * atomically transitioned into the ACTIVEMANY state.
 */
static dtrace_buffer_t *
dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
    dtrace_specid_t which)
{
    dtrace_speculation_t *spec;
    dtrace_speculation_state_t current, new;
    dtrace_buffer_t *buf;

    if (which == 0)
        return (NULL);

    if (which > state->dts_nspeculations) {
        cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
        return (NULL);
    }

    spec = &state->dts_speculations[which - 1];
    buf = &spec->dtsp_buffer[cpuid];

    do {
        current = spec->dtsp_state;

        switch (current) {
        case DTRACESPEC_INACTIVE:
        case DTRACESPEC_COMMITTINGMANY:
        case DTRACESPEC_DISCARDING:
            return (NULL);

        case DTRACESPEC_COMMITTING:
            ASSERT(buf->dtb_offset == 0);
            return (NULL);

        case DTRACESPEC_ACTIVEONE:
            /*
             * This speculation is currently active on one CPU.
             * Check the offset in the buffer; if it's non-zero,
             * that CPU must be us (and we leave the state alone).
             * If it's zero, assume that we're starting on a new
             * CPU -- and change the state to indicate that the
             * speculation is active on more than one CPU.
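/*
 * For reference, a summary of how the lookup below treats each state (a
 * restatement of the code that follows, added for clarity):
 *
 *	current state		returned buffer		new state
 *	INACTIVE		NULL			(unchanged)
 *	COMMITTING		NULL			(unchanged)
 *	COMMITTINGMANY		NULL			(unchanged)
 *	DISCARDING		NULL			(unchanged)
 *	ACTIVE			per-CPU buffer		ACTIVEONE
 *	ACTIVEONE, our CPU	per-CPU buffer		(unchanged)
 *	ACTIVEONE, new CPU	per-CPU buffer		ACTIVEMANY
 *	ACTIVEMANY		per-CPU buffer		(unchanged)
 */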
2232 */ 2233 if (buf->dtb_offset != 0) 2234 return (buf); 2235 2236 new = DTRACESPEC_ACTIVEMANY; 2237 break; 2238 2239 case DTRACESPEC_ACTIVEMANY: 2240 return (buf); 2241 2242 case DTRACESPEC_ACTIVE: 2243 new = DTRACESPEC_ACTIVEONE; 2244 break; 2245 2246 default: 2247 ASSERT(0); 2248 } 2249 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2250 current, new) != current); 2251 2252 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 2253 return (buf); 2254 } 2255 2256 /* 2257 * This function implements the DIF emulator's variable lookups. The emulator 2258 * passes a reserved variable identifier and optional built-in array index. 2259 */ 2260 static uint64_t 2261 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 2262 uint64_t ndx) 2263 { 2264 /* 2265 * If we're accessing one of the uncached arguments, we'll turn this 2266 * into a reference in the args array. 2267 */ 2268 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 2269 ndx = v - DIF_VAR_ARG0; 2270 v = DIF_VAR_ARGS; 2271 } 2272 2273 switch (v) { 2274 case DIF_VAR_ARGS: 2275 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 2276 if (ndx >= sizeof (mstate->dtms_arg) / 2277 sizeof (mstate->dtms_arg[0])) { 2278 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2279 dtrace_provider_t *pv; 2280 uint64_t val; 2281 2282 pv = mstate->dtms_probe->dtpr_provider; 2283 if (pv->dtpv_pops.dtps_getargval != NULL) 2284 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 2285 mstate->dtms_probe->dtpr_id, 2286 mstate->dtms_probe->dtpr_arg, ndx, aframes); 2287 else 2288 val = dtrace_getarg(ndx, aframes); 2289 2290 /* 2291 * This is regrettably required to keep the compiler 2292 * from tail-optimizing the call to dtrace_getarg(). 2293 * The condition always evaluates to true, but the 2294 * compiler has no way of figuring that out a priori. 2295 * (None of this would be necessary if the compiler 2296 * could be relied upon to _always_ tail-optimize 2297 * the call to dtrace_getarg() -- but it can't.) 
2298 */ 2299 if (mstate->dtms_probe != NULL) 2300 return (val); 2301 2302 ASSERT(0); 2303 } 2304 2305 return (mstate->dtms_arg[ndx]); 2306 2307 case DIF_VAR_UREGS: { 2308 klwp_t *lwp; 2309 2310 if (!dtrace_priv_proc(state)) 2311 return (0); 2312 2313 if ((lwp = curthread->t_lwp) == NULL) { 2314 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2315 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = NULL; 2316 return (0); 2317 } 2318 2319 return (dtrace_getreg(lwp->lwp_regs, ndx)); 2320 } 2321 2322 case DIF_VAR_CURTHREAD: 2323 if (!dtrace_priv_kernel(state)) 2324 return (0); 2325 return ((uint64_t)(uintptr_t)curthread); 2326 2327 case DIF_VAR_TIMESTAMP: 2328 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 2329 mstate->dtms_timestamp = dtrace_gethrtime(); 2330 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 2331 } 2332 return (mstate->dtms_timestamp); 2333 2334 case DIF_VAR_VTIMESTAMP: 2335 ASSERT(dtrace_vtime_references != 0); 2336 return (curthread->t_dtrace_vtime); 2337 2338 case DIF_VAR_WALLTIMESTAMP: 2339 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 2340 mstate->dtms_walltimestamp = dtrace_gethrestime(); 2341 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 2342 } 2343 return (mstate->dtms_walltimestamp); 2344 2345 case DIF_VAR_IPL: 2346 if (!dtrace_priv_kernel(state)) 2347 return (0); 2348 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 2349 mstate->dtms_ipl = dtrace_getipl(); 2350 mstate->dtms_present |= DTRACE_MSTATE_IPL; 2351 } 2352 return (mstate->dtms_ipl); 2353 2354 case DIF_VAR_EPID: 2355 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 2356 return (mstate->dtms_epid); 2357 2358 case DIF_VAR_ID: 2359 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2360 return (mstate->dtms_probe->dtpr_id); 2361 2362 case DIF_VAR_STACKDEPTH: 2363 if (!dtrace_priv_kernel(state)) 2364 return (0); 2365 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 2366 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2367 2368 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 2369 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 2370 } 2371 return (mstate->dtms_stackdepth); 2372 2373 case DIF_VAR_USTACKDEPTH: 2374 if (!dtrace_priv_proc(state)) 2375 return (0); 2376 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 2377 /* 2378 * See comment in DIF_VAR_PID. 2379 */ 2380 if (DTRACE_ANCHORED(mstate->dtms_probe) && 2381 CPU_ON_INTR(CPU)) { 2382 mstate->dtms_ustackdepth = 0; 2383 } else { 2384 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2385 mstate->dtms_ustackdepth = 2386 dtrace_getustackdepth(); 2387 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2388 } 2389 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 2390 } 2391 return (mstate->dtms_ustackdepth); 2392 2393 case DIF_VAR_CALLER: 2394 if (!dtrace_priv_kernel(state)) 2395 return (0); 2396 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 2397 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2398 2399 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 2400 /* 2401 * If this is an unanchored probe, we are 2402 * required to go through the slow path: 2403 * dtrace_caller() only guarantees correct 2404 * results for anchored probes. 2405 */ 2406 pc_t caller[2]; 2407 2408 dtrace_getpcstack(caller, 2, aframes, 2409 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 2410 mstate->dtms_caller = caller[1]; 2411 } else if ((mstate->dtms_caller = 2412 dtrace_caller(aframes)) == -1) { 2413 /* 2414 * We have failed to do this the quick way; 2415 * we must resort to the slower approach of 2416 * calling dtrace_getpcstack(). 
                 */
                pc_t caller;

                dtrace_getpcstack(&caller, 1, aframes, NULL);
                mstate->dtms_caller = caller;
            }

            mstate->dtms_present |= DTRACE_MSTATE_CALLER;
        }
        return (mstate->dtms_caller);

    case DIF_VAR_UCALLER:
        if (!dtrace_priv_proc(state))
            return (0);

        if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) {
            uint64_t ustack[3];

            /*
             * dtrace_getupcstack() fills in the first uint64_t
             * with the current PID.  The second uint64_t will
             * be the program counter at user-level.  The third
             * uint64_t will contain the caller, which is what
             * we're after.
             */
            ustack[2] = NULL;
            dtrace_getupcstack(ustack, 3);
            mstate->dtms_ucaller = ustack[2];
            mstate->dtms_present |= DTRACE_MSTATE_UCALLER;
        }

        return (mstate->dtms_ucaller);

    case DIF_VAR_PROBEPROV:
        ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
        return ((uint64_t)(uintptr_t)
            mstate->dtms_probe->dtpr_provider->dtpv_name);

    case DIF_VAR_PROBEMOD:
        ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
        return ((uint64_t)(uintptr_t)
            mstate->dtms_probe->dtpr_mod);

    case DIF_VAR_PROBEFUNC:
        ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
        return ((uint64_t)(uintptr_t)
            mstate->dtms_probe->dtpr_func);

    case DIF_VAR_PROBENAME:
        ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
        return ((uint64_t)(uintptr_t)
            mstate->dtms_probe->dtpr_name);

    case DIF_VAR_PID:
        if (!dtrace_priv_proc(state))
            return (0);

        /*
         * Note that we are assuming that an unanchored probe is
         * always due to a high-level interrupt.  (And we're assuming
         * that there is only a single high level interrupt.)
         */
        if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
            return (pid0.pid_id);

        /*
         * It is always safe to dereference one's own t_procp pointer:
         * it always points to a valid, allocated proc structure.
         * Further, it is always safe to dereference the p_pidp member
         * of one's own proc structure.  (These are truisms because
         * threads and processes don't clean up their own state --
         * they leave that task to whomever reaps them.)
         */
        return ((uint64_t)curthread->t_procp->p_pidp->pid_id);

    case DIF_VAR_PPID:
        if (!dtrace_priv_proc(state))
            return (0);

        /*
         * See comment in DIF_VAR_PID.
         */
        if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
            return (pid0.pid_id);

        /*
         * It is always safe to dereference one's own t_procp pointer:
         * it always points to a valid, allocated proc structure.
         * (This is true because threads don't clean up their own
         * state -- they leave that task to whomever reaps them.)
         */
        return ((uint64_t)curthread->t_procp->p_ppid);

    case DIF_VAR_TID:
        /*
         * See comment in DIF_VAR_PID.
         */
        if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
            return (0);

        return ((uint64_t)curthread->t_tid);

    case DIF_VAR_EXECNAME:
        if (!dtrace_priv_proc(state))
            return (0);

        /*
         * See comment in DIF_VAR_PID.
         */
        if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
            return ((uint64_t)(uintptr_t)p0.p_user.u_comm);

        /*
         * It is always safe to dereference one's own t_procp pointer:
         * it always points to a valid, allocated proc structure.
2532 * (This is true because threads don't clean up their own 2533 * state -- they leave that task to whomever reaps them.) 2534 */ 2535 return ((uint64_t)(uintptr_t) 2536 curthread->t_procp->p_user.u_comm); 2537 2538 case DIF_VAR_ZONENAME: 2539 if (!dtrace_priv_proc(state)) 2540 return (0); 2541 2542 /* 2543 * See comment in DIF_VAR_PID. 2544 */ 2545 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2546 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 2547 2548 /* 2549 * It is always safe to dereference one's own t_procp pointer: 2550 * it always points to a valid, allocated proc structure. 2551 * (This is true because threads don't clean up their own 2552 * state -- they leave that task to whomever reaps them.) 2553 */ 2554 return ((uint64_t)(uintptr_t) 2555 curthread->t_procp->p_zone->zone_name); 2556 2557 case DIF_VAR_UID: 2558 if (!dtrace_priv_proc(state)) 2559 return (0); 2560 2561 /* 2562 * See comment in DIF_VAR_PID. 2563 */ 2564 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2565 return ((uint64_t)p0.p_cred->cr_uid); 2566 2567 /* 2568 * It is always safe to dereference one's own t_procp pointer: 2569 * it always points to a valid, allocated proc structure. 2570 * (This is true because threads don't clean up their own 2571 * state -- they leave that task to whomever reaps them.) 2572 * 2573 * Additionally, it is safe to dereference one's own process 2574 * credential, since this is never NULL after process birth. 2575 */ 2576 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 2577 2578 case DIF_VAR_GID: 2579 if (!dtrace_priv_proc(state)) 2580 return (0); 2581 2582 /* 2583 * See comment in DIF_VAR_PID. 2584 */ 2585 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2586 return ((uint64_t)p0.p_cred->cr_gid); 2587 2588 /* 2589 * It is always safe to dereference one's own t_procp pointer: 2590 * it always points to a valid, allocated proc structure. 2591 * (This is true because threads don't clean up their own 2592 * state -- they leave that task to whomever reaps them.) 2593 * 2594 * Additionally, it is safe to dereference one's own process 2595 * credential, since this is never NULL after process birth. 2596 */ 2597 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 2598 2599 case DIF_VAR_ERRNO: { 2600 klwp_t *lwp; 2601 if (!dtrace_priv_proc(state)) 2602 return (0); 2603 2604 /* 2605 * See comment in DIF_VAR_PID. 2606 */ 2607 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2608 return (0); 2609 2610 /* 2611 * It is always safe to dereference one's own t_lwp pointer in 2612 * the event that this pointer is non-NULL. (This is true 2613 * because threads and lwps don't clean up their own state -- 2614 * they leave that task to whomever reaps them.) 2615 */ 2616 if ((lwp = curthread->t_lwp) == NULL) 2617 return (0); 2618 2619 return ((uint64_t)lwp->lwp_errno); 2620 } 2621 default: 2622 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2623 return (0); 2624 } 2625 } 2626 2627 /* 2628 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 2629 * Notice that we don't bother validating the proper number of arguments or 2630 * their types in the tuple stack. This isn't needed because all argument 2631 * interpretation is safe because of our load safety -- the worst that can 2632 * happen is that a bogus program can obtain bogus results. 
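/*
 * Illustrative sketch (an assumption, not part of the framework): the
 * "load safety" just described, in its simplest form.  Probe-context
 * loads go through fault-tolerant accessors like dtrace_load8(), and
 * every loop is bounded, so a bogus address or an unterminated string
 * yields a bogus -- but safe -- result.  This mirrors the contract of
 * dtrace_strlen(); the function name here is invented for illustration.
 */
#if 0
static size_t
example_safe_strlen(uintptr_t addr, size_t lim)
{
    size_t len;

    for (len = 0; len < lim; len++) {
        if (dtrace_load8(addr + len) == '\0')
            break;
    }

    return (len);
}
#endif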
2633 */ 2634 static void 2635 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 2636 dtrace_key_t *tupregs, int nargs, 2637 dtrace_mstate_t *mstate, dtrace_state_t *state) 2638 { 2639 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 2640 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 2641 2642 union { 2643 mutex_impl_t mi; 2644 uint64_t mx; 2645 } m; 2646 2647 union { 2648 krwlock_t ri; 2649 uintptr_t rw; 2650 } r; 2651 2652 switch (subr) { 2653 case DIF_SUBR_RAND: 2654 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 2655 break; 2656 2657 case DIF_SUBR_MUTEX_OWNED: 2658 m.mx = dtrace_load64(tupregs[0].dttk_value); 2659 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 2660 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 2661 else 2662 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 2663 break; 2664 2665 case DIF_SUBR_MUTEX_OWNER: 2666 m.mx = dtrace_load64(tupregs[0].dttk_value); 2667 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 2668 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 2669 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 2670 else 2671 regs[rd] = 0; 2672 break; 2673 2674 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 2675 m.mx = dtrace_load64(tupregs[0].dttk_value); 2676 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 2677 break; 2678 2679 case DIF_SUBR_MUTEX_TYPE_SPIN: 2680 m.mx = dtrace_load64(tupregs[0].dttk_value); 2681 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 2682 break; 2683 2684 case DIF_SUBR_RW_READ_HELD: { 2685 uintptr_t tmp; 2686 2687 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2688 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 2689 break; 2690 } 2691 2692 case DIF_SUBR_RW_WRITE_HELD: 2693 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2694 regs[rd] = _RW_WRITE_HELD(&r.ri); 2695 break; 2696 2697 case DIF_SUBR_RW_ISWRITER: 2698 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2699 regs[rd] = _RW_ISWRITER(&r.ri); 2700 break; 2701 2702 case DIF_SUBR_BCOPY: { 2703 /* 2704 * We need to be sure that the destination is in the scratch 2705 * region -- no other region is allowed. 2706 */ 2707 uintptr_t src = tupregs[0].dttk_value; 2708 uintptr_t dest = tupregs[1].dttk_value; 2709 size_t size = tupregs[2].dttk_value; 2710 2711 if (!dtrace_inscratch(dest, size, mstate)) { 2712 *flags |= CPU_DTRACE_BADADDR; 2713 *illval = regs[rd]; 2714 break; 2715 } 2716 2717 dtrace_bcopy((void *)src, (void *)dest, size); 2718 break; 2719 } 2720 2721 case DIF_SUBR_ALLOCA: 2722 case DIF_SUBR_COPYIN: { 2723 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 2724 uint64_t size = 2725 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 2726 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 2727 2728 /* 2729 * This action doesn't require any credential checks since 2730 * probes will not activate in user contexts to which the 2731 * enabling user does not have permissions. 
         */
        if (mstate->dtms_scratch_ptr + scratch_size >
            mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
            DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
            regs[rd] = NULL;
            break;
        }

        if (subr == DIF_SUBR_COPYIN) {
            DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
            dtrace_copyin(tupregs[0].dttk_value, dest, size);
            DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
        }

        mstate->dtms_scratch_ptr += scratch_size;
        regs[rd] = dest;
        break;
    }

    case DIF_SUBR_COPYINTO: {
        uint64_t size = tupregs[1].dttk_value;
        uintptr_t dest = tupregs[2].dttk_value;

        /*
         * This action doesn't require any credential checks since
         * probes will not activate in user contexts to which the
         * enabling user does not have permissions.
         */
        if (!dtrace_inscratch(dest, size, mstate)) {
            *flags |= CPU_DTRACE_BADADDR;
            *illval = regs[rd];
            break;
        }

        DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
        dtrace_copyin(tupregs[0].dttk_value, dest, size);
        DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
        break;
    }

    case DIF_SUBR_COPYINSTR: {
        uintptr_t dest = mstate->dtms_scratch_ptr;
        uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];

        if (nargs > 1 && tupregs[1].dttk_value < size)
            size = tupregs[1].dttk_value + 1;

        /*
         * This action doesn't require any credential checks since
         * probes will not activate in user contexts to which the
         * enabling user does not have permissions.
         */
        if (mstate->dtms_scratch_ptr + size >
            mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
            DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
            regs[rd] = NULL;
            break;
        }

        DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
        dtrace_copyinstr(tupregs[0].dttk_value, dest, size);
        DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

        ((char *)dest)[size - 1] = '\0';
        mstate->dtms_scratch_ptr += size;
        regs[rd] = dest;
        break;
    }

    case DIF_SUBR_MSGSIZE:
    case DIF_SUBR_MSGDSIZE: {
        uintptr_t baddr = tupregs[0].dttk_value, daddr;
        uintptr_t wptr, rptr;
        size_t count = 0;
        int cont = 0;

        while (baddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
            wptr = dtrace_loadptr(baddr +
                offsetof(mblk_t, b_wptr));

            rptr = dtrace_loadptr(baddr +
                offsetof(mblk_t, b_rptr));

            if (wptr < rptr) {
                *flags |= CPU_DTRACE_BADADDR;
                *illval = tupregs[0].dttk_value;
                break;
            }

            daddr = dtrace_loadptr(baddr +
                offsetof(mblk_t, b_datap));

            baddr = dtrace_loadptr(baddr +
                offsetof(mblk_t, b_cont));

            /*
             * We want to protect against denial-of-service here,
             * so we're only going to search the list for
             * dtrace_msgdsize_max mblks.
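/*
 * Illustrative sketch (an assumption, not part of the framework): the
 * bounded list walk used below for mblk chains.  Any list reachable from
 * probe context may be corrupt or circular, so the walk gives up after a
 * fixed number of elements rather than trusting the list to terminate.
 * The node type and function name are invented for illustration.
 */
#if 0
struct example_node {
    struct example_node *next;
    size_t len;
};

static ssize_t
example_bounded_count(struct example_node *np, int max)
{
    ssize_t count = 0;
    int n = 0;

    for (; np != NULL; np = np->next) {
        if (n++ > max)
            return (-1);    /* too long: assume corruption */

        count += np->len;
    }

    return (count);
}
#endif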
2831 */ 2832 if (cont++ > dtrace_msgdsize_max) { 2833 *flags |= CPU_DTRACE_ILLOP; 2834 break; 2835 } 2836 2837 if (subr == DIF_SUBR_MSGDSIZE) { 2838 if (dtrace_load8(daddr + 2839 offsetof(dblk_t, db_type)) != M_DATA) 2840 continue; 2841 } 2842 2843 count += wptr - rptr; 2844 } 2845 2846 if (!(*flags & CPU_DTRACE_FAULT)) 2847 regs[rd] = count; 2848 2849 break; 2850 } 2851 2852 case DIF_SUBR_PROGENYOF: { 2853 pid_t pid = tupregs[0].dttk_value; 2854 proc_t *p; 2855 int rval = 0; 2856 2857 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2858 2859 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 2860 if (p->p_pidp->pid_id == pid) { 2861 rval = 1; 2862 break; 2863 } 2864 } 2865 2866 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2867 2868 regs[rd] = rval; 2869 break; 2870 } 2871 2872 case DIF_SUBR_SPECULATION: 2873 regs[rd] = dtrace_speculation(state); 2874 break; 2875 2876 case DIF_SUBR_COPYOUT: { 2877 uintptr_t kaddr = tupregs[0].dttk_value; 2878 uintptr_t uaddr = tupregs[1].dttk_value; 2879 uint64_t size = tupregs[2].dttk_value; 2880 2881 if (!dtrace_destructive_disallow && 2882 dtrace_priv_proc_control(state) && 2883 !dtrace_istoxic(kaddr, size)) { 2884 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2885 dtrace_copyout(kaddr, uaddr, size); 2886 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2887 } 2888 break; 2889 } 2890 2891 case DIF_SUBR_COPYOUTSTR: { 2892 uintptr_t kaddr = tupregs[0].dttk_value; 2893 uintptr_t uaddr = tupregs[1].dttk_value; 2894 uint64_t size = tupregs[2].dttk_value; 2895 2896 if (!dtrace_destructive_disallow && 2897 dtrace_priv_proc_control(state) && 2898 !dtrace_istoxic(kaddr, size)) { 2899 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2900 dtrace_copyoutstr(kaddr, uaddr, size); 2901 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2902 } 2903 break; 2904 } 2905 2906 case DIF_SUBR_STRLEN: 2907 regs[rd] = dtrace_strlen((char *)(uintptr_t) 2908 tupregs[0].dttk_value, 2909 state->dts_options[DTRACEOPT_STRSIZE]); 2910 break; 2911 2912 case DIF_SUBR_STRCHR: 2913 case DIF_SUBR_STRRCHR: { 2914 /* 2915 * We're going to iterate over the string looking for the 2916 * specified character. We will iterate until we have reached 2917 * the string length or we have found the character. If this 2918 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 2919 * of the specified character instead of the first. 2920 */ 2921 uintptr_t addr = tupregs[0].dttk_value; 2922 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 2923 char c, target = (char)tupregs[1].dttk_value; 2924 2925 for (regs[rd] = NULL; addr < limit; addr++) { 2926 if ((c = dtrace_load8(addr)) == target) { 2927 regs[rd] = addr; 2928 2929 if (subr == DIF_SUBR_STRCHR) 2930 break; 2931 } 2932 2933 if (c == '\0') 2934 break; 2935 } 2936 2937 break; 2938 } 2939 2940 case DIF_SUBR_STRSTR: 2941 case DIF_SUBR_INDEX: 2942 case DIF_SUBR_RINDEX: { 2943 /* 2944 * We're going to iterate over the string looking for the 2945 * specified string. We will iterate until we have reached 2946 * the string length or we have found the string. (Yes, this 2947 * is done in the most naive way possible -- but considering 2948 * that the string we're searching for is likely to be 2949 * relatively short, the complexity of Rabin-Karp or similar 2950 * hardly seems merited.) 
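/*
 * Illustrative sketch (an assumption, not part of the framework): the
 * naive substring scan just described, in plain userland C.  Every
 * position is tried in turn; this is quadratic in the worst case, but
 * DTrace strings are bounded by the strsize option, so the simplicity
 * wins.  The function name is invented for illustration.
 */
#if 0
static const char *
example_naive_strstr(const char *s, const char *sub)
{
    size_t sublen = strlen(sub);

    for (; *s != '\0'; s++) {
        if (strncmp(s, sub, sublen) == 0)
            return (s);     /* match at this position */
    }

    return (sublen == 0 ? s : NULL);
}
#endif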
2951 */ 2952 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 2953 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 2954 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2955 size_t len = dtrace_strlen(addr, size); 2956 size_t sublen = dtrace_strlen(substr, size); 2957 char *limit = addr + len, *orig = addr; 2958 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 2959 int inc = 1; 2960 2961 regs[rd] = notfound; 2962 2963 /* 2964 * strstr() and index()/rindex() have similar semantics if 2965 * both strings are the empty string: strstr() returns a 2966 * pointer to the (empty) string, and index() and rindex() 2967 * both return index 0 (regardless of any position argument). 2968 */ 2969 if (sublen == 0 && len == 0) { 2970 if (subr == DIF_SUBR_STRSTR) 2971 regs[rd] = (uintptr_t)addr; 2972 else 2973 regs[rd] = 0; 2974 break; 2975 } 2976 2977 if (subr != DIF_SUBR_STRSTR) { 2978 if (subr == DIF_SUBR_RINDEX) { 2979 limit = orig - 1; 2980 addr += len; 2981 inc = -1; 2982 } 2983 2984 /* 2985 * Both index() and rindex() take an optional position 2986 * argument that denotes the starting position. 2987 */ 2988 if (nargs == 3) { 2989 int64_t pos = (int64_t)tupregs[2].dttk_value; 2990 2991 /* 2992 * If the position argument to index() is 2993 * negative, Perl implicitly clamps it at 2994 * zero. This semantic is a little surprising 2995 * given the special meaning of negative 2996 * positions to similar Perl functions like 2997 * substr(), but it appears to reflect a 2998 * notion that index() can start from a 2999 * negative index and increment its way up to 3000 * the string. Given this notion, Perl's 3001 * rindex() is at least self-consistent in 3002 * that it implicitly clamps positions greater 3003 * than the string length to be the string 3004 * length. Where Perl completely loses 3005 * coherence, however, is when the specified 3006 * substring is the empty string (""). In 3007 * this case, even if the position is 3008 * negative, rindex() returns 0 -- and even if 3009 * the position is greater than the length, 3010 * index() returns the string length. These 3011 * semantics violate the notion that index() 3012 * should never return a value less than the 3013 * specified position and that rindex() should 3014 * never return a value greater than the 3015 * specified position. (One assumes that 3016 * these semantics are artifacts of Perl's 3017 * implementation and not the results of 3018 * deliberate design -- it beggars belief that 3019 * even Larry Wall could desire such oddness.) 3020 * While in the abstract one would wish for 3021 * consistent position semantics across 3022 * substr(), index() and rindex() -- or at the 3023 * very least self-consistent position 3024 * semantics for index() and rindex() -- we 3025 * instead opt to keep with the extant Perl 3026 * semantics, in all their broken glory. (Do 3027 * we have more desire to maintain Perl's 3028 * semantics than Perl does? Probably.) 
3029 */ 3030 if (subr == DIF_SUBR_RINDEX) { 3031 if (pos < 0) { 3032 if (sublen == 0) 3033 regs[rd] = 0; 3034 break; 3035 } 3036 3037 if (pos > len) 3038 pos = len; 3039 } else { 3040 if (pos < 0) 3041 pos = 0; 3042 3043 if (pos >= len) { 3044 if (sublen == 0) 3045 regs[rd] = len; 3046 break; 3047 } 3048 } 3049 3050 addr = orig + pos; 3051 } 3052 } 3053 3054 for (regs[rd] = notfound; addr != limit; addr += inc) { 3055 if (dtrace_strncmp(addr, substr, sublen) == 0) { 3056 if (subr != DIF_SUBR_STRSTR) { 3057 /* 3058 * As D index() and rindex() are 3059 * modeled on Perl (and not on awk), 3060 * we return a zero-based (and not a 3061 * one-based) index. (For you Perl 3062 * weenies: no, we're not going to add 3063 * $[ -- and shouldn't you be at a con 3064 * or something?) 3065 */ 3066 regs[rd] = (uintptr_t)(addr - orig); 3067 break; 3068 } 3069 3070 ASSERT(subr == DIF_SUBR_STRSTR); 3071 regs[rd] = (uintptr_t)addr; 3072 break; 3073 } 3074 } 3075 3076 break; 3077 } 3078 3079 case DIF_SUBR_STRTOK: { 3080 uintptr_t addr = tupregs[0].dttk_value; 3081 uintptr_t tokaddr = tupregs[1].dttk_value; 3082 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3083 uintptr_t limit, toklimit = tokaddr + size; 3084 uint8_t c, tokmap[32]; /* 256 / 8 */ 3085 char *dest = (char *)mstate->dtms_scratch_ptr; 3086 int i; 3087 3088 if (mstate->dtms_scratch_ptr + size > 3089 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3090 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3091 regs[rd] = NULL; 3092 break; 3093 } 3094 3095 if (addr == NULL) { 3096 /* 3097 * If the address specified is NULL, we use our saved 3098 * strtok pointer from the mstate. Note that this 3099 * means that the saved strtok pointer is _only_ 3100 * valid within multiple enablings of the same probe -- 3101 * it behaves like an implicit clause-local variable. 3102 */ 3103 addr = mstate->dtms_strtok; 3104 } 3105 3106 /* 3107 * First, zero the token map, and then process the token 3108 * string -- setting a bit in the map for every character 3109 * found in the token string. 3110 */ 3111 for (i = 0; i < sizeof (tokmap); i++) 3112 tokmap[i] = 0; 3113 3114 for (; tokaddr < toklimit; tokaddr++) { 3115 if ((c = dtrace_load8(tokaddr)) == '\0') 3116 break; 3117 3118 ASSERT((c >> 3) < sizeof (tokmap)); 3119 tokmap[c >> 3] |= (1 << (c & 0x7)); 3120 } 3121 3122 for (limit = addr + size; addr < limit; addr++) { 3123 /* 3124 * We're looking for a character that is _not_ contained 3125 * in the token string. 3126 */ 3127 if ((c = dtrace_load8(addr)) == '\0') 3128 break; 3129 3130 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 3131 break; 3132 } 3133 3134 if (c == '\0') { 3135 /* 3136 * We reached the end of the string without finding 3137 * any character that was not in the token string. 3138 * We return NULL in this case, and we set the saved 3139 * address to NULL as well. 3140 */ 3141 regs[rd] = NULL; 3142 mstate->dtms_strtok = NULL; 3143 break; 3144 } 3145 3146 /* 3147 * From here on, we're copying into the destination string. 
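/*
 * Illustrative sketch (an assumption, not part of the framework): the
 * 256-bit membership map that strtok() builds above -- one bit per
 * possible byte value, so the inner loops can test "is this character a
 * delimiter?" with a shift and a mask.  The helper names are invented
 * for illustration.
 */
#if 0
static void
example_tokmap_set(uint8_t tokmap[32], uint8_t c)
{
    tokmap[c >> 3] |= (1 << (c & 0x7));
}

static int
example_tokmap_test(const uint8_t tokmap[32], uint8_t c)
{
    return (tokmap[c >> 3] & (1 << (c & 0x7)));
}
#endif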
3148 */ 3149 for (i = 0; addr < limit && i < size - 1; addr++) { 3150 if ((c = dtrace_load8(addr)) == '\0') 3151 break; 3152 3153 if (tokmap[c >> 3] & (1 << (c & 0x7))) 3154 break; 3155 3156 ASSERT(i < size); 3157 dest[i++] = c; 3158 } 3159 3160 ASSERT(i < size); 3161 dest[i] = '\0'; 3162 regs[rd] = (uintptr_t)dest; 3163 mstate->dtms_scratch_ptr += size; 3164 mstate->dtms_strtok = addr; 3165 break; 3166 } 3167 3168 case DIF_SUBR_SUBSTR: { 3169 uintptr_t s = tupregs[0].dttk_value; 3170 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3171 char *d = (char *)mstate->dtms_scratch_ptr; 3172 int64_t index = (int64_t)tupregs[1].dttk_value; 3173 int64_t remaining = (int64_t)tupregs[2].dttk_value; 3174 size_t len = dtrace_strlen((char *)s, size); 3175 int64_t i = 0; 3176 3177 if (nargs <= 2) 3178 remaining = (int64_t)size; 3179 3180 if (mstate->dtms_scratch_ptr + size > 3181 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3182 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3183 regs[rd] = NULL; 3184 break; 3185 } 3186 3187 if (index < 0) { 3188 index += len; 3189 3190 if (index < 0 && index + remaining > 0) { 3191 remaining += index; 3192 index = 0; 3193 } 3194 } 3195 3196 if (index >= len || index < 0) 3197 index = len; 3198 3199 for (d[0] = '\0'; remaining > 0; remaining--) { 3200 if ((d[i++] = dtrace_load8(s++ + index)) == '\0') 3201 break; 3202 3203 if (i == size) { 3204 d[i - 1] = '\0'; 3205 break; 3206 } 3207 } 3208 3209 mstate->dtms_scratch_ptr += size; 3210 regs[rd] = (uintptr_t)d; 3211 break; 3212 } 3213 3214 case DIF_SUBR_GETMAJOR: 3215 #ifdef _LP64 3216 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 3217 #else 3218 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 3219 #endif 3220 break; 3221 3222 case DIF_SUBR_GETMINOR: 3223 #ifdef _LP64 3224 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 3225 #else 3226 regs[rd] = tupregs[0].dttk_value & MAXMIN; 3227 #endif 3228 break; 3229 3230 case DIF_SUBR_DDI_PATHNAME: { 3231 /* 3232 * This one is a galactic mess. We are going to roughly 3233 * emulate ddi_pathname(), but it's made more complicated 3234 * by the fact that we (a) want to include the minor name and 3235 * (b) must proceed iteratively instead of recursively. 3236 */ 3237 uintptr_t dest = mstate->dtms_scratch_ptr; 3238 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3239 char *start = (char *)dest, *end = start + size - 1; 3240 uintptr_t daddr = tupregs[0].dttk_value; 3241 int64_t minor = (int64_t)tupregs[1].dttk_value; 3242 char *s; 3243 int i, len, depth = 0; 3244 3245 if (size == 0 || mstate->dtms_scratch_ptr + size > 3246 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3247 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3248 regs[rd] = NULL; 3249 break; 3250 } 3251 3252 *end = '\0'; 3253 3254 /* 3255 * We want to have a name for the minor. In order to do this, 3256 * we need to walk the minor list from the devinfo. We want 3257 * to be sure that we don't infinitely walk a circular list, 3258 * so we check for circularity by sending a scout pointer 3259 * ahead two elements for every element that we iterate over; 3260 * if the list is circular, these will ultimately point to the 3261 * same element. You may recognize this little trick as the 3262 * answer to a stupid interview question -- one that always 3263 * seems to be asked by those who had to have it laboriously 3264 * explained to them, and who can't even concisely describe 3265 * the conditions under which one would be forced to resort to 3266 * this technique. 
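/*
 * Illustrative sketch (an assumption, not part of the framework): the
 * circularity check described below for the minor-node walk, reduced to
 * its essence.  A scout pointer advances two links for every one the
 * walker takes; if the list is circular, the two must eventually meet,
 * and the walk can be abandoned.  The actual code interleaves this with
 * the minor-number match; the types and names here are invented.
 */
#if 0
struct example_link {
    struct example_link *next;
};

static int
example_is_circular(struct example_link *head)
{
    struct example_link *walk = head;
    struct example_link *scout = (head != NULL) ? head->next : NULL;

    while (walk != NULL && scout != NULL) {
        if (scout == walk)
            return (1);     /* the scout lapped the walker */

        walk = walk->next;
        scout = scout->next;

        if (scout != NULL)
            scout = scout->next;
    }

    return (0);             /* a NULL was reached: the list terminates */
}
#endif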
         * Needless to say, those conditions are found here -- and
         * probably only here.  Is this the only use of this infamous
         * trick in shipping, production code?  If it isn't, it probably
         * should be...
         */
        if (minor != -1) {
            uintptr_t maddr = dtrace_loadptr(daddr +
                offsetof(struct dev_info, devi_minor));

            uintptr_t next = offsetof(struct ddi_minor_data, next);
            uintptr_t name = offsetof(struct ddi_minor_data,
                d_minor) + offsetof(struct ddi_minor, name);
            uintptr_t dev = offsetof(struct ddi_minor_data,
                d_minor) + offsetof(struct ddi_minor, dev);
            uintptr_t scout;

            if (maddr != NULL)
                scout = dtrace_loadptr(maddr + next);

            while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
                uint64_t m;
#ifdef _LP64
                m = dtrace_load64(maddr + dev) & MAXMIN64;
#else
                m = dtrace_load32(maddr + dev) & MAXMIN;
#endif
                if (m != minor) {
                    maddr = dtrace_loadptr(maddr + next);

                    if (scout == NULL)
                        continue;

                    scout = dtrace_loadptr(scout + next);

                    if (scout == NULL)
                        continue;

                    scout = dtrace_loadptr(scout + next);

                    if (scout == NULL)
                        continue;

                    if (scout == maddr) {
                        *flags |= CPU_DTRACE_ILLOP;
                        break;
                    }

                    continue;
                }

                /*
                 * We have the minor data.  Now we need to
                 * copy the minor's name into the end of the
                 * pathname.
                 */
                s = (char *)dtrace_loadptr(maddr + name);
                len = dtrace_strlen(s, size);

                if (*flags & CPU_DTRACE_FAULT)
                    break;

                if (len != 0) {
                    if ((end -= (len + 1)) < start)
                        break;

                    *end = ':';
                }

                for (i = 1; i <= len; i++)
                    end[i] = dtrace_load8((uintptr_t)s++);
                break;
            }
        }

        while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
            ddi_node_state_t devi_state;

            devi_state = dtrace_load32(daddr +
                offsetof(struct dev_info, devi_node_state));

            if (*flags & CPU_DTRACE_FAULT)
                break;

            if (devi_state >= DS_INITIALIZED) {
                s = (char *)dtrace_loadptr(daddr +
                    offsetof(struct dev_info, devi_addr));
                len = dtrace_strlen(s, size);

                if (*flags & CPU_DTRACE_FAULT)
                    break;

                if (len != 0) {
                    if ((end -= (len + 1)) < start)
                        break;

                    *end = '@';
                }

                for (i = 1; i <= len; i++)
                    end[i] = dtrace_load8((uintptr_t)s++);
            }

            /*
             * Now for the node name...
             */
            s = (char *)dtrace_loadptr(daddr +
                offsetof(struct dev_info, devi_node_name));

            daddr = dtrace_loadptr(daddr +
                offsetof(struct dev_info, devi_parent));

            /*
             * If our parent is NULL (that is, if we're the root
             * node), we're going to use the special path
             * "devices".
3381 */ 3382 if (daddr == NULL) 3383 s = "devices"; 3384 3385 len = dtrace_strlen(s, size); 3386 if (*flags & CPU_DTRACE_FAULT) 3387 break; 3388 3389 if ((end -= (len + 1)) < start) 3390 break; 3391 3392 for (i = 1; i <= len; i++) 3393 end[i] = dtrace_load8((uintptr_t)s++); 3394 *end = '/'; 3395 3396 if (depth++ > dtrace_devdepth_max) { 3397 *flags |= CPU_DTRACE_ILLOP; 3398 break; 3399 } 3400 } 3401 3402 if (end < start) 3403 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3404 3405 if (daddr == NULL) { 3406 regs[rd] = (uintptr_t)end; 3407 mstate->dtms_scratch_ptr += size; 3408 } 3409 3410 break; 3411 } 3412 3413 case DIF_SUBR_STRJOIN: { 3414 char *d = (char *)mstate->dtms_scratch_ptr; 3415 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3416 uintptr_t s1 = tupregs[0].dttk_value; 3417 uintptr_t s2 = tupregs[1].dttk_value; 3418 int i = 0; 3419 3420 if (mstate->dtms_scratch_ptr + size > 3421 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3422 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3423 regs[rd] = NULL; 3424 break; 3425 } 3426 3427 for (;;) { 3428 if (i >= size) { 3429 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3430 regs[rd] = NULL; 3431 break; 3432 } 3433 3434 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 3435 i--; 3436 break; 3437 } 3438 } 3439 3440 for (;;) { 3441 if (i >= size) { 3442 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3443 regs[rd] = NULL; 3444 break; 3445 } 3446 3447 if ((d[i++] = dtrace_load8(s2++)) == '\0') 3448 break; 3449 } 3450 3451 if (i < size) { 3452 mstate->dtms_scratch_ptr += i; 3453 regs[rd] = (uintptr_t)d; 3454 } 3455 3456 break; 3457 } 3458 3459 case DIF_SUBR_LLTOSTR: { 3460 int64_t i = (int64_t)tupregs[0].dttk_value; 3461 int64_t val = i < 0 ? i * -1 : i; 3462 uint64_t size = 22; /* enough room for 2^64 in decimal */ 3463 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 3464 3465 if (mstate->dtms_scratch_ptr + size > 3466 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3467 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3468 regs[rd] = NULL; 3469 break; 3470 } 3471 3472 for (*end-- = '\0'; val; val /= 10) 3473 *end-- = '0' + (val % 10); 3474 3475 if (i == 0) 3476 *end-- = '0'; 3477 3478 if (i < 0) 3479 *end-- = '-'; 3480 3481 regs[rd] = (uintptr_t)end + 1; 3482 mstate->dtms_scratch_ptr += size; 3483 break; 3484 } 3485 3486 case DIF_SUBR_HTONS: 3487 case DIF_SUBR_NTOHS: 3488 #ifdef _BIG_ENDIAN 3489 regs[rd] = (uint16_t)tupregs[0].dttk_value; 3490 #else 3491 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 3492 #endif 3493 break; 3494 3495 3496 case DIF_SUBR_HTONL: 3497 case DIF_SUBR_NTOHL: 3498 #ifdef _BIG_ENDIAN 3499 regs[rd] = (uint32_t)tupregs[0].dttk_value; 3500 #else 3501 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 3502 #endif 3503 break; 3504 3505 3506 case DIF_SUBR_HTONLL: 3507 case DIF_SUBR_NTOHLL: 3508 #ifdef _BIG_ENDIAN 3509 regs[rd] = (uint64_t)tupregs[0].dttk_value; 3510 #else 3511 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 3512 #endif 3513 break; 3514 3515 3516 case DIF_SUBR_DIRNAME: 3517 case DIF_SUBR_BASENAME: { 3518 char *dest = (char *)mstate->dtms_scratch_ptr; 3519 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3520 uintptr_t src = tupregs[0].dttk_value; 3521 int i, j, len = dtrace_strlen((char *)src, size); 3522 int lastbase = -1, firstbase = -1, lastdir = -1; 3523 int start, end; 3524 3525 if (mstate->dtms_scratch_ptr + size > 3526 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3527 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3528 regs[rd] = NULL; 3529 break; 3530 } 3531 
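/*
 * Illustrative sketch (an assumption, not part of the framework): the
 * scratch-space claim that the string subroutines above and below all
 * repeat inline.  A subroutine that produces a string claims `size'
 * bytes of per-CPU scratch; if the claim would run past the end of
 * scratch, it sets CPU_DTRACE_NOSCRATCH and fails the operation rather
 * than writing anywhere else.  The helper function is invented for
 * illustration; the real code open-codes this check in each case.
 */
#if 0
static char *
example_scratch_alloc(dtrace_mstate_t *mstate, size_t size)
{
    char *dest = (char *)mstate->dtms_scratch_ptr;

    if (mstate->dtms_scratch_ptr + size >
        mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
        DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
        return (NULL);
    }

    mstate->dtms_scratch_ptr += size;
    return (dest);
}
#endif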
3532 /* 3533 * The basename and dirname for a zero-length string is 3534 * defined to be "." 3535 */ 3536 if (len == 0) { 3537 len = 1; 3538 src = (uintptr_t)"."; 3539 } 3540 3541 /* 3542 * Start from the back of the string, moving back toward the 3543 * front until we see a character that isn't a slash. That 3544 * character is the last character in the basename. 3545 */ 3546 for (i = len - 1; i >= 0; i--) { 3547 if (dtrace_load8(src + i) != '/') 3548 break; 3549 } 3550 3551 if (i >= 0) 3552 lastbase = i; 3553 3554 /* 3555 * Starting from the last character in the basename, move 3556 * towards the front until we find a slash. The character 3557 * that we processed immediately before that is the first 3558 * character in the basename. 3559 */ 3560 for (; i >= 0; i--) { 3561 if (dtrace_load8(src + i) == '/') 3562 break; 3563 } 3564 3565 if (i >= 0) 3566 firstbase = i + 1; 3567 3568 /* 3569 * Now keep going until we find a non-slash character. That 3570 * character is the last character in the dirname. 3571 */ 3572 for (; i >= 0; i--) { 3573 if (dtrace_load8(src + i) != '/') 3574 break; 3575 } 3576 3577 if (i >= 0) 3578 lastdir = i; 3579 3580 ASSERT(!(lastbase == -1 && firstbase != -1)); 3581 ASSERT(!(firstbase == -1 && lastdir != -1)); 3582 3583 if (lastbase == -1) { 3584 /* 3585 * We didn't find a non-slash character. We know that 3586 * the length is non-zero, so the whole string must be 3587 * slashes. In either the dirname or the basename 3588 * case, we return '/'. 3589 */ 3590 ASSERT(firstbase == -1); 3591 firstbase = lastbase = lastdir = 0; 3592 } 3593 3594 if (firstbase == -1) { 3595 /* 3596 * The entire string consists only of a basename 3597 * component. If we're looking for dirname, we need 3598 * to change our string to be just "."; if we're 3599 * looking for a basename, we'll just set the first 3600 * character of the basename to be 0. 3601 */ 3602 if (subr == DIF_SUBR_DIRNAME) { 3603 ASSERT(lastdir == -1); 3604 src = (uintptr_t)"."; 3605 lastdir = 0; 3606 } else { 3607 firstbase = 0; 3608 } 3609 } 3610 3611 if (subr == DIF_SUBR_DIRNAME) { 3612 if (lastdir == -1) { 3613 /* 3614 * We know that we have a slash in the name -- 3615 * or lastdir would be set to 0, above. And 3616 * because lastdir is -1, we know that this 3617 * slash must be the first character. (That 3618 * is, the full string must be of the form 3619 * "/basename".) In this case, the last 3620 * character of the directory name is 0. 3621 */ 3622 lastdir = 0; 3623 } 3624 3625 start = 0; 3626 end = lastdir; 3627 } else { 3628 ASSERT(subr == DIF_SUBR_BASENAME); 3629 ASSERT(firstbase != -1 && lastbase != -1); 3630 start = firstbase; 3631 end = lastbase; 3632 } 3633 3634 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 3635 dest[j] = dtrace_load8(src + i); 3636 3637 dest[j] = '\0'; 3638 regs[rd] = (uintptr_t)dest; 3639 mstate->dtms_scratch_ptr += size; 3640 break; 3641 } 3642 3643 case DIF_SUBR_CLEANPATH: { 3644 char *dest = (char *)mstate->dtms_scratch_ptr, c; 3645 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3646 uintptr_t src = tupregs[0].dttk_value; 3647 int i = 0, j = 0; 3648 3649 if (mstate->dtms_scratch_ptr + size > 3650 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3651 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3652 regs[rd] = NULL; 3653 break; 3654 } 3655 3656 /* 3657 * Move forward, loading each character. 
3658 */ 3659 do { 3660 c = dtrace_load8(src + i++); 3661 next: 3662 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 3663 break; 3664 3665 if (c != '/') { 3666 dest[j++] = c; 3667 continue; 3668 } 3669 3670 c = dtrace_load8(src + i++); 3671 3672 if (c == '/') { 3673 /* 3674 * We have two slashes -- we can just advance 3675 * to the next character. 3676 */ 3677 goto next; 3678 } 3679 3680 if (c != '.') { 3681 /* 3682 * This is not "." and it's not ".." -- we can 3683 * just store the "/" and this character and 3684 * drive on. 3685 */ 3686 dest[j++] = '/'; 3687 dest[j++] = c; 3688 continue; 3689 } 3690 3691 c = dtrace_load8(src + i++); 3692 3693 if (c == '/') { 3694 /* 3695 * This is a "/./" component. We're not going 3696 * to store anything in the destination buffer; 3697 * we're just going to go to the next component. 3698 */ 3699 goto next; 3700 } 3701 3702 if (c != '.') { 3703 /* 3704 * This is not ".." -- we can just store the 3705 * "/." and this character and continue 3706 * processing. 3707 */ 3708 dest[j++] = '/'; 3709 dest[j++] = '.'; 3710 dest[j++] = c; 3711 continue; 3712 } 3713 3714 c = dtrace_load8(src + i++); 3715 3716 if (c != '/' && c != '\0') { 3717 /* 3718 * This is not ".." -- it's "..[mumble]". 3719 * We'll store the "/.." and this character 3720 * and continue processing. 3721 */ 3722 dest[j++] = '/'; 3723 dest[j++] = '.'; 3724 dest[j++] = '.'; 3725 dest[j++] = c; 3726 continue; 3727 } 3728 3729 /* 3730 * This is "/../" or "/..\0". We need to back up 3731 * our destination pointer until we find a "/". 3732 */ 3733 i--; 3734 while (j != 0 && dest[--j] != '/') 3735 continue; 3736 3737 if (c == '\0') 3738 dest[++j] = '/'; 3739 } while (c != '\0'); 3740 3741 dest[j] = '\0'; 3742 regs[rd] = (uintptr_t)dest; 3743 mstate->dtms_scratch_ptr += size; 3744 break; 3745 } 3746 } 3747 } 3748 3749 /* 3750 * Emulate the execution of DTrace IR instructions specified by the given 3751 * DIF object. This function is deliberately void of assertions as all of 3752 * the necessary checks are handled by a call to dtrace_difo_validate(). 
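/*
 * Illustrative sketch (an assumption, not part of the framework): the
 * shape of the emulator loop that follows, reduced to a three-instruction
 * machine.  Real DIF decoding uses the DIF_INSTR_* macros over a much
 * richer opcode set; the opcode names and the encoding here are invented
 * for illustration only.
 */
#if 0
enum { EX_OP_ADD, EX_OP_SETI, EX_OP_RET };

static uint64_t
example_emulate(const uint32_t *text, uint_t textlen)
{
    uint64_t regs[8] = { 0 };
    uint64_t rval = 0;
    uint_t pc = 0;

    while (pc < textlen) {
        uint32_t instr = text[pc++];
        uint_t op = instr >> 24;                /* opcode in the top byte */
        uint_t rd = instr & 0x7;                /* destination register */
        uint_t r1 = (instr >> 8) & 0x7;         /* source register */

        switch (op) {
        case EX_OP_ADD:                         /* rd += r1 */
            regs[rd] += regs[r1];
            break;
        case EX_OP_SETI:                        /* rd = 16-bit immediate */
            regs[rd] = (instr >> 8) & 0xffff;
            break;
        case EX_OP_RET:                         /* return rd; stop */
            rval = regs[rd];
            pc = textlen;
            break;
        }
    }

    return (rval);
}
#endif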
3753 */ 3754 static uint64_t 3755 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 3756 dtrace_vstate_t *vstate, dtrace_state_t *state) 3757 { 3758 const dif_instr_t *text = difo->dtdo_buf; 3759 const uint_t textlen = difo->dtdo_len; 3760 const char *strtab = difo->dtdo_strtab; 3761 const uint64_t *inttab = difo->dtdo_inttab; 3762 3763 uint64_t rval = 0; 3764 dtrace_statvar_t *svar; 3765 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 3766 dtrace_difv_t *v; 3767 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 3768 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 3769 3770 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 3771 uint64_t regs[DIF_DIR_NREGS]; 3772 uint64_t *tmp; 3773 3774 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 3775 int64_t cc_r; 3776 uint_t pc = 0, id, opc; 3777 uint8_t ttop = 0; 3778 dif_instr_t instr; 3779 uint_t r1, r2, rd; 3780 3781 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 3782 3783 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 3784 opc = pc; 3785 3786 instr = text[pc++]; 3787 r1 = DIF_INSTR_R1(instr); 3788 r2 = DIF_INSTR_R2(instr); 3789 rd = DIF_INSTR_RD(instr); 3790 3791 switch (DIF_INSTR_OP(instr)) { 3792 case DIF_OP_OR: 3793 regs[rd] = regs[r1] | regs[r2]; 3794 break; 3795 case DIF_OP_XOR: 3796 regs[rd] = regs[r1] ^ regs[r2]; 3797 break; 3798 case DIF_OP_AND: 3799 regs[rd] = regs[r1] & regs[r2]; 3800 break; 3801 case DIF_OP_SLL: 3802 regs[rd] = regs[r1] << regs[r2]; 3803 break; 3804 case DIF_OP_SRL: 3805 regs[rd] = regs[r1] >> regs[r2]; 3806 break; 3807 case DIF_OP_SUB: 3808 regs[rd] = regs[r1] - regs[r2]; 3809 break; 3810 case DIF_OP_ADD: 3811 regs[rd] = regs[r1] + regs[r2]; 3812 break; 3813 case DIF_OP_MUL: 3814 regs[rd] = regs[r1] * regs[r2]; 3815 break; 3816 case DIF_OP_SDIV: 3817 if (regs[r2] == 0) { 3818 regs[rd] = 0; 3819 *flags |= CPU_DTRACE_DIVZERO; 3820 } else { 3821 regs[rd] = (int64_t)regs[r1] / 3822 (int64_t)regs[r2]; 3823 } 3824 break; 3825 3826 case DIF_OP_UDIV: 3827 if (regs[r2] == 0) { 3828 regs[rd] = 0; 3829 *flags |= CPU_DTRACE_DIVZERO; 3830 } else { 3831 regs[rd] = regs[r1] / regs[r2]; 3832 } 3833 break; 3834 3835 case DIF_OP_SREM: 3836 if (regs[r2] == 0) { 3837 regs[rd] = 0; 3838 *flags |= CPU_DTRACE_DIVZERO; 3839 } else { 3840 regs[rd] = (int64_t)regs[r1] % 3841 (int64_t)regs[r2]; 3842 } 3843 break; 3844 3845 case DIF_OP_UREM: 3846 if (regs[r2] == 0) { 3847 regs[rd] = 0; 3848 *flags |= CPU_DTRACE_DIVZERO; 3849 } else { 3850 regs[rd] = regs[r1] % regs[r2]; 3851 } 3852 break; 3853 3854 case DIF_OP_NOT: 3855 regs[rd] = ~regs[r1]; 3856 break; 3857 case DIF_OP_MOV: 3858 regs[rd] = regs[r1]; 3859 break; 3860 case DIF_OP_CMP: 3861 cc_r = regs[r1] - regs[r2]; 3862 cc_n = cc_r < 0; 3863 cc_z = cc_r == 0; 3864 cc_v = 0; 3865 cc_c = regs[r1] < regs[r2]; 3866 break; 3867 case DIF_OP_TST: 3868 cc_n = cc_v = cc_c = 0; 3869 cc_z = regs[r1] == 0; 3870 break; 3871 case DIF_OP_BA: 3872 pc = DIF_INSTR_LABEL(instr); 3873 break; 3874 case DIF_OP_BE: 3875 if (cc_z) 3876 pc = DIF_INSTR_LABEL(instr); 3877 break; 3878 case DIF_OP_BNE: 3879 if (cc_z == 0) 3880 pc = DIF_INSTR_LABEL(instr); 3881 break; 3882 case DIF_OP_BG: 3883 if ((cc_z | (cc_n ^ cc_v)) == 0) 3884 pc = DIF_INSTR_LABEL(instr); 3885 break; 3886 case DIF_OP_BGU: 3887 if ((cc_c | cc_z) == 0) 3888 pc = DIF_INSTR_LABEL(instr); 3889 break; 3890 case DIF_OP_BGE: 3891 if ((cc_n ^ cc_v) == 0) 3892 pc = DIF_INSTR_LABEL(instr); 3893 break; 3894 case DIF_OP_BGEU: 3895 if (cc_c == 0) 3896 pc = DIF_INSTR_LABEL(instr); 
3897 break; 3898 case DIF_OP_BL: 3899 if (cc_n ^ cc_v) 3900 pc = DIF_INSTR_LABEL(instr); 3901 break; 3902 case DIF_OP_BLU: 3903 if (cc_c) 3904 pc = DIF_INSTR_LABEL(instr); 3905 break; 3906 case DIF_OP_BLE: 3907 if (cc_z | (cc_n ^ cc_v)) 3908 pc = DIF_INSTR_LABEL(instr); 3909 break; 3910 case DIF_OP_BLEU: 3911 if (cc_c | cc_z) 3912 pc = DIF_INSTR_LABEL(instr); 3913 break; 3914 case DIF_OP_RLDSB: 3915 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 3916 *flags |= CPU_DTRACE_KPRIV; 3917 *illval = regs[r1]; 3918 break; 3919 } 3920 /*FALLTHROUGH*/ 3921 case DIF_OP_LDSB: 3922 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 3923 break; 3924 case DIF_OP_RLDSH: 3925 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 3926 *flags |= CPU_DTRACE_KPRIV; 3927 *illval = regs[r1]; 3928 break; 3929 } 3930 /*FALLTHROUGH*/ 3931 case DIF_OP_LDSH: 3932 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 3933 break; 3934 case DIF_OP_RLDSW: 3935 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 3936 *flags |= CPU_DTRACE_KPRIV; 3937 *illval = regs[r1]; 3938 break; 3939 } 3940 /*FALLTHROUGH*/ 3941 case DIF_OP_LDSW: 3942 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 3943 break; 3944 case DIF_OP_RLDUB: 3945 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 3946 *flags |= CPU_DTRACE_KPRIV; 3947 *illval = regs[r1]; 3948 break; 3949 } 3950 /*FALLTHROUGH*/ 3951 case DIF_OP_LDUB: 3952 regs[rd] = dtrace_load8(regs[r1]); 3953 break; 3954 case DIF_OP_RLDUH: 3955 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 3956 *flags |= CPU_DTRACE_KPRIV; 3957 *illval = regs[r1]; 3958 break; 3959 } 3960 /*FALLTHROUGH*/ 3961 case DIF_OP_LDUH: 3962 regs[rd] = dtrace_load16(regs[r1]); 3963 break; 3964 case DIF_OP_RLDUW: 3965 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 3966 *flags |= CPU_DTRACE_KPRIV; 3967 *illval = regs[r1]; 3968 break; 3969 } 3970 /*FALLTHROUGH*/ 3971 case DIF_OP_LDUW: 3972 regs[rd] = dtrace_load32(regs[r1]); 3973 break; 3974 case DIF_OP_RLDX: 3975 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) { 3976 *flags |= CPU_DTRACE_KPRIV; 3977 *illval = regs[r1]; 3978 break; 3979 } 3980 /*FALLTHROUGH*/ 3981 case DIF_OP_LDX: 3982 regs[rd] = dtrace_load64(regs[r1]); 3983 break; 3984 case DIF_OP_ULDSB: 3985 regs[rd] = (int8_t) 3986 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 3987 break; 3988 case DIF_OP_ULDSH: 3989 regs[rd] = (int16_t) 3990 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 3991 break; 3992 case DIF_OP_ULDSW: 3993 regs[rd] = (int32_t) 3994 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 3995 break; 3996 case DIF_OP_ULDUB: 3997 regs[rd] = 3998 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 3999 break; 4000 case DIF_OP_ULDUH: 4001 regs[rd] = 4002 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 4003 break; 4004 case DIF_OP_ULDUW: 4005 regs[rd] = 4006 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 4007 break; 4008 case DIF_OP_ULDX: 4009 regs[rd] = 4010 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 4011 break; 4012 case DIF_OP_RET: 4013 rval = regs[rd]; 4014 break; 4015 case DIF_OP_NOP: 4016 break; 4017 case DIF_OP_SETX: 4018 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 4019 break; 4020 case DIF_OP_SETS: 4021 regs[rd] = (uint64_t)(uintptr_t) 4022 (strtab + DIF_INSTR_STRING(instr)); 4023 break; 4024 case DIF_OP_SCMP: 4025 cc_r = dtrace_strncmp((char *)(uintptr_t)regs[r1], 4026 (char *)(uintptr_t)regs[r2], 4027 state->dts_options[DTRACEOPT_STRSIZE]); 4028 4029 cc_n = cc_r < 0; 4030 cc_z = cc_r == 0; 4031 cc_v = cc_c = 0; 4032 break; 4033 case DIF_OP_LDGA: 4034 regs[rd] = dtrace_dif_variable(mstate, state, 4035 r1, 
regs[r2]); 4036 break; 4037 case DIF_OP_LDGS: 4038 id = DIF_INSTR_VAR(instr); 4039 4040 if (id >= DIF_VAR_OTHER_UBASE) { 4041 uintptr_t a; 4042 4043 id -= DIF_VAR_OTHER_UBASE; 4044 svar = vstate->dtvs_globals[id]; 4045 ASSERT(svar != NULL); 4046 v = &svar->dtsv_var; 4047 4048 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 4049 regs[rd] = svar->dtsv_data; 4050 break; 4051 } 4052 4053 a = (uintptr_t)svar->dtsv_data; 4054 4055 if (*(uint8_t *)a == UINT8_MAX) { 4056 /* 4057 * If the 0th byte is set to UINT8_MAX 4058 * then this is to be treated as a 4059 * reference to a NULL variable. 4060 */ 4061 regs[rd] = NULL; 4062 } else { 4063 regs[rd] = a + sizeof (uint64_t); 4064 } 4065 4066 break; 4067 } 4068 4069 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 4070 break; 4071 4072 case DIF_OP_STGS: 4073 id = DIF_INSTR_VAR(instr); 4074 4075 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4076 id -= DIF_VAR_OTHER_UBASE; 4077 4078 svar = vstate->dtvs_globals[id]; 4079 ASSERT(svar != NULL); 4080 v = &svar->dtsv_var; 4081 4082 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4083 uintptr_t a = (uintptr_t)svar->dtsv_data; 4084 4085 ASSERT(a != NULL); 4086 ASSERT(svar->dtsv_size != 0); 4087 4088 if (regs[rd] == NULL) { 4089 *(uint8_t *)a = UINT8_MAX; 4090 break; 4091 } else { 4092 *(uint8_t *)a = 0; 4093 a += sizeof (uint64_t); 4094 } 4095 4096 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4097 (void *)a, &v->dtdv_type); 4098 break; 4099 } 4100 4101 svar->dtsv_data = regs[rd]; 4102 break; 4103 4104 case DIF_OP_LDTA: 4105 /* 4106 * There are no DTrace built-in thread-local arrays at 4107 * present. This opcode is saved for future work. 4108 */ 4109 *flags |= CPU_DTRACE_ILLOP; 4110 regs[rd] = 0; 4111 break; 4112 4113 case DIF_OP_LDLS: 4114 id = DIF_INSTR_VAR(instr); 4115 4116 if (id < DIF_VAR_OTHER_UBASE) { 4117 /* 4118 * For now, this has no meaning. 4119 */ 4120 regs[rd] = 0; 4121 break; 4122 } 4123 4124 id -= DIF_VAR_OTHER_UBASE; 4125 4126 ASSERT(id < vstate->dtvs_nlocals); 4127 ASSERT(vstate->dtvs_locals != NULL); 4128 4129 svar = vstate->dtvs_locals[id]; 4130 ASSERT(svar != NULL); 4131 v = &svar->dtsv_var; 4132 4133 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4134 uintptr_t a = (uintptr_t)svar->dtsv_data; 4135 size_t sz = v->dtdv_type.dtdt_size; 4136 4137 sz += sizeof (uint64_t); 4138 ASSERT(svar->dtsv_size == NCPU * sz); 4139 a += CPU->cpu_id * sz; 4140 4141 if (*(uint8_t *)a == UINT8_MAX) { 4142 /* 4143 * If the 0th byte is set to UINT8_MAX 4144 * then this is to be treated as a 4145 * reference to a NULL variable. 
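 *
 * (As a sketch of the per-CPU layout assumed by this code:
 * each by-ref local occupies NCPU chunks of size
 *
 *	sz = v->dtdv_type.dtdt_size + sizeof (uint64_t)
 *
 * and the 0th byte of each chunk's leading uint64_t serves
 * as the NULL flag -- which is why the non-NULL case below
 * yields "a + sizeof (uint64_t)" as the data address.)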
4146 */ 4147 regs[rd] = NULL; 4148 } else { 4149 regs[rd] = a + sizeof (uint64_t); 4150 } 4151 4152 break; 4153 } 4154 4155 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 4156 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 4157 regs[rd] = tmp[CPU->cpu_id]; 4158 break; 4159 4160 case DIF_OP_STLS: 4161 id = DIF_INSTR_VAR(instr); 4162 4163 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4164 id -= DIF_VAR_OTHER_UBASE; 4165 ASSERT(id < vstate->dtvs_nlocals); 4166 4167 ASSERT(vstate->dtvs_locals != NULL); 4168 svar = vstate->dtvs_locals[id]; 4169 ASSERT(svar != NULL); 4170 v = &svar->dtsv_var; 4171 4172 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4173 uintptr_t a = (uintptr_t)svar->dtsv_data; 4174 size_t sz = v->dtdv_type.dtdt_size; 4175 4176 sz += sizeof (uint64_t); 4177 ASSERT(svar->dtsv_size == NCPU * sz); 4178 a += CPU->cpu_id * sz; 4179 4180 if (regs[rd] == NULL) { 4181 *(uint8_t *)a = UINT8_MAX; 4182 break; 4183 } else { 4184 *(uint8_t *)a = 0; 4185 a += sizeof (uint64_t); 4186 } 4187 4188 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4189 (void *)a, &v->dtdv_type); 4190 break; 4191 } 4192 4193 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 4194 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 4195 tmp[CPU->cpu_id] = regs[rd]; 4196 break; 4197 4198 case DIF_OP_LDTS: { 4199 dtrace_dynvar_t *dvar; 4200 dtrace_key_t *key; 4201 4202 id = DIF_INSTR_VAR(instr); 4203 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4204 id -= DIF_VAR_OTHER_UBASE; 4205 v = &vstate->dtvs_tlocals[id]; 4206 4207 key = &tupregs[DIF_DTR_NREGS]; 4208 key[0].dttk_value = (uint64_t)id; 4209 key[0].dttk_size = 0; 4210 DTRACE_TLS_THRKEY(key[1].dttk_value); 4211 key[1].dttk_size = 0; 4212 4213 dvar = dtrace_dynvar(dstate, 2, key, 4214 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC); 4215 4216 if (dvar == NULL) { 4217 regs[rd] = 0; 4218 break; 4219 } 4220 4221 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4222 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 4223 } else { 4224 regs[rd] = *((uint64_t *)dvar->dtdv_data); 4225 } 4226 4227 break; 4228 } 4229 4230 case DIF_OP_STTS: { 4231 dtrace_dynvar_t *dvar; 4232 dtrace_key_t *key; 4233 4234 id = DIF_INSTR_VAR(instr); 4235 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4236 id -= DIF_VAR_OTHER_UBASE; 4237 4238 key = &tupregs[DIF_DTR_NREGS]; 4239 key[0].dttk_value = (uint64_t)id; 4240 key[0].dttk_size = 0; 4241 DTRACE_TLS_THRKEY(key[1].dttk_value); 4242 key[1].dttk_size = 0; 4243 v = &vstate->dtvs_tlocals[id]; 4244 4245 dvar = dtrace_dynvar(dstate, 2, key, 4246 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 4247 v->dtdv_type.dtdt_size : sizeof (uint64_t), 4248 regs[rd] ? DTRACE_DYNVAR_ALLOC : 4249 DTRACE_DYNVAR_DEALLOC); 4250 4251 /* 4252 * Given that we're storing to thread-local data, 4253 * we need to flush our predicate cache. 
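 * (The cache in question is the one consulted at the top of
 * dtrace_probe(): when curthread->t_predcache matches a
 * probe's dtpr_predcache, the probe is skipped on the
 * assumption that its thread-local predicate still evaluates
 * to false.  Any store to thread-local storage invalidates
 * that assumption, so t_predcache is cleared here.)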
4254 */ 4255 curthread->t_predcache = NULL; 4256 4257 if (dvar == NULL) 4258 break; 4259 4260 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4261 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4262 dvar->dtdv_data, &v->dtdv_type); 4263 } else { 4264 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 4265 } 4266 4267 break; 4268 } 4269 4270 case DIF_OP_SRA: 4271 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 4272 break; 4273 4274 case DIF_OP_CALL: 4275 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 4276 regs, tupregs, ttop, mstate, state); 4277 break; 4278 4279 case DIF_OP_PUSHTR: 4280 if (ttop == DIF_DTR_NREGS) { 4281 *flags |= CPU_DTRACE_TUPOFLOW; 4282 break; 4283 } 4284 4285 if (r1 == DIF_TYPE_STRING) { 4286 /* 4287 * If this is a string type and the size is 0, 4288 * we'll use the system-wide default string 4289 * size. Note that we are _not_ looking at 4290 * the value of the DTRACEOPT_STRSIZE option; 4291 * had this been set, we would expect to have 4292 * a non-zero size value in the "pushtr". 4293 */ 4294 tupregs[ttop].dttk_size = 4295 dtrace_strlen((char *)(uintptr_t)regs[rd], 4296 regs[r2] ? regs[r2] : 4297 dtrace_strsize_default) + 1; 4298 } else { 4299 tupregs[ttop].dttk_size = regs[r2]; 4300 } 4301 4302 tupregs[ttop++].dttk_value = regs[rd]; 4303 break; 4304 4305 case DIF_OP_PUSHTV: 4306 if (ttop == DIF_DTR_NREGS) { 4307 *flags |= CPU_DTRACE_TUPOFLOW; 4308 break; 4309 } 4310 4311 tupregs[ttop].dttk_value = regs[rd]; 4312 tupregs[ttop++].dttk_size = 0; 4313 break; 4314 4315 case DIF_OP_POPTS: 4316 if (ttop != 0) 4317 ttop--; 4318 break; 4319 4320 case DIF_OP_FLUSHTS: 4321 ttop = 0; 4322 break; 4323 4324 case DIF_OP_LDGAA: 4325 case DIF_OP_LDTAA: { 4326 dtrace_dynvar_t *dvar; 4327 dtrace_key_t *key = tupregs; 4328 uint_t nkeys = ttop; 4329 4330 id = DIF_INSTR_VAR(instr); 4331 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4332 id -= DIF_VAR_OTHER_UBASE; 4333 4334 key[nkeys].dttk_value = (uint64_t)id; 4335 key[nkeys++].dttk_size = 0; 4336 4337 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 4338 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 4339 key[nkeys++].dttk_size = 0; 4340 v = &vstate->dtvs_tlocals[id]; 4341 } else { 4342 v = &vstate->dtvs_globals[id]->dtsv_var; 4343 } 4344 4345 dvar = dtrace_dynvar(dstate, nkeys, key, 4346 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 4347 v->dtdv_type.dtdt_size : sizeof (uint64_t), 4348 DTRACE_DYNVAR_NOALLOC); 4349 4350 if (dvar == NULL) { 4351 regs[rd] = 0; 4352 break; 4353 } 4354 4355 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4356 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 4357 } else { 4358 regs[rd] = *((uint64_t *)dvar->dtdv_data); 4359 } 4360 4361 break; 4362 } 4363 4364 case DIF_OP_STGAA: 4365 case DIF_OP_STTAA: { 4366 dtrace_dynvar_t *dvar; 4367 dtrace_key_t *key = tupregs; 4368 uint_t nkeys = ttop; 4369 4370 id = DIF_INSTR_VAR(instr); 4371 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4372 id -= DIF_VAR_OTHER_UBASE; 4373 4374 key[nkeys].dttk_value = (uint64_t)id; 4375 key[nkeys++].dttk_size = 0; 4376 4377 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 4378 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 4379 key[nkeys++].dttk_size = 0; 4380 v = &vstate->dtvs_tlocals[id]; 4381 } else { 4382 v = &vstate->dtvs_globals[id]->dtsv_var; 4383 } 4384 4385 dvar = dtrace_dynvar(dstate, nkeys, key, 4386 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 4387 v->dtdv_type.dtdt_size : sizeof (uint64_t), 4388 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 4389 DTRACE_DYNVAR_DEALLOC); 4390 4391 if (dvar == NULL) 4392 break; 4393 4394 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4395 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4396 dvar->dtdv_data, &v->dtdv_type); 4397 } else { 4398 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 4399 } 4400 4401 break; 4402 } 4403 4404 case DIF_OP_ALLOCS: { 4405 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 4406 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 4407 4408 if (mstate->dtms_scratch_ptr + size > 4409 mstate->dtms_scratch_base + 4410 mstate->dtms_scratch_size) { 4411 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4412 regs[rd] = NULL; 4413 } else { 4414 dtrace_bzero((void *) 4415 mstate->dtms_scratch_ptr, size); 4416 mstate->dtms_scratch_ptr += size; 4417 regs[rd] = ptr; 4418 } 4419 break; 4420 } 4421 4422 case DIF_OP_COPYS: 4423 if (!dtrace_canstore(regs[rd], regs[r2], 4424 mstate, vstate)) { 4425 *flags |= CPU_DTRACE_BADADDR; 4426 *illval = regs[rd]; 4427 break; 4428 } 4429 4430 dtrace_bcopy((void *)(uintptr_t)regs[r1], 4431 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 4432 break; 4433 4434 case DIF_OP_STB: 4435 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 4436 *flags |= CPU_DTRACE_BADADDR; 4437 *illval = regs[rd]; 4438 break; 4439 } 4440 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 4441 break; 4442 4443 case DIF_OP_STH: 4444 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 4445 *flags |= CPU_DTRACE_BADADDR; 4446 *illval = regs[rd]; 4447 break; 4448 } 4449 if (regs[rd] & 1) { 4450 *flags |= CPU_DTRACE_BADALIGN; 4451 *illval = regs[rd]; 4452 break; 4453 } 4454 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 4455 break; 4456 4457 case DIF_OP_STW: 4458 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 4459 *flags |= CPU_DTRACE_BADADDR; 4460 *illval = regs[rd]; 4461 break; 4462 } 4463 if (regs[rd] & 3) { 4464 *flags |= CPU_DTRACE_BADALIGN; 4465 *illval = regs[rd]; 4466 break; 4467 } 4468 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 4469 break; 4470 4471 case DIF_OP_STX: 4472 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 4473 *flags |= CPU_DTRACE_BADADDR; 4474 *illval = regs[rd]; 4475 break; 4476 } 4477 if (regs[rd] & 7) { 4478 *flags |= CPU_DTRACE_BADALIGN; 4479 *illval = regs[rd]; 4480 break; 4481 } 4482 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 4483 break; 4484 } 4485 } 4486 4487 if (!(*flags & CPU_DTRACE_FAULT)) 4488 return (rval); 4489 4490 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 4491 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 4492 4493 return (0); 4494 } 4495 4496 static void 4497 dtrace_action_breakpoint(dtrace_ecb_t *ecb) 4498 { 4499 dtrace_probe_t *probe = ecb->dte_probe; 4500 dtrace_provider_t *prov = probe->dtpr_provider; 4501 char c[DTRACE_FULLNAMELEN + 80], *str; 4502 char *msg = "dtrace: breakpoint action at probe "; 4503 char *ecbmsg = " (ecb "; 4504 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 4505 uintptr_t val = (uintptr_t)ecb; 4506 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 4507 4508 if (dtrace_destructive_disallow) 4509 return; 4510 4511 /* 4512 * It's impossible to be taking action on the NULL probe. 4513 */ 4514 ASSERT(probe != NULL); 4515 4516 /* 4517 * This is a poor man's (destitute man's?) sprintf(): we want to 4518 * print the provider name, module name, function name and name of 4519 * the probe, along with the hex address of the ECB with the breakpoint 4520 * action -- all of which we must place in the character buffer by 4521 * hand. 
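 *
 * For example -- with a purely hypothetical probe and ECB
 * address -- the string handed to debug_enter() below might
 * read:
 *
 *	dtrace: breakpoint action at probe
 *	    fbt:genunix:kmem_alloc:entry (ecb 30002fb1c40)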
4522 */ 4523 while (*msg != '\0') 4524 c[i++] = *msg++; 4525 4526 for (str = prov->dtpv_name; *str != '\0'; str++) 4527 c[i++] = *str; 4528 c[i++] = ':'; 4529 4530 for (str = probe->dtpr_mod; *str != '\0'; str++) 4531 c[i++] = *str; 4532 c[i++] = ':'; 4533 4534 for (str = probe->dtpr_func; *str != '\0'; str++) 4535 c[i++] = *str; 4536 c[i++] = ':'; 4537 4538 for (str = probe->dtpr_name; *str != '\0'; str++) 4539 c[i++] = *str; 4540 4541 while (*ecbmsg != '\0') 4542 c[i++] = *ecbmsg++; 4543 4544 while (shift >= 0) { 4545 mask = (uintptr_t)0xf << shift; 4546 4547 if (val >= ((uintptr_t)1 << shift)) 4548 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 4549 shift -= 4; 4550 } 4551 4552 c[i++] = ')'; 4553 c[i] = '\0'; 4554 4555 debug_enter(c); 4556 } 4557 4558 static void 4559 dtrace_action_panic(dtrace_ecb_t *ecb) 4560 { 4561 dtrace_probe_t *probe = ecb->dte_probe; 4562 4563 /* 4564 * It's impossible to be taking action on the NULL probe. 4565 */ 4566 ASSERT(probe != NULL); 4567 4568 if (dtrace_destructive_disallow) 4569 return; 4570 4571 if (dtrace_panicked != NULL) 4572 return; 4573 4574 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 4575 return; 4576 4577 /* 4578 * We won the right to panic. (We want to be sure that only one 4579 * thread calls panic() from dtrace_probe(), and that panic() is 4580 * called exactly once.) 4581 */ 4582 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 4583 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 4584 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 4585 } 4586 4587 static void 4588 dtrace_action_raise(uint64_t sig) 4589 { 4590 if (dtrace_destructive_disallow) 4591 return; 4592 4593 if (sig >= NSIG) { 4594 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4595 return; 4596 } 4597 4598 /* 4599 * raise() has a queue depth of 1 -- we ignore all subsequent 4600 * invocations of the raise() action. 4601 */ 4602 if (curthread->t_dtrace_sig == 0) 4603 curthread->t_dtrace_sig = (uint8_t)sig; 4604 4605 curthread->t_sig_check = 1; 4606 aston(curthread); 4607 } 4608 4609 static void 4610 dtrace_action_stop(void) 4611 { 4612 if (dtrace_destructive_disallow) 4613 return; 4614 4615 if (!curthread->t_dtrace_stop) { 4616 curthread->t_dtrace_stop = 1; 4617 curthread->t_sig_check = 1; 4618 aston(curthread); 4619 } 4620 } 4621 4622 static void 4623 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 4624 { 4625 hrtime_t now; 4626 volatile uint16_t *flags; 4627 cpu_t *cpu = CPU; 4628 4629 if (dtrace_destructive_disallow) 4630 return; 4631 4632 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 4633 4634 now = dtrace_gethrtime(); 4635 4636 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 4637 /* 4638 * We need to advance the mark to the current time. 4639 */ 4640 cpu->cpu_dtrace_chillmark = now; 4641 cpu->cpu_dtrace_chilled = 0; 4642 } 4643 4644 /* 4645 * Now check to see if the requested chill time would take us over 4646 * the maximum amount of time allowed in the chill interval. (Or 4647 * worse, if the calculation itself induces overflow.) 4648 */ 4649 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 4650 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 4651 *flags |= CPU_DTRACE_ILLOP; 4652 return; 4653 } 4654 4655 while (dtrace_gethrtime() - now < val) 4656 continue; 4657 4658 /* 4659 * Normally, we assure that the value of the variable "timestamp" does 4660 * not change within an ECB. The presence of chill() represents an 4661 * exception to this rule, however. 
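 * (Concretely: clearing DTRACE_MSTATE_TIMESTAMP below forces
 * the next reference to recompute the time, so a hypothetical
 * D fragment such as
 *
 *	self->ts = timestamp;
 *	chill(500000);
 *	self->delta = timestamp - self->ts;
 *
 * may record a non-zero delta within a single ECB -- the one
 * sanctioned exception to the invariance of "timestamp".)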
4662 */ 4663 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 4664 cpu->cpu_dtrace_chilled += val; 4665 } 4666 4667 static void 4668 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 4669 uint64_t *buf, uint64_t arg) 4670 { 4671 int nframes = DTRACE_USTACK_NFRAMES(arg); 4672 int strsize = DTRACE_USTACK_STRSIZE(arg); 4673 uint64_t *pcs = &buf[1], *fps; 4674 char *str = (char *)&pcs[nframes]; 4675 int size, offs = 0, i, j; 4676 uintptr_t old = mstate->dtms_scratch_ptr, saved; 4677 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 4678 char *sym; 4679 4680 /* 4681 * Should be taking a faster path if string space has not been 4682 * allocated. 4683 */ 4684 ASSERT(strsize != 0); 4685 4686 /* 4687 * We will first allocate some temporary space for the frame pointers. 4688 */ 4689 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 4690 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 4691 (nframes * sizeof (uint64_t)); 4692 4693 if (mstate->dtms_scratch_ptr + size > 4694 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 4695 /* 4696 * Not enough room for our frame pointers -- need to indicate 4697 * that we ran out of scratch space. 4698 */ 4699 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4700 return; 4701 } 4702 4703 mstate->dtms_scratch_ptr += size; 4704 saved = mstate->dtms_scratch_ptr; 4705 4706 /* 4707 * Now get a stack with both program counters and frame pointers. 4708 */ 4709 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4710 dtrace_getufpstack(buf, fps, nframes + 1); 4711 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4712 4713 /* 4714 * If that faulted, we're cooked. 4715 */ 4716 if (*flags & CPU_DTRACE_FAULT) 4717 goto out; 4718 4719 /* 4720 * Now we want to walk up the stack, calling the USTACK helper. For 4721 * each iteration, we restore the scratch pointer. 4722 */ 4723 for (i = 0; i < nframes; i++) { 4724 mstate->dtms_scratch_ptr = saved; 4725 4726 if (offs >= strsize) 4727 break; 4728 4729 sym = (char *)(uintptr_t)dtrace_helper( 4730 DTRACE_HELPER_ACTION_USTACK, 4731 mstate, state, pcs[i], fps[i]); 4732 4733 /* 4734 * If we faulted while running the helper, we're going to 4735 * clear the fault and null out the corresponding string. 4736 */ 4737 if (*flags & CPU_DTRACE_FAULT) { 4738 *flags &= ~CPU_DTRACE_FAULT; 4739 str[offs++] = '\0'; 4740 continue; 4741 } 4742 4743 if (sym == NULL) { 4744 str[offs++] = '\0'; 4745 continue; 4746 } 4747 4748 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4749 4750 /* 4751 * Now copy in the string that the helper returned to us. 4752 */ 4753 for (j = 0; offs + j < strsize; j++) { 4754 if ((str[offs + j] = sym[j]) == '\0') 4755 break; 4756 } 4757 4758 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4759 4760 offs += j + 1; 4761 } 4762 4763 if (offs >= strsize) { 4764 /* 4765 * If we didn't have room for all of the strings, we don't 4766 * abort processing -- this needn't be a fatal error -- but we 4767 * still want to increment a counter (dts_stkstroverflows) to 4768 * allow this condition to be warned about. (If this is from 4769 * a jstack() action, it is easily tuned via jstackstrsize.) 4770 */ 4771 dtrace_error(&state->dts_stkstroverflows); 4772 } 4773 4774 while (offs < strsize) 4775 str[offs++] = '\0'; 4776 4777 out: 4778 mstate->dtms_scratch_ptr = old; 4779 } 4780 4781 /* 4782 * If you're looking for the epicenter of DTrace, you just found it. This 4783 * is the function called by the provider to fire a probe -- from which all 4784 * subsequent probe-context DTrace activity emanates. 
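 *
 * (As a hedged illustration: a provider that was handed the
 * identifier 'id' when it created a probe typically fires it
 * from its instrumentation point with a call of the form
 *
 *	dtrace_probe(id, arg0, arg1, arg2, arg3, arg4);
 *
 * passing zero for any argument the probe does not define.)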
4785 */ 4786 void 4787 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 4788 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 4789 { 4790 processorid_t cpuid; 4791 dtrace_icookie_t cookie; 4792 dtrace_probe_t *probe; 4793 dtrace_mstate_t mstate; 4794 dtrace_ecb_t *ecb; 4795 dtrace_action_t *act; 4796 intptr_t offs; 4797 size_t size; 4798 int vtime, onintr; 4799 volatile uint16_t *flags; 4800 hrtime_t now; 4801 4802 /* 4803 * Kick out immediately if this CPU is still being born (in which case 4804 * curthread will be set to -1) 4805 */ 4806 if ((uintptr_t)curthread & 1) 4807 return; 4808 4809 cookie = dtrace_interrupt_disable(); 4810 probe = dtrace_probes[id - 1]; 4811 cpuid = CPU->cpu_id; 4812 onintr = CPU_ON_INTR(CPU); 4813 4814 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 4815 probe->dtpr_predcache == curthread->t_predcache) { 4816 /* 4817 * We have hit in the predicate cache; we know that 4818 * this predicate would evaluate to be false. 4819 */ 4820 dtrace_interrupt_enable(cookie); 4821 return; 4822 } 4823 4824 if (panic_quiesce) { 4825 /* 4826 * We don't trace anything if we're panicking. 4827 */ 4828 dtrace_interrupt_enable(cookie); 4829 return; 4830 } 4831 4832 now = dtrace_gethrtime(); 4833 vtime = dtrace_vtime_references != 0; 4834 4835 if (vtime && curthread->t_dtrace_start) 4836 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 4837 4838 mstate.dtms_probe = probe; 4839 mstate.dtms_arg[0] = arg0; 4840 mstate.dtms_arg[1] = arg1; 4841 mstate.dtms_arg[2] = arg2; 4842 mstate.dtms_arg[3] = arg3; 4843 mstate.dtms_arg[4] = arg4; 4844 4845 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 4846 4847 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 4848 dtrace_predicate_t *pred = ecb->dte_predicate; 4849 dtrace_state_t *state = ecb->dte_state; 4850 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 4851 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 4852 dtrace_vstate_t *vstate = &state->dts_vstate; 4853 dtrace_provider_t *prov = probe->dtpr_provider; 4854 int committed = 0; 4855 caddr_t tomax; 4856 4857 /* 4858 * A little subtlety with the following (seemingly innocuous) 4859 * declaration of the automatic 'val': by looking at the 4860 * code, you might think that it could be declared in the 4861 * action processing loop, below. (That is, it's only used in 4862 * the action processing loop.) However, it must be declared 4863 * out of that scope because in the case of DIF expression 4864 * arguments to aggregating actions, one iteration of the 4865 * action loop will use the last iteration's value. 4866 */ 4867 #ifdef lint 4868 uint64_t val = 0; 4869 #else 4870 uint64_t val; 4871 #endif 4872 4873 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 4874 *flags &= ~CPU_DTRACE_ERROR; 4875 4876 if (prov == dtrace_provider) { 4877 /* 4878 * If dtrace itself is the provider of this probe, 4879 * we're only going to continue processing the ECB if 4880 * arg0 (the dtrace_state_t) is equal to the ECB's 4881 * creating state. (This prevents disjoint consumers 4882 * from seeing one another's metaprobes.) 4883 */ 4884 if (arg0 != (uint64_t)(uintptr_t)state) 4885 continue; 4886 } 4887 4888 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 4889 /* 4890 * We're not currently active. If our provider isn't 4891 * the dtrace pseudo provider, we're not interested. 4892 */ 4893 if (prov != dtrace_provider) 4894 continue; 4895 4896 /* 4897 * Now we must further check if we are in the BEGIN 4898 * probe. 
If we are, we will only continue processing 4899 * if we're still in WARMUP -- if one BEGIN enabling 4900 * has invoked the exit() action, we don't want to 4901 * evaluate subsequent BEGIN enablings. 4902 */ 4903 if (probe->dtpr_id == dtrace_probeid_begin && 4904 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 4905 ASSERT(state->dts_activity == 4906 DTRACE_ACTIVITY_DRAINING); 4907 continue; 4908 } 4909 } 4910 4911 if (ecb->dte_cond) { 4912 /* 4913 * If the dte_cond bits indicate that this 4914 * consumer is only allowed to see user-mode firings 4915 * of this probe, call the provider's dtps_usermode() 4916 * entry point to check that the probe was fired 4917 * while in a user context. Skip this ECB if that's 4918 * not the case. 4919 */ 4920 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 4921 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 4922 probe->dtpr_id, probe->dtpr_arg) == 0) 4923 continue; 4924 4925 /* 4926 * This is more subtle than it looks. We have to be 4927 * absolutely certain that CRED() isn't going to 4928 * change out from under us so it's only legit to 4929 * examine that structure if we're in constrained 4930 * situations. Currently, the only time we'll make this 4931 * check is if a non-super-user has enabled the 4932 * profile or syscall providers -- providers that 4933 * allow visibility of all processes. For the 4934 * profile case, the check above will ensure that 4935 * we're examining a user context. 4936 */ 4937 if (ecb->dte_cond & DTRACE_COND_OWNER) { 4938 cred_t *cr; 4939 cred_t *s_cr = 4940 ecb->dte_state->dts_cred.dcr_cred; 4941 proc_t *proc; 4942 4943 ASSERT(s_cr != NULL); 4944 4945 if ((cr = CRED()) == NULL || 4946 s_cr->cr_uid != cr->cr_uid || 4947 s_cr->cr_uid != cr->cr_ruid || 4948 s_cr->cr_uid != cr->cr_suid || 4949 s_cr->cr_gid != cr->cr_gid || 4950 s_cr->cr_gid != cr->cr_rgid || 4951 s_cr->cr_gid != cr->cr_sgid || 4952 (proc = ttoproc(curthread)) == NULL || 4953 (proc->p_flag & SNOCD)) 4954 continue; 4955 } 4956 4957 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 4958 cred_t *cr; 4959 cred_t *s_cr = 4960 ecb->dte_state->dts_cred.dcr_cred; 4961 4962 ASSERT(s_cr != NULL); 4963 4964 if ((cr = CRED()) == NULL || 4965 s_cr->cr_zone->zone_id != 4966 cr->cr_zone->zone_id) 4967 continue; 4968 } 4969 } 4970 4971 if (now - state->dts_alive > dtrace_deadman_timeout) { 4972 /* 4973 * We seem to be dead. Unless we (a) have kernel 4974 * destructive permissions, (b) have explicitly enabled 4975 * destructive actions, and (c) destructive actions have 4976 * not been disabled, we're going to transition into 4977 * the KILLED state, from which no further processing 4978 * on this state will be performed.
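 * (The transition is effected below with dtrace_cas32() in a
 * loop: whatever activity value we happen to race against, the
 * state is moved to DTRACE_ACTIVITY_KILLED and stays there.)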
4979 */ 4980 if (!dtrace_priv_kernel_destructive(state) || 4981 !state->dts_cred.dcr_destructive || 4982 dtrace_destructive_disallow) { 4983 void *activity = &state->dts_activity; 4984 dtrace_activity_t current; 4985 4986 do { 4987 current = state->dts_activity; 4988 } while (dtrace_cas32(activity, current, 4989 DTRACE_ACTIVITY_KILLED) != current); 4990 4991 continue; 4992 } 4993 } 4994 4995 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 4996 ecb->dte_alignment, state, &mstate)) < 0) 4997 continue; 4998 4999 tomax = buf->dtb_tomax; 5000 ASSERT(tomax != NULL); 5001 5002 if (ecb->dte_size != 0) 5003 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid); 5004 5005 mstate.dtms_epid = ecb->dte_epid; 5006 mstate.dtms_present |= DTRACE_MSTATE_EPID; 5007 5008 if (pred != NULL) { 5009 dtrace_difo_t *dp = pred->dtp_difo; 5010 int rval; 5011 5012 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 5013 5014 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 5015 dtrace_cacheid_t cid = probe->dtpr_predcache; 5016 5017 if (cid != DTRACE_CACHEIDNONE && !onintr) { 5018 /* 5019 * Update the predicate cache... 5020 */ 5021 ASSERT(cid == pred->dtp_cacheid); 5022 curthread->t_predcache = cid; 5023 } 5024 5025 continue; 5026 } 5027 } 5028 5029 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 5030 act != NULL; act = act->dta_next) { 5031 size_t valoffs; 5032 dtrace_difo_t *dp; 5033 dtrace_recdesc_t *rec = &act->dta_rec; 5034 5035 size = rec->dtrd_size; 5036 valoffs = offs + rec->dtrd_offset; 5037 5038 if (DTRACEACT_ISAGG(act->dta_kind)) { 5039 uint64_t v = 0xbad; 5040 dtrace_aggregation_t *agg; 5041 5042 agg = (dtrace_aggregation_t *)act; 5043 5044 if ((dp = act->dta_difo) != NULL) 5045 v = dtrace_dif_emulate(dp, 5046 &mstate, vstate, state); 5047 5048 if (*flags & CPU_DTRACE_ERROR) 5049 continue; 5050 5051 /* 5052 * Note that we always pass the expression 5053 * value from the previous iteration of the 5054 * action loop. This value will only be used 5055 * if there is an expression argument to the 5056 * aggregating action, denoted by the 5057 * dtag_hasarg field. 5058 */ 5059 dtrace_aggregate(agg, buf, 5060 offs, aggbuf, v, val); 5061 continue; 5062 } 5063 5064 switch (act->dta_kind) { 5065 case DTRACEACT_STOP: 5066 if (dtrace_priv_proc_destructive(state)) 5067 dtrace_action_stop(); 5068 continue; 5069 5070 case DTRACEACT_BREAKPOINT: 5071 if (dtrace_priv_kernel_destructive(state)) 5072 dtrace_action_breakpoint(ecb); 5073 continue; 5074 5075 case DTRACEACT_PANIC: 5076 if (dtrace_priv_kernel_destructive(state)) 5077 dtrace_action_panic(ecb); 5078 continue; 5079 5080 case DTRACEACT_STACK: 5081 if (!dtrace_priv_kernel(state)) 5082 continue; 5083 5084 dtrace_getpcstack((pc_t *)(tomax + valoffs), 5085 size / sizeof (pc_t), probe->dtpr_aframes, 5086 DTRACE_ANCHORED(probe) ? NULL : 5087 (uint32_t *)arg0); 5088 5089 continue; 5090 5091 case DTRACEACT_JSTACK: 5092 case DTRACEACT_USTACK: 5093 if (!dtrace_priv_proc(state)) 5094 continue; 5095 5096 /* 5097 * See comment in DIF_VAR_PID. 
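 * (Briefly: if an anchored probe fired at interrupt level, the
 * interrupted thread's user stack bears no relation to the
 * probe; rather than record a misleading stack, the code below
 * stores zeroed frames and string space of the expected size.)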
5098 */ 5099 if (DTRACE_ANCHORED(mstate.dtms_probe) && 5100 CPU_ON_INTR(CPU)) { 5101 int depth = DTRACE_USTACK_NFRAMES( 5102 rec->dtrd_arg) + 1; 5103 5104 dtrace_bzero((void *)(tomax + valoffs), 5105 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 5106 + depth * sizeof (uint64_t)); 5107 5108 continue; 5109 } 5110 5111 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 5112 curproc->p_dtrace_helpers != NULL) { 5113 /* 5114 * This is the slow path -- we have 5115 * allocated string space, and we're 5116 * getting the stack of a process that 5117 * has helpers. Call into a separate 5118 * routine to perform this processing. 5119 */ 5120 dtrace_action_ustack(&mstate, state, 5121 (uint64_t *)(tomax + valoffs), 5122 rec->dtrd_arg); 5123 continue; 5124 } 5125 5126 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5127 dtrace_getupcstack((uint64_t *) 5128 (tomax + valoffs), 5129 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 5130 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5131 continue; 5132 5133 default: 5134 break; 5135 } 5136 5137 dp = act->dta_difo; 5138 ASSERT(dp != NULL); 5139 5140 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 5141 5142 if (*flags & CPU_DTRACE_ERROR) 5143 continue; 5144 5145 switch (act->dta_kind) { 5146 case DTRACEACT_SPECULATE: 5147 ASSERT(buf == &state->dts_buffer[cpuid]); 5148 buf = dtrace_speculation_buffer(state, 5149 cpuid, val); 5150 5151 if (buf == NULL) { 5152 *flags |= CPU_DTRACE_DROP; 5153 continue; 5154 } 5155 5156 offs = dtrace_buffer_reserve(buf, 5157 ecb->dte_needed, ecb->dte_alignment, 5158 state, NULL); 5159 5160 if (offs < 0) { 5161 *flags |= CPU_DTRACE_DROP; 5162 continue; 5163 } 5164 5165 tomax = buf->dtb_tomax; 5166 ASSERT(tomax != NULL); 5167 5168 if (ecb->dte_size != 0) 5169 DTRACE_STORE(uint32_t, tomax, offs, 5170 ecb->dte_epid); 5171 continue; 5172 5173 case DTRACEACT_CHILL: 5174 if (dtrace_priv_kernel_destructive(state)) 5175 dtrace_action_chill(&mstate, val); 5176 continue; 5177 5178 case DTRACEACT_RAISE: 5179 if (dtrace_priv_proc_destructive(state)) 5180 dtrace_action_raise(val); 5181 continue; 5182 5183 case DTRACEACT_COMMIT: 5184 ASSERT(!committed); 5185 5186 /* 5187 * We need to commit our buffer state. 5188 */ 5189 if (ecb->dte_size) 5190 buf->dtb_offset = offs + ecb->dte_size; 5191 buf = &state->dts_buffer[cpuid]; 5192 dtrace_speculation_commit(state, cpuid, val); 5193 committed = 1; 5194 continue; 5195 5196 case DTRACEACT_DISCARD: 5197 dtrace_speculation_discard(state, cpuid, val); 5198 continue; 5199 5200 case DTRACEACT_DIFEXPR: 5201 case DTRACEACT_LIBACT: 5202 case DTRACEACT_PRINTF: 5203 case DTRACEACT_PRINTA: 5204 case DTRACEACT_SYSTEM: 5205 case DTRACEACT_FREOPEN: 5206 break; 5207 5208 case DTRACEACT_SYM: 5209 case DTRACEACT_MOD: 5210 if (!dtrace_priv_kernel(state)) 5211 continue; 5212 break; 5213 5214 case DTRACEACT_USYM: 5215 case DTRACEACT_UMOD: 5216 case DTRACEACT_UADDR: { 5217 struct pid *pid = curthread->t_procp->p_pidp; 5218 5219 if (!dtrace_priv_proc(state)) 5220 continue; 5221 5222 DTRACE_STORE(uint64_t, tomax, 5223 valoffs, (uint64_t)pid->pid_id); 5224 DTRACE_STORE(uint64_t, tomax, 5225 valoffs + sizeof (uint64_t), val); 5226 5227 continue; 5228 } 5229 5230 case DTRACEACT_EXIT: { 5231 /* 5232 * For the exit action, we are going to attempt 5233 * to atomically set our activity to be 5234 * draining. If this fails (either because 5235 * another CPU has beat us to the exit action, 5236 * or because our current activity is something 5237 * other than ACTIVE or WARMUP), we will 5238 * continue. 
This assures that the exit action 5239 * can be successfully recorded at most once 5240 * when we're in the ACTIVE state. If we're 5241 * encountering the exit() action while in 5242 * COOLDOWN, however, we want to honor the new 5243 * status code. (We know that we're the only 5244 * thread in COOLDOWN, so there is no race.) 5245 */ 5246 void *activity = &state->dts_activity; 5247 dtrace_activity_t current = state->dts_activity; 5248 5249 if (current == DTRACE_ACTIVITY_COOLDOWN) 5250 break; 5251 5252 if (current != DTRACE_ACTIVITY_WARMUP) 5253 current = DTRACE_ACTIVITY_ACTIVE; 5254 5255 if (dtrace_cas32(activity, current, 5256 DTRACE_ACTIVITY_DRAINING) != current) { 5257 *flags |= CPU_DTRACE_DROP; 5258 continue; 5259 } 5260 5261 break; 5262 } 5263 5264 default: 5265 ASSERT(0); 5266 } 5267 5268 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 5269 uintptr_t end = valoffs + size; 5270 5271 /* 5272 * If this is a string, we're going to only 5273 * load until we find the zero byte -- after 5274 * which we'll store zero bytes. 5275 */ 5276 if (dp->dtdo_rtype.dtdt_kind == 5277 DIF_TYPE_STRING) { 5278 char c = '\0' + 1; 5279 int intuple = act->dta_intuple; 5280 size_t s; 5281 5282 for (s = 0; s < size; s++) { 5283 if (c != '\0') 5284 c = dtrace_load8(val++); 5285 5286 DTRACE_STORE(uint8_t, tomax, 5287 valoffs++, c); 5288 5289 if (c == '\0' && intuple) 5290 break; 5291 } 5292 5293 continue; 5294 } 5295 5296 while (valoffs < end) { 5297 DTRACE_STORE(uint8_t, tomax, valoffs++, 5298 dtrace_load8(val++)); 5299 } 5300 5301 continue; 5302 } 5303 5304 switch (size) { 5305 case 0: 5306 break; 5307 5308 case sizeof (uint8_t): 5309 DTRACE_STORE(uint8_t, tomax, valoffs, val); 5310 break; 5311 case sizeof (uint16_t): 5312 DTRACE_STORE(uint16_t, tomax, valoffs, val); 5313 break; 5314 case sizeof (uint32_t): 5315 DTRACE_STORE(uint32_t, tomax, valoffs, val); 5316 break; 5317 case sizeof (uint64_t): 5318 DTRACE_STORE(uint64_t, tomax, valoffs, val); 5319 break; 5320 default: 5321 /* 5322 * Any other size should have been returned by 5323 * reference, not by value. 5324 */ 5325 ASSERT(0); 5326 break; 5327 } 5328 } 5329 5330 if (*flags & CPU_DTRACE_DROP) 5331 continue; 5332 5333 if (*flags & CPU_DTRACE_FAULT) { 5334 int ndx; 5335 dtrace_action_t *err; 5336 5337 buf->dtb_errors++; 5338 5339 if (probe->dtpr_id == dtrace_probeid_error) { 5340 /* 5341 * There's nothing we can do -- we had an 5342 * error on the error probe. We bump an 5343 * error counter to at least indicate that 5344 * this condition happened. 5345 */ 5346 dtrace_error(&state->dts_dblerrors); 5347 continue; 5348 } 5349 5350 if (vtime) { 5351 /* 5352 * Before recursing on dtrace_probe(), we 5353 * need to explicitly clear out our start 5354 * time to prevent it from being accumulated 5355 * into t_dtrace_vtime. 5356 */ 5357 curthread->t_dtrace_start = 0; 5358 } 5359 5360 /* 5361 * Iterate over the actions to figure out which action 5362 * we were processing when we experienced the error. 5363 * Note that act points _past_ the faulting action; if 5364 * act is ecb->dte_action, the fault was in the 5365 * predicate, if it's ecb->dte_action->dta_next it's 5366 * in action #1, and so on. 5367 */ 5368 for (err = ecb->dte_action, ndx = 0; 5369 err != act; err = err->dta_next, ndx++) 5370 continue; 5371 5372 dtrace_probe_error(state, ecb->dte_epid, ndx, 5373 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 
5374 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 5375 cpu_core[cpuid].cpuc_dtrace_illval); 5376 5377 continue; 5378 } 5379 5380 if (!committed) 5381 buf->dtb_offset = offs + ecb->dte_size; 5382 } 5383 5384 if (vtime) 5385 curthread->t_dtrace_start = dtrace_gethrtime(); 5386 5387 dtrace_interrupt_enable(cookie); 5388 } 5389 5390 /* 5391 * DTrace Probe Hashing Functions 5392 * 5393 * The functions in this section (and indeed, the functions in remaining 5394 * sections) are not _called_ from probe context. (Any exceptions to this are 5395 * marked with a "Note:".) Rather, they are called from elsewhere in the 5396 * DTrace framework to look-up probes in, add probes to and remove probes from 5397 * the DTrace probe hashes. (Each probe is hashed by each element of the 5398 * probe tuple -- allowing for fast lookups, regardless of what was 5399 * specified.) 5400 */ 5401 static uint_t 5402 dtrace_hash_str(char *p) 5403 { 5404 unsigned int g; 5405 uint_t hval = 0; 5406 5407 while (*p) { 5408 hval = (hval << 4) + *p++; 5409 if ((g = (hval & 0xf0000000)) != 0) 5410 hval ^= g >> 24; 5411 hval &= ~g; 5412 } 5413 return (hval); 5414 } 5415 5416 static dtrace_hash_t * 5417 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 5418 { 5419 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 5420 5421 hash->dth_stroffs = stroffs; 5422 hash->dth_nextoffs = nextoffs; 5423 hash->dth_prevoffs = prevoffs; 5424 5425 hash->dth_size = 1; 5426 hash->dth_mask = hash->dth_size - 1; 5427 5428 hash->dth_tab = kmem_zalloc(hash->dth_size * 5429 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 5430 5431 return (hash); 5432 } 5433 5434 static void 5435 dtrace_hash_destroy(dtrace_hash_t *hash) 5436 { 5437 #ifdef DEBUG 5438 int i; 5439 5440 for (i = 0; i < hash->dth_size; i++) 5441 ASSERT(hash->dth_tab[i] == NULL); 5442 #endif 5443 5444 kmem_free(hash->dth_tab, 5445 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 5446 kmem_free(hash, sizeof (dtrace_hash_t)); 5447 } 5448 5449 static void 5450 dtrace_hash_resize(dtrace_hash_t *hash) 5451 { 5452 int size = hash->dth_size, i, ndx; 5453 int new_size = hash->dth_size << 1; 5454 int new_mask = new_size - 1; 5455 dtrace_hashbucket_t **new_tab, *bucket, *next; 5456 5457 ASSERT((new_size & new_mask) == 0); 5458 5459 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 5460 5461 for (i = 0; i < size; i++) { 5462 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 5463 dtrace_probe_t *probe = bucket->dthb_chain; 5464 5465 ASSERT(probe != NULL); 5466 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 5467 5468 next = bucket->dthb_next; 5469 bucket->dthb_next = new_tab[ndx]; 5470 new_tab[ndx] = bucket; 5471 } 5472 } 5473 5474 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 5475 hash->dth_tab = new_tab; 5476 hash->dth_size = new_size; 5477 hash->dth_mask = new_mask; 5478 } 5479 5480 static void 5481 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 5482 { 5483 int hashval = DTRACE_HASHSTR(hash, new); 5484 int ndx = hashval & hash->dth_mask; 5485 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5486 dtrace_probe_t **nextp, **prevp; 5487 5488 for (; bucket != NULL; bucket = bucket->dthb_next) { 5489 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 5490 goto add; 5491 } 5492 5493 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 5494 dtrace_hash_resize(hash); 5495 dtrace_hash_add(hash, new); 5496 return; 5497 } 5498 5499 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 5500 bucket->dthb_next = 
hash->dth_tab[ndx]; 5501 hash->dth_tab[ndx] = bucket; 5502 hash->dth_nbuckets++; 5503 5504 add: 5505 nextp = DTRACE_HASHNEXT(hash, new); 5506 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 5507 *nextp = bucket->dthb_chain; 5508 5509 if (bucket->dthb_chain != NULL) { 5510 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 5511 ASSERT(*prevp == NULL); 5512 *prevp = new; 5513 } 5514 5515 bucket->dthb_chain = new; 5516 bucket->dthb_len++; 5517 } 5518 5519 static dtrace_probe_t * 5520 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 5521 { 5522 int hashval = DTRACE_HASHSTR(hash, template); 5523 int ndx = hashval & hash->dth_mask; 5524 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5525 5526 for (; bucket != NULL; bucket = bucket->dthb_next) { 5527 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 5528 return (bucket->dthb_chain); 5529 } 5530 5531 return (NULL); 5532 } 5533 5534 static int 5535 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 5536 { 5537 int hashval = DTRACE_HASHSTR(hash, template); 5538 int ndx = hashval & hash->dth_mask; 5539 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5540 5541 for (; bucket != NULL; bucket = bucket->dthb_next) { 5542 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 5543 return (bucket->dthb_len); 5544 } 5545 5546 return (0); 5547 } 5548 5549 static void 5550 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 5551 { 5552 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 5553 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5554 5555 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 5556 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 5557 5558 /* 5559 * Find the bucket that we're removing this probe from. 5560 */ 5561 for (; bucket != NULL; bucket = bucket->dthb_next) { 5562 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 5563 break; 5564 } 5565 5566 ASSERT(bucket != NULL); 5567 5568 if (*prevp == NULL) { 5569 if (*nextp == NULL) { 5570 /* 5571 * The removed probe was the only probe on this 5572 * bucket; we need to remove the bucket. 5573 */ 5574 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 5575 5576 ASSERT(bucket->dthb_chain == probe); 5577 ASSERT(b != NULL); 5578 5579 if (b == bucket) { 5580 hash->dth_tab[ndx] = bucket->dthb_next; 5581 } else { 5582 while (b->dthb_next != bucket) 5583 b = b->dthb_next; 5584 b->dthb_next = bucket->dthb_next; 5585 } 5586 5587 ASSERT(hash->dth_nbuckets > 0); 5588 hash->dth_nbuckets--; 5589 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 5590 return; 5591 } 5592 5593 bucket->dthb_chain = *nextp; 5594 } else { 5595 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 5596 } 5597 5598 if (*nextp != NULL) 5599 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 5600 } 5601 5602 /* 5603 * DTrace Utility Functions 5604 * 5605 * These are random utility functions that are _not_ called from probe context. 5606 */ 5607 static int 5608 dtrace_badattr(const dtrace_attribute_t *a) 5609 { 5610 return (a->dtat_name > DTRACE_STABILITY_MAX || 5611 a->dtat_data > DTRACE_STABILITY_MAX || 5612 a->dtat_class > DTRACE_CLASS_MAX); 5613 } 5614 5615 /* 5616 * Return a duplicate copy of a string. If the specified string is NULL, 5617 * this function returns a zero-length string. 5618 */ 5619 static char * 5620 dtrace_strdup(const char *str) 5621 { 5622 char *new = kmem_zalloc((str != NULL ?
strlen(str) : 0) + 1, KM_SLEEP); 5623 5624 if (str != NULL) 5625 (void) strcpy(new, str); 5626 5627 return (new); 5628 } 5629 5630 #define DTRACE_ISALPHA(c) \ 5631 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 5632 5633 static int 5634 dtrace_badname(const char *s) 5635 { 5636 char c; 5637 5638 if (s == NULL || (c = *s++) == '\0') 5639 return (0); 5640 5641 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 5642 return (1); 5643 5644 while ((c = *s++) != '\0') { 5645 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 5646 c != '-' && c != '_' && c != '.' && c != '`') 5647 return (1); 5648 } 5649 5650 return (0); 5651 } 5652 5653 static void 5654 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 5655 { 5656 uint32_t priv; 5657 5658 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 5659 /* 5660 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 5661 */ 5662 priv = DTRACE_PRIV_ALL; 5663 } else { 5664 *uidp = crgetuid(cr); 5665 *zoneidp = crgetzoneid(cr); 5666 5667 priv = 0; 5668 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 5669 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 5670 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 5671 priv |= DTRACE_PRIV_USER; 5672 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 5673 priv |= DTRACE_PRIV_PROC; 5674 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 5675 priv |= DTRACE_PRIV_OWNER; 5676 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 5677 priv |= DTRACE_PRIV_ZONEOWNER; 5678 } 5679 5680 *privp = priv; 5681 } 5682 5683 #ifdef DTRACE_ERRDEBUG 5684 static void 5685 dtrace_errdebug(const char *str) 5686 { 5687 int hval = dtrace_hash_str((char *)str) % DTRACE_ERRHASHSZ; 5688 int occupied = 0; 5689 5690 mutex_enter(&dtrace_errlock); 5691 dtrace_errlast = str; 5692 dtrace_errthread = curthread; 5693 5694 while (occupied++ < DTRACE_ERRHASHSZ) { 5695 if (dtrace_errhash[hval].dter_msg == str) { 5696 dtrace_errhash[hval].dter_count++; 5697 goto out; 5698 } 5699 5700 if (dtrace_errhash[hval].dter_msg != NULL) { 5701 hval = (hval + 1) % DTRACE_ERRHASHSZ; 5702 continue; 5703 } 5704 5705 dtrace_errhash[hval].dter_msg = str; 5706 dtrace_errhash[hval].dter_count = 1; 5707 goto out; 5708 } 5709 5710 panic("dtrace: undersized error hash"); 5711 out: 5712 mutex_exit(&dtrace_errlock); 5713 } 5714 #endif 5715 5716 /* 5717 * DTrace Matching Functions 5718 * 5719 * These functions are used to match groups of probes, given some elements of 5720 * a probe tuple, or some globbed expressions for elements of a probe tuple. 5721 */ 5722 static int 5723 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 5724 zoneid_t zoneid) 5725 { 5726 if (priv != DTRACE_PRIV_ALL) { 5727 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 5728 uint32_t match = priv & ppriv; 5729 5730 /* 5731 * No PRIV_DTRACE_* privileges... 5732 */ 5733 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 5734 DTRACE_PRIV_KERNEL)) == 0) 5735 return (0); 5736 5737 /* 5738 * No matching bits, but there were bits to match... 5739 */ 5740 if (match == 0 && ppriv != 0) 5741 return (0); 5742 5743 /* 5744 * Need to have permissions to the process, but don't... 5745 */ 5746 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 5747 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 5748 return (0); 5749 } 5750 5751 /* 5752 * Need to be in the same zone unless we possess the 5753 * privilege to examine all zones. 
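 * (That is: if the provider demands DTRACE_PRIV_ZONEOWNER and
 * the consumer lacks that privilege, the consumer may still see
 * the probe when its zone ID matches the provider's
 * dtpp_zoneid -- the comparison immediately below.)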
5754 */ 5755 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 5756 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 5757 return (0); 5758 } 5759 } 5760 5761 return (1); 5762 } 5763 5764 /* 5765 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 5766 * consists of input pattern strings and an ops-vector to evaluate them. 5767 * This function returns >0 for match, 0 for no match, and <0 for error. 5768 */ 5769 static int 5770 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 5771 uint32_t priv, uid_t uid, zoneid_t zoneid) 5772 { 5773 dtrace_provider_t *pvp = prp->dtpr_provider; 5774 int rv; 5775 5776 if (pvp->dtpv_defunct) 5777 return (0); 5778 5779 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 5780 return (rv); 5781 5782 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 5783 return (rv); 5784 5785 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 5786 return (rv); 5787 5788 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 5789 return (rv); 5790 5791 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 5792 return (0); 5793 5794 return (rv); 5795 } 5796 5797 /* 5798 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 5799 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 5800 * libc's version, the kernel version only applies to 8-bit ASCII strings. 5801 * In addition, all of the recursion cases except for '*' matching have been 5802 * unwound. For '*', we still implement recursive evaluation, but a depth 5803 * counter is maintained and matching is aborted if we recurse too deep. 5804 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 5805 */ 5806 static int 5807 dtrace_match_glob(const char *s, const char *p, int depth) 5808 { 5809 const char *olds; 5810 char s1, c; 5811 int gs; 5812 5813 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 5814 return (-1); 5815 5816 if (s == NULL) 5817 s = ""; /* treat NULL as empty string */ 5818 5819 top: 5820 olds = s; 5821 s1 = *s++; 5822 5823 if (p == NULL) 5824 return (0); 5825 5826 if ((c = *p++) == '\0') 5827 return (s1 == '\0'); 5828 5829 switch (c) { 5830 case '[': { 5831 int ok = 0, notflag = 0; 5832 char lc = '\0'; 5833 5834 if (s1 == '\0') 5835 return (0); 5836 5837 if (*p == '!') { 5838 notflag = 1; 5839 p++; 5840 } 5841 5842 if ((c = *p++) == '\0') 5843 return (0); 5844 5845 do { 5846 if (c == '-' && lc != '\0' && *p != ']') { 5847 if ((c = *p++) == '\0') 5848 return (0); 5849 if (c == '\\' && (c = *p++) == '\0') 5850 return (0); 5851 5852 if (notflag) { 5853 if (s1 < lc || s1 > c) 5854 ok++; 5855 else 5856 return (0); 5857 } else if (lc <= s1 && s1 <= c) 5858 ok++; 5859 5860 } else if (c == '\\' && (c = *p++) == '\0') 5861 return (0); 5862 5863 lc = c; /* save left-hand 'c' for next iteration */ 5864 5865 if (notflag) { 5866 if (s1 != c) 5867 ok++; 5868 else 5869 return (0); 5870 } else if (s1 == c) 5871 ok++; 5872 5873 if ((c = *p++) == '\0') 5874 return (0); 5875 5876 } while (c != ']'); 5877 5878 if (ok) 5879 goto top; 5880 5881 return (0); 5882 } 5883 5884 case '\\': 5885 if ((c = *p++) == '\0') 5886 return (0); 5887 /*FALLTHRU*/ 5888 5889 default: 5890 if (c != s1) 5891 return (0); 5892 /*FALLTHRU*/ 5893 5894 case '?': 5895 if (s1 != '\0') 5896 goto top; 5897 return (0); 5898 5899 case '*': 5900 while (*p == '*') 5901 p++; /* consecutive *'s are identical to a single one */ 5902 5903 if (*p == '\0') 5904 return (1); 5905 5906 for (s = 
olds; *s != '\0'; s++) { 5907 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 5908 return (gs); 5909 } 5910 5911 return (0); 5912 } 5913 } 5914 5915 /*ARGSUSED*/ 5916 static int 5917 dtrace_match_string(const char *s, const char *p, int depth) 5918 { 5919 return (s != NULL && strcmp(s, p) == 0); 5920 } 5921 5922 /*ARGSUSED*/ 5923 static int 5924 dtrace_match_nul(const char *s, const char *p, int depth) 5925 { 5926 return (1); /* always match the empty pattern */ 5927 } 5928 5929 /*ARGSUSED*/ 5930 static int 5931 dtrace_match_nonzero(const char *s, const char *p, int depth) 5932 { 5933 return (s != NULL && s[0] != '\0'); 5934 } 5935 5936 static int 5937 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 5938 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 5939 { 5940 dtrace_probe_t template, *probe; 5941 dtrace_hash_t *hash = NULL; 5942 int len, best = INT_MAX, nmatched = 0; 5943 dtrace_id_t i; 5944 5945 ASSERT(MUTEX_HELD(&dtrace_lock)); 5946 5947 /* 5948 * If the probe ID is specified in the key, just lookup by ID and 5949 * invoke the match callback once if a matching probe is found. 5950 */ 5951 if (pkp->dtpk_id != DTRACE_IDNONE) { 5952 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 5953 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 5954 (void) (*matched)(probe, arg); 5955 nmatched++; 5956 } 5957 return (nmatched); 5958 } 5959 5960 template.dtpr_mod = (char *)pkp->dtpk_mod; 5961 template.dtpr_func = (char *)pkp->dtpk_func; 5962 template.dtpr_name = (char *)pkp->dtpk_name; 5963 5964 /* 5965 * We want to find the most distinct of the module name, function 5966 * name, and name. So for each one that is not a glob pattern or 5967 * empty string, we perform a lookup in the corresponding hash and 5968 * use the hash table with the fewest collisions to do our search. 5969 */ 5970 if (pkp->dtpk_mmatch == &dtrace_match_string && 5971 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 5972 best = len; 5973 hash = dtrace_bymod; 5974 } 5975 5976 if (pkp->dtpk_fmatch == &dtrace_match_string && 5977 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 5978 best = len; 5979 hash = dtrace_byfunc; 5980 } 5981 5982 if (pkp->dtpk_nmatch == &dtrace_match_string && 5983 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 5984 best = len; 5985 hash = dtrace_byname; 5986 } 5987 5988 /* 5989 * If we did not select a hash table, iterate over every probe and 5990 * invoke our callback for each one that matches our input probe key. 5991 */ 5992 if (hash == NULL) { 5993 for (i = 0; i < dtrace_nprobes; i++) { 5994 if ((probe = dtrace_probes[i]) == NULL || 5995 dtrace_match_probe(probe, pkp, priv, uid, 5996 zoneid) <= 0) 5997 continue; 5998 5999 nmatched++; 6000 6001 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 6002 break; 6003 } 6004 6005 return (nmatched); 6006 } 6007 6008 /* 6009 * If we selected a hash table, iterate over each probe of the same key 6010 * name and invoke the callback for every probe that matches the other 6011 * attributes of our input probe key. 
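 * (Probes sharing the hashed element are chained together;
 * DTRACE_HASHNEXT() walks that chain, and each candidate is
 * still vetted against the full key by dtrace_match_probe().)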
6012 */ 6013 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 6014 probe = *(DTRACE_HASHNEXT(hash, probe))) { 6015 6016 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 6017 continue; 6018 6019 nmatched++; 6020 6021 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 6022 break; 6023 } 6024 6025 return (nmatched); 6026 } 6027 6028 /* 6029 * Return the function pointer dtrace_match_probe() should use to compare the 6030 * specified pattern with a string. For NULL or empty patterns, we select 6031 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 6032 * For non-empty non-glob strings, we use dtrace_match_string(). 6033 */ 6034 static dtrace_probekey_f * 6035 dtrace_probekey_func(const char *p) 6036 { 6037 char c; 6038 6039 if (p == NULL || *p == '\0') 6040 return (&dtrace_match_nul); 6041 6042 while ((c = *p++) != '\0') { 6043 if (c == '[' || c == '?' || c == '*' || c == '\\') 6044 return (&dtrace_match_glob); 6045 } 6046 6047 return (&dtrace_match_string); 6048 } 6049 6050 /* 6051 * Build a probe comparison key for use with dtrace_match_probe() from the 6052 * given probe description. By convention, a null key only matches anchored 6053 * probes: if each field is the empty string, reset dtpk_fmatch to 6054 * dtrace_match_nonzero(). 6055 */ 6056 static void 6057 dtrace_probekey(const dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 6058 { 6059 pkp->dtpk_prov = pdp->dtpd_provider; 6060 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 6061 6062 pkp->dtpk_mod = pdp->dtpd_mod; 6063 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 6064 6065 pkp->dtpk_func = pdp->dtpd_func; 6066 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 6067 6068 pkp->dtpk_name = pdp->dtpd_name; 6069 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 6070 6071 pkp->dtpk_id = pdp->dtpd_id; 6072 6073 if (pkp->dtpk_id == DTRACE_IDNONE && 6074 pkp->dtpk_pmatch == &dtrace_match_nul && 6075 pkp->dtpk_mmatch == &dtrace_match_nul && 6076 pkp->dtpk_fmatch == &dtrace_match_nul && 6077 pkp->dtpk_nmatch == &dtrace_match_nul) 6078 pkp->dtpk_fmatch = &dtrace_match_nonzero; 6079 } 6080 6081 /* 6082 * DTrace Provider-to-Framework API Functions 6083 * 6084 * These functions implement much of the Provider-to-Framework API, as 6085 * described in <sys/dtrace.h>. The parts of the API not in this section are 6086 * the functions in the API for probe management (found below), and 6087 * dtrace_probe() itself (found above). 6088 */ 6089 6090 /* 6091 * Register the calling provider with the DTrace framework. This should 6092 * generally be called by DTrace providers in their attach(9E) entry point. 6093 */ 6094 int 6095 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 6096 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 6097 { 6098 dtrace_provider_t *provider; 6099 6100 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 6101 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6102 "arguments", name ?
name : "<NULL>"); 6103 return (EINVAL); 6104 } 6105 6106 if (name[0] == '\0' || dtrace_badname(name)) { 6107 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6108 "provider name", name); 6109 return (EINVAL); 6110 } 6111 6112 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 6113 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 6114 pops->dtps_destroy == NULL || 6115 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 6116 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6117 "provider ops", name); 6118 return (EINVAL); 6119 } 6120 6121 if (dtrace_badattr(&pap->dtpa_provider) || 6122 dtrace_badattr(&pap->dtpa_mod) || 6123 dtrace_badattr(&pap->dtpa_func) || 6124 dtrace_badattr(&pap->dtpa_name) || 6125 dtrace_badattr(&pap->dtpa_args)) { 6126 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6127 "provider attributes", name); 6128 return (EINVAL); 6129 } 6130 6131 if (priv & ~DTRACE_PRIV_ALL) { 6132 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6133 "privilege attributes", name); 6134 return (EINVAL); 6135 } 6136 6137 if ((priv & DTRACE_PRIV_KERNEL) && 6138 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 6139 pops->dtps_usermode == NULL) { 6140 cmn_err(CE_WARN, "failed to register provider '%s': need " 6141 "dtps_usermode() op for given privilege attributes", name); 6142 return (EINVAL); 6143 } 6144 6145 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 6146 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 6147 (void) strcpy(provider->dtpv_name, name); 6148 6149 provider->dtpv_attr = *pap; 6150 provider->dtpv_priv.dtpp_flags = priv; 6151 if (cr != NULL) { 6152 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 6153 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 6154 } 6155 provider->dtpv_pops = *pops; 6156 6157 if (pops->dtps_provide == NULL) { 6158 ASSERT(pops->dtps_provide_module != NULL); 6159 provider->dtpv_pops.dtps_provide = 6160 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop; 6161 } 6162 6163 if (pops->dtps_provide_module == NULL) { 6164 ASSERT(pops->dtps_provide != NULL); 6165 provider->dtpv_pops.dtps_provide_module = 6166 (void (*)(void *, struct modctl *))dtrace_nullop; 6167 } 6168 6169 if (pops->dtps_suspend == NULL) { 6170 ASSERT(pops->dtps_resume == NULL); 6171 provider->dtpv_pops.dtps_suspend = 6172 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 6173 provider->dtpv_pops.dtps_resume = 6174 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 6175 } 6176 6177 provider->dtpv_arg = arg; 6178 *idp = (dtrace_provider_id_t)provider; 6179 6180 if (pops == &dtrace_provider_ops) { 6181 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 6182 ASSERT(MUTEX_HELD(&dtrace_lock)); 6183 ASSERT(dtrace_anon.dta_enabling == NULL); 6184 6185 /* 6186 * We make sure that the DTrace provider is at the head of 6187 * the provider chain. 6188 */ 6189 provider->dtpv_next = dtrace_provider; 6190 dtrace_provider = provider; 6191 return (0); 6192 } 6193 6194 mutex_enter(&dtrace_provider_lock); 6195 mutex_enter(&dtrace_lock); 6196 6197 /* 6198 * If there is at least one provider registered, we'll add this 6199 * provider after the first provider. 
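 *
 * (The first provider is always the dtrace provider itself; see the
 * head-of-chain handling above. As a sketch, registering hypothetical
 * providers "foo" and then "bar" yields the chain
 *
 *	dtrace -> bar -> foo
 *
 * since each newcomer is linked in immediately behind the head.)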
6200 */ 6201 if (dtrace_provider != NULL) { 6202 provider->dtpv_next = dtrace_provider->dtpv_next; 6203 dtrace_provider->dtpv_next = provider; 6204 } else { 6205 dtrace_provider = provider; 6206 } 6207 6208 if (dtrace_retained != NULL) { 6209 dtrace_enabling_provide(provider); 6210 6211 /* 6212 * Now we need to call dtrace_enabling_matchall() -- which 6213 * will acquire cpu_lock and dtrace_lock. We therefore need 6214 * to drop all of our locks before calling into it... 6215 */ 6216 mutex_exit(&dtrace_lock); 6217 mutex_exit(&dtrace_provider_lock); 6218 dtrace_enabling_matchall(); 6219 6220 return (0); 6221 } 6222 6223 mutex_exit(&dtrace_lock); 6224 mutex_exit(&dtrace_provider_lock); 6225 6226 return (0); 6227 } 6228 6229 /* 6230 * Unregister the specified provider from the DTrace framework. This should 6231 * generally be called by DTrace providers in their detach(9E) entry point. 6232 */ 6233 int 6234 dtrace_unregister(dtrace_provider_id_t id) 6235 { 6236 dtrace_provider_t *old = (dtrace_provider_t *)id; 6237 dtrace_provider_t *prev = NULL; 6238 int i, self = 0; 6239 dtrace_probe_t *probe, *first = NULL; 6240 6241 if (old->dtpv_pops.dtps_enable == 6242 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 6243 /* 6244 * If DTrace itself is the provider, we're called with locks 6245 * already held. 6246 */ 6247 ASSERT(old == dtrace_provider); 6248 ASSERT(dtrace_devi != NULL); 6249 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 6250 ASSERT(MUTEX_HELD(&dtrace_lock)); 6251 self = 1; 6252 6253 if (dtrace_provider->dtpv_next != NULL) { 6254 /* 6255 * There's another provider here; return failure. 6256 */ 6257 return (EBUSY); 6258 } 6259 } else { 6260 mutex_enter(&dtrace_provider_lock); 6261 mutex_enter(&mod_lock); 6262 mutex_enter(&dtrace_lock); 6263 } 6264 6265 /* 6266 * If anyone has /dev/dtrace open, or if there are anonymous enabled 6267 * probes, we refuse to let providers slither away, unless this 6268 * provider has already been explicitly invalidated. 6269 */ 6270 if (!old->dtpv_defunct && 6271 (dtrace_opens || (dtrace_anon.dta_state != NULL && 6272 dtrace_anon.dta_state->dts_necbs > 0))) { 6273 if (!self) { 6274 mutex_exit(&dtrace_lock); 6275 mutex_exit(&mod_lock); 6276 mutex_exit(&dtrace_provider_lock); 6277 } 6278 return (EBUSY); 6279 } 6280 6281 /* 6282 * Attempt to destroy the probes associated with this provider. 6283 */ 6284 for (i = 0; i < dtrace_nprobes; i++) { 6285 if ((probe = dtrace_probes[i]) == NULL) 6286 continue; 6287 6288 if (probe->dtpr_provider != old) 6289 continue; 6290 6291 if (probe->dtpr_ecb == NULL) 6292 continue; 6293 6294 /* 6295 * We have at least one ECB; we can't remove this provider. 6296 */ 6297 if (!self) { 6298 mutex_exit(&dtrace_lock); 6299 mutex_exit(&mod_lock); 6300 mutex_exit(&dtrace_provider_lock); 6301 } 6302 return (EBUSY); 6303 } 6304 6305 /* 6306 * All of the probes for this provider are disabled; we can safely 6307 * remove all of them from their hash chains and from the probe array. 
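 *
 * (The loop below threads each doomed probe onto a local list through
 * its dtpr_nextmod link; the provider's dtps_destroy op is invoked on
 * that list only after the dtrace_sync() that follows, once no CPU can
 * still be referencing any of these probes.)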
6308 */ 6309 for (i = 0; i < dtrace_nprobes; i++) { 6310 if ((probe = dtrace_probes[i]) == NULL) 6311 continue; 6312 6313 if (probe->dtpr_provider != old) 6314 continue; 6315 6316 dtrace_probes[i] = NULL; 6317 6318 dtrace_hash_remove(dtrace_bymod, probe); 6319 dtrace_hash_remove(dtrace_byfunc, probe); 6320 dtrace_hash_remove(dtrace_byname, probe); 6321 6322 if (first == NULL) { 6323 first = probe; 6324 probe->dtpr_nextmod = NULL; 6325 } else { 6326 probe->dtpr_nextmod = first; 6327 first = probe; 6328 } 6329 } 6330 6331 /* 6332 * The provider's probes have been removed from the hash chains and 6333 * from the probe array. Now issue a dtrace_sync() to be sure that 6334 * everyone has cleared out from any probe array processing. 6335 */ 6336 dtrace_sync(); 6337 6338 for (probe = first; probe != NULL; probe = first) { 6339 first = probe->dtpr_nextmod; 6340 6341 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 6342 probe->dtpr_arg); 6343 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 6344 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 6345 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 6346 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 6347 kmem_free(probe, sizeof (dtrace_probe_t)); 6348 } 6349 6350 if ((prev = dtrace_provider) == old) { 6351 ASSERT(self || dtrace_devi == NULL); 6352 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 6353 dtrace_provider = old->dtpv_next; 6354 } else { 6355 while (prev != NULL && prev->dtpv_next != old) 6356 prev = prev->dtpv_next; 6357 6358 if (prev == NULL) { 6359 panic("attempt to unregister non-existent " 6360 "dtrace provider %p\n", (void *)id); 6361 } 6362 6363 prev->dtpv_next = old->dtpv_next; 6364 } 6365 6366 if (!self) { 6367 mutex_exit(&dtrace_lock); 6368 mutex_exit(&mod_lock); 6369 mutex_exit(&dtrace_provider_lock); 6370 } 6371 6372 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 6373 kmem_free(old, sizeof (dtrace_provider_t)); 6374 6375 return (0); 6376 } 6377 6378 /* 6379 * Invalidate the specified provider. All subsequent probe lookups for the 6380 * specified provider will fail, but its probes will not be removed. 6381 */ 6382 void 6383 dtrace_invalidate(dtrace_provider_id_t id) 6384 { 6385 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 6386 6387 ASSERT(pvp->dtpv_pops.dtps_enable != 6388 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 6389 6390 mutex_enter(&dtrace_provider_lock); 6391 mutex_enter(&dtrace_lock); 6392 6393 pvp->dtpv_defunct = 1; 6394 6395 mutex_exit(&dtrace_lock); 6396 mutex_exit(&dtrace_provider_lock); 6397 } 6398 6399 /* 6400 * Indicate whether or not DTrace has attached. 6401 */ 6402 int 6403 dtrace_attached(void) 6404 { 6405 /* 6406 * dtrace_provider will be non-NULL iff the DTrace driver has 6407 * attached. (It's non-NULL because DTrace is always itself a 6408 * provider.) 6409 */ 6410 return (dtrace_provider != NULL); 6411 } 6412 6413 /* 6414 * Remove all the unenabled probes for the given provider. This function is 6415 * not unlike dtrace_unregister(), except that it doesn't remove the provider 6416 * -- just as many of its associated probes as it can. 6417 */ 6418 int 6419 dtrace_condense(dtrace_provider_id_t id) 6420 { 6421 dtrace_provider_t *prov = (dtrace_provider_t *)id; 6422 int i; 6423 dtrace_probe_t *probe; 6424 6425 /* 6426 * Make sure this isn't the dtrace provider itself. 
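 *
 * (As in dtrace_unregister(), the dtrace provider is recognized by its
 * dtps_enable op being dtrace_nullop. A provider would typically invoke
 * this routine on itself, e.g.:
 *
 *	(void) dtrace_condense(foo_id);
 *
 * where foo_id is the id handed back by dtrace_register() -- "foo"
 * being, of course, a hypothetical provider.)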
6427 */ 6428 ASSERT(prov->dtpv_pops.dtps_enable != 6429 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 6430 6431 mutex_enter(&dtrace_provider_lock); 6432 mutex_enter(&dtrace_lock); 6433 6434 /* 6435 * Attempt to destroy the probes associated with this provider. 6436 */ 6437 for (i = 0; i < dtrace_nprobes; i++) { 6438 if ((probe = dtrace_probes[i]) == NULL) 6439 continue; 6440 6441 if (probe->dtpr_provider != prov) 6442 continue; 6443 6444 if (probe->dtpr_ecb != NULL) 6445 continue; 6446 6447 dtrace_probes[i] = NULL; 6448 6449 dtrace_hash_remove(dtrace_bymod, probe); 6450 dtrace_hash_remove(dtrace_byfunc, probe); 6451 dtrace_hash_remove(dtrace_byname, probe); 6452 6453 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 6454 probe->dtpr_arg); 6455 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 6456 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 6457 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 6458 kmem_free(probe, sizeof (dtrace_probe_t)); 6459 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 6460 } 6461 6462 mutex_exit(&dtrace_lock); 6463 mutex_exit(&dtrace_provider_lock); 6464 6465 return (0); 6466 } 6467 6468 /* 6469 * DTrace Probe Management Functions 6470 * 6471 * The functions in this section perform the DTrace probe management, 6472 * including functions to create probes, look-up probes, and call into the 6473 * providers to request that probes be provided. Some of these functions are 6474 * in the Provider-to-Framework API; these functions can be identified by the 6475 * fact that they are not declared "static". 6476 */ 6477 6478 /* 6479 * Create a probe with the specified module name, function name, and name. 6480 */ 6481 dtrace_id_t 6482 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 6483 const char *func, const char *name, int aframes, void *arg) 6484 { 6485 dtrace_probe_t *probe, **probes; 6486 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 6487 dtrace_id_t id; 6488 6489 if (provider == dtrace_provider) { 6490 ASSERT(MUTEX_HELD(&dtrace_lock)); 6491 } else { 6492 mutex_enter(&dtrace_lock); 6493 } 6494 6495 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 6496 VM_BESTFIT | VM_SLEEP); 6497 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 6498 6499 probe->dtpr_id = id; 6500 probe->dtpr_gen = dtrace_probegen++; 6501 probe->dtpr_mod = dtrace_strdup(mod); 6502 probe->dtpr_func = dtrace_strdup(func); 6503 probe->dtpr_name = dtrace_strdup(name); 6504 probe->dtpr_arg = arg; 6505 probe->dtpr_aframes = aframes; 6506 probe->dtpr_provider = provider; 6507 6508 dtrace_hash_add(dtrace_bymod, probe); 6509 dtrace_hash_add(dtrace_byfunc, probe); 6510 dtrace_hash_add(dtrace_byname, probe); 6511 6512 if (id - 1 >= dtrace_nprobes) { 6513 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 6514 size_t nsize = osize << 1; 6515 6516 if (nsize == 0) { 6517 ASSERT(osize == 0); 6518 ASSERT(dtrace_probes == NULL); 6519 nsize = sizeof (dtrace_probe_t *); 6520 } 6521 6522 probes = kmem_zalloc(nsize, KM_SLEEP); 6523 6524 if (dtrace_probes == NULL) { 6525 ASSERT(osize == 0); 6526 dtrace_probes = probes; 6527 dtrace_nprobes = 1; 6528 } else { 6529 dtrace_probe_t **oprobes = dtrace_probes; 6530 6531 bcopy(oprobes, probes, osize); 6532 dtrace_membar_producer(); 6533 dtrace_probes = probes; 6534 6535 dtrace_sync(); 6536 6537 /* 6538 * All CPUs are now seeing the new probes array; we can 6539 * safely free the old array. 
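 *
 * (A sketch of the growth path: with dtrace_nprobes at 4 and a fifth
 * probe being created, osize is 4 * sizeof (dtrace_probe_t *), nsize is
 * twice that, and dtrace_nprobes becomes 8 once the copy is globally
 * visible.)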
6540 */
6541 kmem_free(oprobes, osize);
6542 dtrace_nprobes <<= 1;
6543 }
6544
6545 ASSERT(id - 1 < dtrace_nprobes);
6546 }
6547
6548 ASSERT(dtrace_probes[id - 1] == NULL);
6549 dtrace_probes[id - 1] = probe;
6550
6551 if (provider != dtrace_provider)
6552 mutex_exit(&dtrace_lock);
6553
6554 return (id);
6555 }
6556
6557 static dtrace_probe_t *
6558 dtrace_probe_lookup_id(dtrace_id_t id)
6559 {
6560 ASSERT(MUTEX_HELD(&dtrace_lock));
6561
6562 if (id == 0 || id > dtrace_nprobes)
6563 return (NULL);
6564
6565 return (dtrace_probes[id - 1]);
6566 }
6567
6568 static int
6569 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg)
6570 {
6571 *((dtrace_id_t *)arg) = probe->dtpr_id;
6572
6573 return (DTRACE_MATCH_DONE);
6574 }
6575
6576 /*
6577 * Look up a probe based on provider and one or more of module name, function
6578 * name and probe name.
6579 */
6580 dtrace_id_t
6581 dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod,
6582 const char *func, const char *name)
6583 {
6584 dtrace_probekey_t pkey;
6585 dtrace_id_t id;
6586 int match;
6587
6588 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
6589 pkey.dtpk_pmatch = &dtrace_match_string;
6590 pkey.dtpk_mod = mod;
6591 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
6592 pkey.dtpk_func = func;
6593 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
6594 pkey.dtpk_name = name;
6595 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
6596 pkey.dtpk_id = DTRACE_IDNONE;
6597
6598 mutex_enter(&dtrace_lock);
6599 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
6600 dtrace_probe_lookup_match, &id);
6601 mutex_exit(&dtrace_lock);
6602
6603 ASSERT(match == 1 || match == 0);
6604 return (match ? id : 0);
6605 }
6606
6607 /*
6608 * Returns the probe argument associated with the specified probe.
6609 */
6610 void *
6611 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
6612 {
6613 dtrace_probe_t *probe;
6614 void *rval = NULL;
6615
6616 mutex_enter(&dtrace_lock);
6617
6618 if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
6619 probe->dtpr_provider == (dtrace_provider_t *)id)
6620 rval = probe->dtpr_arg;
6621
6622 mutex_exit(&dtrace_lock);
6623
6624 return (rval);
6625 }
6626
6627 /*
6628 * Copy a probe into a probe description.
6629 */
6630 static void
6631 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
6632 {
6633 bzero(pdp, sizeof (dtrace_probedesc_t));
6634 pdp->dtpd_id = prp->dtpr_id;
6635
6636 (void) strncpy(pdp->dtpd_provider,
6637 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);
6638
6639 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
6640 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
6641 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
6642 }
6643
6644 /*
6645 * Called to indicate that a probe -- or probes -- should be provided by a
6646 * specified provider. If the specified description is NULL, the provider will
6647 * be told to provide all of its probes. (This is done whenever a new
6648 * consumer comes along, or whenever a retained enabling is to be matched.) If
6649 * the specified description is non-NULL, the provider is given the
6650 * opportunity to dynamically provide the specified probe, allowing providers
6651 * to support the creation of probes on-the-fly. (So-called _autocreated_
6652 * probes.)
If the provider is NULL, the operations will be applied to all 6653 * providers; if the provider is non-NULL the operations will only be applied 6654 * to the specified provider. The dtrace_provider_lock must be held, and the 6655 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 6656 * will need to grab the dtrace_lock when it reenters the framework through 6657 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 6658 */ 6659 static void 6660 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 6661 { 6662 struct modctl *ctl; 6663 int all = 0; 6664 6665 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 6666 6667 if (prv == NULL) { 6668 all = 1; 6669 prv = dtrace_provider; 6670 } 6671 6672 do { 6673 /* 6674 * First, call the blanket provide operation. 6675 */ 6676 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 6677 6678 /* 6679 * Now call the per-module provide operation. We will grab 6680 * mod_lock to prevent the list from being modified. Note 6681 * that this also prevents the mod_busy bits from changing. 6682 * (mod_busy can only be changed with mod_lock held.) 6683 */ 6684 mutex_enter(&mod_lock); 6685 6686 ctl = &modules; 6687 do { 6688 if (ctl->mod_busy || ctl->mod_mp == NULL) 6689 continue; 6690 6691 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 6692 6693 } while ((ctl = ctl->mod_next) != &modules); 6694 6695 mutex_exit(&mod_lock); 6696 } while (all && (prv = prv->dtpv_next) != NULL); 6697 } 6698 6699 /* 6700 * Iterate over each probe, and call the Framework-to-Provider API function 6701 * denoted by offs. 6702 */ 6703 static void 6704 dtrace_probe_foreach(uintptr_t offs) 6705 { 6706 dtrace_provider_t *prov; 6707 void (*func)(void *, dtrace_id_t, void *); 6708 dtrace_probe_t *probe; 6709 dtrace_icookie_t cookie; 6710 int i; 6711 6712 /* 6713 * We disable interrupts to walk through the probe array. This is 6714 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 6715 * won't see stale data. 6716 */ 6717 cookie = dtrace_interrupt_disable(); 6718 6719 for (i = 0; i < dtrace_nprobes; i++) { 6720 if ((probe = dtrace_probes[i]) == NULL) 6721 continue; 6722 6723 if (probe->dtpr_ecb == NULL) { 6724 /* 6725 * This probe isn't enabled -- don't call the function. 6726 */ 6727 continue; 6728 } 6729 6730 prov = probe->dtpr_provider; 6731 func = *((void(**)(void *, dtrace_id_t, void *)) 6732 ((uintptr_t)&prov->dtpv_pops + offs)); 6733 6734 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 6735 } 6736 6737 dtrace_interrupt_enable(cookie); 6738 } 6739 6740 static int 6741 dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 6742 { 6743 dtrace_probekey_t pkey; 6744 uint32_t priv; 6745 uid_t uid; 6746 zoneid_t zoneid; 6747 6748 ASSERT(MUTEX_HELD(&dtrace_lock)); 6749 dtrace_ecb_create_cache = NULL; 6750 6751 if (desc == NULL) { 6752 /* 6753 * If we're passed a NULL description, we're being asked to 6754 * create an ECB with a NULL probe. 
6755 */ 6756 (void) dtrace_ecb_create_enable(NULL, enab); 6757 return (0); 6758 } 6759 6760 dtrace_probekey(desc, &pkey); 6761 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 6762 &priv, &uid, &zoneid); 6763 6764 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 6765 enab)); 6766 } 6767 6768 /* 6769 * DTrace Helper Provider Functions 6770 */ 6771 static void 6772 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 6773 { 6774 attr->dtat_name = DOF_ATTR_NAME(dofattr); 6775 attr->dtat_data = DOF_ATTR_DATA(dofattr); 6776 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 6777 } 6778 6779 static void 6780 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 6781 const dof_provider_t *dofprov, char *strtab) 6782 { 6783 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 6784 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 6785 dofprov->dofpv_provattr); 6786 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 6787 dofprov->dofpv_modattr); 6788 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 6789 dofprov->dofpv_funcattr); 6790 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 6791 dofprov->dofpv_nameattr); 6792 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 6793 dofprov->dofpv_argsattr); 6794 } 6795 6796 static void 6797 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 6798 { 6799 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 6800 dof_hdr_t *dof = (dof_hdr_t *)daddr; 6801 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 6802 dof_provider_t *provider; 6803 dof_probe_t *probe; 6804 uint32_t *off, *enoff; 6805 uint8_t *arg; 6806 char *strtab; 6807 uint_t i, nprobes; 6808 dtrace_helper_provdesc_t dhpv; 6809 dtrace_helper_probedesc_t dhpb; 6810 dtrace_meta_t *meta = dtrace_meta_pid; 6811 dtrace_mops_t *mops = &meta->dtm_mops; 6812 void *parg; 6813 6814 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 6815 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 6816 provider->dofpv_strtab * dof->dofh_secsize); 6817 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 6818 provider->dofpv_probes * dof->dofh_secsize); 6819 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 6820 provider->dofpv_prargs * dof->dofh_secsize); 6821 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 6822 provider->dofpv_proffs * dof->dofh_secsize); 6823 6824 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 6825 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 6826 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 6827 enoff = NULL; 6828 6829 /* 6830 * See dtrace_helper_provider_validate(). 6831 */ 6832 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 6833 provider->dofpv_prenoffs != DOF_SECT_NONE) { 6834 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 6835 provider->dofpv_prenoffs * dof->dofh_secsize); 6836 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 6837 } 6838 6839 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 6840 6841 /* 6842 * Create the provider. 6843 */ 6844 dtrace_dofprov2hprov(&dhpv, provider, strtab); 6845 6846 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 6847 return; 6848 6849 meta->dtm_count++; 6850 6851 /* 6852 * Create the probes. 
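 *
 * (Each dof_probe_t below is translated into a dtrace_helper_probedesc_t
 * and handed to the meta provider's dtms_create_probe op; the offsets,
 * is-enabled offsets and native/translated argument types all resolve
 * through the provider's string and offset sections.)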
6853 */ 6854 for (i = 0; i < nprobes; i++) { 6855 probe = (dof_probe_t *)(uintptr_t)(daddr + 6856 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 6857 6858 dhpb.dthpb_mod = dhp->dofhp_mod; 6859 dhpb.dthpb_func = strtab + probe->dofpr_func; 6860 dhpb.dthpb_name = strtab + probe->dofpr_name; 6861 dhpb.dthpb_base = probe->dofpr_addr; 6862 dhpb.dthpb_offs = off + probe->dofpr_offidx; 6863 dhpb.dthpb_noffs = probe->dofpr_noffs; 6864 if (enoff != NULL) { 6865 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 6866 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 6867 } else { 6868 dhpb.dthpb_enoffs = NULL; 6869 dhpb.dthpb_nenoffs = 0; 6870 } 6871 dhpb.dthpb_args = arg + probe->dofpr_argidx; 6872 dhpb.dthpb_nargc = probe->dofpr_nargc; 6873 dhpb.dthpb_xargc = probe->dofpr_xargc; 6874 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 6875 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 6876 6877 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 6878 } 6879 } 6880 6881 static void 6882 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 6883 { 6884 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 6885 dof_hdr_t *dof = (dof_hdr_t *)daddr; 6886 int i; 6887 6888 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 6889 6890 for (i = 0; i < dof->dofh_secnum; i++) { 6891 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 6892 dof->dofh_secoff + i * dof->dofh_secsize); 6893 6894 if (sec->dofs_type != DOF_SECT_PROVIDER) 6895 continue; 6896 6897 dtrace_helper_provide_one(dhp, sec, pid); 6898 } 6899 6900 /* 6901 * We may have just created probes, so we must now rematch against 6902 * any retained enablings. Note that this call will acquire both 6903 * cpu_lock and dtrace_lock; the fact that we are holding 6904 * dtrace_meta_lock now is what defines the ordering with respect to 6905 * these three locks. 6906 */ 6907 dtrace_enabling_matchall(); 6908 } 6909 6910 static void 6911 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 6912 { 6913 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 6914 dof_hdr_t *dof = (dof_hdr_t *)daddr; 6915 dof_sec_t *str_sec; 6916 dof_provider_t *provider; 6917 char *strtab; 6918 dtrace_helper_provdesc_t dhpv; 6919 dtrace_meta_t *meta = dtrace_meta_pid; 6920 dtrace_mops_t *mops = &meta->dtm_mops; 6921 6922 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 6923 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 6924 provider->dofpv_strtab * dof->dofh_secsize); 6925 6926 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 6927 6928 /* 6929 * Create the provider. 6930 */ 6931 dtrace_dofprov2hprov(&dhpv, provider, strtab); 6932 6933 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 6934 6935 meta->dtm_count--; 6936 } 6937 6938 static void 6939 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 6940 { 6941 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 6942 dof_hdr_t *dof = (dof_hdr_t *)daddr; 6943 int i; 6944 6945 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 6946 6947 for (i = 0; i < dof->dofh_secnum; i++) { 6948 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 6949 dof->dofh_secoff + i * dof->dofh_secsize); 6950 6951 if (sec->dofs_type != DOF_SECT_PROVIDER) 6952 continue; 6953 6954 dtrace_helper_provider_remove_one(dhp, sec, pid); 6955 } 6956 } 6957 6958 /* 6959 * DTrace Meta Provider-to-Framework API Functions 6960 * 6961 * These functions implement the Meta Provider-to-Framework API, as described 6962 * in <sys/dtrace.h>. 
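 *
 * As an illustrative sketch (the "foo" names here are hypothetical), a
 * user-land meta provider fills in its ops and registers from its
 * attach(9E) entry point:
 *
 *	static dtrace_meta_provider_id_t foo_meta_id;
 *	static dtrace_mops_t foo_mops;
 *
 *	foo_mops.dtms_create_probe = foo_create_probe;
 *	foo_mops.dtms_provide_pid = foo_provide_pid;
 *	foo_mops.dtms_remove_pid = foo_remove_pid;
 *
 *	if (dtrace_meta_register("foo", &foo_mops, NULL, &foo_meta_id) != 0)
 *		return (DDI_FAILURE);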
6963 */
6964 int
6965 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
6966 dtrace_meta_provider_id_t *idp)
6967 {
6968 dtrace_meta_t *meta;
6969 dtrace_helpers_t *help, *next;
6970 int i;
6971
6972 *idp = DTRACE_METAPROVNONE;
6973
6974 /*
6975 * We strictly don't need the name, but we hold onto it for
6976 * debuggability. All hail error queues!
6977 */
6978 if (name == NULL) {
6979 cmn_err(CE_WARN, "failed to register meta-provider: "
6980 "invalid name");
6981 return (EINVAL);
6982 }
6983
6984 if (mops == NULL ||
6985 mops->dtms_create_probe == NULL ||
6986 mops->dtms_provide_pid == NULL ||
6987 mops->dtms_remove_pid == NULL) {
6988 cmn_err(CE_WARN, "failed to register meta-provider %s: "
6989 "invalid ops", name);
6990 return (EINVAL);
6991 }
6992
6993 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
6994 meta->dtm_mops = *mops;
6995 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
6996 (void) strcpy(meta->dtm_name, name);
6997 meta->dtm_arg = arg;
6998
6999 mutex_enter(&dtrace_meta_lock);
7000 mutex_enter(&dtrace_lock);
7001
7002 if (dtrace_meta_pid != NULL) {
7003 mutex_exit(&dtrace_lock);
7004 mutex_exit(&dtrace_meta_lock);
7005 cmn_err(CE_WARN, "failed to register meta-provider %s: "
7006 "user-land meta-provider exists", name);
7007 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
7008 kmem_free(meta, sizeof (dtrace_meta_t));
7009 return (EINVAL);
7010 }
7011
7012 dtrace_meta_pid = meta;
7013 *idp = (dtrace_meta_provider_id_t)meta;
7014
7015 /*
7016 * If there are providers and probes ready to go, pass them
7017 * off to the new meta provider now.
7018 */
7019
7020 help = dtrace_deferred_pid;
7021 dtrace_deferred_pid = NULL;
7022
7023 mutex_exit(&dtrace_lock);
7024
7025 while (help != NULL) {
7026 for (i = 0; i < help->dthps_nprovs; i++) {
7027 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
7028 help->dthps_pid);
7029 }
7030
7031 next = help->dthps_next;
7032 help->dthps_next = NULL;
7033 help->dthps_prev = NULL;
7034 help->dthps_deferred = 0;
7035 help = next;
7036 }
7037
7038 mutex_exit(&dtrace_meta_lock);
7039
7040 return (0);
7041 }
7042
7043 int
7044 dtrace_meta_unregister(dtrace_meta_provider_id_t id)
7045 {
7046 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
7047
7048 mutex_enter(&dtrace_meta_lock);
7049 mutex_enter(&dtrace_lock);
7050
7051 if (old == dtrace_meta_pid) {
7052 pp = &dtrace_meta_pid;
7053 } else {
7054 panic("attempt to unregister non-existent "
7055 "dtrace meta-provider %p\n", (void *)old);
7056 }
7057
7058 if (old->dtm_count != 0) {
7059 mutex_exit(&dtrace_lock);
7060 mutex_exit(&dtrace_meta_lock);
7061 return (EBUSY);
7062 }
7063
7064 *pp = NULL;
7065
7066 mutex_exit(&dtrace_lock);
7067 mutex_exit(&dtrace_meta_lock);
7068
7069 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
7070 kmem_free(old, sizeof (dtrace_meta_t));
7071
7072 return (0);
7073 }
7074
7075
7076 /*
7077 * DTrace DIF Object Functions
7078 */
7079 static int
7080 dtrace_difo_err(uint_t pc, const char *format, ...)
7081 {
7082 if (dtrace_err_verbose) {
7083 va_list alist;
7084
7085 (void) uprintf("dtrace DIF object error: [%u]: ", pc);
7086 va_start(alist, format);
7087 (void) vuprintf(format, alist);
7088 va_end(alist);
7089 }
7090
7091 #ifdef DTRACE_ERRDEBUG
7092 dtrace_errdebug(format);
7093 #endif
7094 return (1);
7095 }
7096
7097 /*
7098 * Validate a DTrace DIF object by checking the IR instructions. The following
7099 * rules are currently enforced by dtrace_difo_validate():
7100 *
7101 * 1.
Each instruction must have a valid opcode 7102 * 2. Each register, string, variable, or subroutine reference must be valid 7103 * 3. No instruction can modify register %r0 (must be zero) 7104 * 4. All instruction reserved bits must be set to zero 7105 * 5. The last instruction must be a "ret" instruction 7106 * 6. All branch targets must reference a valid instruction _after_ the branch 7107 */ 7108 static int 7109 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 7110 cred_t *cr) 7111 { 7112 int err = 0, i; 7113 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 7114 int kcheck; 7115 uint_t pc; 7116 7117 kcheck = cr == NULL || 7118 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE) == 0; 7119 7120 dp->dtdo_destructive = 0; 7121 7122 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 7123 dif_instr_t instr = dp->dtdo_buf[pc]; 7124 7125 uint_t r1 = DIF_INSTR_R1(instr); 7126 uint_t r2 = DIF_INSTR_R2(instr); 7127 uint_t rd = DIF_INSTR_RD(instr); 7128 uint_t rs = DIF_INSTR_RS(instr); 7129 uint_t label = DIF_INSTR_LABEL(instr); 7130 uint_t v = DIF_INSTR_VAR(instr); 7131 uint_t subr = DIF_INSTR_SUBR(instr); 7132 uint_t type = DIF_INSTR_TYPE(instr); 7133 uint_t op = DIF_INSTR_OP(instr); 7134 7135 switch (op) { 7136 case DIF_OP_OR: 7137 case DIF_OP_XOR: 7138 case DIF_OP_AND: 7139 case DIF_OP_SLL: 7140 case DIF_OP_SRL: 7141 case DIF_OP_SRA: 7142 case DIF_OP_SUB: 7143 case DIF_OP_ADD: 7144 case DIF_OP_MUL: 7145 case DIF_OP_SDIV: 7146 case DIF_OP_UDIV: 7147 case DIF_OP_SREM: 7148 case DIF_OP_UREM: 7149 case DIF_OP_COPYS: 7150 if (r1 >= nregs) 7151 err += efunc(pc, "invalid register %u\n", r1); 7152 if (r2 >= nregs) 7153 err += efunc(pc, "invalid register %u\n", r2); 7154 if (rd >= nregs) 7155 err += efunc(pc, "invalid register %u\n", rd); 7156 if (rd == 0) 7157 err += efunc(pc, "cannot write to %r0\n"); 7158 break; 7159 case DIF_OP_NOT: 7160 case DIF_OP_MOV: 7161 case DIF_OP_ALLOCS: 7162 if (r1 >= nregs) 7163 err += efunc(pc, "invalid register %u\n", r1); 7164 if (r2 != 0) 7165 err += efunc(pc, "non-zero reserved bits\n"); 7166 if (rd >= nregs) 7167 err += efunc(pc, "invalid register %u\n", rd); 7168 if (rd == 0) 7169 err += efunc(pc, "cannot write to %r0\n"); 7170 break; 7171 case DIF_OP_LDSB: 7172 case DIF_OP_LDSH: 7173 case DIF_OP_LDSW: 7174 case DIF_OP_LDUB: 7175 case DIF_OP_LDUH: 7176 case DIF_OP_LDUW: 7177 case DIF_OP_LDX: 7178 if (r1 >= nregs) 7179 err += efunc(pc, "invalid register %u\n", r1); 7180 if (r2 != 0) 7181 err += efunc(pc, "non-zero reserved bits\n"); 7182 if (rd >= nregs) 7183 err += efunc(pc, "invalid register %u\n", rd); 7184 if (rd == 0) 7185 err += efunc(pc, "cannot write to %r0\n"); 7186 if (kcheck) 7187 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 7188 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 7189 break; 7190 case DIF_OP_RLDSB: 7191 case DIF_OP_RLDSH: 7192 case DIF_OP_RLDSW: 7193 case DIF_OP_RLDUB: 7194 case DIF_OP_RLDUH: 7195 case DIF_OP_RLDUW: 7196 case DIF_OP_RLDX: 7197 if (r1 >= nregs) 7198 err += efunc(pc, "invalid register %u\n", r1); 7199 if (r2 != 0) 7200 err += efunc(pc, "non-zero reserved bits\n"); 7201 if (rd >= nregs) 7202 err += efunc(pc, "invalid register %u\n", rd); 7203 if (rd == 0) 7204 err += efunc(pc, "cannot write to %r0\n"); 7205 break; 7206 case DIF_OP_ULDSB: 7207 case DIF_OP_ULDSH: 7208 case DIF_OP_ULDSW: 7209 case DIF_OP_ULDUB: 7210 case DIF_OP_ULDUH: 7211 case DIF_OP_ULDUW: 7212 case DIF_OP_ULDX: 7213 if (r1 >= nregs) 7214 err += efunc(pc, "invalid register %u\n", r1); 7215 if (r2 != 0) 7216 err += efunc(pc, "non-zero 
reserved bits\n"); 7217 if (rd >= nregs) 7218 err += efunc(pc, "invalid register %u\n", rd); 7219 if (rd == 0) 7220 err += efunc(pc, "cannot write to %r0\n"); 7221 break; 7222 case DIF_OP_STB: 7223 case DIF_OP_STH: 7224 case DIF_OP_STW: 7225 case DIF_OP_STX: 7226 if (r1 >= nregs) 7227 err += efunc(pc, "invalid register %u\n", r1); 7228 if (r2 != 0) 7229 err += efunc(pc, "non-zero reserved bits\n"); 7230 if (rd >= nregs) 7231 err += efunc(pc, "invalid register %u\n", rd); 7232 if (rd == 0) 7233 err += efunc(pc, "cannot write to 0 address\n"); 7234 break; 7235 case DIF_OP_CMP: 7236 case DIF_OP_SCMP: 7237 if (r1 >= nregs) 7238 err += efunc(pc, "invalid register %u\n", r1); 7239 if (r2 >= nregs) 7240 err += efunc(pc, "invalid register %u\n", r2); 7241 if (rd != 0) 7242 err += efunc(pc, "non-zero reserved bits\n"); 7243 break; 7244 case DIF_OP_TST: 7245 if (r1 >= nregs) 7246 err += efunc(pc, "invalid register %u\n", r1); 7247 if (r2 != 0 || rd != 0) 7248 err += efunc(pc, "non-zero reserved bits\n"); 7249 break; 7250 case DIF_OP_BA: 7251 case DIF_OP_BE: 7252 case DIF_OP_BNE: 7253 case DIF_OP_BG: 7254 case DIF_OP_BGU: 7255 case DIF_OP_BGE: 7256 case DIF_OP_BGEU: 7257 case DIF_OP_BL: 7258 case DIF_OP_BLU: 7259 case DIF_OP_BLE: 7260 case DIF_OP_BLEU: 7261 if (label >= dp->dtdo_len) { 7262 err += efunc(pc, "invalid branch target %u\n", 7263 label); 7264 } 7265 if (label <= pc) { 7266 err += efunc(pc, "backward branch to %u\n", 7267 label); 7268 } 7269 break; 7270 case DIF_OP_RET: 7271 if (r1 != 0 || r2 != 0) 7272 err += efunc(pc, "non-zero reserved bits\n"); 7273 if (rd >= nregs) 7274 err += efunc(pc, "invalid register %u\n", rd); 7275 break; 7276 case DIF_OP_NOP: 7277 case DIF_OP_POPTS: 7278 case DIF_OP_FLUSHTS: 7279 if (r1 != 0 || r2 != 0 || rd != 0) 7280 err += efunc(pc, "non-zero reserved bits\n"); 7281 break; 7282 case DIF_OP_SETX: 7283 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 7284 err += efunc(pc, "invalid integer ref %u\n", 7285 DIF_INSTR_INTEGER(instr)); 7286 } 7287 if (rd >= nregs) 7288 err += efunc(pc, "invalid register %u\n", rd); 7289 if (rd == 0) 7290 err += efunc(pc, "cannot write to %r0\n"); 7291 break; 7292 case DIF_OP_SETS: 7293 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 7294 err += efunc(pc, "invalid string ref %u\n", 7295 DIF_INSTR_STRING(instr)); 7296 } 7297 if (rd >= nregs) 7298 err += efunc(pc, "invalid register %u\n", rd); 7299 if (rd == 0) 7300 err += efunc(pc, "cannot write to %r0\n"); 7301 break; 7302 case DIF_OP_LDGA: 7303 case DIF_OP_LDTA: 7304 if (r1 > DIF_VAR_ARRAY_MAX) 7305 err += efunc(pc, "invalid array %u\n", r1); 7306 if (r2 >= nregs) 7307 err += efunc(pc, "invalid register %u\n", r2); 7308 if (rd >= nregs) 7309 err += efunc(pc, "invalid register %u\n", rd); 7310 if (rd == 0) 7311 err += efunc(pc, "cannot write to %r0\n"); 7312 break; 7313 case DIF_OP_LDGS: 7314 case DIF_OP_LDTS: 7315 case DIF_OP_LDLS: 7316 case DIF_OP_LDGAA: 7317 case DIF_OP_LDTAA: 7318 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 7319 err += efunc(pc, "invalid variable %u\n", v); 7320 if (rd >= nregs) 7321 err += efunc(pc, "invalid register %u\n", rd); 7322 if (rd == 0) 7323 err += efunc(pc, "cannot write to %r0\n"); 7324 break; 7325 case DIF_OP_STGS: 7326 case DIF_OP_STTS: 7327 case DIF_OP_STLS: 7328 case DIF_OP_STGAA: 7329 case DIF_OP_STTAA: 7330 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 7331 err += efunc(pc, "invalid variable %u\n", v); 7332 if (rs >= nregs) 7333 err += efunc(pc, "invalid register %u\n", rd); 7334 break; 7335 case DIF_OP_CALL: 7336 if (subr 
> DIF_SUBR_MAX) 7337 err += efunc(pc, "invalid subr %u\n", subr); 7338 if (rd >= nregs) 7339 err += efunc(pc, "invalid register %u\n", rd); 7340 if (rd == 0) 7341 err += efunc(pc, "cannot write to %r0\n"); 7342 7343 if (subr == DIF_SUBR_COPYOUT || 7344 subr == DIF_SUBR_COPYOUTSTR) { 7345 dp->dtdo_destructive = 1; 7346 } 7347 break; 7348 case DIF_OP_PUSHTR: 7349 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 7350 err += efunc(pc, "invalid ref type %u\n", type); 7351 if (r2 >= nregs) 7352 err += efunc(pc, "invalid register %u\n", r2); 7353 if (rs >= nregs) 7354 err += efunc(pc, "invalid register %u\n", rs); 7355 break; 7356 case DIF_OP_PUSHTV: 7357 if (type != DIF_TYPE_CTF) 7358 err += efunc(pc, "invalid val type %u\n", type); 7359 if (r2 >= nregs) 7360 err += efunc(pc, "invalid register %u\n", r2); 7361 if (rs >= nregs) 7362 err += efunc(pc, "invalid register %u\n", rs); 7363 break; 7364 default: 7365 err += efunc(pc, "invalid opcode %u\n", 7366 DIF_INSTR_OP(instr)); 7367 } 7368 } 7369 7370 if (dp->dtdo_len != 0 && 7371 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 7372 err += efunc(dp->dtdo_len - 1, 7373 "expected 'ret' as last DIF instruction\n"); 7374 } 7375 7376 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 7377 /* 7378 * If we're not returning by reference, the size must be either 7379 * 0 or the size of one of the base types. 7380 */ 7381 switch (dp->dtdo_rtype.dtdt_size) { 7382 case 0: 7383 case sizeof (uint8_t): 7384 case sizeof (uint16_t): 7385 case sizeof (uint32_t): 7386 case sizeof (uint64_t): 7387 break; 7388 7389 default: 7390 err += efunc(dp->dtdo_len - 1, "bad return size"); 7391 } 7392 } 7393 7394 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 7395 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 7396 dtrace_diftype_t *vt, *et; 7397 uint_t id, ndx; 7398 7399 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 7400 v->dtdv_scope != DIFV_SCOPE_THREAD && 7401 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 7402 err += efunc(i, "unrecognized variable scope %d\n", 7403 v->dtdv_scope); 7404 break; 7405 } 7406 7407 if (v->dtdv_kind != DIFV_KIND_ARRAY && 7408 v->dtdv_kind != DIFV_KIND_SCALAR) { 7409 err += efunc(i, "unrecognized variable type %d\n", 7410 v->dtdv_kind); 7411 break; 7412 } 7413 7414 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 7415 err += efunc(i, "%d exceeds variable id limit\n", id); 7416 break; 7417 } 7418 7419 if (id < DIF_VAR_OTHER_UBASE) 7420 continue; 7421 7422 /* 7423 * For user-defined variables, we need to check that this 7424 * definition is identical to any previous definition that we 7425 * encountered. 
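 *
 * (For example -- as a sketch -- if an earlier enabling established a
 * given variable id as a scalar and a subsequent DIFO presents the same
 * id as an array, the checks below fail it with "changed variable
 * kind"; likewise for mismatched type flags or sizes.)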
7426 */
7427 ndx = id - DIF_VAR_OTHER_UBASE;
7428
7429 switch (v->dtdv_scope) {
7430 case DIFV_SCOPE_GLOBAL:
7431 if (ndx < vstate->dtvs_nglobals) {
7432 dtrace_statvar_t *svar;
7433
7434 if ((svar = vstate->dtvs_globals[ndx]) != NULL)
7435 existing = &svar->dtsv_var;
7436 }
7437
7438 break;
7439
7440 case DIFV_SCOPE_THREAD:
7441 if (ndx < vstate->dtvs_ntlocals)
7442 existing = &vstate->dtvs_tlocals[ndx];
7443 break;
7444
7445 case DIFV_SCOPE_LOCAL:
7446 if (ndx < vstate->dtvs_nlocals) {
7447 dtrace_statvar_t *svar;
7448
7449 if ((svar = vstate->dtvs_locals[ndx]) != NULL)
7450 existing = &svar->dtsv_var;
7451 }
7452
7453 break;
7454 }
7455
7456 vt = &v->dtdv_type;
7457
7458 if (vt->dtdt_flags & DIF_TF_BYREF) {
7459 if (vt->dtdt_size == 0) {
7460 err += efunc(i, "zero-sized variable\n");
7461 break;
7462 }
7463
7464 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL &&
7465 vt->dtdt_size > dtrace_global_maxsize) {
7466 err += efunc(i, "oversized by-ref global\n");
7467 break;
7468 }
7469 }
7470
7471 if (existing == NULL || existing->dtdv_id == 0)
7472 continue;
7473
7474 ASSERT(existing->dtdv_id == v->dtdv_id);
7475 ASSERT(existing->dtdv_scope == v->dtdv_scope);
7476
7477 if (existing->dtdv_kind != v->dtdv_kind)
7478 err += efunc(i, "%d changed variable kind\n", id);
7479
7480 et = &existing->dtdv_type;
7481
7482 if (vt->dtdt_flags != et->dtdt_flags) {
7483 err += efunc(i, "%d changed variable type flags\n", id);
7484 break;
7485 }
7486
7487 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
7488 err += efunc(i, "%d changed variable type size\n", id);
7489 break;
7490 }
7491 }
7492
7493 return (err);
7494 }
7495
7496 /*
7497 * Validate a DTrace DIF object that is to be used as a helper. Helpers
7498 * are much more constrained than normal DIFOs. Specifically, they may
7499 * not:
7500 *
7501 * 1. Make calls to subroutines other than copyin(), copyinstr() or
7502 * miscellaneous string routines.
7503 * 2. Access DTrace variables other than the args[] array, and the
7504 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables.
7505 * 3. Have thread-local variables.
7506 * 4. Have dynamic variables.
7507 */
7508 static int
7509 dtrace_difo_validate_helper(dtrace_difo_t *dp)
7510 {
7511 int (*efunc)(uint_t pc, const char *, ...)
= dtrace_difo_err; 7512 int err = 0; 7513 uint_t pc; 7514 7515 for (pc = 0; pc < dp->dtdo_len; pc++) { 7516 dif_instr_t instr = dp->dtdo_buf[pc]; 7517 7518 uint_t v = DIF_INSTR_VAR(instr); 7519 uint_t subr = DIF_INSTR_SUBR(instr); 7520 uint_t op = DIF_INSTR_OP(instr); 7521 7522 switch (op) { 7523 case DIF_OP_OR: 7524 case DIF_OP_XOR: 7525 case DIF_OP_AND: 7526 case DIF_OP_SLL: 7527 case DIF_OP_SRL: 7528 case DIF_OP_SRA: 7529 case DIF_OP_SUB: 7530 case DIF_OP_ADD: 7531 case DIF_OP_MUL: 7532 case DIF_OP_SDIV: 7533 case DIF_OP_UDIV: 7534 case DIF_OP_SREM: 7535 case DIF_OP_UREM: 7536 case DIF_OP_COPYS: 7537 case DIF_OP_NOT: 7538 case DIF_OP_MOV: 7539 case DIF_OP_RLDSB: 7540 case DIF_OP_RLDSH: 7541 case DIF_OP_RLDSW: 7542 case DIF_OP_RLDUB: 7543 case DIF_OP_RLDUH: 7544 case DIF_OP_RLDUW: 7545 case DIF_OP_RLDX: 7546 case DIF_OP_ULDSB: 7547 case DIF_OP_ULDSH: 7548 case DIF_OP_ULDSW: 7549 case DIF_OP_ULDUB: 7550 case DIF_OP_ULDUH: 7551 case DIF_OP_ULDUW: 7552 case DIF_OP_ULDX: 7553 case DIF_OP_STB: 7554 case DIF_OP_STH: 7555 case DIF_OP_STW: 7556 case DIF_OP_STX: 7557 case DIF_OP_ALLOCS: 7558 case DIF_OP_CMP: 7559 case DIF_OP_SCMP: 7560 case DIF_OP_TST: 7561 case DIF_OP_BA: 7562 case DIF_OP_BE: 7563 case DIF_OP_BNE: 7564 case DIF_OP_BG: 7565 case DIF_OP_BGU: 7566 case DIF_OP_BGE: 7567 case DIF_OP_BGEU: 7568 case DIF_OP_BL: 7569 case DIF_OP_BLU: 7570 case DIF_OP_BLE: 7571 case DIF_OP_BLEU: 7572 case DIF_OP_RET: 7573 case DIF_OP_NOP: 7574 case DIF_OP_POPTS: 7575 case DIF_OP_FLUSHTS: 7576 case DIF_OP_SETX: 7577 case DIF_OP_SETS: 7578 case DIF_OP_LDGA: 7579 case DIF_OP_LDLS: 7580 case DIF_OP_STGS: 7581 case DIF_OP_STLS: 7582 case DIF_OP_PUSHTR: 7583 case DIF_OP_PUSHTV: 7584 break; 7585 7586 case DIF_OP_LDGS: 7587 if (v >= DIF_VAR_OTHER_UBASE) 7588 break; 7589 7590 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 7591 break; 7592 7593 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 7594 v == DIF_VAR_PPID || v == DIF_VAR_TID || 7595 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 7596 v == DIF_VAR_UID || v == DIF_VAR_GID) 7597 break; 7598 7599 err += efunc(pc, "illegal variable %u\n", v); 7600 break; 7601 7602 case DIF_OP_LDTA: 7603 case DIF_OP_LDTS: 7604 case DIF_OP_LDGAA: 7605 case DIF_OP_LDTAA: 7606 err += efunc(pc, "illegal dynamic variable load\n"); 7607 break; 7608 7609 case DIF_OP_STTS: 7610 case DIF_OP_STGAA: 7611 case DIF_OP_STTAA: 7612 err += efunc(pc, "illegal dynamic variable store\n"); 7613 break; 7614 7615 case DIF_OP_CALL: 7616 if (subr == DIF_SUBR_ALLOCA || 7617 subr == DIF_SUBR_BCOPY || 7618 subr == DIF_SUBR_COPYIN || 7619 subr == DIF_SUBR_COPYINTO || 7620 subr == DIF_SUBR_COPYINSTR || 7621 subr == DIF_SUBR_INDEX || 7622 subr == DIF_SUBR_LLTOSTR || 7623 subr == DIF_SUBR_RINDEX || 7624 subr == DIF_SUBR_STRCHR || 7625 subr == DIF_SUBR_STRJOIN || 7626 subr == DIF_SUBR_STRRCHR || 7627 subr == DIF_SUBR_STRSTR || 7628 subr == DIF_SUBR_HTONS || 7629 subr == DIF_SUBR_HTONL || 7630 subr == DIF_SUBR_HTONLL || 7631 subr == DIF_SUBR_NTOHS || 7632 subr == DIF_SUBR_NTOHL || 7633 subr == DIF_SUBR_NTOHLL) 7634 break; 7635 7636 err += efunc(pc, "invalid subr %u\n", subr); 7637 break; 7638 7639 default: 7640 err += efunc(pc, "invalid opcode %u\n", 7641 DIF_INSTR_OP(instr)); 7642 } 7643 } 7644 7645 return (err); 7646 } 7647 7648 /* 7649 * Returns 1 if the expression in the DIF object can be cached on a per-thread 7650 * basis; 0 if not. 
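 *
 * (As a sketch: a predicate like pid == 1234 depends only on variables
 * that are invariant for a given thread and performs no loads, so it is
 * cacheable; one that dereferences memory or indexes args[] is not.)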
7651 */ 7652 static int 7653 dtrace_difo_cacheable(dtrace_difo_t *dp) 7654 { 7655 int i; 7656 7657 if (dp == NULL) 7658 return (0); 7659 7660 for (i = 0; i < dp->dtdo_varlen; i++) { 7661 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7662 7663 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 7664 continue; 7665 7666 switch (v->dtdv_id) { 7667 case DIF_VAR_CURTHREAD: 7668 case DIF_VAR_PID: 7669 case DIF_VAR_TID: 7670 case DIF_VAR_EXECNAME: 7671 case DIF_VAR_ZONENAME: 7672 break; 7673 7674 default: 7675 return (0); 7676 } 7677 } 7678 7679 /* 7680 * This DIF object may be cacheable. Now we need to look for any 7681 * array loading instructions, any memory loading instructions, or 7682 * any stores to thread-local variables. 7683 */ 7684 for (i = 0; i < dp->dtdo_len; i++) { 7685 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 7686 7687 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 7688 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 7689 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 7690 op == DIF_OP_LDGA || op == DIF_OP_STTS) 7691 return (0); 7692 } 7693 7694 return (1); 7695 } 7696 7697 static void 7698 dtrace_difo_hold(dtrace_difo_t *dp) 7699 { 7700 int i; 7701 7702 ASSERT(MUTEX_HELD(&dtrace_lock)); 7703 7704 dp->dtdo_refcnt++; 7705 ASSERT(dp->dtdo_refcnt != 0); 7706 7707 /* 7708 * We need to check this DIF object for references to the variable 7709 * DIF_VAR_VTIMESTAMP. 7710 */ 7711 for (i = 0; i < dp->dtdo_varlen; i++) { 7712 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7713 7714 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 7715 continue; 7716 7717 if (dtrace_vtime_references++ == 0) 7718 dtrace_vtime_enable(); 7719 } 7720 } 7721 7722 /* 7723 * This routine calculates the dynamic variable chunksize for a given DIF 7724 * object. The calculation is not fool-proof, and can probably be tricked by 7725 * malicious DIF -- but it works for all compiler-generated DIF. Because this 7726 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 7727 * if a dynamic variable size exceeds the chunksize. 
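 *
 * (As a sketch: for a thread-local store such as self->x, the "stts"
 * case below yields two zero-sized keys -- thread and variable id -- so
 * the computed size is sizeof (dtrace_dynvar_t) plus one additional
 * dtrace_key_t plus the size of x's type, rounded up to a multiple of
 * sizeof (uint64_t).)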
7728 */ 7729 static void 7730 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 7731 { 7732 uint64_t sval; 7733 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 7734 const dif_instr_t *text = dp->dtdo_buf; 7735 uint_t pc, srd = 0; 7736 uint_t ttop = 0; 7737 size_t size, ksize; 7738 uint_t id, i; 7739 7740 for (pc = 0; pc < dp->dtdo_len; pc++) { 7741 dif_instr_t instr = text[pc]; 7742 uint_t op = DIF_INSTR_OP(instr); 7743 uint_t rd = DIF_INSTR_RD(instr); 7744 uint_t r1 = DIF_INSTR_R1(instr); 7745 uint_t nkeys = 0; 7746 uchar_t scope; 7747 7748 dtrace_key_t *key = tupregs; 7749 7750 switch (op) { 7751 case DIF_OP_SETX: 7752 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 7753 srd = rd; 7754 continue; 7755 7756 case DIF_OP_STTS: 7757 key = &tupregs[DIF_DTR_NREGS]; 7758 key[0].dttk_size = 0; 7759 key[1].dttk_size = 0; 7760 nkeys = 2; 7761 scope = DIFV_SCOPE_THREAD; 7762 break; 7763 7764 case DIF_OP_STGAA: 7765 case DIF_OP_STTAA: 7766 nkeys = ttop; 7767 7768 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 7769 key[nkeys++].dttk_size = 0; 7770 7771 key[nkeys++].dttk_size = 0; 7772 7773 if (op == DIF_OP_STTAA) { 7774 scope = DIFV_SCOPE_THREAD; 7775 } else { 7776 scope = DIFV_SCOPE_GLOBAL; 7777 } 7778 7779 break; 7780 7781 case DIF_OP_PUSHTR: 7782 if (ttop == DIF_DTR_NREGS) 7783 return; 7784 7785 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 7786 /* 7787 * If the register for the size of the "pushtr" 7788 * is %r0 (or the value is 0) and the type is 7789 * a string, we'll use the system-wide default 7790 * string size. 7791 */ 7792 tupregs[ttop++].dttk_size = 7793 dtrace_strsize_default; 7794 } else { 7795 if (srd == 0) 7796 return; 7797 7798 tupregs[ttop++].dttk_size = sval; 7799 } 7800 7801 break; 7802 7803 case DIF_OP_PUSHTV: 7804 if (ttop == DIF_DTR_NREGS) 7805 return; 7806 7807 tupregs[ttop++].dttk_size = 0; 7808 break; 7809 7810 case DIF_OP_FLUSHTS: 7811 ttop = 0; 7812 break; 7813 7814 case DIF_OP_POPTS: 7815 if (ttop != 0) 7816 ttop--; 7817 break; 7818 } 7819 7820 sval = 0; 7821 srd = 0; 7822 7823 if (nkeys == 0) 7824 continue; 7825 7826 /* 7827 * We have a dynamic variable allocation; calculate its size. 7828 */ 7829 for (ksize = 0, i = 0; i < nkeys; i++) 7830 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 7831 7832 size = sizeof (dtrace_dynvar_t); 7833 size += sizeof (dtrace_key_t) * (nkeys - 1); 7834 size += ksize; 7835 7836 /* 7837 * Now we need to determine the size of the stored data. 7838 */ 7839 id = DIF_INSTR_VAR(instr); 7840 7841 for (i = 0; i < dp->dtdo_varlen; i++) { 7842 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7843 7844 if (v->dtdv_id == id && v->dtdv_scope == scope) { 7845 size += v->dtdv_type.dtdt_size; 7846 break; 7847 } 7848 } 7849 7850 if (i == dp->dtdo_varlen) 7851 return; 7852 7853 /* 7854 * We have the size. If this is larger than the chunk size 7855 * for our dynamic variable state, reset the chunk size. 
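 *
 * (P2ROUNDUP() rounds up to the next multiple of sizeof (uint64_t); a
 * computed size of 52 bytes, for example, becomes 56.)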
7856 */ 7857 size = P2ROUNDUP(size, sizeof (uint64_t)); 7858 7859 if (size > vstate->dtvs_dynvars.dtds_chunksize) 7860 vstate->dtvs_dynvars.dtds_chunksize = size; 7861 } 7862 } 7863 7864 static void 7865 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 7866 { 7867 int i, oldsvars, osz, nsz, otlocals, ntlocals; 7868 uint_t id; 7869 7870 ASSERT(MUTEX_HELD(&dtrace_lock)); 7871 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 7872 7873 for (i = 0; i < dp->dtdo_varlen; i++) { 7874 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7875 dtrace_statvar_t *svar, ***svarp; 7876 size_t dsize = 0; 7877 uint8_t scope = v->dtdv_scope; 7878 int *np; 7879 7880 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 7881 continue; 7882 7883 id -= DIF_VAR_OTHER_UBASE; 7884 7885 switch (scope) { 7886 case DIFV_SCOPE_THREAD: 7887 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 7888 dtrace_difv_t *tlocals; 7889 7890 if ((ntlocals = (otlocals << 1)) == 0) 7891 ntlocals = 1; 7892 7893 osz = otlocals * sizeof (dtrace_difv_t); 7894 nsz = ntlocals * sizeof (dtrace_difv_t); 7895 7896 tlocals = kmem_zalloc(nsz, KM_SLEEP); 7897 7898 if (osz != 0) { 7899 bcopy(vstate->dtvs_tlocals, 7900 tlocals, osz); 7901 kmem_free(vstate->dtvs_tlocals, osz); 7902 } 7903 7904 vstate->dtvs_tlocals = tlocals; 7905 vstate->dtvs_ntlocals = ntlocals; 7906 } 7907 7908 vstate->dtvs_tlocals[id] = *v; 7909 continue; 7910 7911 case DIFV_SCOPE_LOCAL: 7912 np = &vstate->dtvs_nlocals; 7913 svarp = &vstate->dtvs_locals; 7914 7915 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 7916 dsize = NCPU * (v->dtdv_type.dtdt_size + 7917 sizeof (uint64_t)); 7918 else 7919 dsize = NCPU * sizeof (uint64_t); 7920 7921 break; 7922 7923 case DIFV_SCOPE_GLOBAL: 7924 np = &vstate->dtvs_nglobals; 7925 svarp = &vstate->dtvs_globals; 7926 7927 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 7928 dsize = v->dtdv_type.dtdt_size + 7929 sizeof (uint64_t); 7930 7931 break; 7932 7933 default: 7934 ASSERT(0); 7935 } 7936 7937 while (id >= (oldsvars = *np)) { 7938 dtrace_statvar_t **statics; 7939 int newsvars, oldsize, newsize; 7940 7941 if ((newsvars = (oldsvars << 1)) == 0) 7942 newsvars = 1; 7943 7944 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 7945 newsize = newsvars * sizeof (dtrace_statvar_t *); 7946 7947 statics = kmem_zalloc(newsize, KM_SLEEP); 7948 7949 if (oldsize != 0) { 7950 bcopy(*svarp, statics, oldsize); 7951 kmem_free(*svarp, oldsize); 7952 } 7953 7954 *svarp = statics; 7955 *np = newsvars; 7956 } 7957 7958 if ((svar = (*svarp)[id]) == NULL) { 7959 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 7960 svar->dtsv_var = *v; 7961 7962 if ((svar->dtsv_size = dsize) != 0) { 7963 svar->dtsv_data = (uint64_t)(uintptr_t) 7964 kmem_zalloc(dsize, KM_SLEEP); 7965 } 7966 7967 (*svarp)[id] = svar; 7968 } 7969 7970 svar->dtsv_refcnt++; 7971 } 7972 7973 dtrace_difo_chunksize(dp, vstate); 7974 dtrace_difo_hold(dp); 7975 } 7976 7977 static dtrace_difo_t * 7978 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 7979 { 7980 dtrace_difo_t *new; 7981 size_t sz; 7982 7983 ASSERT(dp->dtdo_buf != NULL); 7984 ASSERT(dp->dtdo_refcnt != 0); 7985 7986 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 7987 7988 ASSERT(dp->dtdo_buf != NULL); 7989 sz = dp->dtdo_len * sizeof (dif_instr_t); 7990 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 7991 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 7992 new->dtdo_len = dp->dtdo_len; 7993 7994 if (dp->dtdo_strtab != NULL) { 7995 ASSERT(dp->dtdo_strlen != 0); 7996 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 7997 
bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 7998 new->dtdo_strlen = dp->dtdo_strlen; 7999 } 8000 8001 if (dp->dtdo_inttab != NULL) { 8002 ASSERT(dp->dtdo_intlen != 0); 8003 sz = dp->dtdo_intlen * sizeof (uint64_t); 8004 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 8005 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 8006 new->dtdo_intlen = dp->dtdo_intlen; 8007 } 8008 8009 if (dp->dtdo_vartab != NULL) { 8010 ASSERT(dp->dtdo_varlen != 0); 8011 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 8012 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 8013 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 8014 new->dtdo_varlen = dp->dtdo_varlen; 8015 } 8016 8017 dtrace_difo_init(new, vstate); 8018 return (new); 8019 } 8020 8021 static void 8022 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8023 { 8024 int i; 8025 8026 ASSERT(dp->dtdo_refcnt == 0); 8027 8028 for (i = 0; i < dp->dtdo_varlen; i++) { 8029 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8030 dtrace_statvar_t *svar, **svarp; 8031 uint_t id; 8032 uint8_t scope = v->dtdv_scope; 8033 int *np; 8034 8035 switch (scope) { 8036 case DIFV_SCOPE_THREAD: 8037 continue; 8038 8039 case DIFV_SCOPE_LOCAL: 8040 np = &vstate->dtvs_nlocals; 8041 svarp = vstate->dtvs_locals; 8042 break; 8043 8044 case DIFV_SCOPE_GLOBAL: 8045 np = &vstate->dtvs_nglobals; 8046 svarp = vstate->dtvs_globals; 8047 break; 8048 8049 default: 8050 ASSERT(0); 8051 } 8052 8053 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 8054 continue; 8055 8056 id -= DIF_VAR_OTHER_UBASE; 8057 ASSERT(id < *np); 8058 8059 svar = svarp[id]; 8060 ASSERT(svar != NULL); 8061 ASSERT(svar->dtsv_refcnt > 0); 8062 8063 if (--svar->dtsv_refcnt > 0) 8064 continue; 8065 8066 if (svar->dtsv_size != 0) { 8067 ASSERT(svar->dtsv_data != NULL); 8068 kmem_free((void *)(uintptr_t)svar->dtsv_data, 8069 svar->dtsv_size); 8070 } 8071 8072 kmem_free(svar, sizeof (dtrace_statvar_t)); 8073 svarp[id] = NULL; 8074 } 8075 8076 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 8077 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 8078 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 8079 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 8080 8081 kmem_free(dp, sizeof (dtrace_difo_t)); 8082 } 8083 8084 static void 8085 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8086 { 8087 int i; 8088 8089 ASSERT(MUTEX_HELD(&dtrace_lock)); 8090 ASSERT(dp->dtdo_refcnt != 0); 8091 8092 for (i = 0; i < dp->dtdo_varlen; i++) { 8093 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8094 8095 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 8096 continue; 8097 8098 ASSERT(dtrace_vtime_references > 0); 8099 if (--dtrace_vtime_references == 0) 8100 dtrace_vtime_disable(); 8101 } 8102 8103 if (--dp->dtdo_refcnt == 0) 8104 dtrace_difo_destroy(dp, vstate); 8105 } 8106 8107 /* 8108 * DTrace Format Functions 8109 */ 8110 static uint16_t 8111 dtrace_format_add(dtrace_state_t *state, char *str) 8112 { 8113 char *fmt, **new; 8114 uint16_t ndx, len = strlen(str) + 1; 8115 8116 fmt = kmem_zalloc(len, KM_SLEEP); 8117 bcopy(str, fmt, len); 8118 8119 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 8120 if (state->dts_formats[ndx] == NULL) { 8121 state->dts_formats[ndx] = fmt; 8122 return (ndx + 1); 8123 } 8124 } 8125 8126 if (state->dts_nformats == USHRT_MAX) { 8127 /* 8128 * This is only likely if a denial-of-service attack is being 8129 * attempted. As such, it's okay to fail silently here. 
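 *
 * (Format indices handed back to consumers are 1-based -- 0 is reserved
 * to denote this failure -- which is why the slot scan above returns
 * ndx + 1 and why dtrace_format_remove() indexes with format - 1.)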
8130 */ 8131 kmem_free(fmt, len); 8132 return (0); 8133 } 8134 8135 /* 8136 * For simplicity, we always resize the formats array to be exactly the 8137 * number of formats. 8138 */ 8139 ndx = state->dts_nformats++; 8140 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 8141 8142 if (state->dts_formats != NULL) { 8143 ASSERT(ndx != 0); 8144 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 8145 kmem_free(state->dts_formats, ndx * sizeof (char *)); 8146 } 8147 8148 state->dts_formats = new; 8149 state->dts_formats[ndx] = fmt; 8150 8151 return (ndx + 1); 8152 } 8153 8154 static void 8155 dtrace_format_remove(dtrace_state_t *state, uint16_t format) 8156 { 8157 char *fmt; 8158 8159 ASSERT(state->dts_formats != NULL); 8160 ASSERT(format <= state->dts_nformats); 8161 ASSERT(state->dts_formats[format - 1] != NULL); 8162 8163 fmt = state->dts_formats[format - 1]; 8164 kmem_free(fmt, strlen(fmt) + 1); 8165 state->dts_formats[format - 1] = NULL; 8166 } 8167 8168 static void 8169 dtrace_format_destroy(dtrace_state_t *state) 8170 { 8171 int i; 8172 8173 if (state->dts_nformats == 0) { 8174 ASSERT(state->dts_formats == NULL); 8175 return; 8176 } 8177 8178 ASSERT(state->dts_formats != NULL); 8179 8180 for (i = 0; i < state->dts_nformats; i++) { 8181 char *fmt = state->dts_formats[i]; 8182 8183 if (fmt == NULL) 8184 continue; 8185 8186 kmem_free(fmt, strlen(fmt) + 1); 8187 } 8188 8189 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 8190 state->dts_nformats = 0; 8191 state->dts_formats = NULL; 8192 } 8193 8194 /* 8195 * DTrace Predicate Functions 8196 */ 8197 static dtrace_predicate_t * 8198 dtrace_predicate_create(dtrace_difo_t *dp) 8199 { 8200 dtrace_predicate_t *pred; 8201 8202 ASSERT(MUTEX_HELD(&dtrace_lock)); 8203 ASSERT(dp->dtdo_refcnt != 0); 8204 8205 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 8206 pred->dtp_difo = dp; 8207 pred->dtp_refcnt = 1; 8208 8209 if (!dtrace_difo_cacheable(dp)) 8210 return (pred); 8211 8212 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 8213 /* 8214 * This is only theoretically possible -- we have had 2^32 8215 * cacheable predicates on this machine. We cannot allow any 8216 * more predicates to become cacheable: as unlikely as it is, 8217 * there may be a thread caching a (now stale) predicate cache 8218 * ID. 
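 *
 * The scheme is easy to state in isolation: cache IDs come from a
 * monotonically increasing 32-bit counter that starts above
 * DTRACE_CACHEIDNONE and is never reused, so a stale ID can never be
 * reassigned -- once the counter wraps back to DTRACE_CACHEIDNONE,
 * caching is simply refused forever. A minimal sketch (names are
 * hypothetical, not the driver's own allocator):
 *
 *    #include <stdint.h>
 *
 *    #define CACHEIDNONE 0              // "not cacheable"
 *
 *    static uint32_t cacheid = CACHEIDNONE + 1;
 *
 *    static uint32_t
 *    cacheid_alloc(void)
 *    {
 *        if (cacheid == CACHEIDNONE)    // 2^32 IDs consumed
 *            return (CACHEIDNONE);
 *
 *        return (cacheid++);
 *    }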
(N.B.: the temptation is being successfully resisted to 8219 * have this cmn_err() "Holy shit -- we executed this code!") 8220 */ 8221 return (pred); 8222 } 8223 8224 pred->dtp_cacheid = dtrace_predcache_id++; 8225 8226 return (pred); 8227 } 8228 8229 static void 8230 dtrace_predicate_hold(dtrace_predicate_t *pred) 8231 { 8232 ASSERT(MUTEX_HELD(&dtrace_lock)); 8233 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 8234 ASSERT(pred->dtp_refcnt > 0); 8235 8236 pred->dtp_refcnt++; 8237 } 8238 8239 static void 8240 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 8241 { 8242 dtrace_difo_t *dp = pred->dtp_difo; 8243 8244 ASSERT(MUTEX_HELD(&dtrace_lock)); 8245 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 8246 ASSERT(pred->dtp_refcnt > 0); 8247 8248 if (--pred->dtp_refcnt == 0) { 8249 dtrace_difo_release(pred->dtp_difo, vstate); 8250 kmem_free(pred, sizeof (dtrace_predicate_t)); 8251 } 8252 } 8253 8254 /* 8255 * DTrace Action Description Functions 8256 */ 8257 static dtrace_actdesc_t * 8258 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 8259 uint64_t uarg, uint64_t arg) 8260 { 8261 dtrace_actdesc_t *act; 8262 8263 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 8264 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 8265 8266 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 8267 act->dtad_kind = kind; 8268 act->dtad_ntuple = ntuple; 8269 act->dtad_uarg = uarg; 8270 act->dtad_arg = arg; 8271 act->dtad_refcnt = 1; 8272 8273 return (act); 8274 } 8275 8276 static void 8277 dtrace_actdesc_hold(dtrace_actdesc_t *act) 8278 { 8279 ASSERT(act->dtad_refcnt >= 1); 8280 act->dtad_refcnt++; 8281 } 8282 8283 static void 8284 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 8285 { 8286 dtrace_actkind_t kind = act->dtad_kind; 8287 dtrace_difo_t *dp; 8288 8289 ASSERT(act->dtad_refcnt >= 1); 8290 8291 if (--act->dtad_refcnt != 0) 8292 return; 8293 8294 if ((dp = act->dtad_difo) != NULL) 8295 dtrace_difo_release(dp, vstate); 8296 8297 if (DTRACEACT_ISPRINTFLIKE(kind)) { 8298 char *str = (char *)(uintptr_t)act->dtad_arg; 8299 8300 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 8301 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 8302 8303 if (str != NULL) 8304 kmem_free(str, strlen(str) + 1); 8305 } 8306 8307 kmem_free(act, sizeof (dtrace_actdesc_t)); 8308 } 8309 8310 /* 8311 * DTrace ECB Functions 8312 */ 8313 static dtrace_ecb_t * 8314 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 8315 { 8316 dtrace_ecb_t *ecb; 8317 dtrace_epid_t epid; 8318 8319 ASSERT(MUTEX_HELD(&dtrace_lock)); 8320 8321 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 8322 ecb->dte_predicate = NULL; 8323 ecb->dte_probe = probe; 8324 8325 /* 8326 * The default size is the size of the default action: recording 8327 * the epid. 
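 *
 * EPIDs, like format handles, are 1-based so that 0 can serve as
 * DTRACE_EPIDNONE: slot i of dts_ecbs holds the ECB with EPID i + 1.
 * A minimal sketch of the resulting lookup discipline (illustrative
 * types only; the driver's own lookup is dtrace_epid2ecb(), below):
 *
 *    #include <stddef.h>
 *    #include <stdint.h>
 *
 *    struct ecb;
 *
 *    typedef struct estate {
 *        struct ecb **es_ecbs;          // slot i: ECB with EPID i + 1
 *        int es_necbs;
 *    } estate_t;
 *
 *    static struct ecb *
 *    epid2ecb(estate_t *s, uint32_t id)
 *    {
 *        if (id == 0 || id > (uint32_t)s->es_necbs)
 *            return (NULL);             // 0 is "no EPID"
 *
 *        return (s->es_ecbs[id - 1]);
 *    }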
8328 */ 8329 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 8330 ecb->dte_alignment = sizeof (dtrace_epid_t); 8331 8332 epid = state->dts_epid++; 8333 8334 if (epid - 1 >= state->dts_necbs) { 8335 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 8336 int necbs = state->dts_necbs << 1; 8337 8338 ASSERT(epid == state->dts_necbs + 1); 8339 8340 if (necbs == 0) { 8341 ASSERT(oecbs == NULL); 8342 necbs = 1; 8343 } 8344 8345 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 8346 8347 if (oecbs != NULL) 8348 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 8349 8350 dtrace_membar_producer(); 8351 state->dts_ecbs = ecbs; 8352 8353 if (oecbs != NULL) { 8354 /* 8355 * If this state is active, we must dtrace_sync() 8356 * before we can free the old dts_ecbs array: we're 8357 * coming in hot, and there may be active ring 8358 * buffer processing (which indexes into the dts_ecbs 8359 * array) on another CPU. 8360 */ 8361 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 8362 dtrace_sync(); 8363 8364 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 8365 } 8366 8367 dtrace_membar_producer(); 8368 state->dts_necbs = necbs; 8369 } 8370 8371 ecb->dte_state = state; 8372 8373 ASSERT(state->dts_ecbs[epid - 1] == NULL); 8374 dtrace_membar_producer(); 8375 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 8376 8377 return (ecb); 8378 } 8379 8380 static void 8381 dtrace_ecb_enable(dtrace_ecb_t *ecb) 8382 { 8383 dtrace_probe_t *probe = ecb->dte_probe; 8384 8385 ASSERT(MUTEX_HELD(&cpu_lock)); 8386 ASSERT(MUTEX_HELD(&dtrace_lock)); 8387 ASSERT(ecb->dte_next == NULL); 8388 8389 if (probe == NULL) { 8390 /* 8391 * This is the NULL probe -- there's nothing to do. 8392 */ 8393 return; 8394 } 8395 8396 if (probe->dtpr_ecb == NULL) { 8397 dtrace_provider_t *prov = probe->dtpr_provider; 8398 8399 /* 8400 * We're the first ECB on this probe. 8401 */ 8402 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 8403 8404 if (ecb->dte_predicate != NULL) 8405 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 8406 8407 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 8408 probe->dtpr_id, probe->dtpr_arg); 8409 } else { 8410 /* 8411 * This probe is already active. Swing the last pointer to 8412 * point to the new ECB, and issue a dtrace_sync() to assure 8413 * that all CPUs have seen the change. 8414 */ 8415 ASSERT(probe->dtpr_ecb_last != NULL); 8416 probe->dtpr_ecb_last->dte_next = ecb; 8417 probe->dtpr_ecb_last = ecb; 8418 probe->dtpr_predcache = 0; 8419 8420 dtrace_sync(); 8421 } 8422 } 8423 8424 static void 8425 dtrace_ecb_resize(dtrace_ecb_t *ecb) 8426 { 8427 uint32_t maxalign = sizeof (dtrace_epid_t); 8428 uint32_t align = sizeof (uint8_t), offs, diff; 8429 dtrace_action_t *act; 8430 int wastuple = 0; 8431 uint32_t aggbase = UINT32_MAX; 8432 dtrace_state_t *state = ecb->dte_state; 8433 8434 /* 8435 * If we record anything, we always record the epid. (And we always 8436 * record it first.) 8437 */ 8438 offs = sizeof (dtrace_epid_t); 8439 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 8440 8441 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 8442 dtrace_recdesc_t *rec = &act->dta_rec; 8443 8444 if ((align = rec->dtrd_alignment) > maxalign) 8445 maxalign = align; 8446 8447 if (!wastuple && act->dta_intuple) { 8448 /* 8449 * This is the first record in a tuple. Align the 8450 * offset to be at offset 4 in an 8-byte aligned 8451 * block. 
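 *
 * Concretely: the record offset must come to rest congruent to 4
 * modulo 8, so that the 4-byte aggregation ID stored immediately
 * before it is itself 8-byte aligned. A standalone sketch of the
 * arithmetic below (assuming a 4-byte ID purely for illustration):
 *
 *    #include <assert.h>
 *    #include <stddef.h>
 *    #include <stdint.h>
 *
 *    static size_t
 *    tuple_align(size_t offs, size_t *aggbasep)
 *    {
 *        size_t diff = offs + sizeof (uint32_t);
 *
 *        if ((diff &= (sizeof (uint64_t) - 1)) != 0)
 *            offs += sizeof (uint64_t) - diff;
 *
 *        *aggbasep = offs - sizeof (uint32_t);
 *        assert((*aggbasep & (sizeof (uint64_t) - 1)) == 0);
 *        return (offs);
 *    }
 *
 * For example, tuple_align(17, ...) yields a record offset of 20 with
 * the aggregation ID placed at offset 16.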
8452 */ 8453 diff = offs + sizeof (dtrace_aggid_t); 8454 8455 if (diff = (diff & (sizeof (uint64_t) - 1))) 8456 offs += sizeof (uint64_t) - diff; 8457 8458 aggbase = offs - sizeof (dtrace_aggid_t); 8459 ASSERT(!(aggbase & (sizeof (uint64_t) - 1))); 8460 } 8461 8462 /*LINTED*/ 8463 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) { 8464 /* 8465 * The current offset is not properly aligned; align it. 8466 */ 8467 offs += align - diff; 8468 } 8469 8470 rec->dtrd_offset = offs; 8471 8472 if (offs + rec->dtrd_size > ecb->dte_needed) { 8473 ecb->dte_needed = offs + rec->dtrd_size; 8474 8475 if (ecb->dte_needed > state->dts_needed) 8476 state->dts_needed = ecb->dte_needed; 8477 } 8478 8479 if (DTRACEACT_ISAGG(act->dta_kind)) { 8480 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 8481 dtrace_action_t *first = agg->dtag_first, *prev; 8482 8483 ASSERT(rec->dtrd_size != 0 && first != NULL); 8484 ASSERT(wastuple); 8485 ASSERT(aggbase != UINT32_MAX); 8486 8487 agg->dtag_base = aggbase; 8488 8489 while ((prev = first->dta_prev) != NULL && 8490 DTRACEACT_ISAGG(prev->dta_kind)) { 8491 agg = (dtrace_aggregation_t *)prev; 8492 first = agg->dtag_first; 8493 } 8494 8495 if (prev != NULL) { 8496 offs = prev->dta_rec.dtrd_offset + 8497 prev->dta_rec.dtrd_size; 8498 } else { 8499 offs = sizeof (dtrace_epid_t); 8500 } 8501 wastuple = 0; 8502 } else { 8503 if (!act->dta_intuple) 8504 ecb->dte_size = offs + rec->dtrd_size; 8505 8506 offs += rec->dtrd_size; 8507 } 8508 8509 wastuple = act->dta_intuple; 8510 } 8511 8512 if ((act = ecb->dte_action) != NULL && 8513 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 8514 ecb->dte_size == sizeof (dtrace_epid_t)) { 8515 /* 8516 * If the size is still sizeof (dtrace_epid_t), then all 8517 * actions store no data; set the size to 0. 8518 */ 8519 ecb->dte_alignment = maxalign; 8520 ecb->dte_size = 0; 8521 8522 /* 8523 * If the needed space is still sizeof (dtrace_epid_t), then 8524 * all actions need no additional space; set the needed 8525 * size to 0. 8526 */ 8527 if (ecb->dte_needed == sizeof (dtrace_epid_t)) 8528 ecb->dte_needed = 0; 8529 8530 return; 8531 } 8532 8533 /* 8534 * Set our alignment, and make sure that the dte_size and dte_needed 8535 * are aligned to the size of an EPID. 
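 *
 * The rounding below is the standard power-of-two idiom: adding
 * (align - 1) and masking with its complement rounds any value up to
 * the next multiple of align. In isolation (align must be a power of
 * two; the helper name is illustrative):
 *
 *    #include <assert.h>
 *    #include <stdint.h>
 *
 *    static inline uint32_t
 *    p2roundup32(uint32_t x, uint32_t align)
 *    {
 *        assert(align != 0 && (align & (align - 1)) == 0);
 *        return ((x + (align - 1)) & ~(align - 1));
 *    }
 *
 * With a 4-byte EPID, p2roundup32(13, 4) is 16, and p2roundup32(16, 4)
 * remains 16.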
8536 */ 8537 ecb->dte_alignment = maxalign; 8538 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) & 8539 ~(sizeof (dtrace_epid_t) - 1); 8540 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) & 8541 ~(sizeof (dtrace_epid_t) - 1); 8542 ASSERT(ecb->dte_size <= ecb->dte_needed); 8543 } 8544 8545 static dtrace_action_t * 8546 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 8547 { 8548 dtrace_aggregation_t *agg; 8549 size_t size = sizeof (uint64_t); 8550 int ntuple = desc->dtad_ntuple; 8551 dtrace_action_t *act; 8552 dtrace_recdesc_t *frec; 8553 dtrace_aggid_t aggid; 8554 dtrace_state_t *state = ecb->dte_state; 8555 8556 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 8557 agg->dtag_ecb = ecb; 8558 8559 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 8560 8561 switch (desc->dtad_kind) { 8562 case DTRACEAGG_MIN: 8563 agg->dtag_initial = UINT64_MAX; 8564 agg->dtag_aggregate = dtrace_aggregate_min; 8565 break; 8566 8567 case DTRACEAGG_MAX: 8568 agg->dtag_aggregate = dtrace_aggregate_max; 8569 break; 8570 8571 case DTRACEAGG_COUNT: 8572 agg->dtag_aggregate = dtrace_aggregate_count; 8573 break; 8574 8575 case DTRACEAGG_QUANTIZE: 8576 agg->dtag_aggregate = dtrace_aggregate_quantize; 8577 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 8578 sizeof (uint64_t); 8579 break; 8580 8581 case DTRACEAGG_LQUANTIZE: { 8582 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 8583 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 8584 8585 agg->dtag_initial = desc->dtad_arg; 8586 agg->dtag_aggregate = dtrace_aggregate_lquantize; 8587 8588 if (step == 0 || levels == 0) 8589 goto err; 8590 8591 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 8592 break; 8593 } 8594 8595 case DTRACEAGG_AVG: 8596 agg->dtag_aggregate = dtrace_aggregate_avg; 8597 size = sizeof (uint64_t) * 2; 8598 break; 8599 8600 case DTRACEAGG_SUM: 8601 agg->dtag_aggregate = dtrace_aggregate_sum; 8602 break; 8603 8604 default: 8605 goto err; 8606 } 8607 8608 agg->dtag_action.dta_rec.dtrd_size = size; 8609 8610 if (ntuple == 0) 8611 goto err; 8612 8613 /* 8614 * We must make sure that we have enough actions for the n-tuple. 8615 */ 8616 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 8617 if (DTRACEACT_ISAGG(act->dta_kind)) 8618 break; 8619 8620 if (--ntuple == 0) { 8621 /* 8622 * This is the action with which our n-tuple begins. 8623 */ 8624 agg->dtag_first = act; 8625 goto success; 8626 } 8627 } 8628 8629 /* 8630 * This n-tuple is short by ntuple elements. Return failure. 8631 */ 8632 ASSERT(ntuple != 0); 8633 err: 8634 kmem_free(agg, sizeof (dtrace_aggregation_t)); 8635 return (NULL); 8636 8637 success: 8638 /* 8639 * If the last action in the tuple has a size of zero, it's actually 8640 * an expression argument for the aggregating action. 8641 */ 8642 ASSERT(ecb->dte_action_last != NULL); 8643 act = ecb->dte_action_last; 8644 8645 if (act->dta_kind == DTRACEACT_DIFEXPR) { 8646 ASSERT(act->dta_difo != NULL); 8647 8648 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 8649 agg->dtag_hasarg = 1; 8650 } 8651 8652 /* 8653 * We need to allocate an id for this aggregation. 
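 *
 * The arena below is a vmem arena used purely as an ID allocator:
 * allocating one unit hands back an integer "address" that is unique
 * until freed, and that integer serves directly as the (1-based)
 * aggregation ID. A toy equivalent with a bitmap (illustrative only;
 * vmem itself is considerably more sophisticated):
 *
 *    #include <stdint.h>
 *
 *    #define MAXIDS 1024
 *
 *    static uint8_t id_used[MAXIDS];    // slot i: ID i + 1 is live
 *
 *    static uint32_t
 *    id_alloc(void)
 *    {
 *        uint32_t i;
 *
 *        for (i = 0; i < MAXIDS; i++) {
 *            if (!id_used[i]) {
 *                id_used[i] = 1;
 *                return (i + 1);
 *            }
 *        }
 *        return (0);                    // exhausted
 *    }
 *
 *    static void
 *    id_free(uint32_t id)
 *    {
 *        id_used[id - 1] = 0;           // make the ID available again
 *    }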
8654 */ 8655 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 8656 VM_BESTFIT | VM_SLEEP); 8657 8658 if (aggid - 1 >= state->dts_naggregations) { 8659 dtrace_aggregation_t **oaggs = state->dts_aggregations; 8660 dtrace_aggregation_t **aggs; 8661 int naggs = state->dts_naggregations << 1; 8662 int onaggs = state->dts_naggregations; 8663 8664 ASSERT(aggid == state->dts_naggregations + 1); 8665 8666 if (naggs == 0) { 8667 ASSERT(oaggs == NULL); 8668 naggs = 1; 8669 } 8670 8671 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 8672 8673 if (oaggs != NULL) { 8674 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 8675 kmem_free(oaggs, onaggs * sizeof (*aggs)); 8676 } 8677 8678 state->dts_aggregations = aggs; 8679 state->dts_naggregations = naggs; 8680 } 8681 8682 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 8683 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 8684 8685 frec = &agg->dtag_first->dta_rec; 8686 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 8687 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 8688 8689 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 8690 ASSERT(!act->dta_intuple); 8691 act->dta_intuple = 1; 8692 } 8693 8694 return (&agg->dtag_action); 8695 } 8696 8697 static void 8698 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 8699 { 8700 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 8701 dtrace_state_t *state = ecb->dte_state; 8702 dtrace_aggid_t aggid = agg->dtag_id; 8703 8704 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 8705 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 8706 8707 ASSERT(state->dts_aggregations[aggid - 1] == agg); 8708 state->dts_aggregations[aggid - 1] = NULL; 8709 8710 kmem_free(agg, sizeof (dtrace_aggregation_t)); 8711 } 8712 8713 static int 8714 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 8715 { 8716 dtrace_action_t *action, *last; 8717 dtrace_difo_t *dp = desc->dtad_difo; 8718 uint32_t size = 0, align = sizeof (uint8_t), mask; 8719 uint16_t format = 0; 8720 dtrace_recdesc_t *rec; 8721 dtrace_state_t *state = ecb->dte_state; 8722 dtrace_optval_t *opt = state->dts_options, nframes, strsize; 8723 uint64_t arg = desc->dtad_arg; 8724 8725 ASSERT(MUTEX_HELD(&dtrace_lock)); 8726 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 8727 8728 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 8729 /* 8730 * If this is an aggregating action, there must be neither 8731 * a speculate nor a commit on the action chain. 8732 */ 8733 dtrace_action_t *act; 8734 8735 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 8736 if (act->dta_kind == DTRACEACT_COMMIT) 8737 return (EINVAL); 8738 8739 if (act->dta_kind == DTRACEACT_SPECULATE) 8740 return (EINVAL); 8741 } 8742 8743 action = dtrace_ecb_aggregation_create(ecb, desc); 8744 8745 if (action == NULL) 8746 return (EINVAL); 8747 } else { 8748 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 8749 (desc->dtad_kind == DTRACEACT_DIFEXPR && 8750 dp != NULL && dp->dtdo_destructive)) { 8751 state->dts_destructive = 1; 8752 } 8753 8754 switch (desc->dtad_kind) { 8755 case DTRACEACT_PRINTF: 8756 case DTRACEACT_PRINTA: 8757 case DTRACEACT_SYSTEM: 8758 case DTRACEACT_FREOPEN: 8759 /* 8760 * We know that our arg is a string -- turn it into a 8761 * format. 
8762 */ 8763 if (arg == NULL) { 8764 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA); 8765 format = 0; 8766 } else { 8767 ASSERT(arg != NULL); 8768 ASSERT(arg > KERNELBASE); 8769 format = dtrace_format_add(state, 8770 (char *)(uintptr_t)arg); 8771 } 8772 8773 /*FALLTHROUGH*/ 8774 case DTRACEACT_LIBACT: 8775 case DTRACEACT_DIFEXPR: 8776 if (dp == NULL) 8777 return (EINVAL); 8778 8779 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 8780 break; 8781 8782 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 8783 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 8784 return (EINVAL); 8785 8786 size = opt[DTRACEOPT_STRSIZE]; 8787 } 8788 8789 break; 8790 8791 case DTRACEACT_STACK: 8792 if ((nframes = arg) == 0) { 8793 nframes = opt[DTRACEOPT_STACKFRAMES]; 8794 ASSERT(nframes > 0); 8795 arg = nframes; 8796 } 8797 8798 size = nframes * sizeof (pc_t); 8799 break; 8800 8801 case DTRACEACT_JSTACK: 8802 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 8803 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 8804 8805 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 8806 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 8807 8808 arg = DTRACE_USTACK_ARG(nframes, strsize); 8809 8810 /*FALLTHROUGH*/ 8811 case DTRACEACT_USTACK: 8812 if (desc->dtad_kind != DTRACEACT_JSTACK && 8813 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 8814 strsize = DTRACE_USTACK_STRSIZE(arg); 8815 nframes = opt[DTRACEOPT_USTACKFRAMES]; 8816 ASSERT(nframes > 0); 8817 arg = DTRACE_USTACK_ARG(nframes, strsize); 8818 } 8819 8820 /* 8821 * Save a slot for the pid. 8822 */ 8823 size = (nframes + 1) * sizeof (uint64_t); 8824 size += DTRACE_USTACK_STRSIZE(arg); 8825 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 8826 8827 break; 8828 8829 case DTRACEACT_SYM: 8830 case DTRACEACT_MOD: 8831 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 8832 sizeof (uint64_t)) || 8833 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 8834 return (EINVAL); 8835 break; 8836 8837 case DTRACEACT_USYM: 8838 case DTRACEACT_UMOD: 8839 case DTRACEACT_UADDR: 8840 if (dp == NULL || 8841 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 8842 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 8843 return (EINVAL); 8844 8845 /* 8846 * We have a slot for the pid, plus a slot for the 8847 * argument. To keep things simple (aligned with 8848 * bitness-neutral sizing), we store each as a 64-bit 8849 * quantity. 
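 *
 * Alignment for such records is not specified separately; it is
 * derived from the size, as the loop later in dtrace_ecb_action_add()
 * does: the largest power of two (up to 8) that divides the record
 * size becomes its alignment. A sketch of that derivation (the helper
 * name is illustrative):
 *
 *    #include <stdint.h>
 *
 *    static uint32_t
 *    align_for_size(uint32_t size, uint32_t deflt)
 *    {
 *        uint32_t mask;
 *
 *        for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0;
 *            mask >>= 1) {
 *            if (!(size & mask))
 *                return (mask + 1);     // low-order bits clear
 *        }
 *        return (deflt);
 *    }
 *
 * So a 16-byte record gets 8-byte alignment, a 12-byte record gets 4,
 * and a 7-byte record falls back to the default of 1.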
8850 */ 8851 size = 2 * sizeof (uint64_t); 8852 break; 8853 8854 case DTRACEACT_STOP: 8855 case DTRACEACT_BREAKPOINT: 8856 case DTRACEACT_PANIC: 8857 break; 8858 8859 case DTRACEACT_CHILL: 8860 case DTRACEACT_DISCARD: 8861 case DTRACEACT_RAISE: 8862 if (dp == NULL) 8863 return (EINVAL); 8864 break; 8865 8866 case DTRACEACT_EXIT: 8867 if (dp == NULL || 8868 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 8869 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 8870 return (EINVAL); 8871 break; 8872 8873 case DTRACEACT_SPECULATE: 8874 if (ecb->dte_size > sizeof (dtrace_epid_t)) 8875 return (EINVAL); 8876 8877 if (dp == NULL) 8878 return (EINVAL); 8879 8880 state->dts_speculates = 1; 8881 break; 8882 8883 case DTRACEACT_COMMIT: { 8884 dtrace_action_t *act = ecb->dte_action; 8885 8886 for (; act != NULL; act = act->dta_next) { 8887 if (act->dta_kind == DTRACEACT_COMMIT) 8888 return (EINVAL); 8889 } 8890 8891 if (dp == NULL) 8892 return (EINVAL); 8893 break; 8894 } 8895 8896 default: 8897 return (EINVAL); 8898 } 8899 8900 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 8901 /* 8902 * If this is a data-storing action or a speculate, 8903 * we must be sure that there isn't a commit on the 8904 * action chain. 8905 */ 8906 dtrace_action_t *act = ecb->dte_action; 8907 8908 for (; act != NULL; act = act->dta_next) { 8909 if (act->dta_kind == DTRACEACT_COMMIT) 8910 return (EINVAL); 8911 } 8912 } 8913 8914 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 8915 action->dta_rec.dtrd_size = size; 8916 } 8917 8918 action->dta_refcnt = 1; 8919 rec = &action->dta_rec; 8920 size = rec->dtrd_size; 8921 8922 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 8923 if (!(size & mask)) { 8924 align = mask + 1; 8925 break; 8926 } 8927 } 8928 8929 action->dta_kind = desc->dtad_kind; 8930 8931 if ((action->dta_difo = dp) != NULL) 8932 dtrace_difo_hold(dp); 8933 8934 rec->dtrd_action = action->dta_kind; 8935 rec->dtrd_arg = arg; 8936 rec->dtrd_uarg = desc->dtad_uarg; 8937 rec->dtrd_alignment = (uint16_t)align; 8938 rec->dtrd_format = format; 8939 8940 if ((last = ecb->dte_action_last) != NULL) { 8941 ASSERT(ecb->dte_action != NULL); 8942 action->dta_prev = last; 8943 last->dta_next = action; 8944 } else { 8945 ASSERT(ecb->dte_action == NULL); 8946 ecb->dte_action = action; 8947 } 8948 8949 ecb->dte_action_last = action; 8950 8951 return (0); 8952 } 8953 8954 static void 8955 dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 8956 { 8957 dtrace_action_t *act = ecb->dte_action, *next; 8958 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 8959 dtrace_difo_t *dp; 8960 uint16_t format; 8961 8962 if (act != NULL && act->dta_refcnt > 1) { 8963 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 8964 act->dta_refcnt--; 8965 } else { 8966 for (; act != NULL; act = next) { 8967 next = act->dta_next; 8968 ASSERT(next != NULL || act == ecb->dte_action_last); 8969 ASSERT(act->dta_refcnt == 1); 8970 8971 if ((format = act->dta_rec.dtrd_format) != 0) 8972 dtrace_format_remove(ecb->dte_state, format); 8973 8974 if ((dp = act->dta_difo) != NULL) 8975 dtrace_difo_release(dp, vstate); 8976 8977 if (DTRACEACT_ISAGG(act->dta_kind)) { 8978 dtrace_ecb_aggregation_destroy(ecb, act); 8979 } else { 8980 kmem_free(act, sizeof (dtrace_action_t)); 8981 } 8982 } 8983 } 8984 8985 ecb->dte_action = NULL; 8986 ecb->dte_action_last = NULL; 8987 ecb->dte_size = sizeof (dtrace_epid_t); 8988 } 8989 8990 static void 8991 dtrace_ecb_disable(dtrace_ecb_t *ecb) 8992 { 8993 /* 8994 * We disable the ECB by 
removing it from its probe. 8995 */ 8996 dtrace_ecb_t *pecb, *prev = NULL; 8997 dtrace_probe_t *probe = ecb->dte_probe; 8998 8999 ASSERT(MUTEX_HELD(&dtrace_lock)); 9000 9001 if (probe == NULL) { 9002 /* 9003 * This is the NULL probe; there is nothing to disable. 9004 */ 9005 return; 9006 } 9007 9008 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 9009 if (pecb == ecb) 9010 break; 9011 prev = pecb; 9012 } 9013 9014 ASSERT(pecb != NULL); 9015 9016 if (prev == NULL) { 9017 probe->dtpr_ecb = ecb->dte_next; 9018 } else { 9019 prev->dte_next = ecb->dte_next; 9020 } 9021 9022 if (ecb == probe->dtpr_ecb_last) { 9023 ASSERT(ecb->dte_next == NULL); 9024 probe->dtpr_ecb_last = prev; 9025 } 9026 9027 /* 9028 * The ECB has been disconnected from the probe; now sync to assure 9029 * that all CPUs have seen the change before returning. 9030 */ 9031 dtrace_sync(); 9032 9033 if (probe->dtpr_ecb == NULL) { 9034 /* 9035 * That was the last ECB on the probe; clear the predicate 9036 * cache ID for the probe, disable it and sync one more time 9037 * to assure that we'll never hit it again. 9038 */ 9039 dtrace_provider_t *prov = probe->dtpr_provider; 9040 9041 ASSERT(ecb->dte_next == NULL); 9042 ASSERT(probe->dtpr_ecb_last == NULL); 9043 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 9044 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 9045 probe->dtpr_id, probe->dtpr_arg); 9046 dtrace_sync(); 9047 } else { 9048 /* 9049 * There is at least one ECB remaining on the probe. If there 9050 * is _exactly_ one, set the probe's predicate cache ID to be 9051 * the predicate cache ID of the remaining ECB. 9052 */ 9053 ASSERT(probe->dtpr_ecb_last != NULL); 9054 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 9055 9056 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 9057 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 9058 9059 ASSERT(probe->dtpr_ecb->dte_next == NULL); 9060 9061 if (p != NULL) 9062 probe->dtpr_predcache = p->dtp_cacheid; 9063 } 9064 9065 ecb->dte_next = NULL; 9066 } 9067 } 9068 9069 static void 9070 dtrace_ecb_destroy(dtrace_ecb_t *ecb) 9071 { 9072 dtrace_state_t *state = ecb->dte_state; 9073 dtrace_vstate_t *vstate = &state->dts_vstate; 9074 dtrace_predicate_t *pred; 9075 dtrace_epid_t epid = ecb->dte_epid; 9076 9077 ASSERT(MUTEX_HELD(&dtrace_lock)); 9078 ASSERT(ecb->dte_next == NULL); 9079 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 9080 9081 if ((pred = ecb->dte_predicate) != NULL) 9082 dtrace_predicate_release(pred, vstate); 9083 9084 dtrace_ecb_action_remove(ecb); 9085 9086 ASSERT(state->dts_ecbs[epid - 1] == ecb); 9087 state->dts_ecbs[epid - 1] = NULL; 9088 9089 kmem_free(ecb, sizeof (dtrace_ecb_t)); 9090 } 9091 9092 static dtrace_ecb_t * 9093 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 9094 dtrace_enabling_t *enab) 9095 { 9096 dtrace_ecb_t *ecb; 9097 dtrace_predicate_t *pred; 9098 dtrace_actdesc_t *act; 9099 dtrace_provider_t *prov; 9100 dtrace_ecbdesc_t *desc = enab->dten_current; 9101 9102 ASSERT(MUTEX_HELD(&dtrace_lock)); 9103 ASSERT(state != NULL); 9104 9105 ecb = dtrace_ecb_add(state, probe); 9106 ecb->dte_uarg = desc->dted_uarg; 9107 9108 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 9109 dtrace_predicate_hold(pred); 9110 ecb->dte_predicate = pred; 9111 } 9112 9113 if (probe != NULL) { 9114 /* 9115 * If the provider shows more leg than the consumer is old 9116 * enough to see, we need to enable the appropriate implicit 9117 * predicate bits to prevent the ecb from activating at 9118 * revealing times. 
9119 * 9120 * Providers specifying DTRACE_PRIV_USER at register time 9121 * are stating that they need the /proc-style privilege 9122 * model to be enforced, and this is what DTRACE_COND_OWNER 9123 * and DTRACE_COND_ZONEOWNER will then do at probe time. 9124 */ 9125 prov = probe->dtpr_provider; 9126 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 9127 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 9128 ecb->dte_cond |= DTRACE_COND_OWNER; 9129 9130 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 9131 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 9132 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 9133 9134 /* 9135 * If the provider shows us kernel innards and the user 9136 * is lacking sufficient privilege, enable the 9137 * DTRACE_COND_USERMODE implicit predicate. 9138 */ 9139 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 9140 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 9141 ecb->dte_cond |= DTRACE_COND_USERMODE; 9142 } 9143 9144 if (dtrace_ecb_create_cache != NULL) { 9145 /* 9146 * If we have a cached ecb, we'll use its action list instead 9147 * of creating our own (saving both time and space). 9148 */ 9149 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 9150 dtrace_action_t *act = cached->dte_action; 9151 9152 if (act != NULL) { 9153 ASSERT(act->dta_refcnt > 0); 9154 act->dta_refcnt++; 9155 ecb->dte_action = act; 9156 ecb->dte_action_last = cached->dte_action_last; 9157 ecb->dte_needed = cached->dte_needed; 9158 ecb->dte_size = cached->dte_size; 9159 ecb->dte_alignment = cached->dte_alignment; 9160 } 9161 9162 return (ecb); 9163 } 9164 9165 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 9166 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 9167 dtrace_ecb_destroy(ecb); 9168 return (NULL); 9169 } 9170 } 9171 9172 dtrace_ecb_resize(ecb); 9173 9174 return (dtrace_ecb_create_cache = ecb); 9175 } 9176 9177 static int 9178 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 9179 { 9180 dtrace_ecb_t *ecb; 9181 dtrace_enabling_t *enab = arg; 9182 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 9183 9184 ASSERT(state != NULL); 9185 9186 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 9187 /* 9188 * This probe was created in a generation for which this 9189 * enabling has previously created ECBs; we don't want to 9190 * enable it again, so just kick out. 
9191 */ 9192 return (DTRACE_MATCH_NEXT); 9193 } 9194 9195 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 9196 return (DTRACE_MATCH_DONE); 9197 9198 dtrace_ecb_enable(ecb); 9199 return (DTRACE_MATCH_NEXT); 9200 } 9201 9202 static dtrace_ecb_t * 9203 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 9204 { 9205 dtrace_ecb_t *ecb; 9206 9207 ASSERT(MUTEX_HELD(&dtrace_lock)); 9208 9209 if (id == 0 || id > state->dts_necbs) 9210 return (NULL); 9211 9212 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 9213 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 9214 9215 return (state->dts_ecbs[id - 1]); 9216 } 9217 9218 static dtrace_aggregation_t * 9219 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 9220 { 9221 dtrace_aggregation_t *agg; 9222 9223 ASSERT(MUTEX_HELD(&dtrace_lock)); 9224 9225 if (id == 0 || id > state->dts_naggregations) 9226 return (NULL); 9227 9228 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 9229 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 9230 agg->dtag_id == id); 9231 9232 return (state->dts_aggregations[id - 1]); 9233 } 9234 9235 /* 9236 * DTrace Buffer Functions 9237 * 9238 * The following functions manipulate DTrace buffers. Most of these functions 9239 * are called in the context of establishing or processing consumer state; 9240 * exceptions are explicitly noted. 9241 */ 9242 9243 /* 9244 * Note: called from cross call context. This function switches the two 9245 * buffers on a given CPU. The atomicity of this operation is assured by 9246 * disabling interrupts while the actual switch takes place; the disabling of 9247 * interrupts serializes the execution with any execution of dtrace_probe() on 9248 * the same CPU. 9249 */ 9250 static void 9251 dtrace_buffer_switch(dtrace_buffer_t *buf) 9252 { 9253 caddr_t tomax = buf->dtb_tomax; 9254 caddr_t xamot = buf->dtb_xamot; 9255 dtrace_icookie_t cookie; 9256 9257 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 9258 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 9259 9260 cookie = dtrace_interrupt_disable(); 9261 buf->dtb_tomax = xamot; 9262 buf->dtb_xamot = tomax; 9263 buf->dtb_xamot_drops = buf->dtb_drops; 9264 buf->dtb_xamot_offset = buf->dtb_offset; 9265 buf->dtb_xamot_errors = buf->dtb_errors; 9266 buf->dtb_xamot_flags = buf->dtb_flags; 9267 buf->dtb_offset = 0; 9268 buf->dtb_drops = 0; 9269 buf->dtb_errors = 0; 9270 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 9271 dtrace_interrupt_enable(cookie); 9272 } 9273 9274 /* 9275 * Note: called from cross call context. This function activates a buffer 9276 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 9277 * is guaranteed by the disabling of interrupts. 9278 */ 9279 static void 9280 dtrace_buffer_activate(dtrace_state_t *state) 9281 { 9282 dtrace_buffer_t *buf; 9283 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 9284 9285 buf = &state->dts_buffer[CPU->cpu_id]; 9286 9287 if (buf->dtb_tomax != NULL) { 9288 /* 9289 * We might like to assert that the buffer is marked inactive, 9290 * but this isn't necessarily true: the CPU that processes the 9291 * BEGIN probe has its buffer activated 9292 * manually. In this case, we take the (harmless) action of 9293 * re-clearing the INACTIVE bit.
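 *
 * Both this function and dtrace_buffer_switch() above lean on the
 * same pattern: do all of the buffer bookkeeping inside a window in
 * which interrupts are disabled, so that dtrace_probe() on this CPU
 * can never observe a half-switched buffer. A minimal sketch of the
 * swap (intr_disable()/intr_restore() are hypothetical stand-ins for
 * the dtrace_interrupt_*() primitives):
 *
 *    #include <stdint.h>
 *
 *    extern uint64_t intr_disable(void);
 *    extern void intr_restore(uint64_t);
 *
 *    typedef struct dbuf {
 *        char *db_tomax;                // active half
 *        char *db_xamot;                // inactive half
 *        uint64_t db_offset, db_xamot_offset;
 *    } dbuf_t;
 *
 *    static void
 *    dbuf_switch(dbuf_t *b)
 *    {
 *        uint64_t cookie = intr_disable();
 *        char *tomax = b->db_tomax;
 *
 *        b->db_tomax = b->db_xamot;
 *        b->db_xamot = tomax;
 *        b->db_xamot_offset = b->db_offset;
 *        b->db_offset = 0;
 *        intr_restore(cookie);
 *    }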
9294 */ 9295 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 9296 } 9297 9298 dtrace_interrupt_enable(cookie); 9299 } 9300 9301 static int 9302 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 9303 processorid_t cpu) 9304 { 9305 cpu_t *cp; 9306 dtrace_buffer_t *buf; 9307 9308 ASSERT(MUTEX_HELD(&cpu_lock)); 9309 ASSERT(MUTEX_HELD(&dtrace_lock)); 9310 9311 if (size > dtrace_nonroot_maxsize && 9312 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 9313 return (EFBIG); 9314 9315 cp = cpu_list; 9316 9317 do { 9318 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 9319 continue; 9320 9321 buf = &bufs[cp->cpu_id]; 9322 9323 /* 9324 * If there is already a buffer allocated for this CPU, it 9325 * is only possible that this is a DR event. In this case, 9326 * the buffer size must match our specified size. 9327 */ 9328 if (buf->dtb_tomax != NULL) { 9329 ASSERT(buf->dtb_size == size); 9330 continue; 9331 } 9332 9333 ASSERT(buf->dtb_xamot == NULL); 9334 9335 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 9336 goto err; 9337 9338 buf->dtb_size = size; 9339 buf->dtb_flags = flags; 9340 buf->dtb_offset = 0; 9341 buf->dtb_drops = 0; 9342 9343 if (flags & DTRACEBUF_NOSWITCH) 9344 continue; 9345 9346 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 9347 goto err; 9348 } while ((cp = cp->cpu_next) != cpu_list); 9349 9350 return (0); 9351 9352 err: 9353 cp = cpu_list; 9354 9355 do { 9356 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 9357 continue; 9358 9359 buf = &bufs[cp->cpu_id]; 9360 9361 if (buf->dtb_xamot != NULL) { 9362 ASSERT(buf->dtb_tomax != NULL); 9363 ASSERT(buf->dtb_size == size); 9364 kmem_free(buf->dtb_xamot, size); 9365 } 9366 9367 if (buf->dtb_tomax != NULL) { 9368 ASSERT(buf->dtb_size == size); 9369 kmem_free(buf->dtb_tomax, size); 9370 } 9371 9372 buf->dtb_tomax = NULL; 9373 buf->dtb_xamot = NULL; 9374 buf->dtb_size = 0; 9375 } while ((cp = cp->cpu_next) != cpu_list); 9376 9377 return (ENOMEM); 9378 } 9379 9380 /* 9381 * Note: called from probe context. This function just increments the drop 9382 * count on a buffer. It has been made a function to allow for the 9383 * possibility of understanding the source of mysterious drop counts. (A 9384 * problem for which one may be particularly disappointed that DTrace cannot 9385 * be used to understand DTrace.) 9386 */ 9387 static void 9388 dtrace_buffer_drop(dtrace_buffer_t *buf) 9389 { 9390 buf->dtb_drops++; 9391 } 9392 9393 /* 9394 * Note: called from probe context. This function is called to reserve space 9395 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 9396 * mstate. Returns the new offset in the buffer, or a negative value if an 9397 * error has occurred. 9398 */ 9399 static intptr_t 9400 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 9401 dtrace_state_t *state, dtrace_mstate_t *mstate) 9402 { 9403 intptr_t offs = buf->dtb_offset, soffs; 9404 intptr_t woffs; 9405 caddr_t tomax; 9406 size_t total; 9407 9408 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 9409 return (-1); 9410 9411 if ((tomax = buf->dtb_tomax) == NULL) { 9412 dtrace_buffer_drop(buf); 9413 return (-1); 9414 } 9415 9416 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 9417 while (offs & (align - 1)) { 9418 /* 9419 * Assert that our alignment is off by a number which 9420 * is itself sizeof (uint32_t) aligned. 
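 *
 * The padding protocol relies on that invariant: because every gap is
 * a whole number of 4-byte words, it can be filled with EPIDNONE
 * words that a consumer knows to skip. A sketch of the fill (memcpy()
 * standing in for DTRACE_STORE(); names are illustrative):
 *
 *    #include <assert.h>
 *    #include <stdint.h>
 *    #include <string.h>
 *
 *    #define EPIDNONE 0
 *
 *    static uint64_t
 *    buf_pad(char *tomax, uint64_t offs, uint64_t align)
 *    {
 *        while (offs & (align - 1)) {
 *            uint32_t none = EPIDNONE;
 *
 *            assert(((align - (offs & (align - 1))) &
 *                (sizeof (uint32_t) - 1)) == 0);
 *            memcpy(tomax + offs, &none, sizeof (none));
 *            offs += sizeof (uint32_t);
 *        }
 *        return (offs);
 *    }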
9421 */ 9422 ASSERT(!((align - (offs & (align - 1))) & 9423 (sizeof (uint32_t) - 1))); 9424 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 9425 offs += sizeof (uint32_t); 9426 } 9427 9428 if ((soffs = offs + needed) > buf->dtb_size) { 9429 dtrace_buffer_drop(buf); 9430 return (-1); 9431 } 9432 9433 if (mstate == NULL) 9434 return (offs); 9435 9436 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 9437 mstate->dtms_scratch_size = buf->dtb_size - soffs; 9438 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 9439 9440 return (offs); 9441 } 9442 9443 if (buf->dtb_flags & DTRACEBUF_FILL) { 9444 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 9445 (buf->dtb_flags & DTRACEBUF_FULL)) 9446 return (-1); 9447 goto out; 9448 } 9449 9450 total = needed + (offs & (align - 1)); 9451 9452 /* 9453 * For a ring buffer, life is quite a bit more complicated. Before 9454 * we can store any padding, we need to adjust our wrapping offset. 9455 * (If we've never before wrapped or we're not about to, no adjustment 9456 * is required.) 9457 */ 9458 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 9459 offs + total > buf->dtb_size) { 9460 woffs = buf->dtb_xamot_offset; 9461 9462 if (offs + total > buf->dtb_size) { 9463 /* 9464 * We can't fit in the end of the buffer. First, a 9465 * sanity check that we can fit in the buffer at all. 9466 */ 9467 if (total > buf->dtb_size) { 9468 dtrace_buffer_drop(buf); 9469 return (-1); 9470 } 9471 9472 /* 9473 * We're going to be storing at the top of the buffer, 9474 * so now we need to deal with the wrapped offset. We 9475 * only reset our wrapped offset to 0 if it is 9476 * currently greater than the current offset. If it 9477 * is less than the current offset, it is because a 9478 * previous allocation induced a wrap -- but the 9479 * allocation didn't subsequently take the space due 9480 * to an error or false predicate evaluation. In this 9481 * case, we'll just leave the wrapped offset alone: if 9482 * the wrapped offset hasn't been advanced far enough 9483 * for this allocation, it will be adjusted in the 9484 * lower loop. 9485 */ 9486 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 9487 if (woffs >= offs) 9488 woffs = 0; 9489 } else { 9490 woffs = 0; 9491 } 9492 9493 /* 9494 * Now we know that we're going to be storing to the 9495 * top of the buffer and that there is room for us 9496 * there. We need to clear the buffer from the current 9497 * offset to the end (there may be old gunk there). 9498 */ 9499 while (offs < buf->dtb_size) 9500 tomax[offs++] = 0; 9501 9502 /* 9503 * We need to set our offset to zero. And because we 9504 * are wrapping, we need to set the bit indicating as 9505 * much. We can also adjust our needed space back 9506 * down to the space required by the ECB -- we know 9507 * that the top of the buffer is aligned. 9508 */ 9509 offs = 0; 9510 total = needed; 9511 buf->dtb_flags |= DTRACEBUF_WRAPPED; 9512 } else { 9513 /* 9514 * There is room for us in the buffer, so we simply 9515 * need to check the wrapped offset. 9516 */ 9517 if (woffs < offs) { 9518 /* 9519 * The wrapped offset is less than the offset. 9520 * This can happen if we allocated buffer space 9521 * that induced a wrap, but then we didn't 9522 * subsequently take the space due to an error 9523 * or false predicate evaluation. This is 9524 * okay; we know that _this_ allocation isn't 9525 * going to induce a wrap. We still can't 9526 * reset the wrapped offset to be zero, 9527 * however: the space may have been trashed in 9528 * the previous failed probe attempt. 
But at 9529 * least the wrapped offset doesn't need to 9530 * be adjusted at all... 9531 */ 9532 goto out; 9533 } 9534 } 9535 9536 while (offs + total > woffs) { 9537 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 9538 size_t size; 9539 9540 if (epid == DTRACE_EPIDNONE) { 9541 size = sizeof (uint32_t); 9542 } else { 9543 ASSERT(epid <= state->dts_necbs); 9544 ASSERT(state->dts_ecbs[epid - 1] != NULL); 9545 9546 size = state->dts_ecbs[epid - 1]->dte_size; 9547 } 9548 9549 ASSERT(woffs + size <= buf->dtb_size); 9550 ASSERT(size != 0); 9551 9552 if (woffs + size == buf->dtb_size) { 9553 /* 9554 * We've reached the end of the buffer; we want 9555 * to set the wrapped offset to 0 and break 9556 * out. However, if the offs is 0, then we're 9557 * in a strange edge-condition: the amount of 9558 * space that we want to reserve plus the size 9559 * of the record that we're overwriting is 9560 * greater than the size of the buffer. This 9561 * is problematic because if we reserve the 9562 * space but subsequently don't consume it (due 9563 * to a failed predicate or error) the wrapped 9564 * offset will be 0 -- yet the EPID at offset 0 9565 * will not be committed. This situation is 9566 * relatively easy to deal with: if we're in 9567 * this case, the buffer is indistinguishable 9568 * from one that hasn't wrapped; we need only 9569 * finish the job by clearing the wrapped bit, 9570 * explicitly setting the offset to be 0, and 9571 * zero'ing out the old data in the buffer. 9572 */ 9573 if (offs == 0) { 9574 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 9575 buf->dtb_offset = 0; 9576 woffs = total; 9577 9578 while (woffs < buf->dtb_size) 9579 tomax[woffs++] = 0; 9580 } 9581 9582 woffs = 0; 9583 break; 9584 } 9585 9586 woffs += size; 9587 } 9588 9589 /* 9590 * We have a wrapped offset. It may be that the wrapped offset 9591 * has become zero -- that's okay. 9592 */ 9593 buf->dtb_xamot_offset = woffs; 9594 } 9595 9596 out: 9597 /* 9598 * Now we can plow the buffer with any necessary padding. 9599 */ 9600 while (offs & (align - 1)) { 9601 /* 9602 * Assert that our alignment is off by a number which 9603 * is itself sizeof (uint32_t) aligned. 9604 */ 9605 ASSERT(!((align - (offs & (align - 1))) & 9606 (sizeof (uint32_t) - 1))); 9607 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 9608 offs += sizeof (uint32_t); 9609 } 9610 9611 if (buf->dtb_flags & DTRACEBUF_FILL) { 9612 if (offs + needed > buf->dtb_size - state->dts_reserve) { 9613 buf->dtb_flags |= DTRACEBUF_FULL; 9614 return (-1); 9615 } 9616 } 9617 9618 if (mstate == NULL) 9619 return (offs); 9620 9621 /* 9622 * For ring buffers and fill buffers, the scratch space is always 9623 * the inactive buffer. 9624 */ 9625 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 9626 mstate->dtms_scratch_size = buf->dtb_size; 9627 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 9628 9629 return (offs); 9630 } 9631 9632 static void 9633 dtrace_buffer_polish(dtrace_buffer_t *buf) 9634 { 9635 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 9636 ASSERT(MUTEX_HELD(&dtrace_lock)); 9637 9638 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 9639 return; 9640 9641 /* 9642 * We need to polish the ring buffer. There are three cases: 9643 * 9644 * - The first (and presumably most common) is that there is no gap 9645 * between the buffer offset and the wrapped offset. In this case, 9646 * there is nothing in the buffer that isn't valid data; we can 9647 * mark the buffer as polished and return. 
9648 * 9649 * - The second (less common than the first but still more common 9650 * than the third) is that there is a gap between the buffer offset 9651 * and the wrapped offset, and the wrapped offset is larger than the 9652 * buffer offset. This can happen because of an alignment issue, or 9653 * can happen because of a call to dtrace_buffer_reserve() that 9654 * didn't subsequently consume the buffer space. In this case, 9655 * we need to zero the data from the buffer offset to the wrapped 9656 * offset. 9657 * 9658 * - The third (and least common) is that there is a gap between the 9659 * buffer offset and the wrapped offset, but the wrapped offset is 9660 * _less_ than the buffer offset. This can only happen because a 9661 * call to dtrace_buffer_reserve() induced a wrap, but the space 9662 * was not subsequently consumed. In this case, we need to zero the 9663 * space from the offset to the end of the buffer _and_ from the 9664 * top of the buffer to the wrapped offset. 9665 */ 9666 if (buf->dtb_offset < buf->dtb_xamot_offset) { 9667 bzero(buf->dtb_tomax + buf->dtb_offset, 9668 buf->dtb_xamot_offset - buf->dtb_offset); 9669 } 9670 9671 if (buf->dtb_offset > buf->dtb_xamot_offset) { 9672 bzero(buf->dtb_tomax + buf->dtb_offset, 9673 buf->dtb_size - buf->dtb_offset); 9674 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 9675 } 9676 } 9677 9678 static void 9679 dtrace_buffer_free(dtrace_buffer_t *bufs) 9680 { 9681 int i; 9682 9683 for (i = 0; i < NCPU; i++) { 9684 dtrace_buffer_t *buf = &bufs[i]; 9685 9686 if (buf->dtb_tomax == NULL) { 9687 ASSERT(buf->dtb_xamot == NULL); 9688 ASSERT(buf->dtb_size == 0); 9689 continue; 9690 } 9691 9692 if (buf->dtb_xamot != NULL) { 9693 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 9694 kmem_free(buf->dtb_xamot, buf->dtb_size); 9695 } 9696 9697 kmem_free(buf->dtb_tomax, buf->dtb_size); 9698 buf->dtb_size = 0; 9699 buf->dtb_tomax = NULL; 9700 buf->dtb_xamot = NULL; 9701 } 9702 } 9703 9704 /* 9705 * DTrace Enabling Functions 9706 */ 9707 static dtrace_enabling_t * 9708 dtrace_enabling_create(dtrace_vstate_t *vstate) 9709 { 9710 dtrace_enabling_t *enab; 9711 9712 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 9713 enab->dten_vstate = vstate; 9714 9715 return (enab); 9716 } 9717 9718 static void 9719 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 9720 { 9721 dtrace_ecbdesc_t **ndesc; 9722 size_t osize, nsize; 9723 9724 /* 9725 * We can't add to enablings after we've enabled them, or after we've 9726 * retained them. 
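 *
 * Growth below follows the usual amortized-doubling idiom: when the
 * descriptor array is full, double its capacity (starting at one),
 * copy the old contents over, and free the old array. A standalone
 * sketch (calloc()/free() in place of kmem_zalloc()/kmem_free(); the
 * vector type is hypothetical):
 *
 *    #include <stdlib.h>
 *    #include <string.h>
 *
 *    typedef struct vec {
 *        void **v_items;
 *        int v_n, v_max;
 *    } vec_t;
 *
 *    static int
 *    vec_append(vec_t *v, void *item)
 *    {
 *        if (v->v_n == v->v_max) {
 *            int nmax = (v->v_max == 0) ? 1 : (v->v_max << 1);
 *            void **nitems = calloc(nmax, sizeof (void *));
 *
 *            if (nitems == NULL)
 *                return (-1);
 *            memcpy(nitems, v->v_items,
 *                v->v_n * sizeof (void *));
 *            free(v->v_items);
 *            v->v_items = nitems;
 *            v->v_max = nmax;
 *        }
 *        v->v_items[v->v_n++] = item;
 *        return (0);
 *    }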
9727 */ 9728 ASSERT(enab->dten_probegen == 0); 9729 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 9730 9731 if (enab->dten_ndesc < enab->dten_maxdesc) { 9732 enab->dten_desc[enab->dten_ndesc++] = ecb; 9733 return; 9734 } 9735 9736 osize = enab->dten_maxdesc * sizeof (dtrace_ecbdesc_t *); 9737 9738 if (enab->dten_maxdesc == 0) { 9739 enab->dten_maxdesc = 1; 9740 } else { 9741 enab->dten_maxdesc <<= 1; 9742 } 9743 9744 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 9745 9746 nsize = enab->dten_maxdesc * sizeof (dtrace_ecbdesc_t *); 9747 ndesc = kmem_zalloc(nsize, KM_SLEEP); 9748 bcopy(enab->dten_desc, ndesc, osize); 9749 kmem_free(enab->dten_desc, osize); 9750 9751 enab->dten_desc = ndesc; 9752 enab->dten_desc[enab->dten_ndesc++] = ecb; 9753 } 9754 9755 static void 9756 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 9757 dtrace_probedesc_t *pd) 9758 { 9759 dtrace_ecbdesc_t *new; 9760 dtrace_predicate_t *pred; 9761 dtrace_actdesc_t *act; 9762 9763 /* 9764 * We're going to create a new ECB description that matches the 9765 * specified ECB in every way, but has the specified probe description. 9766 */ 9767 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 9768 9769 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 9770 dtrace_predicate_hold(pred); 9771 9772 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 9773 dtrace_actdesc_hold(act); 9774 9775 new->dted_action = ecb->dted_action; 9776 new->dted_pred = ecb->dted_pred; 9777 new->dted_probe = *pd; 9778 new->dted_uarg = ecb->dted_uarg; 9779 9780 dtrace_enabling_add(enab, new); 9781 } 9782 9783 static void 9784 dtrace_enabling_dump(dtrace_enabling_t *enab) 9785 { 9786 int i; 9787 9788 for (i = 0; i < enab->dten_ndesc; i++) { 9789 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 9790 9791 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 9792 desc->dtpd_provider, desc->dtpd_mod, 9793 desc->dtpd_func, desc->dtpd_name); 9794 } 9795 } 9796 9797 static void 9798 dtrace_enabling_destroy(dtrace_enabling_t *enab) 9799 { 9800 int i; 9801 dtrace_ecbdesc_t *ep; 9802 dtrace_vstate_t *vstate = enab->dten_vstate; 9803 9804 ASSERT(MUTEX_HELD(&dtrace_lock)); 9805 9806 for (i = 0; i < enab->dten_ndesc; i++) { 9807 dtrace_actdesc_t *act, *next; 9808 dtrace_predicate_t *pred; 9809 9810 ep = enab->dten_desc[i]; 9811 9812 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 9813 dtrace_predicate_release(pred, vstate); 9814 9815 for (act = ep->dted_action; act != NULL; act = next) { 9816 next = act->dtad_next; 9817 dtrace_actdesc_release(act, vstate); 9818 } 9819 9820 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 9821 } 9822 9823 kmem_free(enab->dten_desc, 9824 enab->dten_maxdesc * sizeof (dtrace_ecbdesc_t *)); 9825 9826 /* 9827 * If this was a retained enabling, decrement the dts_nretained count 9828 * and take it off the dtrace_retained list.
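 *
 * The removal below is a conventional doubly-linked unlink against a
 * single list head. In isolation (node type and head illustrative):
 *
 *    typedef struct node {
 *        struct node *n_prev, *n_next;
 *    } node_t;
 *
 *    static node_t *list_head;
 *
 *    static void
 *    list_unlink(node_t *n)
 *    {
 *        if (n->n_prev == NULL) {
 *            if (list_head == n)        // first (or not listed at all)
 *                list_head = n->n_next;
 *        } else {
 *            n->n_prev->n_next = n->n_next;
 *        }
 *
 *        if (n->n_next != NULL)
 *            n->n_next->n_prev = n->n_prev;
 *
 *        n->n_prev = n->n_next = NULL;
 *    }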
9829 */ 9830 if (enab->dten_prev != NULL || enab->dten_next != NULL || 9831 dtrace_retained == enab) { 9832 ASSERT(enab->dten_vstate->dtvs_state != NULL); 9833 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 9834 enab->dten_vstate->dtvs_state->dts_nretained--; 9835 } 9836 9837 if (enab->dten_prev == NULL) { 9838 if (dtrace_retained == enab) { 9839 dtrace_retained = enab->dten_next; 9840 9841 if (dtrace_retained != NULL) 9842 dtrace_retained->dten_prev = NULL; 9843 } 9844 } else { 9845 ASSERT(enab != dtrace_retained); 9846 ASSERT(dtrace_retained != NULL); 9847 enab->dten_prev->dten_next = enab->dten_next; 9848 } 9849 9850 if (enab->dten_next != NULL) { 9851 ASSERT(dtrace_retained != NULL); 9852 enab->dten_next->dten_prev = enab->dten_prev; 9853 } 9854 9855 kmem_free(enab, sizeof (dtrace_enabling_t)); 9856 } 9857 9858 static int 9859 dtrace_enabling_retain(dtrace_enabling_t *enab) 9860 { 9861 dtrace_state_t *state; 9862 9863 ASSERT(MUTEX_HELD(&dtrace_lock)); 9864 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 9865 ASSERT(enab->dten_vstate != NULL); 9866 9867 state = enab->dten_vstate->dtvs_state; 9868 ASSERT(state != NULL); 9869 9870 /* 9871 * We only allow each state to retain dtrace_retain_max enablings. 9872 */ 9873 if (state->dts_nretained >= dtrace_retain_max) 9874 return (ENOSPC); 9875 9876 state->dts_nretained++; 9877 9878 if (dtrace_retained == NULL) { 9879 dtrace_retained = enab; 9880 return (0); 9881 } 9882 9883 enab->dten_next = dtrace_retained; 9884 dtrace_retained->dten_prev = enab; 9885 dtrace_retained = enab; 9886 9887 return (0); 9888 } 9889 9890 static int 9891 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 9892 dtrace_probedesc_t *create) 9893 { 9894 dtrace_enabling_t *new, *enab; 9895 int found = 0, err = ENOENT; 9896 9897 ASSERT(MUTEX_HELD(&dtrace_lock)); 9898 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 9899 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 9900 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 9901 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 9902 9903 new = dtrace_enabling_create(&state->dts_vstate); 9904 9905 /* 9906 * Iterate over all retained enablings, looking for enablings that 9907 * match the specified state. 9908 */ 9909 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 9910 int i; 9911 9912 /* 9913 * dtvs_state can only be NULL for helper enablings -- and 9914 * helper enablings can't be retained. 9915 */ 9916 ASSERT(enab->dten_vstate->dtvs_state != NULL); 9917 9918 if (enab->dten_vstate->dtvs_state != state) 9919 continue; 9920 9921 /* 9922 * Now iterate over each probe description; we're looking for 9923 * an exact match to the specified probe description. 9924 */ 9925 for (i = 0; i < enab->dten_ndesc; i++) { 9926 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 9927 dtrace_probedesc_t *pd = &ep->dted_probe; 9928 9929 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 9930 continue; 9931 9932 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 9933 continue; 9934 9935 if (strcmp(pd->dtpd_func, match->dtpd_func)) 9936 continue; 9937 9938 if (strcmp(pd->dtpd_name, match->dtpd_name)) 9939 continue; 9940 9941 /* 9942 * We have a winning probe! Add it to our growing 9943 * enabling. 
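 *
 * "Winning" means an exact match on all four components of the probe
 * tuple. The test in isolation (the fixed-size fields here are purely
 * illustrative; the driver sizes them with the DTRACE_*NAMELEN
 * constants):
 *
 *    #include <string.h>
 *
 *    typedef struct pdesc {
 *        char pd_provider[64];
 *        char pd_mod[64];
 *        char pd_func[128];
 *        char pd_name[64];
 *    } pdesc_t;
 *
 *    static int
 *    pdesc_match(const pdesc_t *pd, const pdesc_t *match)
 *    {
 *        return (strcmp(pd->pd_provider, match->pd_provider) == 0 &&
 *            strcmp(pd->pd_mod, match->pd_mod) == 0 &&
 *            strcmp(pd->pd_func, match->pd_func) == 0 &&
 *            strcmp(pd->pd_name, match->pd_name) == 0);
 *    }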
9944 */ 9945 found = 1; 9946 dtrace_enabling_addlike(new, ep, create); 9947 } 9948 } 9949 9950 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 9951 dtrace_enabling_destroy(new); 9952 return (err); 9953 } 9954 9955 return (0); 9956 } 9957 9958 static void 9959 dtrace_enabling_retract(dtrace_state_t *state) 9960 { 9961 dtrace_enabling_t *enab, *next; 9962 9963 ASSERT(MUTEX_HELD(&dtrace_lock)); 9964 9965 /* 9966 * Iterate over all retained enablings, destroying those retained 9967 * for the specified state. 9968 */ 9969 for (enab = dtrace_retained; enab != NULL; enab = next) { 9970 next = enab->dten_next; 9971 9972 /* 9973 * dtvs_state can only be NULL for helper enablings -- and 9974 * helper enablings can't be retained. 9975 */ 9976 ASSERT(enab->dten_vstate->dtvs_state != NULL); 9977 9978 if (enab->dten_vstate->dtvs_state == state) { 9979 ASSERT(state->dts_nretained > 0); 9980 dtrace_enabling_destroy(enab); 9981 } 9982 } 9983 9984 ASSERT(state->dts_nretained == 0); 9985 } 9986 9987 static int 9988 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 9989 { 9990 int i = 0; 9991 int matched = 0; 9992 9993 ASSERT(MUTEX_HELD(&cpu_lock)); 9994 ASSERT(MUTEX_HELD(&dtrace_lock)); 9995 9996 for (i = 0; i < enab->dten_ndesc; i++) { 9997 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 9998 9999 enab->dten_current = ep; 10000 enab->dten_error = 0; 10001 10002 matched += dtrace_probe_enable(&ep->dted_probe, enab); 10003 10004 if (enab->dten_error != 0) { 10005 /* 10006 * If we get an error half-way through enabling the 10007 * probes, we kick out -- perhaps with some number of 10008 * them enabled. Leaving those probes enabled may 10009 * be slightly confusing for user-level, but we expect 10010 * that no one will attempt to actually drive on in 10011 * the face of such errors. If this is an anonymous 10012 * enabling (indicated with a NULL nmatched pointer), 10013 * we cmn_err() a message. We aren't expecting to 10014 * get such an error -- to the extent that one can occur at all, 10015 * it would be a result of corrupted DOF in the driver 10016 * properties. 10017 */ 10018 if (nmatched == NULL) { 10019 cmn_err(CE_WARN, "dtrace_enabling_match() " 10020 "error on %p: %d", (void *)ep, 10021 enab->dten_error); 10022 } 10023 10024 return (enab->dten_error); 10025 } 10026 } 10027 10028 enab->dten_probegen = dtrace_probegen; 10029 if (nmatched != NULL) 10030 *nmatched = matched; 10031 10032 return (0); 10033 } 10034 10035 static void 10036 dtrace_enabling_matchall(void) 10037 { 10038 dtrace_enabling_t *enab; 10039 10040 mutex_enter(&cpu_lock); 10041 mutex_enter(&dtrace_lock); 10042 10043 /* 10044 * Because we can be called after dtrace_detach() has been called, we 10045 * cannot assert that there are retained enablings. We can safely 10046 * load from dtrace_retained, however: the taskq_destroy() at the 10047 * end of dtrace_detach() will block pending our completion.
10048 */ 10049 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) 10050 (void) dtrace_enabling_match(enab, NULL); 10051 10052 mutex_exit(&dtrace_lock); 10053 mutex_exit(&cpu_lock); 10054 } 10055 10056 static int 10057 dtrace_enabling_matchstate(dtrace_state_t *state, int *nmatched) 10058 { 10059 dtrace_enabling_t *enab; 10060 int matched, total = 0, err; 10061 10062 ASSERT(MUTEX_HELD(&cpu_lock)); 10063 ASSERT(MUTEX_HELD(&dtrace_lock)); 10064 10065 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 10066 ASSERT(enab->dten_vstate->dtvs_state != NULL); 10067 10068 if (enab->dten_vstate->dtvs_state != state) 10069 continue; 10070 10071 if ((err = dtrace_enabling_match(enab, &matched)) != 0) 10072 return (err); 10073 10074 total += matched; 10075 } 10076 10077 if (nmatched != NULL) 10078 *nmatched = total; 10079 10080 return (0); 10081 } 10082 10083 /* 10084 * If an enabling is to be enabled without having matched probes (that is, if 10085 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 10086 * enabling must be _primed_ by creating an ECB for every ECB description. 10087 * This must be done to assure that we know the number of speculations, the 10088 * number of aggregations, the minimum buffer size needed, etc. before we 10089 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually 10090 * enabling any probes, we create ECBs for every ECB description, but with a 10091 * NULL probe -- which is exactly what this function does. 10092 */ 10093 static void 10094 dtrace_enabling_prime(dtrace_state_t *state) 10095 { 10096 dtrace_enabling_t *enab; 10097 int i; 10098 10099 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 10100 ASSERT(enab->dten_vstate->dtvs_state != NULL); 10101 10102 if (enab->dten_vstate->dtvs_state != state) 10103 continue; 10104 10105 /* 10106 * We don't want to prime an enabling more than once, lest 10107 * we allow a malicious user to induce resource exhaustion. 10108 * (The ECBs that result from priming an enabling aren't 10109 * leaked -- but they also aren't deallocated until the 10110 * consumer state is destroyed.) 10111 */ 10112 if (enab->dten_primed) 10113 continue; 10114 10115 for (i = 0; i < enab->dten_ndesc; i++) { 10116 enab->dten_current = enab->dten_desc[i]; 10117 (void) dtrace_probe_enable(NULL, enab); 10118 } 10119 10120 enab->dten_primed = 1; 10121 } 10122 } 10123 10124 /* 10125 * Called to indicate that probes should be provided due to retained 10126 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 10127 * must take an initial lap through the enablings, calling the dtps_provide() 10128 * entry point explicitly to allow for autocreated probes.
10129 */ 10130 static void 10131 dtrace_enabling_provide(dtrace_provider_t *prv) 10132 { 10133 int i, all = 0; 10134 dtrace_probedesc_t desc; 10135 10136 ASSERT(MUTEX_HELD(&dtrace_lock)); 10137 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 10138 10139 if (prv == NULL) { 10140 all = 1; 10141 prv = dtrace_provider; 10142 } 10143 10144 do { 10145 dtrace_enabling_t *enab = dtrace_retained; 10146 void *parg = prv->dtpv_arg; 10147 10148 for (; enab != NULL; enab = enab->dten_next) { 10149 for (i = 0; i < enab->dten_ndesc; i++) { 10150 desc = enab->dten_desc[i]->dted_probe; 10151 mutex_exit(&dtrace_lock); 10152 prv->dtpv_pops.dtps_provide(parg, &desc); 10153 mutex_enter(&dtrace_lock); 10154 } 10155 } 10156 } while (all && (prv = prv->dtpv_next) != NULL); 10157 10158 mutex_exit(&dtrace_lock); 10159 dtrace_probe_provide(NULL, all ? NULL : prv); 10160 mutex_enter(&dtrace_lock); 10161 } 10162 10163 /* 10164 * DTrace DOF Functions 10165 */ 10166 /*ARGSUSED*/ 10167 static void 10168 dtrace_dof_error(dof_hdr_t *dof, const char *str) 10169 { 10170 if (dtrace_err_verbose) 10171 cmn_err(CE_WARN, "failed to process DOF: %s", str); 10172 10173 #ifdef DTRACE_ERRDEBUG 10174 dtrace_errdebug(str); 10175 #endif 10176 } 10177 10178 /* 10179 * Create DOF out of a currently enabled state. Right now, we only create 10180 * DOF containing the run-time options -- but this could be expanded to create 10181 * complete DOF representing the enabled state. 10182 */ 10183 static dof_hdr_t * 10184 dtrace_dof_create(dtrace_state_t *state) 10185 { 10186 dof_hdr_t *dof; 10187 dof_sec_t *sec; 10188 dof_optdesc_t *opt; 10189 int i, len = sizeof (dof_hdr_t) + 10190 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 10191 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 10192 10193 ASSERT(MUTEX_HELD(&dtrace_lock)); 10194 10195 dof = kmem_zalloc(len, KM_SLEEP); 10196 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 10197 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 10198 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 10199 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 10200 10201 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 10202 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 10203 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 10204 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 10205 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 10206 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 10207 10208 dof->dofh_flags = 0; 10209 dof->dofh_hdrsize = sizeof (dof_hdr_t); 10210 dof->dofh_secsize = sizeof (dof_sec_t); 10211 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 10212 dof->dofh_secoff = sizeof (dof_hdr_t); 10213 dof->dofh_loadsz = len; 10214 dof->dofh_filesz = len; 10215 dof->dofh_pad = 0; 10216 10217 /* 10218 * Fill in the option section header... 
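 *
 * The DOF being built here is laid out as follows (a sketch; 'len'
 * was computed above):
 *
 *	offset 0			dof_hdr_t
 *	sizeof (dof_hdr_t)		dof_sec_t (DOF_SECT_OPTDESC)
 *	sec->dofs_offset		dof_optdesc_t x DTRACEOPT_MAX
 *	offset len			end (dofh_loadsz == dofh_filesz)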
10219 */ 10220 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 10221 sec->dofs_type = DOF_SECT_OPTDESC; 10222 sec->dofs_align = sizeof (uint64_t); 10223 sec->dofs_flags = DOF_SECF_LOAD; 10224 sec->dofs_entsize = sizeof (dof_optdesc_t); 10225 10226 opt = (dof_optdesc_t *)((uintptr_t)sec + 10227 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 10228 10229 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 10230 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 10231 10232 for (i = 0; i < DTRACEOPT_MAX; i++) { 10233 opt[i].dofo_option = i; 10234 opt[i].dofo_strtab = DOF_SECIDX_NONE; 10235 opt[i].dofo_value = state->dts_options[i]; 10236 } 10237 10238 return (dof); 10239 } 10240 10241 static dof_hdr_t * 10242 dtrace_dof_copyin(uintptr_t uarg, int *errp) 10243 { 10244 dof_hdr_t hdr, *dof; 10245 10246 ASSERT(!MUTEX_HELD(&dtrace_lock)); 10247 10248 /* 10249 * First, we're going to copyin() the sizeof (dof_hdr_t). 10250 */ 10251 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 10252 dtrace_dof_error(NULL, "failed to copyin DOF header"); 10253 *errp = EFAULT; 10254 return (NULL); 10255 } 10256 10257 /* 10258 * Now we'll allocate the entire DOF and copy it in -- provided 10259 * that the length isn't outrageous. 10260 */ 10261 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 10262 dtrace_dof_error(&hdr, "load size exceeds maximum"); 10263 *errp = E2BIG; 10264 return (NULL); 10265 } 10266 10267 if (hdr.dofh_loadsz < sizeof (hdr)) { 10268 dtrace_dof_error(&hdr, "invalid load size"); 10269 *errp = EINVAL; 10270 return (NULL); 10271 } 10272 10273 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 10274 10275 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) { 10276 kmem_free(dof, hdr.dofh_loadsz); 10277 *errp = EFAULT; 10278 return (NULL); 10279 } 10280 10281 return (dof); 10282 } 10283 10284 static dof_hdr_t * 10285 dtrace_dof_property(const char *name) 10286 { 10287 uchar_t *buf; 10288 uint64_t loadsz; 10289 unsigned int len, i; 10290 dof_hdr_t *dof; 10291 10292 /* 10293 * Unfortunately, arrays of values in .conf files are always (and 10294 * only) interpreted to be integer arrays. We must read our DOF 10295 * as an integer array, and then squeeze it into a byte array. 10296 */ 10297 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 10298 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 10299 return (NULL); 10300 10301 for (i = 0; i < len; i++) 10302 buf[i] = (uchar_t)(((int *)buf)[i]); 10303 10304 if (len < sizeof (dof_hdr_t)) { 10305 ddi_prop_free(buf); 10306 dtrace_dof_error(NULL, "truncated header"); 10307 return (NULL); 10308 } 10309 10310 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 10311 ddi_prop_free(buf); 10312 dtrace_dof_error(NULL, "truncated DOF"); 10313 return (NULL); 10314 } 10315 10316 if (loadsz >= dtrace_dof_maxsize) { 10317 ddi_prop_free(buf); 10318 dtrace_dof_error(NULL, "oversized DOF"); 10319 return (NULL); 10320 } 10321 10322 dof = kmem_alloc(loadsz, KM_SLEEP); 10323 bcopy(buf, dof, loadsz); 10324 ddi_prop_free(buf); 10325 10326 return (dof); 10327 } 10328 10329 static void 10330 dtrace_dof_destroy(dof_hdr_t *dof) 10331 { 10332 kmem_free(dof, dof->dofh_loadsz); 10333 } 10334 10335 /* 10336 * Return the dof_sec_t pointer corresponding to a given section index. If the 10337 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 10338 * a type other than DOF_SECT_NONE is specified, the header is checked against 10339 * this type and NULL is returned if the types do not match.
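 *
 * A typical use is to resolve a section link while insisting on its
 * type, e.g. (a sketch; cf. dtrace_dof_probedesc(), below):
 *
 *	strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
 *
 * If the returned pointer is NULL, dtrace_dof_error() has already
 * been called, and the caller need only propagate the failure.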
10340 */ 10341 static dof_sec_t * 10342 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 10343 { 10344 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 10345 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 10346 10347 if (i >= dof->dofh_secnum) { 10348 dtrace_dof_error(dof, "referenced section index is invalid"); 10349 return (NULL); 10350 } 10351 10352 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 10353 dtrace_dof_error(dof, "referenced section is not loadable"); 10354 return (NULL); 10355 } 10356 10357 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 10358 dtrace_dof_error(dof, "referenced section is the wrong type"); 10359 return (NULL); 10360 } 10361 10362 return (sec); 10363 } 10364 10365 static dtrace_probedesc_t * 10366 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 10367 { 10368 dof_probedesc_t *probe; 10369 dof_sec_t *strtab; 10370 uintptr_t daddr = (uintptr_t)dof; 10371 uintptr_t str; 10372 size_t size; 10373 10374 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 10375 dtrace_dof_error(dof, "invalid probe section"); 10376 return (NULL); 10377 } 10378 10379 if (sec->dofs_align != sizeof (dof_secidx_t)) { 10380 dtrace_dof_error(dof, "bad alignment in probe description"); 10381 return (NULL); 10382 } 10383 10384 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 10385 dtrace_dof_error(dof, "truncated probe description"); 10386 return (NULL); 10387 } 10388 10389 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 10390 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 10391 10392 if (strtab == NULL) 10393 return (NULL); 10394 10395 str = daddr + strtab->dofs_offset; 10396 size = strtab->dofs_size; 10397 10398 if (probe->dofp_provider >= strtab->dofs_size) { 10399 dtrace_dof_error(dof, "corrupt probe provider"); 10400 return (NULL); 10401 } 10402 10403 (void) strncpy(desc->dtpd_provider, 10404 (char *)(str + probe->dofp_provider), 10405 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 10406 10407 if (probe->dofp_mod >= strtab->dofs_size) { 10408 dtrace_dof_error(dof, "corrupt probe module"); 10409 return (NULL); 10410 } 10411 10412 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 10413 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 10414 10415 if (probe->dofp_func >= strtab->dofs_size) { 10416 dtrace_dof_error(dof, "corrupt probe function"); 10417 return (NULL); 10418 } 10419 10420 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 10421 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 10422 10423 if (probe->dofp_name >= strtab->dofs_size) { 10424 dtrace_dof_error(dof, "corrupt probe name"); 10425 return (NULL); 10426 } 10427 10428 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 10429 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 10430 10431 return (desc); 10432 } 10433 10434 static dtrace_difo_t * 10435 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 10436 cred_t *cr) 10437 { 10438 dtrace_difo_t *dp; 10439 size_t ttl = 0; 10440 dof_difohdr_t *dofd; 10441 uintptr_t daddr = (uintptr_t)dof; 10442 size_t max = dtrace_difo_maxsize; 10443 int i, l, n; 10444 10445 static const struct { 10446 int section; 10447 int bufoffs; 10448 int lenoffs; 10449 int entsize; 10450 int align; 10451 const char *msg; 10452 } difo[] = { 10453 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 10454 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 10455 sizeof (dif_instr_t), "multiple DIF sections" }, 
10456 10457 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 10458 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 10459 sizeof (uint64_t), "multiple integer tables" }, 10460 10461 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 10462 offsetof(dtrace_difo_t, dtdo_strlen), 0, 10463 sizeof (char), "multiple string tables" }, 10464 10465 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 10466 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 10467 sizeof (uint_t), "multiple variable tables" }, 10468 10469 { DOF_SECT_NONE, 0, 0, 0, 0, NULL } 10470 }; 10471 10472 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 10473 dtrace_dof_error(dof, "invalid DIFO header section"); 10474 return (NULL); 10475 } 10476 10477 if (sec->dofs_align != sizeof (dof_secidx_t)) { 10478 dtrace_dof_error(dof, "bad alignment in DIFO header"); 10479 return (NULL); 10480 } 10481 10482 if (sec->dofs_size < sizeof (dof_difohdr_t) || 10483 sec->dofs_size % sizeof (dof_secidx_t)) { 10484 dtrace_dof_error(dof, "bad size in DIFO header"); 10485 return (NULL); 10486 } 10487 10488 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 10489 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 10490 10491 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 10492 dp->dtdo_rtype = dofd->dofd_rtype; 10493 10494 for (l = 0; l < n; l++) { 10495 dof_sec_t *subsec; 10496 void **bufp; 10497 uint32_t *lenp; 10498 10499 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 10500 dofd->dofd_links[l])) == NULL) 10501 goto err; /* invalid section link */ 10502 10503 if (ttl + subsec->dofs_size > max) { 10504 dtrace_dof_error(dof, "exceeds maximum size"); 10505 goto err; 10506 } 10507 10508 ttl += subsec->dofs_size; 10509 10510 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 10511 if (subsec->dofs_type != difo[i].section) 10512 continue; 10513 10514 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 10515 dtrace_dof_error(dof, "section not loaded"); 10516 goto err; 10517 } 10518 10519 if (subsec->dofs_align != difo[i].align) { 10520 dtrace_dof_error(dof, "bad alignment"); 10521 goto err; 10522 } 10523 10524 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 10525 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 10526 10527 if (*bufp != NULL) { 10528 dtrace_dof_error(dof, difo[i].msg); 10529 goto err; 10530 } 10531 10532 if (difo[i].entsize != subsec->dofs_entsize) { 10533 dtrace_dof_error(dof, "entry size mismatch"); 10534 goto err; 10535 } 10536 10537 if (subsec->dofs_entsize != 0 && 10538 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 10539 dtrace_dof_error(dof, "corrupt entry size"); 10540 goto err; 10541 } 10542 10543 *lenp = subsec->dofs_size; 10544 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 10545 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 10546 *bufp, subsec->dofs_size); 10547 10548 if (subsec->dofs_entsize != 0) 10549 *lenp /= subsec->dofs_entsize; 10550 10551 break; 10552 } 10553 10554 /* 10555 * If we encounter a loadable DIFO sub-section that is not 10556 * known to us, assume this is a broken program and fail. 10557 */ 10558 if (difo[i].section == DOF_SECT_NONE && 10559 (subsec->dofs_flags & DOF_SECF_LOAD)) { 10560 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 10561 goto err; 10562 } 10563 } 10564 10565 if (dp->dtdo_buf == NULL) { 10566 /* 10567 * We can't have a DIF object without DIF text.
10568 */ 10569 dtrace_dof_error(dof, "missing DIF text"); 10570 goto err; 10571 } 10572 10573 /* 10574 * Before we validate the DIF object, run through the variable table 10575 * looking for string variables -- if any of their sizes are zero, 10576 * we'll set them to the system-wide default string size. Note that 10577 * this should _not_ happen if the "strsize" option has been set -- 10578 * in this case, the compiler should have set the size to reflect the 10579 * setting of the option. 10580 */ 10581 for (i = 0; i < dp->dtdo_varlen; i++) { 10582 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10583 dtrace_diftype_t *t = &v->dtdv_type; 10584 10585 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 10586 continue; 10587 10588 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 10589 t->dtdt_size = dtrace_strsize_default; 10590 } 10591 10592 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 10593 goto err; 10594 10595 dtrace_difo_init(dp, vstate); 10596 return (dp); 10597 10598 err: 10599 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 10600 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 10601 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 10602 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 10603 10604 kmem_free(dp, sizeof (dtrace_difo_t)); 10605 return (NULL); 10606 } 10607 10608 static dtrace_predicate_t * 10609 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 10610 cred_t *cr) 10611 { 10612 dtrace_difo_t *dp; 10613 10614 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 10615 return (NULL); 10616 10617 return (dtrace_predicate_create(dp)); 10618 } 10619 10620 static dtrace_actdesc_t * 10621 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 10622 cred_t *cr) 10623 { 10624 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 10625 dof_actdesc_t *desc; 10626 dof_sec_t *difosec; 10627 size_t offs; 10628 uintptr_t daddr = (uintptr_t)dof; 10629 uint64_t arg; 10630 dtrace_actkind_t kind; 10631 10632 if (sec->dofs_type != DOF_SECT_ACTDESC) { 10633 dtrace_dof_error(dof, "invalid action section"); 10634 return (NULL); 10635 } 10636 10637 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 10638 dtrace_dof_error(dof, "truncated action description"); 10639 return (NULL); 10640 } 10641 10642 if (sec->dofs_align != sizeof (uint64_t)) { 10643 dtrace_dof_error(dof, "bad alignment in action description"); 10644 return (NULL); 10645 } 10646 10647 if (sec->dofs_size < sec->dofs_entsize) { 10648 dtrace_dof_error(dof, "section entry size exceeds total size"); 10649 return (NULL); 10650 } 10651 10652 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 10653 dtrace_dof_error(dof, "bad entry size in action description"); 10654 return (NULL); 10655 } 10656 10657 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 10658 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 10659 return (NULL); 10660 } 10661 10662 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 10663 desc = (dof_actdesc_t *)(daddr + 10664 (uintptr_t)sec->dofs_offset + offs); 10665 kind = (dtrace_actkind_t)desc->dofa_kind; 10666 10667 if (DTRACEACT_ISPRINTFLIKE(kind) && 10668 (kind != DTRACEACT_PRINTA || 10669 desc->dofa_strtab != DOF_SECIDX_NONE)) { 10670 dof_sec_t *strtab; 10671 char *str, *fmt; 10672 uint64_t i; 10673 10674 /* 10675 * printf()-like actions must have a format string.
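 *
 * For example (a sketch): an action compiled from printf("%d", x)
 * arrives with dofa_kind set to DTRACEACT_PRINTF, dofa_strtab naming
 * a DOF_SECT_STRTAB section, and dofa_arg holding the offset of the
 * NUL-terminated format within that string table; the code below
 * copies the format out of the DOF so that it may outlive it.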
10676 */ 10677 if ((strtab = dtrace_dof_sect(dof, 10678 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 10679 goto err; 10680 10681 str = (char *)((uintptr_t)dof + 10682 (uintptr_t)strtab->dofs_offset); 10683 10684 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 10685 if (str[i] == '\0') 10686 break; 10687 } 10688 10689 if (i >= strtab->dofs_size) { 10690 dtrace_dof_error(dof, "bogus format string"); 10691 goto err; 10692 } 10693 10694 if (i == desc->dofa_arg) { 10695 dtrace_dof_error(dof, "empty format string"); 10696 goto err; 10697 } 10698 10699 i -= desc->dofa_arg; 10700 fmt = kmem_alloc(i + 1, KM_SLEEP); 10701 bcopy(&str[desc->dofa_arg], fmt, i + 1); 10702 arg = (uint64_t)(uintptr_t)fmt; 10703 } else { 10704 if (kind == DTRACEACT_PRINTA) { 10705 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 10706 arg = 0; 10707 } else { 10708 arg = desc->dofa_arg; 10709 } 10710 } 10711 10712 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 10713 desc->dofa_uarg, arg); 10714 10715 if (last != NULL) { 10716 last->dtad_next = act; 10717 } else { 10718 first = act; 10719 } 10720 10721 last = act; 10722 10723 if (desc->dofa_difo == DOF_SECIDX_NONE) 10724 continue; 10725 10726 if ((difosec = dtrace_dof_sect(dof, 10727 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 10728 goto err; 10729 10730 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 10731 10732 if (act->dtad_difo == NULL) 10733 goto err; 10734 } 10735 10736 ASSERT(first != NULL); 10737 return (first); 10738 10739 err: 10740 for (act = first; act != NULL; act = next) { 10741 next = act->dtad_next; 10742 dtrace_actdesc_release(act, vstate); 10743 } 10744 10745 return (NULL); 10746 } 10747 10748 static dtrace_ecbdesc_t * 10749 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 10750 cred_t *cr) 10751 { 10752 dtrace_ecbdesc_t *ep; 10753 dof_ecbdesc_t *ecb; 10754 dtrace_probedesc_t *desc; 10755 dtrace_predicate_t *pred = NULL; 10756 10757 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 10758 dtrace_dof_error(dof, "truncated ECB description"); 10759 return (NULL); 10760 } 10761 10762 if (sec->dofs_align != sizeof (uint64_t)) { 10763 dtrace_dof_error(dof, "bad alignment in ECB description"); 10764 return (NULL); 10765 } 10766 10767 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 10768 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 10769 10770 if (sec == NULL) 10771 return (NULL); 10772 10773 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 10774 ep->dted_uarg = ecb->dofe_uarg; 10775 desc = &ep->dted_probe; 10776 10777 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 10778 goto err; 10779 10780 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 10781 if ((sec = dtrace_dof_sect(dof, 10782 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 10783 goto err; 10784 10785 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 10786 goto err; 10787 10788 ep->dted_pred.dtpdd_predicate = pred; 10789 } 10790 10791 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 10792 if ((sec = dtrace_dof_sect(dof, 10793 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 10794 goto err; 10795 10796 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 10797 10798 if (ep->dted_action == NULL) 10799 goto err; 10800 } 10801 10802 return (ep); 10803 10804 err: 10805 if (pred != NULL) 10806 dtrace_predicate_release(pred, vstate); 10807 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 10808 return (NULL); 10809 } 10810 10811 /* 10812 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 10813 * 
specified DOF. At present, this amounts to simply adding 'ubase' to the 10814 * site of any user SETX relocations to account for load object base address. 10815 * In the future, if we need other relocations, this function can be extended. 10816 */ 10817 static int 10818 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 10819 { 10820 uintptr_t daddr = (uintptr_t)dof; 10821 dof_relohdr_t *dofr = 10822 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 10823 dof_sec_t *ss, *rs, *ts; 10824 dof_relodesc_t *r; 10825 uint_t i, n; 10826 10827 if (sec->dofs_size < sizeof (dof_relohdr_t) || 10828 sec->dofs_align != sizeof (dof_secidx_t)) { 10829 dtrace_dof_error(dof, "invalid relocation header"); 10830 return (-1); 10831 } 10832 10833 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 10834 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 10835 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 10836 10837 if (ss == NULL || rs == NULL || ts == NULL) 10838 return (-1); /* dtrace_dof_error() has been called already */ 10839 10840 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 10841 rs->dofs_align != sizeof (uint64_t)) { 10842 dtrace_dof_error(dof, "invalid relocation section"); 10843 return (-1); 10844 } 10845 10846 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 10847 n = rs->dofs_size / rs->dofs_entsize; 10848 10849 for (i = 0; i < n; i++) { 10850 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 10851 10852 switch (r->dofr_type) { 10853 case DOF_RELO_NONE: 10854 break; 10855 case DOF_RELO_SETX: 10856 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 10857 sizeof (uint64_t) > ts->dofs_size) { 10858 dtrace_dof_error(dof, "bad relocation offset"); 10859 return (-1); 10860 } 10861 10862 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 10863 dtrace_dof_error(dof, "misaligned setx relo"); 10864 return (-1); 10865 } 10866 10867 *(uint64_t *)taddr += ubase; 10868 break; 10869 default: 10870 dtrace_dof_error(dof, "invalid relocation type"); 10871 return (-1); 10872 } 10873 10874 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 10875 } 10876 10877 return (0); 10878 } 10879 10880 /* 10881 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 10882 * header: it should be at the front of a memory region that is at least 10883 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 10884 * size. It need not be validated in any other way. 10885 */ 10886 static int 10887 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 10888 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 10889 { 10890 uint64_t len = dof->dofh_loadsz, seclen; 10891 uintptr_t daddr = (uintptr_t)dof; 10892 dtrace_ecbdesc_t *ep; 10893 dtrace_enabling_t *enab; 10894 uint_t i; 10895 10896 ASSERT(MUTEX_HELD(&dtrace_lock)); 10897 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 10898 10899 /* 10900 * Check the DOF header identification bytes. In addition to checking 10901 * valid settings, we also verify that unused bits/bytes are zeroed so 10902 * we can use them later without fear of regressing existing binaries. 
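 *
 * For a well-formed, native, version 2 DOF, the identification bytes
 * would be (a sketch of the checks below):
 *
 *	dofh_ident[DOF_ID_MAG0..MAG3]	DOF_MAG_STRING ("\177DOF")
 *	dofh_ident[DOF_ID_MODEL]	DOF_MODEL_ILP32 or DOF_MODEL_LP64
 *	dofh_ident[DOF_ID_ENCODING]	DOF_ENCODE_NATIVE
 *	dofh_ident[DOF_ID_VERSION]	DOF_VERSION_1 or DOF_VERSION_2
 *	dofh_ident[DOF_ID_DIFVERS]	DIF_VERSION_2
 *	dofh_ident[DOF_ID_DIFIREG]	at most DIF_DIR_NREGS
 *	dofh_ident[DOF_ID_DIFTREG]	at most DIF_DTR_NREGS
 *	dofh_ident[DOF_ID_PAD] onward	zero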
10903 */ 10904 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 10905 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 10906 dtrace_dof_error(dof, "DOF magic string mismatch"); 10907 return (-1); 10908 } 10909 10910 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 10911 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 10912 dtrace_dof_error(dof, "DOF has invalid data model"); 10913 return (-1); 10914 } 10915 10916 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 10917 dtrace_dof_error(dof, "DOF encoding mismatch"); 10918 return (-1); 10919 } 10920 10921 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 10922 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 10923 dtrace_dof_error(dof, "DOF version mismatch"); 10924 return (-1); 10925 } 10926 10927 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 10928 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 10929 return (-1); 10930 } 10931 10932 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 10933 dtrace_dof_error(dof, "DOF uses too many integer registers"); 10934 return (-1); 10935 } 10936 10937 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 10938 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 10939 return (-1); 10940 } 10941 10942 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 10943 if (dof->dofh_ident[i] != 0) { 10944 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 10945 return (-1); 10946 } 10947 } 10948 10949 if (dof->dofh_flags & ~DOF_FL_VALID) { 10950 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 10951 return (-1); 10952 } 10953 10954 if (dof->dofh_secsize == 0) { 10955 dtrace_dof_error(dof, "zero section header size"); 10956 return (-1); 10957 } 10958 10959 /* 10960 * Check that the section headers don't exceed the amount of DOF 10961 * data. Note that we cast the section size and number of sections 10962 * to uint64_t's to prevent possible overflow in the multiplication. 10963 */ 10964 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 10965 10966 if (dof->dofh_secoff > len || seclen > len || 10967 dof->dofh_secoff + seclen > len) { 10968 dtrace_dof_error(dof, "truncated section headers"); 10969 return (-1); 10970 } 10971 10972 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 10973 dtrace_dof_error(dof, "misaligned section headers"); 10974 return (-1); 10975 } 10976 10977 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 10978 dtrace_dof_error(dof, "misaligned section size"); 10979 return (-1); 10980 } 10981 10982 /* 10983 * Take an initial pass through the section headers to be sure that 10984 * the headers don't have stray offsets. If the 'noprobes' flag is 10985 * set, do not permit sections relating to providers, probes, or args. 
10986 */ 10987 for (i = 0; i < dof->dofh_secnum; i++) { 10988 dof_sec_t *sec = (dof_sec_t *)(daddr + 10989 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 10990 10991 if (noprobes) { 10992 switch (sec->dofs_type) { 10993 case DOF_SECT_PROVIDER: 10994 case DOF_SECT_PROBES: 10995 case DOF_SECT_PRARGS: 10996 case DOF_SECT_PROFFS: 10997 dtrace_dof_error(dof, "illegal sections " 10998 "for enabling"); 10999 return (-1); 11000 } 11001 } 11002 11003 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 11004 continue; /* just ignore non-loadable sections */ 11005 11006 if (sec->dofs_align & (sec->dofs_align - 1)) { 11007 dtrace_dof_error(dof, "bad section alignment"); 11008 return (-1); 11009 } 11010 11011 if (sec->dofs_offset & (sec->dofs_align - 1)) { 11012 dtrace_dof_error(dof, "misaligned section"); 11013 return (-1); 11014 } 11015 11016 if (sec->dofs_offset > len || sec->dofs_size > len || 11017 sec->dofs_offset + sec->dofs_size > len) { 11018 dtrace_dof_error(dof, "corrupt section header"); 11019 return (-1); 11020 } 11021 11022 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 11023 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 11024 dtrace_dof_error(dof, "non-terminating string table"); 11025 return (-1); 11026 } 11027 } 11028 11029 /* 11030 * Take a second pass through the sections and locate and perform any 11031 * relocations that are present. We do this after the first pass to 11032 * be sure that all sections have had their headers validated. 11033 */ 11034 for (i = 0; i < dof->dofh_secnum; i++) { 11035 dof_sec_t *sec = (dof_sec_t *)(daddr + 11036 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 11037 11038 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 11039 continue; /* skip sections that are not loadable */ 11040 11041 switch (sec->dofs_type) { 11042 case DOF_SECT_URELHDR: 11043 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 11044 return (-1); 11045 break; 11046 } 11047 } 11048 11049 if ((enab = *enabp) == NULL) 11050 enab = *enabp = dtrace_enabling_create(vstate); 11051 11052 for (i = 0; i < dof->dofh_secnum; i++) { 11053 dof_sec_t *sec = (dof_sec_t *)(daddr + 11054 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 11055 11056 if (sec->dofs_type != DOF_SECT_ECBDESC) 11057 continue; 11058 11059 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 11060 dtrace_enabling_destroy(enab); 11061 *enabp = NULL; 11062 return (-1); 11063 } 11064 11065 dtrace_enabling_add(enab, ep); 11066 } 11067 11068 return (0); 11069 } 11070 11071 /* 11072 * Process DOF for any options. This routine assumes that the DOF has been 11073 * at least processed by dtrace_dof_slurp(). 
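 *
 * Each entry in a DOF_SECT_OPTDESC section is a dof_optdesc_t; a
 * consumer setting "bufsize" to 4m, for example, would present an
 * entry of the form (a sketch; the value is illustrative):
 *
 *	desc->dofo_option = DTRACEOPT_BUFSIZE;
 *	desc->dofo_strtab = DOF_SECIDX_NONE;
 *	desc->dofo_value = 4 * 1024 * 1024;
 *
 * Each such entry is validated and handed to dtrace_state_option().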
11074 */ 11075 static int 11076 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 11077 { 11078 int i, rval; 11079 uint32_t entsize; 11080 size_t offs; 11081 dof_optdesc_t *desc; 11082 11083 for (i = 0; i < dof->dofh_secnum; i++) { 11084 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 11085 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 11086 11087 if (sec->dofs_type != DOF_SECT_OPTDESC) 11088 continue; 11089 11090 if (sec->dofs_align != sizeof (uint64_t)) { 11091 dtrace_dof_error(dof, "bad alignment in " 11092 "option description"); 11093 return (EINVAL); 11094 } 11095 11096 if ((entsize = sec->dofs_entsize) == 0) { 11097 dtrace_dof_error(dof, "zeroed option entry size"); 11098 return (EINVAL); 11099 } 11100 11101 if (entsize < sizeof (dof_optdesc_t)) { 11102 dtrace_dof_error(dof, "bad option entry size"); 11103 return (EINVAL); 11104 } 11105 11106 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 11107 desc = (dof_optdesc_t *)((uintptr_t)dof + 11108 (uintptr_t)sec->dofs_offset + offs); 11109 11110 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 11111 dtrace_dof_error(dof, "non-zero option string"); 11112 return (EINVAL); 11113 } 11114 11115 if (desc->dofo_value == DTRACEOPT_UNSET) { 11116 dtrace_dof_error(dof, "unset option"); 11117 return (EINVAL); 11118 } 11119 11120 if ((rval = dtrace_state_option(state, 11121 desc->dofo_option, desc->dofo_value)) != 0) { 11122 dtrace_dof_error(dof, "rejected option"); 11123 return (rval); 11124 } 11125 } 11126 } 11127 11128 return (0); 11129 } 11130 11131 /* 11132 * DTrace Consumer State Functions 11133 */ 11134 int 11135 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 11136 { 11137 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 11138 void *base; 11139 uintptr_t limit; 11140 dtrace_dynvar_t *dvar, *next, *start; 11141 int i; 11142 11143 ASSERT(MUTEX_HELD(&dtrace_lock)); 11144 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 11145 11146 bzero(dstate, sizeof (dtrace_dstate_t)); 11147 11148 if ((dstate->dtds_chunksize = chunksize) == 0) 11149 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 11150 11151 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 11152 size = min; 11153 11154 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 11155 return (ENOMEM); 11156 11157 dstate->dtds_size = size; 11158 dstate->dtds_base = base; 11159 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 11160 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 11161 11162 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 11163 11164 if (hashsize != 1 && (hashsize & 1)) 11165 hashsize--; 11166 11167 dstate->dtds_hashsize = hashsize; 11168 dstate->dtds_hash = dstate->dtds_base; 11169 11170 /* 11171 * Set all of our hash buckets to point to the single sink, and (if 11172 * it hasn't already been set), set the sink's hash value to be the 11173 * sink sentinel value. The sink is needed for dynamic variable 11174 * lookups to know that they have iterated over an entire, valid hash 11175 * chain. 11176 */ 11177 for (i = 0; i < hashsize; i++) 11178 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 11179 11180 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 11181 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 11182 11183 /* 11184 * Determine number of active CPUs. Divide free list evenly among 11185 * active CPUs. 
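 *
 * For example (a sketch; all numbers are illustrative): with a 1m
 * dynamic variable space of which the hash table consumes 64k, an
 * NCPU of 4 and a 256-byte chunk size, maxper is (1m - 64k) / 4 =
 * 240k -- already a multiple of the chunk size -- so each CPU's free
 * list receives 240k worth of chunks, with any remainder going to
 * the last CPU.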
11186 */ 11187 start = (dtrace_dynvar_t *) 11188 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 11189 limit = (uintptr_t)base + size; 11190 11191 maxper = (limit - (uintptr_t)start) / NCPU; 11192 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 11193 11194 for (i = 0; i < NCPU; i++) { 11195 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 11196 11197 /* 11198 * If we don't even have enough chunks to make it once through 11199 * NCPUs, we're just going to allocate everything to the first 11200 * CPU. And if we're on the last CPU, we're going to allocate 11201 * whatever is left over. In either case, we set the limit to 11202 * be the limit of the dynamic variable space. 11203 */ 11204 if (maxper == 0 || i == NCPU - 1) { 11205 limit = (uintptr_t)base + size; 11206 start = NULL; 11207 } else { 11208 limit = (uintptr_t)start + maxper; 11209 start = (dtrace_dynvar_t *)limit; 11210 } 11211 11212 ASSERT(limit <= (uintptr_t)base + size); 11213 11214 for (;;) { 11215 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 11216 dstate->dtds_chunksize); 11217 11218 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 11219 break; 11220 11221 dvar->dtdv_next = next; 11222 dvar = next; 11223 } 11224 11225 if (maxper == 0) 11226 break; 11227 } 11228 11229 return (0); 11230 } 11231 11232 void 11233 dtrace_dstate_fini(dtrace_dstate_t *dstate) 11234 { 11235 ASSERT(MUTEX_HELD(&cpu_lock)); 11236 11237 if (dstate->dtds_base == NULL) 11238 return; 11239 11240 kmem_free(dstate->dtds_base, dstate->dtds_size); 11241 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 11242 } 11243 11244 static void 11245 dtrace_vstate_fini(dtrace_vstate_t *vstate) 11246 { 11247 /* 11248 * Logical XOR, where are you? 11249 */ 11250 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 11251 11252 if (vstate->dtvs_nglobals > 0) { 11253 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 11254 sizeof (dtrace_statvar_t *)); 11255 } 11256 11257 if (vstate->dtvs_ntlocals > 0) { 11258 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 11259 sizeof (dtrace_difv_t)); 11260 } 11261 11262 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 11263 11264 if (vstate->dtvs_nlocals > 0) { 11265 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 11266 sizeof (dtrace_statvar_t *)); 11267 } 11268 } 11269 11270 static void 11271 dtrace_state_clean(dtrace_state_t *state) 11272 { 11273 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 11274 return; 11275 11276 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 11277 dtrace_speculation_clean(state); 11278 } 11279 11280 static void 11281 dtrace_state_deadman(dtrace_state_t *state) 11282 { 11283 hrtime_t now; 11284 11285 dtrace_sync(); 11286 11287 now = dtrace_gethrtime(); 11288 11289 if (state != dtrace_anon.dta_state && 11290 now - state->dts_laststatus >= dtrace_deadman_user) 11291 return; 11292 11293 /* 11294 * We must be sure that dts_alive never appears to be less than the 11295 * value upon entry to dtrace_state_deadman(), and because we lack a 11296 * dtrace_cas64(), we cannot store to it atomically. We thus instead 11297 * store INT64_MAX to it, followed by a memory barrier, followed by 11298 * the new value. This assures that dts_alive never appears to be 11299 * less than its true value, regardless of the order in which the 11300 * stores to the underlying storage are issued. 
11301 */ 11302 state->dts_alive = INT64_MAX; 11303 dtrace_membar_producer(); 11304 state->dts_alive = now; 11305 } 11306 11307 dtrace_state_t * 11308 dtrace_state_create(dev_t *devp, cred_t *cr) 11309 { 11310 minor_t minor; 11311 major_t major; 11312 char c[30]; 11313 dtrace_state_t *state; 11314 dtrace_optval_t *opt; 11315 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 11316 11317 ASSERT(MUTEX_HELD(&dtrace_lock)); 11318 ASSERT(MUTEX_HELD(&cpu_lock)); 11319 11320 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 11321 VM_BESTFIT | VM_SLEEP); 11322 11323 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 11324 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 11325 return (NULL); 11326 } 11327 11328 state = ddi_get_soft_state(dtrace_softstate, minor); 11329 state->dts_epid = DTRACE_EPIDNONE + 1; 11330 11331 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor); 11332 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 11333 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 11334 11335 if (devp != NULL) { 11336 major = getemajor(*devp); 11337 } else { 11338 major = ddi_driver_major(dtrace_devi); 11339 } 11340 11341 state->dts_dev = makedevice(major, minor); 11342 11343 if (devp != NULL) 11344 *devp = state->dts_dev; 11345 11346 /* 11347 * We allocate NCPU buffers. On the one hand, this can be quite 11348 * a bit of memory per instance (nearly 36K on a Starcat). On the 11349 * other hand, it saves an additional memory reference in the probe 11350 * path. 11351 */ 11352 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 11353 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 11354 state->dts_cleaner = CYCLIC_NONE; 11355 state->dts_deadman = CYCLIC_NONE; 11356 state->dts_vstate.dtvs_state = state; 11357 11358 for (i = 0; i < DTRACEOPT_MAX; i++) 11359 state->dts_options[i] = DTRACEOPT_UNSET; 11360 11361 /* 11362 * Set the default options. 11363 */ 11364 opt = state->dts_options; 11365 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 11366 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 11367 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 11368 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 11369 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 11370 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 11371 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 11372 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 11373 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 11374 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 11375 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 11376 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 11377 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 11378 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 11379 11380 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 11381 11382 /* 11383 * Depending on the user credentials, we set flag bits which alter probe 11384 * visibility or the amount of destructiveness allowed. In the case of 11385 * actual anonymous tracing, or the possession of all privileges, all of 11386 * the normal checks are bypassed. 11387 */ 11388 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 11389 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 11390 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 11391 } else { 11392 /* 11393 * Set up the credentials for this instantiation. 
We take a 11394 * hold on the credential to prevent it from disappearing on 11395 * us; this in turn prevents the zone_t referenced by this 11396 * credential from disappearing. This means that we can 11397 * examine the credential and the zone from probe context. 11398 */ 11399 crhold(cr); 11400 state->dts_cred.dcr_cred = cr; 11401 11402 /* 11403 * CRA_PROC means "we have *some* privilege for dtrace" and 11404 * unlocks the use of variables like pid, zonename, etc. 11405 */ 11406 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 11407 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 11408 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 11409 } 11410 11411 /* 11412 * dtrace_user allows use of syscall and profile providers. 11413 * If the user also has proc_owner and/or proc_zone, we 11414 * extend the scope to include additional visibility and 11415 * destructive power. 11416 */ 11417 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 11418 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 11419 state->dts_cred.dcr_visible |= 11420 DTRACE_CRV_ALLPROC; 11421 11422 state->dts_cred.dcr_action |= 11423 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 11424 } 11425 11426 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 11427 state->dts_cred.dcr_visible |= 11428 DTRACE_CRV_ALLZONE; 11429 11430 state->dts_cred.dcr_action |= 11431 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 11432 } 11433 11434 /* 11435 * If we have all privs in whatever zone this is, 11436 * we can do destructive things to processes which 11437 * have altered credentials. 11438 */ 11439 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 11440 cr->cr_zone->zone_privset)) { 11441 state->dts_cred.dcr_action |= 11442 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 11443 } 11444 } 11445 11446 /* 11447 * Holding the dtrace_kernel privilege also implies that 11448 * the user has the dtrace_user privilege from a visibility 11449 * perspective. But without further privileges, some 11450 * destructive actions are not available. 11451 */ 11452 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 11453 /* 11454 * Make all probes in all zones visible. However, 11455 * this doesn't mean that all actions become available 11456 * to all zones. 11457 */ 11458 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 11459 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 11460 11461 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 11462 DTRACE_CRA_PROC; 11463 /* 11464 * Holding proc_owner means that destructive actions 11465 * for *this* zone are allowed. 11466 */ 11467 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 11468 state->dts_cred.dcr_action |= 11469 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 11470 11471 /* 11472 * Holding proc_zone means that destructive actions 11473 * for this user/group ID in all zones are allowed. 11474 */ 11475 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 11476 state->dts_cred.dcr_action |= 11477 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 11478 11479 /* 11480 * If we have all privs in whatever zone this is, 11481 * we can do destructive things to processes which 11482 * have altered credentials. 11483 */ 11484 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 11485 cr->cr_zone->zone_privset)) { 11486 state->dts_cred.dcr_action |= 11487 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 11488 } 11489 } 11490 11491 /* 11492 * Holding the dtrace_proc privilege gives control over fasttrap 11493 * and pid providers. We need to grant wider destructive 11494 * privileges in the event that the user has proc_owner and/or 11495 * proc_zone.
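 *
 * To summarize with an example (a sketch of the checks below): a
 * user holding dtrace_proc and proc_owner is granted
 * DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; holding proc_zone as well
 * adds DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE.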
11496 */ 11497 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 11498 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 11499 state->dts_cred.dcr_action |= 11500 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 11501 11502 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 11503 state->dts_cred.dcr_action |= 11504 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 11505 } 11506 } 11507 11508 return (state); 11509 } 11510 11511 static int 11512 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 11513 { 11514 dtrace_optval_t *opt = state->dts_options, size; 11515 processorid_t cpu; 11516 int flags = 0, rval; 11517 11518 ASSERT(MUTEX_HELD(&dtrace_lock)); 11519 ASSERT(MUTEX_HELD(&cpu_lock)); 11520 ASSERT(which < DTRACEOPT_MAX); 11521 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 11522 (state == dtrace_anon.dta_state && 11523 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 11524 11525 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 11526 return (0); 11527 11528 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 11529 cpu = opt[DTRACEOPT_CPU]; 11530 11531 if (which == DTRACEOPT_SPECSIZE) 11532 flags |= DTRACEBUF_NOSWITCH; 11533 11534 if (which == DTRACEOPT_BUFSIZE) { 11535 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 11536 flags |= DTRACEBUF_RING; 11537 11538 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 11539 flags |= DTRACEBUF_FILL; 11540 11541 if (state != dtrace_anon.dta_state || 11542 state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 11543 flags |= DTRACEBUF_INACTIVE; 11544 } 11545 11546 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) { 11547 /* 11548 * The size must be 8-byte aligned. If the size is not 8-byte 11549 * aligned, drop it down by the difference. 11550 */ 11551 if (size & (sizeof (uint64_t) - 1)) 11552 size -= size & (sizeof (uint64_t) - 1); 11553 11554 if (size < state->dts_reserve) { 11555 /* 11556 * Buffers always must be large enough to accommodate 11557 * their prereserved space. We return E2BIG instead 11558 * of ENOMEM in this case to allow for user-level 11559 * software to differentiate the cases. 11560 */ 11561 return (E2BIG); 11562 } 11563 11564 rval = dtrace_buffer_alloc(buf, size, flags, cpu); 11565 11566 if (rval != ENOMEM) { 11567 opt[which] = size; 11568 return (rval); 11569 } 11570 11571 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 11572 return (rval); 11573 } 11574 11575 return (ENOMEM); 11576 } 11577 11578 static int 11579 dtrace_state_buffers(dtrace_state_t *state) 11580 { 11581 dtrace_speculation_t *spec = state->dts_speculations; 11582 int rval, i; 11583 11584 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 11585 DTRACEOPT_BUFSIZE)) != 0) 11586 return (rval); 11587 11588 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 11589 DTRACEOPT_AGGSIZE)) != 0) 11590 return (rval); 11591 11592 for (i = 0; i < state->dts_nspeculations; i++) { 11593 if ((rval = dtrace_state_buffer(state, 11594 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 11595 return (rval); 11596 } 11597 11598 return (0); 11599 } 11600 11601 static void 11602 dtrace_state_prereserve(dtrace_state_t *state) 11603 { 11604 dtrace_ecb_t *ecb; 11605 dtrace_probe_t *probe; 11606 11607 state->dts_reserve = 0; 11608 11609 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 11610 return; 11611 11612 /* 11613 * If our buffer policy is a "fill" buffer policy, we need to set the 11614 * prereserved space to be the space required by the END probes. 
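 *
 * For example (a sketch; the sizes are illustrative): if this state
 * has two ECBs enabled on the END probe, needing 32 and 48 bytes of
 * record space with 8 bytes of required alignment each, dts_reserve
 * becomes (32 + 8) + (48 + 8) = 96 bytes; dtrace_state_buffer() will
 * subsequently refuse (with E2BIG) any buffer smaller than this.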
11615 */ 11616 probe = dtrace_probes[dtrace_probeid_end - 1]; 11617 ASSERT(probe != NULL); 11618 11619 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 11620 if (ecb->dte_state != state) 11621 continue; 11622 11623 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 11624 } 11625 } 11626 11627 static int 11628 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 11629 { 11630 dtrace_optval_t *opt = state->dts_options, sz, nspec; 11631 dtrace_speculation_t *spec; 11632 dtrace_buffer_t *buf; 11633 cyc_handler_t hdlr; 11634 cyc_time_t when; 11635 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 11636 dtrace_icookie_t cookie; 11637 11638 mutex_enter(&cpu_lock); 11639 mutex_enter(&dtrace_lock); 11640 11641 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 11642 rval = EBUSY; 11643 goto out; 11644 } 11645 11646 /* 11647 * Before we can perform any checks, we must prime all of the 11648 * retained enablings that correspond to this state. 11649 */ 11650 dtrace_enabling_prime(state); 11651 11652 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 11653 rval = EACCES; 11654 goto out; 11655 } 11656 11657 dtrace_state_prereserve(state); 11658 11659 /* 11660 * What we want to do now is try to allocate our speculations. 11661 * We do not automatically resize the number of speculations; if 11662 * this fails, we will fail the operation. 11663 */ 11664 nspec = opt[DTRACEOPT_NSPEC]; 11665 ASSERT(nspec != DTRACEOPT_UNSET); 11666 11667 if (nspec > INT_MAX) { 11668 rval = ENOMEM; 11669 goto out; 11670 } 11671 11672 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP); 11673 11674 if (spec == NULL) { 11675 rval = ENOMEM; 11676 goto out; 11677 } 11678 11679 state->dts_speculations = spec; 11680 state->dts_nspeculations = (int)nspec; 11681 11682 for (i = 0; i < nspec; i++) { 11683 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) { 11684 rval = ENOMEM; 11685 goto err; 11686 } 11687 11688 spec[i].dtsp_buffer = buf; 11689 } 11690 11691 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 11692 if (dtrace_anon.dta_state == NULL) { 11693 rval = ENOENT; 11694 goto out; 11695 } 11696 11697 if (state->dts_necbs != 0) { 11698 rval = EALREADY; 11699 goto out; 11700 } 11701 11702 state->dts_anon = dtrace_anon_grab(); 11703 ASSERT(state->dts_anon != NULL); 11704 state = state->dts_anon; 11705 11706 /* 11707 * We want "grabanon" to be set in the grabbed state, so we'll 11708 * copy that option value from the grabbing state into the 11709 * grabbed state. 11710 */ 11711 state->dts_options[DTRACEOPT_GRABANON] = 11712 opt[DTRACEOPT_GRABANON]; 11713 11714 *cpu = dtrace_anon.dta_beganon; 11715 11716 /* 11717 * If the anonymous state is active (as it almost certainly 11718 * is if the anonymous enabling ultimately matched anything), 11719 * we don't allow any further option processing -- but we 11720 * don't return failure. 11721 */ 11722 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 11723 goto out; 11724 } 11725 11726 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 11727 opt[DTRACEOPT_AGGSIZE] != 0) { 11728 if (state->dts_aggregations == NULL) { 11729 /* 11730 * We're not going to create an aggregation buffer 11731 * because we don't have any ECBs that contain 11732 * aggregations -- set this option to 0. 11733 */ 11734 opt[DTRACEOPT_AGGSIZE] = 0; 11735 } else { 11736 /* 11737 * If we have an aggregation buffer, we must also have 11738 * a buffer to use as scratch.
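 *
 * For example (a sketch; the size is illustrative): with "aggsize"
 * set and a computed dts_needed of 64k, a "bufsize" that is unset or
 * smaller than 64k is raised to 64k below, so that aggregating ECBs
 * have principal-buffer scratch space in which to stage records.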
11739 */ 11740 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 11741 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 11742 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 11743 } 11744 } 11745 } 11746 11747 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 11748 opt[DTRACEOPT_SPECSIZE] != 0) { 11749 if (!state->dts_speculates) { 11750 /* 11751 * We're not going to create speculation buffers 11752 * because we don't have any ECBs that actually 11753 * speculate -- set the speculation size to 0. 11754 */ 11755 opt[DTRACEOPT_SPECSIZE] = 0; 11756 } 11757 } 11758 11759 /* 11760 * The bare minimum size for any buffer that we're actually going to 11761 * do anything to is sizeof (uint64_t). 11762 */ 11763 sz = sizeof (uint64_t); 11764 11765 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 11766 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 11767 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 11768 /* 11769 * A buffer size has been explicitly set to 0 (or to a size 11770 * that will be adjusted to 0) and we need the space -- we 11771 * need to return failure. We return ENOSPC to differentiate 11772 * it from failing to allocate a buffer due to failure to meet 11773 * the reserve (for which we return E2BIG). 11774 */ 11775 rval = ENOSPC; 11776 goto out; 11777 } 11778 11779 if ((rval = dtrace_state_buffers(state)) != 0) 11780 goto err; 11781 11782 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 11783 sz = dtrace_dstate_defsize; 11784 11785 do { 11786 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 11787 11788 if (rval == 0) 11789 break; 11790 11791 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 11792 goto err; 11793 } while (sz >>= 1); 11794 11795 opt[DTRACEOPT_DYNVARSIZE] = sz; 11796 11797 if (rval != 0) 11798 goto err; 11799 11800 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 11801 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 11802 11803 if (opt[DTRACEOPT_CLEANRATE] == 0) 11804 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 11805 11806 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 11807 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 11808 11809 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 11810 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 11811 11812 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 11813 hdlr.cyh_arg = state; 11814 hdlr.cyh_level = CY_LOW_LEVEL; 11815 11816 when.cyt_when = 0; 11817 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 11818 11819 state->dts_cleaner = cyclic_add(&hdlr, &when); 11820 11821 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 11822 hdlr.cyh_arg = state; 11823 hdlr.cyh_level = CY_LOW_LEVEL; 11824 11825 when.cyt_when = 0; 11826 when.cyt_interval = dtrace_deadman_interval; 11827 11828 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 11829 state->dts_deadman = cyclic_add(&hdlr, &when); 11830 11831 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 11832 11833 /* 11834 * Now it's time to actually fire the BEGIN probe. We need to disable 11835 * interrupts here both to record the CPU on which we fired the BEGIN 11836 * probe (the data from this CPU will be processed first at user 11837 * level) and to manually activate the buffer for this CPU. 
11838 */ 11839 cookie = dtrace_interrupt_disable(); 11840 *cpu = CPU->cpu_id; 11841 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 11842 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 11843 11844 dtrace_probe(dtrace_probeid_begin, 11845 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 11846 dtrace_interrupt_enable(cookie); 11847 /* 11848 * We may have had an exit action from a BEGIN probe; only change our 11849 * state to ACTIVE if we're still in WARMUP. 11850 */ 11851 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 11852 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 11853 11854 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 11855 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 11856 11857 /* 11858 * Regardless of whether we're now in ACTIVE or DRAINING, we 11859 * want each CPU to transition its principal buffer out of the 11860 * INACTIVE state. Doing this assures that no CPU will suddenly begin 11861 * processing an ECB halfway down a probe's ECB chain; all CPUs will 11862 * atomically transition from processing none of a state's ECBs to 11863 * processing all of them. 11864 */ 11865 dtrace_xcall(DTRACE_CPUALL, 11866 (dtrace_xcall_t)dtrace_buffer_activate, state); 11867 goto out; 11868 11869 err: 11870 dtrace_buffer_free(state->dts_buffer); 11871 dtrace_buffer_free(state->dts_aggbuffer); 11872 11873 if ((nspec = state->dts_nspeculations) == 0) { 11874 ASSERT(state->dts_speculations == NULL); 11875 goto out; 11876 } 11877 11878 spec = state->dts_speculations; 11879 ASSERT(spec != NULL); 11880 11881 for (i = 0; i < state->dts_nspeculations; i++) { 11882 if ((buf = spec[i].dtsp_buffer) == NULL) 11883 break; 11884 11885 dtrace_buffer_free(buf); 11886 kmem_free(buf, bufsize); 11887 } 11888 11889 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 11890 state->dts_nspeculations = 0; 11891 state->dts_speculations = NULL; 11892 11893 out: 11894 mutex_exit(&dtrace_lock); 11895 mutex_exit(&cpu_lock); 11896 11897 return (rval); 11898 } 11899 11900 static int 11901 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 11902 { 11903 dtrace_icookie_t cookie; 11904 11905 ASSERT(MUTEX_HELD(&dtrace_lock)); 11906 11907 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 11908 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 11909 return (EINVAL); 11910 11911 /* 11912 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 11913 * to be sure that every CPU has seen it. See below for the details 11914 * on why this is done. 11915 */ 11916 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 11917 dtrace_sync(); 11918 11919 /* 11920 * By this point, it is impossible for any CPU to be still processing 11921 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 11922 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 11923 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 11924 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 11925 * iff we're in the END probe. 11926 */ 11927 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 11928 dtrace_sync(); 11929 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 11930 11931 /* 11932 * Finally, we can release the reserve and call the END probe. We 11933 * disable interrupts across calling the END probe to allow us to 11934 * return the CPU on which we actually called the END probe. This 11935 * allows user-land to be sure that this CPU's principal buffer is 11936 * processed last.
11937 */ 11938 state->dts_reserve = 0; 11939 11940 cookie = dtrace_interrupt_disable(); 11941 *cpu = CPU->cpu_id; 11942 dtrace_probe(dtrace_probeid_end, 11943 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 11944 dtrace_interrupt_enable(cookie); 11945 11946 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 11947 dtrace_sync(); 11948 11949 return (0); 11950 } 11951 11952 static int 11953 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 11954 dtrace_optval_t val) 11955 { 11956 ASSERT(MUTEX_HELD(&dtrace_lock)); 11957 11958 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 11959 return (EBUSY); 11960 11961 if (option >= DTRACEOPT_MAX) 11962 return (EINVAL); 11963 11964 if (option != DTRACEOPT_CPU && val < 0) 11965 return (EINVAL); 11966 11967 switch (option) { 11968 case DTRACEOPT_DESTRUCTIVE: 11969 if (dtrace_destructive_disallow) 11970 return (EACCES); 11971 11972 state->dts_cred.dcr_destructive = 1; 11973 break; 11974 11975 case DTRACEOPT_BUFSIZE: 11976 case DTRACEOPT_DYNVARSIZE: 11977 case DTRACEOPT_AGGSIZE: 11978 case DTRACEOPT_SPECSIZE: 11979 case DTRACEOPT_STRSIZE: 11980 if (val < 0) 11981 return (EINVAL); 11982 11983 if (val >= LONG_MAX) { 11984 /* 11985 * If this is an otherwise negative value, set it to 11986 * the highest multiple of 128m less than LONG_MAX. 11987 * Technically, we're adjusting the size without 11988 * regard to the buffer resizing policy, but in fact, 11989 * this has no effect -- if we set the buffer size to 11990 * ~LONG_MAX and the buffer policy is ultimately set to 11991 * be "manual", the buffer allocation is guaranteed to 11992 * fail, if only because the allocation requires two 11993 * buffers. (We set the size to the highest 11994 * multiple of 128m because it ensures that the size 11995 * will remain a multiple of a megabyte when 11996 * repeatedly halved -- all the way down to 15m.) 11997 */ 11998 val = LONG_MAX - (1 << 27) + 1; 11999 } 12000 } 12001 12002 state->dts_options[option] = val; 12003 12004 return (0); 12005 } 12006 12007 static void 12008 dtrace_state_destroy(dtrace_state_t *state) 12009 { 12010 dtrace_ecb_t *ecb; 12011 dtrace_vstate_t *vstate = &state->dts_vstate; 12012 minor_t minor = getminor(state->dts_dev); 12013 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 12014 dtrace_speculation_t *spec = state->dts_speculations; 12015 int nspec = state->dts_nspeculations; 12016 uint32_t match; 12017 12018 ASSERT(MUTEX_HELD(&dtrace_lock)); 12019 ASSERT(MUTEX_HELD(&cpu_lock)); 12020 12021 /* 12022 * First, retract any retained enablings for this state. 12023 */ 12024 dtrace_enabling_retract(state); 12025 ASSERT(state->dts_nretained == 0); 12026 12027 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || 12028 state->dts_activity == DTRACE_ACTIVITY_DRAINING) { 12029 /* 12030 * We have managed to come into dtrace_state_destroy() on a 12031 * hot enabling -- almost certainly because of a disorderly 12032 * shutdown of a consumer. (That is, a consumer that is 12033 * exiting without having called dtrace_stop().) In this case, 12034 * we're going to set our activity to be KILLED, and then 12035 * issue a sync to be sure that everyone is out of probe 12036 * context before we start blowing away ECBs. 12037 */ 12038 state->dts_activity = DTRACE_ACTIVITY_KILLED; 12039 dtrace_sync(); 12040 } 12041 12042 /* 12043 * Release the credential hold we took in dtrace_state_create().
12044 */ 12045 if (state->dts_cred.dcr_cred != NULL) 12046 crfree(state->dts_cred.dcr_cred); 12047 12048 /* 12049 * Now we can safely disable and destroy any enabled probes. Because 12050 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 12051 * (especially if they're all enabled), we take two passes through the 12052 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 12053 * in the second we disable whatever is left over. 12054 */ 12055 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 12056 for (i = 0; i < state->dts_necbs; i++) { 12057 if ((ecb = state->dts_ecbs[i]) == NULL) 12058 continue; 12059 12060 if (match && ecb->dte_probe != NULL) { 12061 dtrace_probe_t *probe = ecb->dte_probe; 12062 dtrace_provider_t *prov = probe->dtpr_provider; 12063 12064 if (!(prov->dtpv_priv.dtpp_flags & match)) 12065 continue; 12066 } 12067 12068 dtrace_ecb_disable(ecb); 12069 dtrace_ecb_destroy(ecb); 12070 } 12071 12072 if (!match) 12073 break; 12074 } 12075 12076 /* 12077 * Before we free the buffers, perform one more sync to assure that 12078 * every CPU is out of probe context. 12079 */ 12080 dtrace_sync(); 12081 12082 dtrace_buffer_free(state->dts_buffer); 12083 dtrace_buffer_free(state->dts_aggbuffer); 12084 12085 for (i = 0; i < nspec; i++) 12086 dtrace_buffer_free(spec[i].dtsp_buffer); 12087 12088 if (state->dts_cleaner != CYCLIC_NONE) 12089 cyclic_remove(state->dts_cleaner); 12090 12091 if (state->dts_deadman != CYCLIC_NONE) 12092 cyclic_remove(state->dts_deadman); 12093 12094 dtrace_dstate_fini(&vstate->dtvs_dynvars); 12095 dtrace_vstate_fini(vstate); 12096 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 12097 12098 if (state->dts_aggregations != NULL) { 12099 #ifdef DEBUG 12100 for (i = 0; i < state->dts_naggregations; i++) 12101 ASSERT(state->dts_aggregations[i] == NULL); 12102 #endif 12103 ASSERT(state->dts_naggregations > 0); 12104 kmem_free(state->dts_aggregations, 12105 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 12106 } 12107 12108 kmem_free(state->dts_buffer, bufsize); 12109 kmem_free(state->dts_aggbuffer, bufsize); 12110 12111 for (i = 0; i < nspec; i++) 12112 kmem_free(spec[i].dtsp_buffer, bufsize); 12113 12114 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 12115 12116 dtrace_format_destroy(state); 12117 12118 vmem_destroy(state->dts_aggid_arena); 12119 ddi_soft_state_free(dtrace_softstate, minor); 12120 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 12121 } 12122 12123 /* 12124 * DTrace Anonymous Enabling Functions 12125 */ 12126 static dtrace_state_t * 12127 dtrace_anon_grab(void) 12128 { 12129 dtrace_state_t *state; 12130 12131 ASSERT(MUTEX_HELD(&dtrace_lock)); 12132 12133 if ((state = dtrace_anon.dta_state) == NULL) { 12134 ASSERT(dtrace_anon.dta_enabling == NULL); 12135 return (NULL); 12136 } 12137 12138 ASSERT(dtrace_anon.dta_enabling != NULL); 12139 ASSERT(dtrace_retained != NULL); 12140 12141 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 12142 dtrace_anon.dta_enabling = NULL; 12143 dtrace_anon.dta_state = NULL; 12144 12145 return (state); 12146 } 12147 12148 static void 12149 dtrace_anon_property(void) 12150 { 12151 int i, rv; 12152 dtrace_state_t *state; 12153 dof_hdr_t *dof; 12154 char c[32]; /* enough for "dof-data-" + digits */ 12155 12156 ASSERT(MUTEX_HELD(&dtrace_lock)); 12157 ASSERT(MUTEX_HELD(&cpu_lock)); 12158 12159 for (i = 0; ; i++) { 12160 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 12161 12162 dtrace_err_verbose = 1; 12163 12164 if ((dof = dtrace_dof_property(c)) 
== NULL) { 12165 dtrace_err_verbose = 0; 12166 break; 12167 } 12168 12169 /* 12170 * We want to create anonymous state, so we need to transition 12171 * the kernel debugger to indicate that DTrace is active. If 12172 * this fails (e.g. because the debugger has modified text in 12173 * some way), we won't continue with the processing. 12174 */ 12175 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 12176 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 12177 "enabling ignored."); 12178 dtrace_dof_destroy(dof); 12179 break; 12180 } 12181 12182 /* 12183 * If we haven't allocated an anonymous state, we'll do so now. 12184 */ 12185 if ((state = dtrace_anon.dta_state) == NULL) { 12186 state = dtrace_state_create(NULL, NULL); 12187 dtrace_anon.dta_state = state; 12188 12189 if (state == NULL) { 12190 /* 12191 * This basically shouldn't happen: the only 12192 * failure mode from dtrace_state_create() is a 12193 * failure of ddi_soft_state_zalloc() that 12194 * itself should never happen. Still, the 12195 * interface allows for a failure mode, and 12196 * we want to fail as gracefully as possible: 12197 * we'll emit an error message and cease 12198 * processing anonymous state in this case. 12199 */ 12200 cmn_err(CE_WARN, "failed to create " 12201 "anonymous state"); 12202 dtrace_dof_destroy(dof); 12203 break; 12204 } 12205 } 12206 12207 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 12208 &dtrace_anon.dta_enabling, 0, B_TRUE); 12209 12210 if (rv == 0) 12211 rv = dtrace_dof_options(dof, state); 12212 12213 dtrace_err_verbose = 0; 12214 dtrace_dof_destroy(dof); 12215 12216 if (rv != 0) { 12217 /* 12218 * This is malformed DOF; chuck any anonymous state 12219 * that we created. 12220 */ 12221 ASSERT(dtrace_anon.dta_enabling == NULL); 12222 dtrace_state_destroy(state); 12223 dtrace_anon.dta_state = NULL; 12224 break; 12225 } 12226 12227 ASSERT(dtrace_anon.dta_enabling != NULL); 12228 } 12229 12230 if (dtrace_anon.dta_enabling != NULL) { 12231 int rval; 12232 12233 /* 12234 * dtrace_enabling_retain() can only fail because we are 12235 * trying to retain more enablings than are allowed -- but 12236 * we only have one anonymous enabling, and we are guaranteed 12237 * to be allowed at least one retained enabling; we assert 12238 * that dtrace_enabling_retain() returns success. 12239 */ 12240 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 12241 ASSERT(rval == 0); 12242 12243 dtrace_enabling_dump(dtrace_anon.dta_enabling); 12244 } 12245 } 12246 12247 /* 12248 * DTrace Helper Functions 12249 */ 12250 static void 12251 dtrace_helper_trace(dtrace_helper_action_t *helper, 12252 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 12253 { 12254 uint32_t size, next, nnext, i; 12255 dtrace_helptrace_t *ent; 12256 uint16_t flags = cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 12257 12258 if (!dtrace_helptrace_enabled) 12259 return; 12260 12261 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 12262 12263 /* 12264 * What would a tracing framework be without its own tracing 12265 * framework? (Well, a hell of a lot simpler, for starters...) 12266 */ 12267 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 12268 sizeof (uint64_t) - sizeof (uint64_t); 12269 12270 /* 12271 * Iterate until we can allocate a slot in the trace buffer. 
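 * The slot is claimed lock-free: we snapshot dtrace_helptrace_next,
 * compute where our record would end, and install the new offset with a
 * compare-and-swap, retrying if another CPU raced us. If the record
 * would run off the end of the buffer, the winning CAS installs
 * nnext == size; the check below then resets our own offset to zero so
 * that the record is laid down at the start of the buffer.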
12272 */ 12273 do { 12274 next = dtrace_helptrace_next; 12275 12276 if (next + size < dtrace_helptrace_bufsize) { 12277 nnext = next + size; 12278 } else { 12279 nnext = size; 12280 } 12281 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 12282 12283 /* 12284 * We have our slot; fill it in. 12285 */ 12286 if (nnext == size) 12287 next = 0; 12288 12289 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 12290 ent->dtht_helper = helper; 12291 ent->dtht_where = where; 12292 ent->dtht_nlocals = vstate->dtvs_nlocals; 12293 12294 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 12295 mstate->dtms_fltoffs : -1; 12296 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 12297 ent->dtht_illval = cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 12298 12299 for (i = 0; i < vstate->dtvs_nlocals; i++) { 12300 dtrace_statvar_t *svar; 12301 12302 if ((svar = vstate->dtvs_locals[i]) == NULL) 12303 continue; 12304 12305 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 12306 ent->dtht_locals[i] = 12307 ((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id]; 12308 } 12309 } 12310 12311 static uint64_t 12312 dtrace_helper(int which, dtrace_mstate_t *mstate, 12313 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 12314 { 12315 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 12316 uint64_t sarg0 = mstate->dtms_arg[0]; 12317 uint64_t sarg1 = mstate->dtms_arg[1]; 12318 uint64_t rval; 12319 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 12320 dtrace_helper_action_t *helper; 12321 dtrace_vstate_t *vstate; 12322 dtrace_difo_t *pred; 12323 int i, trace = dtrace_helptrace_enabled; 12324 12325 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 12326 12327 if (helpers == NULL) 12328 return (0); 12329 12330 if ((helper = helpers->dthps_actions[which]) == NULL) 12331 return (0); 12332 12333 vstate = &helpers->dthps_vstate; 12334 mstate->dtms_arg[0] = arg0; 12335 mstate->dtms_arg[1] = arg1; 12336 12337 /* 12338 * Now iterate over each helper. If its predicate evaluates to 'true', 12339 * we'll call the corresponding actions. Note that the below calls 12340 * to dtrace_dif_emulate() may set faults in machine state. This is 12341 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 12342 * the stored DIF offset with its own (which is the desired behavior). 12343 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 12344 * from machine state; this is okay, too. 12345 */ 12346 for (; helper != NULL; helper = helper->dtha_next) { 12347 if ((pred = helper->dtha_predicate) != NULL) { 12348 if (trace) 12349 dtrace_helper_trace(helper, mstate, vstate, 0); 12350 12351 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 12352 goto next; 12353 12354 if (*flags & CPU_DTRACE_FAULT) 12355 goto err; 12356 } 12357 12358 for (i = 0; i < helper->dtha_nactions; i++) { 12359 if (trace) 12360 dtrace_helper_trace(helper, 12361 mstate, vstate, i + 1); 12362 12363 rval = dtrace_dif_emulate(helper->dtha_actions[i], 12364 mstate, vstate, state); 12365 12366 if (*flags & CPU_DTRACE_FAULT) 12367 goto err; 12368 } 12369 12370 next: 12371 if (trace) 12372 dtrace_helper_trace(helper, mstate, vstate, 12373 DTRACE_HELPTRACE_NEXT); 12374 } 12375 12376 if (trace) 12377 dtrace_helper_trace(helper, mstate, vstate, 12378 DTRACE_HELPTRACE_DONE); 12379 12380 /* 12381 * Restore the arg0 that we saved upon entry. 
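 * (The helper's arguments were stored over dtms_arg[0] and dtms_arg[1]
 * upon entry; restoring the saved values leaves the machine state
 * exactly as the interrupted DIF emulation expects to find it.)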
12382 */ 12383 mstate->dtms_arg[0] = sarg0; 12384 mstate->dtms_arg[1] = sarg1; 12385 12386 return (rval); 12387 12388 err: 12389 if (trace) 12390 dtrace_helper_trace(helper, mstate, vstate, 12391 DTRACE_HELPTRACE_ERR); 12392 12393 /* 12394 * Restore the arg0 that we saved upon entry. 12395 */ 12396 mstate->dtms_arg[0] = sarg0; 12397 mstate->dtms_arg[1] = sarg1; 12398 12399 return (0); 12400 } 12401 12402 static void 12403 dtrace_helper_action_destroy(dtrace_helper_action_t *helper, 12404 dtrace_vstate_t *vstate) 12405 { 12406 int i; 12407 12408 if (helper->dtha_predicate != NULL) 12409 dtrace_difo_release(helper->dtha_predicate, vstate); 12410 12411 for (i = 0; i < helper->dtha_nactions; i++) { 12412 ASSERT(helper->dtha_actions[i] != NULL); 12413 dtrace_difo_release(helper->dtha_actions[i], vstate); 12414 } 12415 12416 kmem_free(helper->dtha_actions, 12417 helper->dtha_nactions * sizeof (dtrace_difo_t *)); 12418 kmem_free(helper, sizeof (dtrace_helper_action_t)); 12419 } 12420 12421 static int 12422 dtrace_helper_destroygen(int gen) 12423 { 12424 proc_t *p = curproc; 12425 dtrace_helpers_t *help = p->p_dtrace_helpers; 12426 dtrace_vstate_t *vstate; 12427 int i; 12428 12429 ASSERT(MUTEX_HELD(&dtrace_lock)); 12430 12431 if (help == NULL || gen > help->dthps_generation) 12432 return (EINVAL); 12433 12434 vstate = &help->dthps_vstate; 12435 12436 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 12437 dtrace_helper_action_t *last = NULL, *h, *next; 12438 12439 for (h = help->dthps_actions[i]; h != NULL; h = next) { 12440 next = h->dtha_next; 12441 12442 if (h->dtha_generation == gen) { 12443 if (last != NULL) { 12444 last->dtha_next = next; 12445 } else { 12446 help->dthps_actions[i] = next; 12447 } 12448 12449 dtrace_helper_action_destroy(h, vstate); 12450 } else { 12451 last = h; 12452 } 12453 } 12454 } 12455 12456 /* 12457 * Iterate until we've cleared out all helper providers with the 12458 * given generation number. 12459 */ 12460 for (;;) { 12461 dtrace_helper_provider_t *prov; 12462 12463 /* 12464 * Look for a helper provider with the right generation. We 12465 * have to start back at the beginning of the list each time 12466 * because we drop dtrace_lock. It's unlikely that we'll make 12467 * more than two passes. 12468 */ 12469 for (i = 0; i < help->dthps_nprovs; i++) { 12470 prov = help->dthps_provs[i]; 12471 12472 if (prov->dthp_generation == gen) 12473 break; 12474 } 12475 12476 /* 12477 * If there were no matches, we're done. 12478 */ 12479 if (i == help->dthps_nprovs) 12480 break; 12481 12482 /* 12483 * Move the last helper provider into this slot. 12484 */ 12485 help->dthps_nprovs--; 12486 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; 12487 help->dthps_provs[help->dthps_nprovs] = NULL; 12488 12489 mutex_exit(&dtrace_lock); 12490 12491 /* 12492 * If we have a meta provider, remove this helper provider.
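 * (Note the lock ordering: dtrace_meta_lock is acquired before
 * dtrace_lock -- dtrace_helper_provider_register() relies on the same
 * ordering -- which is why dtrace_lock was dropped above before we take
 * the meta lock below.)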
12493 */ 12494 mutex_enter(&dtrace_meta_lock); 12495 if (dtrace_meta_pid != NULL) { 12496 ASSERT(dtrace_deferred_pid == NULL); 12497 dtrace_helper_provider_remove(&prov->dthp_prov, 12498 p->p_pid); 12499 } 12500 mutex_exit(&dtrace_meta_lock); 12501 12502 dtrace_helper_provider_destroy(prov); 12503 12504 mutex_enter(&dtrace_lock); 12505 } 12506 12507 return (0); 12508 } 12509 12510 static int 12511 dtrace_helper_validate(dtrace_helper_action_t *helper) 12512 { 12513 int err = 0, i; 12514 dtrace_difo_t *dp; 12515 12516 if ((dp = helper->dtha_predicate) != NULL) 12517 err += dtrace_difo_validate_helper(dp); 12518 12519 for (i = 0; i < helper->dtha_nactions; i++) 12520 err += dtrace_difo_validate_helper(helper->dtha_actions[i]); 12521 12522 return (err == 0); 12523 } 12524 12525 static int 12526 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 12527 { 12528 dtrace_helpers_t *help; 12529 dtrace_helper_action_t *helper, *last; 12530 dtrace_actdesc_t *act; 12531 dtrace_vstate_t *vstate; 12532 dtrace_predicate_t *pred; 12533 int count = 0, nactions = 0, i; 12534 12535 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 12536 return (EINVAL); 12537 12538 help = curproc->p_dtrace_helpers; 12539 last = help->dthps_actions[which]; 12540 vstate = &help->dthps_vstate; 12541 12542 for (count = 0; last != NULL; last = last->dtha_next) { 12543 count++; 12544 if (last->dtha_next == NULL) 12545 break; 12546 } 12547 12548 /* 12549 * If we already have dtrace_helper_actions_max helper actions for this 12550 * helper action type, we'll refuse to add a new one. 12551 */ 12552 if (count >= dtrace_helper_actions_max) 12553 return (ENOSPC); 12554 12555 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 12556 helper->dtha_generation = help->dthps_generation; 12557 12558 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 12559 ASSERT(pred->dtp_difo != NULL); 12560 dtrace_difo_hold(pred->dtp_difo); 12561 helper->dtha_predicate = pred->dtp_difo; 12562 } 12563 12564 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 12565 if (act->dtad_kind != DTRACEACT_DIFEXPR) 12566 goto err; 12567 12568 if (act->dtad_difo == NULL) 12569 goto err; 12570 12571 nactions++; 12572 } 12573 12574 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 12575 (helper->dtha_nactions = nactions), KM_SLEEP); 12576 12577 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 12578 dtrace_difo_hold(act->dtad_difo); 12579 helper->dtha_actions[i++] = act->dtad_difo; 12580 } 12581 12582 if (!dtrace_helper_validate(helper)) 12583 goto err; 12584 12585 if (last == NULL) { 12586 help->dthps_actions[which] = helper; 12587 } else { 12588 last->dtha_next = helper; 12589 } 12590 12591 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 12592 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 12593 dtrace_helptrace_next = 0; 12594 } 12595 12596 return (0); 12597 err: 12598 dtrace_helper_action_destroy(helper, vstate); 12599 return (EINVAL); 12600 } 12601 12602 static void 12603 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 12604 dof_helper_t *dofhp) 12605 { 12606 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 12607 12608 mutex_enter(&dtrace_meta_lock); 12609 mutex_enter(&dtrace_lock); 12610 12611 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 12612 /* 12613 * If the dtrace module is loaded but not attached, or if 12614 * there isn't a meta provider registered to deal with 12615 * these provider descriptions, we need to postpone creating 12616 * the actual providers until
later. 12617 */ 12618 12619 if (help->dthps_next == NULL && help->dthps_prev == NULL && 12620 dtrace_deferred_pid != help) { 12621 help->dthps_deferred = 1; 12622 help->dthps_pid = p->p_pid; 12623 help->dthps_next = dtrace_deferred_pid; 12624 help->dthps_prev = NULL; 12625 if (dtrace_deferred_pid != NULL) 12626 dtrace_deferred_pid->dthps_prev = help; 12627 dtrace_deferred_pid = help; 12628 } 12629 12630 mutex_exit(&dtrace_lock); 12631 12632 } else if (dofhp != NULL) { 12633 /* 12634 * If the dtrace module is loaded and we have a particular 12635 * helper provider description, pass that off to the 12636 * meta provider. 12637 */ 12638 12639 mutex_exit(&dtrace_lock); 12640 12641 dtrace_helper_provide(dofhp, p->p_pid); 12642 12643 } else { 12644 /* 12645 * Otherwise, just pass all the helper provider descriptions 12646 * off to the meta provider. 12647 */ 12648 12649 int i; 12650 mutex_exit(&dtrace_lock); 12651 12652 for (i = 0; i < help->dthps_nprovs; i++) { 12653 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 12654 p->p_pid); 12655 } 12656 } 12657 12658 mutex_exit(&dtrace_meta_lock); 12659 } 12660 12661 static int 12662 dtrace_helper_provider_add(dof_helper_t *dofhp, int gen) 12663 { 12664 dtrace_helpers_t *help; 12665 dtrace_helper_provider_t *hprov, **tmp_provs; 12666 uint_t tmp_maxprovs, i; 12667 12668 ASSERT(MUTEX_HELD(&dtrace_lock)); 12669 12670 help = curproc->p_dtrace_helpers; 12671 ASSERT(help != NULL); 12672 12673 /* 12674 * If we already have dtrace_helper_providers_max helper providers, 12675 * we'll refuse to add a new one. 12676 */ 12677 if (help->dthps_nprovs >= dtrace_helper_providers_max) 12678 return (ENOSPC); 12679 12680 /* 12681 * Check to make sure this isn't a duplicate. 12682 */ 12683 for (i = 0; i < help->dthps_nprovs; i++) { 12684 if (dofhp->dofhp_addr == 12685 help->dthps_provs[i]->dthp_prov.dofhp_addr) 12686 return (EALREADY); 12687 } 12688 12689 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 12690 hprov->dthp_prov = *dofhp; 12691 hprov->dthp_ref = 1; 12692 hprov->dthp_generation = gen; 12693 12694 /* 12695 * Allocate a bigger table for helper providers if it's already full.
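 * The growth policy is the usual amortized doubling: the table starts at
 * two slots and doubles until it reaches the dtrace_helper_providers_max
 * cap, so with a cap of, say, 32, it grows 2, 4, 8, 16, 32 -- a handful
 * of reallocations over the life of the process.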
12696 */ 12697 if (help->dthps_maxprovs == help->dthps_nprovs) { 12698 tmp_maxprovs = help->dthps_maxprovs; 12699 tmp_provs = help->dthps_provs; 12700 12701 if (help->dthps_maxprovs == 0) 12702 help->dthps_maxprovs = 2; 12703 else 12704 help->dthps_maxprovs *= 2; 12705 if (help->dthps_maxprovs > dtrace_helper_providers_max) 12706 help->dthps_maxprovs = dtrace_helper_providers_max; 12707 12708 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 12709 12710 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 12711 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 12712 12713 if (tmp_provs != NULL) { 12714 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 12715 sizeof (dtrace_helper_provider_t *)); 12716 kmem_free(tmp_provs, tmp_maxprovs * 12717 sizeof (dtrace_helper_provider_t *)); 12718 } 12719 } 12720 12721 help->dthps_provs[help->dthps_nprovs] = hprov; 12722 help->dthps_nprovs++; 12723 12724 return (0); 12725 } 12726 12727 static void 12728 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 12729 { 12730 mutex_enter(&dtrace_lock); 12731 12732 if (--hprov->dthp_ref == 0) { 12733 dof_hdr_t *dof; 12734 mutex_exit(&dtrace_lock); 12735 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 12736 dtrace_dof_destroy(dof); 12737 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 12738 } else { 12739 mutex_exit(&dtrace_lock); 12740 } 12741 } 12742 12743 static int 12744 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 12745 { 12746 uintptr_t daddr = (uintptr_t)dof; 12747 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 12748 dof_provider_t *provider; 12749 dof_probe_t *probe; 12750 uint8_t *arg; 12751 char *strtab, *typestr; 12752 dof_stridx_t typeidx; 12753 size_t typesz; 12754 uint_t nprobes, j, k; 12755 12756 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 12757 12758 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 12759 dtrace_dof_error(dof, "misaligned section offset"); 12760 return (-1); 12761 } 12762 12763 /* 12764 * The section needs to be large enough to contain the DOF provider 12765 * structure appropriate for the given version. 12766 */ 12767 if (sec->dofs_size < 12768 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
12769 offsetof(dof_provider_t, dofpv_prenoffs) : 12770 sizeof (dof_provider_t))) { 12771 dtrace_dof_error(dof, "provider section too small"); 12772 return (-1); 12773 } 12774 12775 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 12776 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 12777 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 12778 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 12779 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 12780 12781 if (str_sec == NULL || prb_sec == NULL || 12782 arg_sec == NULL || off_sec == NULL) 12783 return (-1); 12784 12785 enoff_sec = NULL; 12786 12787 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 12788 provider->dofpv_prenoffs != DOF_SECT_NONE && 12789 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 12790 provider->dofpv_prenoffs)) == NULL) 12791 return (-1); 12792 12793 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 12794 12795 if (provider->dofpv_name >= str_sec->dofs_size || 12796 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 12797 dtrace_dof_error(dof, "invalid provider name"); 12798 return (-1); 12799 } 12800 12801 if (prb_sec->dofs_entsize == 0 || 12802 prb_sec->dofs_entsize > prb_sec->dofs_size) { 12803 dtrace_dof_error(dof, "invalid entry size"); 12804 return (-1); 12805 } 12806 12807 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 12808 dtrace_dof_error(dof, "misaligned entry size"); 12809 return (-1); 12810 } 12811 12812 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 12813 dtrace_dof_error(dof, "invalid entry size"); 12814 return (-1); 12815 } 12816 12817 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 12818 dtrace_dof_error(dof, "misaligned section offset"); 12819 return (-1); 12820 } 12821 12822 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 12823 dtrace_dof_error(dof, "invalid entry size"); 12824 return (-1); 12825 } 12826 12827 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 12828 12829 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 12830 12831 /* 12832 * Take a pass through the probes to check for errors. 12833 */ 12834 for (j = 0; j < nprobes; j++) { 12835 probe = (dof_probe_t *)(uintptr_t)(daddr + 12836 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 12837 12838 if (probe->dofpr_func >= str_sec->dofs_size) { 12839 dtrace_dof_error(dof, "invalid function name"); 12840 return (-1); 12841 } 12842 12843 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 12844 dtrace_dof_error(dof, "function name too long"); 12845 return (-1); 12846 } 12847 12848 if (probe->dofpr_name >= str_sec->dofs_size || 12849 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 12850 dtrace_dof_error(dof, "invalid probe name"); 12851 return (-1); 12852 } 12853 12854 /* 12855 * The offset count must not wrap the index, and the offsets 12856 * must also not overflow the section's data. 12857 */ 12858 if (probe->dofpr_offidx + probe->dofpr_noffs < 12859 probe->dofpr_offidx || 12860 (probe->dofpr_offidx + probe->dofpr_noffs) * 12861 off_sec->dofs_entsize > off_sec->dofs_size) { 12862 dtrace_dof_error(dof, "invalid probe offset"); 12863 return (-1); 12864 } 12865 12866 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 12867 /* 12868 * If there's no is-enabled offset section, make sure 12869 * there aren't any is-enabled offsets. Otherwise 12870 * perform the same checks as for probe offsets 12871 * (immediately above). 
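 * The test is the standard unsigned-overflow idiom: for a uint32_t
 * index 'idx' and count 'n', 'idx + n < idx' can only be true if the
 * addition wrapped (e.g. idx = 0xfffffff0 and n = 0x20 yields 0x10),
 * and '(idx + n) * entsize > secsize' rejects ranges that fall outside
 * the section's data.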
12872 */ 12873 if (enoff_sec == NULL) { 12874 if (probe->dofpr_enoffidx != 0 || 12875 probe->dofpr_nenoffs != 0) { 12876 dtrace_dof_error(dof, "is-enabled " 12877 "offsets with null section"); 12878 return (-1); 12879 } 12880 } else if (probe->dofpr_enoffidx + 12881 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 12882 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 12883 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 12884 dtrace_dof_error(dof, "invalid is-enabled " 12885 "offset"); 12886 return (-1); 12887 } 12888 12889 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 12890 dtrace_dof_error(dof, "zero probe and " 12891 "is-enabled offsets"); 12892 return (-1); 12893 } 12894 } else if (probe->dofpr_noffs == 0) { 12895 dtrace_dof_error(dof, "zero probe offsets"); 12896 return (-1); 12897 } 12898 12899 if (probe->dofpr_argidx + probe->dofpr_xargc < 12900 probe->dofpr_argidx || 12901 (probe->dofpr_argidx + probe->dofpr_xargc) * 12902 arg_sec->dofs_entsize > arg_sec->dofs_size) { 12903 dtrace_dof_error(dof, "invalid args"); 12904 return (-1); 12905 } 12906 12907 typeidx = probe->dofpr_nargv; 12908 typestr = strtab + probe->dofpr_nargv; 12909 for (k = 0; k < probe->dofpr_nargc; k++) { 12910 if (typeidx >= str_sec->dofs_size) { 12911 dtrace_dof_error(dof, "bad " 12912 "native argument type"); 12913 return (-1); 12914 } 12915 12916 typesz = strlen(typestr) + 1; 12917 if (typesz > DTRACE_ARGTYPELEN) { 12918 dtrace_dof_error(dof, "native " 12919 "argument type too long"); 12920 return (-1); 12921 } 12922 typeidx += typesz; 12923 typestr += typesz; 12924 } 12925 12926 typeidx = probe->dofpr_xargv; 12927 typestr = strtab + probe->dofpr_xargv; 12928 for (k = 0; k < probe->dofpr_xargc; k++) { 12929 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 12930 dtrace_dof_error(dof, "bad " 12931 "native argument index"); 12932 return (-1); 12933 } 12934 12935 if (typeidx >= str_sec->dofs_size) { 12936 dtrace_dof_error(dof, "bad " 12937 "translated argument type"); 12938 return (-1); 12939 } 12940 12941 typesz = strlen(typestr) + 1; 12942 if (typesz > DTRACE_ARGTYPELEN) { 12943 dtrace_dof_error(dof, "translated argument " 12944 "type too long"); 12945 return (-1); 12946 } 12947 12948 typeidx += typesz; 12949 typestr += typesz; 12950 } 12951 } 12952 12953 return (0); 12954 } 12955 12956 static int 12957 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 12958 { 12959 dtrace_helpers_t *help; 12960 dtrace_vstate_t *vstate; 12961 dtrace_enabling_t *enab = NULL; 12962 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 12963 uintptr_t daddr = (uintptr_t)dof; 12964 12965 ASSERT(MUTEX_HELD(&dtrace_lock)); 12966 12967 if ((help = curproc->p_dtrace_helpers) == NULL) 12968 help = dtrace_helpers_create(curproc); 12969 12970 vstate = &help->dthps_vstate; 12971 12972 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 12973 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 12974 dtrace_dof_destroy(dof); 12975 return (rv); 12976 } 12977 12978 /* 12979 * Look for helper providers and validate their descriptions. 
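 * Section headers are laid out as an array at daddr + dofh_secoff,
 * dofh_secsize bytes apart; each section of type DOF_SECT_PROVIDER is
 * handed to dtrace_helper_provider_validate() before we commit to
 * anything, and a single bad provider section rejects the entire DOF.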
12980 */ 12981 if (dhp != NULL) { 12982 for (i = 0; i < dof->dofh_secnum; i++) { 12983 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 12984 dof->dofh_secoff + i * dof->dofh_secsize); 12985 12986 if (sec->dofs_type != DOF_SECT_PROVIDER) 12987 continue; 12988 12989 if (dtrace_helper_provider_validate(dof, sec) != 0) { 12990 dtrace_enabling_destroy(enab); 12991 dtrace_dof_destroy(dof); 12992 return (-1); 12993 } 12994 12995 nprovs++; 12996 } 12997 } 12998 12999 /* 13000 * Now we need to walk through the ECB descriptions in the enabling. 13001 */ 13002 for (i = 0; i < enab->dten_ndesc; i++) { 13003 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 13004 dtrace_probedesc_t *desc = &ep->dted_probe; 13005 13006 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 13007 continue; 13008 13009 if (strcmp(desc->dtpd_mod, "helper") != 0) 13010 continue; 13011 13012 if (strcmp(desc->dtpd_func, "ustack") != 0) 13013 continue; 13014 13015 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 13016 ep)) != 0) { 13017 /* 13018 * Adding this helper action failed -- we are now going 13019 * to rip out the entire generation and return failure. 13020 */ 13021 (void) dtrace_helper_destroygen(help->dthps_generation); 13022 dtrace_enabling_destroy(enab); 13023 dtrace_dof_destroy(dof); 13024 return (-1); 13025 } 13026 13027 nhelpers++; 13028 } 13029 13030 if (nhelpers < enab->dten_ndesc) 13031 dtrace_dof_error(dof, "unmatched helpers"); 13032 13033 gen = help->dthps_generation++; 13034 dtrace_enabling_destroy(enab); 13035 13036 if (dhp != NULL && nprovs > 0) { 13037 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 13038 if (dtrace_helper_provider_add(dhp, gen) == 0) { 13039 mutex_exit(&dtrace_lock); 13040 dtrace_helper_provider_register(curproc, help, dhp); 13041 mutex_enter(&dtrace_lock); 13042 13043 destroy = 0; 13044 } 13045 } 13046 13047 if (destroy) 13048 dtrace_dof_destroy(dof); 13049 13050 return (gen); 13051 } 13052 13053 static dtrace_helpers_t * 13054 dtrace_helpers_create(proc_t *p) 13055 { 13056 dtrace_helpers_t *help; 13057 13058 ASSERT(MUTEX_HELD(&dtrace_lock)); 13059 ASSERT(p->p_dtrace_helpers == NULL); 13060 13061 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 13062 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 13063 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 13064 13065 p->p_dtrace_helpers = help; 13066 dtrace_helpers++; 13067 13068 return (help); 13069 } 13070 13071 static void 13072 dtrace_helpers_destroy(void) 13073 { 13074 dtrace_helpers_t *help; 13075 dtrace_vstate_t *vstate; 13076 proc_t *p = curproc; 13077 int i; 13078 13079 mutex_enter(&dtrace_lock); 13080 13081 ASSERT(p->p_dtrace_helpers != NULL); 13082 ASSERT(dtrace_helpers > 0); 13083 13084 help = p->p_dtrace_helpers; 13085 vstate = &help->dthps_vstate; 13086 13087 /* 13088 * We're now going to lose the help from this process. 13089 */ 13090 p->p_dtrace_helpers = NULL; 13091 dtrace_sync(); 13092 13093 /* 13094 * Destroy the helper actions. 13095 */ 13096 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 13097 dtrace_helper_action_t *h, *next; 13098 13099 for (h = help->dthps_actions[i]; h != NULL; h = next) { 13100 next = h->dtha_next; 13101 dtrace_helper_action_destroy(h, vstate); 13102 13103 } 13104 } 13105 13106 mutex_exit(&dtrace_lock); 13107 13108 /* 13109 * Destroy the helper providers.
13110 */ 13111 if (help->dthps_maxprovs > 0) { 13112 mutex_enter(&dtrace_meta_lock); 13113 if (dtrace_meta_pid != NULL) { 13114 ASSERT(dtrace_deferred_pid == NULL); 13115 13116 for (i = 0; i < help->dthps_nprovs; i++) { 13117 dtrace_helper_provider_remove( 13118 &help->dthps_provs[i]->dthp_prov, p->p_pid); 13119 } 13120 } else { 13121 mutex_enter(&dtrace_lock); 13122 ASSERT(help->dthps_deferred == 0 || 13123 help->dthps_next != NULL || 13124 help->dthps_prev != NULL || 13125 help == dtrace_deferred_pid); 13126 13127 /* 13128 * Remove the helper from the deferred list. 13129 */ 13130 if (help->dthps_next != NULL) 13131 help->dthps_next->dthps_prev = help->dthps_prev; 13132 if (help->dthps_prev != NULL) 13133 help->dthps_prev->dthps_next = help->dthps_next; 13134 if (dtrace_deferred_pid == help) { 13135 dtrace_deferred_pid = help->dthps_next; 13136 ASSERT(help->dthps_prev == NULL); 13137 } 13138 13139 mutex_exit(&dtrace_lock); 13140 } 13141 13142 mutex_exit(&dtrace_meta_lock); 13143 13144 for (i = 0; i < help->dthps_nprovs; i++) { 13145 dtrace_helper_provider_destroy(help->dthps_provs[i]); 13146 } 13147 13148 kmem_free(help->dthps_provs, help->dthps_maxprovs * 13149 sizeof (dtrace_helper_provider_t *)); 13150 } 13151 13152 mutex_enter(&dtrace_lock); 13153 13154 dtrace_vstate_fini(&help->dthps_vstate); 13155 kmem_free(help->dthps_actions, 13156 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 13157 kmem_free(help, sizeof (dtrace_helpers_t)); 13158 13159 --dtrace_helpers; 13160 mutex_exit(&dtrace_lock); 13161 } 13162 13163 static void 13164 dtrace_helpers_duplicate(proc_t *from, proc_t *to) 13165 { 13166 dtrace_helpers_t *help, *newhelp; 13167 dtrace_helper_action_t *helper, *new, *last; 13168 dtrace_difo_t *dp; 13169 dtrace_vstate_t *vstate; 13170 int i, j, sz, hasprovs = 0; 13171 13172 mutex_enter(&dtrace_lock); 13173 ASSERT(from->p_dtrace_helpers != NULL); 13174 ASSERT(dtrace_helpers > 0); 13175 13176 help = from->p_dtrace_helpers; 13177 newhelp = dtrace_helpers_create(to); 13178 ASSERT(to->p_dtrace_helpers != NULL); 13179 13180 newhelp->dthps_generation = help->dthps_generation; 13181 vstate = &newhelp->dthps_vstate; 13182 13183 /* 13184 * Duplicate the helper actions. 13185 */ 13186 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 13187 if ((helper = help->dthps_actions[i]) == NULL) 13188 continue; 13189 13190 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 13191 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 13192 KM_SLEEP); 13193 new->dtha_generation = helper->dtha_generation; 13194 13195 if ((dp = helper->dtha_predicate) != NULL) { 13196 dp = dtrace_difo_duplicate(dp, vstate); 13197 new->dtha_predicate = dp; 13198 } 13199 13200 new->dtha_nactions = helper->dtha_nactions; 13201 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 13202 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 13203 13204 for (j = 0; j < new->dtha_nactions; j++) { 13205 dtrace_difo_t *dp = helper->dtha_actions[j]; 13206 13207 ASSERT(dp != NULL); 13208 dp = dtrace_difo_duplicate(dp, vstate); 13209 new->dtha_actions[j] = dp; 13210 } 13211 13212 if (last != NULL) { 13213 last->dtha_next = new; 13214 } else { 13215 newhelp->dthps_actions[i] = new; 13216 } 13217 13218 last = new; 13219 } 13220 } 13221 13222 /* 13223 * Duplicate the helper providers and register them with the 13224 * DTrace framework. 
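 * The providers themselves are reference-counted rather than deep-copied:
 * the child's table points at the parent's dtrace_helper_provider_t
 * structures and each has its dthp_ref bumped, so the underlying DOF is
 * destroyed only when the last process referencing it goes away (see
 * dtrace_helper_provider_destroy()).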
13225 */ 13226 if (help->dthps_nprovs > 0) { 13227 newhelp->dthps_nprovs = help->dthps_nprovs; 13228 newhelp->dthps_maxprovs = help->dthps_nprovs; 13229 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs * 13230 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 13231 for (i = 0; i < newhelp->dthps_nprovs; i++) { 13232 newhelp->dthps_provs[i] = help->dthps_provs[i]; 13233 newhelp->dthps_provs[i]->dthp_ref++; 13234 } 13235 13236 hasprovs = 1; 13237 } 13238 13239 mutex_exit(&dtrace_lock); 13240 13241 if (hasprovs) 13242 dtrace_helper_provider_register(to, newhelp, NULL); 13243 } 13244 13245 /* 13246 * DTrace Hook Functions 13247 */ 13248 static void 13249 dtrace_module_loaded(struct modctl *ctl) 13250 { 13251 dtrace_provider_t *prv; 13252 13253 mutex_enter(&dtrace_provider_lock); 13254 mutex_enter(&mod_lock); 13255 13256 ASSERT(ctl->mod_busy); 13257 13258 /* 13259 * We're going to call each provider's per-module provide operation 13260 * specifying only this module. 13261 */ 13262 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) 13263 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 13264 13265 mutex_exit(&mod_lock); 13266 mutex_exit(&dtrace_provider_lock); 13267 13268 /* 13269 * If we have any retained enablings, we need to match against them. 13270 * Enabling probes requires that cpu_lock be held, and we cannot hold 13271 * cpu_lock here -- it is legal for cpu_lock to be held when loading a 13272 * module. (In particular, this happens when loading scheduling 13273 * classes.) So if we have any retained enablings, we need to dispatch 13274 * our task queue to do the match for us. 13275 */ 13276 mutex_enter(&dtrace_lock); 13277 13278 if (dtrace_retained == NULL) { 13279 mutex_exit(&dtrace_lock); 13280 return; 13281 } 13282 13283 (void) taskq_dispatch(dtrace_taskq, 13284 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); 13285 13286 mutex_exit(&dtrace_lock); 13287 13288 /* 13289 * And now, for a little heuristic sleaze: in general, we want to 13290 * match modules as soon as they load. However, we cannot guarantee 13291 * this, because it would lead us to the lock ordering violation 13292 * outlined above. The common case, of course, is that cpu_lock is 13293 * _not_ held -- so we delay here for a clock tick, hoping that that's 13294 * long enough for the task queue to do its work. If it's not, it's 13295 * not a serious problem -- it just means that the module that we 13296 * just loaded may not be immediately instrumentable. 13297 */ 13298 delay(1); 13299 } 13300 13301 static void 13302 dtrace_module_unloaded(struct modctl *ctl) 13303 { 13304 dtrace_probe_t template, *probe, *first, *next; 13305 dtrace_provider_t *prov; 13306 13307 template.dtpr_mod = ctl->mod_modname; 13308 13309 mutex_enter(&dtrace_provider_lock); 13310 mutex_enter(&mod_lock); 13311 mutex_enter(&dtrace_lock); 13312 13313 if (dtrace_bymod == NULL) { 13314 /* 13315 * The DTrace module is loaded (obviously) but not attached; 13316 * we don't have any work to do. 13317 */ 13318 mutex_exit(&dtrace_provider_lock); 13319 mutex_exit(&mod_lock); 13320 mutex_exit(&dtrace_lock); 13321 return; 13322 } 13323 13324 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 13325 probe != NULL; probe = probe->dtpr_nextmod) { 13326 if (probe->dtpr_ecb != NULL) { 13327 mutex_exit(&dtrace_provider_lock); 13328 mutex_exit(&mod_lock); 13329 mutex_exit(&dtrace_lock); 13330 13331 /* 13332 * This shouldn't _actually_ be possible -- we're 13333 * unloading a module that has an enabled probe in it.
13334 * (It's normally up to the provider to make sure that 13335 * this can't happen.) However, because dtps_enable() 13336 * doesn't have a failure mode, there can be an 13337 * enable/unload race. Upshot: we don't want to 13338 * assert, but we're not going to disable the 13339 * probe, either. 13340 */ 13341 if (dtrace_err_verbose) { 13342 cmn_err(CE_WARN, "unloaded module '%s' had " 13343 "enabled probes", ctl->mod_modname); 13344 } 13345 13346 return; 13347 } 13348 } 13349 13350 probe = first; 13351 13352 for (first = NULL; probe != NULL; probe = next) { 13353 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 13354 13355 dtrace_probes[probe->dtpr_id - 1] = NULL; 13356 13357 next = probe->dtpr_nextmod; 13358 dtrace_hash_remove(dtrace_bymod, probe); 13359 dtrace_hash_remove(dtrace_byfunc, probe); 13360 dtrace_hash_remove(dtrace_byname, probe); 13361 13362 if (first == NULL) { 13363 first = probe; 13364 probe->dtpr_nextmod = NULL; 13365 } else { 13366 probe->dtpr_nextmod = first; 13367 first = probe; 13368 } 13369 } 13370 13371 /* 13372 * We've removed all of the module's probes from the hash chains and 13373 * from the probe array. Now issue a dtrace_sync() to be sure that 13374 * everyone has cleared out from any probe array processing. 13375 */ 13376 dtrace_sync(); 13377 13378 for (probe = first; probe != NULL; probe = first) { 13379 first = probe->dtpr_nextmod; 13380 prov = probe->dtpr_provider; 13381 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 13382 probe->dtpr_arg); 13383 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 13384 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 13385 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 13386 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 13387 kmem_free(probe, sizeof (dtrace_probe_t)); 13388 } 13389 13390 mutex_exit(&dtrace_lock); 13391 mutex_exit(&mod_lock); 13392 mutex_exit(&dtrace_provider_lock); 13393 } 13394 13395 void 13396 dtrace_suspend(void) 13397 { 13398 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 13399 } 13400 13401 void 13402 dtrace_resume(void) 13403 { 13404 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 13405 } 13406 13407 static int 13408 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 13409 { 13410 ASSERT(MUTEX_HELD(&cpu_lock)); 13411 mutex_enter(&dtrace_lock); 13412 13413 switch (what) { 13414 case CPU_CONFIG: { 13415 dtrace_state_t *state; 13416 dtrace_optval_t *opt, rs, c; 13417 13418 /* 13419 * For now, we only allocate a new buffer for anonymous state. 13420 */ 13421 if ((state = dtrace_anon.dta_state) == NULL) 13422 break; 13423 13424 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 13425 break; 13426 13427 opt = state->dts_options; 13428 c = opt[DTRACEOPT_CPU]; 13429 13430 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 13431 break; 13432 13433 /* 13434 * Regardless of what the actual policy is, we're going to 13435 * temporarily set our resize policy to be manual. We're 13436 * also going to temporarily set our CPU option to denote 13437 * the newly configured CPU. 13438 */ 13439 rs = opt[DTRACEOPT_BUFRESIZE]; 13440 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 13441 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 13442 13443 (void) dtrace_state_buffers(state); 13444 13445 opt[DTRACEOPT_BUFRESIZE] = rs; 13446 opt[DTRACEOPT_CPU] = c; 13447 13448 break; 13449 } 13450 13451 case CPU_UNCONFIG: 13452 /* 13453 * We don't free the buffer in the CPU_UNCONFIG case. 
(The 13454 * buffer will be freed when the consumer exits.) 13455 */ 13456 break; 13457 13458 default: 13459 break; 13460 } 13461 13462 mutex_exit(&dtrace_lock); 13463 return (0); 13464 } 13465 13466 static void 13467 dtrace_cpu_setup_initial(processorid_t cpu) 13468 { 13469 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 13470 } 13471 13472 static void 13473 dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 13474 { 13475 if (dtrace_toxranges >= dtrace_toxranges_max) { 13476 int osize, nsize; 13477 dtrace_toxrange_t *range; 13478 13479 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 13480 13481 if (osize == 0) { 13482 ASSERT(dtrace_toxrange == NULL); 13483 ASSERT(dtrace_toxranges_max == 0); 13484 dtrace_toxranges_max = 1; 13485 } else { 13486 dtrace_toxranges_max <<= 1; 13487 } 13488 13489 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 13490 range = kmem_zalloc(nsize, KM_SLEEP); 13491 13492 if (dtrace_toxrange != NULL) { 13493 ASSERT(osize != 0); 13494 bcopy(dtrace_toxrange, range, osize); 13495 kmem_free(dtrace_toxrange, osize); 13496 } 13497 13498 dtrace_toxrange = range; 13499 } 13500 13501 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == NULL); 13502 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == NULL); 13503 13504 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 13505 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 13506 dtrace_toxranges++; 13507 } 13508 13509 /* 13510 * DTrace Driver Cookbook Functions 13511 */ 13512 /*ARGSUSED*/ 13513 static int 13514 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 13515 { 13516 dtrace_provider_id_t id; 13517 dtrace_state_t *state = NULL; 13518 dtrace_enabling_t *enab; 13519 13520 mutex_enter(&cpu_lock); 13521 mutex_enter(&dtrace_provider_lock); 13522 mutex_enter(&dtrace_lock); 13523 13524 if (ddi_soft_state_init(&dtrace_softstate, 13525 sizeof (dtrace_state_t), 0) != 0) { 13526 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 13527 mutex_exit(&cpu_lock); 13528 mutex_exit(&dtrace_provider_lock); 13529 mutex_exit(&dtrace_lock); 13530 return (DDI_FAILURE); 13531 } 13532 13533 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 13534 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 13535 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 13536 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 13537 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 13538 ddi_remove_minor_node(devi, NULL); 13539 ddi_soft_state_fini(&dtrace_softstate); 13540 mutex_exit(&cpu_lock); 13541 mutex_exit(&dtrace_provider_lock); 13542 mutex_exit(&dtrace_lock); 13543 return (DDI_FAILURE); 13544 } 13545 13546 ddi_report_dev(devi); 13547 dtrace_devi = devi; 13548 13549 dtrace_modload = dtrace_module_loaded; 13550 dtrace_modunload = dtrace_module_unloaded; 13551 dtrace_cpu_init = dtrace_cpu_setup_initial; 13552 dtrace_helpers_cleanup = dtrace_helpers_destroy; 13553 dtrace_helpers_fork = dtrace_helpers_duplicate; 13554 dtrace_cpustart_init = dtrace_suspend; 13555 dtrace_cpustart_fini = dtrace_resume; 13556 dtrace_debugger_init = dtrace_suspend; 13557 dtrace_debugger_fini = dtrace_resume; 13558 dtrace_kreloc_init = dtrace_suspend; 13559 dtrace_kreloc_fini = dtrace_resume; 13560 13561 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 13562 13563 ASSERT(MUTEX_HELD(&cpu_lock)); 13564 13565 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 13566 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 13567 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 
13568 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 13569 VM_SLEEP | VMC_IDENTIFIER); 13570 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 13571 1, INT_MAX, 0); 13572 13573 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 13574 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 13575 NULL, NULL, NULL, NULL, NULL, 0); 13576 13577 ASSERT(MUTEX_HELD(&cpu_lock)); 13578 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 13579 offsetof(dtrace_probe_t, dtpr_nextmod), 13580 offsetof(dtrace_probe_t, dtpr_prevmod)); 13581 13582 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 13583 offsetof(dtrace_probe_t, dtpr_nextfunc), 13584 offsetof(dtrace_probe_t, dtpr_prevfunc)); 13585 13586 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 13587 offsetof(dtrace_probe_t, dtpr_nextname), 13588 offsetof(dtrace_probe_t, dtpr_prevname)); 13589 13590 if (dtrace_retain_max < 1) { 13591 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 13592 "setting to 1", dtrace_retain_max); 13593 dtrace_retain_max = 1; 13594 } 13595 13596 /* 13597 * Now discover our toxic ranges. 13598 */ 13599 dtrace_toxic_ranges(dtrace_toxrange_add); 13600 13601 /* 13602 * Before we register ourselves as a provider to our own framework, 13603 * we would like to assert that dtrace_provider is NULL -- but that's 13604 * not true if we were loaded as a dependency of a DTrace provider. 13605 * Once we've registered, we can assert that dtrace_provider is our 13606 * pseudo provider. 13607 */ 13608 (void) dtrace_register("dtrace", &dtrace_provider_attr, 13609 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 13610 13611 ASSERT(dtrace_provider != NULL); 13612 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 13613 13614 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 13615 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 13616 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 13617 dtrace_provider, NULL, NULL, "END", 0, NULL); 13618 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 13619 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 13620 13621 dtrace_anon_property(); 13622 mutex_exit(&cpu_lock); 13623 13624 /* 13625 * If DTrace helper tracing is enabled, we need to allocate the 13626 * trace buffer and initialize the values. 13627 */ 13628 if (dtrace_helptrace_enabled) { 13629 ASSERT(dtrace_helptrace_buffer == NULL); 13630 dtrace_helptrace_buffer = 13631 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 13632 dtrace_helptrace_next = 0; 13633 } 13634 13635 /* 13636 * If there are already providers, we must ask them to provide their 13637 * probes, and then match any anonymous enabling against them. Note 13638 * that there should be no other retained enablings at this time: 13639 * the only retained enablings at this time should be the anonymous 13640 * enabling. 13641 */ 13642 if (dtrace_anon.dta_enabling != NULL) { 13643 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 13644 13645 dtrace_enabling_provide(NULL); 13646 state = dtrace_anon.dta_state; 13647 13648 /* 13649 * We couldn't hold cpu_lock across the above call to 13650 * dtrace_enabling_provide(), but we must hold it to actually 13651 * enable the probes. We have to drop all of our locks, pick 13652 * up cpu_lock, and regain our locks before matching the 13653 * retained anonymous enabling. 
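 * (The ordering is cpu_lock before dtrace_provider_lock before
 * dtrace_lock, as at the top of dtrace_attach() itself; because we hold
 * the latter two but not cpu_lock, the only safe way to take cpu_lock
 * is to back all the way out and reacquire all three in order, as the
 * code below does.)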
13654 */ 13655 mutex_exit(&dtrace_lock); 13656 mutex_exit(&dtrace_provider_lock); 13657 13658 mutex_enter(&cpu_lock); 13659 mutex_enter(&dtrace_provider_lock); 13660 mutex_enter(&dtrace_lock); 13661 13662 if ((enab = dtrace_anon.dta_enabling) != NULL) 13663 (void) dtrace_enabling_match(enab, NULL); 13664 13665 mutex_exit(&cpu_lock); 13666 } 13667 13668 mutex_exit(&dtrace_lock); 13669 mutex_exit(&dtrace_provider_lock); 13670 13671 if (state != NULL) { 13672 /* 13673 * If we created any anonymous state, set it going now. 13674 */ 13675 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 13676 } 13677 13678 return (DDI_SUCCESS); 13679 } 13680 13681 /*ARGSUSED*/ 13682 static int 13683 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 13684 { 13685 dtrace_state_t *state; 13686 uint32_t priv; 13687 uid_t uid; 13688 zoneid_t zoneid; 13689 13690 if (getminor(*devp) == DTRACEMNRN_HELPER) 13691 return (0); 13692 13693 /* 13694 * If this wasn't an open with the "helper" minor, then it must be 13695 * the "dtrace" minor. 13696 */ 13697 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE); 13698 13699 /* 13700 * If no DTRACE_PRIV_* bits are set in the credential, then the 13701 * caller lacks sufficient permission to do anything with DTrace. 13702 */ 13703 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 13704 if (priv == DTRACE_PRIV_NONE) 13705 return (EACCES); 13706 13707 /* 13708 * Ask all providers to provide all their probes. 13709 */ 13710 mutex_enter(&dtrace_provider_lock); 13711 dtrace_probe_provide(NULL, NULL); 13712 mutex_exit(&dtrace_provider_lock); 13713 13714 mutex_enter(&cpu_lock); 13715 mutex_enter(&dtrace_lock); 13716 dtrace_opens++; 13717 dtrace_membar_producer(); 13718 13719 /* 13720 * If the kernel debugger is active (that is, if the kernel debugger 13721 * modified text in some way), we won't allow the open. 13722 */ 13723 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 13724 dtrace_opens--; 13725 mutex_exit(&cpu_lock); 13726 mutex_exit(&dtrace_lock); 13727 return (EBUSY); 13728 } 13729 13730 state = dtrace_state_create(devp, cred_p); 13731 mutex_exit(&cpu_lock); 13732 13733 if (state == NULL) { 13734 if (--dtrace_opens == 0) 13735 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 13736 mutex_exit(&dtrace_lock); 13737 return (EAGAIN); 13738 } 13739 13740 mutex_exit(&dtrace_lock); 13741 13742 return (0); 13743 } 13744 13745 /*ARGSUSED*/ 13746 static int 13747 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 13748 { 13749 minor_t minor = getminor(dev); 13750 dtrace_state_t *state; 13751 13752 if (minor == DTRACEMNRN_HELPER) 13753 return (0); 13754 13755 state = ddi_get_soft_state(dtrace_softstate, minor); 13756 13757 mutex_enter(&cpu_lock); 13758 mutex_enter(&dtrace_lock); 13759 13760 if (state->dts_anon) { 13761 /* 13762 * There is anonymous state. Destroy that first. 
13763 */ 13764 ASSERT(dtrace_anon.dta_state == NULL); 13765 dtrace_state_destroy(state->dts_anon); 13766 } 13767 13768 dtrace_state_destroy(state); 13769 ASSERT(dtrace_opens > 0); 13770 if (--dtrace_opens == 0) 13771 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 13772 13773 mutex_exit(&dtrace_lock); 13774 mutex_exit(&cpu_lock); 13775 13776 return (0); 13777 } 13778 13779 /*ARGSUSED*/ 13780 static int 13781 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 13782 { 13783 int rval; 13784 dof_helper_t help, *dhp = NULL; 13785 13786 switch (cmd) { 13787 case DTRACEHIOC_ADDDOF: 13788 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 13789 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 13790 return (EFAULT); 13791 } 13792 13793 dhp = &help; 13794 arg = (intptr_t)help.dofhp_dof; 13795 /*FALLTHROUGH*/ 13796 13797 case DTRACEHIOC_ADD: { 13798 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 13799 13800 if (dof == NULL) 13801 return (rval); 13802 13803 mutex_enter(&dtrace_lock); 13804 13805 /* 13806 * dtrace_helper_slurp() takes responsibility for the dof -- 13807 * it may free it now or it may save it and free it later. 13808 */ 13809 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 13810 *rv = rval; 13811 rval = 0; 13812 } else { 13813 rval = EINVAL; 13814 } 13815 13816 mutex_exit(&dtrace_lock); 13817 return (rval); 13818 } 13819 13820 case DTRACEHIOC_REMOVE: { 13821 mutex_enter(&dtrace_lock); 13822 rval = dtrace_helper_destroygen(arg); 13823 mutex_exit(&dtrace_lock); 13824 13825 return (rval); 13826 } 13827 13828 default: 13829 break; 13830 } 13831 13832 return (ENOTTY); 13833 } 13834 13835 /*ARGSUSED*/ 13836 static int 13837 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 13838 { 13839 minor_t minor = getminor(dev); 13840 dtrace_state_t *state; 13841 int rval; 13842 13843 if (minor == DTRACEMNRN_HELPER) 13844 return (dtrace_ioctl_helper(cmd, arg, rv)); 13845 13846 state = ddi_get_soft_state(dtrace_softstate, minor); 13847 13848 if (state->dts_anon) { 13849 ASSERT(dtrace_anon.dta_state == NULL); 13850 state = state->dts_anon; 13851 } 13852 13853 switch (cmd) { 13854 case DTRACEIOC_PROVIDER: { 13855 dtrace_providerdesc_t pvd; 13856 dtrace_provider_t *pvp; 13857 13858 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 13859 return (EFAULT); 13860 13861 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 13862 mutex_enter(&dtrace_provider_lock); 13863 13864 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 13865 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 13866 break; 13867 } 13868 13869 mutex_exit(&dtrace_provider_lock); 13870 13871 if (pvp == NULL) 13872 return (ESRCH); 13873 13874 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 13875 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 13876 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 13877 return (EFAULT); 13878 13879 return (0); 13880 } 13881 13882 case DTRACEIOC_EPROBE: { 13883 dtrace_eprobedesc_t epdesc; 13884 dtrace_ecb_t *ecb; 13885 dtrace_action_t *act; 13886 void *buf; 13887 size_t size; 13888 uintptr_t dest; 13889 int nrecs; 13890 13891 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 13892 return (EFAULT); 13893 13894 mutex_enter(&dtrace_lock); 13895 13896 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 13897 mutex_exit(&dtrace_lock); 13898 return (EINVAL); 13899 } 13900 13901 if (ecb->dte_probe == NULL) { 13902 mutex_exit(&dtrace_lock); 13903 return (EINVAL); 13904 } 13905 13906 epdesc.dtepd_probeid = 
ecb->dte_probe->dtpr_id; 13907 epdesc.dtepd_uarg = ecb->dte_uarg; 13908 epdesc.dtepd_size = ecb->dte_size; 13909 13910 nrecs = epdesc.dtepd_nrecs; 13911 epdesc.dtepd_nrecs = 0; 13912 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 13913 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 13914 continue; 13915 13916 epdesc.dtepd_nrecs++; 13917 } 13918 13919 /* 13920 * Now that we have the size, we need to allocate a temporary 13921 * buffer in which to store the complete description. We need 13922 * the temporary buffer to be able to drop dtrace_lock() 13923 * across the copyout(), below. 13924 */ 13925 size = sizeof (dtrace_eprobedesc_t) + 13926 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 13927 13928 buf = kmem_alloc(size, KM_SLEEP); 13929 dest = (uintptr_t)buf; 13930 13931 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 13932 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 13933 13934 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 13935 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 13936 continue; 13937 13938 if (nrecs-- == 0) 13939 break; 13940 13941 bcopy(&act->dta_rec, (void *)dest, 13942 sizeof (dtrace_recdesc_t)); 13943 dest += sizeof (dtrace_recdesc_t); 13944 } 13945 13946 mutex_exit(&dtrace_lock); 13947 13948 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 13949 kmem_free(buf, size); 13950 return (EFAULT); 13951 } 13952 13953 kmem_free(buf, size); 13954 return (0); 13955 } 13956 13957 case DTRACEIOC_AGGDESC: { 13958 dtrace_aggdesc_t aggdesc; 13959 dtrace_action_t *act; 13960 dtrace_aggregation_t *agg; 13961 int nrecs; 13962 uint32_t offs; 13963 dtrace_recdesc_t *lrec; 13964 void *buf; 13965 size_t size; 13966 uintptr_t dest; 13967 13968 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 13969 return (EFAULT); 13970 13971 mutex_enter(&dtrace_lock); 13972 13973 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 13974 mutex_exit(&dtrace_lock); 13975 return (EINVAL); 13976 } 13977 13978 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 13979 13980 nrecs = aggdesc.dtagd_nrecs; 13981 aggdesc.dtagd_nrecs = 0; 13982 13983 offs = agg->dtag_base; 13984 lrec = &agg->dtag_action.dta_rec; 13985 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 13986 13987 for (act = agg->dtag_first; ; act = act->dta_next) { 13988 ASSERT(act->dta_intuple || 13989 DTRACEACT_ISAGG(act->dta_kind)); 13990 13991 /* 13992 * If this action has a record size of zero, it 13993 * denotes an argument to the aggregating action. 13994 * Because the presence of this record doesn't (or 13995 * shouldn't) affect the way the data is interpreted, 13996 * we don't copy it out to save user-level the 13997 * confusion of dealing with a zero-length record. 13998 */ 13999 if (act->dta_rec.dtrd_size == 0) { 14000 ASSERT(agg->dtag_hasarg); 14001 continue; 14002 } 14003 14004 aggdesc.dtagd_nrecs++; 14005 14006 if (act == &agg->dtag_action) 14007 break; 14008 } 14009 14010 /* 14011 * Now that we have the size, we need to allocate a temporary 14012 * buffer in which to store the complete description. We need 14013 * the temporary buffer to be able to drop dtrace_lock() 14014 * across the copyout(), below. 
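 * (copyout() may take a page fault on the user address and block;
 * dtrace_lock must not be held across an operation that can block, so
 * we assemble the description under the lock and perform the copyout()
 * only after dropping it.)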
	case DTRACEIOC_AGGDESC: {
		dtrace_aggdesc_t aggdesc;
		dtrace_action_t *act;
		dtrace_aggregation_t *agg;
		int nrecs;
		uint32_t offs;
		dtrace_recdesc_t *lrec;
		void *buf;
		size_t size;
		uintptr_t dest;

		if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);

		if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
			mutex_exit(&dtrace_lock);
			return (EINVAL);
		}

		aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;

		nrecs = aggdesc.dtagd_nrecs;
		aggdesc.dtagd_nrecs = 0;

		offs = agg->dtag_base;
		lrec = &agg->dtag_action.dta_rec;
		aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;

		for (act = agg->dtag_first; ; act = act->dta_next) {
			ASSERT(act->dta_intuple ||
			    DTRACEACT_ISAGG(act->dta_kind));

			/*
			 * If this action has a record size of zero, it
			 * denotes an argument to the aggregating action.
			 * Because the presence of this record doesn't (or
			 * shouldn't) affect the way the data is interpreted,
			 * we don't copy it out, sparing user level the
			 * confusion of dealing with a zero-length record.
			 */
			if (act->dta_rec.dtrd_size == 0) {
				ASSERT(agg->dtag_hasarg);
				continue;
			}

			aggdesc.dtagd_nrecs++;

			if (act == &agg->dtag_action)
				break;
		}

		/*
		 * Now that we have the size, we need to allocate a temporary
		 * buffer in which to store the complete description.  We need
		 * the temporary buffer to be able to drop dtrace_lock()
		 * across the copyout(), below.
		 */
		size = sizeof (dtrace_aggdesc_t) +
		    (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));

		buf = kmem_alloc(size, KM_SLEEP);
		dest = (uintptr_t)buf;

		bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
		dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);

		for (act = agg->dtag_first; ; act = act->dta_next) {
			dtrace_recdesc_t rec = act->dta_rec;

			/*
			 * See the comment in the above loop for why we pass
			 * over zero-length records.
			 */
			if (rec.dtrd_size == 0) {
				ASSERT(agg->dtag_hasarg);
				continue;
			}

			if (nrecs-- == 0)
				break;

			rec.dtrd_offset -= offs;
			bcopy(&rec, (void *)dest, sizeof (rec));
			dest += sizeof (dtrace_recdesc_t);

			if (act == &agg->dtag_action)
				break;
		}

		mutex_exit(&dtrace_lock);

		if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
			kmem_free(buf, size);
			return (EFAULT);
		}

		kmem_free(buf, size);
		return (0);
	}

	case DTRACEIOC_ENABLE: {
		dof_hdr_t *dof;
		dtrace_enabling_t *enab = NULL;
		dtrace_vstate_t *vstate;
		int err = 0;

		*rv = 0;

		/*
		 * If a NULL argument has been passed, we take this as our
		 * cue to reevaluate our enablings.
		 */
		if (arg == NULL) {
			mutex_enter(&cpu_lock);
			mutex_enter(&dtrace_lock);
			err = dtrace_enabling_matchstate(state, rv);
			mutex_exit(&dtrace_lock);
			mutex_exit(&cpu_lock);

			return (err);
		}

		if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
			return (rval);

		mutex_enter(&cpu_lock);
		mutex_enter(&dtrace_lock);
		vstate = &state->dts_vstate;

		if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&cpu_lock);
			dtrace_dof_destroy(dof);
			return (EBUSY);
		}

		if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&cpu_lock);
			dtrace_dof_destroy(dof);
			return (EINVAL);
		}

		if ((rval = dtrace_dof_options(dof, state)) != 0) {
			dtrace_enabling_destroy(enab);
			mutex_exit(&dtrace_lock);
			mutex_exit(&cpu_lock);
			dtrace_dof_destroy(dof);
			return (rval);
		}

		if ((err = dtrace_enabling_match(enab, rv)) == 0) {
			err = dtrace_enabling_retain(enab);
		} else {
			dtrace_enabling_destroy(enab);
		}

		mutex_exit(&cpu_lock);
		mutex_exit(&dtrace_lock);
		dtrace_dof_destroy(dof);

		return (err);
	}
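	/*
	 * For illustration, the two modes of DTRACEIOC_ENABLE as seen from
	 * user level (a hedged sketch; real consumers go through libdtrace):
	 *
	 *	n = ioctl(fd, DTRACEIOC_ENABLE, dof);	(enable from DOF)
	 *	n = ioctl(fd, DTRACEIOC_ENABLE, NULL);	(reevaluate enablings)
	 *
	 * In both modes, the number of probes matched is handed back through
	 * *rv, and thus becomes the ioctl(2) return value on success.
	 */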
	case DTRACEIOC_REPLICATE: {
		dtrace_repldesc_t desc;
		dtrace_probedesc_t *match = &desc.dtrpd_match;
		dtrace_probedesc_t *create = &desc.dtrpd_create;
		int err;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
		match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
		match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
		match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';

		create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
		create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
		create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
		create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';

		mutex_enter(&dtrace_lock);
		err = dtrace_enabling_replicate(state, match, create);
		mutex_exit(&dtrace_lock);

		return (err);
	}

	case DTRACEIOC_PROBEMATCH:
	case DTRACEIOC_PROBES: {
		dtrace_probe_t *probe = NULL;
		dtrace_probedesc_t desc;
		dtrace_probekey_t pkey;
		dtrace_id_t i;
		int m = 0;
		uint32_t priv;
		uid_t uid;
		zoneid_t zoneid;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
		desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
		desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
		desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';

		/*
		 * Before we attempt to match this probe, we want to give
		 * all providers the opportunity to provide it.
		 */
		if (desc.dtpd_id == DTRACE_IDNONE) {
			mutex_enter(&dtrace_provider_lock);
			dtrace_probe_provide(&desc, NULL);
			mutex_exit(&dtrace_provider_lock);
			desc.dtpd_id++;
		}

		if (cmd == DTRACEIOC_PROBEMATCH) {
			dtrace_probekey(&desc, &pkey);
			pkey.dtpk_id = DTRACE_IDNONE;
		}

		dtrace_cred2priv(cr, &priv, &uid, &zoneid);

		mutex_enter(&dtrace_lock);

		if (cmd == DTRACEIOC_PROBEMATCH) {
			for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
				if ((probe = dtrace_probes[i - 1]) != NULL &&
				    (m = dtrace_match_probe(probe, &pkey,
				    priv, uid, zoneid)) != 0)
					break;
			}

			if (m < 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}

		} else {
			for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
				if ((probe = dtrace_probes[i - 1]) != NULL &&
				    dtrace_match_priv(probe, priv, uid, zoneid))
					break;
			}
		}

		if (probe == NULL) {
			mutex_exit(&dtrace_lock);
			return (ESRCH);
		}

		dtrace_probe_description(probe, &desc);
		mutex_exit(&dtrace_lock);

		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}
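	/*
	 * The cases above define an iteration protocol:  the consumer seeds
	 * dtpd_id, and each successful call returns the description of the
	 * first (matching) probe with an id greater than or equal to it.  A
	 * hedged sketch of a user-level walk (hypothetical code):
	 *
	 *	desc.dtpd_id = DTRACE_IDNONE;
	 *	while (ioctl(fd, DTRACEIOC_PROBES, &desc) == 0) {
	 *		process(&desc);
	 *		desc.dtpd_id++;
	 *	}
	 *
	 * ESRCH, rather than an empty description, denotes the end of the
	 * walk.
	 */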
	case DTRACEIOC_PROBEARG: {
		dtrace_argdesc_t desc;
		dtrace_probe_t *probe;
		dtrace_provider_t *prov;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		if (desc.dtargd_id == DTRACE_IDNONE)
			return (EINVAL);

		if (desc.dtargd_ndx == DTRACE_ARGNONE)
			return (EINVAL);

		mutex_enter(&dtrace_provider_lock);
		mutex_enter(&mod_lock);
		mutex_enter(&dtrace_lock);

		if (desc.dtargd_id > dtrace_nprobes) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&mod_lock);
			mutex_exit(&dtrace_provider_lock);
			return (EINVAL);
		}

		if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&mod_lock);
			mutex_exit(&dtrace_provider_lock);
			return (EINVAL);
		}

		mutex_exit(&dtrace_lock);

		prov = probe->dtpr_provider;

		if (prov->dtpv_pops.dtps_getargdesc == NULL) {
			/*
			 * There isn't any typed information for this probe.
			 * Set the argument number to DTRACE_ARGNONE.
			 */
			desc.dtargd_ndx = DTRACE_ARGNONE;
		} else {
			desc.dtargd_native[0] = '\0';
			desc.dtargd_xlate[0] = '\0';
			desc.dtargd_mapping = desc.dtargd_ndx;

			prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
			    probe->dtpr_id, probe->dtpr_arg, &desc);
		}

		mutex_exit(&mod_lock);
		mutex_exit(&dtrace_provider_lock);

		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_GO: {
		processorid_t cpuid;
		rval = dtrace_state_go(state, &cpuid);

		if (rval != 0)
			return (rval);

		if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_STOP: {
		processorid_t cpuid;

		mutex_enter(&dtrace_lock);
		rval = dtrace_state_stop(state, &cpuid);
		mutex_exit(&dtrace_lock);

		if (rval != 0)
			return (rval);

		if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_DOFGET: {
		dof_hdr_t hdr, *dof;
		uint64_t len;

		if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);
		dof = dtrace_dof_create(state);
		mutex_exit(&dtrace_lock);

		len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
		rval = copyout(dof, (void *)arg, len);
		dtrace_dof_destroy(dof);

		return (rval == 0 ? 0 : EFAULT);
	}
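	/*
	 * Note that DTRACEIOC_DOFGET, just above, bounds its copyout() by
	 * the caller-supplied dofh_loadsz, which permits a two-call sizing
	 * protocol.  A hedged, hypothetical consumer-side sketch:
	 *
	 *	dof_hdr_t hdr;
	 *
	 *	hdr.dofh_loadsz = sizeof (hdr);
	 *	(void) ioctl(fd, DTRACEIOC_DOFGET, &hdr);	(header only)
	 *	dof = malloc(hdr.dofh_loadsz);
	 *	dof->dofh_loadsz = hdr.dofh_loadsz;
	 *	(void) ioctl(fd, DTRACEIOC_DOFGET, dof);	(entire image)
	 */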
	case DTRACEIOC_AGGSNAP:
	case DTRACEIOC_BUFSNAP: {
		dtrace_bufdesc_t desc;
		caddr_t cached;
		dtrace_buffer_t *buf;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
			return (EINVAL);

		mutex_enter(&dtrace_lock);

		if (cmd == DTRACEIOC_BUFSNAP) {
			buf = &state->dts_buffer[desc.dtbd_cpu];
		} else {
			buf = &state->dts_aggbuffer[desc.dtbd_cpu];
		}

		if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
			size_t sz = buf->dtb_offset;

			if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
				mutex_exit(&dtrace_lock);
				return (EBUSY);
			}

			/*
			 * If this buffer has already been consumed, we're
			 * going to indicate that there's nothing left here
			 * to consume.
			 */
			if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
				mutex_exit(&dtrace_lock);

				desc.dtbd_size = 0;
				desc.dtbd_drops = 0;
				desc.dtbd_errors = 0;
				desc.dtbd_oldest = 0;
				sz = sizeof (desc);

				if (copyout(&desc, (void *)arg, sz) != 0)
					return (EFAULT);

				return (0);
			}

			/*
			 * If this is a ring buffer that has wrapped, we want
			 * to copy the whole thing out.
			 */
			if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
				dtrace_buffer_polish(buf);
				sz = buf->dtb_size;
			}

			if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
				mutex_exit(&dtrace_lock);
				return (EFAULT);
			}

			desc.dtbd_size = sz;
			desc.dtbd_drops = buf->dtb_drops;
			desc.dtbd_errors = buf->dtb_errors;
			desc.dtbd_oldest = buf->dtb_xamot_offset;

			mutex_exit(&dtrace_lock);

			if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
				return (EFAULT);

			buf->dtb_flags |= DTRACEBUF_CONSUMED;

			return (0);
		}

		if (buf->dtb_tomax == NULL) {
			ASSERT(buf->dtb_xamot == NULL);
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		cached = buf->dtb_tomax;
		ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));

		dtrace_xcall(desc.dtbd_cpu,
		    (dtrace_xcall_t)dtrace_buffer_switch, buf);

		state->dts_errors += buf->dtb_xamot_errors;

		/*
		 * If the buffers did not actually switch, then the cross call
		 * did not take place -- presumably because the given CPU is
		 * not in the ready set.  If this is the case, we'll return
		 * ENOENT.
		 */
		if (buf->dtb_tomax == cached) {
			ASSERT(buf->dtb_xamot != cached);
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		ASSERT(cached == buf->dtb_xamot);

		/*
		 * We have our snapshot; now copy it out.
		 */
		if (copyout(buf->dtb_xamot, desc.dtbd_data,
		    buf->dtb_xamot_offset) != 0) {
			mutex_exit(&dtrace_lock);
			return (EFAULT);
		}

		desc.dtbd_size = buf->dtb_xamot_offset;
		desc.dtbd_drops = buf->dtb_xamot_drops;
		desc.dtbd_errors = buf->dtb_xamot_errors;
		desc.dtbd_oldest = 0;

		mutex_exit(&dtrace_lock);

		/*
		 * Finally, copy out the buffer description.
		 */
		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}
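	/*
	 * To recap the switching-buffer path above:  the snapshot is taken
	 * by cross calling dtrace_buffer_switch() onto the target CPU,
	 * which exchanges the active (dtb_tomax) and inactive (dtb_xamot)
	 * buffers; the consumer then reads the newly inactive buffer at its
	 * leisure.  If the pointers are unchanged after the cross call, the
	 * CPU wasn't in the ready set and ENOENT is returned -- the caller
	 * is expected to treat this as "no data", not as an error.
	 */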
	case DTRACEIOC_CONF: {
		dtrace_conf_t conf;

		bzero(&conf, sizeof (conf));
		conf.dtc_difversion = DIF_VERSION;
		conf.dtc_difintregs = DIF_DIR_NREGS;
		conf.dtc_diftupregs = DIF_DTR_NREGS;
		conf.dtc_ctfmodel = CTF_MODEL_NATIVE;

		if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_STATUS: {
		dtrace_status_t stat;
		dtrace_dstate_t *dstate;
		int i, j;
		uint64_t nerrs;

		/*
		 * See the comment in dtrace_state_deadman() for the reason
		 * for setting dts_laststatus to INT64_MAX before setting
		 * it to the correct value.
		 */
		state->dts_laststatus = INT64_MAX;
		dtrace_membar_producer();
		state->dts_laststatus = dtrace_gethrtime();

		bzero(&stat, sizeof (stat));

		mutex_enter(&dtrace_lock);

		if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
			stat.dtst_exiting = 1;

		nerrs = state->dts_errors;
		dstate = &state->dts_vstate.dtvs_dynvars;

		for (i = 0; i < NCPU; i++) {
			dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];

			stat.dtst_dyndrops += dcpu->dtdsc_drops;
			stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
			stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;

			if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
				stat.dtst_filled++;

			nerrs += state->dts_buffer[i].dtb_errors;

			for (j = 0; j < state->dts_nspeculations; j++) {
				dtrace_speculation_t *spec;
				dtrace_buffer_t *buf;

				spec = &state->dts_speculations[j];
				buf = &spec->dtsp_buffer[i];
				stat.dtst_specdrops += buf->dtb_xamot_drops;
			}
		}

		stat.dtst_specdrops_busy = state->dts_speculations_busy;
		stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
		stat.dtst_stkstroverflows = state->dts_stkstroverflows;
		stat.dtst_dblerrors = state->dts_dblerrors;
		stat.dtst_killed =
		    (state->dts_activity == DTRACE_ACTIVITY_KILLED);
		stat.dtst_errors = nerrs;

		mutex_exit(&dtrace_lock);

		if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
			return (EFAULT);

		return (0);
	}
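	/*
	 * The INT64_MAX store in the case above deserves emphasis:  because
	 * a 64-bit store may not be performed atomically with respect to
	 * the deadman's load, the update is issued as two ordered stores --
	 *
	 *	state->dts_laststatus = INT64_MAX;	(can only look "late")
	 *	dtrace_membar_producer();		(order the stores)
	 *	state->dts_laststatus = dtrace_gethrtime();
	 *
	 * -- assuring that dts_laststatus never transiently appears to be
	 * earlier than it actually is.
	 */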
	case DTRACEIOC_FORMAT: {
		dtrace_fmtdesc_t fmt;
		char *str;
		int len;

		if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);

		if (fmt.dtfd_format == 0 ||
		    fmt.dtfd_format > state->dts_nformats) {
			mutex_exit(&dtrace_lock);
			return (EINVAL);
		}

		/*
		 * Format strings are allocated contiguously and they are
		 * never freed; if a format index is less than the number
		 * of formats, we can assert that the format map is non-NULL
		 * and that the format for the specified index is non-NULL.
		 */
		ASSERT(state->dts_formats != NULL);
		str = state->dts_formats[fmt.dtfd_format - 1];
		ASSERT(str != NULL);

		len = strlen(str) + 1;

		if (len > fmt.dtfd_length) {
			fmt.dtfd_length = len;

			if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		} else {
			if (copyout(str, fmt.dtfd_string, len) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		}

		mutex_exit(&dtrace_lock);
		return (0);
	}
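	/*
	 * The DTRACEIOC_FORMAT case above follows the usual two-step length
	 * negotiation:  if the supplied buffer is too small, the required
	 * length is passed back (with a successful return) and the consumer
	 * retries.  A hedged, hypothetical sketch of the user-level side:
	 *
	 *	fmt.dtfd_format = id;
	 *	fmt.dtfd_length = 0;
	 *	(void) ioctl(fd, DTRACEIOC_FORMAT, &fmt);	(sizes string)
	 *	fmt.dtfd_string = malloc(fmt.dtfd_length);
	 *	(void) ioctl(fd, DTRACEIOC_FORMAT, &fmt);	(copies string)
	 */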
	default:
		break;
	}

	return (ENOTTY);
}

/*ARGSUSED*/
static int
dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	dtrace_state_t *state;

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	mutex_enter(&cpu_lock);
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);

	ASSERT(dtrace_opens == 0);

	if (dtrace_helpers > 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	dtrace_provider = NULL;

	if ((state = dtrace_anon_grab()) != NULL) {
		/*
		 * If there were ECBs on this state, the provider should
		 * not have been allowed to detach; assert that there are
		 * none.
		 */
		ASSERT(state->dts_necbs == 0);
		dtrace_state_destroy(state);

		/*
		 * If we're being detached with anonymous state, we need to
		 * indicate to the kernel debugger that DTrace is now inactive.
		 */
		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
	}

	bzero(&dtrace_anon, sizeof (dtrace_anon_t));
	unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
	dtrace_cpu_init = NULL;
	dtrace_helpers_cleanup = NULL;
	dtrace_helpers_fork = NULL;
	dtrace_cpustart_init = NULL;
	dtrace_cpustart_fini = NULL;
	dtrace_debugger_init = NULL;
	dtrace_debugger_fini = NULL;
	dtrace_kreloc_init = NULL;
	dtrace_kreloc_fini = NULL;
	dtrace_modload = NULL;
	dtrace_modunload = NULL;

	mutex_exit(&cpu_lock);

	if (dtrace_helptrace_enabled) {
		kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
		dtrace_helptrace_buffer = NULL;
	}

	kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
	dtrace_probes = NULL;
	dtrace_nprobes = 0;

	dtrace_hash_destroy(dtrace_bymod);
	dtrace_hash_destroy(dtrace_byfunc);
	dtrace_hash_destroy(dtrace_byname);
	dtrace_bymod = NULL;
	dtrace_byfunc = NULL;
	dtrace_byname = NULL;

	kmem_cache_destroy(dtrace_state_cache);
	vmem_destroy(dtrace_minor);
	vmem_destroy(dtrace_arena);

	if (dtrace_toxrange != NULL) {
		kmem_free(dtrace_toxrange,
		    dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
		dtrace_toxrange = NULL;
		dtrace_toxranges = 0;
		dtrace_toxranges_max = 0;
	}

	ddi_remove_minor_node(dtrace_devi, NULL);
	dtrace_devi = NULL;

	ddi_soft_state_fini(&dtrace_softstate);

	ASSERT(dtrace_vtime_references == 0);
	ASSERT(dtrace_opens == 0);
	ASSERT(dtrace_retained == NULL);

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);
	/*
	 * We don't destroy the task queue until after we have dropped our
	 * locks (taskq_destroy() may block on running tasks).  To prevent
	 * attempting to do work after we have effectively detached but before
	 * the task queue has been destroyed, all tasks dispatched via the
	 * task queue must check that DTrace is still attached before
	 * performing any operation.
	 */
	taskq_destroy(dtrace_taskq);
	dtrace_taskq = NULL;

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)dtrace_devi;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}

static struct cb_ops dtrace_cb_ops = {
	dtrace_open,		/* open */
	dtrace_close,		/* close */
	nulldev,		/* strategy */
	nulldev,		/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	dtrace_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_MP		/* Driver compatibility flag */
};

static struct dev_ops dtrace_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	dtrace_info,		/* get_dev_info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	dtrace_attach,		/* attach */
	dtrace_detach,		/* detach */
	nodev,			/* reset */
	&dtrace_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	nodev			/* dev power */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* module type (this is a pseudo driver) */
	"Dynamic Tracing",	/* name of module */
	&dtrace_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}