1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * DTrace - Dynamic Tracing for Solaris 31 * 32 * This is the implementation of the Solaris Dynamic Tracing framework 33 * (DTrace). The user-visible interface to DTrace is described at length in 34 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace 35 * library, the in-kernel DTrace framework, and the DTrace providers are 36 * described in the block comments in the <sys/dtrace.h> header file. The 37 * internal architecture of DTrace is described in the block comments in the 38 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace 39 * implementation very much assume mastery of all of these sources; if one has 40 * an unanswered question about the implementation, one should consult them 41 * first. 42 * 43 * The functions here are ordered roughly as follows: 44 * 45 * - Probe context functions 46 * - Probe hashing functions 47 * - Non-probe context utility functions 48 * - Matching functions 49 * - Provider-to-Framework API functions 50 * - Probe management functions 51 * - DIF object functions 52 * - Format functions 53 * - Predicate functions 54 * - ECB functions 55 * - Buffer functions 56 * - Enabling functions 57 * - DOF functions 58 * - Anonymous enabling functions 59 * - Consumer state functions 60 * - Helper functions 61 * - Hook functions 62 * - Driver cookbook functions 63 * 64 * Each group of functions begins with a block comment labelled the "DTrace 65 * [Group] Functions", allowing one to find each block by searching forward 66 * on capital-f functions. 67 */ 68 #include <sys/errno.h> 69 #include <sys/stat.h> 70 #include <sys/modctl.h> 71 #include <sys/conf.h> 72 #include <sys/systm.h> 73 #include <sys/ddi.h> 74 #include <sys/sunddi.h> 75 #include <sys/cpuvar.h> 76 #include <sys/kmem.h> 77 #include <sys/strsubr.h> 78 #include <sys/sysmacros.h> 79 #include <sys/dtrace_impl.h> 80 #include <sys/atomic.h> 81 #include <sys/cmn_err.h> 82 #include <sys/mutex_impl.h> 83 #include <sys/rwlock_impl.h> 84 #include <sys/ctf_api.h> 85 #include <sys/panic.h> 86 #include <sys/priv_impl.h> 87 #include <sys/policy.h> 88 #include <sys/cred_impl.h> 89 #include <sys/procfs_isa.h> 90 #include <sys/taskq.h> 91 #include <sys/mkdev.h> 92 #include <sys/kdi.h> 93 #include <sys/zone.h> 94 95 /* 96 * DTrace Tunable Variables 97 * 98 * The following variables may be tuned by adding a line to /etc/system that 99 * includes both the name of the DTrace module ("dtrace") and the name of the 100 * variable. 
For example: 101 * 102 * set dtrace:dtrace_destructive_disallow = 1 103 * 104 * In general, the only variables that one should be tuning this way are those 105 * that affect system-wide DTrace behavior, and for which the default behavior 106 * is undesirable. Most of these variables are tunable on a per-consumer 107 * basis using DTrace options, and need not be tuned on a system-wide basis. 108 * When tuning these variables, avoid pathological values; while some attempt 109 * is made to verify the integrity of these variables, they are not considered 110 * part of the supported interface to DTrace, and they are therefore not 111 * checked comprehensively. Further, these variables should not be tuned 112 * dynamically via "mdb -kw" or other means; they should only be tuned via 113 * /etc/system. 114 */ 115 int dtrace_destructive_disallow = 0; 116 dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024); 117 size_t dtrace_difo_maxsize = (256 * 1024); 118 dtrace_optval_t dtrace_dof_maxsize = (256 * 1024); 119 size_t dtrace_global_maxsize = (16 * 1024); 120 size_t dtrace_actions_max = (16 * 1024); 121 size_t dtrace_retain_max = 1024; 122 dtrace_optval_t dtrace_helper_actions_max = 32; 123 dtrace_optval_t dtrace_helper_providers_max = 32; 124 dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024); 125 size_t dtrace_strsize_default = 256; 126 dtrace_optval_t dtrace_cleanrate_default = 9900990; /* 101 hz */ 127 dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */ 128 dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */ 129 dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */ 130 dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */ 131 dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */ 132 dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */ 133 dtrace_optval_t dtrace_nspec_default = 1; 134 dtrace_optval_t dtrace_specsize_default = 32 * 1024; 135 dtrace_optval_t dtrace_stackframes_default = 20; 136 dtrace_optval_t dtrace_ustackframes_default = 20; 137 dtrace_optval_t dtrace_jstackframes_default = 50; 138 dtrace_optval_t dtrace_jstackstrsize_default = 512; 139 int dtrace_msgdsize_max = 128; 140 hrtime_t dtrace_chill_max = 500 * (NANOSEC / MILLISEC); /* 500 ms */ 141 hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */ 142 int dtrace_devdepth_max = 32; 143 int dtrace_err_verbose; 144 hrtime_t dtrace_deadman_interval = NANOSEC; 145 hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC; 146 hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC; 147 148 /* 149 * DTrace External Variables 150 * 151 * As dtrace(7D) is a kernel module, any DTrace variables are obviously 152 * available to DTrace consumers via the backtick (`) syntax. One of these, 153 * dtrace_zero, is made deliberately so: it is provided as a source of 154 * well-known, zero-filled memory. While this variable is not documented, 155 * it is used by some translators as an implementation detail. 
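 *
 * (Editorial illustration, not part of the original source: the backtick
 * syntax mentioned above lets a consumer read one of these kernel
 * variables directly from D.  For example, a hypothetical one-liner such
 * as
 *
 *	dtrace -qn 'BEGIN { printf("%d\n", `dtrace_helptrace_bufsize); exit(0); }'
 *
 * would print the current value of the dtrace_helptrace_bufsize variable
 * declared later in this file.)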
156 */ 157 const char dtrace_zero[256] = { 0 }; /* zero-filled memory */ 158 159 /* 160 * DTrace Internal Variables 161 */ 162 static dev_info_t *dtrace_devi; /* device info */ 163 static vmem_t *dtrace_arena; /* probe ID arena */ 164 static vmem_t *dtrace_minor; /* minor number arena */ 165 static taskq_t *dtrace_taskq; /* task queue */ 166 static dtrace_probe_t **dtrace_probes; /* array of all probes */ 167 static int dtrace_nprobes; /* number of probes */ 168 static dtrace_provider_t *dtrace_provider; /* provider list */ 169 static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */ 170 static int dtrace_opens; /* number of opens */ 171 static int dtrace_helpers; /* number of helpers */ 172 static void *dtrace_softstate; /* softstate pointer */ 173 static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */ 174 static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */ 175 static dtrace_hash_t *dtrace_byname; /* probes hashed by name */ 176 static dtrace_toxrange_t *dtrace_toxrange; /* toxic range array */ 177 static int dtrace_toxranges; /* number of toxic ranges */ 178 static int dtrace_toxranges_max; /* size of toxic range array */ 179 static dtrace_anon_t dtrace_anon; /* anonymous enabling */ 180 static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */ 181 static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */ 182 static kthread_t *dtrace_panicked; /* panicking thread */ 183 static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */ 184 static dtrace_genid_t dtrace_probegen; /* current probe generation */ 185 static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */ 186 static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */ 187 188 /* 189 * DTrace Locking 190 * DTrace is protected by three (relatively coarse-grained) locks: 191 * 192 * (1) dtrace_lock is required to manipulate essentially any DTrace state, 193 * including enabling state, probes, ECBs, consumer state, helper state, 194 * etc. Importantly, dtrace_lock is _not_ required when in probe context; 195 * probe context is lock-free -- synchronization is handled via the 196 * dtrace_sync() cross call mechanism. 197 * 198 * (2) dtrace_provider_lock is required when manipulating provider state, or 199 * when provider state must be held constant. 200 * 201 * (3) dtrace_meta_lock is required when manipulating meta provider state, or 202 * when meta provider state must be held constant. 203 * 204 * The lock ordering between these three locks is dtrace_meta_lock before 205 * dtrace_provider_lock before dtrace_lock. (In particular, there are 206 * several places where dtrace_provider_lock is held by the framework as it 207 * calls into the providers -- which then call back into the framework, 208 * grabbing dtrace_lock.) 209 * 210 * There are two other locks in the mix: mod_lock and cpu_lock. With respect 211 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical 212 * role as a coarse-grained lock; it is acquired before both of these locks. 213 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must 214 * be acquired _between_ dtrace_meta_lock and any other DTrace locks. 215 * mod_lock is similar with respect to dtrace_provider_lock in that it must be 216 * acquired _between_ dtrace_provider_lock and dtrace_lock. 
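 *
 * (Editorial sketch, not part of the original source: taken together, the
 * ordering rules above mean that a caller needing all five locks would
 * nest them as follows; no single function here necessarily takes all of
 * them, so this only illustrates the relative order:
 *
 *	mutex_enter(&dtrace_meta_lock);
 *	mutex_enter(&cpu_lock);
 *	mutex_enter(&dtrace_provider_lock);
 *	mutex_enter(&mod_lock);
 *	mutex_enter(&dtrace_lock);
 *	...
 *	mutex_exit(&dtrace_lock);
 *	mutex_exit(&mod_lock);
 *	mutex_exit(&dtrace_provider_lock);
 *	mutex_exit(&cpu_lock);
 *	mutex_exit(&dtrace_meta_lock);
 *
 * where the exits simply unwind in the reverse of the acquisition order.)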
 */
static kmutex_t		dtrace_lock;		/* probe state lock */
static kmutex_t		dtrace_provider_lock;	/* provider state lock */
static kmutex_t		dtrace_meta_lock;	/* meta-provider state lock */

/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t	dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static dtrace_pops_t	dtrace_provider_ops = {
	(void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop,
	(void (*)(void *, struct modctl *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	NULL,
	NULL,
	NULL,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 */
uint32_t dtrace_helptrace_next = 0;
uint32_t dtrace_helptrace_nlocals;
char	*dtrace_helptrace_buffer;
int	dtrace_helptrace_bufsize = 512 * 1024;

#ifdef DEBUG
int	dtrace_helptrace_enabled = 1;
#else
int	dtrace_helptrace_enabled = 0;
#endif

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table.  This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation.  The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#ifdef DEBUG
static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
static const char	*dtrace_errlast;
static kthread_t	*dtrace_errthread;
static kmutex_t		dtrace_errlock;
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation.  There is no real structure to this cpp
 * mishmash -- but is there ever?
294 */ 295 #define DTRACE_HASHSTR(hash, probe) \ 296 dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs))) 297 298 #define DTRACE_HASHNEXT(hash, probe) \ 299 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs) 300 301 #define DTRACE_HASHPREV(hash, probe) \ 302 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs) 303 304 #define DTRACE_HASHEQ(hash, lhs, rhs) \ 305 (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \ 306 *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0) 307 308 #define DTRACE_AGGHASHSIZE_SLEW 17 309 310 /* 311 * The key for a thread-local variable consists of the lower 61 bits of the 312 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL. 313 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never 314 * equal to a variable identifier. This is necessary (but not sufficient) to 315 * assure that global associative arrays never collide with thread-local 316 * variables. To guarantee that they cannot collide, we must also define the 317 * order for keying dynamic variables. That order is: 318 * 319 * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ] 320 * 321 * Because the variable-key and the tls-key are in orthogonal spaces, there is 322 * no way for a global variable key signature to match a thread-local key 323 * signature. 324 */ 325 #define DTRACE_TLS_THRKEY(where) { \ 326 uint_t intr = 0; \ 327 uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \ 328 for (; actv; actv >>= 1) \ 329 intr++; \ 330 ASSERT(intr < (1 << 3)); \ 331 (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \ 332 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \ 333 } 334 335 #define DTRACE_STORE(type, tomax, offset, what) \ 336 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what); 337 338 #ifndef __i386 339 #define DTRACE_ALIGNCHECK(addr, size, flags) \ 340 if (addr & (size - 1)) { \ 341 *flags |= CPU_DTRACE_BADALIGN; \ 342 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \ 343 return (0); \ 344 } 345 #else 346 #define DTRACE_ALIGNCHECK(addr, size, flags) 347 #endif 348 349 #define DTRACE_LOADFUNC(bits) \ 350 /*CSTYLED*/ \ 351 uint##bits##_t \ 352 dtrace_load##bits(uintptr_t addr) \ 353 { \ 354 size_t size = bits / NBBY; \ 355 /*CSTYLED*/ \ 356 uint##bits##_t rval; \ 357 int i; \ 358 volatile uint16_t *flags = (volatile uint16_t *) \ 359 &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; \ 360 \ 361 DTRACE_ALIGNCHECK(addr, size, flags); \ 362 \ 363 for (i = 0; i < dtrace_toxranges; i++) { \ 364 if (addr >= dtrace_toxrange[i].dtt_limit) \ 365 continue; \ 366 \ 367 if (addr + size <= dtrace_toxrange[i].dtt_base) \ 368 continue; \ 369 \ 370 /* \ 371 * This address falls within a toxic region; return 0. \ 372 */ \ 373 *flags |= CPU_DTRACE_BADADDR; \ 374 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \ 375 return (0); \ 376 } \ 377 \ 378 *flags |= CPU_DTRACE_NOFAULT; \ 379 /*CSTYLED*/ \ 380 rval = *((volatile uint##bits##_t *)addr); \ 381 *flags &= ~CPU_DTRACE_NOFAULT; \ 382 \ 383 return (rval); \ 384 } 385 386 #ifdef _LP64 387 #define dtrace_loadptr dtrace_load64 388 #else 389 #define dtrace_loadptr dtrace_load32 390 #endif 391 392 #define DTRACE_MATCH_NEXT 0 393 #define DTRACE_MATCH_DONE 1 394 #define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0') 395 #define DTRACE_STATE_ALIGN 64 396 397 #define DTRACE_FLAGS2FLT(flags) \ 398 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \ 399 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \ 400 ((flags) & CPU_DTRACE_DIVZERO) ? 
DTRACEFLT_DIVZERO : \ 401 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \ 402 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \ 403 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \ 404 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \ 405 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \ 406 DTRACEFLT_UNKNOWN) 407 408 #define DTRACEACT_ISSTRING(act) \ 409 ((act)->dta_kind == DTRACEACT_DIFEXPR && \ 410 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) 411 412 static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id); 413 static void dtrace_enabling_provide(dtrace_provider_t *); 414 static int dtrace_enabling_match(dtrace_enabling_t *, int *); 415 static void dtrace_enabling_matchall(void); 416 static dtrace_state_t *dtrace_anon_grab(void); 417 static uint64_t dtrace_helper(int, dtrace_mstate_t *, 418 dtrace_state_t *, uint64_t, uint64_t); 419 static dtrace_helpers_t *dtrace_helpers_create(proc_t *); 420 static void dtrace_buffer_drop(dtrace_buffer_t *); 421 static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t, 422 dtrace_state_t *, dtrace_mstate_t *); 423 static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t, 424 dtrace_optval_t); 425 static int dtrace_ecb_create_enable(dtrace_probe_t *, void *); 426 427 /* 428 * DTrace Probe Context Functions 429 * 430 * These functions are called from probe context. Because probe context is 431 * any context in which C may be called, arbitrarily locks may be held, 432 * interrupts may be disabled, we may be in arbitrary dispatched state, etc. 433 * As a result, functions called from probe context may only call other DTrace 434 * support functions -- they may not interact at all with the system at large. 435 * (Note that the ASSERT macro is made probe-context safe by redefining it in 436 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary 437 * loads are to be performed from probe context, they _must_ be in terms of 438 * the safe dtrace_load*() variants. 439 * 440 * Some functions in this block are not actually called from probe context; 441 * for these functions, there will be a comment above the function reading 442 * "Note: not called from probe context." 443 */ 444 void 445 dtrace_panic(const char *format, ...) 446 { 447 va_list alist; 448 449 va_start(alist, format); 450 dtrace_vpanic(format, alist); 451 va_end(alist); 452 } 453 454 int 455 dtrace_assfail(const char *a, const char *f, int l) 456 { 457 dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l); 458 459 /* 460 * We just need something here that even the most clever compiler 461 * cannot optimize away. 462 */ 463 return (a[(uintptr_t)f]); 464 } 465 466 /* 467 * Atomically increment a specified error counter from probe context. 468 */ 469 static void 470 dtrace_error(uint32_t *counter) 471 { 472 /* 473 * Most counters stored to in probe context are per-CPU counters. 474 * However, there are some error conditions that are sufficiently 475 * arcane that they don't merit per-CPU storage. If these counters 476 * are incremented concurrently on different CPUs, scalability will be 477 * adversely affected -- but we don't expect them to be white-hot in a 478 * correctly constructed enabling... 479 */ 480 uint32_t oval, nval; 481 482 do { 483 oval = *counter; 484 485 if ((nval = oval + 1) == 0) { 486 /* 487 * If the counter would wrap, set it to 1 -- assuring 488 * that the counter is never zero when we have seen 489 * errors. 
(The counter must be 32-bits because we 490 * aren't guaranteed a 64-bit compare&swap operation.) 491 * To save this code both the infamy of being fingered 492 * by a priggish news story and the indignity of being 493 * the target of a neo-puritan witch trial, we're 494 * carefully avoiding any colorful description of the 495 * likelihood of this condition -- but suffice it to 496 * say that it is only slightly more likely than the 497 * overflow of predicate cache IDs, as discussed in 498 * dtrace_predicate_create(). 499 */ 500 nval = 1; 501 } 502 } while (dtrace_cas32(counter, oval, nval) != oval); 503 } 504 505 /* 506 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a 507 * uint8_t, a uint16_t, a uint32_t and a uint64_t. 508 */ 509 DTRACE_LOADFUNC(8) 510 DTRACE_LOADFUNC(16) 511 DTRACE_LOADFUNC(32) 512 DTRACE_LOADFUNC(64) 513 514 static int 515 dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate) 516 { 517 if (dest < mstate->dtms_scratch_base) 518 return (0); 519 520 if (dest + size < dest) 521 return (0); 522 523 if (dest + size > mstate->dtms_scratch_ptr) 524 return (0); 525 526 return (1); 527 } 528 529 static int 530 dtrace_canstore_statvar(uint64_t addr, size_t sz, 531 dtrace_statvar_t **svars, int nsvars) 532 { 533 int i; 534 535 for (i = 0; i < nsvars; i++) { 536 dtrace_statvar_t *svar = svars[i]; 537 538 if (svar == NULL || svar->dtsv_size == 0) 539 continue; 540 541 if (addr - svar->dtsv_data < svar->dtsv_size && 542 addr + sz <= svar->dtsv_data + svar->dtsv_size) 543 return (1); 544 } 545 546 return (0); 547 } 548 549 /* 550 * Check to see if the address is within a memory region to which a store may 551 * be issued. This includes the DTrace scratch areas, and any DTrace variable 552 * region. The caller of dtrace_canstore() is responsible for performing any 553 * alignment checks that are needed before stores are actually executed. 554 */ 555 static int 556 dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 557 dtrace_vstate_t *vstate) 558 { 559 uintptr_t a; 560 size_t s; 561 562 /* 563 * First, check to see if the address is in scratch space... 564 */ 565 a = mstate->dtms_scratch_base; 566 s = mstate->dtms_scratch_size; 567 568 if (addr - a < s && addr + sz <= a + s) 569 return (1); 570 571 /* 572 * Now check to see if it's a dynamic variable. This check will pick 573 * up both thread-local variables and any global dynamically-allocated 574 * variables. 575 */ 576 a = (uintptr_t)vstate->dtvs_dynvars.dtds_base; 577 s = vstate->dtvs_dynvars.dtds_size; 578 if (addr - a < s && addr + sz <= a + s) 579 return (1); 580 581 /* 582 * Finally, check the static local and global variables. These checks 583 * take the longest, so we perform them last. 584 */ 585 if (dtrace_canstore_statvar(addr, sz, 586 vstate->dtvs_locals, vstate->dtvs_nlocals)) 587 return (1); 588 589 if (dtrace_canstore_statvar(addr, sz, 590 vstate->dtvs_globals, vstate->dtvs_nglobals)) 591 return (1); 592 593 return (0); 594 } 595 596 /* 597 * Compare two strings using safe loads. 
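 *
 * (Editorial note: the range checks in dtrace_canstore() and
 * dtrace_canstore_statvar() above depend on unsigned arithmetic.  For
 * unsigned addr, base and size, the test
 *
 *	addr - base < size
 *
 * holds exactly when base <= addr < base + size, because an addr below
 * base wraps around to a very large difference; the companion test
 * addr + sz <= base + size then bounds the end of the access.)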
598 */ 599 static int 600 dtrace_strncmp(char *s1, char *s2, size_t limit) 601 { 602 uint8_t c1, c2; 603 volatile uint16_t *flags; 604 605 if (s1 == s2 || limit == 0) 606 return (0); 607 608 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 609 610 do { 611 if (s1 == NULL) { 612 c1 = '\0'; 613 } else { 614 c1 = dtrace_load8((uintptr_t)s1++); 615 } 616 617 if (s2 == NULL) { 618 c2 = '\0'; 619 } else { 620 c2 = dtrace_load8((uintptr_t)s2++); 621 } 622 623 if (c1 != c2) 624 return (c1 - c2); 625 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT)); 626 627 return (0); 628 } 629 630 /* 631 * Compute strlen(s) for a string using safe memory accesses. The additional 632 * len parameter is used to specify a maximum length to ensure completion. 633 */ 634 static size_t 635 dtrace_strlen(const char *s, size_t lim) 636 { 637 uint_t len; 638 639 for (len = 0; len != lim; len++) { 640 if (dtrace_load8((uintptr_t)s++) == '\0') 641 break; 642 } 643 644 return (len); 645 } 646 647 /* 648 * Check if an address falls within a toxic region. 649 */ 650 static int 651 dtrace_istoxic(uintptr_t kaddr, size_t size) 652 { 653 uintptr_t taddr, tsize; 654 int i; 655 656 for (i = 0; i < dtrace_toxranges; i++) { 657 taddr = dtrace_toxrange[i].dtt_base; 658 tsize = dtrace_toxrange[i].dtt_limit - taddr; 659 660 if (kaddr - taddr < tsize) { 661 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 662 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = kaddr; 663 return (1); 664 } 665 666 if (taddr - kaddr < size) { 667 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 668 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = taddr; 669 return (1); 670 } 671 } 672 673 return (0); 674 } 675 676 /* 677 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe 678 * memory specified by the DIF program. The dst is assumed to be safe memory 679 * that we can store to directly because it is managed by DTrace. As with 680 * standard bcopy, overlapping copies are handled properly. 681 */ 682 static void 683 dtrace_bcopy(const void *src, void *dst, size_t len) 684 { 685 if (len != 0) { 686 uint8_t *s1 = dst; 687 const uint8_t *s2 = src; 688 689 if (s1 <= s2) { 690 do { 691 *s1++ = dtrace_load8((uintptr_t)s2++); 692 } while (--len != 0); 693 } else { 694 s2 += len; 695 s1 += len; 696 697 do { 698 *--s1 = dtrace_load8((uintptr_t)--s2); 699 } while (--len != 0); 700 } 701 } 702 } 703 704 /* 705 * Copy src to dst using safe memory accesses, up to either the specified 706 * length, or the point that a nul byte is encountered. The src is assumed to 707 * be unsafe memory specified by the DIF program. The dst is assumed to be 708 * safe memory that we can store to directly because it is managed by DTrace. 709 * Unlike dtrace_bcopy(), overlapping regions are not handled. 710 */ 711 static void 712 dtrace_strcpy(const void *src, void *dst, size_t len) 713 { 714 if (len != 0) { 715 uint8_t *s1 = dst, c; 716 const uint8_t *s2 = src; 717 718 do { 719 *s1++ = c = dtrace_load8((uintptr_t)s2++); 720 } while (--len != 0 && c != '\0'); 721 } 722 } 723 724 /* 725 * Copy src to dst, deriving the size and type from the specified (BYREF) 726 * variable type. The src is assumed to be unsafe memory specified by the DIF 727 * program. The dst is assumed to be DTrace variable memory that is of the 728 * specified type; we assume that we can store to directly. 
729 */ 730 static void 731 dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type) 732 { 733 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 734 735 if (type->dtdt_kind == DIF_TYPE_STRING) { 736 dtrace_strcpy(src, dst, type->dtdt_size); 737 } else { 738 dtrace_bcopy(src, dst, type->dtdt_size); 739 } 740 } 741 742 /* 743 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be 744 * unsafe memory specified by the DIF program. The s2 data is assumed to be 745 * safe memory that we can access directly because it is managed by DTrace. 746 */ 747 static int 748 dtrace_bcmp(const void *s1, const void *s2, size_t len) 749 { 750 volatile uint16_t *flags; 751 752 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 753 754 if (s1 == s2) 755 return (0); 756 757 if (s1 == NULL || s2 == NULL) 758 return (1); 759 760 if (s1 != s2 && len != 0) { 761 const uint8_t *ps1 = s1; 762 const uint8_t *ps2 = s2; 763 764 do { 765 if (dtrace_load8((uintptr_t)ps1++) != *ps2++) 766 return (1); 767 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT)); 768 } 769 return (0); 770 } 771 772 /* 773 * Zero the specified region using a simple byte-by-byte loop. Note that this 774 * is for safe DTrace-managed memory only. 775 */ 776 static void 777 dtrace_bzero(void *dst, size_t len) 778 { 779 uchar_t *cp; 780 781 for (cp = dst; len != 0; len--) 782 *cp++ = 0; 783 } 784 785 /* 786 * This privilege check should be used by actions and subroutines to 787 * verify that the user credentials of the process that enabled the 788 * invoking ECB match the target credentials 789 */ 790 static int 791 dtrace_priv_proc_common_user(dtrace_state_t *state) 792 { 793 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 794 795 /* 796 * We should always have a non-NULL state cred here, since if cred 797 * is null (anonymous tracing), we fast-path bypass this routine. 798 */ 799 ASSERT(s_cr != NULL); 800 801 if ((cr = CRED()) != NULL && 802 s_cr->cr_uid == cr->cr_uid && 803 s_cr->cr_uid == cr->cr_ruid && 804 s_cr->cr_uid == cr->cr_suid && 805 s_cr->cr_gid == cr->cr_gid && 806 s_cr->cr_gid == cr->cr_rgid && 807 s_cr->cr_gid == cr->cr_sgid) 808 return (1); 809 810 return (0); 811 } 812 813 /* 814 * This privilege check should be used by actions and subroutines to 815 * verify that the zone of the process that enabled the invoking ECB 816 * matches the target credentials 817 */ 818 static int 819 dtrace_priv_proc_common_zone(dtrace_state_t *state) 820 { 821 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 822 823 /* 824 * We should always have a non-NULL state cred here, since if cred 825 * is null (anonymous tracing), we fast-path bypass this routine. 826 */ 827 ASSERT(s_cr != NULL); 828 829 if ((cr = CRED()) != NULL && 830 s_cr->cr_zone == cr->cr_zone) 831 return (1); 832 833 return (0); 834 } 835 836 /* 837 * This privilege check should be used by actions and subroutines to 838 * verify that the process has not setuid or changed credentials. 
839 */ 840 static int 841 dtrace_priv_proc_common_nocd() 842 { 843 proc_t *proc; 844 845 if ((proc = ttoproc(curthread)) != NULL && 846 !(proc->p_flag & SNOCD)) 847 return (1); 848 849 return (0); 850 } 851 852 static int 853 dtrace_priv_proc_destructive(dtrace_state_t *state) 854 { 855 int action = state->dts_cred.dcr_action; 856 857 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) && 858 dtrace_priv_proc_common_zone(state) == 0) 859 goto bad; 860 861 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) && 862 dtrace_priv_proc_common_user(state) == 0) 863 goto bad; 864 865 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) && 866 dtrace_priv_proc_common_nocd() == 0) 867 goto bad; 868 869 return (1); 870 871 bad: 872 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 873 874 return (0); 875 } 876 877 static int 878 dtrace_priv_proc_control(dtrace_state_t *state) 879 { 880 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL) 881 return (1); 882 883 if (dtrace_priv_proc_common_zone(state) && 884 dtrace_priv_proc_common_user(state) && 885 dtrace_priv_proc_common_nocd()) 886 return (1); 887 888 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 889 890 return (0); 891 } 892 893 static int 894 dtrace_priv_proc(dtrace_state_t *state) 895 { 896 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC) 897 return (1); 898 899 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 900 901 return (0); 902 } 903 904 static int 905 dtrace_priv_kernel(dtrace_state_t *state) 906 { 907 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL) 908 return (1); 909 910 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 911 912 return (0); 913 } 914 915 static int 916 dtrace_priv_kernel_destructive(dtrace_state_t *state) 917 { 918 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE) 919 return (1); 920 921 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 922 923 return (0); 924 } 925 926 /* 927 * Note: not called from probe context. This function is called 928 * asynchronously (and at a regular interval) from outside of probe context to 929 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable 930 * cleaning is explained in detail in <sys/dtrace_impl.h>. 931 */ 932 void 933 dtrace_dynvar_clean(dtrace_dstate_t *dstate) 934 { 935 dtrace_dynvar_t *dirty; 936 dtrace_dstate_percpu_t *dcpu; 937 int i, work = 0; 938 939 for (i = 0; i < NCPU; i++) { 940 dcpu = &dstate->dtds_percpu[i]; 941 942 ASSERT(dcpu->dtdsc_rinsing == NULL); 943 944 /* 945 * If the dirty list is NULL, there is no dirty work to do. 946 */ 947 if (dcpu->dtdsc_dirty == NULL) 948 continue; 949 950 /* 951 * If the clean list is non-NULL, then we're not going to do 952 * any work for this CPU -- it means that there has not been 953 * a dtrace_dynvar() allocation on this CPU (or from this CPU) 954 * since the last time we cleaned house. 955 */ 956 if (dcpu->dtdsc_clean != NULL) 957 continue; 958 959 work = 1; 960 961 /* 962 * Atomically move the dirty list aside. 963 */ 964 do { 965 dirty = dcpu->dtdsc_dirty; 966 967 /* 968 * Before we zap the dirty list, set the rinsing list. 969 * (This allows for a potential assertion in 970 * dtrace_dynvar(): if a free dynamic variable appears 971 * on a hash chain, either the dirty list or the 972 * rinsing list for some CPU must be non-NULL.) 
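			 *
			 * (Editorial summary of the list lifecycle as
			 * implemented here and in dtrace_dynvar(): a freed
			 * chunk is pushed onto its CPU's dirty list; this
			 * cleaner atomically moves dirty to rinsing, issues
			 * a dtrace_sync(), and then promotes rinsing to
			 * clean; dtrace_dynvar() later swings an entire
			 * clean list onto the free list, from which new
			 * allocations are popped.)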
973 */ 974 dcpu->dtdsc_rinsing = dirty; 975 dtrace_membar_producer(); 976 } while (dtrace_casptr(&dcpu->dtdsc_dirty, 977 dirty, NULL) != dirty); 978 } 979 980 if (!work) { 981 /* 982 * We have no work to do; we can simply return. 983 */ 984 return; 985 } 986 987 dtrace_sync(); 988 989 for (i = 0; i < NCPU; i++) { 990 dcpu = &dstate->dtds_percpu[i]; 991 992 if (dcpu->dtdsc_rinsing == NULL) 993 continue; 994 995 /* 996 * We are now guaranteed that no hash chain contains a pointer 997 * into this dirty list; we can make it clean. 998 */ 999 ASSERT(dcpu->dtdsc_clean == NULL); 1000 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing; 1001 dcpu->dtdsc_rinsing = NULL; 1002 } 1003 1004 /* 1005 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make 1006 * sure that all CPUs have seen all of the dtdsc_clean pointers. 1007 * This prevents a race whereby a CPU incorrectly decides that 1008 * the state should be something other than DTRACE_DSTATE_CLEAN 1009 * after dtrace_dynvar_clean() has completed. 1010 */ 1011 dtrace_sync(); 1012 1013 dstate->dtds_state = DTRACE_DSTATE_CLEAN; 1014 } 1015 1016 /* 1017 * Depending on the value of the op parameter, this function looks-up, 1018 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an 1019 * allocation is requested, this function will return a pointer to a 1020 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no 1021 * variable can be allocated. If NULL is returned, the appropriate counter 1022 * will be incremented. 1023 */ 1024 dtrace_dynvar_t * 1025 dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys, 1026 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op) 1027 { 1028 uint64_t hashval = 1; 1029 dtrace_dynhash_t *hash = dstate->dtds_hash; 1030 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL; 1031 processorid_t me = CPU->cpu_id, cpu = me; 1032 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me]; 1033 size_t bucket, ksize; 1034 size_t chunksize = dstate->dtds_chunksize; 1035 uintptr_t kdata, lock, nstate; 1036 uint_t i; 1037 1038 ASSERT(nkeys != 0); 1039 1040 /* 1041 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time" 1042 * algorithm. For the by-value portions, we perform the algorithm in 1043 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a 1044 * bit, and seems to have only a minute effect on distribution. For 1045 * the by-reference data, we perform "One-at-a-time" iterating (safely) 1046 * over each referenced byte. It's painful to do this, but it's much 1047 * better than pathological hash distribution. The efficacy of the 1048 * hashing algorithm (and a comparison with other algorithms) may be 1049 * found by running the ::dtrace_dynstat MDB dcmd. 1050 */ 1051 for (i = 0; i < nkeys; i++) { 1052 if (key[i].dttk_size == 0) { 1053 uint64_t val = key[i].dttk_value; 1054 1055 hashval += (val >> 48) & 0xffff; 1056 hashval += (hashval << 10); 1057 hashval ^= (hashval >> 6); 1058 1059 hashval += (val >> 32) & 0xffff; 1060 hashval += (hashval << 10); 1061 hashval ^= (hashval >> 6); 1062 1063 hashval += (val >> 16) & 0xffff; 1064 hashval += (hashval << 10); 1065 hashval ^= (hashval >> 6); 1066 1067 hashval += val & 0xffff; 1068 hashval += (hashval << 10); 1069 hashval ^= (hashval >> 6); 1070 } else { 1071 /* 1072 * This is incredibly painful, but it beats the hell 1073 * out of the alternative. 
1074 */ 1075 uint64_t j, size = key[i].dttk_size; 1076 uintptr_t base = (uintptr_t)key[i].dttk_value; 1077 1078 for (j = 0; j < size; j++) { 1079 hashval += dtrace_load8(base + j); 1080 hashval += (hashval << 10); 1081 hashval ^= (hashval >> 6); 1082 } 1083 } 1084 } 1085 1086 hashval += (hashval << 3); 1087 hashval ^= (hashval >> 11); 1088 hashval += (hashval << 15); 1089 1090 /* 1091 * There is a remote chance (ideally, 1 in 2^32) that our hashval 1092 * comes out to be 0. We rely on a zero hashval denoting a free 1093 * element; if this actually happens, we set the hashval to 1. 1094 */ 1095 if (hashval == 0) 1096 hashval = 1; 1097 1098 /* 1099 * Yes, it's painful to do a divide here. If the cycle count becomes 1100 * important here, tricks can be pulled to reduce it. (However, it's 1101 * critical that hash collisions be kept to an absolute minimum; 1102 * they're much more painful than a divide.) It's better to have a 1103 * solution that generates few collisions and still keeps things 1104 * relatively simple. 1105 */ 1106 bucket = hashval % dstate->dtds_hashsize; 1107 1108 if (op == DTRACE_DYNVAR_DEALLOC) { 1109 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock; 1110 1111 for (;;) { 1112 while ((lock = *lockp) & 1) 1113 continue; 1114 1115 if (dtrace_casptr((void *)lockp, 1116 (void *)lock, (void *)(lock + 1)) == (void *)lock) 1117 break; 1118 } 1119 1120 dtrace_membar_producer(); 1121 } 1122 1123 top: 1124 prev = NULL; 1125 lock = hash[bucket].dtdh_lock; 1126 1127 dtrace_membar_consumer(); 1128 1129 start = hash[bucket].dtdh_chain; 1130 ASSERT(start == NULL || start->dtdv_hashval != 0 || 1131 op != DTRACE_DYNVAR_DEALLOC); 1132 1133 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) { 1134 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple; 1135 dtrace_key_t *dkey = &dtuple->dtt_key[0]; 1136 1137 if (dvar->dtdv_hashval != hashval) { 1138 if (dvar->dtdv_hashval == 0) { 1139 /* 1140 * We've gone off the rails. Somewhere 1141 * along the line, one of the members of this 1142 * hash chain was deleted. We could assert 1143 * that either the dirty list or the rinsing 1144 * list is non-NULL. (The dtrace_sync() in 1145 * dtrace_dynvar_clean() would validate this 1146 * assertion.) 1147 */ 1148 ASSERT(op != DTRACE_DYNVAR_DEALLOC); 1149 goto top; 1150 } 1151 1152 goto next; 1153 } 1154 1155 if (dtuple->dtt_nkeys != nkeys) 1156 goto next; 1157 1158 for (i = 0; i < nkeys; i++, dkey++) { 1159 if (dkey->dttk_size != key[i].dttk_size) 1160 goto next; /* size or type mismatch */ 1161 1162 if (dkey->dttk_size != 0) { 1163 if (dtrace_bcmp( 1164 (void *)(uintptr_t)key[i].dttk_value, 1165 (void *)(uintptr_t)dkey->dttk_value, 1166 dkey->dttk_size)) 1167 goto next; 1168 } else { 1169 if (dkey->dttk_value != key[i].dttk_value) 1170 goto next; 1171 } 1172 } 1173 1174 if (op != DTRACE_DYNVAR_DEALLOC) 1175 return (dvar); 1176 1177 ASSERT(dvar->dtdv_next == NULL || 1178 dvar->dtdv_next->dtdv_hashval != 0); 1179 1180 if (prev != NULL) { 1181 ASSERT(hash[bucket].dtdh_chain != dvar); 1182 ASSERT(start != dvar); 1183 ASSERT(prev->dtdv_next == dvar); 1184 prev->dtdv_next = dvar->dtdv_next; 1185 } else { 1186 if (dtrace_casptr(&hash[bucket].dtdh_chain, 1187 start, dvar->dtdv_next) != start) { 1188 /* 1189 * We have failed to atomically swing the 1190 * hash table head pointer, presumably because 1191 * of a conflicting allocation on another CPU. 1192 * We need to reread the hash chain and try 1193 * again. 
1194 */ 1195 goto top; 1196 } 1197 } 1198 1199 dtrace_membar_producer(); 1200 1201 /* 1202 * Now clear the hash value to indicate that it's free. 1203 */ 1204 ASSERT(hash[bucket].dtdh_chain != dvar); 1205 dvar->dtdv_hashval = 0; 1206 1207 dtrace_membar_producer(); 1208 1209 /* 1210 * Set the next pointer to point at the dirty list, and 1211 * atomically swing the dirty pointer to the newly freed dvar. 1212 */ 1213 do { 1214 next = dcpu->dtdsc_dirty; 1215 dvar->dtdv_next = next; 1216 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next); 1217 1218 /* 1219 * Finally, unlock this hash bucket. 1220 */ 1221 ASSERT(hash[bucket].dtdh_lock == lock); 1222 ASSERT(lock & 1); 1223 hash[bucket].dtdh_lock++; 1224 1225 return (NULL); 1226 next: 1227 prev = dvar; 1228 continue; 1229 } 1230 1231 if (op != DTRACE_DYNVAR_ALLOC) { 1232 /* 1233 * If we are not to allocate a new variable, we want to 1234 * return NULL now. Before we return, check that the value 1235 * of the lock word hasn't changed. If it has, we may have 1236 * seen an inconsistent snapshot. 1237 */ 1238 if (op == DTRACE_DYNVAR_NOALLOC) { 1239 if (hash[bucket].dtdh_lock != lock) 1240 goto top; 1241 } else { 1242 ASSERT(op == DTRACE_DYNVAR_DEALLOC); 1243 ASSERT(hash[bucket].dtdh_lock == lock); 1244 ASSERT(lock & 1); 1245 hash[bucket].dtdh_lock++; 1246 } 1247 1248 return (NULL); 1249 } 1250 1251 /* 1252 * We need to allocate a new dynamic variable. The size we need is the 1253 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the 1254 * size of any auxiliary key data (rounded up to 8-byte alignment) plus 1255 * the size of any referred-to data (dsize). We then round the final 1256 * size up to the chunksize for allocation. 1257 */ 1258 for (ksize = 0, i = 0; i < nkeys; i++) 1259 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 1260 1261 /* 1262 * This should be pretty much impossible, but could happen if, say, 1263 * strange DIF specified the tuple. Ideally, this should be an 1264 * assertion and not an error condition -- but that requires that the 1265 * chunksize calculation in dtrace_difo_chunksize() be absolutely 1266 * bullet-proof. (That is, it must not be able to be fooled by 1267 * malicious DIF.) Given the lack of backwards branches in DIF, 1268 * solving this would presumably not amount to solving the Halting 1269 * Problem -- but it still seems awfully hard. 1270 */ 1271 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) + 1272 ksize + dsize > chunksize) { 1273 dcpu->dtdsc_drops++; 1274 return (NULL); 1275 } 1276 1277 nstate = DTRACE_DSTATE_EMPTY; 1278 1279 do { 1280 retry: 1281 free = dcpu->dtdsc_free; 1282 1283 if (free == NULL) { 1284 dtrace_dynvar_t *clean = dcpu->dtdsc_clean; 1285 void *rval; 1286 1287 if (clean == NULL) { 1288 /* 1289 * We're out of dynamic variable space on 1290 * this CPU. Unless we have tried all CPUs, 1291 * we'll try to allocate from a different 1292 * CPU. 1293 */ 1294 switch (dstate->dtds_state) { 1295 case DTRACE_DSTATE_CLEAN: { 1296 void *sp = &dstate->dtds_state; 1297 1298 if (++cpu >= NCPU) 1299 cpu = 0; 1300 1301 if (dcpu->dtdsc_dirty != NULL && 1302 nstate == DTRACE_DSTATE_EMPTY) 1303 nstate = DTRACE_DSTATE_DIRTY; 1304 1305 if (dcpu->dtdsc_rinsing != NULL) 1306 nstate = DTRACE_DSTATE_RINSING; 1307 1308 dcpu = &dstate->dtds_percpu[cpu]; 1309 1310 if (cpu != me) 1311 goto retry; 1312 1313 (void) dtrace_cas32(sp, 1314 DTRACE_DSTATE_CLEAN, nstate); 1315 1316 /* 1317 * To increment the correct bean 1318 * counter, take another lap. 
1319 */ 1320 goto retry; 1321 } 1322 1323 case DTRACE_DSTATE_DIRTY: 1324 dcpu->dtdsc_dirty_drops++; 1325 break; 1326 1327 case DTRACE_DSTATE_RINSING: 1328 dcpu->dtdsc_rinsing_drops++; 1329 break; 1330 1331 case DTRACE_DSTATE_EMPTY: 1332 dcpu->dtdsc_drops++; 1333 break; 1334 } 1335 1336 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP); 1337 return (NULL); 1338 } 1339 1340 /* 1341 * The clean list appears to be non-empty. We want to 1342 * move the clean list to the free list; we start by 1343 * moving the clean pointer aside. 1344 */ 1345 if (dtrace_casptr(&dcpu->dtdsc_clean, 1346 clean, NULL) != clean) { 1347 /* 1348 * We are in one of two situations: 1349 * 1350 * (a) The clean list was switched to the 1351 * free list by another CPU. 1352 * 1353 * (b) The clean list was added to by the 1354 * cleansing cyclic. 1355 * 1356 * In either of these situations, we can 1357 * just reattempt the free list allocation. 1358 */ 1359 goto retry; 1360 } 1361 1362 ASSERT(clean->dtdv_hashval == 0); 1363 1364 /* 1365 * Now we'll move the clean list to the free list. 1366 * It's impossible for this to fail: the only way 1367 * the free list can be updated is through this 1368 * code path, and only one CPU can own the clean list. 1369 * Thus, it would only be possible for this to fail if 1370 * this code were racing with dtrace_dynvar_clean(). 1371 * (That is, if dtrace_dynvar_clean() updated the clean 1372 * list, and we ended up racing to update the free 1373 * list.) This race is prevented by the dtrace_sync() 1374 * in dtrace_dynvar_clean() -- which flushes the 1375 * owners of the clean lists out before resetting 1376 * the clean lists. 1377 */ 1378 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean); 1379 ASSERT(rval == NULL); 1380 goto retry; 1381 } 1382 1383 dvar = free; 1384 new_free = dvar->dtdv_next; 1385 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free); 1386 1387 /* 1388 * We have now allocated a new chunk. We copy the tuple keys into the 1389 * tuple array and copy any referenced key data into the data space 1390 * following the tuple array. As we do this, we relocate dttk_value 1391 * in the final tuple to point to the key data address in the chunk. 1392 */ 1393 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys]; 1394 dvar->dtdv_data = (void *)(kdata + ksize); 1395 dvar->dtdv_tuple.dtt_nkeys = nkeys; 1396 1397 for (i = 0; i < nkeys; i++) { 1398 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i]; 1399 size_t kesize = key[i].dttk_size; 1400 1401 if (kesize != 0) { 1402 dtrace_bcopy( 1403 (const void *)(uintptr_t)key[i].dttk_value, 1404 (void *)kdata, kesize); 1405 dkey->dttk_value = kdata; 1406 kdata += P2ROUNDUP(kesize, sizeof (uint64_t)); 1407 } else { 1408 dkey->dttk_value = key[i].dttk_value; 1409 } 1410 1411 dkey->dttk_size = kesize; 1412 } 1413 1414 ASSERT(dvar->dtdv_hashval == 0); 1415 dvar->dtdv_hashval = hashval; 1416 dvar->dtdv_next = start; 1417 1418 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start) 1419 return (dvar); 1420 1421 /* 1422 * The cas has failed. Either another CPU is adding an element to 1423 * this hash chain, or another CPU is deleting an element from this 1424 * hash chain. The simplest way to deal with both of these cases 1425 * (though not necessarily the most efficient) is to free our 1426 * allocated block and tail-call ourselves. Note that the free is 1427 * to the dirty list and _not_ to the free list. This is to prevent 1428 * races with allocators, above. 
1429 */ 1430 dvar->dtdv_hashval = 0; 1431 1432 dtrace_membar_producer(); 1433 1434 do { 1435 free = dcpu->dtdsc_dirty; 1436 dvar->dtdv_next = free; 1437 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); 1438 1439 return (dtrace_dynvar(dstate, nkeys, key, dsize, op)); 1440 } 1441 1442 /*ARGSUSED*/ 1443 static void 1444 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) 1445 { 1446 if (nval < *oval) 1447 *oval = nval; 1448 } 1449 1450 /*ARGSUSED*/ 1451 static void 1452 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) 1453 { 1454 if (nval > *oval) 1455 *oval = nval; 1456 } 1457 1458 static void 1459 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) 1460 { 1461 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; 1462 int64_t val = (int64_t)nval; 1463 1464 if (val < 0) { 1465 for (i = 0; i < zero; i++) { 1466 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { 1467 quanta[i] += incr; 1468 return; 1469 } 1470 } 1471 } else { 1472 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { 1473 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { 1474 quanta[i - 1] += incr; 1475 return; 1476 } 1477 } 1478 1479 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; 1480 return; 1481 } 1482 1483 ASSERT(0); 1484 } 1485 1486 static void 1487 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) 1488 { 1489 uint64_t arg = *lquanta++; 1490 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 1491 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 1492 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 1493 int32_t val = (int32_t)nval, level; 1494 1495 ASSERT(step != 0); 1496 ASSERT(levels != 0); 1497 1498 if (val < base) { 1499 /* 1500 * This is an underflow. 1501 */ 1502 lquanta[0] += incr; 1503 return; 1504 } 1505 1506 level = (val - base) / step; 1507 1508 if (level < levels) { 1509 lquanta[level + 1] += incr; 1510 return; 1511 } 1512 1513 /* 1514 * This is an overflow. 1515 */ 1516 lquanta[levels + 1] += incr; 1517 } 1518 1519 /*ARGSUSED*/ 1520 static void 1521 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg) 1522 { 1523 data[0]++; 1524 data[1] += nval; 1525 } 1526 1527 /*ARGSUSED*/ 1528 static void 1529 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg) 1530 { 1531 *oval = *oval + 1; 1532 } 1533 1534 /*ARGSUSED*/ 1535 static void 1536 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) 1537 { 1538 *oval += nval; 1539 } 1540 1541 /* 1542 * Aggregate given the tuple in the principal data buffer, and the aggregating 1543 * action denoted by the specified dtrace_aggregation_t. The aggregation 1544 * buffer is specified as the buf parameter. This routine does not return 1545 * failure; if there is no space in the aggregation buffer, the data will be 1546 * dropped, and a corresponding counter incremented. 
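 *
 * (Editorial worked example for dtrace_aggregate_lquantize() above,
 * assuming base = 0, step = 10 and levels = 5, which gives levels + 2 = 7
 * buckets: a value of -3 is below base and falls into the underflow bucket
 * lquanta[0]; a value of 23 yields level (23 - 0) / 10 = 2 and falls into
 * lquanta[3]; a value of 50 yields level 5, which is not less than levels,
 * so it falls into the overflow bucket lquanta[6].)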
1547 */ 1548 static void 1549 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf, 1550 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg) 1551 { 1552 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec; 1553 uint32_t i, ndx, size, fsize; 1554 uint32_t align = sizeof (uint64_t) - 1; 1555 dtrace_aggbuffer_t *agb; 1556 dtrace_aggkey_t *key; 1557 uint32_t hashval = 0, limit, isstr; 1558 caddr_t tomax, data, kdata; 1559 dtrace_actkind_t action; 1560 dtrace_action_t *act; 1561 uintptr_t offs; 1562 1563 if (buf == NULL) 1564 return; 1565 1566 if (!agg->dtag_hasarg) { 1567 /* 1568 * Currently, only quantize() and lquantize() take additional 1569 * arguments, and they have the same semantics: an increment 1570 * value that defaults to 1 when not present. If additional 1571 * aggregating actions take arguments, the setting of the 1572 * default argument value will presumably have to become more 1573 * sophisticated... 1574 */ 1575 arg = 1; 1576 } 1577 1578 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION; 1579 size = rec->dtrd_offset - agg->dtag_base; 1580 fsize = size + rec->dtrd_size; 1581 1582 ASSERT(dbuf->dtb_tomax != NULL); 1583 data = dbuf->dtb_tomax + offset + agg->dtag_base; 1584 1585 if ((tomax = buf->dtb_tomax) == NULL) { 1586 dtrace_buffer_drop(buf); 1587 return; 1588 } 1589 1590 /* 1591 * The metastructure is always at the bottom of the buffer. 1592 */ 1593 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size - 1594 sizeof (dtrace_aggbuffer_t)); 1595 1596 if (buf->dtb_offset == 0) { 1597 /* 1598 * We just kludge up approximately 1/8th of the size to be 1599 * buckets. If this guess ends up being routinely 1600 * off-the-mark, we may need to dynamically readjust this 1601 * based on past performance. 1602 */ 1603 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t); 1604 1605 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) < 1606 (uintptr_t)tomax || hashsize == 0) { 1607 /* 1608 * We've been given a ludicrously small buffer; 1609 * increment our drop count and leave. 1610 */ 1611 dtrace_buffer_drop(buf); 1612 return; 1613 } 1614 1615 /* 1616 * And now, a pathetic attempt to try to get a an odd (or 1617 * perchance, a prime) hash size for better hash distribution. 1618 */ 1619 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3)) 1620 hashsize -= DTRACE_AGGHASHSIZE_SLEW; 1621 1622 agb->dtagb_hashsize = hashsize; 1623 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb - 1624 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *)); 1625 agb->dtagb_free = (uintptr_t)agb->dtagb_hash; 1626 1627 for (i = 0; i < agb->dtagb_hashsize; i++) 1628 agb->dtagb_hash[i] = NULL; 1629 } 1630 1631 ASSERT(agg->dtag_first != NULL); 1632 ASSERT(agg->dtag_first->dta_intuple); 1633 1634 /* 1635 * Calculate the hash value based on the key. Note that we _don't_ 1636 * include the aggid in the hashing (but we will store it as part of 1637 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time" 1638 * algorithm: a simple, quick algorithm that has no known funnels, and 1639 * gets good distribution in practice. The efficacy of the hashing 1640 * algorithm (and a comparison with other algorithms) may be found by 1641 * running the ::dtrace_aggstat MDB dcmd. 
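	 *
	 * Editorial sketch, not part of the original source: in isolation,
	 * the "One-at-a-time" mixing performed by the loop below -- and, in
	 * 16-bit and bytewise form, by dtrace_dynvar() above -- amounts to
	 * the following over an ordinary byte buffer (the name oaat_hash is
	 * ours):
	 *
	 *	static uint32_t
	 *	oaat_hash(const uint8_t *data, size_t len)
	 *	{
	 *		uint32_t hashval = 0;
	 *		size_t i;
	 *
	 *		for (i = 0; i < len; i++) {
	 *			hashval += data[i];
	 *			hashval += (hashval << 10);
	 *			hashval ^= (hashval >> 6);
	 *		}
	 *
	 *		hashval += (hashval << 3);
	 *		hashval ^= (hashval >> 11);
	 *		hashval += (hashval << 15);
	 *
	 *		return (hashval);
	 *	}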
1642 */ 1643 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 1644 i = act->dta_rec.dtrd_offset - agg->dtag_base; 1645 limit = i + act->dta_rec.dtrd_size; 1646 ASSERT(limit <= size); 1647 isstr = DTRACEACT_ISSTRING(act); 1648 1649 for (; i < limit; i++) { 1650 hashval += data[i]; 1651 hashval += (hashval << 10); 1652 hashval ^= (hashval >> 6); 1653 1654 if (isstr && data[i] == '\0') 1655 break; 1656 } 1657 } 1658 1659 hashval += (hashval << 3); 1660 hashval ^= (hashval >> 11); 1661 hashval += (hashval << 15); 1662 1663 /* 1664 * Yes, the divide here is expensive -- but it's generally the least 1665 * of the performance issues given the amount of data that we iterate 1666 * over to compute hash values, compare data, etc. 1667 */ 1668 ndx = hashval % agb->dtagb_hashsize; 1669 1670 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 1671 ASSERT((caddr_t)key >= tomax); 1672 ASSERT((caddr_t)key < tomax + buf->dtb_size); 1673 1674 if (hashval != key->dtak_hashval || key->dtak_size != size) 1675 continue; 1676 1677 kdata = key->dtak_data; 1678 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 1679 1680 for (act = agg->dtag_first; act->dta_intuple; 1681 act = act->dta_next) { 1682 i = act->dta_rec.dtrd_offset - agg->dtag_base; 1683 limit = i + act->dta_rec.dtrd_size; 1684 ASSERT(limit <= size); 1685 isstr = DTRACEACT_ISSTRING(act); 1686 1687 for (; i < limit; i++) { 1688 if (kdata[i] != data[i]) 1689 goto next; 1690 1691 if (isstr && data[i] == '\0') 1692 break; 1693 } 1694 } 1695 1696 if (action != key->dtak_action) { 1697 /* 1698 * We are aggregating on the same value in the same 1699 * aggregation with two different aggregating actions. 1700 * (This should have been picked up in the compiler, 1701 * so we may be dealing with errant or devious DIF.) 1702 * This is an error condition; we indicate as much, 1703 * and return. 1704 */ 1705 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 1706 return; 1707 } 1708 1709 /* 1710 * This is a hit: we need to apply the aggregator to 1711 * the value at this key. 1712 */ 1713 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 1714 return; 1715 next: 1716 continue; 1717 } 1718 1719 /* 1720 * We didn't find it. We need to allocate some zero-filled space, 1721 * link it into the hash table appropriately, and apply the aggregator 1722 * to the (zero-filled) value. 1723 */ 1724 offs = buf->dtb_offset; 1725 while (offs & (align - 1)) 1726 offs += sizeof (uint32_t); 1727 1728 /* 1729 * If we don't have enough room to both allocate a new key _and_ 1730 * its associated data, increment the drop count and return. 1731 */ 1732 if ((uintptr_t)tomax + offs + fsize > 1733 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 1734 dtrace_buffer_drop(buf); 1735 return; 1736 } 1737 1738 /*CONSTCOND*/ 1739 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 1740 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 1741 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 1742 1743 key->dtak_data = kdata = tomax + offs; 1744 buf->dtb_offset = offs + fsize; 1745 1746 /* 1747 * Now copy the data across. 1748 */ 1749 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 1750 1751 for (i = sizeof (dtrace_aggid_t); i < size; i++) 1752 kdata[i] = data[i]; 1753 1754 /* 1755 * Because strings are not zeroed out by default, we need to iterate 1756 * looking for actions that store strings, and we need to explicitly 1757 * pad these strings out with zeroes. 
1758 */ 1759 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 1760 int nul; 1761 1762 if (!DTRACEACT_ISSTRING(act)) 1763 continue; 1764 1765 i = act->dta_rec.dtrd_offset - agg->dtag_base; 1766 limit = i + act->dta_rec.dtrd_size; 1767 ASSERT(limit <= size); 1768 1769 for (nul = 0; i < limit; i++) { 1770 if (nul) { 1771 kdata[i] = '\0'; 1772 continue; 1773 } 1774 1775 if (data[i] != '\0') 1776 continue; 1777 1778 nul = 1; 1779 } 1780 } 1781 1782 for (i = size; i < fsize; i++) 1783 kdata[i] = 0; 1784 1785 key->dtak_hashval = hashval; 1786 key->dtak_size = size; 1787 key->dtak_action = action; 1788 key->dtak_next = agb->dtagb_hash[ndx]; 1789 agb->dtagb_hash[ndx] = key; 1790 1791 /* 1792 * Finally, apply the aggregator. 1793 */ 1794 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 1795 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 1796 } 1797 1798 /* 1799 * Given consumer state, this routine finds a speculation in the INACTIVE 1800 * state and transitions it into the ACTIVE state. If there is no speculation 1801 * in the INACTIVE state, 0 is returned. In this case, no error counter is 1802 * incremented -- it is up to the caller to take appropriate action. 1803 */ 1804 static int 1805 dtrace_speculation(dtrace_state_t *state) 1806 { 1807 int i = 0; 1808 dtrace_speculation_state_t current; 1809 uint32_t *stat = &state->dts_speculations_unavail, count; 1810 1811 while (i < state->dts_nspeculations) { 1812 dtrace_speculation_t *spec = &state->dts_speculations[i]; 1813 1814 current = spec->dtsp_state; 1815 1816 if (current != DTRACESPEC_INACTIVE) { 1817 if (current == DTRACESPEC_COMMITTINGMANY || 1818 current == DTRACESPEC_COMMITTING || 1819 current == DTRACESPEC_DISCARDING) 1820 stat = &state->dts_speculations_busy; 1821 i++; 1822 continue; 1823 } 1824 1825 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 1826 current, DTRACESPEC_ACTIVE) == current) 1827 return (i + 1); 1828 } 1829 1830 /* 1831 * We couldn't find a speculation. If we found as much as a single 1832 * busy speculation buffer, we'll attribute this failure as "busy" 1833 * instead of "unavail". 1834 */ 1835 do { 1836 count = *stat; 1837 } while (dtrace_cas32(stat, count, count + 1) != count); 1838 1839 return (0); 1840 } 1841 1842 /* 1843 * This routine commits an active speculation. If the specified speculation 1844 * is not in a valid state to perform a commit(), this routine will silently do 1845 * nothing. 
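 *
 * (Editorial summary, not part of the original source, of the transitions
 * actually exercised by the routines in this file: dtrace_speculation()
 * moves a speculation from INACTIVE to ACTIVE; dtrace_speculation_commit()
 * moves ACTIVE, or ACTIVEONE on the CPU that speculated, to COMMITTING and
 * then back to INACTIVE, and moves ACTIVEONE on another CPU or ACTIVEMANY
 * to COMMITTINGMANY, to be cleaned asynchronously; and
 * dtrace_speculation_discard() moves ACTIVE, ACTIVEMANY, or ACTIVEONE on
 * another CPU to DISCARDING, and ACTIVEONE on the speculating CPU straight
 * back to INACTIVE.)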
The state of the specified speculation is transitioned according 1846 * to the state transition diagram outlined in <sys/dtrace_impl.h> 1847 */ 1848 static void 1849 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, 1850 dtrace_specid_t which) 1851 { 1852 dtrace_speculation_t *spec; 1853 dtrace_buffer_t *src, *dest; 1854 uintptr_t daddr, saddr, dlimit; 1855 dtrace_speculation_state_t current, new; 1856 intptr_t offs; 1857 1858 if (which == 0) 1859 return; 1860 1861 if (which > state->dts_nspeculations) { 1862 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 1863 return; 1864 } 1865 1866 spec = &state->dts_speculations[which - 1]; 1867 src = &spec->dtsp_buffer[cpu]; 1868 dest = &state->dts_buffer[cpu]; 1869 1870 do { 1871 current = spec->dtsp_state; 1872 1873 if (current == DTRACESPEC_COMMITTINGMANY) 1874 break; 1875 1876 switch (current) { 1877 case DTRACESPEC_INACTIVE: 1878 case DTRACESPEC_DISCARDING: 1879 return; 1880 1881 case DTRACESPEC_COMMITTING: 1882 /* 1883 * This is only possible if we are (a) commit()'ing 1884 * without having done a prior speculate() on this CPU 1885 * and (b) racing with another commit() on a different 1886 * CPU. There's nothing to do -- we just assert that 1887 * our offset is 0. 1888 */ 1889 ASSERT(src->dtb_offset == 0); 1890 return; 1891 1892 case DTRACESPEC_ACTIVE: 1893 new = DTRACESPEC_COMMITTING; 1894 break; 1895 1896 case DTRACESPEC_ACTIVEONE: 1897 /* 1898 * This speculation is active on one CPU. If our 1899 * buffer offset is non-zero, we know that the one CPU 1900 * must be us. Otherwise, we are committing on a 1901 * different CPU from the speculate(), and we must 1902 * rely on being asynchronously cleaned. 1903 */ 1904 if (src->dtb_offset != 0) { 1905 new = DTRACESPEC_COMMITTING; 1906 break; 1907 } 1908 /*FALLTHROUGH*/ 1909 1910 case DTRACESPEC_ACTIVEMANY: 1911 new = DTRACESPEC_COMMITTINGMANY; 1912 break; 1913 1914 default: 1915 ASSERT(0); 1916 } 1917 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 1918 current, new) != current); 1919 1920 /* 1921 * We have set the state to indicate that we are committing this 1922 * speculation. Now reserve the necessary space in the destination 1923 * buffer. 1924 */ 1925 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset, 1926 sizeof (uint64_t), state, NULL)) < 0) { 1927 dtrace_buffer_drop(dest); 1928 goto out; 1929 } 1930 1931 /* 1932 * We have the space; copy the buffer across. (Note that this is a 1933 * highly suboptimal bcopy(); in the unlikely event that this becomes 1934 * a serious performance issue, a high-performance DTrace-specific 1935 * bcopy() should obviously be invented.) 1936 */ 1937 daddr = (uintptr_t)dest->dtb_tomax + offs; 1938 dlimit = daddr + src->dtb_offset; 1939 saddr = (uintptr_t)src->dtb_tomax; 1940 1941 /* 1942 * First, the aligned portion. 1943 */ 1944 while (dlimit - daddr >= sizeof (uint64_t)) { 1945 *((uint64_t *)daddr) = *((uint64_t *)saddr); 1946 1947 daddr += sizeof (uint64_t); 1948 saddr += sizeof (uint64_t); 1949 } 1950 1951 /* 1952 * Now any left-over bit... 1953 */ 1954 while (dlimit - daddr) 1955 *((uint8_t *)daddr++) = *((uint8_t *)saddr++); 1956 1957 /* 1958 * Finally, commit the reserved space in the destination buffer. 1959 */ 1960 dest->dtb_offset = offs + src->dtb_offset; 1961 1962 out: 1963 /* 1964 * If we're lucky enough to be the only active CPU on this speculation 1965 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
1966 */ 1967 if (current == DTRACESPEC_ACTIVE || 1968 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 1969 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 1970 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 1971 1972 ASSERT(rval == DTRACESPEC_COMMITTING); 1973 } 1974 1975 src->dtb_offset = 0; 1976 src->dtb_xamot_drops += src->dtb_drops; 1977 src->dtb_drops = 0; 1978 } 1979 1980 /* 1981 * This routine discards an active speculation. If the specified speculation 1982 * is not in a valid state to perform a discard(), this routine will silently 1983 * do nothing. The state of the specified speculation is transitioned 1984 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 1985 */ 1986 static void 1987 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 1988 dtrace_specid_t which) 1989 { 1990 dtrace_speculation_t *spec; 1991 dtrace_speculation_state_t current, new; 1992 dtrace_buffer_t *buf; 1993 1994 if (which == 0) 1995 return; 1996 1997 if (which > state->dts_nspeculations) { 1998 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 1999 return; 2000 } 2001 2002 spec = &state->dts_speculations[which - 1]; 2003 buf = &spec->dtsp_buffer[cpu]; 2004 2005 do { 2006 current = spec->dtsp_state; 2007 2008 switch (current) { 2009 case DTRACESPEC_INACTIVE: 2010 case DTRACESPEC_COMMITTINGMANY: 2011 case DTRACESPEC_COMMITTING: 2012 case DTRACESPEC_DISCARDING: 2013 return; 2014 2015 case DTRACESPEC_ACTIVE: 2016 case DTRACESPEC_ACTIVEMANY: 2017 new = DTRACESPEC_DISCARDING; 2018 break; 2019 2020 case DTRACESPEC_ACTIVEONE: 2021 if (buf->dtb_offset != 0) { 2022 new = DTRACESPEC_INACTIVE; 2023 } else { 2024 new = DTRACESPEC_DISCARDING; 2025 } 2026 break; 2027 2028 default: 2029 ASSERT(0); 2030 } 2031 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2032 current, new) != current); 2033 2034 buf->dtb_offset = 0; 2035 buf->dtb_drops = 0; 2036 } 2037 2038 /* 2039 * Note: not called from probe context. This function is called 2040 * asynchronously from cross call context to clean any speculations that are 2041 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 2042 * transitioned back to the INACTIVE state until all CPUs have cleaned the 2043 * speculation. 2044 */ 2045 static void 2046 dtrace_speculation_clean_here(dtrace_state_t *state) 2047 { 2048 dtrace_icookie_t cookie; 2049 processorid_t cpu = CPU->cpu_id; 2050 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 2051 dtrace_specid_t i; 2052 2053 cookie = dtrace_interrupt_disable(); 2054 2055 if (dest->dtb_tomax == NULL) { 2056 dtrace_interrupt_enable(cookie); 2057 return; 2058 } 2059 2060 for (i = 0; i < state->dts_nspeculations; i++) { 2061 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2062 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 2063 2064 if (src->dtb_tomax == NULL) 2065 continue; 2066 2067 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 2068 src->dtb_offset = 0; 2069 continue; 2070 } 2071 2072 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2073 continue; 2074 2075 if (src->dtb_offset == 0) 2076 continue; 2077 2078 dtrace_speculation_commit(state, cpu, i + 1); 2079 } 2080 2081 dtrace_interrupt_enable(cookie); 2082 } 2083 2084 /* 2085 * Note: not called from probe context. This function is called 2086 * asynchronously (and at a regular interval) to clean any speculations that 2087 * are in the COMMITTINGMANY or DISCARDING states. 
If it discovers that there 2088 * is work to be done, it cross calls all CPUs to perform that work; 2089 * COMMITMANY and DISCARDING speculations may not be transitioned back to the 2090 * INACTIVE state until they have been cleaned by all CPUs. 2091 */ 2092 static void 2093 dtrace_speculation_clean(dtrace_state_t *state) 2094 { 2095 int work = 0, rv; 2096 dtrace_specid_t i; 2097 2098 for (i = 0; i < state->dts_nspeculations; i++) { 2099 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2100 2101 ASSERT(!spec->dtsp_cleaning); 2102 2103 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 2104 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2105 continue; 2106 2107 work++; 2108 spec->dtsp_cleaning = 1; 2109 } 2110 2111 if (!work) 2112 return; 2113 2114 dtrace_xcall(DTRACE_CPUALL, 2115 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 2116 2117 /* 2118 * We now know that all CPUs have committed or discarded their 2119 * speculation buffers, as appropriate. We can now set the state 2120 * to inactive. 2121 */ 2122 for (i = 0; i < state->dts_nspeculations; i++) { 2123 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2124 dtrace_speculation_state_t current, new; 2125 2126 if (!spec->dtsp_cleaning) 2127 continue; 2128 2129 current = spec->dtsp_state; 2130 ASSERT(current == DTRACESPEC_DISCARDING || 2131 current == DTRACESPEC_COMMITTINGMANY); 2132 2133 new = DTRACESPEC_INACTIVE; 2134 2135 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 2136 ASSERT(rv == current); 2137 spec->dtsp_cleaning = 0; 2138 } 2139 } 2140 2141 /* 2142 * Called as part of a speculate() to get the speculative buffer associated 2143 * with a given speculation. Returns NULL if the specified speculation is not 2144 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 2145 * the active CPU is not the specified CPU -- the speculation will be 2146 * atomically transitioned into the ACTIVEMANY state. 2147 */ 2148 static dtrace_buffer_t * 2149 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 2150 dtrace_specid_t which) 2151 { 2152 dtrace_speculation_t *spec; 2153 dtrace_speculation_state_t current, new; 2154 dtrace_buffer_t *buf; 2155 2156 if (which == 0) 2157 return (NULL); 2158 2159 if (which > state->dts_nspeculations) { 2160 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2161 return (NULL); 2162 } 2163 2164 spec = &state->dts_speculations[which - 1]; 2165 buf = &spec->dtsp_buffer[cpuid]; 2166 2167 do { 2168 current = spec->dtsp_state; 2169 2170 switch (current) { 2171 case DTRACESPEC_INACTIVE: 2172 case DTRACESPEC_COMMITTINGMANY: 2173 case DTRACESPEC_DISCARDING: 2174 return (NULL); 2175 2176 case DTRACESPEC_COMMITTING: 2177 ASSERT(buf->dtb_offset == 0); 2178 return (NULL); 2179 2180 case DTRACESPEC_ACTIVEONE: 2181 /* 2182 * This speculation is currently active on one CPU. 2183 * Check the offset in the buffer; if it's non-zero, 2184 * that CPU must be us (and we leave the state alone). 2185 * If it's zero, assume that we're starting on a new 2186 * CPU -- and change the state to indicate that the 2187 * speculation is active on more than one CPU. 
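 *
 * For example, if CPU 2 was the first to speculate() into this
 * speculation, its buffer offset is non-zero and a subsequent
 * speculate() on CPU 2 simply returns that buffer with the state left
 * at ACTIVEONE; if CPU 5 then calls speculate() for the same
 * speculation, its buffer offset is still zero, so the state is
 * advanced to ACTIVEMANY before its buffer is returned.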
2188 */ 2189 if (buf->dtb_offset != 0) 2190 return (buf); 2191 2192 new = DTRACESPEC_ACTIVEMANY; 2193 break; 2194 2195 case DTRACESPEC_ACTIVEMANY: 2196 return (buf); 2197 2198 case DTRACESPEC_ACTIVE: 2199 new = DTRACESPEC_ACTIVEONE; 2200 break; 2201 2202 default: 2203 ASSERT(0); 2204 } 2205 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2206 current, new) != current); 2207 2208 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 2209 return (buf); 2210 } 2211 2212 /* 2213 * This function implements the DIF emulator's variable lookups. The emulator 2214 * passes a reserved variable identifier and optional built-in array index. 2215 */ 2216 static uint64_t 2217 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 2218 uint64_t ndx) 2219 { 2220 /* 2221 * If we're accessing one of the uncached arguments, we'll turn this 2222 * into a reference in the args array. 2223 */ 2224 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 2225 ndx = v - DIF_VAR_ARG0; 2226 v = DIF_VAR_ARGS; 2227 } 2228 2229 switch (v) { 2230 case DIF_VAR_ARGS: 2231 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 2232 if (ndx >= sizeof (mstate->dtms_arg) / 2233 sizeof (mstate->dtms_arg[0])) { 2234 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2235 dtrace_provider_t *pv; 2236 uint64_t val; 2237 2238 pv = mstate->dtms_probe->dtpr_provider; 2239 if (pv->dtpv_pops.dtps_getargval != NULL) 2240 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 2241 mstate->dtms_probe->dtpr_id, 2242 mstate->dtms_probe->dtpr_arg, ndx, aframes); 2243 else 2244 val = dtrace_getarg(ndx, aframes); 2245 2246 /* 2247 * This is regrettably required to keep the compiler 2248 * from tail-optimizing the call to dtrace_getarg(). 2249 * The condition always evaluates to true, but the 2250 * compiler has no way of figuring that out a priori. 2251 * (None of this would be necessary if the compiler 2252 * could be relied upon to _always_ tail-optimize 2253 * the call to dtrace_getarg() -- but it can't.) 
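 *
 * (The reason a tail call here would be harmful: dtrace_getarg() walks
 * the stack using the artificial frame count computed above, and that
 * count assumes this function's frame is still present beneath it. A
 * minimal sketch of the pattern, assuming only a condition the compiler
 * cannot fold away:
 *
 *	val = dtrace_getarg(ndx, aframes);
 *	if (<opaque, always-true condition>)
 *		return (val);
 *	ASSERT(0);
 * )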
2254 */ 2255 if (mstate->dtms_probe != NULL) 2256 return (val); 2257 2258 ASSERT(0); 2259 } 2260 2261 return (mstate->dtms_arg[ndx]); 2262 2263 case DIF_VAR_UREGS: { 2264 klwp_t *lwp; 2265 2266 if (!dtrace_priv_proc(state)) 2267 return (0); 2268 2269 if ((lwp = curthread->t_lwp) == NULL) { 2270 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2271 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = NULL; 2272 return (0); 2273 } 2274 2275 return (dtrace_getreg(lwp->lwp_regs, ndx)); 2276 } 2277 2278 case DIF_VAR_CURTHREAD: 2279 if (!dtrace_priv_kernel(state)) 2280 return (0); 2281 return ((uint64_t)(uintptr_t)curthread); 2282 2283 case DIF_VAR_TIMESTAMP: 2284 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 2285 mstate->dtms_timestamp = dtrace_gethrtime(); 2286 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 2287 } 2288 return (mstate->dtms_timestamp); 2289 2290 case DIF_VAR_VTIMESTAMP: 2291 ASSERT(dtrace_vtime_references != 0); 2292 return (curthread->t_dtrace_vtime); 2293 2294 case DIF_VAR_WALLTIMESTAMP: 2295 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 2296 mstate->dtms_walltimestamp = dtrace_gethrestime(); 2297 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 2298 } 2299 return (mstate->dtms_walltimestamp); 2300 2301 case DIF_VAR_IPL: 2302 if (!dtrace_priv_kernel(state)) 2303 return (0); 2304 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 2305 mstate->dtms_ipl = dtrace_getipl(); 2306 mstate->dtms_present |= DTRACE_MSTATE_IPL; 2307 } 2308 return (mstate->dtms_ipl); 2309 2310 case DIF_VAR_EPID: 2311 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 2312 return (mstate->dtms_epid); 2313 2314 case DIF_VAR_ID: 2315 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2316 return (mstate->dtms_probe->dtpr_id); 2317 2318 case DIF_VAR_STACKDEPTH: 2319 if (!dtrace_priv_kernel(state)) 2320 return (0); 2321 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 2322 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2323 2324 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 2325 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 2326 } 2327 return (mstate->dtms_stackdepth); 2328 2329 case DIF_VAR_USTACKDEPTH: 2330 if (!dtrace_priv_proc(state)) 2331 return (0); 2332 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 2333 /* 2334 * See comment in DIF_VAR_PID. 2335 */ 2336 if (DTRACE_ANCHORED(mstate->dtms_probe) && 2337 CPU_ON_INTR(CPU)) { 2338 mstate->dtms_ustackdepth = 0; 2339 } else { 2340 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2341 mstate->dtms_ustackdepth = 2342 dtrace_getustackdepth(); 2343 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2344 } 2345 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 2346 } 2347 return (mstate->dtms_ustackdepth); 2348 2349 case DIF_VAR_CALLER: 2350 if (!dtrace_priv_kernel(state)) 2351 return (0); 2352 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 2353 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2354 2355 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 2356 /* 2357 * If this is an unanchored probe, we are 2358 * required to go through the slow path: 2359 * dtrace_caller() only guarantees correct 2360 * results for anchored probes. 2361 */ 2362 pc_t caller[2]; 2363 2364 dtrace_getpcstack(caller, 2, aframes, 2365 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 2366 mstate->dtms_caller = caller[1]; 2367 } else if ((mstate->dtms_caller = 2368 dtrace_caller(aframes)) == -1) { 2369 /* 2370 * We have failed to do this the quick way; 2371 * we must resort to the slower approach of 2372 * calling dtrace_getpcstack(). 
2373 */ 2374 pc_t caller; 2375 2376 dtrace_getpcstack(&caller, 1, aframes, NULL); 2377 mstate->dtms_caller = caller; 2378 } 2379 2380 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 2381 } 2382 return (mstate->dtms_caller); 2383 2384 case DIF_VAR_UCALLER: 2385 if (!dtrace_priv_proc(state)) 2386 return (0); 2387 2388 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 2389 uint64_t ustack[3]; 2390 2391 /* 2392 * dtrace_getupcstack() fills in the first uint64_t 2393 * with the current PID. The second uint64_t will 2394 * be the program counter at user-level. The third 2395 * uint64_t will contain the caller, which is what 2396 * we're after. 2397 */ 2398 ustack[2] = NULL; 2399 dtrace_getupcstack(ustack, 3); 2400 mstate->dtms_ucaller = ustack[2]; 2401 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 2402 } 2403 2404 return (mstate->dtms_ucaller); 2405 2406 case DIF_VAR_PROBEPROV: 2407 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2408 return ((uint64_t)(uintptr_t) 2409 mstate->dtms_probe->dtpr_provider->dtpv_name); 2410 2411 case DIF_VAR_PROBEMOD: 2412 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2413 return ((uint64_t)(uintptr_t) 2414 mstate->dtms_probe->dtpr_mod); 2415 2416 case DIF_VAR_PROBEFUNC: 2417 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2418 return ((uint64_t)(uintptr_t) 2419 mstate->dtms_probe->dtpr_func); 2420 2421 case DIF_VAR_PROBENAME: 2422 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2423 return ((uint64_t)(uintptr_t) 2424 mstate->dtms_probe->dtpr_name); 2425 2426 case DIF_VAR_PID: 2427 if (!dtrace_priv_proc(state)) 2428 return (0); 2429 2430 /* 2431 * Note that we are assuming that an unanchored probe is 2432 * always due to a high-level interrupt. (And we're assuming 2433 * that there is only a single high level interrupt.) 2434 */ 2435 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2436 return (pid0.pid_id); 2437 2438 /* 2439 * It is always safe to dereference one's own t_procp pointer: 2440 * it always points to a valid, allocated proc structure. 2441 * Further, it is always safe to dereference the p_pidp member 2442 * of one's own proc structure. (These are truisms because 2443 * threads and processes don't clean up their own state -- 2444 * they leave that task to whomever reaps them.) 2445 */ 2446 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 2447 2448 case DIF_VAR_TID: 2449 /* 2450 * See comment in DIF_VAR_PID. 2451 */ 2452 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2453 return (0); 2454 2455 return ((uint64_t)curthread->t_tid); 2456 2457 case DIF_VAR_EXECNAME: 2458 if (!dtrace_priv_proc(state)) 2459 return (0); 2460 2461 /* 2462 * See comment in DIF_VAR_PID. 2463 */ 2464 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2465 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 2466 2467 /* 2468 * It is always safe to dereference one's own t_procp pointer: 2469 * it always points to a valid, allocated proc structure. 2470 * (This is true because threads don't clean up their own 2471 * state -- they leave that task to whomever reaps them.) 2472 */ 2473 return ((uint64_t)(uintptr_t) 2474 curthread->t_procp->p_user.u_comm); 2475 2476 case DIF_VAR_ZONENAME: 2477 if (!dtrace_priv_proc(state)) 2478 return (0); 2479 2480 /* 2481 * See comment in DIF_VAR_PID.
2482 */ 2483 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2484 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 2485 2486 /* 2487 * It is always safe to dereference one's own t_procp pointer: 2488 * it always points to a valid, allocated proc structure. 2489 * (This is true because threads don't clean up their own 2490 * state -- they leave that task to whomever reaps them.) 2491 */ 2492 return ((uint64_t)(uintptr_t) 2493 curthread->t_procp->p_zone->zone_name); 2494 2495 default: 2496 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2497 return (0); 2498 } 2499 } 2500 2501 /* 2502 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 2503 * Notice that we don't bother validating the proper number of arguments or 2504 * their types in the tuple stack. This isn't needed because all argument 2505 * interpretation is safe because of our load safety -- the worst that can 2506 * happen is that a bogus program can obtain bogus results. 2507 */ 2508 static void 2509 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 2510 dtrace_key_t *tupregs, int nargs, 2511 dtrace_mstate_t *mstate, dtrace_state_t *state) 2512 { 2513 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 2514 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 2515 2516 union { 2517 mutex_impl_t mi; 2518 uint64_t mx; 2519 } m; 2520 2521 union { 2522 krwlock_t ri; 2523 uintptr_t rw; 2524 } r; 2525 2526 switch (subr) { 2527 case DIF_SUBR_RAND: 2528 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 2529 break; 2530 2531 case DIF_SUBR_MUTEX_OWNED: 2532 m.mx = dtrace_load64(tupregs[0].dttk_value); 2533 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 2534 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 2535 else 2536 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 2537 break; 2538 2539 case DIF_SUBR_MUTEX_OWNER: 2540 m.mx = dtrace_load64(tupregs[0].dttk_value); 2541 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 2542 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 2543 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 2544 else 2545 regs[rd] = 0; 2546 break; 2547 2548 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 2549 m.mx = dtrace_load64(tupregs[0].dttk_value); 2550 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 2551 break; 2552 2553 case DIF_SUBR_MUTEX_TYPE_SPIN: 2554 m.mx = dtrace_load64(tupregs[0].dttk_value); 2555 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 2556 break; 2557 2558 case DIF_SUBR_RW_READ_HELD: { 2559 uintptr_t tmp; 2560 2561 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2562 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 2563 break; 2564 } 2565 2566 case DIF_SUBR_RW_WRITE_HELD: 2567 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2568 regs[rd] = _RW_WRITE_HELD(&r.ri); 2569 break; 2570 2571 case DIF_SUBR_RW_ISWRITER: 2572 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2573 regs[rd] = _RW_ISWRITER(&r.ri); 2574 break; 2575 2576 case DIF_SUBR_BCOPY: { 2577 /* 2578 * We need to be sure that the destination is in the scratch 2579 * region -- no other region is allowed. 2580 */ 2581 uintptr_t src = tupregs[0].dttk_value; 2582 uintptr_t dest = tupregs[1].dttk_value; 2583 size_t size = tupregs[2].dttk_value; 2584 2585 if (!dtrace_inscratch(dest, size, mstate)) { 2586 *flags |= CPU_DTRACE_BADADDR; 2587 *illval = regs[rd]; 2588 break; 2589 } 2590 2591 dtrace_bcopy((void *)src, (void *)dest, size); 2592 break; 2593 } 2594 2595 case DIF_SUBR_ALLOCA: 2596 case DIF_SUBR_COPYIN: { 2597 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 2598 uint64_t size = 2599 tupregs[subr == DIF_SUBR_ALLOCA ? 
0 : 1].dttk_value; 2600 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 2601 2602 /* 2603 * This action doesn't require any credential checks since 2604 * probes will not activate in user contexts to which the 2605 * enabling user does not have permissions. 2606 */ 2607 if (mstate->dtms_scratch_ptr + scratch_size > 2608 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2609 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2610 regs[rd] = NULL; 2611 break; 2612 } 2613 2614 if (subr == DIF_SUBR_COPYIN) { 2615 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2616 dtrace_copyin(tupregs[0].dttk_value, dest, size); 2617 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2618 } 2619 2620 mstate->dtms_scratch_ptr += scratch_size; 2621 regs[rd] = dest; 2622 break; 2623 } 2624 2625 case DIF_SUBR_COPYINTO: { 2626 uint64_t size = tupregs[1].dttk_value; 2627 uintptr_t dest = tupregs[2].dttk_value; 2628 2629 /* 2630 * This action doesn't require any credential checks since 2631 * probes will not activate in user contexts to which the 2632 * enabling user does not have permissions. 2633 */ 2634 if (!dtrace_inscratch(dest, size, mstate)) { 2635 *flags |= CPU_DTRACE_BADADDR; 2636 *illval = regs[rd]; 2637 break; 2638 } 2639 2640 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2641 dtrace_copyin(tupregs[0].dttk_value, dest, size); 2642 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2643 break; 2644 } 2645 2646 case DIF_SUBR_COPYINSTR: { 2647 uintptr_t dest = mstate->dtms_scratch_ptr; 2648 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2649 2650 if (nargs > 1 && tupregs[1].dttk_value < size) 2651 size = tupregs[1].dttk_value + 1; 2652 2653 /* 2654 * This action doesn't require any credential checks since 2655 * probes will not activate in user contexts to which the 2656 * enabling user does not have permissions. 2657 */ 2658 if (mstate->dtms_scratch_ptr + size > 2659 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2660 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2661 regs[rd] = NULL; 2662 break; 2663 } 2664 2665 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2666 dtrace_copyinstr(tupregs[0].dttk_value, dest, size); 2667 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2668 2669 ((char *)dest)[size - 1] = '\0'; 2670 mstate->dtms_scratch_ptr += size; 2671 regs[rd] = dest; 2672 break; 2673 } 2674 2675 case DIF_SUBR_MSGSIZE: 2676 case DIF_SUBR_MSGDSIZE: { 2677 uintptr_t baddr = tupregs[0].dttk_value, daddr; 2678 uintptr_t wptr, rptr; 2679 size_t count = 0; 2680 int cont = 0; 2681 2682 while (baddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 2683 wptr = dtrace_loadptr(baddr + 2684 offsetof(mblk_t, b_wptr)); 2685 2686 rptr = dtrace_loadptr(baddr + 2687 offsetof(mblk_t, b_rptr)); 2688 2689 if (wptr < rptr) { 2690 *flags |= CPU_DTRACE_BADADDR; 2691 *illval = tupregs[0].dttk_value; 2692 break; 2693 } 2694 2695 daddr = dtrace_loadptr(baddr + 2696 offsetof(mblk_t, b_datap)); 2697 2698 baddr = dtrace_loadptr(baddr + 2699 offsetof(mblk_t, b_cont)); 2700 2701 /* 2702 * We want to prevent against denial-of-service here, 2703 * so we're only going to search the list for 2704 * dtrace_msgdsize_max mblks. 
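 *
 * (With the default setting of 128, a chain longer than 128 mblks
 * causes the loop to set CPU_DTRACE_ILLOP and stop, rather than being
 * walked indefinitely.)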
2705 */ 2706 if (cont++ > dtrace_msgdsize_max) { 2707 *flags |= CPU_DTRACE_ILLOP; 2708 break; 2709 } 2710 2711 if (subr == DIF_SUBR_MSGDSIZE) { 2712 if (dtrace_load8(daddr + 2713 offsetof(dblk_t, db_type)) != M_DATA) 2714 continue; 2715 } 2716 2717 count += wptr - rptr; 2718 } 2719 2720 if (!(*flags & CPU_DTRACE_FAULT)) 2721 regs[rd] = count; 2722 2723 break; 2724 } 2725 2726 case DIF_SUBR_PROGENYOF: { 2727 pid_t pid = tupregs[0].dttk_value; 2728 proc_t *p; 2729 int rval = 0; 2730 2731 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2732 2733 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 2734 if (p->p_pidp->pid_id == pid) { 2735 rval = 1; 2736 break; 2737 } 2738 } 2739 2740 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2741 2742 regs[rd] = rval; 2743 break; 2744 } 2745 2746 case DIF_SUBR_SPECULATION: 2747 regs[rd] = dtrace_speculation(state); 2748 break; 2749 2750 case DIF_SUBR_COPYOUT: { 2751 uintptr_t kaddr = tupregs[0].dttk_value; 2752 uintptr_t uaddr = tupregs[1].dttk_value; 2753 uint64_t size = tupregs[2].dttk_value; 2754 2755 if (!dtrace_destructive_disallow && 2756 dtrace_priv_proc_control(state) && 2757 !dtrace_istoxic(kaddr, size)) { 2758 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2759 dtrace_copyout(kaddr, uaddr, size); 2760 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2761 } 2762 break; 2763 } 2764 2765 case DIF_SUBR_COPYOUTSTR: { 2766 uintptr_t kaddr = tupregs[0].dttk_value; 2767 uintptr_t uaddr = tupregs[1].dttk_value; 2768 uint64_t size = tupregs[2].dttk_value; 2769 2770 if (!dtrace_destructive_disallow && 2771 dtrace_priv_proc_control(state) && 2772 !dtrace_istoxic(kaddr, size)) { 2773 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2774 dtrace_copyoutstr(kaddr, uaddr, size); 2775 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2776 } 2777 break; 2778 } 2779 2780 case DIF_SUBR_STRLEN: 2781 regs[rd] = dtrace_strlen((char *)(uintptr_t) 2782 tupregs[0].dttk_value, 2783 state->dts_options[DTRACEOPT_STRSIZE]); 2784 break; 2785 2786 case DIF_SUBR_STRCHR: 2787 case DIF_SUBR_STRRCHR: { 2788 /* 2789 * We're going to iterate over the string looking for the 2790 * specified character. We will iterate until we have reached 2791 * the string length or we have found the character. If this 2792 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 2793 * of the specified character instead of the first. 2794 */ 2795 uintptr_t addr = tupregs[0].dttk_value; 2796 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 2797 char c, target = (char)tupregs[1].dttk_value; 2798 2799 for (regs[rd] = NULL; addr < limit; addr++) { 2800 if ((c = dtrace_load8(addr)) == target) { 2801 regs[rd] = addr; 2802 2803 if (subr == DIF_SUBR_STRCHR) 2804 break; 2805 } 2806 2807 if (c == '\0') 2808 break; 2809 } 2810 2811 break; 2812 } 2813 2814 case DIF_SUBR_STRSTR: 2815 case DIF_SUBR_INDEX: 2816 case DIF_SUBR_RINDEX: { 2817 /* 2818 * We're going to iterate over the string looking for the 2819 * specified string. We will iterate until we have reached 2820 * the string length or we have found the string. (Yes, this 2821 * is done in the most naive way possible -- but considering 2822 * that the string we're searching for is likely to be 2823 * relatively short, the complexity of Rabin-Karp or similar 2824 * hardly seems merited.) 
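 *
 * (The cost is bounded by the strsize option in any case: each of at
 * most len starting positions is compared against at most sublen
 * bytes, so with the default string size of 256 the worst case is on
 * the order of 64K byte comparisons per call.)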
2825 */ 2826 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 2827 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 2828 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2829 size_t len = dtrace_strlen(addr, size); 2830 size_t sublen = dtrace_strlen(substr, size); 2831 char *limit = addr + len, *orig = addr; 2832 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 2833 int inc = 1; 2834 2835 regs[rd] = notfound; 2836 2837 /* 2838 * strstr() and index()/rindex() have similar semantics if 2839 * both strings are the empty string: strstr() returns a 2840 * pointer to the (empty) string, and index() and rindex() 2841 * both return index 0 (regardless of any position argument). 2842 */ 2843 if (sublen == 0 && len == 0) { 2844 if (subr == DIF_SUBR_STRSTR) 2845 regs[rd] = (uintptr_t)addr; 2846 else 2847 regs[rd] = 0; 2848 break; 2849 } 2850 2851 if (subr != DIF_SUBR_STRSTR) { 2852 if (subr == DIF_SUBR_RINDEX) { 2853 limit = orig - 1; 2854 addr += len; 2855 inc = -1; 2856 } 2857 2858 /* 2859 * Both index() and rindex() take an optional position 2860 * argument that denotes the starting position. 2861 */ 2862 if (nargs == 3) { 2863 int64_t pos = (int64_t)tupregs[2].dttk_value; 2864 2865 /* 2866 * If the position argument to index() is 2867 * negative, Perl implicitly clamps it at 2868 * zero. This semantic is a little surprising 2869 * given the special meaning of negative 2870 * positions to similar Perl functions like 2871 * substr(), but it appears to reflect a 2872 * notion that index() can start from a 2873 * negative index and increment its way up to 2874 * the string. Given this notion, Perl's 2875 * rindex() is at least self-consistent in 2876 * that it implicitly clamps positions greater 2877 * than the string length to be the string 2878 * length. Where Perl completely loses 2879 * coherence, however, is when the specified 2880 * substring is the empty string (""). In 2881 * this case, even if the position is 2882 * negative, rindex() returns 0 -- and even if 2883 * the position is greater than the length, 2884 * index() returns the string length. These 2885 * semantics violate the notion that index() 2886 * should never return a value less than the 2887 * specified position and that rindex() should 2888 * never return a value greater than the 2889 * specified position. (One assumes that 2890 * these semantics are artifacts of Perl's 2891 * implementation and not the results of 2892 * deliberate design -- it beggars belief that 2893 * even Larry Wall could desire such oddness.) 2894 * While in the abstract one would wish for 2895 * consistent position semantics across 2896 * substr(), index() and rindex() -- or at the 2897 * very least self-consistent position 2898 * semantics for index() and rindex() -- we 2899 * instead opt to keep with the extant Perl 2900 * semantics, in all their broken glory. (Do 2901 * we have more desire to maintain Perl's 2902 * semantics than Perl does? Probably.) 
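 *
 * Concretely, for a string s of length 5:
 *
 *	index(s, t, -2)		behaves as index(s, t, 0)
 *	index(s, t, 9)		is -1 for any non-empty t, 5 for t == ""
 *	rindex(s, t, -2)	is -1 for any non-empty t, 0 for t == ""
 *	rindex(s, t, 9)		behaves as rindex(s, t, 5)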
2903 */ 2904 if (subr == DIF_SUBR_RINDEX) { 2905 if (pos < 0) { 2906 if (sublen == 0) 2907 regs[rd] = 0; 2908 break; 2909 } 2910 2911 if (pos > len) 2912 pos = len; 2913 } else { 2914 if (pos < 0) 2915 pos = 0; 2916 2917 if (pos >= len) { 2918 if (sublen == 0) 2919 regs[rd] = len; 2920 break; 2921 } 2922 } 2923 2924 addr = orig + pos; 2925 } 2926 } 2927 2928 for (regs[rd] = notfound; addr != limit; addr += inc) { 2929 if (dtrace_strncmp(addr, substr, sublen) == 0) { 2930 if (subr != DIF_SUBR_STRSTR) { 2931 /* 2932 * As D index() and rindex() are 2933 * modeled on Perl (and not on awk), 2934 * we return a zero-based (and not a 2935 * one-based) index. (For you Perl 2936 * weenies: no, we're not going to add 2937 * $[ -- and shouldn't you be at a con 2938 * or something?) 2939 */ 2940 regs[rd] = (uintptr_t)(addr - orig); 2941 break; 2942 } 2943 2944 ASSERT(subr == DIF_SUBR_STRSTR); 2945 regs[rd] = (uintptr_t)addr; 2946 break; 2947 } 2948 } 2949 2950 break; 2951 } 2952 2953 case DIF_SUBR_STRTOK: { 2954 uintptr_t addr = tupregs[0].dttk_value; 2955 uintptr_t tokaddr = tupregs[1].dttk_value; 2956 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2957 uintptr_t limit, toklimit = tokaddr + size; 2958 uint8_t c, tokmap[32]; /* 256 / 8 */ 2959 char *dest = (char *)mstate->dtms_scratch_ptr; 2960 int i; 2961 2962 if (mstate->dtms_scratch_ptr + size > 2963 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2964 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2965 regs[rd] = NULL; 2966 break; 2967 } 2968 2969 if (addr == NULL) { 2970 /* 2971 * If the address specified is NULL, we use our saved 2972 * strtok pointer from the mstate. Note that this 2973 * means that the saved strtok pointer is _only_ 2974 * valid within multiple enablings of the same probe -- 2975 * it behaves like an implicit clause-local variable. 2976 */ 2977 addr = mstate->dtms_strtok; 2978 } 2979 2980 /* 2981 * First, zero the token map, and then process the token 2982 * string -- setting a bit in the map for every character 2983 * found in the token string. 2984 */ 2985 for (i = 0; i < sizeof (tokmap); i++) 2986 tokmap[i] = 0; 2987 2988 for (; tokaddr < toklimit; tokaddr++) { 2989 if ((c = dtrace_load8(tokaddr)) == '\0') 2990 break; 2991 2992 ASSERT((c >> 3) < sizeof (tokmap)); 2993 tokmap[c >> 3] |= (1 << (c & 0x7)); 2994 } 2995 2996 for (limit = addr + size; addr < limit; addr++) { 2997 /* 2998 * We're looking for a character that is _not_ contained 2999 * in the token string. 3000 */ 3001 if ((c = dtrace_load8(addr)) == '\0') 3002 break; 3003 3004 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 3005 break; 3006 } 3007 3008 if (c == '\0') { 3009 /* 3010 * We reached the end of the string without finding 3011 * any character that was not in the token string. 3012 * We return NULL in this case, and we set the saved 3013 * address to NULL as well. 3014 */ 3015 regs[rd] = NULL; 3016 mstate->dtms_strtok = NULL; 3017 break; 3018 } 3019 3020 /* 3021 * From here on, we're copying into the destination string. 
3022 */ 3023 for (i = 0; addr < limit && i < size - 1; addr++) { 3024 if ((c = dtrace_load8(addr)) == '\0') 3025 break; 3026 3027 if (tokmap[c >> 3] & (1 << (c & 0x7))) 3028 break; 3029 3030 ASSERT(i < size); 3031 dest[i++] = c; 3032 } 3033 3034 ASSERT(i < size); 3035 dest[i] = '\0'; 3036 regs[rd] = (uintptr_t)dest; 3037 mstate->dtms_scratch_ptr += size; 3038 mstate->dtms_strtok = addr; 3039 break; 3040 } 3041 3042 case DIF_SUBR_SUBSTR: { 3043 uintptr_t s = tupregs[0].dttk_value; 3044 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3045 char *d = (char *)mstate->dtms_scratch_ptr; 3046 int64_t index = (int64_t)tupregs[1].dttk_value; 3047 int64_t remaining = (int64_t)tupregs[2].dttk_value; 3048 size_t len = dtrace_strlen((char *)s, size); 3049 int64_t i = 0; 3050 3051 if (nargs <= 2) 3052 remaining = (int64_t)size; 3053 3054 if (mstate->dtms_scratch_ptr + size > 3055 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3056 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3057 regs[rd] = NULL; 3058 break; 3059 } 3060 3061 if (index < 0) { 3062 index += len; 3063 3064 if (index < 0 && index + remaining > 0) { 3065 remaining += index; 3066 index = 0; 3067 } 3068 } 3069 3070 if (index >= len || index < 0) 3071 index = len; 3072 3073 for (d[0] = '\0'; remaining > 0; remaining--) { 3074 if ((d[i++] = dtrace_load8(s++ + index)) == '\0') 3075 break; 3076 3077 if (i == size) { 3078 d[i - 1] = '\0'; 3079 break; 3080 } 3081 } 3082 3083 mstate->dtms_scratch_ptr += size; 3084 regs[rd] = (uintptr_t)d; 3085 break; 3086 } 3087 3088 case DIF_SUBR_GETMAJOR: 3089 #ifdef _LP64 3090 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 3091 #else 3092 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 3093 #endif 3094 break; 3095 3096 case DIF_SUBR_GETMINOR: 3097 #ifdef _LP64 3098 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 3099 #else 3100 regs[rd] = tupregs[0].dttk_value & MAXMIN; 3101 #endif 3102 break; 3103 3104 case DIF_SUBR_DDI_PATHNAME: { 3105 /* 3106 * This one is a galactic mess. We are going to roughly 3107 * emulate ddi_pathname(), but it's made more complicated 3108 * by the fact that we (a) want to include the minor name and 3109 * (b) must proceed iteratively instead of recursively. 3110 */ 3111 uintptr_t dest = mstate->dtms_scratch_ptr; 3112 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3113 char *start = (char *)dest, *end = start + size - 1; 3114 uintptr_t daddr = tupregs[0].dttk_value; 3115 int64_t minor = (int64_t)tupregs[1].dttk_value; 3116 char *s; 3117 int i, len, depth = 0; 3118 3119 if (size == 0 || mstate->dtms_scratch_ptr + size > 3120 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3121 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3122 regs[rd] = NULL; 3123 break; 3124 } 3125 3126 *end = '\0'; 3127 3128 /* 3129 * We want to have a name for the minor. In order to do this, 3130 * we need to walk the minor list from the devinfo. We want 3131 * to be sure that we don't infinitely walk a circular list, 3132 * so we check for circularity by sending a scout pointer 3133 * ahead two elements for every element that we iterate over; 3134 * if the list is circular, these will ultimately point to the 3135 * same element. You may recognize this little trick as the 3136 * answer to a stupid interview question -- one that always 3137 * seems to be asked by those who had to have it laboriously 3138 * explained to them, and who can't even concisely describe 3139 * the conditions under which one would be forced to resort to 3140 * this technique. 
Needless to say, those conditions are 3141 * found here -- and probably only here. Is this is the only 3142 * use of this infamous trick in shipping, production code? 3143 * If it isn't, it probably should be... 3144 */ 3145 if (minor != -1) { 3146 uintptr_t maddr = dtrace_loadptr(daddr + 3147 offsetof(struct dev_info, devi_minor)); 3148 3149 uintptr_t next = offsetof(struct ddi_minor_data, next); 3150 uintptr_t name = offsetof(struct ddi_minor_data, 3151 d_minor) + offsetof(struct ddi_minor, name); 3152 uintptr_t dev = offsetof(struct ddi_minor_data, 3153 d_minor) + offsetof(struct ddi_minor, dev); 3154 uintptr_t scout; 3155 3156 if (maddr != NULL) 3157 scout = dtrace_loadptr(maddr + next); 3158 3159 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 3160 uint64_t m; 3161 #ifdef _LP64 3162 m = dtrace_load64(maddr + dev) & MAXMIN64; 3163 #else 3164 m = dtrace_load32(maddr + dev) & MAXMIN; 3165 #endif 3166 if (m != minor) { 3167 maddr = dtrace_loadptr(maddr + next); 3168 3169 if (scout == NULL) 3170 continue; 3171 3172 scout = dtrace_loadptr(scout + next); 3173 3174 if (scout == NULL) 3175 continue; 3176 3177 scout = dtrace_loadptr(scout + next); 3178 3179 if (scout == NULL) 3180 continue; 3181 3182 if (scout == maddr) { 3183 *flags |= CPU_DTRACE_ILLOP; 3184 break; 3185 } 3186 3187 continue; 3188 } 3189 3190 /* 3191 * We have the minor data. Now we need to 3192 * copy the minor's name into the end of the 3193 * pathname. 3194 */ 3195 s = (char *)dtrace_loadptr(maddr + name); 3196 len = dtrace_strlen(s, size); 3197 3198 if (*flags & CPU_DTRACE_FAULT) 3199 break; 3200 3201 if (len != 0) { 3202 if ((end -= (len + 1)) < start) 3203 break; 3204 3205 *end = ':'; 3206 } 3207 3208 for (i = 1; i <= len; i++) 3209 end[i] = dtrace_load8((uintptr_t)s++); 3210 break; 3211 } 3212 } 3213 3214 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 3215 ddi_node_state_t devi_state; 3216 3217 devi_state = dtrace_load32(daddr + 3218 offsetof(struct dev_info, devi_node_state)); 3219 3220 if (*flags & CPU_DTRACE_FAULT) 3221 break; 3222 3223 if (devi_state >= DS_INITIALIZED) { 3224 s = (char *)dtrace_loadptr(daddr + 3225 offsetof(struct dev_info, devi_addr)); 3226 len = dtrace_strlen(s, size); 3227 3228 if (*flags & CPU_DTRACE_FAULT) 3229 break; 3230 3231 if (len != 0) { 3232 if ((end -= (len + 1)) < start) 3233 break; 3234 3235 *end = '@'; 3236 } 3237 3238 for (i = 1; i <= len; i++) 3239 end[i] = dtrace_load8((uintptr_t)s++); 3240 } 3241 3242 /* 3243 * Now for the node name... 3244 */ 3245 s = (char *)dtrace_loadptr(daddr + 3246 offsetof(struct dev_info, devi_node_name)); 3247 3248 daddr = dtrace_loadptr(daddr + 3249 offsetof(struct dev_info, devi_parent)); 3250 3251 /* 3252 * If our parent is NULL (that is, if we're the root 3253 * node), we're going to use the special path 3254 * "devices". 
3255 */ 3256 if (daddr == NULL) 3257 s = "devices"; 3258 3259 len = dtrace_strlen(s, size); 3260 if (*flags & CPU_DTRACE_FAULT) 3261 break; 3262 3263 if ((end -= (len + 1)) < start) 3264 break; 3265 3266 for (i = 1; i <= len; i++) 3267 end[i] = dtrace_load8((uintptr_t)s++); 3268 *end = '/'; 3269 3270 if (depth++ > dtrace_devdepth_max) { 3271 *flags |= CPU_DTRACE_ILLOP; 3272 break; 3273 } 3274 } 3275 3276 if (end < start) 3277 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3278 3279 if (daddr == NULL) { 3280 regs[rd] = (uintptr_t)end; 3281 mstate->dtms_scratch_ptr += size; 3282 } 3283 3284 break; 3285 } 3286 3287 case DIF_SUBR_STRJOIN: { 3288 char *d = (char *)mstate->dtms_scratch_ptr; 3289 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3290 uintptr_t s1 = tupregs[0].dttk_value; 3291 uintptr_t s2 = tupregs[1].dttk_value; 3292 int i = 0; 3293 3294 if (mstate->dtms_scratch_ptr + size > 3295 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3296 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3297 regs[rd] = NULL; 3298 break; 3299 } 3300 3301 for (;;) { 3302 if (i >= size) { 3303 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3304 regs[rd] = NULL; 3305 break; 3306 } 3307 3308 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 3309 i--; 3310 break; 3311 } 3312 } 3313 3314 for (;;) { 3315 if (i >= size) { 3316 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3317 regs[rd] = NULL; 3318 break; 3319 } 3320 3321 if ((d[i++] = dtrace_load8(s2++)) == '\0') 3322 break; 3323 } 3324 3325 if (i < size) { 3326 mstate->dtms_scratch_ptr += i; 3327 regs[rd] = (uintptr_t)d; 3328 } 3329 3330 break; 3331 } 3332 3333 case DIF_SUBR_LLTOSTR: { 3334 int64_t i = (int64_t)tupregs[0].dttk_value; 3335 int64_t val = i < 0 ? i * -1 : i; 3336 uint64_t size = 22; /* enough room for 2^64 in decimal */ 3337 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 3338 3339 if (mstate->dtms_scratch_ptr + size > 3340 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3341 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3342 regs[rd] = NULL; 3343 break; 3344 } 3345 3346 for (*end-- = '\0'; val; val /= 10) 3347 *end-- = '0' + (val % 10); 3348 3349 if (i == 0) 3350 *end-- = '0'; 3351 3352 if (i < 0) 3353 *end-- = '-'; 3354 3355 regs[rd] = (uintptr_t)end + 1; 3356 mstate->dtms_scratch_ptr += size; 3357 break; 3358 } 3359 3360 case DIF_SUBR_DIRNAME: 3361 case DIF_SUBR_BASENAME: { 3362 char *dest = (char *)mstate->dtms_scratch_ptr; 3363 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3364 uintptr_t src = tupregs[0].dttk_value; 3365 int i, j, len = dtrace_strlen((char *)src, size); 3366 int lastbase = -1, firstbase = -1, lastdir = -1; 3367 int start, end; 3368 3369 if (mstate->dtms_scratch_ptr + size > 3370 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3371 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3372 regs[rd] = NULL; 3373 break; 3374 } 3375 3376 /* 3377 * The basename and dirname for a zero-length string is 3378 * defined to be "." 3379 */ 3380 if (len == 0) { 3381 len = 1; 3382 src = (uintptr_t)"."; 3383 } 3384 3385 /* 3386 * Start from the back of the string, moving back toward the 3387 * front until we see a character that isn't a slash. That 3388 * character is the last character in the basename. 3389 */ 3390 for (i = len - 1; i >= 0; i--) { 3391 if (dtrace_load8(src + i) != '/') 3392 break; 3393 } 3394 3395 if (i >= 0) 3396 lastbase = i; 3397 3398 /* 3399 * Starting from the last character in the basename, move 3400 * towards the front until we find a slash. 
The character 3401 * that we processed immediately before that is the first 3402 * character in the basename. 3403 */ 3404 for (; i >= 0; i--) { 3405 if (dtrace_load8(src + i) == '/') 3406 break; 3407 } 3408 3409 if (i >= 0) 3410 firstbase = i + 1; 3411 3412 /* 3413 * Now keep going until we find a non-slash character. That 3414 * character is the last character in the dirname. 3415 */ 3416 for (; i >= 0; i--) { 3417 if (dtrace_load8(src + i) != '/') 3418 break; 3419 } 3420 3421 if (i >= 0) 3422 lastdir = i; 3423 3424 ASSERT(!(lastbase == -1 && firstbase != -1)); 3425 ASSERT(!(firstbase == -1 && lastdir != -1)); 3426 3427 if (lastbase == -1) { 3428 /* 3429 * We didn't find a non-slash character. We know that 3430 * the length is non-zero, so the whole string must be 3431 * slashes. In either the dirname or the basename 3432 * case, we return '/'. 3433 */ 3434 ASSERT(firstbase == -1); 3435 firstbase = lastbase = lastdir = 0; 3436 } 3437 3438 if (firstbase == -1) { 3439 /* 3440 * The entire string consists only of a basename 3441 * component. If we're looking for dirname, we need 3442 * to change our string to be just "."; if we're 3443 * looking for a basename, we'll just set the first 3444 * character of the basename to be 0. 3445 */ 3446 if (subr == DIF_SUBR_DIRNAME) { 3447 ASSERT(lastdir == -1); 3448 src = (uintptr_t)"."; 3449 lastdir = 0; 3450 } else { 3451 firstbase = 0; 3452 } 3453 } 3454 3455 if (subr == DIF_SUBR_DIRNAME) { 3456 if (lastdir == -1) { 3457 /* 3458 * We know that we have a slash in the name -- 3459 * or lastdir would be set to 0, above. And 3460 * because lastdir is -1, we know that this 3461 * slash must be the first character. (That 3462 * is, the full string must be of the form 3463 * "/basename".) In this case, the last 3464 * character of the directory name is 0. 3465 */ 3466 lastdir = 0; 3467 } 3468 3469 start = 0; 3470 end = lastdir; 3471 } else { 3472 ASSERT(subr == DIF_SUBR_BASENAME); 3473 ASSERT(firstbase != -1 && lastbase != -1); 3474 start = firstbase; 3475 end = lastbase; 3476 } 3477 3478 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 3479 dest[j] = dtrace_load8(src + i); 3480 3481 dest[j] = '\0'; 3482 regs[rd] = (uintptr_t)dest; 3483 mstate->dtms_scratch_ptr += size; 3484 break; 3485 } 3486 3487 case DIF_SUBR_CLEANPATH: { 3488 char *dest = (char *)mstate->dtms_scratch_ptr, c; 3489 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3490 uintptr_t src = tupregs[0].dttk_value; 3491 int i = 0, j = 0; 3492 3493 if (mstate->dtms_scratch_ptr + size > 3494 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 3495 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3496 regs[rd] = NULL; 3497 break; 3498 } 3499 3500 /* 3501 * Move forward, loading each character. 3502 */ 3503 do { 3504 c = dtrace_load8(src + i++); 3505 next: 3506 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 3507 break; 3508 3509 if (c != '/') { 3510 dest[j++] = c; 3511 continue; 3512 } 3513 3514 c = dtrace_load8(src + i++); 3515 3516 if (c == '/') { 3517 /* 3518 * We have two slashes -- we can just advance 3519 * to the next character. 3520 */ 3521 goto next; 3522 } 3523 3524 if (c != '.') { 3525 /* 3526 * This is not "." and it's not ".." -- we can 3527 * just store the "/" and this character and 3528 * drive on. 3529 */ 3530 dest[j++] = '/'; 3531 dest[j++] = c; 3532 continue; 3533 } 3534 3535 c = dtrace_load8(src + i++); 3536 3537 if (c == '/') { 3538 /* 3539 * This is a "/./" component. 
We're not going 3540 * to store anything in the destination buffer; 3541 * we're just going to go to the next component. 3542 */ 3543 goto next; 3544 } 3545 3546 if (c != '.') { 3547 /* 3548 * This is not ".." -- we can just store the 3549 * "/." and this character and continue 3550 * processing. 3551 */ 3552 dest[j++] = '/'; 3553 dest[j++] = '.'; 3554 dest[j++] = c; 3555 continue; 3556 } 3557 3558 c = dtrace_load8(src + i++); 3559 3560 if (c != '/' && c != '\0') { 3561 /* 3562 * This is not ".." -- it's "..[mumble]". 3563 * We'll store the "/.." and this character 3564 * and continue processing. 3565 */ 3566 dest[j++] = '/'; 3567 dest[j++] = '.'; 3568 dest[j++] = '.'; 3569 dest[j++] = c; 3570 continue; 3571 } 3572 3573 /* 3574 * This is "/../" or "/..\0". We need to back up 3575 * our destination pointer until we find a "/". 3576 */ 3577 i--; 3578 while (j != 0 && dest[--j] != '/') 3579 continue; 3580 3581 if (c == '\0') 3582 dest[++j] = '/'; 3583 } while (c != '\0'); 3584 3585 dest[j] = '\0'; 3586 regs[rd] = (uintptr_t)dest; 3587 mstate->dtms_scratch_ptr += size; 3588 break; 3589 } 3590 } 3591 } 3592 3593 /* 3594 * Emulate the execution of DTrace IR instructions specified by the given 3595 * DIF object. This function is deliberately void of assertions as all of 3596 * the necessary checks are handled by a call to dtrace_difo_validate(). 3597 */ 3598 static uint64_t 3599 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 3600 dtrace_vstate_t *vstate, dtrace_state_t *state) 3601 { 3602 const dif_instr_t *text = difo->dtdo_buf; 3603 const uint_t textlen = difo->dtdo_len; 3604 const char *strtab = difo->dtdo_strtab; 3605 const uint64_t *inttab = difo->dtdo_inttab; 3606 3607 uint64_t rval = 0; 3608 dtrace_statvar_t *svar; 3609 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 3610 dtrace_difv_t *v; 3611 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 3612 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 3613 3614 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 3615 uint64_t regs[DIF_DIR_NREGS]; 3616 uint64_t *tmp; 3617 3618 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 3619 int64_t cc_r; 3620 uint_t pc = 0, id, opc; 3621 uint8_t ttop = 0; 3622 dif_instr_t instr; 3623 uint_t r1, r2, rd; 3624 3625 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 3626 3627 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 3628 opc = pc; 3629 3630 instr = text[pc++]; 3631 r1 = DIF_INSTR_R1(instr); 3632 r2 = DIF_INSTR_R2(instr); 3633 rd = DIF_INSTR_RD(instr); 3634 3635 switch (DIF_INSTR_OP(instr)) { 3636 case DIF_OP_OR: 3637 regs[rd] = regs[r1] | regs[r2]; 3638 break; 3639 case DIF_OP_XOR: 3640 regs[rd] = regs[r1] ^ regs[r2]; 3641 break; 3642 case DIF_OP_AND: 3643 regs[rd] = regs[r1] & regs[r2]; 3644 break; 3645 case DIF_OP_SLL: 3646 regs[rd] = regs[r1] << regs[r2]; 3647 break; 3648 case DIF_OP_SRL: 3649 regs[rd] = regs[r1] >> regs[r2]; 3650 break; 3651 case DIF_OP_SUB: 3652 regs[rd] = regs[r1] - regs[r2]; 3653 break; 3654 case DIF_OP_ADD: 3655 regs[rd] = regs[r1] + regs[r2]; 3656 break; 3657 case DIF_OP_MUL: 3658 regs[rd] = regs[r1] * regs[r2]; 3659 break; 3660 case DIF_OP_SDIV: 3661 if (regs[r2] == 0) { 3662 regs[rd] = 0; 3663 *flags |= CPU_DTRACE_DIVZERO; 3664 } else { 3665 regs[rd] = (int64_t)regs[r1] / 3666 (int64_t)regs[r2]; 3667 } 3668 break; 3669 3670 case DIF_OP_UDIV: 3671 if (regs[r2] == 0) { 3672 regs[rd] = 0; 3673 *flags |= CPU_DTRACE_DIVZERO; 3674 } else { 3675 regs[rd] = regs[r1] / regs[r2]; 3676 } 3677 break; 3678 
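/*
 * A note on the four division and remainder variants handled above and
 * below: the same 64-bit register contents yield different results
 * depending on whether the operation treats them as signed. For
 * example, with regs[r1] = 0xfffffffffffffff9 (-7 as an int64_t) and
 * regs[r2] = 2:
 *
 *	DIF_OP_SDIV:	-7 / 2		= -3	(0xfffffffffffffffd)
 *	DIF_OP_UDIV:	(2^64 - 7) / 2		= 0x7ffffffffffffffc
 *	DIF_OP_SREM:	-7 % 2		= -1	(0xffffffffffffffff)
 *	DIF_OP_UREM:	(2^64 - 7) % 2		= 1
 *
 * In all four cases a zero divisor does not fault the probe; it sets
 * CPU_DTRACE_DIVZERO and yields a zero result.
 */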
3679 case DIF_OP_SREM: 3680 if (regs[r2] == 0) { 3681 regs[rd] = 0; 3682 *flags |= CPU_DTRACE_DIVZERO; 3683 } else { 3684 regs[rd] = (int64_t)regs[r1] % 3685 (int64_t)regs[r2]; 3686 } 3687 break; 3688 3689 case DIF_OP_UREM: 3690 if (regs[r2] == 0) { 3691 regs[rd] = 0; 3692 *flags |= CPU_DTRACE_DIVZERO; 3693 } else { 3694 regs[rd] = regs[r1] % regs[r2]; 3695 } 3696 break; 3697 3698 case DIF_OP_NOT: 3699 regs[rd] = ~regs[r1]; 3700 break; 3701 case DIF_OP_MOV: 3702 regs[rd] = regs[r1]; 3703 break; 3704 case DIF_OP_CMP: 3705 cc_r = regs[r1] - regs[r2]; 3706 cc_n = cc_r < 0; 3707 cc_z = cc_r == 0; 3708 cc_v = 0; 3709 cc_c = regs[r1] < regs[r2]; 3710 break; 3711 case DIF_OP_TST: 3712 cc_n = cc_v = cc_c = 0; 3713 cc_z = regs[r1] == 0; 3714 break; 3715 case DIF_OP_BA: 3716 pc = DIF_INSTR_LABEL(instr); 3717 break; 3718 case DIF_OP_BE: 3719 if (cc_z) 3720 pc = DIF_INSTR_LABEL(instr); 3721 break; 3722 case DIF_OP_BNE: 3723 if (cc_z == 0) 3724 pc = DIF_INSTR_LABEL(instr); 3725 break; 3726 case DIF_OP_BG: 3727 if ((cc_z | (cc_n ^ cc_v)) == 0) 3728 pc = DIF_INSTR_LABEL(instr); 3729 break; 3730 case DIF_OP_BGU: 3731 if ((cc_c | cc_z) == 0) 3732 pc = DIF_INSTR_LABEL(instr); 3733 break; 3734 case DIF_OP_BGE: 3735 if ((cc_n ^ cc_v) == 0) 3736 pc = DIF_INSTR_LABEL(instr); 3737 break; 3738 case DIF_OP_BGEU: 3739 if (cc_c == 0) 3740 pc = DIF_INSTR_LABEL(instr); 3741 break; 3742 case DIF_OP_BL: 3743 if (cc_n ^ cc_v) 3744 pc = DIF_INSTR_LABEL(instr); 3745 break; 3746 case DIF_OP_BLU: 3747 if (cc_c) 3748 pc = DIF_INSTR_LABEL(instr); 3749 break; 3750 case DIF_OP_BLE: 3751 if (cc_z | (cc_n ^ cc_v)) 3752 pc = DIF_INSTR_LABEL(instr); 3753 break; 3754 case DIF_OP_BLEU: 3755 if (cc_c | cc_z) 3756 pc = DIF_INSTR_LABEL(instr); 3757 break; 3758 case DIF_OP_RLDSB: 3759 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 3760 *flags |= CPU_DTRACE_KPRIV; 3761 *illval = regs[r1]; 3762 break; 3763 } 3764 /*FALLTHROUGH*/ 3765 case DIF_OP_LDSB: 3766 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 3767 break; 3768 case DIF_OP_RLDSH: 3769 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 3770 *flags |= CPU_DTRACE_KPRIV; 3771 *illval = regs[r1]; 3772 break; 3773 } 3774 /*FALLTHROUGH*/ 3775 case DIF_OP_LDSH: 3776 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 3777 break; 3778 case DIF_OP_RLDSW: 3779 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 3780 *flags |= CPU_DTRACE_KPRIV; 3781 *illval = regs[r1]; 3782 break; 3783 } 3784 /*FALLTHROUGH*/ 3785 case DIF_OP_LDSW: 3786 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 3787 break; 3788 case DIF_OP_RLDUB: 3789 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 3790 *flags |= CPU_DTRACE_KPRIV; 3791 *illval = regs[r1]; 3792 break; 3793 } 3794 /*FALLTHROUGH*/ 3795 case DIF_OP_LDUB: 3796 regs[rd] = dtrace_load8(regs[r1]); 3797 break; 3798 case DIF_OP_RLDUH: 3799 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 3800 *flags |= CPU_DTRACE_KPRIV; 3801 *illval = regs[r1]; 3802 break; 3803 } 3804 /*FALLTHROUGH*/ 3805 case DIF_OP_LDUH: 3806 regs[rd] = dtrace_load16(regs[r1]); 3807 break; 3808 case DIF_OP_RLDUW: 3809 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 3810 *flags |= CPU_DTRACE_KPRIV; 3811 *illval = regs[r1]; 3812 break; 3813 } 3814 /*FALLTHROUGH*/ 3815 case DIF_OP_LDUW: 3816 regs[rd] = dtrace_load32(regs[r1]); 3817 break; 3818 case DIF_OP_RLDX: 3819 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) { 3820 *flags |= CPU_DTRACE_KPRIV; 3821 *illval = regs[r1]; 3822 break; 3823 } 3824 /*FALLTHROUGH*/ 3825 case DIF_OP_LDX: 3826 regs[rd] = dtrace_load64(regs[r1]); 3827 break; 
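/*
 * The DIF_OP_LD* and DIF_OP_RLD* cases above load through the
 * dtrace_load*() routines and therefore operate on kernel addresses
 * (with the RLD* variants first vetting the address); the DIF_OP_ULD*
 * cases that follow fetch from the current process's address space via
 * dtrace_fuword*(). In neither case does a bad address take the system
 * down: faults are reflected in the per-CPU CPU_DTRACE_* fault flags,
 * which terminate emulation via the check at the top of this loop.
 */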
3828 case DIF_OP_ULDSB: 3829 regs[rd] = (int8_t) 3830 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 3831 break; 3832 case DIF_OP_ULDSH: 3833 regs[rd] = (int16_t) 3834 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 3835 break; 3836 case DIF_OP_ULDSW: 3837 regs[rd] = (int32_t) 3838 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 3839 break; 3840 case DIF_OP_ULDUB: 3841 regs[rd] = 3842 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 3843 break; 3844 case DIF_OP_ULDUH: 3845 regs[rd] = 3846 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 3847 break; 3848 case DIF_OP_ULDUW: 3849 regs[rd] = 3850 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 3851 break; 3852 case DIF_OP_ULDX: 3853 regs[rd] = 3854 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 3855 break; 3856 case DIF_OP_RET: 3857 rval = regs[rd]; 3858 break; 3859 case DIF_OP_NOP: 3860 break; 3861 case DIF_OP_SETX: 3862 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 3863 break; 3864 case DIF_OP_SETS: 3865 regs[rd] = (uint64_t)(uintptr_t) 3866 (strtab + DIF_INSTR_STRING(instr)); 3867 break; 3868 case DIF_OP_SCMP: 3869 cc_r = dtrace_strncmp((char *)(uintptr_t)regs[r1], 3870 (char *)(uintptr_t)regs[r2], 3871 state->dts_options[DTRACEOPT_STRSIZE]); 3872 3873 cc_n = cc_r < 0; 3874 cc_z = cc_r == 0; 3875 cc_v = cc_c = 0; 3876 break; 3877 case DIF_OP_LDGA: 3878 regs[rd] = dtrace_dif_variable(mstate, state, 3879 r1, regs[r2]); 3880 break; 3881 case DIF_OP_LDGS: 3882 id = DIF_INSTR_VAR(instr); 3883 3884 if (id >= DIF_VAR_OTHER_UBASE) { 3885 uintptr_t a; 3886 3887 id -= DIF_VAR_OTHER_UBASE; 3888 svar = vstate->dtvs_globals[id]; 3889 ASSERT(svar != NULL); 3890 v = &svar->dtsv_var; 3891 3892 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 3893 regs[rd] = svar->dtsv_data; 3894 break; 3895 } 3896 3897 a = (uintptr_t)svar->dtsv_data; 3898 3899 if (*(uint8_t *)a == UINT8_MAX) { 3900 /* 3901 * If the 0th byte is set to UINT8_MAX 3902 * then this is to be treated as a 3903 * reference to a NULL variable. 3904 */ 3905 regs[rd] = NULL; 3906 } else { 3907 regs[rd] = a + sizeof (uint64_t); 3908 } 3909 3910 break; 3911 } 3912 3913 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 3914 break; 3915 3916 case DIF_OP_STGS: 3917 id = DIF_INSTR_VAR(instr); 3918 3919 ASSERT(id >= DIF_VAR_OTHER_UBASE); 3920 id -= DIF_VAR_OTHER_UBASE; 3921 3922 svar = vstate->dtvs_globals[id]; 3923 ASSERT(svar != NULL); 3924 v = &svar->dtsv_var; 3925 3926 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 3927 uintptr_t a = (uintptr_t)svar->dtsv_data; 3928 3929 ASSERT(a != NULL); 3930 ASSERT(svar->dtsv_size != 0); 3931 3932 if (regs[rd] == NULL) { 3933 *(uint8_t *)a = UINT8_MAX; 3934 break; 3935 } else { 3936 *(uint8_t *)a = 0; 3937 a += sizeof (uint64_t); 3938 } 3939 3940 dtrace_vcopy((void *)(uintptr_t)regs[rd], 3941 (void *)a, &v->dtdv_type); 3942 break; 3943 } 3944 3945 svar->dtsv_data = regs[rd]; 3946 break; 3947 3948 case DIF_OP_LDTA: 3949 /* 3950 * There are no DTrace built-in thread-local arrays at 3951 * present. This opcode is saved for future work. 3952 */ 3953 *flags |= CPU_DTRACE_ILLOP; 3954 regs[rd] = 0; 3955 break; 3956 3957 case DIF_OP_LDLS: 3958 id = DIF_INSTR_VAR(instr); 3959 3960 if (id < DIF_VAR_OTHER_UBASE) { 3961 /* 3962 * For now, this has no meaning. 
3963 */ 3964 regs[rd] = 0; 3965 break; 3966 } 3967 3968 id -= DIF_VAR_OTHER_UBASE; 3969 3970 ASSERT(id < vstate->dtvs_nlocals); 3971 ASSERT(vstate->dtvs_locals != NULL); 3972 3973 svar = vstate->dtvs_locals[id]; 3974 ASSERT(svar != NULL); 3975 v = &svar->dtsv_var; 3976 3977 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 3978 uintptr_t a = (uintptr_t)svar->dtsv_data; 3979 size_t sz = v->dtdv_type.dtdt_size; 3980 3981 sz += sizeof (uint64_t); 3982 ASSERT(svar->dtsv_size == NCPU * sz); 3983 a += CPU->cpu_id * sz; 3984 3985 if (*(uint8_t *)a == UINT8_MAX) { 3986 /* 3987 * If the 0th byte is set to UINT8_MAX 3988 * then this is to be treated as a 3989 * reference to a NULL variable. 3990 */ 3991 regs[rd] = NULL; 3992 } else { 3993 regs[rd] = a + sizeof (uint64_t); 3994 } 3995 3996 break; 3997 } 3998 3999 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 4000 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 4001 regs[rd] = tmp[CPU->cpu_id]; 4002 break; 4003 4004 case DIF_OP_STLS: 4005 id = DIF_INSTR_VAR(instr); 4006 4007 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4008 id -= DIF_VAR_OTHER_UBASE; 4009 ASSERT(id < vstate->dtvs_nlocals); 4010 4011 ASSERT(vstate->dtvs_locals != NULL); 4012 svar = vstate->dtvs_locals[id]; 4013 ASSERT(svar != NULL); 4014 v = &svar->dtsv_var; 4015 4016 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4017 uintptr_t a = (uintptr_t)svar->dtsv_data; 4018 size_t sz = v->dtdv_type.dtdt_size; 4019 4020 sz += sizeof (uint64_t); 4021 ASSERT(svar->dtsv_size == NCPU * sz); 4022 a += CPU->cpu_id * sz; 4023 4024 if (regs[rd] == NULL) { 4025 *(uint8_t *)a = UINT8_MAX; 4026 break; 4027 } else { 4028 *(uint8_t *)a = 0; 4029 a += sizeof (uint64_t); 4030 } 4031 4032 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4033 (void *)a, &v->dtdv_type); 4034 break; 4035 } 4036 4037 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 4038 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 4039 tmp[CPU->cpu_id] = regs[rd]; 4040 break; 4041 4042 case DIF_OP_LDTS: { 4043 dtrace_dynvar_t *dvar; 4044 dtrace_key_t *key; 4045 4046 id = DIF_INSTR_VAR(instr); 4047 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4048 id -= DIF_VAR_OTHER_UBASE; 4049 v = &vstate->dtvs_tlocals[id]; 4050 4051 key = &tupregs[DIF_DTR_NREGS]; 4052 key[0].dttk_value = (uint64_t)id; 4053 key[0].dttk_size = 0; 4054 DTRACE_TLS_THRKEY(key[1].dttk_value); 4055 key[1].dttk_size = 0; 4056 4057 dvar = dtrace_dynvar(dstate, 2, key, 4058 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC); 4059 4060 if (dvar == NULL) { 4061 regs[rd] = 0; 4062 break; 4063 } 4064 4065 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4066 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 4067 } else { 4068 regs[rd] = *((uint64_t *)dvar->dtdv_data); 4069 } 4070 4071 break; 4072 } 4073 4074 case DIF_OP_STTS: { 4075 dtrace_dynvar_t *dvar; 4076 dtrace_key_t *key; 4077 4078 id = DIF_INSTR_VAR(instr); 4079 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4080 id -= DIF_VAR_OTHER_UBASE; 4081 4082 key = &tupregs[DIF_DTR_NREGS]; 4083 key[0].dttk_value = (uint64_t)id; 4084 key[0].dttk_size = 0; 4085 DTRACE_TLS_THRKEY(key[1].dttk_value); 4086 key[1].dttk_size = 0; 4087 v = &vstate->dtvs_tlocals[id]; 4088 4089 dvar = dtrace_dynvar(dstate, 2, key, 4090 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 4091 v->dtdv_type.dtdt_size : sizeof (uint64_t), 4092 regs[rd] ? DTRACE_DYNVAR_ALLOC : 4093 DTRACE_DYNVAR_DEALLOC); 4094 4095 /* 4096 * Given that we're storing to thread-local data, 4097 * we need to flush our predicate cache. 
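 * (A cached predicate result may depend on thread-local state -- for
 * example, an enabling predicated on a self-> variable -- so any store
 * to a thread-local variable can render the cached result stale.)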
4098 */ 4099 curthread->t_predcache = NULL; 4100 4101 if (dvar == NULL) 4102 break; 4103 4104 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4105 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4106 dvar->dtdv_data, &v->dtdv_type); 4107 } else { 4108 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 4109 } 4110 4111 break; 4112 } 4113 4114 case DIF_OP_SRA: 4115 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 4116 break; 4117 4118 case DIF_OP_CALL: 4119 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 4120 regs, tupregs, ttop, mstate, state); 4121 break; 4122 4123 case DIF_OP_PUSHTR: 4124 if (ttop == DIF_DTR_NREGS) { 4125 *flags |= CPU_DTRACE_TUPOFLOW; 4126 break; 4127 } 4128 4129 if (r1 == DIF_TYPE_STRING) { 4130 /* 4131 * If this is a string type and the size is 0, 4132 * we'll use the system-wide default string 4133 * size. Note that we are _not_ looking at 4134 * the value of the DTRACEOPT_STRSIZE option; 4135 * had this been set, we would expect to have 4136 * a non-zero size value in the "pushtr". 4137 */ 4138 tupregs[ttop].dttk_size = 4139 dtrace_strlen((char *)(uintptr_t)regs[rd], 4140 regs[r2] ? regs[r2] : 4141 dtrace_strsize_default) + 1; 4142 } else { 4143 tupregs[ttop].dttk_size = regs[r2]; 4144 } 4145 4146 tupregs[ttop++].dttk_value = regs[rd]; 4147 break; 4148 4149 case DIF_OP_PUSHTV: 4150 if (ttop == DIF_DTR_NREGS) { 4151 *flags |= CPU_DTRACE_TUPOFLOW; 4152 break; 4153 } 4154 4155 tupregs[ttop].dttk_value = regs[rd]; 4156 tupregs[ttop++].dttk_size = 0; 4157 break; 4158 4159 case DIF_OP_POPTS: 4160 if (ttop != 0) 4161 ttop--; 4162 break; 4163 4164 case DIF_OP_FLUSHTS: 4165 ttop = 0; 4166 break; 4167 4168 case DIF_OP_LDGAA: 4169 case DIF_OP_LDTAA: { 4170 dtrace_dynvar_t *dvar; 4171 dtrace_key_t *key = tupregs; 4172 uint_t nkeys = ttop; 4173 4174 id = DIF_INSTR_VAR(instr); 4175 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4176 id -= DIF_VAR_OTHER_UBASE; 4177 4178 key[nkeys].dttk_value = (uint64_t)id; 4179 key[nkeys++].dttk_size = 0; 4180 4181 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 4182 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 4183 key[nkeys++].dttk_size = 0; 4184 v = &vstate->dtvs_tlocals[id]; 4185 } else { 4186 v = &vstate->dtvs_globals[id]->dtsv_var; 4187 } 4188 4189 dvar = dtrace_dynvar(dstate, nkeys, key, 4190 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 4191 v->dtdv_type.dtdt_size : sizeof (uint64_t), 4192 DTRACE_DYNVAR_NOALLOC); 4193 4194 if (dvar == NULL) { 4195 regs[rd] = 0; 4196 break; 4197 } 4198 4199 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4200 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 4201 } else { 4202 regs[rd] = *((uint64_t *)dvar->dtdv_data); 4203 } 4204 4205 break; 4206 } 4207 4208 case DIF_OP_STGAA: 4209 case DIF_OP_STTAA: { 4210 dtrace_dynvar_t *dvar; 4211 dtrace_key_t *key = tupregs; 4212 uint_t nkeys = ttop; 4213 4214 id = DIF_INSTR_VAR(instr); 4215 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4216 id -= DIF_VAR_OTHER_UBASE; 4217 4218 key[nkeys].dttk_value = (uint64_t)id; 4219 key[nkeys++].dttk_size = 0; 4220 4221 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 4222 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 4223 key[nkeys++].dttk_size = 0; 4224 v = &vstate->dtvs_tlocals[id]; 4225 } else { 4226 v = &vstate->dtvs_globals[id]->dtsv_var; 4227 } 4228 4229 dvar = dtrace_dynvar(dstate, nkeys, key, 4230 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 4231 v->dtdv_type.dtdt_size : sizeof (uint64_t), 4232 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 4233 DTRACE_DYNVAR_DEALLOC); 4234 4235 if (dvar == NULL) 4236 break; 4237 4238 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4239 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4240 dvar->dtdv_data, &v->dtdv_type); 4241 } else { 4242 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 4243 } 4244 4245 break; 4246 } 4247 4248 case DIF_OP_ALLOCS: { 4249 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 4250 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 4251 4252 if (mstate->dtms_scratch_ptr + size > 4253 mstate->dtms_scratch_base + 4254 mstate->dtms_scratch_size) { 4255 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4256 regs[rd] = NULL; 4257 } else { 4258 dtrace_bzero((void *) 4259 mstate->dtms_scratch_ptr, size); 4260 mstate->dtms_scratch_ptr += size; 4261 regs[rd] = ptr; 4262 } 4263 break; 4264 } 4265 4266 case DIF_OP_COPYS: 4267 if (!dtrace_canstore(regs[rd], regs[r2], 4268 mstate, vstate)) { 4269 *flags |= CPU_DTRACE_BADADDR; 4270 *illval = regs[rd]; 4271 break; 4272 } 4273 4274 dtrace_bcopy((void *)(uintptr_t)regs[r1], 4275 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 4276 break; 4277 4278 case DIF_OP_STB: 4279 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 4280 *flags |= CPU_DTRACE_BADADDR; 4281 *illval = regs[rd]; 4282 break; 4283 } 4284 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 4285 break; 4286 4287 case DIF_OP_STH: 4288 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 4289 *flags |= CPU_DTRACE_BADADDR; 4290 *illval = regs[rd]; 4291 break; 4292 } 4293 if (regs[rd] & 1) { 4294 *flags |= CPU_DTRACE_BADALIGN; 4295 *illval = regs[rd]; 4296 break; 4297 } 4298 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 4299 break; 4300 4301 case DIF_OP_STW: 4302 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 4303 *flags |= CPU_DTRACE_BADADDR; 4304 *illval = regs[rd]; 4305 break; 4306 } 4307 if (regs[rd] & 3) { 4308 *flags |= CPU_DTRACE_BADALIGN; 4309 *illval = regs[rd]; 4310 break; 4311 } 4312 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 4313 break; 4314 4315 case DIF_OP_STX: 4316 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 4317 *flags |= CPU_DTRACE_BADADDR; 4318 *illval = regs[rd]; 4319 break; 4320 } 4321 if (regs[rd] & 7) { 4322 *flags |= CPU_DTRACE_BADALIGN; 4323 *illval = regs[rd]; 4324 break; 4325 } 4326 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 4327 break; 4328 } 4329 } 4330 4331 if (!(*flags & CPU_DTRACE_FAULT)) 4332 return (rval); 4333 4334 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 4335 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 4336 4337 return (0); 4338 } 4339 4340 static void 4341 dtrace_action_breakpoint(dtrace_ecb_t *ecb) 4342 { 4343 dtrace_probe_t *probe = ecb->dte_probe; 4344 dtrace_provider_t *prov = probe->dtpr_provider; 4345 char c[DTRACE_FULLNAMELEN + 80], *str; 4346 char *msg = "dtrace: breakpoint action at probe "; 4347 char *ecbmsg = " (ecb "; 4348 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 4349 uintptr_t val = (uintptr_t)ecb; 4350 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 4351 4352 if (dtrace_destructive_disallow) 4353 return; 4354 4355 /* 4356 * It's impossible to be taking action on the NULL probe. 4357 */ 4358 ASSERT(probe != NULL); 4359 4360 /* 4361 * This is a poor man's (destitute man's?) sprintf(): we want to 4362 * print the provider name, module name, function name and name of 4363 * the probe, along with the hex address of the ECB with the breakpoint 4364 * action -- all of which we must place in the character buffer by 4365 * hand. 
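 * The resulting string takes roughly the following form (the probe and
 * ECB values here are purely illustrative):
 *
 *	dtrace: breakpoint action at probe fbt:genunix:kmem_alloc:entry
 *	(ecb 30000e5bd80)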
4366 */ 4367 while (*msg != '\0') 4368 c[i++] = *msg++; 4369 4370 for (str = prov->dtpv_name; *str != '\0'; str++) 4371 c[i++] = *str; 4372 c[i++] = ':'; 4373 4374 for (str = probe->dtpr_mod; *str != '\0'; str++) 4375 c[i++] = *str; 4376 c[i++] = ':'; 4377 4378 for (str = probe->dtpr_func; *str != '\0'; str++) 4379 c[i++] = *str; 4380 c[i++] = ':'; 4381 4382 for (str = probe->dtpr_name; *str != '\0'; str++) 4383 c[i++] = *str; 4384 4385 while (*ecbmsg != '\0') 4386 c[i++] = *ecbmsg++; 4387 4388 while (shift >= 0) { 4389 mask = (uintptr_t)0xf << shift; 4390 4391 if (val >= ((uintptr_t)1 << shift)) 4392 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 4393 shift -= 4; 4394 } 4395 4396 c[i++] = ')'; 4397 c[i] = '\0'; 4398 4399 debug_enter(c); 4400 } 4401 4402 static void 4403 dtrace_action_panic(dtrace_ecb_t *ecb) 4404 { 4405 dtrace_probe_t *probe = ecb->dte_probe; 4406 4407 /* 4408 * It's impossible to be taking action on the NULL probe. 4409 */ 4410 ASSERT(probe != NULL); 4411 4412 if (dtrace_destructive_disallow) 4413 return; 4414 4415 if (dtrace_panicked != NULL) 4416 return; 4417 4418 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 4419 return; 4420 4421 /* 4422 * We won the right to panic. (We want to be sure that only one 4423 * thread calls panic() from dtrace_probe(), and that panic() is 4424 * called exactly once.) 4425 */ 4426 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 4427 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 4428 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 4429 } 4430 4431 static void 4432 dtrace_action_raise(uint64_t sig) 4433 { 4434 if (dtrace_destructive_disallow) 4435 return; 4436 4437 if (sig >= NSIG) { 4438 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4439 return; 4440 } 4441 4442 /* 4443 * raise() has a queue depth of 1 -- we ignore all subsequent 4444 * invocations of the raise() action. 4445 */ 4446 if (curthread->t_dtrace_sig == 0) 4447 curthread->t_dtrace_sig = (uint8_t)sig; 4448 4449 curthread->t_sig_check = 1; 4450 aston(curthread); 4451 } 4452 4453 static void 4454 dtrace_action_stop(void) 4455 { 4456 if (dtrace_destructive_disallow) 4457 return; 4458 4459 if (!curthread->t_dtrace_stop) { 4460 curthread->t_dtrace_stop = 1; 4461 curthread->t_sig_check = 1; 4462 aston(curthread); 4463 } 4464 } 4465 4466 static void 4467 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 4468 { 4469 hrtime_t now; 4470 volatile uint16_t *flags; 4471 cpu_t *cpu = CPU; 4472 4473 if (dtrace_destructive_disallow) 4474 return; 4475 4476 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 4477 4478 now = dtrace_gethrtime(); 4479 4480 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 4481 /* 4482 * We need to advance the mark to the current time. 4483 */ 4484 cpu->cpu_dtrace_chillmark = now; 4485 cpu->cpu_dtrace_chilled = 0; 4486 } 4487 4488 /* 4489 * Now check to see if the requested chill time would take us over 4490 * the maximum amount of time allowed in the chill interval. (Or 4491 * worse, if the calculation itself induces overflow.) 4492 */ 4493 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 4494 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 4495 *flags |= CPU_DTRACE_ILLOP; 4496 return; 4497 } 4498 4499 while (dtrace_gethrtime() - now < val) 4500 continue; 4501 4502 /* 4503 * Normally, we assure that the value of the variable "timestamp" does 4504 * not change within an ECB. The presence of chill() represents an 4505 * exception to this rule, however. 
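 * Clearing DTRACE_MSTATE_TIMESTAMP below forces any subsequent
 * reference to "timestamp" within this ECB to be re-read, so that it
 * reflects the time after the chill rather than before it.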
4506 */ 4507 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 4508 cpu->cpu_dtrace_chilled += val; 4509 } 4510 4511 static void 4512 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 4513 uint64_t *buf, uint64_t arg) 4514 { 4515 int nframes = DTRACE_USTACK_NFRAMES(arg); 4516 int strsize = DTRACE_USTACK_STRSIZE(arg); 4517 uint64_t *pcs = &buf[1], *fps; 4518 char *str = (char *)&pcs[nframes]; 4519 int size, offs = 0, i, j; 4520 uintptr_t old = mstate->dtms_scratch_ptr, saved; 4521 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 4522 char *sym; 4523 4524 /* 4525 * Should be taking a faster path if string space has not been 4526 * allocated. 4527 */ 4528 ASSERT(strsize != 0); 4529 4530 /* 4531 * We will first allocate some temporary space for the frame pointers. 4532 */ 4533 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 4534 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 4535 (nframes * sizeof (uint64_t)); 4536 4537 if (mstate->dtms_scratch_ptr + size > 4538 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 4539 /* 4540 * Not enough room for our frame pointers -- need to indicate 4541 * that we ran out of scratch space. 4542 */ 4543 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4544 return; 4545 } 4546 4547 mstate->dtms_scratch_ptr += size; 4548 saved = mstate->dtms_scratch_ptr; 4549 4550 /* 4551 * Now get a stack with both program counters and frame pointers. 4552 */ 4553 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4554 dtrace_getufpstack(buf, fps, nframes + 1); 4555 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4556 4557 /* 4558 * If that faulted, we're cooked. 4559 */ 4560 if (*flags & CPU_DTRACE_FAULT) 4561 goto out; 4562 4563 /* 4564 * Now we want to walk up the stack, calling the USTACK helper. For 4565 * each iteration, we restore the scratch pointer. 4566 */ 4567 for (i = 0; i < nframes; i++) { 4568 mstate->dtms_scratch_ptr = saved; 4569 4570 if (offs >= strsize) 4571 break; 4572 4573 sym = (char *)(uintptr_t)dtrace_helper( 4574 DTRACE_HELPER_ACTION_USTACK, 4575 mstate, state, pcs[i], fps[i]); 4576 4577 /* 4578 * If we faulted while running the helper, we're going to 4579 * clear the fault and null out the corresponding string. 4580 */ 4581 if (*flags & CPU_DTRACE_FAULT) { 4582 *flags &= ~CPU_DTRACE_FAULT; 4583 str[offs++] = '\0'; 4584 continue; 4585 } 4586 4587 if (sym == NULL) { 4588 str[offs++] = '\0'; 4589 continue; 4590 } 4591 4592 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4593 4594 /* 4595 * Now copy in the string that the helper returned to us. 4596 */ 4597 for (j = 0; offs + j < strsize; j++) { 4598 if ((str[offs + j] = sym[j]) == '\0') 4599 break; 4600 } 4601 4602 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4603 4604 offs += j + 1; 4605 } 4606 4607 if (offs >= strsize) { 4608 /* 4609 * If we didn't have room for all of the strings, we don't 4610 * abort processing -- this needn't be a fatal error -- but we 4611 * still want to increment a counter (dts_stkstroverflows) to 4612 * allow this condition to be warned about. (If this is from 4613 * a jstack() action, it is easily tuned via jstackstrsize.) 4614 */ 4615 dtrace_error(&state->dts_stkstroverflows); 4616 } 4617 4618 while (offs < strsize) 4619 str[offs++] = '\0'; 4620 4621 out: 4622 mstate->dtms_scratch_ptr = old; 4623 } 4624 4625 /* 4626 * If you're looking for the epicenter of DTrace, you just found it. This 4627 * is the function called by the provider to fire a probe -- from which all 4628 * subsequent probe-context DTrace activity emanates. 
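 *
 * A provider typically fires its probe from the instrumentation point
 * with a call of roughly the following shape (an illustrative sketch
 * only; "my_probe_id" and the argument values are hypothetical):
 *
 *	dtrace_probe(my_probe_id, (uintptr_t)arg0, (uintptr_t)arg1,
 *	    (uintptr_t)arg2, (uintptr_t)arg3, (uintptr_t)arg4);
 *
 * where my_probe_id is the dtrace_id_t that dtrace_probe_create()
 * returned when the probe was created.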
4629 */ 4630 void 4631 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 4632 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 4633 { 4634 processorid_t cpuid; 4635 dtrace_icookie_t cookie; 4636 dtrace_probe_t *probe; 4637 dtrace_mstate_t mstate; 4638 dtrace_ecb_t *ecb; 4639 dtrace_action_t *act; 4640 intptr_t offs; 4641 size_t size; 4642 int vtime, onintr; 4643 volatile uint16_t *flags; 4644 hrtime_t now; 4645 4646 /* 4647 * Kick out immediately if this CPU is still being born (in which case 4648 * curthread will be set to -1) 4649 */ 4650 if ((uintptr_t)curthread & 1) 4651 return; 4652 4653 cookie = dtrace_interrupt_disable(); 4654 probe = dtrace_probes[id - 1]; 4655 cpuid = CPU->cpu_id; 4656 onintr = CPU_ON_INTR(CPU); 4657 4658 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 4659 probe->dtpr_predcache == curthread->t_predcache) { 4660 /* 4661 * We have hit in the predicate cache; we know that 4662 * this predicate would evaluate to be false. 4663 */ 4664 dtrace_interrupt_enable(cookie); 4665 return; 4666 } 4667 4668 if (panic_quiesce) { 4669 /* 4670 * We don't trace anything if we're panicking. 4671 */ 4672 dtrace_interrupt_enable(cookie); 4673 return; 4674 } 4675 4676 now = dtrace_gethrtime(); 4677 vtime = dtrace_vtime_references != 0; 4678 4679 if (vtime && curthread->t_dtrace_start) 4680 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 4681 4682 mstate.dtms_probe = probe; 4683 mstate.dtms_arg[0] = arg0; 4684 mstate.dtms_arg[1] = arg1; 4685 mstate.dtms_arg[2] = arg2; 4686 mstate.dtms_arg[3] = arg3; 4687 mstate.dtms_arg[4] = arg4; 4688 4689 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 4690 4691 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 4692 dtrace_predicate_t *pred = ecb->dte_predicate; 4693 dtrace_state_t *state = ecb->dte_state; 4694 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 4695 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 4696 dtrace_vstate_t *vstate = &state->dts_vstate; 4697 dtrace_provider_t *prov = probe->dtpr_provider; 4698 int committed = 0; 4699 caddr_t tomax; 4700 4701 /* 4702 * A little subtlety with the following (seemingly innocuous) 4703 * declaration of the automatic 'val': by looking at the 4704 * code, you might think that it could be declared in the 4705 * action processing loop, below. (That is, it's only used in 4706 * the action processing loop.) However, it must be declared 4707 * out of that scope because in the case of DIF expression 4708 * arguments to aggregating actions, one iteration of the 4709 * action loop will use the last iteration's value. 4710 */ 4711 #ifdef lint 4712 uint64_t val = 0; 4713 #else 4714 uint64_t val; 4715 #endif 4716 4717 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 4718 *flags &= ~CPU_DTRACE_ERROR; 4719 4720 if (prov == dtrace_provider) { 4721 /* 4722 * If dtrace itself is the provider of this probe, 4723 * we're only going to continue processing the ECB if 4724 * arg0 (the dtrace_state_t) is equal to the ECB's 4725 * creating state. (This prevents disjoint consumers 4726 * from seeing one another's metaprobes.) 4727 */ 4728 if (arg0 != (uint64_t)(uintptr_t)state) 4729 continue; 4730 } 4731 4732 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 4733 /* 4734 * We're not currently active. If our provider isn't 4735 * the dtrace pseudo provider, we're not interested. 4736 */ 4737 if (prov != dtrace_provider) 4738 continue; 4739 4740 /* 4741 * Now we must further check if we are in the BEGIN 4742 * probe. 
If we are, we will only continue processing
4743 * if we're still in WARMUP -- if one BEGIN enabling
4744 * has invoked the exit() action, we don't want to
4745 * evaluate subsequent BEGIN enablings.
4746 */
4747 if (probe->dtpr_id == dtrace_probeid_begin &&
4748 state->dts_activity != DTRACE_ACTIVITY_WARMUP) {
4749 ASSERT(state->dts_activity ==
4750 DTRACE_ACTIVITY_DRAINING);
4751 continue;
4752 }
4753 }
4754
4755 if (ecb->dte_cond) {
4756 /*
4757 * If the dte_cond bits indicate that this
4758 * consumer is only allowed to see user-mode firings
4759 * of this probe, call the provider's dtps_usermode()
4760 * entry point to check that the probe was fired
4761 * while in a user context. Skip this ECB if that's
4762 * not the case.
4763 */
4764 if ((ecb->dte_cond & DTRACE_COND_USERMODE) &&
4765 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg,
4766 probe->dtpr_id, probe->dtpr_arg) == 0)
4767 continue;
4768
4769 /*
4770 * This is more subtle than it looks. We have to be
4771 * absolutely certain that CRED() isn't going to
4772 * change out from under us so it's only legit to
4773 * examine that structure if we're in constrained
4774 * situations. Currently, the only time we'll do this
4775 * check is if a non-super-user has enabled the
4776 * profile or syscall providers -- providers that
4777 * allow visibility of all processes. For the
4778 * profile case, the check above will ensure that
4779 * we're examining a user context.
4780 */
4781 if (ecb->dte_cond & DTRACE_COND_OWNER) {
4782 cred_t *cr;
4783 cred_t *s_cr =
4784 ecb->dte_state->dts_cred.dcr_cred;
4785 proc_t *proc;
4786
4787 ASSERT(s_cr != NULL);
4788
4789 if ((cr = CRED()) == NULL ||
4790 s_cr->cr_uid != cr->cr_uid ||
4791 s_cr->cr_uid != cr->cr_ruid ||
4792 s_cr->cr_uid != cr->cr_suid ||
4793 s_cr->cr_gid != cr->cr_gid ||
4794 s_cr->cr_gid != cr->cr_rgid ||
4795 s_cr->cr_gid != cr->cr_sgid ||
4796 (proc = ttoproc(curthread)) == NULL ||
4797 (proc->p_flag & SNOCD))
4798 continue;
4799 }
4800
4801 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
4802 cred_t *cr;
4803 cred_t *s_cr =
4804 ecb->dte_state->dts_cred.dcr_cred;
4805
4806 ASSERT(s_cr != NULL);
4807
4808 if ((cr = CRED()) == NULL ||
4809 s_cr->cr_zone->zone_id !=
4810 cr->cr_zone->zone_id)
4811 continue;
4812 }
4813 }
4814
4815 if (now - state->dts_alive > dtrace_deadman_timeout) {
4816 /*
4817 * We seem to be dead. Unless we (a) have kernel
4818 * destructive permissions, (b) have explicitly enabled
4819 * destructive actions, and (c) destructive actions have
4820 * not been disabled, we're going to transition into
4821 * the KILLED state, from which no further processing
4822 * on this state will be performed.
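 * (The transition is made with a dtrace_cas32() loop rather than a
 * simple store because another CPU may be changing dts_activity --
 * for instance via an exit() action -- at the same time.)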
4823 */ 4824 if (!dtrace_priv_kernel_destructive(state) || 4825 !state->dts_cred.dcr_destructive || 4826 dtrace_destructive_disallow) { 4827 void *activity = &state->dts_activity; 4828 dtrace_activity_t current; 4829 4830 do { 4831 current = state->dts_activity; 4832 } while (dtrace_cas32(activity, current, 4833 DTRACE_ACTIVITY_KILLED) != current); 4834 4835 continue; 4836 } 4837 } 4838 4839 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 4840 ecb->dte_alignment, state, &mstate)) < 0) 4841 continue; 4842 4843 tomax = buf->dtb_tomax; 4844 ASSERT(tomax != NULL); 4845 4846 if (ecb->dte_size != 0) 4847 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid); 4848 4849 mstate.dtms_epid = ecb->dte_epid; 4850 mstate.dtms_present |= DTRACE_MSTATE_EPID; 4851 4852 if (pred != NULL) { 4853 dtrace_difo_t *dp = pred->dtp_difo; 4854 int rval; 4855 4856 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 4857 4858 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 4859 dtrace_cacheid_t cid = probe->dtpr_predcache; 4860 4861 if (cid != DTRACE_CACHEIDNONE && !onintr) { 4862 /* 4863 * Update the predicate cache... 4864 */ 4865 ASSERT(cid == pred->dtp_cacheid); 4866 curthread->t_predcache = cid; 4867 } 4868 4869 continue; 4870 } 4871 } 4872 4873 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 4874 act != NULL; act = act->dta_next) { 4875 size_t valoffs; 4876 dtrace_difo_t *dp; 4877 dtrace_recdesc_t *rec = &act->dta_rec; 4878 4879 size = rec->dtrd_size; 4880 valoffs = offs + rec->dtrd_offset; 4881 4882 if (DTRACEACT_ISAGG(act->dta_kind)) { 4883 uint64_t v = 0xbad; 4884 dtrace_aggregation_t *agg; 4885 4886 agg = (dtrace_aggregation_t *)act; 4887 4888 if ((dp = act->dta_difo) != NULL) 4889 v = dtrace_dif_emulate(dp, 4890 &mstate, vstate, state); 4891 4892 if (*flags & CPU_DTRACE_ERROR) 4893 continue; 4894 4895 /* 4896 * Note that we always pass the expression 4897 * value from the previous iteration of the 4898 * action loop. This value will only be used 4899 * if there is an expression argument to the 4900 * aggregating action, denoted by the 4901 * dtag_hasarg field. 4902 */ 4903 dtrace_aggregate(agg, buf, 4904 offs, aggbuf, v, val); 4905 continue; 4906 } 4907 4908 switch (act->dta_kind) { 4909 case DTRACEACT_STOP: 4910 if (dtrace_priv_proc_destructive(state)) 4911 dtrace_action_stop(); 4912 continue; 4913 4914 case DTRACEACT_BREAKPOINT: 4915 if (dtrace_priv_kernel_destructive(state)) 4916 dtrace_action_breakpoint(ecb); 4917 continue; 4918 4919 case DTRACEACT_PANIC: 4920 if (dtrace_priv_kernel_destructive(state)) 4921 dtrace_action_panic(ecb); 4922 continue; 4923 4924 case DTRACEACT_STACK: 4925 if (!dtrace_priv_kernel(state)) 4926 continue; 4927 4928 dtrace_getpcstack((pc_t *)(tomax + valoffs), 4929 size / sizeof (pc_t), probe->dtpr_aframes, 4930 DTRACE_ANCHORED(probe) ? NULL : 4931 (uint32_t *)arg0); 4932 4933 continue; 4934 4935 case DTRACEACT_JSTACK: 4936 case DTRACEACT_USTACK: 4937 if (!dtrace_priv_proc(state)) 4938 continue; 4939 4940 /* 4941 * See comment in DIF_VAR_PID. 
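 * (Briefly: when an anchored probe fires at interrupt level, the
 * interrupted thread's user stack bears no relationship to the probe,
 * so we emit zeroed frames below rather than walking it.)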
4942 */ 4943 if (DTRACE_ANCHORED(mstate.dtms_probe) && 4944 CPU_ON_INTR(CPU)) { 4945 int depth = DTRACE_USTACK_NFRAMES( 4946 rec->dtrd_arg) + 1; 4947 4948 dtrace_bzero((void *)(tomax + valoffs), 4949 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 4950 + depth * sizeof (uint64_t)); 4951 4952 continue; 4953 } 4954 4955 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 4956 curproc->p_dtrace_helpers != NULL) { 4957 /* 4958 * This is the slow path -- we have 4959 * allocated string space, and we're 4960 * getting the stack of a process that 4961 * has helpers. Call into a separate 4962 * routine to perform this processing. 4963 */ 4964 dtrace_action_ustack(&mstate, state, 4965 (uint64_t *)(tomax + valoffs), 4966 rec->dtrd_arg); 4967 continue; 4968 } 4969 4970 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4971 dtrace_getupcstack((uint64_t *) 4972 (tomax + valoffs), 4973 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 4974 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4975 continue; 4976 4977 default: 4978 break; 4979 } 4980 4981 dp = act->dta_difo; 4982 ASSERT(dp != NULL); 4983 4984 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 4985 4986 if (*flags & CPU_DTRACE_ERROR) 4987 continue; 4988 4989 switch (act->dta_kind) { 4990 case DTRACEACT_SPECULATE: 4991 ASSERT(buf == &state->dts_buffer[cpuid]); 4992 buf = dtrace_speculation_buffer(state, 4993 cpuid, val); 4994 4995 if (buf == NULL) { 4996 *flags |= CPU_DTRACE_DROP; 4997 continue; 4998 } 4999 5000 offs = dtrace_buffer_reserve(buf, 5001 ecb->dte_needed, ecb->dte_alignment, 5002 state, NULL); 5003 5004 if (offs < 0) { 5005 *flags |= CPU_DTRACE_DROP; 5006 continue; 5007 } 5008 5009 tomax = buf->dtb_tomax; 5010 ASSERT(tomax != NULL); 5011 5012 if (ecb->dte_size != 0) 5013 DTRACE_STORE(uint32_t, tomax, offs, 5014 ecb->dte_epid); 5015 continue; 5016 5017 case DTRACEACT_CHILL: 5018 if (dtrace_priv_kernel_destructive(state)) 5019 dtrace_action_chill(&mstate, val); 5020 continue; 5021 5022 case DTRACEACT_RAISE: 5023 if (dtrace_priv_proc_destructive(state)) 5024 dtrace_action_raise(val); 5025 continue; 5026 5027 case DTRACEACT_COMMIT: 5028 ASSERT(!committed); 5029 5030 /* 5031 * We need to commit our buffer state. 5032 */ 5033 if (ecb->dte_size) 5034 buf->dtb_offset = offs + ecb->dte_size; 5035 buf = &state->dts_buffer[cpuid]; 5036 dtrace_speculation_commit(state, cpuid, val); 5037 committed = 1; 5038 continue; 5039 5040 case DTRACEACT_DISCARD: 5041 dtrace_speculation_discard(state, cpuid, val); 5042 continue; 5043 5044 case DTRACEACT_DIFEXPR: 5045 case DTRACEACT_LIBACT: 5046 case DTRACEACT_PRINTF: 5047 case DTRACEACT_PRINTA: 5048 case DTRACEACT_SYSTEM: 5049 case DTRACEACT_FREOPEN: 5050 break; 5051 5052 case DTRACEACT_SYM: 5053 case DTRACEACT_MOD: 5054 if (!dtrace_priv_kernel(state)) 5055 continue; 5056 break; 5057 5058 case DTRACEACT_USYM: 5059 case DTRACEACT_UMOD: 5060 case DTRACEACT_UADDR: { 5061 struct pid *pid = curthread->t_procp->p_pidp; 5062 5063 if (!dtrace_priv_proc(state)) 5064 continue; 5065 5066 DTRACE_STORE(uint64_t, tomax, 5067 valoffs, (uint64_t)pid->pid_id); 5068 DTRACE_STORE(uint64_t, tomax, 5069 valoffs + sizeof (uint64_t), val); 5070 5071 continue; 5072 } 5073 5074 case DTRACEACT_EXIT: { 5075 /* 5076 * For the exit action, we are going to attempt 5077 * to atomically set our activity to be 5078 * draining. If this fails (either because 5079 * another CPU has beat us to the exit action, 5080 * or because our current activity is something 5081 * other than ACTIVE or WARMUP), we will 5082 * continue. 
This assures that the exit action 5083 * can be successfully recorded at most once 5084 * when we're in the ACTIVE state. If we're 5085 * encountering the exit() action while in 5086 * COOLDOWN, however, we want to honor the new 5087 * status code. (We know that we're the only 5088 * thread in COOLDOWN, so there is no race.) 5089 */ 5090 void *activity = &state->dts_activity; 5091 dtrace_activity_t current = state->dts_activity; 5092 5093 if (current == DTRACE_ACTIVITY_COOLDOWN) 5094 break; 5095 5096 if (current != DTRACE_ACTIVITY_WARMUP) 5097 current = DTRACE_ACTIVITY_ACTIVE; 5098 5099 if (dtrace_cas32(activity, current, 5100 DTRACE_ACTIVITY_DRAINING) != current) { 5101 *flags |= CPU_DTRACE_DROP; 5102 continue; 5103 } 5104 5105 break; 5106 } 5107 5108 default: 5109 ASSERT(0); 5110 } 5111 5112 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 5113 uintptr_t end = valoffs + size; 5114 5115 /* 5116 * If this is a string, we're going to only 5117 * load until we find the zero byte -- after 5118 * which we'll store zero bytes. 5119 */ 5120 if (dp->dtdo_rtype.dtdt_kind == 5121 DIF_TYPE_STRING) { 5122 char c = '\0' + 1; 5123 int intuple = act->dta_intuple; 5124 size_t s; 5125 5126 for (s = 0; s < size; s++) { 5127 if (c != '\0') 5128 c = dtrace_load8(val++); 5129 5130 DTRACE_STORE(uint8_t, tomax, 5131 valoffs++, c); 5132 5133 if (c == '\0' && intuple) 5134 break; 5135 } 5136 5137 continue; 5138 } 5139 5140 while (valoffs < end) { 5141 DTRACE_STORE(uint8_t, tomax, valoffs++, 5142 dtrace_load8(val++)); 5143 } 5144 5145 continue; 5146 } 5147 5148 switch (size) { 5149 case 0: 5150 break; 5151 5152 case sizeof (uint8_t): 5153 DTRACE_STORE(uint8_t, tomax, valoffs, val); 5154 break; 5155 case sizeof (uint16_t): 5156 DTRACE_STORE(uint16_t, tomax, valoffs, val); 5157 break; 5158 case sizeof (uint32_t): 5159 DTRACE_STORE(uint32_t, tomax, valoffs, val); 5160 break; 5161 case sizeof (uint64_t): 5162 DTRACE_STORE(uint64_t, tomax, valoffs, val); 5163 break; 5164 default: 5165 /* 5166 * Any other size should have been returned by 5167 * reference, not by value. 5168 */ 5169 ASSERT(0); 5170 break; 5171 } 5172 } 5173 5174 if (*flags & CPU_DTRACE_DROP) 5175 continue; 5176 5177 if (*flags & CPU_DTRACE_FAULT) { 5178 int ndx; 5179 dtrace_action_t *err; 5180 5181 buf->dtb_errors++; 5182 5183 if (probe->dtpr_id == dtrace_probeid_error) { 5184 /* 5185 * There's nothing we can do -- we had an 5186 * error on the error probe. We bump an 5187 * error counter to at least indicate that 5188 * this condition happened. 5189 */ 5190 dtrace_error(&state->dts_dblerrors); 5191 continue; 5192 } 5193 5194 if (vtime) { 5195 /* 5196 * Before recursing on dtrace_probe(), we 5197 * need to explicitly clear out our start 5198 * time to prevent it from being accumulated 5199 * into t_dtrace_vtime. 5200 */ 5201 curthread->t_dtrace_start = 0; 5202 } 5203 5204 /* 5205 * Iterate over the actions to figure out which action 5206 * we were processing when we experienced the error. 5207 * Note that act points _past_ the faulting action; if 5208 * act is ecb->dte_action, the fault was in the 5209 * predicate, if it's ecb->dte_action->dta_next it's 5210 * in action #1, and so on. 5211 */ 5212 for (err = ecb->dte_action, ndx = 0; 5213 err != act; err = err->dta_next, ndx++) 5214 continue; 5215 5216 dtrace_probe_error(state, ecb->dte_epid, ndx, 5217 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 
5218 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 5219 cpu_core[cpuid].cpuc_dtrace_illval); 5220 5221 continue; 5222 } 5223 5224 if (!committed) 5225 buf->dtb_offset = offs + ecb->dte_size; 5226 } 5227 5228 if (vtime) 5229 curthread->t_dtrace_start = dtrace_gethrtime(); 5230 5231 dtrace_interrupt_enable(cookie); 5232 } 5233 5234 /* 5235 * DTrace Probe Hashing Functions 5236 * 5237 * The functions in this section (and indeed, the functions in remaining 5238 * sections) are not _called_ from probe context. (Any exceptions to this are 5239 * marked with a "Note:".) Rather, they are called from elsewhere in the 5240 * DTrace framework to look-up probes in, add probes to and remove probes from 5241 * the DTrace probe hashes. (Each probe is hashed by each element of the 5242 * probe tuple -- allowing for fast lookups, regardless of what was 5243 * specified.) 5244 */ 5245 static uint_t 5246 dtrace_hash_str(char *p) 5247 { 5248 unsigned int g; 5249 uint_t hval = 0; 5250 5251 while (*p) { 5252 hval = (hval << 4) + *p++; 5253 if ((g = (hval & 0xf0000000)) != 0) 5254 hval ^= g >> 24; 5255 hval &= ~g; 5256 } 5257 return (hval); 5258 } 5259 5260 static dtrace_hash_t * 5261 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 5262 { 5263 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 5264 5265 hash->dth_stroffs = stroffs; 5266 hash->dth_nextoffs = nextoffs; 5267 hash->dth_prevoffs = prevoffs; 5268 5269 hash->dth_size = 1; 5270 hash->dth_mask = hash->dth_size - 1; 5271 5272 hash->dth_tab = kmem_zalloc(hash->dth_size * 5273 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 5274 5275 return (hash); 5276 } 5277 5278 static void 5279 dtrace_hash_destroy(dtrace_hash_t *hash) 5280 { 5281 #ifdef DEBUG 5282 int i; 5283 5284 for (i = 0; i < hash->dth_size; i++) 5285 ASSERT(hash->dth_tab[i] == NULL); 5286 #endif 5287 5288 kmem_free(hash->dth_tab, 5289 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 5290 kmem_free(hash, sizeof (dtrace_hash_t)); 5291 } 5292 5293 static void 5294 dtrace_hash_resize(dtrace_hash_t *hash) 5295 { 5296 int size = hash->dth_size, i, ndx; 5297 int new_size = hash->dth_size << 1; 5298 int new_mask = new_size - 1; 5299 dtrace_hashbucket_t **new_tab, *bucket, *next; 5300 5301 ASSERT((new_size & new_mask) == 0); 5302 5303 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 5304 5305 for (i = 0; i < size; i++) { 5306 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 5307 dtrace_probe_t *probe = bucket->dthb_chain; 5308 5309 ASSERT(probe != NULL); 5310 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 5311 5312 next = bucket->dthb_next; 5313 bucket->dthb_next = new_tab[ndx]; 5314 new_tab[ndx] = bucket; 5315 } 5316 } 5317 5318 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 5319 hash->dth_tab = new_tab; 5320 hash->dth_size = new_size; 5321 hash->dth_mask = new_mask; 5322 } 5323 5324 static void 5325 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 5326 { 5327 int hashval = DTRACE_HASHSTR(hash, new); 5328 int ndx = hashval & hash->dth_mask; 5329 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5330 dtrace_probe_t **nextp, **prevp; 5331 5332 for (; bucket != NULL; bucket = bucket->dthb_next) { 5333 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 5334 goto add; 5335 } 5336 5337 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 5338 dtrace_hash_resize(hash); 5339 dtrace_hash_add(hash, new); 5340 return; 5341 } 5342 5343 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 5344 bucket->dthb_next = 
hash->dth_tab[ndx]; 5345 hash->dth_tab[ndx] = bucket; 5346 hash->dth_nbuckets++; 5347 5348 add: 5349 nextp = DTRACE_HASHNEXT(hash, new); 5350 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 5351 *nextp = bucket->dthb_chain; 5352 5353 if (bucket->dthb_chain != NULL) { 5354 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 5355 ASSERT(*prevp == NULL); 5356 *prevp = new; 5357 } 5358 5359 bucket->dthb_chain = new; 5360 bucket->dthb_len++; 5361 } 5362 5363 static dtrace_probe_t * 5364 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 5365 { 5366 int hashval = DTRACE_HASHSTR(hash, template); 5367 int ndx = hashval & hash->dth_mask; 5368 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5369 5370 for (; bucket != NULL; bucket = bucket->dthb_next) { 5371 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 5372 return (bucket->dthb_chain); 5373 } 5374 5375 return (NULL); 5376 } 5377 5378 static int 5379 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 5380 { 5381 int hashval = DTRACE_HASHSTR(hash, template); 5382 int ndx = hashval & hash->dth_mask; 5383 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5384 5385 for (; bucket != NULL; bucket = bucket->dthb_next) { 5386 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 5387 return (bucket->dthb_len); 5388 } 5389 5390 return (NULL); 5391 } 5392 5393 static void 5394 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 5395 { 5396 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 5397 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5398 5399 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 5400 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 5401 5402 /* 5403 * Find the bucket that we're removing this probe from. 5404 */ 5405 for (; bucket != NULL; bucket = bucket->dthb_next) { 5406 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 5407 break; 5408 } 5409 5410 ASSERT(bucket != NULL); 5411 5412 if (*prevp == NULL) { 5413 if (*nextp == NULL) { 5414 /* 5415 * The removed probe was the only probe on this 5416 * bucket; we need to remove the bucket. 5417 */ 5418 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 5419 5420 ASSERT(bucket->dthb_chain == probe); 5421 ASSERT(b != NULL); 5422 5423 if (b == bucket) { 5424 hash->dth_tab[ndx] = bucket->dthb_next; 5425 } else { 5426 while (b->dthb_next != bucket) 5427 b = b->dthb_next; 5428 b->dthb_next = bucket->dthb_next; 5429 } 5430 5431 ASSERT(hash->dth_nbuckets > 0); 5432 hash->dth_nbuckets--; 5433 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 5434 return; 5435 } 5436 5437 bucket->dthb_chain = *nextp; 5438 } else { 5439 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 5440 } 5441 5442 if (*nextp != NULL) 5443 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 5444 } 5445 5446 /* 5447 * DTrace Utility Functions 5448 * 5449 * These are random utility functions that are _not_ called from probe context. 5450 */ 5451 static int 5452 dtrace_badattr(const dtrace_attribute_t *a) 5453 { 5454 return (a->dtat_name > DTRACE_STABILITY_MAX || 5455 a->dtat_data > DTRACE_STABILITY_MAX || 5456 a->dtat_class > DTRACE_CLASS_MAX); 5457 } 5458 5459 /* 5460 * Return a duplicate copy of a string. If the specified string is NULL, 5461 * this function returns a zero-length string. 5462 */ 5463 static char * 5464 dtrace_strdup(const char *str) 5465 { 5466 char *new = kmem_zalloc((str != NULL ? 
strlen(str) : 0) + 1, KM_SLEEP); 5467 5468 if (str != NULL) 5469 (void) strcpy(new, str); 5470 5471 return (new); 5472 } 5473 5474 #define DTRACE_ISALPHA(c) \ 5475 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 5476 5477 static int 5478 dtrace_badname(const char *s) 5479 { 5480 char c; 5481 5482 if (s == NULL || (c = *s++) == '\0') 5483 return (0); 5484 5485 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 5486 return (1); 5487 5488 while ((c = *s++) != '\0') { 5489 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 5490 c != '-' && c != '_' && c != '.' && c != '`') 5491 return (1); 5492 } 5493 5494 return (0); 5495 } 5496 5497 static void 5498 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 5499 { 5500 uint32_t priv; 5501 5502 *uidp = crgetuid(cr); 5503 *zoneidp = crgetzoneid(cr); 5504 if (PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 5505 priv = DTRACE_PRIV_ALL; 5506 } else { 5507 priv = 0; 5508 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 5509 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 5510 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 5511 priv |= DTRACE_PRIV_USER; 5512 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 5513 priv |= DTRACE_PRIV_PROC; 5514 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 5515 priv |= DTRACE_PRIV_OWNER; 5516 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 5517 priv |= DTRACE_PRIV_ZONEOWNER; 5518 } 5519 5520 *privp = priv; 5521 } 5522 5523 #ifdef DTRACE_ERRDEBUG 5524 static void 5525 dtrace_errdebug(const char *str) 5526 { 5527 int hval = dtrace_hash_str((char *)str) % DTRACE_ERRHASHSZ; 5528 int occupied = 0; 5529 5530 mutex_enter(&dtrace_errlock); 5531 dtrace_errlast = str; 5532 dtrace_errthread = curthread; 5533 5534 while (occupied++ < DTRACE_ERRHASHSZ) { 5535 if (dtrace_errhash[hval].dter_msg == str) { 5536 dtrace_errhash[hval].dter_count++; 5537 goto out; 5538 } 5539 5540 if (dtrace_errhash[hval].dter_msg != NULL) { 5541 hval = (hval + 1) % DTRACE_ERRHASHSZ; 5542 continue; 5543 } 5544 5545 dtrace_errhash[hval].dter_msg = str; 5546 dtrace_errhash[hval].dter_count = 1; 5547 goto out; 5548 } 5549 5550 panic("dtrace: undersized error hash"); 5551 out: 5552 mutex_exit(&dtrace_errlock); 5553 } 5554 #endif 5555 5556 /* 5557 * DTrace Matching Functions 5558 * 5559 * These functions are used to match groups of probes, given some elements of 5560 * a probe tuple, or some globbed expressions for elements of a probe tuple. 5561 */ 5562 static int 5563 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 5564 zoneid_t zoneid) 5565 { 5566 if (priv != DTRACE_PRIV_ALL) { 5567 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 5568 uint32_t match = priv & ppriv; 5569 5570 /* 5571 * No PRIV_DTRACE_* privileges... 5572 */ 5573 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 5574 DTRACE_PRIV_KERNEL)) == 0) 5575 return (0); 5576 5577 /* 5578 * No matching bits, but there were bits to match... 5579 */ 5580 if (match == 0 && ppriv != 0) 5581 return (0); 5582 5583 /* 5584 * Need to have permissions to the process, but don't... 5585 */ 5586 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 5587 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 5588 return (0); 5589 } 5590 5591 /* 5592 * Need to be in the same zone unless we possess the 5593 * privilege to examine all zones. 
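 * For example (illustrative): a provider registered with
 * DTRACE_PRIV_ZONEOWNER is visible to a consumer lacking
 * PRIV_PROC_ZONE only if that consumer's zone ID matches the
 * provider's dtpp_zoneid.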
5594 */ 5595 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 5596 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 5597 return (0); 5598 } 5599 } 5600 5601 return (1); 5602 } 5603 5604 /* 5605 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 5606 * consists of input pattern strings and an ops-vector to evaluate them. 5607 * This function returns >0 for match, 0 for no match, and <0 for error. 5608 */ 5609 static int 5610 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 5611 uint32_t priv, uid_t uid, zoneid_t zoneid) 5612 { 5613 dtrace_provider_t *pvp = prp->dtpr_provider; 5614 int rv; 5615 5616 if (pvp->dtpv_defunct) 5617 return (0); 5618 5619 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 5620 return (rv); 5621 5622 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 5623 return (rv); 5624 5625 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 5626 return (rv); 5627 5628 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 5629 return (rv); 5630 5631 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 5632 return (0); 5633 5634 return (rv); 5635 } 5636 5637 /* 5638 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 5639 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 5640 * libc's version, the kernel version only applies to 8-bit ASCII strings. 5641 * In addition, all of the recursion cases except for '*' matching have been 5642 * unwound. For '*', we still implement recursive evaluation, but a depth 5643 * counter is maintained and matching is aborted if we recurse too deep. 5644 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 5645 */ 5646 static int 5647 dtrace_match_glob(const char *s, const char *p, int depth) 5648 { 5649 const char *olds; 5650 char s1, c; 5651 int gs; 5652 5653 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 5654 return (-1); 5655 5656 if (s == NULL) 5657 s = ""; /* treat NULL as empty string */ 5658 5659 top: 5660 olds = s; 5661 s1 = *s++; 5662 5663 if (p == NULL) 5664 return (0); 5665 5666 if ((c = *p++) == '\0') 5667 return (s1 == '\0'); 5668 5669 switch (c) { 5670 case '[': { 5671 int ok = 0, notflag = 0; 5672 char lc = '\0'; 5673 5674 if (s1 == '\0') 5675 return (0); 5676 5677 if (*p == '!') { 5678 notflag = 1; 5679 p++; 5680 } 5681 5682 if ((c = *p++) == '\0') 5683 return (0); 5684 5685 do { 5686 if (c == '-' && lc != '\0' && *p != ']') { 5687 if ((c = *p++) == '\0') 5688 return (0); 5689 if (c == '\\' && (c = *p++) == '\0') 5690 return (0); 5691 5692 if (notflag) { 5693 if (s1 < lc || s1 > c) 5694 ok++; 5695 else 5696 return (0); 5697 } else if (lc <= s1 && s1 <= c) 5698 ok++; 5699 5700 } else if (c == '\\' && (c = *p++) == '\0') 5701 return (0); 5702 5703 lc = c; /* save left-hand 'c' for next iteration */ 5704 5705 if (notflag) { 5706 if (s1 != c) 5707 ok++; 5708 else 5709 return (0); 5710 } else if (s1 == c) 5711 ok++; 5712 5713 if ((c = *p++) == '\0') 5714 return (0); 5715 5716 } while (c != ']'); 5717 5718 if (ok) 5719 goto top; 5720 5721 return (0); 5722 } 5723 5724 case '\\': 5725 if ((c = *p++) == '\0') 5726 return (0); 5727 /*FALLTHRU*/ 5728 5729 default: 5730 if (c != s1) 5731 return (0); 5732 /*FALLTHRU*/ 5733 5734 case '?': 5735 if (s1 != '\0') 5736 goto top; 5737 return (0); 5738 5739 case '*': 5740 while (*p == '*') 5741 p++; /* consecutive *'s are identical to a single one */ 5742 5743 if (*p == '\0') 5744 return (1); 5745 5746 for (s = 
olds; *s != '\0'; s++) { 5747 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 5748 return (gs); 5749 } 5750 5751 return (0); 5752 } 5753 } 5754 5755 /*ARGSUSED*/ 5756 static int 5757 dtrace_match_string(const char *s, const char *p, int depth) 5758 { 5759 return (s != NULL && strcmp(s, p) == 0); 5760 } 5761 5762 /*ARGSUSED*/ 5763 static int 5764 dtrace_match_nul(const char *s, const char *p, int depth) 5765 { 5766 return (1); /* always match the empty pattern */ 5767 } 5768 5769 /*ARGSUSED*/ 5770 static int 5771 dtrace_match_nonzero(const char *s, const char *p, int depth) 5772 { 5773 return (s != NULL && s[0] != '\0'); 5774 } 5775 5776 static int 5777 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 5778 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 5779 { 5780 dtrace_probe_t template, *probe; 5781 dtrace_hash_t *hash = NULL; 5782 int len, best = INT_MAX, nmatched = 0; 5783 dtrace_id_t i; 5784 5785 ASSERT(MUTEX_HELD(&dtrace_lock)); 5786 5787 /* 5788 * If the probe ID is specified in the key, just lookup by ID and 5789 * invoke the match callback once if a matching probe is found. 5790 */ 5791 if (pkp->dtpk_id != DTRACE_IDNONE) { 5792 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 5793 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 5794 (void) (*matched)(probe, arg); 5795 nmatched++; 5796 } 5797 return (nmatched); 5798 } 5799 5800 template.dtpr_mod = (char *)pkp->dtpk_mod; 5801 template.dtpr_func = (char *)pkp->dtpk_func; 5802 template.dtpr_name = (char *)pkp->dtpk_name; 5803 5804 /* 5805 * We want to find the most distinct of the module name, function 5806 * name, and name. So for each one that is not a glob pattern or 5807 * empty string, we perform a lookup in the corresponding hash and 5808 * use the hash table with the fewest collisions to do our search. 5809 */ 5810 if (pkp->dtpk_mmatch == &dtrace_match_string && 5811 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 5812 best = len; 5813 hash = dtrace_bymod; 5814 } 5815 5816 if (pkp->dtpk_fmatch == &dtrace_match_string && 5817 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 5818 best = len; 5819 hash = dtrace_byfunc; 5820 } 5821 5822 if (pkp->dtpk_nmatch == &dtrace_match_string && 5823 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 5824 best = len; 5825 hash = dtrace_byname; 5826 } 5827 5828 /* 5829 * If we did not select a hash table, iterate over every probe and 5830 * invoke our callback for each one that matches our input probe key. 5831 */ 5832 if (hash == NULL) { 5833 for (i = 0; i < dtrace_nprobes; i++) { 5834 if ((probe = dtrace_probes[i]) == NULL || 5835 dtrace_match_probe(probe, pkp, priv, uid, 5836 zoneid) <= 0) 5837 continue; 5838 5839 nmatched++; 5840 5841 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 5842 break; 5843 } 5844 5845 return (nmatched); 5846 } 5847 5848 /* 5849 * If we selected a hash table, iterate over each probe of the same key 5850 * name and invoke the callback for every probe that matches the other 5851 * attributes of our input probe key. 
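 * For example (illustrative): for a description such as
 * *:unix:kmem_alloc:entry, the module, function, and name components
 * are all exact strings, and whichever of dtrace_bymod, dtrace_byfunc,
 * or dtrace_byname reports the fewest collisions for those values is
 * the hash that gets walked here.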
5852 */ 5853 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 5854 probe = *(DTRACE_HASHNEXT(hash, probe))) { 5855 5856 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 5857 continue; 5858 5859 nmatched++; 5860 5861 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 5862 break; 5863 } 5864 5865 return (nmatched); 5866 } 5867 5868 /* 5869 * Return the function pointer dtrace_probecmp() should use to compare the 5870 * specified pattern with a string. For NULL or empty patterns, we select 5871 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 5872 * For non-empty non-glob strings, we use dtrace_match_string(). 5873 */ 5874 static dtrace_probekey_f * 5875 dtrace_probekey_func(const char *p) 5876 { 5877 char c; 5878 5879 if (p == NULL || *p == '\0') 5880 return (&dtrace_match_nul); 5881 5882 while ((c = *p++) != '\0') { 5883 if (c == '[' || c == '?' || c == '*' || c == '\\') 5884 return (&dtrace_match_glob); 5885 } 5886 5887 return (&dtrace_match_string); 5888 } 5889 5890 /* 5891 * Build a probe comparison key for use with dtrace_match_probe() from the 5892 * given probe description. By convention, a null key only matches anchored 5893 * probes: if each field is the empty string, reset dtpk_fmatch to 5894 * dtrace_match_nonzero(). 5895 */ 5896 static void 5897 dtrace_probekey(const dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 5898 { 5899 pkp->dtpk_prov = pdp->dtpd_provider; 5900 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 5901 5902 pkp->dtpk_mod = pdp->dtpd_mod; 5903 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 5904 5905 pkp->dtpk_func = pdp->dtpd_func; 5906 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 5907 5908 pkp->dtpk_name = pdp->dtpd_name; 5909 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 5910 5911 pkp->dtpk_id = pdp->dtpd_id; 5912 5913 if (pkp->dtpk_id == DTRACE_IDNONE && 5914 pkp->dtpk_pmatch == &dtrace_match_nul && 5915 pkp->dtpk_mmatch == &dtrace_match_nul && 5916 pkp->dtpk_fmatch == &dtrace_match_nul && 5917 pkp->dtpk_nmatch == &dtrace_match_nul) 5918 pkp->dtpk_fmatch = &dtrace_match_nonzero; 5919 } 5920 5921 /* 5922 * DTrace Provider-to-Framework API Functions 5923 * 5924 * These functions implement much of the Provider-to-Framework API, as 5925 * described in <sys/dtrace.h>. The parts of the API not in this section are 5926 * the functions in the API for probe management (found below), and 5927 * dtrace_probe() itself (found above). 5928 */ 5929 5930 /* 5931 * Register the calling provider with the DTrace framework. This should 5932 * generally be called by DTrace providers in their attach(9E) entry point. 5933 */ 5934 int 5935 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 5936 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 5937 { 5938 dtrace_provider_t *provider; 5939 5940 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 5941 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 5942 "arguments", name ? 
name : "<NULL>"); 5943 return (EINVAL); 5944 } 5945 5946 if (name[0] == '\0' || dtrace_badname(name)) { 5947 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 5948 "provider name", name); 5949 return (EINVAL); 5950 } 5951 5952 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 5953 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 5954 pops->dtps_destroy == NULL || 5955 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 5956 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 5957 "provider ops", name); 5958 return (EINVAL); 5959 } 5960 5961 if (dtrace_badattr(&pap->dtpa_provider) || 5962 dtrace_badattr(&pap->dtpa_mod) || 5963 dtrace_badattr(&pap->dtpa_func) || 5964 dtrace_badattr(&pap->dtpa_name) || 5965 dtrace_badattr(&pap->dtpa_args)) { 5966 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 5967 "provider attributes", name); 5968 return (EINVAL); 5969 } 5970 5971 if (priv & ~DTRACE_PRIV_ALL) { 5972 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 5973 "privilege attributes", name); 5974 return (EINVAL); 5975 } 5976 5977 if ((priv & DTRACE_PRIV_KERNEL) && 5978 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 5979 pops->dtps_usermode == NULL) { 5980 cmn_err(CE_WARN, "failed to register provider '%s': need " 5981 "dtps_usermode() op for given privilege attributes", name); 5982 return (EINVAL); 5983 } 5984 5985 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 5986 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 5987 (void) strcpy(provider->dtpv_name, name); 5988 5989 provider->dtpv_attr = *pap; 5990 provider->dtpv_priv.dtpp_flags = priv; 5991 if (cr != NULL) { 5992 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 5993 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 5994 } 5995 provider->dtpv_pops = *pops; 5996 5997 if (pops->dtps_provide == NULL) { 5998 ASSERT(pops->dtps_provide_module != NULL); 5999 provider->dtpv_pops.dtps_provide = 6000 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop; 6001 } 6002 6003 if (pops->dtps_provide_module == NULL) { 6004 ASSERT(pops->dtps_provide != NULL); 6005 provider->dtpv_pops.dtps_provide_module = 6006 (void (*)(void *, struct modctl *))dtrace_nullop; 6007 } 6008 6009 if (pops->dtps_suspend == NULL) { 6010 ASSERT(pops->dtps_resume == NULL); 6011 provider->dtpv_pops.dtps_suspend = 6012 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 6013 provider->dtpv_pops.dtps_resume = 6014 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 6015 } 6016 6017 provider->dtpv_arg = arg; 6018 *idp = (dtrace_provider_id_t)provider; 6019 6020 if (pops == &dtrace_provider_ops) { 6021 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 6022 ASSERT(MUTEX_HELD(&dtrace_lock)); 6023 ASSERT(dtrace_anon.dta_enabling == NULL); 6024 6025 /* 6026 * We make sure that the DTrace provider is at the head of 6027 * the provider chain. 6028 */ 6029 provider->dtpv_next = dtrace_provider; 6030 dtrace_provider = provider; 6031 return (0); 6032 } 6033 6034 mutex_enter(&dtrace_provider_lock); 6035 mutex_enter(&dtrace_lock); 6036 6037 /* 6038 * If there is at least one provider registered, we'll add this 6039 * provider after the first provider. 
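 * (The head of the chain is reserved for the DTrace provider itself,
 * so new providers are linked in immediately after it.)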
6040 */ 6041 if (dtrace_provider != NULL) { 6042 provider->dtpv_next = dtrace_provider->dtpv_next; 6043 dtrace_provider->dtpv_next = provider; 6044 } else { 6045 dtrace_provider = provider; 6046 } 6047 6048 if (dtrace_retained != NULL) { 6049 dtrace_enabling_provide(provider); 6050 6051 /* 6052 * Now we need to call dtrace_enabling_matchall() -- which 6053 * will acquire cpu_lock and dtrace_lock. We therefore need 6054 * to drop all of our locks before calling into it... 6055 */ 6056 mutex_exit(&dtrace_lock); 6057 mutex_exit(&dtrace_provider_lock); 6058 dtrace_enabling_matchall(); 6059 6060 return (0); 6061 } 6062 6063 mutex_exit(&dtrace_lock); 6064 mutex_exit(&dtrace_provider_lock); 6065 6066 return (0); 6067 } 6068 6069 /* 6070 * Unregister the specified provider from the DTrace framework. This should 6071 * generally be called by DTrace providers in their detach(9E) entry point. 6072 */ 6073 int 6074 dtrace_unregister(dtrace_provider_id_t id) 6075 { 6076 dtrace_provider_t *old = (dtrace_provider_t *)id; 6077 dtrace_provider_t *prev = NULL; 6078 int i, self = 0; 6079 dtrace_probe_t *probe, *first = NULL; 6080 6081 if (old->dtpv_pops.dtps_enable == 6082 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 6083 /* 6084 * If DTrace itself is the provider, we're called with locks 6085 * already held. 6086 */ 6087 ASSERT(old == dtrace_provider); 6088 ASSERT(dtrace_devi != NULL); 6089 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 6090 ASSERT(MUTEX_HELD(&dtrace_lock)); 6091 self = 1; 6092 6093 if (dtrace_provider->dtpv_next != NULL) { 6094 /* 6095 * There's another provider here; return failure. 6096 */ 6097 return (EBUSY); 6098 } 6099 } else { 6100 mutex_enter(&dtrace_provider_lock); 6101 mutex_enter(&mod_lock); 6102 mutex_enter(&dtrace_lock); 6103 } 6104 6105 /* 6106 * If anyone has /dev/dtrace open, or if there are anonymous enabled 6107 * probes, we refuse to let providers slither away, unless this 6108 * provider has already been explicitly invalidated. 6109 */ 6110 if (!old->dtpv_defunct && 6111 (dtrace_opens || (dtrace_anon.dta_state != NULL && 6112 dtrace_anon.dta_state->dts_necbs > 0))) { 6113 if (!self) { 6114 mutex_exit(&dtrace_lock); 6115 mutex_exit(&mod_lock); 6116 mutex_exit(&dtrace_provider_lock); 6117 } 6118 return (EBUSY); 6119 } 6120 6121 /* 6122 * Attempt to destroy the probes associated with this provider. 6123 */ 6124 for (i = 0; i < dtrace_nprobes; i++) { 6125 if ((probe = dtrace_probes[i]) == NULL) 6126 continue; 6127 6128 if (probe->dtpr_provider != old) 6129 continue; 6130 6131 if (probe->dtpr_ecb == NULL) 6132 continue; 6133 6134 /* 6135 * We have at least one ECB; we can't remove this provider. 6136 */ 6137 if (!self) { 6138 mutex_exit(&dtrace_lock); 6139 mutex_exit(&mod_lock); 6140 mutex_exit(&dtrace_provider_lock); 6141 } 6142 return (EBUSY); 6143 } 6144 6145 /* 6146 * All of the probes for this provider are disabled; we can safely 6147 * remove all of them from their hash chains and from the probe array. 
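 * The dtpr_nextmod linkage is reused below as a scratch list of the
 * removed probes; they are not destroyed and freed until after the
 * dtrace_sync() that follows.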
6148 */ 6149 for (i = 0; i < dtrace_nprobes; i++) { 6150 if ((probe = dtrace_probes[i]) == NULL) 6151 continue; 6152 6153 if (probe->dtpr_provider != old) 6154 continue; 6155 6156 dtrace_probes[i] = NULL; 6157 6158 dtrace_hash_remove(dtrace_bymod, probe); 6159 dtrace_hash_remove(dtrace_byfunc, probe); 6160 dtrace_hash_remove(dtrace_byname, probe); 6161 6162 if (first == NULL) { 6163 first = probe; 6164 probe->dtpr_nextmod = NULL; 6165 } else { 6166 probe->dtpr_nextmod = first; 6167 first = probe; 6168 } 6169 } 6170 6171 /* 6172 * The provider's probes have been removed from the hash chains and 6173 * from the probe array. Now issue a dtrace_sync() to be sure that 6174 * everyone has cleared out from any probe array processing. 6175 */ 6176 dtrace_sync(); 6177 6178 for (probe = first; probe != NULL; probe = first) { 6179 first = probe->dtpr_nextmod; 6180 6181 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 6182 probe->dtpr_arg); 6183 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 6184 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 6185 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 6186 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 6187 kmem_free(probe, sizeof (dtrace_probe_t)); 6188 } 6189 6190 if ((prev = dtrace_provider) == old) { 6191 ASSERT(self || dtrace_devi == NULL); 6192 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 6193 dtrace_provider = old->dtpv_next; 6194 } else { 6195 while (prev != NULL && prev->dtpv_next != old) 6196 prev = prev->dtpv_next; 6197 6198 if (prev == NULL) { 6199 panic("attempt to unregister non-existent " 6200 "dtrace provider %p\n", (void *)id); 6201 } 6202 6203 prev->dtpv_next = old->dtpv_next; 6204 } 6205 6206 if (!self) { 6207 mutex_exit(&dtrace_lock); 6208 mutex_exit(&mod_lock); 6209 mutex_exit(&dtrace_provider_lock); 6210 } 6211 6212 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 6213 kmem_free(old, sizeof (dtrace_provider_t)); 6214 6215 return (0); 6216 } 6217 6218 /* 6219 * Invalidate the specified provider. All subsequent probe lookups for the 6220 * specified provider will fail, but its probes will not be removed. 6221 */ 6222 void 6223 dtrace_invalidate(dtrace_provider_id_t id) 6224 { 6225 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 6226 6227 ASSERT(pvp->dtpv_pops.dtps_enable != 6228 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 6229 6230 mutex_enter(&dtrace_provider_lock); 6231 mutex_enter(&dtrace_lock); 6232 6233 pvp->dtpv_defunct = 1; 6234 6235 mutex_exit(&dtrace_lock); 6236 mutex_exit(&dtrace_provider_lock); 6237 } 6238 6239 /* 6240 * Indicate whether or not DTrace has attached. 6241 */ 6242 int 6243 dtrace_attached(void) 6244 { 6245 /* 6246 * dtrace_provider will be non-NULL iff the DTrace driver has 6247 * attached. (It's non-NULL because DTrace is always itself a 6248 * provider.) 6249 */ 6250 return (dtrace_provider != NULL); 6251 } 6252 6253 /* 6254 * Remove all the unenabled probes for the given provider. This function is 6255 * not unlike dtrace_unregister(), except that it doesn't remove the provider 6256 * -- just as many of its associated probes as it can. 6257 */ 6258 int 6259 dtrace_condense(dtrace_provider_id_t id) 6260 { 6261 dtrace_provider_t *prov = (dtrace_provider_t *)id; 6262 int i; 6263 dtrace_probe_t *probe; 6264 6265 /* 6266 * Make sure this isn't the dtrace provider itself. 
6267 */ 6268 ASSERT(prov->dtpv_pops.dtps_enable != 6269 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 6270 6271 mutex_enter(&dtrace_provider_lock); 6272 mutex_enter(&dtrace_lock); 6273 6274 /* 6275 * Attempt to destroy the probes associated with this provider. 6276 */ 6277 for (i = 0; i < dtrace_nprobes; i++) { 6278 if ((probe = dtrace_probes[i]) == NULL) 6279 continue; 6280 6281 if (probe->dtpr_provider != prov) 6282 continue; 6283 6284 if (probe->dtpr_ecb != NULL) 6285 continue; 6286 6287 dtrace_probes[i] = NULL; 6288 6289 dtrace_hash_remove(dtrace_bymod, probe); 6290 dtrace_hash_remove(dtrace_byfunc, probe); 6291 dtrace_hash_remove(dtrace_byname, probe); 6292 6293 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 6294 probe->dtpr_arg); 6295 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 6296 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 6297 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 6298 kmem_free(probe, sizeof (dtrace_probe_t)); 6299 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 6300 } 6301 6302 mutex_exit(&dtrace_lock); 6303 mutex_exit(&dtrace_provider_lock); 6304 6305 return (0); 6306 } 6307 6308 /* 6309 * DTrace Probe Management Functions 6310 * 6311 * The functions in this section perform the DTrace probe management, 6312 * including functions to create probes, look-up probes, and call into the 6313 * providers to request that probes be provided. Some of these functions are 6314 * in the Provider-to-Framework API; these functions can be identified by the 6315 * fact that they are not declared "static". 6316 */ 6317 6318 /* 6319 * Create a probe with the specified module name, function name, and name. 6320 */ 6321 dtrace_id_t 6322 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 6323 const char *func, const char *name, int aframes, void *arg) 6324 { 6325 dtrace_probe_t *probe, **probes; 6326 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 6327 dtrace_id_t id; 6328 6329 if (provider == dtrace_provider) { 6330 ASSERT(MUTEX_HELD(&dtrace_lock)); 6331 } else { 6332 mutex_enter(&dtrace_lock); 6333 } 6334 6335 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 6336 VM_BESTFIT | VM_SLEEP); 6337 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 6338 6339 probe->dtpr_id = id; 6340 probe->dtpr_gen = dtrace_probegen++; 6341 probe->dtpr_mod = dtrace_strdup(mod); 6342 probe->dtpr_func = dtrace_strdup(func); 6343 probe->dtpr_name = dtrace_strdup(name); 6344 probe->dtpr_arg = arg; 6345 probe->dtpr_aframes = aframes; 6346 probe->dtpr_provider = provider; 6347 6348 dtrace_hash_add(dtrace_bymod, probe); 6349 dtrace_hash_add(dtrace_byfunc, probe); 6350 dtrace_hash_add(dtrace_byname, probe); 6351 6352 if (id - 1 >= dtrace_nprobes) { 6353 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 6354 size_t nsize = osize << 1; 6355 6356 if (nsize == 0) { 6357 ASSERT(osize == 0); 6358 ASSERT(dtrace_probes == NULL); 6359 nsize = sizeof (dtrace_probe_t *); 6360 } 6361 6362 probes = kmem_zalloc(nsize, KM_SLEEP); 6363 6364 if (dtrace_probes == NULL) { 6365 ASSERT(osize == 0); 6366 dtrace_probes = probes; 6367 dtrace_nprobes = 1; 6368 } else { 6369 dtrace_probe_t **oprobes = dtrace_probes; 6370 6371 bcopy(oprobes, probes, osize); 6372 dtrace_membar_producer(); 6373 dtrace_probes = probes; 6374 6375 dtrace_sync(); 6376 6377 /* 6378 * All CPUs are now seeing the new probes array; we can 6379 * safely free the old array. 
6380 */ 6381 kmem_free(oprobes, osize); 6382 dtrace_nprobes <<= 1; 6383 } 6384 6385 ASSERT(id - 1 < dtrace_nprobes); 6386 } 6387 6388 ASSERT(dtrace_probes[id - 1] == NULL); 6389 dtrace_probes[id - 1] = probe; 6390 6391 if (provider != dtrace_provider) 6392 mutex_exit(&dtrace_lock); 6393 6394 return (id); 6395 } 6396 6397 static dtrace_probe_t * 6398 dtrace_probe_lookup_id(dtrace_id_t id) 6399 { 6400 ASSERT(MUTEX_HELD(&dtrace_lock)); 6401 6402 if (id == 0 || id > dtrace_nprobes) 6403 return (NULL); 6404 6405 return (dtrace_probes[id - 1]); 6406 } 6407 6408 static int 6409 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 6410 { 6411 *((dtrace_id_t *)arg) = probe->dtpr_id; 6412 6413 return (DTRACE_MATCH_DONE); 6414 } 6415 6416 /* 6417 * Look up a probe based on provider and one or more of module name, function 6418 * name and probe name. 6419 */ 6420 dtrace_id_t 6421 dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod, 6422 const char *func, const char *name) 6423 { 6424 dtrace_probekey_t pkey; 6425 dtrace_id_t id; 6426 int match; 6427 6428 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 6429 pkey.dtpk_pmatch = &dtrace_match_string; 6430 pkey.dtpk_mod = mod; 6431 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 6432 pkey.dtpk_func = func; 6433 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 6434 pkey.dtpk_name = name; 6435 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul; 6436 pkey.dtpk_id = DTRACE_IDNONE; 6437 6438 mutex_enter(&dtrace_lock); 6439 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, 6440 dtrace_probe_lookup_match, &id); 6441 mutex_exit(&dtrace_lock); 6442 6443 ASSERT(match == 1 || match == 0); 6444 return (match ? id : 0); 6445 } 6446 6447 /* 6448 * Returns the probe argument associated with the specified probe. 6449 */ 6450 void * 6451 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) 6452 { 6453 dtrace_probe_t *probe; 6454 void *rval = NULL; 6455 6456 mutex_enter(&dtrace_lock); 6457 6458 if ((probe = dtrace_probe_lookup_id(pid)) != NULL && 6459 probe->dtpr_provider == (dtrace_provider_t *)id) 6460 rval = probe->dtpr_arg; 6461 6462 mutex_exit(&dtrace_lock); 6463 6464 return (rval); 6465 } 6466 6467 /* 6468 * Copy a probe into a probe description. 6469 */ 6470 static void 6471 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) 6472 { 6473 bzero(pdp, sizeof (dtrace_probedesc_t)); 6474 pdp->dtpd_id = prp->dtpr_id; 6475 6476 (void) strncpy(pdp->dtpd_provider, 6477 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); 6478 6479 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); 6480 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); 6481 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); 6482 } 6483 6484 /* 6485 * Called to indicate that a probe -- or probes -- should be provided by a 6486 * specified provider. If the specified description is NULL, the provider will 6487 * be told to provide all of its probes. (This is done whenever a new 6488 * consumer comes along, or whenever a retained enabling is to be matched.) If 6489 * the specified description is non-NULL, the provider is given the 6490 * opportunity to dynamically provide the specified probe, allowing providers 6491 * to support the creation of probes on-the-fly. (So-called _autocreated_ 6492 * probes.)
If the provider is NULL, the operations will be applied to all 6493 * providers; if the provider is non-NULL the operations will only be applied 6494 * to the specified provider. The dtrace_provider_lock must be held, and the 6495 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 6496 * will need to grab the dtrace_lock when it reenters the framework through 6497 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 6498 */ 6499 static void 6500 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 6501 { 6502 struct modctl *ctl; 6503 int all = 0; 6504 6505 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 6506 6507 if (prv == NULL) { 6508 all = 1; 6509 prv = dtrace_provider; 6510 } 6511 6512 do { 6513 /* 6514 * First, call the blanket provide operation. 6515 */ 6516 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 6517 6518 /* 6519 * Now call the per-module provide operation. We will grab 6520 * mod_lock to prevent the list from being modified. Note 6521 * that this also prevents the mod_busy bits from changing. 6522 * (mod_busy can only be changed with mod_lock held.) 6523 */ 6524 mutex_enter(&mod_lock); 6525 6526 ctl = &modules; 6527 do { 6528 if (ctl->mod_busy || ctl->mod_mp == NULL) 6529 continue; 6530 6531 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 6532 6533 } while ((ctl = ctl->mod_next) != &modules); 6534 6535 mutex_exit(&mod_lock); 6536 } while (all && (prv = prv->dtpv_next) != NULL); 6537 } 6538 6539 /* 6540 * Iterate over each probe, and call the Framework-to-Provider API function 6541 * denoted by offs. 6542 */ 6543 static void 6544 dtrace_probe_foreach(uintptr_t offs) 6545 { 6546 dtrace_provider_t *prov; 6547 void (*func)(void *, dtrace_id_t, void *); 6548 dtrace_probe_t *probe; 6549 dtrace_icookie_t cookie; 6550 int i; 6551 6552 /* 6553 * We disable interrupts to walk through the probe array. This is 6554 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 6555 * won't see stale data. 6556 */ 6557 cookie = dtrace_interrupt_disable(); 6558 6559 for (i = 0; i < dtrace_nprobes; i++) { 6560 if ((probe = dtrace_probes[i]) == NULL) 6561 continue; 6562 6563 if (probe->dtpr_ecb == NULL) { 6564 /* 6565 * This probe isn't enabled -- don't call the function. 6566 */ 6567 continue; 6568 } 6569 6570 prov = probe->dtpr_provider; 6571 func = *((void(**)(void *, dtrace_id_t, void *)) 6572 ((uintptr_t)&prov->dtpv_pops + offs)); 6573 6574 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 6575 } 6576 6577 dtrace_interrupt_enable(cookie); 6578 } 6579 6580 static int 6581 dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 6582 { 6583 dtrace_probekey_t pkey; 6584 uint32_t priv; 6585 uid_t uid; 6586 zoneid_t zoneid; 6587 6588 ASSERT(MUTEX_HELD(&dtrace_lock)); 6589 dtrace_ecb_create_cache = NULL; 6590 6591 if (desc == NULL) { 6592 /* 6593 * If we're passed a NULL description, we're being asked to 6594 * create an ECB with a NULL probe. 
6595 */ 6596 (void) dtrace_ecb_create_enable(NULL, enab); 6597 return (0); 6598 } 6599 6600 dtrace_probekey(desc, &pkey); 6601 dtrace_cred2priv(CRED(), &priv, &uid, &zoneid); 6602 6603 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 6604 enab)); 6605 } 6606 6607 /* 6608 * DTrace Helper Provider Functions 6609 */ 6610 static void 6611 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 6612 { 6613 attr->dtat_name = DOF_ATTR_NAME(dofattr); 6614 attr->dtat_data = DOF_ATTR_DATA(dofattr); 6615 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 6616 } 6617 6618 static void 6619 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 6620 const dof_provider_t *dofprov, char *strtab) 6621 { 6622 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 6623 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 6624 dofprov->dofpv_provattr); 6625 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 6626 dofprov->dofpv_modattr); 6627 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 6628 dofprov->dofpv_funcattr); 6629 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 6630 dofprov->dofpv_nameattr); 6631 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 6632 dofprov->dofpv_argsattr); 6633 } 6634 6635 static void 6636 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 6637 { 6638 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 6639 dof_hdr_t *dof = (dof_hdr_t *)daddr; 6640 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 6641 dof_provider_t *provider; 6642 dof_probe_t *probe; 6643 uint32_t *off, *enoff; 6644 uint8_t *arg; 6645 char *strtab; 6646 uint_t i, nprobes; 6647 dtrace_helper_provdesc_t dhpv; 6648 dtrace_helper_probedesc_t dhpb; 6649 dtrace_meta_t *meta = dtrace_meta_pid; 6650 dtrace_mops_t *mops = &meta->dtm_mops; 6651 void *parg; 6652 6653 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 6654 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 6655 provider->dofpv_strtab * dof->dofh_secsize); 6656 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 6657 provider->dofpv_probes * dof->dofh_secsize); 6658 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 6659 provider->dofpv_prargs * dof->dofh_secsize); 6660 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 6661 provider->dofpv_proffs * dof->dofh_secsize); 6662 6663 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 6664 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 6665 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 6666 enoff = NULL; 6667 6668 /* 6669 * See dtrace_helper_provider_validate(). 6670 */ 6671 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 6672 provider->dofpv_prenoffs != 0) { 6673 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 6674 provider->dofpv_prenoffs * dof->dofh_secsize); 6675 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 6676 } 6677 6678 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 6679 6680 /* 6681 * Create the provider. 6682 */ 6683 dtrace_dofprov2hprov(&dhpv, provider, strtab); 6684 6685 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 6686 return; 6687 6688 meta->dtm_count++; 6689 6690 /* 6691 * Create the probes. 
6692 */ 6693 for (i = 0; i < nprobes; i++) { 6694 probe = (dof_probe_t *)(uintptr_t)(daddr + 6695 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 6696 6697 dhpb.dthpb_mod = dhp->dofhp_mod; 6698 dhpb.dthpb_func = strtab + probe->dofpr_func; 6699 dhpb.dthpb_name = strtab + probe->dofpr_name; 6700 dhpb.dthpb_base = probe->dofpr_addr; 6701 dhpb.dthpb_offs = off + probe->dofpr_offidx; 6702 dhpb.dthpb_noffs = probe->dofpr_noffs; 6703 if (enoff != NULL) { 6704 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 6705 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 6706 } else { 6707 dhpb.dthpb_enoffs = NULL; 6708 dhpb.dthpb_nenoffs = 0; 6709 } 6710 dhpb.dthpb_args = arg + probe->dofpr_argidx; 6711 dhpb.dthpb_nargc = probe->dofpr_nargc; 6712 dhpb.dthpb_xargc = probe->dofpr_xargc; 6713 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 6714 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 6715 6716 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 6717 } 6718 } 6719 6720 static void 6721 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 6722 { 6723 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 6724 dof_hdr_t *dof = (dof_hdr_t *)daddr; 6725 int i; 6726 6727 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 6728 6729 for (i = 0; i < dof->dofh_secnum; i++) { 6730 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 6731 dof->dofh_secoff + i * dof->dofh_secsize); 6732 6733 if (sec->dofs_type != DOF_SECT_PROVIDER) 6734 continue; 6735 6736 dtrace_helper_provide_one(dhp, sec, pid); 6737 } 6738 6739 /* 6740 * We may have just created probes, so we must now rematch against 6741 * any retained enablings. Note that this call will acquire both 6742 * cpu_lock and dtrace_lock; the fact that we are holding 6743 * dtrace_meta_lock now is what defines the ordering with respect to 6744 * these three locks. 6745 */ 6746 dtrace_enabling_matchall(); 6747 } 6748 6749 static void 6750 dtrace_helper_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 6751 { 6752 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 6753 dof_hdr_t *dof = (dof_hdr_t *)daddr; 6754 dof_sec_t *str_sec; 6755 dof_provider_t *provider; 6756 char *strtab; 6757 dtrace_helper_provdesc_t dhpv; 6758 dtrace_meta_t *meta = dtrace_meta_pid; 6759 dtrace_mops_t *mops = &meta->dtm_mops; 6760 6761 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 6762 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 6763 provider->dofpv_strtab * dof->dofh_secsize); 6764 6765 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 6766 6767 /* 6768 * Create the provider. 6769 */ 6770 dtrace_dofprov2hprov(&dhpv, provider, strtab); 6771 6772 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 6773 6774 meta->dtm_count--; 6775 } 6776 6777 static void 6778 dtrace_helper_remove(dof_helper_t *dhp, pid_t pid) 6779 { 6780 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 6781 dof_hdr_t *dof = (dof_hdr_t *)daddr; 6782 int i; 6783 6784 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 6785 6786 for (i = 0; i < dof->dofh_secnum; i++) { 6787 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 6788 dof->dofh_secoff + i * dof->dofh_secsize); 6789 6790 if (sec->dofs_type != DOF_SECT_PROVIDER) 6791 continue; 6792 6793 dtrace_helper_remove_one(dhp, sec, pid); 6794 } 6795 } 6796 6797 /* 6798 * DTrace Meta Provider-to-Framework API Functions 6799 * 6800 * These functions implement the Meta Provider-to-Framework API, as described 6801 * in <sys/dtrace.h>. 
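 *
 * An illustrative sketch of their use (not part of the original source;
 * the "foo" names are hypothetical, and the dtrace_mops_t member order
 * is assumed to follow its declaration in <sys/dtrace.h>):
 *
 *	static dtrace_meta_provider_id_t foo_meta_id;
 *
 *	static dtrace_mops_t foo_mops = {
 *		foo_create_probe,
 *		foo_provide_pid,
 *		foo_remove_pid
 *	};
 *
 *	From foo's attach(9E) entry point:
 *
 *		if (dtrace_meta_register("foo", &foo_mops, NULL,
 *		    &foo_meta_id) != 0)
 *			return (DDI_FAILURE);
 *
 *	From foo's detach(9E) entry point:
 *
 *		if (dtrace_meta_unregister(foo_meta_id) != 0)
 *			return (DDI_FAILURE);
 *
 * dtrace_meta_unregister() returns EBUSY for as long as the
 * meta-provider still has helper providers attached (dtm_count != 0).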
6802 */ 6803 int 6804 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 6805 dtrace_meta_provider_id_t *idp) 6806 { 6807 dtrace_meta_t *meta; 6808 dtrace_helpers_t *help, *next; 6809 int i; 6810 6811 *idp = DTRACE_METAPROVNONE; 6812 6813 /* 6814 * We strictly don't need the name, but we hold onto it for 6815 * debuggability. All hail error queues! 6816 */ 6817 if (name == NULL) { 6818 cmn_err(CE_WARN, "failed to register meta-provider: " 6819 "invalid name"); 6820 return (EINVAL); 6821 } 6822 6823 if (mops == NULL || 6824 mops->dtms_create_probe == NULL || 6825 mops->dtms_provide_pid == NULL || 6826 mops->dtms_remove_pid == NULL) { 6827 cmn_err(CE_WARN, "failed to register meta-register %s: " 6828 "invalid ops", name); 6829 return (EINVAL); 6830 } 6831 6832 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 6833 meta->dtm_mops = *mops; 6834 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 6835 (void) strcpy(meta->dtm_name, name); 6836 meta->dtm_arg = arg; 6837 6838 mutex_enter(&dtrace_meta_lock); 6839 mutex_enter(&dtrace_lock); 6840 6841 if (dtrace_meta_pid != NULL) { 6842 mutex_exit(&dtrace_lock); 6843 mutex_exit(&dtrace_meta_lock); 6844 cmn_err(CE_WARN, "failed to register meta-register %s: " 6845 "user-land meta-provider exists", name); 6846 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 6847 kmem_free(meta, sizeof (dtrace_meta_t)); 6848 return (EINVAL); 6849 } 6850 6851 dtrace_meta_pid = meta; 6852 *idp = (dtrace_meta_provider_id_t)meta; 6853 6854 /* 6855 * If there are providers and probes ready to go, pass them 6856 * off to the new meta provider now. 6857 */ 6858 6859 help = dtrace_deferred_pid; 6860 dtrace_deferred_pid = NULL; 6861 6862 mutex_exit(&dtrace_lock); 6863 6864 while (help != NULL) { 6865 for (i = 0; i < help->dthps_nprovs; i++) { 6866 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 6867 help->dthps_pid); 6868 } 6869 6870 next = help->dthps_next; 6871 help->dthps_next = NULL; 6872 help->dthps_prev = NULL; 6873 help->dthps_deferred = 0; 6874 help = next; 6875 } 6876 6877 mutex_exit(&dtrace_meta_lock); 6878 6879 return (0); 6880 } 6881 6882 int 6883 dtrace_meta_unregister(dtrace_meta_provider_id_t id) 6884 { 6885 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 6886 6887 mutex_enter(&dtrace_meta_lock); 6888 mutex_enter(&dtrace_lock); 6889 6890 if (old == dtrace_meta_pid) { 6891 pp = &dtrace_meta_pid; 6892 } else { 6893 panic("attempt to unregister non-existent " 6894 "dtrace meta-provider %p\n", (void *)old); 6895 } 6896 6897 if (old->dtm_count != 0) { 6898 mutex_exit(&dtrace_lock); 6899 mutex_exit(&dtrace_meta_lock); 6900 return (EBUSY); 6901 } 6902 6903 *pp = NULL; 6904 6905 mutex_exit(&dtrace_lock); 6906 mutex_exit(&dtrace_meta_lock); 6907 6908 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 6909 kmem_free(old, sizeof (dtrace_meta_t)); 6910 6911 return (0); 6912 } 6913 6914 6915 /* 6916 * DTrace DIF Object Functions 6917 */ 6918 static int 6919 dtrace_difo_err(uint_t pc, const char *format, ...) 6920 { 6921 if (dtrace_err_verbose) { 6922 va_list alist; 6923 6924 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 6925 va_start(alist, format); 6926 (void) vuprintf(format, alist); 6927 va_end(alist); 6928 } 6929 6930 #ifdef DTRACE_ERRDEBUG 6931 dtrace_errdebug(format); 6932 #endif 6933 return (1); 6934 } 6935 6936 /* 6937 * Validate a DTrace DIF object by checking the IR instructions. The following 6938 * rules are currently enforced by dtrace_difo_validate(): 6939 * 6940 * 1. 
Each instruction must have a valid opcode 6941 * 2. Each register, string, variable, or subroutine reference must be valid 6942 * 3. No instruction can modify register %r0 (must be zero) 6943 * 4. All instruction reserved bits must be set to zero 6944 * 5. The last instruction must be a "ret" instruction 6945 * 6. All branch targets must reference a valid instruction _after_ the branch 6946 */ 6947 static int 6948 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 6949 cred_t *cr) 6950 { 6951 int err = 0, i; 6952 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 6953 int kcheck; 6954 uint_t pc; 6955 6956 kcheck = cr == NULL || 6957 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE) == 0; 6958 6959 dp->dtdo_destructive = 0; 6960 6961 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 6962 dif_instr_t instr = dp->dtdo_buf[pc]; 6963 6964 uint_t r1 = DIF_INSTR_R1(instr); 6965 uint_t r2 = DIF_INSTR_R2(instr); 6966 uint_t rd = DIF_INSTR_RD(instr); 6967 uint_t rs = DIF_INSTR_RS(instr); 6968 uint_t label = DIF_INSTR_LABEL(instr); 6969 uint_t v = DIF_INSTR_VAR(instr); 6970 uint_t subr = DIF_INSTR_SUBR(instr); 6971 uint_t type = DIF_INSTR_TYPE(instr); 6972 uint_t op = DIF_INSTR_OP(instr); 6973 6974 switch (op) { 6975 case DIF_OP_OR: 6976 case DIF_OP_XOR: 6977 case DIF_OP_AND: 6978 case DIF_OP_SLL: 6979 case DIF_OP_SRL: 6980 case DIF_OP_SRA: 6981 case DIF_OP_SUB: 6982 case DIF_OP_ADD: 6983 case DIF_OP_MUL: 6984 case DIF_OP_SDIV: 6985 case DIF_OP_UDIV: 6986 case DIF_OP_SREM: 6987 case DIF_OP_UREM: 6988 case DIF_OP_COPYS: 6989 if (r1 >= nregs) 6990 err += efunc(pc, "invalid register %u\n", r1); 6991 if (r2 >= nregs) 6992 err += efunc(pc, "invalid register %u\n", r2); 6993 if (rd >= nregs) 6994 err += efunc(pc, "invalid register %u\n", rd); 6995 if (rd == 0) 6996 err += efunc(pc, "cannot write to %r0\n"); 6997 break; 6998 case DIF_OP_NOT: 6999 case DIF_OP_MOV: 7000 case DIF_OP_ALLOCS: 7001 if (r1 >= nregs) 7002 err += efunc(pc, "invalid register %u\n", r1); 7003 if (r2 != 0) 7004 err += efunc(pc, "non-zero reserved bits\n"); 7005 if (rd >= nregs) 7006 err += efunc(pc, "invalid register %u\n", rd); 7007 if (rd == 0) 7008 err += efunc(pc, "cannot write to %r0\n"); 7009 break; 7010 case DIF_OP_LDSB: 7011 case DIF_OP_LDSH: 7012 case DIF_OP_LDSW: 7013 case DIF_OP_LDUB: 7014 case DIF_OP_LDUH: 7015 case DIF_OP_LDUW: 7016 case DIF_OP_LDX: 7017 if (r1 >= nregs) 7018 err += efunc(pc, "invalid register %u\n", r1); 7019 if (r2 != 0) 7020 err += efunc(pc, "non-zero reserved bits\n"); 7021 if (rd >= nregs) 7022 err += efunc(pc, "invalid register %u\n", rd); 7023 if (rd == 0) 7024 err += efunc(pc, "cannot write to %r0\n"); 7025 if (kcheck) 7026 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 7027 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 7028 break; 7029 case DIF_OP_RLDSB: 7030 case DIF_OP_RLDSH: 7031 case DIF_OP_RLDSW: 7032 case DIF_OP_RLDUB: 7033 case DIF_OP_RLDUH: 7034 case DIF_OP_RLDUW: 7035 case DIF_OP_RLDX: 7036 if (r1 >= nregs) 7037 err += efunc(pc, "invalid register %u\n", r1); 7038 if (r2 != 0) 7039 err += efunc(pc, "non-zero reserved bits\n"); 7040 if (rd >= nregs) 7041 err += efunc(pc, "invalid register %u\n", rd); 7042 if (rd == 0) 7043 err += efunc(pc, "cannot write to %r0\n"); 7044 break; 7045 case DIF_OP_ULDSB: 7046 case DIF_OP_ULDSH: 7047 case DIF_OP_ULDSW: 7048 case DIF_OP_ULDUB: 7049 case DIF_OP_ULDUH: 7050 case DIF_OP_ULDUW: 7051 case DIF_OP_ULDX: 7052 if (r1 >= nregs) 7053 err += efunc(pc, "invalid register %u\n", r1); 7054 if (r2 != 0) 7055 err += efunc(pc, "non-zero 
reserved bits\n"); 7056 if (rd >= nregs) 7057 err += efunc(pc, "invalid register %u\n", rd); 7058 if (rd == 0) 7059 err += efunc(pc, "cannot write to %r0\n"); 7060 break; 7061 case DIF_OP_STB: 7062 case DIF_OP_STH: 7063 case DIF_OP_STW: 7064 case DIF_OP_STX: 7065 if (r1 >= nregs) 7066 err += efunc(pc, "invalid register %u\n", r1); 7067 if (r2 != 0) 7068 err += efunc(pc, "non-zero reserved bits\n"); 7069 if (rd >= nregs) 7070 err += efunc(pc, "invalid register %u\n", rd); 7071 if (rd == 0) 7072 err += efunc(pc, "cannot write to 0 address\n"); 7073 break; 7074 case DIF_OP_CMP: 7075 case DIF_OP_SCMP: 7076 if (r1 >= nregs) 7077 err += efunc(pc, "invalid register %u\n", r1); 7078 if (r2 >= nregs) 7079 err += efunc(pc, "invalid register %u\n", r2); 7080 if (rd != 0) 7081 err += efunc(pc, "non-zero reserved bits\n"); 7082 break; 7083 case DIF_OP_TST: 7084 if (r1 >= nregs) 7085 err += efunc(pc, "invalid register %u\n", r1); 7086 if (r2 != 0 || rd != 0) 7087 err += efunc(pc, "non-zero reserved bits\n"); 7088 break; 7089 case DIF_OP_BA: 7090 case DIF_OP_BE: 7091 case DIF_OP_BNE: 7092 case DIF_OP_BG: 7093 case DIF_OP_BGU: 7094 case DIF_OP_BGE: 7095 case DIF_OP_BGEU: 7096 case DIF_OP_BL: 7097 case DIF_OP_BLU: 7098 case DIF_OP_BLE: 7099 case DIF_OP_BLEU: 7100 if (label >= dp->dtdo_len) { 7101 err += efunc(pc, "invalid branch target %u\n", 7102 label); 7103 } 7104 if (label <= pc) { 7105 err += efunc(pc, "backward branch to %u\n", 7106 label); 7107 } 7108 break; 7109 case DIF_OP_RET: 7110 if (r1 != 0 || r2 != 0) 7111 err += efunc(pc, "non-zero reserved bits\n"); 7112 if (rd >= nregs) 7113 err += efunc(pc, "invalid register %u\n", rd); 7114 break; 7115 case DIF_OP_NOP: 7116 case DIF_OP_POPTS: 7117 case DIF_OP_FLUSHTS: 7118 if (r1 != 0 || r2 != 0 || rd != 0) 7119 err += efunc(pc, "non-zero reserved bits\n"); 7120 break; 7121 case DIF_OP_SETX: 7122 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 7123 err += efunc(pc, "invalid integer ref %u\n", 7124 DIF_INSTR_INTEGER(instr)); 7125 } 7126 if (rd >= nregs) 7127 err += efunc(pc, "invalid register %u\n", rd); 7128 if (rd == 0) 7129 err += efunc(pc, "cannot write to %r0\n"); 7130 break; 7131 case DIF_OP_SETS: 7132 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 7133 err += efunc(pc, "invalid string ref %u\n", 7134 DIF_INSTR_STRING(instr)); 7135 } 7136 if (rd >= nregs) 7137 err += efunc(pc, "invalid register %u\n", rd); 7138 if (rd == 0) 7139 err += efunc(pc, "cannot write to %r0\n"); 7140 break; 7141 case DIF_OP_LDGA: 7142 case DIF_OP_LDTA: 7143 if (r1 > DIF_VAR_ARRAY_MAX) 7144 err += efunc(pc, "invalid array %u\n", r1); 7145 if (r2 >= nregs) 7146 err += efunc(pc, "invalid register %u\n", r2); 7147 if (rd >= nregs) 7148 err += efunc(pc, "invalid register %u\n", rd); 7149 if (rd == 0) 7150 err += efunc(pc, "cannot write to %r0\n"); 7151 break; 7152 case DIF_OP_LDGS: 7153 case DIF_OP_LDTS: 7154 case DIF_OP_LDLS: 7155 case DIF_OP_LDGAA: 7156 case DIF_OP_LDTAA: 7157 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 7158 err += efunc(pc, "invalid variable %u\n", v); 7159 if (rd >= nregs) 7160 err += efunc(pc, "invalid register %u\n", rd); 7161 if (rd == 0) 7162 err += efunc(pc, "cannot write to %r0\n"); 7163 break; 7164 case DIF_OP_STGS: 7165 case DIF_OP_STTS: 7166 case DIF_OP_STLS: 7167 case DIF_OP_STGAA: 7168 case DIF_OP_STTAA: 7169 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 7170 err += efunc(pc, "invalid variable %u\n", v); 7171 if (rs >= nregs) 7172 err += efunc(pc, "invalid register %u\n", rd); 7173 break; 7174 case DIF_OP_CALL: 7175 if (subr 
> DIF_SUBR_MAX) 7176 err += efunc(pc, "invalid subr %u\n", subr); 7177 if (rd >= nregs) 7178 err += efunc(pc, "invalid register %u\n", rd); 7179 if (rd == 0) 7180 err += efunc(pc, "cannot write to %r0\n"); 7181 7182 if (subr == DIF_SUBR_COPYOUT || 7183 subr == DIF_SUBR_COPYOUTSTR) { 7184 dp->dtdo_destructive = 1; 7185 } 7186 break; 7187 case DIF_OP_PUSHTR: 7188 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 7189 err += efunc(pc, "invalid ref type %u\n", type); 7190 if (r2 >= nregs) 7191 err += efunc(pc, "invalid register %u\n", r2); 7192 if (rs >= nregs) 7193 err += efunc(pc, "invalid register %u\n", rs); 7194 break; 7195 case DIF_OP_PUSHTV: 7196 if (type != DIF_TYPE_CTF) 7197 err += efunc(pc, "invalid val type %u\n", type); 7198 if (r2 >= nregs) 7199 err += efunc(pc, "invalid register %u\n", r2); 7200 if (rs >= nregs) 7201 err += efunc(pc, "invalid register %u\n", rs); 7202 break; 7203 default: 7204 err += efunc(pc, "invalid opcode %u\n", 7205 DIF_INSTR_OP(instr)); 7206 } 7207 } 7208 7209 if (dp->dtdo_len != 0 && 7210 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 7211 err += efunc(dp->dtdo_len - 1, 7212 "expected 'ret' as last DIF instruction\n"); 7213 } 7214 7215 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 7216 /* 7217 * If we're not returning by reference, the size must be either 7218 * 0 or the size of one of the base types. 7219 */ 7220 switch (dp->dtdo_rtype.dtdt_size) { 7221 case 0: 7222 case sizeof (uint8_t): 7223 case sizeof (uint16_t): 7224 case sizeof (uint32_t): 7225 case sizeof (uint64_t): 7226 break; 7227 7228 default: 7229 err += efunc(dp->dtdo_len - 1, "bad return size"); 7230 } 7231 } 7232 7233 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 7234 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 7235 dtrace_diftype_t *vt, *et; 7236 uint_t id, ndx; 7237 7238 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 7239 v->dtdv_scope != DIFV_SCOPE_THREAD && 7240 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 7241 err += efunc(i, "unrecognized variable scope %d\n", 7242 v->dtdv_scope); 7243 break; 7244 } 7245 7246 if (v->dtdv_kind != DIFV_KIND_ARRAY && 7247 v->dtdv_kind != DIFV_KIND_SCALAR) { 7248 err += efunc(i, "unrecognized variable type %d\n", 7249 v->dtdv_kind); 7250 break; 7251 } 7252 7253 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 7254 err += efunc(i, "%d exceeds variable id limit\n", id); 7255 break; 7256 } 7257 7258 if (id < DIF_VAR_OTHER_UBASE) 7259 continue; 7260 7261 /* 7262 * For user-defined variables, we need to check that this 7263 * definition is identical to any previous definition that we 7264 * encountered. 
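 *
 * (An illustrative case, not from the original source: if an earlier
 * DIFO in this enabling defined a user global as a scalar, a later DIFO
 * presenting the same variable id as an array, with different type
 * flags, or with a different non-zero size is rejected with the
 * corresponding "changed variable ..." error below.)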
7265 */ 7266 ndx = id - DIF_VAR_OTHER_UBASE; 7267 7268 switch (v->dtdv_scope) { 7269 case DIFV_SCOPE_GLOBAL: 7270 if (ndx < vstate->dtvs_nglobals) { 7271 dtrace_statvar_t *svar; 7272 7273 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 7274 existing = &svar->dtsv_var; 7275 } 7276 7277 break; 7278 7279 case DIFV_SCOPE_THREAD: 7280 if (ndx < vstate->dtvs_ntlocals) 7281 existing = &vstate->dtvs_tlocals[ndx]; 7282 break; 7283 7284 case DIFV_SCOPE_LOCAL: 7285 if (ndx < vstate->dtvs_nlocals) { 7286 dtrace_statvar_t *svar; 7287 7288 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 7289 existing = &svar->dtsv_var; 7290 } 7291 7292 break; 7293 } 7294 7295 vt = &v->dtdv_type; 7296 7297 if (vt->dtdt_flags & DIF_TF_BYREF) { 7298 if (vt->dtdt_size == 0) { 7299 err += efunc(i, "zero-sized variable\n"); 7300 break; 7301 } 7302 7303 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL && 7304 vt->dtdt_size > dtrace_global_maxsize) { 7305 err += efunc(i, "oversized by-ref global\n"); 7306 break; 7307 } 7308 } 7309 7310 if (existing == NULL || existing->dtdv_id == 0) 7311 continue; 7312 7313 ASSERT(existing->dtdv_id == v->dtdv_id); 7314 ASSERT(existing->dtdv_scope == v->dtdv_scope); 7315 7316 if (existing->dtdv_kind != v->dtdv_kind) 7317 err += efunc(i, "%d changed variable kind\n", id); 7318 7319 et = &existing->dtdv_type; 7320 7321 if (vt->dtdt_flags != et->dtdt_flags) { 7322 err += efunc(i, "%d changed variable type flags\n", id); 7323 break; 7324 } 7325 7326 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 7327 err += efunc(i, "%d changed variable type size\n", id); 7328 break; 7329 } 7330 } 7331 7332 return (err); 7333 } 7334 7335 /* 7336 * Validate a DTrace DIF object that is to be used as a helper. Helpers 7337 * are much more constrained than normal DIFOs. Specifically, they may 7338 * not: 7339 * 7340 * 1. Make calls to subroutines other than copyin(), copyinstr() or 7341 * miscellaneous string routines 7342 * 2. Access DTrace variables other than the args[] array, and the 7343 * curthread, pid, tid, execname and zonename variables. 7344 * 3. Have thread-local variables. 7345 * 4. Have dynamic variables. 7346 */ 7347 static int 7348 dtrace_difo_validate_helper(dtrace_difo_t *dp) 7349 { 7350 int (*efunc)(uint_t pc, const char *, ...)
= dtrace_difo_err; 7351 int err = 0; 7352 uint_t pc; 7353 7354 for (pc = 0; pc < dp->dtdo_len; pc++) { 7355 dif_instr_t instr = dp->dtdo_buf[pc]; 7356 7357 uint_t v = DIF_INSTR_VAR(instr); 7358 uint_t subr = DIF_INSTR_SUBR(instr); 7359 uint_t op = DIF_INSTR_OP(instr); 7360 7361 switch (op) { 7362 case DIF_OP_OR: 7363 case DIF_OP_XOR: 7364 case DIF_OP_AND: 7365 case DIF_OP_SLL: 7366 case DIF_OP_SRL: 7367 case DIF_OP_SRA: 7368 case DIF_OP_SUB: 7369 case DIF_OP_ADD: 7370 case DIF_OP_MUL: 7371 case DIF_OP_SDIV: 7372 case DIF_OP_UDIV: 7373 case DIF_OP_SREM: 7374 case DIF_OP_UREM: 7375 case DIF_OP_COPYS: 7376 case DIF_OP_NOT: 7377 case DIF_OP_MOV: 7378 case DIF_OP_RLDSB: 7379 case DIF_OP_RLDSH: 7380 case DIF_OP_RLDSW: 7381 case DIF_OP_RLDUB: 7382 case DIF_OP_RLDUH: 7383 case DIF_OP_RLDUW: 7384 case DIF_OP_RLDX: 7385 case DIF_OP_ULDSB: 7386 case DIF_OP_ULDSH: 7387 case DIF_OP_ULDSW: 7388 case DIF_OP_ULDUB: 7389 case DIF_OP_ULDUH: 7390 case DIF_OP_ULDUW: 7391 case DIF_OP_ULDX: 7392 case DIF_OP_STB: 7393 case DIF_OP_STH: 7394 case DIF_OP_STW: 7395 case DIF_OP_STX: 7396 case DIF_OP_ALLOCS: 7397 case DIF_OP_CMP: 7398 case DIF_OP_SCMP: 7399 case DIF_OP_TST: 7400 case DIF_OP_BA: 7401 case DIF_OP_BE: 7402 case DIF_OP_BNE: 7403 case DIF_OP_BG: 7404 case DIF_OP_BGU: 7405 case DIF_OP_BGE: 7406 case DIF_OP_BGEU: 7407 case DIF_OP_BL: 7408 case DIF_OP_BLU: 7409 case DIF_OP_BLE: 7410 case DIF_OP_BLEU: 7411 case DIF_OP_RET: 7412 case DIF_OP_NOP: 7413 case DIF_OP_POPTS: 7414 case DIF_OP_FLUSHTS: 7415 case DIF_OP_SETX: 7416 case DIF_OP_SETS: 7417 case DIF_OP_LDGA: 7418 case DIF_OP_LDLS: 7419 case DIF_OP_STGS: 7420 case DIF_OP_STLS: 7421 case DIF_OP_PUSHTR: 7422 case DIF_OP_PUSHTV: 7423 break; 7424 7425 case DIF_OP_LDGS: 7426 if (v >= DIF_VAR_OTHER_UBASE) 7427 break; 7428 7429 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 7430 break; 7431 7432 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 7433 v == DIF_VAR_TID || v == DIF_VAR_EXECNAME || 7434 v == DIF_VAR_ZONENAME) 7435 break; 7436 7437 err += efunc(pc, "illegal variable %u\n", v); 7438 break; 7439 7440 case DIF_OP_LDTA: 7441 case DIF_OP_LDTS: 7442 case DIF_OP_LDGAA: 7443 case DIF_OP_LDTAA: 7444 err += efunc(pc, "illegal dynamic variable load\n"); 7445 break; 7446 7447 case DIF_OP_STTS: 7448 case DIF_OP_STGAA: 7449 case DIF_OP_STTAA: 7450 err += efunc(pc, "illegal dynamic variable store\n"); 7451 break; 7452 7453 case DIF_OP_CALL: 7454 if (subr == DIF_SUBR_ALLOCA || 7455 subr == DIF_SUBR_BCOPY || 7456 subr == DIF_SUBR_COPYIN || 7457 subr == DIF_SUBR_COPYINTO || 7458 subr == DIF_SUBR_COPYINSTR || 7459 subr == DIF_SUBR_INDEX || 7460 subr == DIF_SUBR_LLTOSTR || 7461 subr == DIF_SUBR_RINDEX || 7462 subr == DIF_SUBR_STRCHR || 7463 subr == DIF_SUBR_STRJOIN || 7464 subr == DIF_SUBR_STRRCHR || 7465 subr == DIF_SUBR_STRSTR) 7466 break; 7467 7468 err += efunc(pc, "invalid subr %u\n", subr); 7469 break; 7470 7471 default: 7472 err += efunc(pc, "invalid opcode %u\n", 7473 DIF_INSTR_OP(instr)); 7474 } 7475 } 7476 7477 return (err); 7478 } 7479 7480 /* 7481 * Returns 1 if the expression in the DIF object can be cached on a per-thread 7482 * basis; 0 if not. 
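 *
 * For example (illustrative, not part of the original source), the DIF
 * for a predicate such as
 *
 *	/pid == 1234/
 *
 * refers only to thread-invariant built-in variables from the list
 * below and performs no memory loads, so its result may be cached and
 * reused across firings in the same thread.  By contrast, a predicate
 * like
 *
 *	/args[0] != 0/
 *
 * is not cacheable: args[] is read with an array-load instruction
 * (DIF_OP_LDGA), and its value may differ on every firing.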
7483 */ 7484 static int 7485 dtrace_difo_cacheable(dtrace_difo_t *dp) 7486 { 7487 int i; 7488 7489 if (dp == NULL) 7490 return (0); 7491 7492 for (i = 0; i < dp->dtdo_varlen; i++) { 7493 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7494 7495 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 7496 continue; 7497 7498 switch (v->dtdv_id) { 7499 case DIF_VAR_CURTHREAD: 7500 case DIF_VAR_PID: 7501 case DIF_VAR_TID: 7502 case DIF_VAR_EXECNAME: 7503 case DIF_VAR_ZONENAME: 7504 break; 7505 7506 default: 7507 return (0); 7508 } 7509 } 7510 7511 /* 7512 * This DIF object may be cacheable. Now we need to look for any 7513 * array loading instructions, any memory loading instructions, or 7514 * any stores to thread-local variables. 7515 */ 7516 for (i = 0; i < dp->dtdo_len; i++) { 7517 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 7518 7519 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 7520 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 7521 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 7522 op == DIF_OP_LDGA || op == DIF_OP_STTS) 7523 return (0); 7524 } 7525 7526 return (1); 7527 } 7528 7529 static void 7530 dtrace_difo_hold(dtrace_difo_t *dp) 7531 { 7532 int i; 7533 7534 ASSERT(MUTEX_HELD(&dtrace_lock)); 7535 7536 dp->dtdo_refcnt++; 7537 ASSERT(dp->dtdo_refcnt != 0); 7538 7539 /* 7540 * We need to check this DIF object for references to the variable 7541 * DIF_VAR_VTIMESTAMP. 7542 */ 7543 for (i = 0; i < dp->dtdo_varlen; i++) { 7544 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7545 7546 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 7547 continue; 7548 7549 if (dtrace_vtime_references++ == 0) 7550 dtrace_vtime_enable(); 7551 } 7552 } 7553 7554 /* 7555 * This routine calculates the dynamic variable chunksize for a given DIF 7556 * object. The calculation is not fool-proof, and can probably be tricked by 7557 * malicious DIF -- but it works for all compiler-generated DIF. Because this 7558 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 7559 * if a dynamic variable size exceeds the chunksize. 
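 *
 * As an illustrative sketch of the calculation (this example is not
 * itself part of the original source), each dynamic-variable store is
 * assumed to need
 *
 *	size = P2ROUNDUP(sizeof (dtrace_dynvar_t) +
 *	    (nkeys - 1) * sizeof (dtrace_key_t) + ksize + valsize,
 *	    sizeof (uint64_t))
 *
 * bytes, where nkeys is the number of tuple keys (two fixed keys for a
 * thread-local store), ksize is the sum of the key sizes, each rounded
 * up to 8 bytes (a string key pushed with an unknown length is assumed
 * to be dtrace_strsize_default bytes), and valsize is the dtdt_size of
 * the stored variable.  The chunksize recorded in the dynamic variable
 * state is the largest such size seen across all stores.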
7560 */ 7561 static void 7562 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 7563 { 7564 uint64_t sval; 7565 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 7566 const dif_instr_t *text = dp->dtdo_buf; 7567 uint_t pc, srd = 0; 7568 uint_t ttop = 0; 7569 size_t size, ksize; 7570 uint_t id, i; 7571 7572 for (pc = 0; pc < dp->dtdo_len; pc++) { 7573 dif_instr_t instr = text[pc]; 7574 uint_t op = DIF_INSTR_OP(instr); 7575 uint_t rd = DIF_INSTR_RD(instr); 7576 uint_t r1 = DIF_INSTR_R1(instr); 7577 uint_t nkeys = 0; 7578 uchar_t scope; 7579 7580 dtrace_key_t *key = tupregs; 7581 7582 switch (op) { 7583 case DIF_OP_SETX: 7584 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 7585 srd = rd; 7586 continue; 7587 7588 case DIF_OP_STTS: 7589 key = &tupregs[DIF_DTR_NREGS]; 7590 key[0].dttk_size = 0; 7591 key[1].dttk_size = 0; 7592 nkeys = 2; 7593 scope = DIFV_SCOPE_THREAD; 7594 break; 7595 7596 case DIF_OP_STGAA: 7597 case DIF_OP_STTAA: 7598 nkeys = ttop; 7599 7600 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 7601 key[nkeys++].dttk_size = 0; 7602 7603 key[nkeys++].dttk_size = 0; 7604 7605 if (op == DIF_OP_STTAA) { 7606 scope = DIFV_SCOPE_THREAD; 7607 } else { 7608 scope = DIFV_SCOPE_GLOBAL; 7609 } 7610 7611 break; 7612 7613 case DIF_OP_PUSHTR: 7614 if (ttop == DIF_DTR_NREGS) 7615 return; 7616 7617 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 7618 /* 7619 * If the register for the size of the "pushtr" 7620 * is %r0 (or the value is 0) and the type is 7621 * a string, we'll use the system-wide default 7622 * string size. 7623 */ 7624 tupregs[ttop++].dttk_size = 7625 dtrace_strsize_default; 7626 } else { 7627 if (srd == 0) 7628 return; 7629 7630 tupregs[ttop++].dttk_size = sval; 7631 } 7632 7633 break; 7634 7635 case DIF_OP_PUSHTV: 7636 if (ttop == DIF_DTR_NREGS) 7637 return; 7638 7639 tupregs[ttop++].dttk_size = 0; 7640 break; 7641 7642 case DIF_OP_FLUSHTS: 7643 ttop = 0; 7644 break; 7645 7646 case DIF_OP_POPTS: 7647 if (ttop != 0) 7648 ttop--; 7649 break; 7650 } 7651 7652 sval = 0; 7653 srd = 0; 7654 7655 if (nkeys == 0) 7656 continue; 7657 7658 /* 7659 * We have a dynamic variable allocation; calculate its size. 7660 */ 7661 for (ksize = 0, i = 0; i < nkeys; i++) 7662 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 7663 7664 size = sizeof (dtrace_dynvar_t); 7665 size += sizeof (dtrace_key_t) * (nkeys - 1); 7666 size += ksize; 7667 7668 /* 7669 * Now we need to determine the size of the stored data. 7670 */ 7671 id = DIF_INSTR_VAR(instr); 7672 7673 for (i = 0; i < dp->dtdo_varlen; i++) { 7674 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7675 7676 if (v->dtdv_id == id && v->dtdv_scope == scope) { 7677 size += v->dtdv_type.dtdt_size; 7678 break; 7679 } 7680 } 7681 7682 if (i == dp->dtdo_varlen) 7683 return; 7684 7685 /* 7686 * We have the size. If this is larger than the chunk size 7687 * for our dynamic variable state, reset the chunk size. 
7688 */ 7689 size = P2ROUNDUP(size, sizeof (uint64_t)); 7690 7691 if (size > vstate->dtvs_dynvars.dtds_chunksize) 7692 vstate->dtvs_dynvars.dtds_chunksize = size; 7693 } 7694 } 7695 7696 static void 7697 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 7698 { 7699 int i, oldsvars, osz, nsz, otlocals, ntlocals; 7700 uint_t id; 7701 7702 ASSERT(MUTEX_HELD(&dtrace_lock)); 7703 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 7704 7705 for (i = 0; i < dp->dtdo_varlen; i++) { 7706 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7707 dtrace_statvar_t *svar, ***svarp; 7708 size_t dsize = 0; 7709 uint8_t scope = v->dtdv_scope; 7710 int *np; 7711 7712 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 7713 continue; 7714 7715 id -= DIF_VAR_OTHER_UBASE; 7716 7717 switch (scope) { 7718 case DIFV_SCOPE_THREAD: 7719 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 7720 dtrace_difv_t *tlocals; 7721 7722 if ((ntlocals = (otlocals << 1)) == 0) 7723 ntlocals = 1; 7724 7725 osz = otlocals * sizeof (dtrace_difv_t); 7726 nsz = ntlocals * sizeof (dtrace_difv_t); 7727 7728 tlocals = kmem_zalloc(nsz, KM_SLEEP); 7729 7730 if (osz != 0) { 7731 bcopy(vstate->dtvs_tlocals, 7732 tlocals, osz); 7733 kmem_free(vstate->dtvs_tlocals, osz); 7734 } 7735 7736 vstate->dtvs_tlocals = tlocals; 7737 vstate->dtvs_ntlocals = ntlocals; 7738 } 7739 7740 vstate->dtvs_tlocals[id] = *v; 7741 continue; 7742 7743 case DIFV_SCOPE_LOCAL: 7744 np = &vstate->dtvs_nlocals; 7745 svarp = &vstate->dtvs_locals; 7746 7747 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 7748 dsize = NCPU * (v->dtdv_type.dtdt_size + 7749 sizeof (uint64_t)); 7750 else 7751 dsize = NCPU * sizeof (uint64_t); 7752 7753 break; 7754 7755 case DIFV_SCOPE_GLOBAL: 7756 np = &vstate->dtvs_nglobals; 7757 svarp = &vstate->dtvs_globals; 7758 7759 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 7760 dsize = v->dtdv_type.dtdt_size + 7761 sizeof (uint64_t); 7762 7763 break; 7764 7765 default: 7766 ASSERT(0); 7767 } 7768 7769 while (id >= (oldsvars = *np)) { 7770 dtrace_statvar_t **statics; 7771 int newsvars, oldsize, newsize; 7772 7773 if ((newsvars = (oldsvars << 1)) == 0) 7774 newsvars = 1; 7775 7776 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 7777 newsize = newsvars * sizeof (dtrace_statvar_t *); 7778 7779 statics = kmem_zalloc(newsize, KM_SLEEP); 7780 7781 if (oldsize != 0) { 7782 bcopy(*svarp, statics, oldsize); 7783 kmem_free(*svarp, oldsize); 7784 } 7785 7786 *svarp = statics; 7787 *np = newsvars; 7788 } 7789 7790 if ((svar = (*svarp)[id]) == NULL) { 7791 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 7792 svar->dtsv_var = *v; 7793 7794 if ((svar->dtsv_size = dsize) != 0) { 7795 svar->dtsv_data = (uint64_t)(uintptr_t) 7796 kmem_zalloc(dsize, KM_SLEEP); 7797 } 7798 7799 (*svarp)[id] = svar; 7800 } 7801 7802 svar->dtsv_refcnt++; 7803 } 7804 7805 dtrace_difo_chunksize(dp, vstate); 7806 dtrace_difo_hold(dp); 7807 } 7808 7809 static dtrace_difo_t * 7810 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 7811 { 7812 dtrace_difo_t *new; 7813 size_t sz; 7814 7815 ASSERT(dp->dtdo_buf != NULL); 7816 ASSERT(dp->dtdo_refcnt != 0); 7817 7818 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 7819 7820 ASSERT(dp->dtdo_buf != NULL); 7821 sz = dp->dtdo_len * sizeof (dif_instr_t); 7822 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 7823 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 7824 new->dtdo_len = dp->dtdo_len; 7825 7826 if (dp->dtdo_strtab != NULL) { 7827 ASSERT(dp->dtdo_strlen != 0); 7828 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 7829 
bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 7830 new->dtdo_strlen = dp->dtdo_strlen; 7831 } 7832 7833 if (dp->dtdo_inttab != NULL) { 7834 ASSERT(dp->dtdo_intlen != 0); 7835 sz = dp->dtdo_intlen * sizeof (uint64_t); 7836 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 7837 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 7838 new->dtdo_intlen = dp->dtdo_intlen; 7839 } 7840 7841 if (dp->dtdo_vartab != NULL) { 7842 ASSERT(dp->dtdo_varlen != 0); 7843 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 7844 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 7845 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 7846 new->dtdo_varlen = dp->dtdo_varlen; 7847 } 7848 7849 dtrace_difo_init(new, vstate); 7850 return (new); 7851 } 7852 7853 static void 7854 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 7855 { 7856 int i; 7857 7858 ASSERT(dp->dtdo_refcnt == 0); 7859 7860 for (i = 0; i < dp->dtdo_varlen; i++) { 7861 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7862 dtrace_statvar_t *svar, **svarp; 7863 uint_t id; 7864 uint8_t scope = v->dtdv_scope; 7865 int *np; 7866 7867 switch (scope) { 7868 case DIFV_SCOPE_THREAD: 7869 continue; 7870 7871 case DIFV_SCOPE_LOCAL: 7872 np = &vstate->dtvs_nlocals; 7873 svarp = vstate->dtvs_locals; 7874 break; 7875 7876 case DIFV_SCOPE_GLOBAL: 7877 np = &vstate->dtvs_nglobals; 7878 svarp = vstate->dtvs_globals; 7879 break; 7880 7881 default: 7882 ASSERT(0); 7883 } 7884 7885 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 7886 continue; 7887 7888 id -= DIF_VAR_OTHER_UBASE; 7889 ASSERT(id < *np); 7890 7891 svar = svarp[id]; 7892 ASSERT(svar != NULL); 7893 ASSERT(svar->dtsv_refcnt > 0); 7894 7895 if (--svar->dtsv_refcnt > 0) 7896 continue; 7897 7898 if (svar->dtsv_size != 0) { 7899 ASSERT(svar->dtsv_data != NULL); 7900 kmem_free((void *)(uintptr_t)svar->dtsv_data, 7901 svar->dtsv_size); 7902 } 7903 7904 kmem_free(svar, sizeof (dtrace_statvar_t)); 7905 svarp[id] = NULL; 7906 } 7907 7908 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 7909 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 7910 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 7911 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 7912 7913 kmem_free(dp, sizeof (dtrace_difo_t)); 7914 } 7915 7916 static void 7917 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 7918 { 7919 int i; 7920 7921 ASSERT(MUTEX_HELD(&dtrace_lock)); 7922 ASSERT(dp->dtdo_refcnt != 0); 7923 7924 for (i = 0; i < dp->dtdo_varlen; i++) { 7925 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 7926 7927 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 7928 continue; 7929 7930 ASSERT(dtrace_vtime_references > 0); 7931 if (--dtrace_vtime_references == 0) 7932 dtrace_vtime_disable(); 7933 } 7934 7935 if (--dp->dtdo_refcnt == 0) 7936 dtrace_difo_destroy(dp, vstate); 7937 } 7938 7939 /* 7940 * DTrace Format Functions 7941 */ 7942 static uint16_t 7943 dtrace_format_add(dtrace_state_t *state, char *str) 7944 { 7945 char *fmt, **new; 7946 uint16_t ndx, len = strlen(str) + 1; 7947 7948 fmt = kmem_zalloc(len, KM_SLEEP); 7949 bcopy(str, fmt, len); 7950 7951 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 7952 if (state->dts_formats[ndx] == NULL) { 7953 state->dts_formats[ndx] = fmt; 7954 return (ndx + 1); 7955 } 7956 } 7957 7958 if (state->dts_nformats == USHRT_MAX) { 7959 /* 7960 * This is only likely if a denial-of-service attack is being 7961 * attempted. As such, it's okay to fail silently here. 
7962 */ 7963 kmem_free(fmt, len); 7964 return (0); 7965 } 7966 7967 /* 7968 * For simplicity, we always resize the formats array to be exactly the 7969 * number of formats. 7970 */ 7971 ndx = state->dts_nformats++; 7972 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 7973 7974 if (state->dts_formats != NULL) { 7975 ASSERT(ndx != 0); 7976 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 7977 kmem_free(state->dts_formats, ndx * sizeof (char *)); 7978 } 7979 7980 state->dts_formats = new; 7981 state->dts_formats[ndx] = fmt; 7982 7983 return (ndx + 1); 7984 } 7985 7986 static void 7987 dtrace_format_remove(dtrace_state_t *state, uint16_t format) 7988 { 7989 char *fmt; 7990 7991 ASSERT(state->dts_formats != NULL); 7992 ASSERT(format <= state->dts_nformats); 7993 ASSERT(state->dts_formats[format - 1] != NULL); 7994 7995 fmt = state->dts_formats[format - 1]; 7996 kmem_free(fmt, strlen(fmt) + 1); 7997 state->dts_formats[format - 1] = NULL; 7998 } 7999 8000 static void 8001 dtrace_format_destroy(dtrace_state_t *state) 8002 { 8003 int i; 8004 8005 if (state->dts_nformats == 0) { 8006 ASSERT(state->dts_formats == NULL); 8007 return; 8008 } 8009 8010 ASSERT(state->dts_formats != NULL); 8011 8012 for (i = 0; i < state->dts_nformats; i++) { 8013 char *fmt = state->dts_formats[i]; 8014 8015 if (fmt == NULL) 8016 continue; 8017 8018 kmem_free(fmt, strlen(fmt) + 1); 8019 } 8020 8021 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 8022 state->dts_nformats = 0; 8023 state->dts_formats = NULL; 8024 } 8025 8026 /* 8027 * DTrace Predicate Functions 8028 */ 8029 static dtrace_predicate_t * 8030 dtrace_predicate_create(dtrace_difo_t *dp) 8031 { 8032 dtrace_predicate_t *pred; 8033 8034 ASSERT(MUTEX_HELD(&dtrace_lock)); 8035 ASSERT(dp->dtdo_refcnt != 0); 8036 8037 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 8038 pred->dtp_difo = dp; 8039 pred->dtp_refcnt = 1; 8040 8041 if (!dtrace_difo_cacheable(dp)) 8042 return (pred); 8043 8044 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 8045 /* 8046 * This is only theoretically possible -- we have had 2^32 8047 * cacheable predicates on this machine. We cannot allow any 8048 * more predicates to become cacheable: as unlikely as it is, 8049 * there may be a thread caching a (now stale) predicate cache 8050 * ID. 
(N.B.: the temptation is being successfully resisted to 8051 * have this cmn_err() "Holy shit -- we executed this code!") 8052 */ 8053 return (pred); 8054 } 8055 8056 pred->dtp_cacheid = dtrace_predcache_id++; 8057 8058 return (pred); 8059 } 8060 8061 static void 8062 dtrace_predicate_hold(dtrace_predicate_t *pred) 8063 { 8064 ASSERT(MUTEX_HELD(&dtrace_lock)); 8065 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 8066 ASSERT(pred->dtp_refcnt > 0); 8067 8068 pred->dtp_refcnt++; 8069 } 8070 8071 static void 8072 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 8073 { 8074 dtrace_difo_t *dp = pred->dtp_difo; 8075 8076 ASSERT(MUTEX_HELD(&dtrace_lock)); 8077 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 8078 ASSERT(pred->dtp_refcnt > 0); 8079 8080 if (--pred->dtp_refcnt == 0) { 8081 dtrace_difo_release(pred->dtp_difo, vstate); 8082 kmem_free(pred, sizeof (dtrace_predicate_t)); 8083 } 8084 } 8085 8086 /* 8087 * DTrace Action Description Functions 8088 */ 8089 static dtrace_actdesc_t * 8090 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 8091 uint64_t uarg, uint64_t arg) 8092 { 8093 dtrace_actdesc_t *act; 8094 8095 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 8096 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 8097 8098 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 8099 act->dtad_kind = kind; 8100 act->dtad_ntuple = ntuple; 8101 act->dtad_uarg = uarg; 8102 act->dtad_arg = arg; 8103 act->dtad_refcnt = 1; 8104 8105 return (act); 8106 } 8107 8108 static void 8109 dtrace_actdesc_hold(dtrace_actdesc_t *act) 8110 { 8111 ASSERT(act->dtad_refcnt >= 1); 8112 act->dtad_refcnt++; 8113 } 8114 8115 static void 8116 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 8117 { 8118 dtrace_actkind_t kind = act->dtad_kind; 8119 dtrace_difo_t *dp; 8120 8121 ASSERT(act->dtad_refcnt >= 1); 8122 8123 if (--act->dtad_refcnt != 0) 8124 return; 8125 8126 if ((dp = act->dtad_difo) != NULL) 8127 dtrace_difo_release(dp, vstate); 8128 8129 if (DTRACEACT_ISPRINTFLIKE(kind)) { 8130 char *str = (char *)(uintptr_t)act->dtad_arg; 8131 8132 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 8133 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 8134 8135 if (str != NULL) 8136 kmem_free(str, strlen(str) + 1); 8137 } 8138 8139 kmem_free(act, sizeof (dtrace_actdesc_t)); 8140 } 8141 8142 /* 8143 * DTrace ECB Functions 8144 */ 8145 static dtrace_ecb_t * 8146 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 8147 { 8148 dtrace_ecb_t *ecb; 8149 dtrace_epid_t epid; 8150 8151 ASSERT(MUTEX_HELD(&dtrace_lock)); 8152 8153 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 8154 ecb->dte_predicate = NULL; 8155 ecb->dte_probe = probe; 8156 8157 /* 8158 * The default size is the size of the default action: recording 8159 * the epid. 
8160 */ 8161 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 8162 ecb->dte_alignment = sizeof (dtrace_epid_t); 8163 8164 epid = state->dts_epid++; 8165 8166 if (epid - 1 >= state->dts_necbs) { 8167 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 8168 int necbs = state->dts_necbs << 1; 8169 8170 ASSERT(epid == state->dts_necbs + 1); 8171 8172 if (necbs == 0) { 8173 ASSERT(oecbs == NULL); 8174 necbs = 1; 8175 } 8176 8177 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 8178 8179 if (oecbs != NULL) 8180 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 8181 8182 dtrace_membar_producer(); 8183 state->dts_ecbs = ecbs; 8184 8185 if (oecbs != NULL) { 8186 /* 8187 * If this state is active, we must dtrace_sync() 8188 * before we can free the old dts_ecbs array: we're 8189 * coming in hot, and there may be active ring 8190 * buffer processing (which indexes into the dts_ecbs 8191 * array) on another CPU. 8192 */ 8193 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 8194 dtrace_sync(); 8195 8196 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 8197 } 8198 8199 dtrace_membar_producer(); 8200 state->dts_necbs = necbs; 8201 } 8202 8203 ecb->dte_state = state; 8204 8205 ASSERT(state->dts_ecbs[epid - 1] == NULL); 8206 dtrace_membar_producer(); 8207 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 8208 8209 return (ecb); 8210 } 8211 8212 static void 8213 dtrace_ecb_enable(dtrace_ecb_t *ecb) 8214 { 8215 dtrace_probe_t *probe = ecb->dte_probe; 8216 8217 ASSERT(MUTEX_HELD(&cpu_lock)); 8218 ASSERT(MUTEX_HELD(&dtrace_lock)); 8219 ASSERT(ecb->dte_next == NULL); 8220 8221 if (probe == NULL) { 8222 /* 8223 * This is the NULL probe -- there's nothing to do. 8224 */ 8225 return; 8226 } 8227 8228 if (probe->dtpr_ecb == NULL) { 8229 dtrace_provider_t *prov = probe->dtpr_provider; 8230 8231 /* 8232 * We're the first ECB on this probe. 8233 */ 8234 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 8235 8236 if (ecb->dte_predicate != NULL) 8237 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 8238 8239 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 8240 probe->dtpr_id, probe->dtpr_arg); 8241 } else { 8242 /* 8243 * This probe is already active. Swing the last pointer to 8244 * point to the new ECB, and issue a dtrace_sync() to assure 8245 * that all CPUs have seen the change. 8246 */ 8247 ASSERT(probe->dtpr_ecb_last != NULL); 8248 probe->dtpr_ecb_last->dte_next = ecb; 8249 probe->dtpr_ecb_last = ecb; 8250 probe->dtpr_predcache = 0; 8251 8252 dtrace_sync(); 8253 } 8254 } 8255 8256 static void 8257 dtrace_ecb_resize(dtrace_ecb_t *ecb) 8258 { 8259 uint32_t maxalign = sizeof (dtrace_epid_t); 8260 uint32_t align = sizeof (uint8_t), offs, diff; 8261 dtrace_action_t *act; 8262 int wastuple = 0; 8263 uint32_t aggbase = UINT32_MAX; 8264 dtrace_state_t *state = ecb->dte_state; 8265 8266 /* 8267 * If we record anything, we always record the epid. (And we always 8268 * record it first.) 8269 */ 8270 offs = sizeof (dtrace_epid_t); 8271 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 8272 8273 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 8274 dtrace_recdesc_t *rec = &act->dta_rec; 8275 8276 if ((align = rec->dtrd_alignment) > maxalign) 8277 maxalign = align; 8278 8279 if (!wastuple && act->dta_intuple) { 8280 /* 8281 * This is the first record in a tuple. Align the 8282 * offset to be at offset 4 in an 8-byte aligned 8283 * block. 
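 *
 * (A worked instance, assuming a 4-byte dtrace_aggid_t; not part of
 * the original source: with offs currently 8, diff becomes
 * 8 + 4 = 12; 12 & 7 is 4, so offs is advanced by 8 - 4 to 12 and
 * aggbase becomes 12 - 4 = 8.  The tuple data thus starts at offset 4
 * within an 8-byte aligned block, with the aggregation ID occupying
 * the 4 bytes that precede it.)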
8284 */ 8285 diff = offs + sizeof (dtrace_aggid_t); 8286 8287 if (diff = (diff & (sizeof (uint64_t) - 1))) 8288 offs += sizeof (uint64_t) - diff; 8289 8290 aggbase = offs - sizeof (dtrace_aggid_t); 8291 ASSERT(!(aggbase & (sizeof (uint64_t) - 1))); 8292 } 8293 8294 /*LINTED*/ 8295 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) { 8296 /* 8297 * The current offset is not properly aligned; align it. 8298 */ 8299 offs += align - diff; 8300 } 8301 8302 rec->dtrd_offset = offs; 8303 8304 if (offs + rec->dtrd_size > ecb->dte_needed) { 8305 ecb->dte_needed = offs + rec->dtrd_size; 8306 8307 if (ecb->dte_needed > state->dts_needed) 8308 state->dts_needed = ecb->dte_needed; 8309 } 8310 8311 if (DTRACEACT_ISAGG(act->dta_kind)) { 8312 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 8313 dtrace_action_t *first = agg->dtag_first, *prev; 8314 8315 ASSERT(rec->dtrd_size != 0 && first != NULL); 8316 ASSERT(wastuple); 8317 ASSERT(aggbase != UINT32_MAX); 8318 8319 agg->dtag_base = aggbase; 8320 8321 while ((prev = first->dta_prev) != NULL && 8322 DTRACEACT_ISAGG(prev->dta_kind)) { 8323 agg = (dtrace_aggregation_t *)prev; 8324 first = agg->dtag_first; 8325 } 8326 8327 if (prev != NULL) { 8328 offs = prev->dta_rec.dtrd_offset + 8329 prev->dta_rec.dtrd_size; 8330 } else { 8331 offs = sizeof (dtrace_epid_t); 8332 } 8333 wastuple = 0; 8334 } else { 8335 if (!act->dta_intuple) 8336 ecb->dte_size = offs + rec->dtrd_size; 8337 8338 offs += rec->dtrd_size; 8339 } 8340 8341 wastuple = act->dta_intuple; 8342 } 8343 8344 if ((act = ecb->dte_action) != NULL && 8345 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 8346 ecb->dte_size == sizeof (dtrace_epid_t)) { 8347 /* 8348 * If the size is still sizeof (dtrace_epid_t), then all 8349 * actions store no data; set the size to 0. 8350 */ 8351 ecb->dte_alignment = maxalign; 8352 ecb->dte_size = 0; 8353 8354 /* 8355 * If the needed space is still sizeof (dtrace_epid_t), then 8356 * all actions need no additional space; set the needed 8357 * size to 0. 8358 */ 8359 if (ecb->dte_needed == sizeof (dtrace_epid_t)) 8360 ecb->dte_needed = 0; 8361 8362 return; 8363 } 8364 8365 /* 8366 * Set our alignment, and make sure that the dte_size and dte_needed 8367 * are aligned to the size of an EPID. 
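 * (Worked example, assuming a 4-byte dtrace_epid_t: a dte_size of 13
 * rounds up to (13 + 3) & ~3 == 16, while an already-aligned value
 * such as 24 is left unchanged.)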
8368 */ 8369 ecb->dte_alignment = maxalign; 8370 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) & 8371 ~(sizeof (dtrace_epid_t) - 1); 8372 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) & 8373 ~(sizeof (dtrace_epid_t) - 1); 8374 ASSERT(ecb->dte_size <= ecb->dte_needed); 8375 } 8376 8377 static dtrace_action_t * 8378 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 8379 { 8380 dtrace_aggregation_t *agg; 8381 size_t size = sizeof (uint64_t); 8382 int ntuple = desc->dtad_ntuple; 8383 dtrace_action_t *act; 8384 dtrace_recdesc_t *frec; 8385 dtrace_aggid_t aggid; 8386 dtrace_state_t *state = ecb->dte_state; 8387 8388 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 8389 agg->dtag_ecb = ecb; 8390 8391 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 8392 8393 switch (desc->dtad_kind) { 8394 case DTRACEAGG_MIN: 8395 agg->dtag_initial = UINT64_MAX; 8396 agg->dtag_aggregate = dtrace_aggregate_min; 8397 break; 8398 8399 case DTRACEAGG_MAX: 8400 agg->dtag_aggregate = dtrace_aggregate_max; 8401 break; 8402 8403 case DTRACEAGG_COUNT: 8404 agg->dtag_aggregate = dtrace_aggregate_count; 8405 break; 8406 8407 case DTRACEAGG_QUANTIZE: 8408 agg->dtag_aggregate = dtrace_aggregate_quantize; 8409 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 8410 sizeof (uint64_t); 8411 break; 8412 8413 case DTRACEAGG_LQUANTIZE: { 8414 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 8415 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 8416 8417 agg->dtag_initial = desc->dtad_arg; 8418 agg->dtag_aggregate = dtrace_aggregate_lquantize; 8419 8420 if (step == 0 || levels == 0) 8421 goto err; 8422 8423 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 8424 break; 8425 } 8426 8427 case DTRACEAGG_AVG: 8428 agg->dtag_aggregate = dtrace_aggregate_avg; 8429 size = sizeof (uint64_t) * 2; 8430 break; 8431 8432 case DTRACEAGG_SUM: 8433 agg->dtag_aggregate = dtrace_aggregate_sum; 8434 break; 8435 8436 default: 8437 goto err; 8438 } 8439 8440 agg->dtag_action.dta_rec.dtrd_size = size; 8441 8442 if (ntuple == 0) 8443 goto err; 8444 8445 /* 8446 * We must make sure that we have enough actions for the n-tuple. 8447 */ 8448 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 8449 if (DTRACEACT_ISAGG(act->dta_kind)) 8450 break; 8451 8452 if (--ntuple == 0) { 8453 /* 8454 * This is the action with which our n-tuple begins. 8455 */ 8456 agg->dtag_first = act; 8457 goto success; 8458 } 8459 } 8460 8461 /* 8462 * This n-tuple is short by ntuple elements. Return failure. 8463 */ 8464 ASSERT(ntuple != 0); 8465 err: 8466 kmem_free(agg, sizeof (dtrace_aggregation_t)); 8467 return (NULL); 8468 8469 success: 8470 /* 8471 * If the last action in the tuple has a size of zero, it's actually 8472 * an expression argument for the aggregating action. 8473 */ 8474 ASSERT(ecb->dte_action_last != NULL); 8475 act = ecb->dte_action_last; 8476 8477 if (act->dta_kind == DTRACEACT_DIFEXPR) { 8478 ASSERT(act->dta_difo != NULL); 8479 8480 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 8481 agg->dtag_hasarg = 1; 8482 } 8483 8484 /* 8485 * We need to allocate an id for this aggregation. 
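 * (The ID comes from a vmem arena used purely as an ID allocator; IDs
 * are presumably handed out densely starting at 1, which is why
 * dts_aggregations is indexed by aggid - 1 below and why the ASSERT
 * expects aggid == dts_naggregations + 1 when the array must grow.)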
8486 */ 8487 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 8488 VM_BESTFIT | VM_SLEEP); 8489 8490 if (aggid - 1 >= state->dts_naggregations) { 8491 dtrace_aggregation_t **oaggs = state->dts_aggregations; 8492 dtrace_aggregation_t **aggs; 8493 int naggs = state->dts_naggregations << 1; 8494 int onaggs = state->dts_naggregations; 8495 8496 ASSERT(aggid == state->dts_naggregations + 1); 8497 8498 if (naggs == 0) { 8499 ASSERT(oaggs == NULL); 8500 naggs = 1; 8501 } 8502 8503 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 8504 8505 if (oaggs != NULL) { 8506 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 8507 kmem_free(oaggs, onaggs * sizeof (*aggs)); 8508 } 8509 8510 state->dts_aggregations = aggs; 8511 state->dts_naggregations = naggs; 8512 } 8513 8514 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 8515 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 8516 8517 frec = &agg->dtag_first->dta_rec; 8518 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 8519 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 8520 8521 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 8522 ASSERT(!act->dta_intuple); 8523 act->dta_intuple = 1; 8524 } 8525 8526 return (&agg->dtag_action); 8527 } 8528 8529 static void 8530 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 8531 { 8532 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 8533 dtrace_state_t *state = ecb->dte_state; 8534 dtrace_aggid_t aggid = agg->dtag_id; 8535 8536 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 8537 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 8538 8539 ASSERT(state->dts_aggregations[aggid - 1] == agg); 8540 state->dts_aggregations[aggid - 1] = NULL; 8541 8542 kmem_free(agg, sizeof (dtrace_aggregation_t)); 8543 } 8544 8545 static int 8546 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 8547 { 8548 dtrace_action_t *action, *last; 8549 dtrace_difo_t *dp = desc->dtad_difo; 8550 uint32_t size = 0, align = sizeof (uint8_t), mask; 8551 uint16_t format = 0; 8552 dtrace_recdesc_t *rec; 8553 dtrace_state_t *state = ecb->dte_state; 8554 dtrace_optval_t *opt = state->dts_options, nframes, strsize; 8555 uint64_t arg = desc->dtad_arg; 8556 8557 ASSERT(MUTEX_HELD(&dtrace_lock)); 8558 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 8559 8560 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 8561 /* 8562 * If this is an aggregating action, there must be neither 8563 * a speculate nor a commit on the action chain. 8564 */ 8565 dtrace_action_t *act; 8566 8567 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 8568 if (act->dta_kind == DTRACEACT_COMMIT) 8569 return (EINVAL); 8570 8571 if (act->dta_kind == DTRACEACT_SPECULATE) 8572 return (EINVAL); 8573 } 8574 8575 action = dtrace_ecb_aggregation_create(ecb, desc); 8576 8577 if (action == NULL) 8578 return (EINVAL); 8579 } else { 8580 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 8581 (desc->dtad_kind == DTRACEACT_DIFEXPR && 8582 dp != NULL && dp->dtdo_destructive)) { 8583 state->dts_destructive = 1; 8584 } 8585 8586 switch (desc->dtad_kind) { 8587 case DTRACEACT_PRINTF: 8588 case DTRACEACT_PRINTA: 8589 case DTRACEACT_SYSTEM: 8590 case DTRACEACT_FREOPEN: 8591 /* 8592 * We know that our arg is a string -- turn it into a 8593 * format. 
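 * (For example, for an action like printf("%d bytes\n", arg0), dtad_arg
 * points at a kernel-resident copy of the format string -- hence the
 * KERNELBASE assertion -- and dtrace_format_add() presumably records
 * it in the consumer state, returning a small handle that is stored
 * in dtrd_format below.)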
8594 */ 8595 if (arg == NULL) { 8596 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA); 8597 format = 0; 8598 } else { 8599 ASSERT(arg != NULL); 8600 ASSERT(arg > KERNELBASE); 8601 format = dtrace_format_add(state, 8602 (char *)(uintptr_t)arg); 8603 } 8604 8605 /*FALLTHROUGH*/ 8606 case DTRACEACT_LIBACT: 8607 case DTRACEACT_DIFEXPR: 8608 if (dp == NULL) 8609 return (EINVAL); 8610 8611 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 8612 break; 8613 8614 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 8615 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 8616 return (EINVAL); 8617 8618 size = opt[DTRACEOPT_STRSIZE]; 8619 } 8620 8621 break; 8622 8623 case DTRACEACT_STACK: 8624 if ((nframes = arg) == 0) { 8625 nframes = opt[DTRACEOPT_STACKFRAMES]; 8626 ASSERT(nframes > 0); 8627 arg = nframes; 8628 } 8629 8630 size = nframes * sizeof (pc_t); 8631 break; 8632 8633 case DTRACEACT_JSTACK: 8634 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 8635 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 8636 8637 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 8638 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 8639 8640 arg = DTRACE_USTACK_ARG(nframes, strsize); 8641 8642 /*FALLTHROUGH*/ 8643 case DTRACEACT_USTACK: 8644 if (desc->dtad_kind != DTRACEACT_JSTACK && 8645 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 8646 strsize = DTRACE_USTACK_STRSIZE(arg); 8647 nframes = opt[DTRACEOPT_USTACKFRAMES]; 8648 ASSERT(nframes > 0); 8649 arg = DTRACE_USTACK_ARG(nframes, strsize); 8650 } 8651 8652 /* 8653 * Save a slot for the pid. 8654 */ 8655 size = (nframes + 1) * sizeof (uint64_t); 8656 size += DTRACE_USTACK_STRSIZE(arg); 8657 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 8658 8659 break; 8660 8661 case DTRACEACT_SYM: 8662 case DTRACEACT_MOD: 8663 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 8664 sizeof (uint64_t)) || 8665 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 8666 return (EINVAL); 8667 break; 8668 8669 case DTRACEACT_USYM: 8670 case DTRACEACT_UMOD: 8671 case DTRACEACT_UADDR: 8672 if (dp == NULL || 8673 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 8674 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 8675 return (EINVAL); 8676 8677 /* 8678 * We have a slot for the pid, plus a slot for the 8679 * argument. To keep things simple (aligned with 8680 * bitness-neutral sizing), we store each as a 64-bit 8681 * quantity. 
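 * (So the record is a fixed 16 bytes -- pid plus argument -- regardless
 * of the data model of the traced process.)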
8682 */ 8683 size = 2 * sizeof (uint64_t); 8684 break; 8685 8686 case DTRACEACT_STOP: 8687 case DTRACEACT_BREAKPOINT: 8688 case DTRACEACT_PANIC: 8689 break; 8690 8691 case DTRACEACT_CHILL: 8692 case DTRACEACT_DISCARD: 8693 case DTRACEACT_RAISE: 8694 if (dp == NULL) 8695 return (EINVAL); 8696 break; 8697 8698 case DTRACEACT_EXIT: 8699 if (dp == NULL || 8700 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 8701 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 8702 return (EINVAL); 8703 break; 8704 8705 case DTRACEACT_SPECULATE: 8706 if (ecb->dte_size > sizeof (dtrace_epid_t)) 8707 return (EINVAL); 8708 8709 if (dp == NULL) 8710 return (EINVAL); 8711 8712 state->dts_speculates = 1; 8713 break; 8714 8715 case DTRACEACT_COMMIT: { 8716 dtrace_action_t *act = ecb->dte_action; 8717 8718 for (; act != NULL; act = act->dta_next) { 8719 if (act->dta_kind == DTRACEACT_COMMIT) 8720 return (EINVAL); 8721 } 8722 8723 if (dp == NULL) 8724 return (EINVAL); 8725 break; 8726 } 8727 8728 default: 8729 return (EINVAL); 8730 } 8731 8732 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 8733 /* 8734 * If this is a data-storing action or a speculate, 8735 * we must be sure that there isn't a commit on the 8736 * action chain. 8737 */ 8738 dtrace_action_t *act = ecb->dte_action; 8739 8740 for (; act != NULL; act = act->dta_next) { 8741 if (act->dta_kind == DTRACEACT_COMMIT) 8742 return (EINVAL); 8743 } 8744 } 8745 8746 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 8747 action->dta_rec.dtrd_size = size; 8748 } 8749 8750 action->dta_refcnt = 1; 8751 rec = &action->dta_rec; 8752 size = rec->dtrd_size; 8753 8754 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 8755 if (!(size & mask)) { 8756 align = mask + 1; 8757 break; 8758 } 8759 } 8760 8761 action->dta_kind = desc->dtad_kind; 8762 8763 if ((action->dta_difo = dp) != NULL) 8764 dtrace_difo_hold(dp); 8765 8766 rec->dtrd_action = action->dta_kind; 8767 rec->dtrd_arg = arg; 8768 rec->dtrd_uarg = desc->dtad_uarg; 8769 rec->dtrd_alignment = (uint16_t)align; 8770 rec->dtrd_format = format; 8771 8772 if ((last = ecb->dte_action_last) != NULL) { 8773 ASSERT(ecb->dte_action != NULL); 8774 action->dta_prev = last; 8775 last->dta_next = action; 8776 } else { 8777 ASSERT(ecb->dte_action == NULL); 8778 ecb->dte_action = action; 8779 } 8780 8781 ecb->dte_action_last = action; 8782 8783 return (0); 8784 } 8785 8786 static void 8787 dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 8788 { 8789 dtrace_action_t *act = ecb->dte_action, *next; 8790 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 8791 dtrace_difo_t *dp; 8792 uint16_t format; 8793 8794 if (act != NULL && act->dta_refcnt > 1) { 8795 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 8796 act->dta_refcnt--; 8797 } else { 8798 for (; act != NULL; act = next) { 8799 next = act->dta_next; 8800 ASSERT(next != NULL || act == ecb->dte_action_last); 8801 ASSERT(act->dta_refcnt == 1); 8802 8803 if ((format = act->dta_rec.dtrd_format) != 0) 8804 dtrace_format_remove(ecb->dte_state, format); 8805 8806 if ((dp = act->dta_difo) != NULL) 8807 dtrace_difo_release(dp, vstate); 8808 8809 if (DTRACEACT_ISAGG(act->dta_kind)) { 8810 dtrace_ecb_aggregation_destroy(ecb, act); 8811 } else { 8812 kmem_free(act, sizeof (dtrace_action_t)); 8813 } 8814 } 8815 } 8816 8817 ecb->dte_action = NULL; 8818 ecb->dte_action_last = NULL; 8819 ecb->dte_size = sizeof (dtrace_epid_t); 8820 } 8821 8822 static void 8823 dtrace_ecb_disable(dtrace_ecb_t *ecb) 8824 { 8825 /* 8826 * We disable the ECB by 
removing it from its probe. 8827 */ 8828 dtrace_ecb_t *pecb, *prev = NULL; 8829 dtrace_probe_t *probe = ecb->dte_probe; 8830 8831 ASSERT(MUTEX_HELD(&dtrace_lock)); 8832 8833 if (probe == NULL) { 8834 /* 8835 * This is the NULL probe; there is nothing to disable. 8836 */ 8837 return; 8838 } 8839 8840 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 8841 if (pecb == ecb) 8842 break; 8843 prev = pecb; 8844 } 8845 8846 ASSERT(pecb != NULL); 8847 8848 if (prev == NULL) { 8849 probe->dtpr_ecb = ecb->dte_next; 8850 } else { 8851 prev->dte_next = ecb->dte_next; 8852 } 8853 8854 if (ecb == probe->dtpr_ecb_last) { 8855 ASSERT(ecb->dte_next == NULL); 8856 probe->dtpr_ecb_last = prev; 8857 } 8858 8859 /* 8860 * The ECB has been disconnected from the probe; now sync to assure 8861 * that all CPUs have seen the change before returning. 8862 */ 8863 dtrace_sync(); 8864 8865 if (probe->dtpr_ecb == NULL) { 8866 /* 8867 * That was the last ECB on the probe; clear the predicate 8868 * cache ID for the probe, disable it and sync one more time 8869 * to assure that we'll never hit it again. 8870 */ 8871 dtrace_provider_t *prov = probe->dtpr_provider; 8872 8873 ASSERT(ecb->dte_next == NULL); 8874 ASSERT(probe->dtpr_ecb_last == NULL); 8875 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 8876 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 8877 probe->dtpr_id, probe->dtpr_arg); 8878 dtrace_sync(); 8879 } else { 8880 /* 8881 * There is at least one ECB remaining on the probe. If there 8882 * is _exactly_ one, set the probe's predicate cache ID to be 8883 * the predicate cache ID of the remaining ECB. 8884 */ 8885 ASSERT(probe->dtpr_ecb_last != NULL); 8886 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 8887 8888 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 8889 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 8890 8891 ASSERT(probe->dtpr_ecb->dte_next == NULL); 8892 8893 if (p != NULL) 8894 probe->dtpr_predcache = p->dtp_cacheid; 8895 } 8896 8897 ecb->dte_next = NULL; 8898 } 8899 } 8900 8901 static void 8902 dtrace_ecb_destroy(dtrace_ecb_t *ecb) 8903 { 8904 dtrace_state_t *state = ecb->dte_state; 8905 dtrace_vstate_t *vstate = &state->dts_vstate; 8906 dtrace_predicate_t *pred; 8907 dtrace_epid_t epid = ecb->dte_epid; 8908 8909 ASSERT(MUTEX_HELD(&dtrace_lock)); 8910 ASSERT(ecb->dte_next == NULL); 8911 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 8912 8913 if ((pred = ecb->dte_predicate) != NULL) 8914 dtrace_predicate_release(pred, vstate); 8915 8916 dtrace_ecb_action_remove(ecb); 8917 8918 ASSERT(state->dts_ecbs[epid - 1] == ecb); 8919 state->dts_ecbs[epid - 1] = NULL; 8920 8921 kmem_free(ecb, sizeof (dtrace_ecb_t)); 8922 } 8923 8924 static dtrace_ecb_t * 8925 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 8926 dtrace_enabling_t *enab) 8927 { 8928 dtrace_ecb_t *ecb; 8929 dtrace_predicate_t *pred; 8930 dtrace_actdesc_t *act; 8931 dtrace_provider_t *prov; 8932 dtrace_ecbdesc_t *desc = enab->dten_current; 8933 8934 ASSERT(MUTEX_HELD(&dtrace_lock)); 8935 ASSERT(state != NULL); 8936 8937 ecb = dtrace_ecb_add(state, probe); 8938 ecb->dte_uarg = desc->dted_uarg; 8939 8940 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 8941 dtrace_predicate_hold(pred); 8942 ecb->dte_predicate = pred; 8943 } 8944 8945 if (probe != NULL) { 8946 /* 8947 * If the provider shows more leg than the consumer is old 8948 * enough to see, we need to enable the appropriate implicit 8949 * predicate bits to prevent the ecb from activating at 8950 * revealing times. 
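 * (Concretely: if the consumer's credentials do not let it observe
 * every process or every zone, the implicit predicate bits set below
 * are checked at probe-firing time and keep this ECB from recording
 * on behalf of processes or zones the consumer does not own.)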
8951 * 8952 * Providers specifying DTRACE_PRIV_USER at register time 8953 * are stating that they need the /proc-style privilege 8954 * model to be enforced, and this is what DTRACE_COND_OWNER 8955 * and DTRACE_COND_ZONEOWNER will then do at probe time. 8956 */ 8957 prov = probe->dtpr_provider; 8958 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 8959 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 8960 ecb->dte_cond |= DTRACE_COND_OWNER; 8961 8962 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 8963 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 8964 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 8965 8966 /* 8967 * If the provider shows us kernel innards and the user 8968 * is lacking sufficient privilege, enable the 8969 * DTRACE_COND_USERMODE implicit predicate. 8970 */ 8971 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 8972 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 8973 ecb->dte_cond |= DTRACE_COND_USERMODE; 8974 } 8975 8976 if (dtrace_ecb_create_cache != NULL) { 8977 /* 8978 * If we have a cached ecb, we'll use its action list instead 8979 * of creating our own (saving both time and space). 8980 */ 8981 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 8982 dtrace_action_t *act = cached->dte_action; 8983 8984 if (act != NULL) { 8985 ASSERT(act->dta_refcnt > 0); 8986 act->dta_refcnt++; 8987 ecb->dte_action = act; 8988 ecb->dte_action_last = cached->dte_action_last; 8989 ecb->dte_needed = cached->dte_needed; 8990 ecb->dte_size = cached->dte_size; 8991 ecb->dte_alignment = cached->dte_alignment; 8992 } 8993 8994 return (ecb); 8995 } 8996 8997 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 8998 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 8999 dtrace_ecb_destroy(ecb); 9000 return (NULL); 9001 } 9002 } 9003 9004 dtrace_ecb_resize(ecb); 9005 9006 return (dtrace_ecb_create_cache = ecb); 9007 } 9008 9009 static int 9010 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 9011 { 9012 dtrace_ecb_t *ecb; 9013 dtrace_enabling_t *enab = arg; 9014 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 9015 9016 ASSERT(state != NULL); 9017 9018 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 9019 /* 9020 * This probe was created in a generation for which this 9021 * enabling has previously created ECBs; we don't want to 9022 * enable it again, so just kick out. 
9023 */ 9024 return (DTRACE_MATCH_NEXT); 9025 } 9026 9027 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 9028 return (DTRACE_MATCH_DONE); 9029 9030 dtrace_ecb_enable(ecb); 9031 return (DTRACE_MATCH_NEXT); 9032 } 9033 9034 static dtrace_ecb_t * 9035 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 9036 { 9037 dtrace_ecb_t *ecb; 9038 9039 ASSERT(MUTEX_HELD(&dtrace_lock)); 9040 9041 if (id == 0 || id > state->dts_necbs) 9042 return (NULL); 9043 9044 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 9045 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 9046 9047 return (state->dts_ecbs[id - 1]); 9048 } 9049 9050 static dtrace_aggregation_t * 9051 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 9052 { 9053 dtrace_aggregation_t *agg; 9054 9055 ASSERT(MUTEX_HELD(&dtrace_lock)); 9056 9057 if (id == 0 || id > state->dts_naggregations) 9058 return (NULL); 9059 9060 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 9061 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 9062 agg->dtag_id == id); 9063 9064 return (state->dts_aggregations[id - 1]); 9065 } 9066 9067 /* 9068 * DTrace Buffer Functions 9069 * 9070 * The following functions manipulate DTrace buffers. Most of these functions 9071 * are called in the context of establishing or processing consumer state; 9072 * exceptions are explicitly noted. 9073 */ 9074 9075 /* 9076 * Note: called from cross call context. This function switches the two 9077 * buffers on a given CPU. The atomicity of this operation is assured by 9078 * disabling interrupts while the actual switch takes place; the disabling of 9079 * interrupts serializes the execution with any execution of dtrace_probe() on 9080 * the same CPU. 9081 */ 9082 static void 9083 dtrace_buffer_switch(dtrace_buffer_t *buf) 9084 { 9085 caddr_t tomax = buf->dtb_tomax; 9086 caddr_t xamot = buf->dtb_xamot; 9087 dtrace_icookie_t cookie; 9088 9089 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 9090 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 9091 9092 cookie = dtrace_interrupt_disable(); 9093 buf->dtb_tomax = xamot; 9094 buf->dtb_xamot = tomax; 9095 buf->dtb_xamot_drops = buf->dtb_drops; 9096 buf->dtb_xamot_offset = buf->dtb_offset; 9097 buf->dtb_xamot_errors = buf->dtb_errors; 9098 buf->dtb_xamot_flags = buf->dtb_flags; 9099 buf->dtb_offset = 0; 9100 buf->dtb_drops = 0; 9101 buf->dtb_errors = 0; 9102 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 9103 dtrace_interrupt_enable(cookie); 9104 } 9105 9106 /* 9107 * Note: called from cross call context. This function activates a buffer 9108 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 9109 * is guaranteed by the disabling of interrupts. 9110 */ 9111 static void 9112 dtrace_buffer_activate(dtrace_state_t *state) 9113 { 9114 dtrace_buffer_t *buf; 9115 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 9116 9117 buf = &state->dts_buffer[CPU->cpu_id]; 9118 9119 if (buf->dtb_tomax != NULL) { 9120 /* 9121 * We might like to assert that the buffer is marked inactive, 9122 * but this isn't necessarily true: the buffer for the CPU 9123 * that processes the BEGIN probe has its buffer activated 9124 * manually. In this case, we take the (harmless) action 9125 * re-clearing the bit INACTIVE bit. 
9126 */ 9127 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 9128 } 9129 9130 dtrace_interrupt_enable(cookie); 9131 } 9132 9133 static int 9134 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 9135 processorid_t cpu) 9136 { 9137 cpu_t *cp; 9138 dtrace_buffer_t *buf; 9139 9140 ASSERT(MUTEX_HELD(&cpu_lock)); 9141 ASSERT(MUTEX_HELD(&dtrace_lock)); 9142 9143 if (size > dtrace_nonroot_maxsize && 9144 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 9145 return (EFBIG); 9146 9147 cp = cpu_list; 9148 9149 do { 9150 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 9151 continue; 9152 9153 buf = &bufs[cp->cpu_id]; 9154 9155 /* 9156 * If there is already a buffer allocated for this CPU, it 9157 * is only possible that this is a DR event. In this case, 9158 * the buffer size must match our specified size. 9159 */ 9160 if (buf->dtb_tomax != NULL) { 9161 ASSERT(buf->dtb_size == size); 9162 continue; 9163 } 9164 9165 ASSERT(buf->dtb_xamot == NULL); 9166 9167 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 9168 goto err; 9169 9170 buf->dtb_size = size; 9171 buf->dtb_flags = flags; 9172 buf->dtb_offset = 0; 9173 buf->dtb_drops = 0; 9174 9175 if (flags & DTRACEBUF_NOSWITCH) 9176 continue; 9177 9178 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 9179 goto err; 9180 } while ((cp = cp->cpu_next) != cpu_list); 9181 9182 return (0); 9183 9184 err: 9185 cp = cpu_list; 9186 9187 do { 9188 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 9189 continue; 9190 9191 buf = &bufs[cp->cpu_id]; 9192 9193 if (buf->dtb_xamot != NULL) { 9194 ASSERT(buf->dtb_tomax != NULL); 9195 ASSERT(buf->dtb_size == size); 9196 kmem_free(buf->dtb_xamot, size); 9197 } 9198 9199 if (buf->dtb_tomax != NULL) { 9200 ASSERT(buf->dtb_size == size); 9201 kmem_free(buf->dtb_tomax, size); 9202 } 9203 9204 buf->dtb_tomax = NULL; 9205 buf->dtb_xamot = NULL; 9206 buf->dtb_size = 0; 9207 } while ((cp = cp->cpu_next) != cpu_list); 9208 9209 return (ENOMEM); 9210 } 9211 9212 /* 9213 * Note: called from probe context. This function just increments the drop 9214 * count on a buffer. It has been made a function to allow for the 9215 * possibility of understanding the source of mysterious drop counts. (A 9216 * problem for which one may be particularly disappointed that DTrace cannot 9217 * be used to understand DTrace.) 9218 */ 9219 static void 9220 dtrace_buffer_drop(dtrace_buffer_t *buf) 9221 { 9222 buf->dtb_drops++; 9223 } 9224 9225 /* 9226 * Note: called from probe context. This function is called to reserve space 9227 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 9228 * mstate. Returns the new offset in the buffer, or a negative value if an 9229 * error has occurred. 9230 */ 9231 static intptr_t 9232 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 9233 dtrace_state_t *state, dtrace_mstate_t *mstate) 9234 { 9235 intptr_t offs = buf->dtb_offset, soffs; 9236 intptr_t woffs; 9237 caddr_t tomax; 9238 size_t total; 9239 9240 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 9241 return (-1); 9242 9243 if ((tomax = buf->dtb_tomax) == NULL) { 9244 dtrace_buffer_drop(buf); 9245 return (-1); 9246 } 9247 9248 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 9249 while (offs & (align - 1)) { 9250 /* 9251 * Assert that our alignment is off by a number which 9252 * is itself sizeof (uint32_t) aligned. 
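 * (Each padding word written here is stamped with DTRACE_EPIDNONE so
 * that a consumer walking the buffer can recognize it as filler and
 * skip it; e.g., moving from offset 12 to an 8-byte-aligned offset of
 * 16 writes exactly one such 4-byte filler word.)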
9253 */ 9254 ASSERT(!((align - (offs & (align - 1))) & 9255 (sizeof (uint32_t) - 1))); 9256 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 9257 offs += sizeof (uint32_t); 9258 } 9259 9260 if ((soffs = offs + needed) > buf->dtb_size) { 9261 dtrace_buffer_drop(buf); 9262 return (-1); 9263 } 9264 9265 if (mstate == NULL) 9266 return (offs); 9267 9268 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 9269 mstate->dtms_scratch_size = buf->dtb_size - soffs; 9270 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 9271 9272 return (offs); 9273 } 9274 9275 if (buf->dtb_flags & DTRACEBUF_FILL) { 9276 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 9277 (buf->dtb_flags & DTRACEBUF_FULL)) 9278 return (-1); 9279 goto out; 9280 } 9281 9282 total = needed + (offs & (align - 1)); 9283 9284 /* 9285 * For a ring buffer, life is quite a bit more complicated. Before 9286 * we can store any padding, we need to adjust our wrapping offset. 9287 * (If we've never before wrapped or we're not about to, no adjustment 9288 * is required.) 9289 */ 9290 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 9291 offs + total > buf->dtb_size) { 9292 woffs = buf->dtb_xamot_offset; 9293 9294 if (offs + total > buf->dtb_size) { 9295 /* 9296 * We can't fit in the end of the buffer. First, a 9297 * sanity check that we can fit in the buffer at all. 9298 */ 9299 if (total > buf->dtb_size) { 9300 dtrace_buffer_drop(buf); 9301 return (-1); 9302 } 9303 9304 /* 9305 * We're going to be storing at the top of the buffer, 9306 * so now we need to deal with the wrapped offset. We 9307 * only reset our wrapped offset to 0 if it is 9308 * currently greater than the current offset. If it 9309 * is less than the current offset, it is because a 9310 * previous allocation induced a wrap -- but the 9311 * allocation didn't subsequently take the space due 9312 * to an error or false predicate evaluation. In this 9313 * case, we'll just leave the wrapped offset alone: if 9314 * the wrapped offset hasn't been advanced far enough 9315 * for this allocation, it will be adjusted in the 9316 * lower loop. 9317 */ 9318 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 9319 if (woffs >= offs) 9320 woffs = 0; 9321 } else { 9322 woffs = 0; 9323 } 9324 9325 /* 9326 * Now we know that we're going to be storing to the 9327 * top of the buffer and that there is room for us 9328 * there. We need to clear the buffer from the current 9329 * offset to the end (there may be old gunk there). 9330 */ 9331 while (offs < buf->dtb_size) 9332 tomax[offs++] = 0; 9333 9334 /* 9335 * We need to set our offset to zero. And because we 9336 * are wrapping, we need to set the bit indicating as 9337 * much. We can also adjust our needed space back 9338 * down to the space required by the ECB -- we know 9339 * that the top of the buffer is aligned. 9340 */ 9341 offs = 0; 9342 total = needed; 9343 buf->dtb_flags |= DTRACEBUF_WRAPPED; 9344 } else { 9345 /* 9346 * There is room for us in the buffer, so we simply 9347 * need to check the wrapped offset. 9348 */ 9349 if (woffs < offs) { 9350 /* 9351 * The wrapped offset is less than the offset. 9352 * This can happen if we allocated buffer space 9353 * that induced a wrap, but then we didn't 9354 * subsequently take the space due to an error 9355 * or false predicate evaluation. This is 9356 * okay; we know that _this_ allocation isn't 9357 * going to induce a wrap. We still can't 9358 * reset the wrapped offset to be zero, 9359 * however: the space may have been trashed in 9360 * the previous failed probe attempt. 
But at 9361 * least the wrapped offset doesn't need to 9362 * be adjusted at all... 9363 */ 9364 goto out; 9365 } 9366 } 9367 9368 while (offs + total > woffs) { 9369 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 9370 size_t size; 9371 9372 if (epid == DTRACE_EPIDNONE) { 9373 size = sizeof (uint32_t); 9374 } else { 9375 ASSERT(epid <= state->dts_necbs); 9376 ASSERT(state->dts_ecbs[epid - 1] != NULL); 9377 9378 size = state->dts_ecbs[epid - 1]->dte_size; 9379 } 9380 9381 ASSERT(woffs + size <= buf->dtb_size); 9382 ASSERT(size != 0); 9383 9384 if (woffs + size == buf->dtb_size) { 9385 /* 9386 * We've reached the end of the buffer; we want 9387 * to set the wrapped offset to 0 and break 9388 * out. However, if the offs is 0, then we're 9389 * in a strange edge-condition: the amount of 9390 * space that we want to reserve plus the size 9391 * of the record that we're overwriting is 9392 * greater than the size of the buffer. This 9393 * is problematic because if we reserve the 9394 * space but subsequently don't consume it (due 9395 * to a failed predicate or error) the wrapped 9396 * offset will be 0 -- yet the EPID at offset 0 9397 * will not be committed. This situation is 9398 * relatively easy to deal with: if we're in 9399 * this case, the buffer is indistinguishable 9400 * from one that hasn't wrapped; we need only 9401 * finish the job by clearing the wrapped bit, 9402 * explicitly setting the offset to be 0, and 9403 * zero'ing out the old data in the buffer. 9404 */ 9405 if (offs == 0) { 9406 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 9407 buf->dtb_offset = 0; 9408 woffs = total; 9409 9410 while (woffs < buf->dtb_size) 9411 tomax[woffs++] = 0; 9412 } 9413 9414 woffs = 0; 9415 break; 9416 } 9417 9418 woffs += size; 9419 } 9420 9421 /* 9422 * We have a wrapped offset. It may be that the wrapped offset 9423 * has become zero -- that's okay. 9424 */ 9425 buf->dtb_xamot_offset = woffs; 9426 } 9427 9428 out: 9429 /* 9430 * Now we can plow the buffer with any necessary padding. 9431 */ 9432 while (offs & (align - 1)) { 9433 /* 9434 * Assert that our alignment is off by a number which 9435 * is itself sizeof (uint32_t) aligned. 9436 */ 9437 ASSERT(!((align - (offs & (align - 1))) & 9438 (sizeof (uint32_t) - 1))); 9439 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 9440 offs += sizeof (uint32_t); 9441 } 9442 9443 if (buf->dtb_flags & DTRACEBUF_FILL) { 9444 if (offs + needed > buf->dtb_size - state->dts_reserve) { 9445 buf->dtb_flags |= DTRACEBUF_FULL; 9446 return (-1); 9447 } 9448 } 9449 9450 if (mstate == NULL) 9451 return (offs); 9452 9453 /* 9454 * For ring buffers and fill buffers, the scratch space is always 9455 * the inactive buffer. 9456 */ 9457 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 9458 mstate->dtms_scratch_size = buf->dtb_size; 9459 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 9460 9461 return (offs); 9462 } 9463 9464 static void 9465 dtrace_buffer_polish(dtrace_buffer_t *buf) 9466 { 9467 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 9468 ASSERT(MUTEX_HELD(&dtrace_lock)); 9469 9470 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 9471 return; 9472 9473 /* 9474 * We need to polish the ring buffer. There are three cases: 9475 * 9476 * - The first (and presumably most common) is that there is no gap 9477 * between the buffer offset and the wrapped offset. In this case, 9478 * there is nothing in the buffer that isn't valid data; we can 9479 * mark the buffer as polished and return. 
9480 * 9481 * - The second (less common than the first but still more common 9482 * than the third) is that there is a gap between the buffer offset 9483 * and the wrapped offset, and the wrapped offset is larger than the 9484 * buffer offset. This can happen because of an alignment issue, or 9485 * can happen because of a call to dtrace_buffer_reserve() that 9486 * didn't subsequently consume the buffer space. In this case, 9487 * we need to zero the data from the buffer offset to the wrapped 9488 * offset. 9489 * 9490 * - The third (and least common) is that there is a gap between the 9491 * buffer offset and the wrapped offset, but the wrapped offset is 9492 * _less_ than the buffer offset. This can only happen because a 9493 * call to dtrace_buffer_reserve() induced a wrap, but the space 9494 * was not subsequently consumed. In this case, we need to zero the 9495 * space from the offset to the end of the buffer _and_ from the 9496 * top of the buffer to the wrapped offset. 9497 */ 9498 if (buf->dtb_offset < buf->dtb_xamot_offset) { 9499 bzero(buf->dtb_tomax + buf->dtb_offset, 9500 buf->dtb_xamot_offset - buf->dtb_offset); 9501 } 9502 9503 if (buf->dtb_offset > buf->dtb_xamot_offset) { 9504 bzero(buf->dtb_tomax + buf->dtb_offset, 9505 buf->dtb_size - buf->dtb_offset); 9506 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 9507 } 9508 } 9509 9510 static void 9511 dtrace_buffer_free(dtrace_buffer_t *bufs) 9512 { 9513 int i; 9514 9515 for (i = 0; i < NCPU; i++) { 9516 dtrace_buffer_t *buf = &bufs[i]; 9517 9518 if (buf->dtb_tomax == NULL) { 9519 ASSERT(buf->dtb_xamot == NULL); 9520 ASSERT(buf->dtb_size == 0); 9521 continue; 9522 } 9523 9524 if (buf->dtb_xamot != NULL) { 9525 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 9526 kmem_free(buf->dtb_xamot, buf->dtb_size); 9527 } 9528 9529 kmem_free(buf->dtb_tomax, buf->dtb_size); 9530 buf->dtb_size = 0; 9531 buf->dtb_tomax = NULL; 9532 buf->dtb_xamot = NULL; 9533 } 9534 } 9535 9536 /* 9537 * DTrace Enabling Functions 9538 */ 9539 static dtrace_enabling_t * 9540 dtrace_enabling_create(dtrace_vstate_t *vstate) 9541 { 9542 dtrace_enabling_t *enab; 9543 9544 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 9545 enab->dten_vstate = vstate; 9546 9547 return (enab); 9548 } 9549 9550 static void 9551 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 9552 { 9553 dtrace_ecbdesc_t **ndesc; 9554 size_t osize, nsize; 9555 9556 /* 9557 * We can't add to enablings after we've enabled them, or after we've 9558 * retained them. 
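 * (A non-zero dten_probegen means the enabling has already been
 * matched and enabled; non-NULL prev/next links mean it sits on the
 * retained list -- hence the two ASSERTs below.)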
9559 */ 9560 ASSERT(enab->dten_probegen == 0); 9561 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 9562 9563 if (enab->dten_ndesc < enab->dten_maxdesc) { 9564 enab->dten_desc[enab->dten_ndesc++] = ecb; 9565 return; 9566 } 9567 9568 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 9569 9570 if (enab->dten_maxdesc == 0) { 9571 enab->dten_maxdesc = 1; 9572 } else { 9573 enab->dten_maxdesc <<= 1; 9574 } 9575 9576 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 9577 9578 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 9579 ndesc = kmem_zalloc(nsize, KM_SLEEP); 9580 bcopy(enab->dten_desc, ndesc, osize); 9581 kmem_free(enab->dten_desc, osize); 9582 9583 enab->dten_desc = ndesc; 9584 enab->dten_desc[enab->dten_ndesc++] = ecb; 9585 } 9586 9587 static void 9588 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 9589 dtrace_probedesc_t *pd) 9590 { 9591 dtrace_ecbdesc_t *new; 9592 dtrace_predicate_t *pred; 9593 dtrace_actdesc_t *act; 9594 9595 /* 9596 * We're going to create a new ECB description that matches the 9597 * specified ECB in every way, but has the specified probe description. 9598 */ 9599 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 9600 9601 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 9602 dtrace_predicate_hold(pred); 9603 9604 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 9605 dtrace_actdesc_hold(act); 9606 9607 new->dted_action = ecb->dted_action; 9608 new->dted_pred = ecb->dted_pred; 9609 new->dted_probe = *pd; 9610 new->dted_uarg = ecb->dted_uarg; 9611 9612 dtrace_enabling_add(enab, new); 9613 } 9614 9615 static void 9616 dtrace_enabling_dump(dtrace_enabling_t *enab) 9617 { 9618 int i; 9619 9620 for (i = 0; i < enab->dten_ndesc; i++) { 9621 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 9622 9623 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 9624 desc->dtpd_provider, desc->dtpd_mod, 9625 desc->dtpd_func, desc->dtpd_name); 9626 } 9627 } 9628 9629 static void 9630 dtrace_enabling_destroy(dtrace_enabling_t *enab) 9631 { 9632 int i; 9633 dtrace_ecbdesc_t *ep; 9634 dtrace_vstate_t *vstate = enab->dten_vstate; 9635 9636 ASSERT(MUTEX_HELD(&dtrace_lock)); 9637 9638 for (i = 0; i < enab->dten_ndesc; i++) { 9639 dtrace_actdesc_t *act, *next; 9640 dtrace_predicate_t *pred; 9641 9642 ep = enab->dten_desc[i]; 9643 9644 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 9645 dtrace_predicate_release(pred, vstate); 9646 9647 for (act = ep->dted_action; act != NULL; act = next) { 9648 next = act->dtad_next; 9649 dtrace_actdesc_release(act, vstate); 9650 } 9651 9652 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 9653 } 9654 9655 kmem_free(enab->dten_desc, 9656 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 9657 9658 /* 9659 * If this was a retained enabling, decrement the dts_nretained count 9660 * and take it off of the dtrace_retained list. 
9661 */ 9662 if (enab->dten_prev != NULL || enab->dten_next != NULL || 9663 dtrace_retained == enab) { 9664 ASSERT(enab->dten_vstate->dtvs_state != NULL); 9665 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 9666 enab->dten_vstate->dtvs_state->dts_nretained--; 9667 } 9668 9669 if (enab->dten_prev == NULL) { 9670 if (dtrace_retained == enab) { 9671 dtrace_retained = enab->dten_next; 9672 9673 if (dtrace_retained != NULL) 9674 dtrace_retained->dten_prev = NULL; 9675 } 9676 } else { 9677 ASSERT(enab != dtrace_retained); 9678 ASSERT(dtrace_retained != NULL); 9679 enab->dten_prev->dten_next = enab->dten_next; 9680 } 9681 9682 if (enab->dten_next != NULL) { 9683 ASSERT(dtrace_retained != NULL); 9684 enab->dten_next->dten_prev = enab->dten_prev; 9685 } 9686 9687 kmem_free(enab, sizeof (dtrace_enabling_t)); 9688 } 9689 9690 static int 9691 dtrace_enabling_retain(dtrace_enabling_t *enab) 9692 { 9693 dtrace_state_t *state; 9694 9695 ASSERT(MUTEX_HELD(&dtrace_lock)); 9696 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 9697 ASSERT(enab->dten_vstate != NULL); 9698 9699 state = enab->dten_vstate->dtvs_state; 9700 ASSERT(state != NULL); 9701 9702 /* 9703 * We only allow each state to retain dtrace_retain_max enablings. 9704 */ 9705 if (state->dts_nretained >= dtrace_retain_max) 9706 return (ENOSPC); 9707 9708 state->dts_nretained++; 9709 9710 if (dtrace_retained == NULL) { 9711 dtrace_retained = enab; 9712 return (0); 9713 } 9714 9715 enab->dten_next = dtrace_retained; 9716 dtrace_retained->dten_prev = enab; 9717 dtrace_retained = enab; 9718 9719 return (0); 9720 } 9721 9722 static int 9723 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 9724 dtrace_probedesc_t *create) 9725 { 9726 dtrace_enabling_t *new, *enab; 9727 int found = 0, err = ENOENT; 9728 9729 ASSERT(MUTEX_HELD(&dtrace_lock)); 9730 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 9731 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 9732 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 9733 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 9734 9735 new = dtrace_enabling_create(&state->dts_vstate); 9736 9737 /* 9738 * Iterate over all retained enablings, looking for enablings that 9739 * match the specified state. 9740 */ 9741 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 9742 int i; 9743 9744 /* 9745 * dtvs_state can only be NULL for helper enablings -- and 9746 * helper enablings can't be retained. 9747 */ 9748 ASSERT(enab->dten_vstate->dtvs_state != NULL); 9749 9750 if (enab->dten_vstate->dtvs_state != state) 9751 continue; 9752 9753 /* 9754 * Now iterate over each probe description; we're looking for 9755 * an exact match to the specified probe description. 9756 */ 9757 for (i = 0; i < enab->dten_ndesc; i++) { 9758 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 9759 dtrace_probedesc_t *pd = &ep->dted_probe; 9760 9761 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 9762 continue; 9763 9764 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 9765 continue; 9766 9767 if (strcmp(pd->dtpd_func, match->dtpd_func)) 9768 continue; 9769 9770 if (strcmp(pd->dtpd_name, match->dtpd_name)) 9771 continue; 9772 9773 /* 9774 * We have a winning probe! Add it to our growing 9775 * enabling. 
9776 */ 9777 found = 1; 9778 dtrace_enabling_addlike(new, ep, create); 9779 } 9780 } 9781 9782 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 9783 dtrace_enabling_destroy(new); 9784 return (err); 9785 } 9786 9787 return (0); 9788 } 9789 9790 static void 9791 dtrace_enabling_retract(dtrace_state_t *state) 9792 { 9793 dtrace_enabling_t *enab, *next; 9794 9795 ASSERT(MUTEX_HELD(&dtrace_lock)); 9796 9797 /* 9798 * Iterate over all retained enablings, destroy the enablings retained 9799 * for the specified state. 9800 */ 9801 for (enab = dtrace_retained; enab != NULL; enab = next) { 9802 next = enab->dten_next; 9803 9804 /* 9805 * dtvs_state can only be NULL for helper enablings -- and 9806 * helper enablings can't be retained. 9807 */ 9808 ASSERT(enab->dten_vstate->dtvs_state != NULL); 9809 9810 if (enab->dten_vstate->dtvs_state == state) { 9811 ASSERT(state->dts_nretained > 0); 9812 dtrace_enabling_destroy(enab); 9813 } 9814 } 9815 9816 ASSERT(state->dts_nretained == 0); 9817 } 9818 9819 static int 9820 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 9821 { 9822 int i = 0; 9823 int matched = 0; 9824 9825 ASSERT(MUTEX_HELD(&cpu_lock)); 9826 ASSERT(MUTEX_HELD(&dtrace_lock)); 9827 9828 for (i = 0; i < enab->dten_ndesc; i++) { 9829 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 9830 9831 enab->dten_current = ep; 9832 enab->dten_error = 0; 9833 9834 matched += dtrace_probe_enable(&ep->dted_probe, enab); 9835 9836 if (enab->dten_error != 0) { 9837 /* 9838 * If we get an error half-way through enabling the 9839 * probes, we kick out -- perhaps with some number of 9840 * them enabled. Leaving enabled probes enabled may 9841 * be slightly confusing for user-level, but we expect 9842 * that no one will attempt to actually drive on in 9843 * the face of such errors. If this is an anonymous 9844 * enabling (indicated with a NULL nmatched pointer), 9845 * we cmn_err() a message. We aren't expecting to 9846 * get such an error -- such as it can exist at all, 9847 * it would be a result of corrupted DOF in the driver 9848 * properties. 9849 */ 9850 if (nmatched == NULL) { 9851 cmn_err(CE_WARN, "dtrace_enabling_match() " 9852 "error on %p: %d", (void *)ep, 9853 enab->dten_error); 9854 } 9855 9856 return (enab->dten_error); 9857 } 9858 } 9859 9860 enab->dten_probegen = dtrace_probegen; 9861 if (nmatched != NULL) 9862 *nmatched = matched; 9863 9864 return (0); 9865 } 9866 9867 static void 9868 dtrace_enabling_matchall(void) 9869 { 9870 dtrace_enabling_t *enab; 9871 9872 mutex_enter(&cpu_lock); 9873 mutex_enter(&dtrace_lock); 9874 9875 /* 9876 * Because we can be called after dtrace_detach() has been called, we 9877 * cannot assert that there are retained enablings. We can safely 9878 * load from dtrace_retained, however: the taskq_destroy() at the 9879 * end of dtrace_detach() will block pending our completion. 
9880 */ 9881 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) 9882 (void) dtrace_enabling_match(enab, NULL); 9883 9884 mutex_exit(&dtrace_lock); 9885 mutex_exit(&cpu_lock); 9886 } 9887 9888 static int 9889 dtrace_enabling_matchstate(dtrace_state_t *state, int *nmatched) 9890 { 9891 dtrace_enabling_t *enab; 9892 int matched, total = 0, err; 9893 9894 ASSERT(MUTEX_HELD(&cpu_lock)); 9895 ASSERT(MUTEX_HELD(&dtrace_lock)); 9896 9897 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 9898 ASSERT(enab->dten_vstate->dtvs_state != NULL); 9899 9900 if (enab->dten_vstate->dtvs_state != state) 9901 continue; 9902 9903 if ((err = dtrace_enabling_match(enab, &matched)) != 0) 9904 return (err); 9905 9906 total += matched; 9907 } 9908 9909 if (nmatched != NULL) 9910 *nmatched = total; 9911 9912 return (0); 9913 } 9914 9915 /* 9916 * If an enabling is to be enabled without having matched probes (that is, if 9917 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 9918 * enabling must be _primed_ by creating an ECB for every ECB description. 9919 * This must be done to assure that we know the number of speculations, the 9920 * number of aggregations, the minimum buffer size needed, etc. before we 9921 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually 9922 * enabling any probes, we create ECBs for every ECB decription, but with a 9923 * NULL probe -- which is exactly what this function does. 9924 */ 9925 static void 9926 dtrace_enabling_prime(dtrace_state_t *state) 9927 { 9928 dtrace_enabling_t *enab; 9929 int i; 9930 9931 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 9932 ASSERT(enab->dten_vstate->dtvs_state != NULL); 9933 9934 if (enab->dten_vstate->dtvs_state != state) 9935 continue; 9936 9937 /* 9938 * We don't want to prime an enabling more than once, lest 9939 * we allow a malicious user to induce resource exhaustion. 9940 * (The ECBs that result from priming an enabling aren't 9941 * leaked -- but they also aren't deallocated until the 9942 * consumer state is destroyed.) 9943 */ 9944 if (enab->dten_primed) 9945 continue; 9946 9947 for (i = 0; i < enab->dten_ndesc; i++) { 9948 enab->dten_current = enab->dten_desc[i]; 9949 (void) dtrace_probe_enable(NULL, enab); 9950 } 9951 9952 enab->dten_primed = 1; 9953 } 9954 } 9955 9956 /* 9957 * Called to indicate that probes should be provided due to retained 9958 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 9959 * must take an initial lap through the enabling calling the dtps_provide() 9960 * entry point explicitly to allow for autocreated probes. 9961 */ 9962 static void 9963 dtrace_enabling_provide(dtrace_provider_t *prv) 9964 { 9965 int i, all = 0; 9966 dtrace_probedesc_t desc; 9967 9968 ASSERT(MUTEX_HELD(&dtrace_lock)); 9969 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 9970 9971 if (prv == NULL) { 9972 all = 1; 9973 prv = dtrace_provider; 9974 } 9975 9976 do { 9977 dtrace_enabling_t *enab = dtrace_retained; 9978 void *parg = prv->dtpv_arg; 9979 9980 for (; enab != NULL; enab = enab->dten_next) { 9981 for (i = 0; i < enab->dten_ndesc; i++) { 9982 desc = enab->dten_desc[i]->dted_probe; 9983 mutex_exit(&dtrace_lock); 9984 prv->dtpv_pops.dtps_provide(parg, &desc); 9985 mutex_enter(&dtrace_lock); 9986 } 9987 } 9988 } while (all && (prv = prv->dtpv_next) != NULL); 9989 9990 mutex_exit(&dtrace_lock); 9991 dtrace_probe_provide(NULL, all ? 
NULL : prv); 9992 mutex_enter(&dtrace_lock); 9993 } 9994 9995 /* 9996 * DTrace DOF Functions 9997 */ 9998 /*ARGSUSED*/ 9999 static void 10000 dtrace_dof_error(dof_hdr_t *dof, const char *str) 10001 { 10002 if (dtrace_err_verbose) 10003 cmn_err(CE_WARN, "failed to process DOF: %s", str); 10004 10005 #ifdef DTRACE_ERRDEBUG 10006 dtrace_errdebug(str); 10007 #endif 10008 } 10009 10010 /* 10011 * Create DOF out of a currently enabled state. Right now, we only create 10012 * DOF containing the run-time options -- but this could be expanded to create 10013 * complete DOF representing the enabled state. 10014 */ 10015 static dof_hdr_t * 10016 dtrace_dof_create(dtrace_state_t *state) 10017 { 10018 dof_hdr_t *dof; 10019 dof_sec_t *sec; 10020 dof_optdesc_t *opt; 10021 int i, len = sizeof (dof_hdr_t) + 10022 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 10023 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 10024 10025 ASSERT(MUTEX_HELD(&dtrace_lock)); 10026 10027 dof = kmem_zalloc(len, KM_SLEEP); 10028 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 10029 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 10030 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 10031 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 10032 10033 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 10034 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 10035 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 10036 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 10037 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 10038 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 10039 10040 dof->dofh_flags = 0; 10041 dof->dofh_hdrsize = sizeof (dof_hdr_t); 10042 dof->dofh_secsize = sizeof (dof_sec_t); 10043 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 10044 dof->dofh_secoff = sizeof (dof_hdr_t); 10045 dof->dofh_loadsz = len; 10046 dof->dofh_filesz = len; 10047 dof->dofh_pad = 0; 10048 10049 /* 10050 * Fill in the option section header... 10051 */ 10052 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 10053 sec->dofs_type = DOF_SECT_OPTDESC; 10054 sec->dofs_align = sizeof (uint64_t); 10055 sec->dofs_flags = DOF_SECF_LOAD; 10056 sec->dofs_entsize = sizeof (dof_optdesc_t); 10057 10058 opt = (dof_optdesc_t *)((uintptr_t)sec + 10059 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 10060 10061 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 10062 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 10063 10064 for (i = 0; i < DTRACEOPT_MAX; i++) { 10065 opt[i].dofo_option = i; 10066 opt[i].dofo_strtab = DOF_SECIDX_NONE; 10067 opt[i].dofo_value = state->dts_options[i]; 10068 } 10069 10070 return (dof); 10071 } 10072 10073 static dof_hdr_t * 10074 dtrace_dof_copyin(uintptr_t uarg, int *errp) 10075 { 10076 dof_hdr_t hdr, *dof; 10077 10078 ASSERT(!MUTEX_HELD(&dtrace_lock)); 10079 10080 /* 10081 * First, we're going to copyin() the sizeof (dof_hdr_t). 10082 */ 10083 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 10084 dtrace_dof_error(NULL, "failed to copyin DOF header"); 10085 *errp = EFAULT; 10086 return (NULL); 10087 } 10088 10089 /* 10090 * Now we'll allocate the entire DOF and copy it in -- provided 10091 * that the length isn't outrageous. 
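 * (This is the second stage of a two-stage copyin: the header read
 * above supplied dofh_loadsz, which is checked against
 * dtrace_dof_maxsize and the minimum header size before the full
 * object is allocated and copied in.)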
10092 */ 10093 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 10094 dtrace_dof_error(&hdr, "load size exceeds maximum"); 10095 *errp = E2BIG; 10096 return (NULL); 10097 } 10098 10099 if (hdr.dofh_loadsz < sizeof (hdr)) { 10100 dtrace_dof_error(&hdr, "invalid load size"); 10101 *errp = EINVAL; 10102 return (NULL); 10103 } 10104 10105 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 10106 10107 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) { 10108 kmem_free(dof, hdr.dofh_loadsz); 10109 *errp = EFAULT; 10110 return (NULL); 10111 } 10112 10113 return (dof); 10114 } 10115 10116 static dof_hdr_t * 10117 dtrace_dof_property(const char *name) 10118 { 10119 uchar_t *buf; 10120 uint64_t loadsz; 10121 unsigned int len, i; 10122 dof_hdr_t *dof; 10123 10124 /* 10125 * Unfortunately, array of values in .conf files are always (and 10126 * only) interpreted to be integer arrays. We must read our DOF 10127 * as an integer array, and then squeeze it into a byte array. 10128 */ 10129 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 10130 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 10131 return (NULL); 10132 10133 for (i = 0; i < len; i++) 10134 buf[i] = (uchar_t)(((int *)buf)[i]); 10135 10136 if (len < sizeof (dof_hdr_t)) { 10137 ddi_prop_free(buf); 10138 dtrace_dof_error(NULL, "truncated header"); 10139 return (NULL); 10140 } 10141 10142 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 10143 ddi_prop_free(buf); 10144 dtrace_dof_error(NULL, "truncated DOF"); 10145 return (NULL); 10146 } 10147 10148 if (loadsz >= dtrace_dof_maxsize) { 10149 ddi_prop_free(buf); 10150 dtrace_dof_error(NULL, "oversized DOF"); 10151 return (NULL); 10152 } 10153 10154 dof = kmem_alloc(loadsz, KM_SLEEP); 10155 bcopy(buf, dof, loadsz); 10156 ddi_prop_free(buf); 10157 10158 return (dof); 10159 } 10160 10161 static void 10162 dtrace_dof_destroy(dof_hdr_t *dof) 10163 { 10164 kmem_free(dof, dof->dofh_loadsz); 10165 } 10166 10167 /* 10168 * Return the dof_sec_t pointer corresponding to a given section index. If the 10169 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 10170 * a type other than DOF_SECT_NONE is specified, the header is checked against 10171 * this type and NULL is returned if the types do not match. 
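 * (Typical usage, as in dtrace_dof_probedesc() below: resolve the
 * string table that a probe description refers to via
 * dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab).)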
10172 */ 10173 static dof_sec_t * 10174 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 10175 { 10176 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 10177 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 10178 10179 if (i >= dof->dofh_secnum) { 10180 dtrace_dof_error(dof, "referenced section index is invalid"); 10181 return (NULL); 10182 } 10183 10184 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 10185 dtrace_dof_error(dof, "referenced section is not loadable"); 10186 return (NULL); 10187 } 10188 10189 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 10190 dtrace_dof_error(dof, "referenced section is the wrong type"); 10191 return (NULL); 10192 } 10193 10194 return (sec); 10195 } 10196 10197 static dtrace_probedesc_t * 10198 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 10199 { 10200 dof_probedesc_t *probe; 10201 dof_sec_t *strtab; 10202 uintptr_t daddr = (uintptr_t)dof; 10203 uintptr_t str; 10204 size_t size; 10205 10206 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 10207 dtrace_dof_error(dof, "invalid probe section"); 10208 return (NULL); 10209 } 10210 10211 if (sec->dofs_align != sizeof (dof_secidx_t)) { 10212 dtrace_dof_error(dof, "bad alignment in probe description"); 10213 return (NULL); 10214 } 10215 10216 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 10217 dtrace_dof_error(dof, "truncated probe description"); 10218 return (NULL); 10219 } 10220 10221 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 10222 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 10223 10224 if (strtab == NULL) 10225 return (NULL); 10226 10227 str = daddr + strtab->dofs_offset; 10228 size = strtab->dofs_size; 10229 10230 if (probe->dofp_provider >= strtab->dofs_size) { 10231 dtrace_dof_error(dof, "corrupt probe provider"); 10232 return (NULL); 10233 } 10234 10235 (void) strncpy(desc->dtpd_provider, 10236 (char *)(str + probe->dofp_provider), 10237 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 10238 10239 if (probe->dofp_mod >= strtab->dofs_size) { 10240 dtrace_dof_error(dof, "corrupt probe module"); 10241 return (NULL); 10242 } 10243 10244 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 10245 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 10246 10247 if (probe->dofp_func >= strtab->dofs_size) { 10248 dtrace_dof_error(dof, "corrupt probe function"); 10249 return (NULL); 10250 } 10251 10252 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 10253 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 10254 10255 if (probe->dofp_name >= strtab->dofs_size) { 10256 dtrace_dof_error(dof, "corrupt probe name"); 10257 return (NULL); 10258 } 10259 10260 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 10261 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 10262 10263 return (desc); 10264 } 10265 10266 static dtrace_difo_t * 10267 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 10268 cred_t *cr) 10269 { 10270 dtrace_difo_t *dp; 10271 size_t ttl = 0; 10272 dof_difohdr_t *dofd; 10273 uintptr_t daddr = (uintptr_t)dof; 10274 size_t max = dtrace_difo_maxsize; 10275 int i, l, n; 10276 10277 static const struct { 10278 int section; 10279 int bufoffs; 10280 int lenoffs; 10281 int entsize; 10282 int align; 10283 const char *msg; 10284 } difo[] = { 10285 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 10286 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 10287 sizeof (dif_instr_t), "multiple DIF sections" }, 
10288 10289 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 10290 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 10291 sizeof (uint64_t), "multiple integer tables" }, 10292 10293 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 10294 offsetof(dtrace_difo_t, dtdo_strlen), 0, 10295 sizeof (char), "multiple string tables" }, 10296 10297 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 10298 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 10299 sizeof (uint_t), "multiple variable tables" }, 10300 10301 { DOF_SECT_NONE, 0, 0, 0, NULL } 10302 }; 10303 10304 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 10305 dtrace_dof_error(dof, "invalid DIFO header section"); 10306 return (NULL); 10307 } 10308 10309 if (sec->dofs_align != sizeof (dof_secidx_t)) { 10310 dtrace_dof_error(dof, "bad alignment in DIFO header"); 10311 return (NULL); 10312 } 10313 10314 if (sec->dofs_size < sizeof (dof_difohdr_t) || 10315 sec->dofs_size % sizeof (dof_secidx_t)) { 10316 dtrace_dof_error(dof, "bad size in DIFO header"); 10317 return (NULL); 10318 } 10319 10320 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 10321 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 10322 10323 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 10324 dp->dtdo_rtype = dofd->dofd_rtype; 10325 10326 for (l = 0; l < n; l++) { 10327 dof_sec_t *subsec; 10328 void **bufp; 10329 uint32_t *lenp; 10330 10331 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 10332 dofd->dofd_links[l])) == NULL) 10333 goto err; /* invalid section link */ 10334 10335 if (ttl + subsec->dofs_size > max) { 10336 dtrace_dof_error(dof, "exceeds maximum size"); 10337 goto err; 10338 } 10339 10340 ttl += subsec->dofs_size; 10341 10342 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 10343 if (subsec->dofs_type != difo[i].section) 10344 continue; 10345 10346 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 10347 dtrace_dof_error(dof, "section not loaded"); 10348 goto err; 10349 } 10350 10351 if (subsec->dofs_align != difo[i].align) { 10352 dtrace_dof_error(dof, "bad alignment"); 10353 goto err; 10354 } 10355 10356 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 10357 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 10358 10359 if (*bufp != NULL) { 10360 dtrace_dof_error(dof, difo[i].msg); 10361 goto err; 10362 } 10363 10364 if (difo[i].entsize != subsec->dofs_entsize) { 10365 dtrace_dof_error(dof, "entry size mismatch"); 10366 goto err; 10367 } 10368 10369 if (subsec->dofs_entsize != 0 && 10370 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 10371 dtrace_dof_error(dof, "corrupt entry size"); 10372 goto err; 10373 } 10374 10375 *lenp = subsec->dofs_size; 10376 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 10377 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 10378 *bufp, subsec->dofs_size); 10379 10380 if (subsec->dofs_entsize != 0) 10381 *lenp /= subsec->dofs_entsize; 10382 10383 break; 10384 } 10385 10386 /* 10387 * If we encounter a loadable DIFO sub-section that is not 10388 * known to us, assume this is a broken program and fail. 10389 */ 10390 if (difo[i].section == DOF_SECT_NONE && 10391 (subsec->dofs_flags & DOF_SECF_LOAD)) { 10392 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 10393 goto err; 10394 } 10395 } 10396 10397 if (dp->dtdo_buf == NULL) { 10398 /* 10399 * We can't have a DIF object without DIF text. 
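 * (That is, the loop above must have populated dtdo_buf from at least one
 * DOF_SECT_DIF subsection.  Illustratively, a 40-byte DIF subsection with an
 * entry size of sizeof (dif_instr_t) would have yielded dtdo_len == 10
 * instructions here.)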
10400 */ 10401 dtrace_dof_error(dof, "missing DIF text"); 10402 goto err; 10403 } 10404 10405 /* 10406 * Before we validate the DIF object, run through the variable table 10407 * looking for the strings -- if any of their sizes are zero, we'll set 10408 * their size to be the system-wide default string size. Note that 10409 * this should _not_ happen if the "strsize" option has been set -- 10410 * in this case, the compiler should have set the size to reflect the 10411 * setting of the option. 10412 */ 10413 for (i = 0; i < dp->dtdo_varlen; i++) { 10414 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10415 dtrace_diftype_t *t = &v->dtdv_type; 10416 10417 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 10418 continue; 10419 10420 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 10421 t->dtdt_size = dtrace_strsize_default; 10422 } 10423 10424 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 10425 goto err; 10426 10427 dtrace_difo_init(dp, vstate); 10428 return (dp); 10429 10430 err: 10431 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 10432 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 10433 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 10434 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 10435 10436 kmem_free(dp, sizeof (dtrace_difo_t)); 10437 return (NULL); 10438 } 10439 10440 static dtrace_predicate_t * 10441 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 10442 cred_t *cr) 10443 { 10444 dtrace_difo_t *dp; 10445 10446 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 10447 return (NULL); 10448 10449 return (dtrace_predicate_create(dp)); 10450 } 10451 10452 static dtrace_actdesc_t * 10453 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 10454 cred_t *cr) 10455 { 10456 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 10457 dof_actdesc_t *desc; 10458 dof_sec_t *difosec; 10459 size_t offs; 10460 uintptr_t daddr = (uintptr_t)dof; 10461 uint64_t arg; 10462 dtrace_actkind_t kind; 10463 10464 if (sec->dofs_type != DOF_SECT_ACTDESC) { 10465 dtrace_dof_error(dof, "invalid action section"); 10466 return (NULL); 10467 } 10468 10469 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 10470 dtrace_dof_error(dof, "truncated action description"); 10471 return (NULL); 10472 } 10473 10474 if (sec->dofs_align != sizeof (uint64_t)) { 10475 dtrace_dof_error(dof, "bad alignment in action description"); 10476 return (NULL); 10477 } 10478 10479 if (sec->dofs_size < sec->dofs_entsize) { 10480 dtrace_dof_error(dof, "section entry size exceeds total size"); 10481 return (NULL); 10482 } 10483 10484 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 10485 dtrace_dof_error(dof, "bad entry size in action description"); 10486 return (NULL); 10487 } 10488 10489 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 10490 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 10491 return (NULL); 10492 } 10493 10494 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 10495 desc = (dof_actdesc_t *)(daddr + 10496 (uintptr_t)sec->dofs_offset + offs); 10497 kind = (dtrace_actkind_t)desc->dofa_kind; 10498 10499 if (DTRACEACT_ISPRINTFLIKE(kind) && 10500 (kind != DTRACEACT_PRINTA || 10501 desc->dofa_strtab != DOF_SECIDX_NONE)) { 10502 dof_sec_t *strtab; 10503 char *str, *fmt; 10504 uint64_t i; 10505 10506 /* 10507 * printf()-like actions must have a format string.
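 * The format lives in the string table named by dofa_strtab, starting at
 * offset dofa_arg; the scan below simply looks for its terminating NUL and
 * rejects a missing or empty string.  Illustratively, an action compiled
 * from printf("%d\n", x) would arrive with dofa_arg pointing at the
 * "%d\n" bytes in that table.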
10508 */ 10509 if ((strtab = dtrace_dof_sect(dof, 10510 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 10511 goto err; 10512 10513 str = (char *)((uintptr_t)dof + 10514 (uintptr_t)strtab->dofs_offset); 10515 10516 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 10517 if (str[i] == '\0') 10518 break; 10519 } 10520 10521 if (i >= strtab->dofs_size) { 10522 dtrace_dof_error(dof, "bogus format string"); 10523 goto err; 10524 } 10525 10526 if (i == desc->dofa_arg) { 10527 dtrace_dof_error(dof, "empty format string"); 10528 goto err; 10529 } 10530 10531 i -= desc->dofa_arg; 10532 fmt = kmem_alloc(i + 1, KM_SLEEP); 10533 bcopy(&str[desc->dofa_arg], fmt, i + 1); 10534 arg = (uint64_t)(uintptr_t)fmt; 10535 } else { 10536 if (kind == DTRACEACT_PRINTA) { 10537 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 10538 arg = 0; 10539 } else { 10540 arg = desc->dofa_arg; 10541 } 10542 } 10543 10544 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 10545 desc->dofa_uarg, arg); 10546 10547 if (last != NULL) { 10548 last->dtad_next = act; 10549 } else { 10550 first = act; 10551 } 10552 10553 last = act; 10554 10555 if (desc->dofa_difo == DOF_SECIDX_NONE) 10556 continue; 10557 10558 if ((difosec = dtrace_dof_sect(dof, 10559 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 10560 goto err; 10561 10562 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 10563 10564 if (act->dtad_difo == NULL) 10565 goto err; 10566 } 10567 10568 ASSERT(first != NULL); 10569 return (first); 10570 10571 err: 10572 for (act = first; act != NULL; act = next) { 10573 next = act->dtad_next; 10574 dtrace_actdesc_release(act, vstate); 10575 } 10576 10577 return (NULL); 10578 } 10579 10580 static dtrace_ecbdesc_t * 10581 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 10582 cred_t *cr) 10583 { 10584 dtrace_ecbdesc_t *ep; 10585 dof_ecbdesc_t *ecb; 10586 dtrace_probedesc_t *desc; 10587 dtrace_predicate_t *pred = NULL; 10588 10589 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 10590 dtrace_dof_error(dof, "truncated ECB description"); 10591 return (NULL); 10592 } 10593 10594 if (sec->dofs_align != sizeof (uint64_t)) { 10595 dtrace_dof_error(dof, "bad alignment in ECB description"); 10596 return (NULL); 10597 } 10598 10599 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 10600 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 10601 10602 if (sec == NULL) 10603 return (NULL); 10604 10605 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 10606 ep->dted_uarg = ecb->dofe_uarg; 10607 desc = &ep->dted_probe; 10608 10609 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 10610 goto err; 10611 10612 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 10613 if ((sec = dtrace_dof_sect(dof, 10614 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 10615 goto err; 10616 10617 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 10618 goto err; 10619 10620 ep->dted_pred.dtpdd_predicate = pred; 10621 } 10622 10623 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 10624 if ((sec = dtrace_dof_sect(dof, 10625 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 10626 goto err; 10627 10628 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 10629 10630 if (ep->dted_action == NULL) 10631 goto err; 10632 } 10633 10634 return (ep); 10635 10636 err: 10637 if (pred != NULL) 10638 dtrace_predicate_release(pred, vstate); 10639 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 10640 return (NULL); 10641 } 10642 10643 /* 10644 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 10645 * 
specified DOF. At present, this amounts to simply adding 'ubase' to the 10646 * site of any user SETX relocations to account for load object base address. 10647 * In the future, if we need other relocations, this function can be extended. 10648 */ 10649 static int 10650 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 10651 { 10652 uintptr_t daddr = (uintptr_t)dof; 10653 dof_relohdr_t *dofr = 10654 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 10655 dof_sec_t *ss, *rs, *ts; 10656 dof_relodesc_t *r; 10657 uint_t i, n; 10658 10659 if (sec->dofs_size < sizeof (dof_relohdr_t) || 10660 sec->dofs_align != sizeof (dof_secidx_t)) { 10661 dtrace_dof_error(dof, "invalid relocation header"); 10662 return (-1); 10663 } 10664 10665 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 10666 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 10667 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 10668 10669 if (ss == NULL || rs == NULL || ts == NULL) 10670 return (-1); /* dtrace_dof_error() has been called already */ 10671 10672 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 10673 rs->dofs_align != sizeof (uint64_t)) { 10674 dtrace_dof_error(dof, "invalid relocation section"); 10675 return (-1); 10676 } 10677 10678 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 10679 n = rs->dofs_size / rs->dofs_entsize; 10680 10681 for (i = 0; i < n; i++) { 10682 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 10683 10684 switch (r->dofr_type) { 10685 case DOF_RELO_NONE: 10686 break; 10687 case DOF_RELO_SETX: 10688 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 10689 sizeof (uint64_t) > ts->dofs_size) { 10690 dtrace_dof_error(dof, "bad relocation offset"); 10691 return (-1); 10692 } 10693 10694 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 10695 dtrace_dof_error(dof, "misaligned setx relo"); 10696 return (-1); 10697 } 10698 10699 *(uint64_t *)taddr += ubase; 10700 break; 10701 default: 10702 dtrace_dof_error(dof, "invalid relocation type"); 10703 return (-1); 10704 } 10705 10706 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 10707 } 10708 10709 return (0); 10710 } 10711 10712 /* 10713 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 10714 * header: it should be at the front of a memory region that is at least 10715 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 10716 * size. It need not be validated in any other way. 10717 */ 10718 static int 10719 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 10720 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 10721 { 10722 uint64_t len = dof->dofh_loadsz, seclen; 10723 uintptr_t daddr = (uintptr_t)dof; 10724 dtrace_ecbdesc_t *ep; 10725 dtrace_enabling_t *enab; 10726 uint_t i; 10727 10728 ASSERT(MUTEX_HELD(&dtrace_lock)); 10729 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 10730 10731 /* 10732 * Check the DOF header identification bytes. In addition to checking 10733 * valid settings, we also verify that unused bits/bytes are zeroed so 10734 * we can use them later without fear of regressing existing binaries. 
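 * A well-formed native DOF image is thus expected to carry, for example,
 * DOF_MODEL_LP64 (or DOF_MODEL_ILP32 for a 32-bit consumer), DOF_ENCODE_NATIVE,
 * DOF_VERSION_1 or DOF_VERSION_2, and DIF_VERSION_2 in its ident bytes, with
 * every byte from DOF_ID_PAD onward zeroed.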
10735 */ 10736 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 10737 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 10738 dtrace_dof_error(dof, "DOF magic string mismatch"); 10739 return (-1); 10740 } 10741 10742 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 10743 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 10744 dtrace_dof_error(dof, "DOF has invalid data model"); 10745 return (-1); 10746 } 10747 10748 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 10749 dtrace_dof_error(dof, "DOF encoding mismatch"); 10750 return (-1); 10751 } 10752 10753 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 10754 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 10755 dtrace_dof_error(dof, "DOF version mismatch"); 10756 return (-1); 10757 } 10758 10759 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 10760 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 10761 return (-1); 10762 } 10763 10764 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 10765 dtrace_dof_error(dof, "DOF uses too many integer registers"); 10766 return (-1); 10767 } 10768 10769 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 10770 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 10771 return (-1); 10772 } 10773 10774 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 10775 if (dof->dofh_ident[i] != 0) { 10776 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 10777 return (-1); 10778 } 10779 } 10780 10781 if (dof->dofh_flags & ~DOF_FL_VALID) { 10782 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 10783 return (-1); 10784 } 10785 10786 if (dof->dofh_secsize == 0) { 10787 dtrace_dof_error(dof, "zero section header size"); 10788 return (-1); 10789 } 10790 10791 /* 10792 * Check that the section headers don't exceed the amount of DOF 10793 * data. Note that we cast the section size and number of sections 10794 * to uint64_t's to prevent possible overflow in the multiplication. 10795 */ 10796 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 10797 10798 if (dof->dofh_secoff > len || seclen > len || 10799 dof->dofh_secoff + seclen > len) { 10800 dtrace_dof_error(dof, "truncated section headers"); 10801 return (-1); 10802 } 10803 10804 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 10805 dtrace_dof_error(dof, "misaligned section headers"); 10806 return (-1); 10807 } 10808 10809 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 10810 dtrace_dof_error(dof, "misaligned section size"); 10811 return (-1); 10812 } 10813 10814 /* 10815 * Take an initial pass through the section headers to be sure that 10816 * the headers don't have stray offsets. If the 'noprobes' flag is 10817 * set, do not permit sections relating to providers, probes, or args. 
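 * (Provider, probe, argument and offset sections only make sense in helper
 * DOF that describes user-level providers; enabling-only callers such as
 * dtrace_anon_property() therefore pass 'noprobes', and any such section is
 * rejected outright below.)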
10818 */ 10819 for (i = 0; i < dof->dofh_secnum; i++) { 10820 dof_sec_t *sec = (dof_sec_t *)(daddr + 10821 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 10822 10823 if (noprobes) { 10824 switch (sec->dofs_type) { 10825 case DOF_SECT_PROVIDER: 10826 case DOF_SECT_PROBES: 10827 case DOF_SECT_PRARGS: 10828 case DOF_SECT_PROFFS: 10829 dtrace_dof_error(dof, "illegal sections " 10830 "for enabling"); 10831 return (-1); 10832 } 10833 } 10834 10835 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 10836 continue; /* just ignore non-loadable sections */ 10837 10838 if (sec->dofs_align & (sec->dofs_align - 1)) { 10839 dtrace_dof_error(dof, "bad section alignment"); 10840 return (-1); 10841 } 10842 10843 if (sec->dofs_offset & (sec->dofs_align - 1)) { 10844 dtrace_dof_error(dof, "misaligned section"); 10845 return (-1); 10846 } 10847 10848 if (sec->dofs_offset > len || sec->dofs_size > len || 10849 sec->dofs_offset + sec->dofs_size > len) { 10850 dtrace_dof_error(dof, "corrupt section header"); 10851 return (-1); 10852 } 10853 10854 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 10855 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 10856 dtrace_dof_error(dof, "non-terminating string table"); 10857 return (-1); 10858 } 10859 } 10860 10861 /* 10862 * Take a second pass through the sections and locate and perform any 10863 * relocations that are present. We do this after the first pass to 10864 * be sure that all sections have had their headers validated. 10865 */ 10866 for (i = 0; i < dof->dofh_secnum; i++) { 10867 dof_sec_t *sec = (dof_sec_t *)(daddr + 10868 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 10869 10870 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 10871 continue; /* skip sections that are not loadable */ 10872 10873 switch (sec->dofs_type) { 10874 case DOF_SECT_URELHDR: 10875 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 10876 return (-1); 10877 break; 10878 } 10879 } 10880 10881 if ((enab = *enabp) == NULL) 10882 enab = *enabp = dtrace_enabling_create(vstate); 10883 10884 for (i = 0; i < dof->dofh_secnum; i++) { 10885 dof_sec_t *sec = (dof_sec_t *)(daddr + 10886 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 10887 10888 if (sec->dofs_type != DOF_SECT_ECBDESC) 10889 continue; 10890 10891 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 10892 dtrace_enabling_destroy(enab); 10893 *enabp = NULL; 10894 return (-1); 10895 } 10896 10897 dtrace_enabling_add(enab, ep); 10898 } 10899 10900 return (0); 10901 } 10902 10903 /* 10904 * Process DOF for any options. This routine assumes that the DOF has been 10905 * at least processed by dtrace_dof_slurp(). 
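 * Options arrive as dof_optdesc_t entries in DOF_SECT_OPTDESC sections.
 * Illustratively, a consumer run with 'bufsize=4m' would present an entry
 * whose dofo_option is DTRACEOPT_BUFSIZE and whose dofo_value is 4194304;
 * each such pair is simply handed to dtrace_state_option().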
10906 */ 10907 static int 10908 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 10909 { 10910 int i, rval; 10911 uint32_t entsize; 10912 size_t offs; 10913 dof_optdesc_t *desc; 10914 10915 for (i = 0; i < dof->dofh_secnum; i++) { 10916 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 10917 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 10918 10919 if (sec->dofs_type != DOF_SECT_OPTDESC) 10920 continue; 10921 10922 if (sec->dofs_align != sizeof (uint64_t)) { 10923 dtrace_dof_error(dof, "bad alignment in " 10924 "option description"); 10925 return (EINVAL); 10926 } 10927 10928 if ((entsize = sec->dofs_entsize) == 0) { 10929 dtrace_dof_error(dof, "zeroed option entry size"); 10930 return (EINVAL); 10931 } 10932 10933 if (entsize < sizeof (dof_optdesc_t)) { 10934 dtrace_dof_error(dof, "bad option entry size"); 10935 return (EINVAL); 10936 } 10937 10938 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 10939 desc = (dof_optdesc_t *)((uintptr_t)dof + 10940 (uintptr_t)sec->dofs_offset + offs); 10941 10942 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 10943 dtrace_dof_error(dof, "non-zero option string"); 10944 return (EINVAL); 10945 } 10946 10947 if (desc->dofo_value == DTRACEOPT_UNSET) { 10948 dtrace_dof_error(dof, "unset option"); 10949 return (EINVAL); 10950 } 10951 10952 if ((rval = dtrace_state_option(state, 10953 desc->dofo_option, desc->dofo_value)) != 0) { 10954 dtrace_dof_error(dof, "rejected option"); 10955 return (rval); 10956 } 10957 } 10958 } 10959 10960 return (0); 10961 } 10962 10963 /* 10964 * DTrace Consumer State Functions 10965 */ 10966 int 10967 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 10968 { 10969 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 10970 void *base; 10971 uintptr_t limit; 10972 dtrace_dynvar_t *dvar, *next, *start; 10973 int i; 10974 10975 ASSERT(MUTEX_HELD(&dtrace_lock)); 10976 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 10977 10978 bzero(dstate, sizeof (dtrace_dstate_t)); 10979 10980 if ((dstate->dtds_chunksize = chunksize) == 0) 10981 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 10982 10983 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 10984 size = min; 10985 10986 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10987 return (ENOMEM); 10988 10989 dstate->dtds_size = size; 10990 dstate->dtds_base = base; 10991 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 10992 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 10993 10994 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 10995 10996 if (hashsize != 1 && (hashsize & 1)) 10997 hashsize--; 10998 10999 dstate->dtds_hashsize = hashsize; 11000 dstate->dtds_hash = dstate->dtds_base; 11001 11002 /* 11003 * Determine number of active CPUs. Divide free list evenly among 11004 * active CPUs. 11005 */ 11006 start = (dtrace_dynvar_t *) 11007 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 11008 limit = (uintptr_t)base + size; 11009 11010 maxper = (limit - (uintptr_t)start) / NCPU; 11011 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 11012 11013 for (i = 0; i < NCPU; i++) { 11014 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 11015 11016 /* 11017 * If we don't even have enough chunks to make it once through 11018 * NCPUs, we're just going to allocate everything to the first 11019 * CPU. And if we're on the last CPU, we're going to allocate 11020 * whatever is left over. 
In either case, we set the limit to 11021 * be the limit of the dynamic variable space. 11022 */ 11023 if (maxper == 0 || i == NCPU - 1) { 11024 limit = (uintptr_t)base + size; 11025 start = NULL; 11026 } else { 11027 limit = (uintptr_t)start + maxper; 11028 start = (dtrace_dynvar_t *)limit; 11029 } 11030 11031 ASSERT(limit <= (uintptr_t)base + size); 11032 11033 for (;;) { 11034 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 11035 dstate->dtds_chunksize); 11036 11037 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 11038 break; 11039 11040 dvar->dtdv_next = next; 11041 dvar = next; 11042 } 11043 11044 if (maxper == 0) 11045 break; 11046 } 11047 11048 return (0); 11049 } 11050 11051 void 11052 dtrace_dstate_fini(dtrace_dstate_t *dstate) 11053 { 11054 ASSERT(MUTEX_HELD(&cpu_lock)); 11055 11056 if (dstate->dtds_base == NULL) 11057 return; 11058 11059 kmem_free(dstate->dtds_base, dstate->dtds_size); 11060 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 11061 } 11062 11063 static void 11064 dtrace_vstate_fini(dtrace_vstate_t *vstate) 11065 { 11066 /* 11067 * Logical XOR, where are you? 11068 */ 11069 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 11070 11071 if (vstate->dtvs_nglobals > 0) { 11072 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 11073 sizeof (dtrace_statvar_t *)); 11074 } 11075 11076 if (vstate->dtvs_ntlocals > 0) { 11077 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 11078 sizeof (dtrace_difv_t)); 11079 } 11080 11081 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 11082 11083 if (vstate->dtvs_nlocals > 0) { 11084 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 11085 sizeof (dtrace_statvar_t *)); 11086 } 11087 } 11088 11089 static void 11090 dtrace_state_clean(dtrace_state_t *state) 11091 { 11092 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 11093 return; 11094 11095 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 11096 dtrace_speculation_clean(state); 11097 } 11098 11099 static void 11100 dtrace_state_deadman(dtrace_state_t *state) 11101 { 11102 hrtime_t now; 11103 11104 dtrace_sync(); 11105 11106 now = dtrace_gethrtime(); 11107 11108 if (state != dtrace_anon.dta_state && 11109 now - state->dts_laststatus >= dtrace_deadman_user) 11110 return; 11111 11112 /* 11113 * We must be sure that dts_alive never appears to be less than the 11114 * value upon entry to dtrace_state_deadman(), and because we lack a 11115 * dtrace_cas64(), we cannot store to it atomically. We thus instead 11116 * store INT64_MAX to it, followed by a memory barrier, followed by 11117 * the new value. This assures that dts_alive never appears to be 11118 * less than its true value, regardless of the order in which the 11119 * stores to the underlying storage are issued. 
11120 */ 11121 state->dts_alive = INT64_MAX; 11122 dtrace_membar_producer(); 11123 state->dts_alive = now; 11124 } 11125 11126 dtrace_state_t * 11127 dtrace_state_create(dev_t *devp, cred_t *cr) 11128 { 11129 minor_t minor; 11130 major_t major; 11131 char c[30]; 11132 dtrace_state_t *state; 11133 dtrace_optval_t *opt; 11134 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 11135 11136 ASSERT(MUTEX_HELD(&dtrace_lock)); 11137 ASSERT(MUTEX_HELD(&cpu_lock)); 11138 11139 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 11140 VM_BESTFIT | VM_SLEEP); 11141 11142 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 11143 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 11144 return (NULL); 11145 } 11146 11147 state = ddi_get_soft_state(dtrace_softstate, minor); 11148 state->dts_epid = DTRACE_EPIDNONE + 1; 11149 11150 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor); 11151 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 11152 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 11153 11154 if (devp != NULL) { 11155 major = getemajor(*devp); 11156 } else { 11157 major = ddi_driver_major(dtrace_devi); 11158 } 11159 11160 state->dts_dev = makedevice(major, minor); 11161 11162 if (devp != NULL) 11163 *devp = state->dts_dev; 11164 11165 /* 11166 * We allocate NCPU buffers. On the one hand, this can be quite 11167 * a bit of memory per instance (nearly 36K on a Starcat). On the 11168 * other hand, it saves an additional memory reference in the probe 11169 * path. 11170 */ 11171 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 11172 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 11173 state->dts_cleaner = CYCLIC_NONE; 11174 state->dts_deadman = CYCLIC_NONE; 11175 state->dts_vstate.dtvs_state = state; 11176 11177 for (i = 0; i < DTRACEOPT_MAX; i++) 11178 state->dts_options[i] = DTRACEOPT_UNSET; 11179 11180 /* 11181 * Set the default options. 11182 */ 11183 opt = state->dts_options; 11184 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 11185 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 11186 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 11187 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 11188 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 11189 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 11190 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 11191 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 11192 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 11193 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 11194 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 11195 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 11196 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 11197 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 11198 11199 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 11200 11201 /* 11202 * Depending on the user credentials, we set flag bits which alter probe 11203 * visibility or the amount of destructiveness allowed. In the case of 11204 * actual anonymous tracing, or the possession of all privileges, all of 11205 * the normal checks are bypassed. 11206 */ 11207 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 11208 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 11209 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 11210 } else { 11211 /* 11212 * Set up the credentials for this instantiation. 
We take a 11213 * hold on the credential to prevent it from disappearing on 11214 * us; this in turn prevents the zone_t referenced by this 11215 * credential from disappearing. This means that we can 11216 * examine the credential and the zone from probe context. 11217 */ 11218 crhold(cr); 11219 state->dts_cred.dcr_cred = cr; 11220 11221 /* 11222 * CRA_PROC means "we have *some* privilege for dtrace" and 11223 * unlocks the use of variables like pid, zonename, etc. 11224 */ 11225 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 11226 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 11227 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 11228 } 11229 11230 /* 11231 * dtrace_user allows use of syscall and profile providers. 11232 * If the user also has proc_owner and/or proc_zone, we 11233 * extend the scope to include additional visibility and 11234 * destructive power. 11235 */ 11236 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 11237 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 11238 state->dts_cred.dcr_visible |= 11239 DTRACE_CRV_ALLPROC; 11240 11241 state->dts_cred.dcr_action |= 11242 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 11243 } 11244 11245 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 11246 state->dts_cred.dcr_visible |= 11247 DTRACE_CRV_ALLZONE; 11248 11249 state->dts_cred.dcr_action |= 11250 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 11251 } 11252 11253 /* 11254 * If we have all privs in whatever zone this is, 11255 * we can do destructive things to processes which 11256 * have altered credentials. 11257 */ 11258 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 11259 cr->cr_zone->zone_privset)) { 11260 state->dts_cred.dcr_action |= 11261 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 11262 } 11263 } 11264 11265 /* 11266 * Holding the dtrace_kernel privilege also implies that 11267 * the user has the dtrace_user privilege from a visibility 11268 * perspective. But without further privileges, some 11269 * destructive actions are not available. 11270 */ 11271 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 11272 /* 11273 * Make all probes in all zones visible. However, 11274 * this doesn't mean that all actions become available 11275 * to all zones. 11276 */ 11277 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 11278 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 11279 11280 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 11281 DTRACE_CRA_PROC; 11282 /* 11283 * Holding proc_owner means that destructive actions 11284 * for *this* zone are allowed. 11285 */ 11286 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 11287 state->dts_cred.dcr_action |= 11288 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 11289 11290 /* 11291 * Holding proc_zone means that destructive actions 11292 * for this user/group ID in all zones is allowed. 11293 */ 11294 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 11295 state->dts_cred.dcr_action |= 11296 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 11297 11298 /* 11299 * If we have all privs in whatever zone this is, 11300 * we can do destructive things to processes which 11301 * have altered credentials. 11302 */ 11303 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 11304 cr->cr_zone->zone_privset)) { 11305 state->dts_cred.dcr_action |= 11306 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 11307 } 11308 } 11309 11310 /* 11311 * Holding the dtrace_proc privilege gives control over fasttrap 11312 * and pid providers. We need to grant wider destructive 11313 * privileges in the event that the user has proc_owner and/or 11314 * proc_zone. 
11315 */ 11316 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 11317 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 11318 state->dts_cred.dcr_action |= 11319 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 11320 11321 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 11322 state->dts_cred.dcr_action |= 11323 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 11324 } 11325 } 11326 11327 return (state); 11328 } 11329 11330 static int 11331 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 11332 { 11333 dtrace_optval_t *opt = state->dts_options, size; 11334 processorid_t cpu; 11335 int flags = 0, rval; 11336 11337 ASSERT(MUTEX_HELD(&dtrace_lock)); 11338 ASSERT(MUTEX_HELD(&cpu_lock)); 11339 ASSERT(which < DTRACEOPT_MAX); 11340 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 11341 (state == dtrace_anon.dta_state && 11342 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 11343 11344 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 11345 return (0); 11346 11347 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 11348 cpu = opt[DTRACEOPT_CPU]; 11349 11350 if (which == DTRACEOPT_SPECSIZE) 11351 flags |= DTRACEBUF_NOSWITCH; 11352 11353 if (which == DTRACEOPT_BUFSIZE) { 11354 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 11355 flags |= DTRACEBUF_RING; 11356 11357 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 11358 flags |= DTRACEBUF_FILL; 11359 11360 flags |= DTRACEBUF_INACTIVE; 11361 } 11362 11363 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) { 11364 /* 11365 * The size must be 8-byte aligned. If the size is not 8-byte 11366 * aligned, drop it down by the difference. 11367 */ 11368 if (size & (sizeof (uint64_t) - 1)) 11369 size -= size & (sizeof (uint64_t) - 1); 11370 11371 if (size < state->dts_reserve) { 11372 /* 11373 * Buffers always must be large enough to accommodate 11374 * their prereserved space. We return E2BIG instead 11375 * of ENOMEM in this case to allow for user-level 11376 * software to differentiate the cases. 11377 */ 11378 return (E2BIG); 11379 } 11380 11381 rval = dtrace_buffer_alloc(buf, size, flags, cpu); 11382 11383 if (rval != ENOMEM) { 11384 opt[which] = size; 11385 return (rval); 11386 } 11387 11388 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 11389 return (rval); 11390 } 11391 11392 return (ENOMEM); 11393 } 11394 11395 static int 11396 dtrace_state_buffers(dtrace_state_t *state) 11397 { 11398 dtrace_speculation_t *spec = state->dts_speculations; 11399 int rval, i; 11400 11401 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 11402 DTRACEOPT_BUFSIZE)) != 0) 11403 return (rval); 11404 11405 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 11406 DTRACEOPT_AGGSIZE)) != 0) 11407 return (rval); 11408 11409 for (i = 0; i < state->dts_nspeculations; i++) { 11410 if ((rval = dtrace_state_buffer(state, 11411 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 11412 return (rval); 11413 } 11414 11415 return (0); 11416 } 11417 11418 static void 11419 dtrace_state_prereserve(dtrace_state_t *state) 11420 { 11421 dtrace_ecb_t *ecb; 11422 dtrace_probe_t *probe; 11423 11424 state->dts_reserve = 0; 11425 11426 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 11427 return; 11428 11429 /* 11430 * If our buffer policy is a "fill" buffer policy, we need to set the 11431 * prereserved space to be the space required by the END probes. 
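 * Illustratively, a state with two END-probe ECBs needing 64 and 32 bytes
 * of record space, each with 8-byte alignment, would reserve
 * (64 + 8) + (32 + 8) = 112 bytes via the dte_needed + dte_alignment sum
 * below.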
11432 */ 11433 probe = dtrace_probes[dtrace_probeid_end - 1]; 11434 ASSERT(probe != NULL); 11435 11436 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 11437 if (ecb->dte_state != state) 11438 continue; 11439 11440 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 11441 } 11442 } 11443 11444 static int 11445 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 11446 { 11447 dtrace_optval_t *opt = state->dts_options, sz, nspec; 11448 dtrace_speculation_t *spec; 11449 dtrace_buffer_t *buf; 11450 cyc_handler_t hdlr; 11451 cyc_time_t when; 11452 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 11453 dtrace_icookie_t cookie; 11454 11455 mutex_enter(&cpu_lock); 11456 mutex_enter(&dtrace_lock); 11457 11458 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 11459 rval = EBUSY; 11460 goto out; 11461 } 11462 11463 /* 11464 * Before we can perform any checks, we must prime all of the 11465 * retained enablings that correspond to this state. 11466 */ 11467 dtrace_enabling_prime(state); 11468 11469 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 11470 rval = EACCES; 11471 goto out; 11472 } 11473 11474 dtrace_state_prereserve(state); 11475 11476 /* 11477 * Now we want to try to allocate our speculations. 11478 * We do not automatically resize the number of speculations; if 11479 * this fails, we will fail the operation. 11480 */ 11481 nspec = opt[DTRACEOPT_NSPEC]; 11482 ASSERT(nspec != DTRACEOPT_UNSET); 11483 11484 if (nspec > INT_MAX) { 11485 rval = ENOMEM; 11486 goto out; 11487 } 11488 11489 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP); 11490 11491 if (spec == NULL) { 11492 rval = ENOMEM; 11493 goto out; 11494 } 11495 11496 state->dts_speculations = spec; 11497 state->dts_nspeculations = (int)nspec; 11498 11499 for (i = 0; i < nspec; i++) { 11500 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) { 11501 rval = ENOMEM; 11502 goto err; 11503 } 11504 11505 spec[i].dtsp_buffer = buf; 11506 } 11507 11508 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 11509 if (dtrace_anon.dta_state == NULL) { 11510 rval = ENOENT; 11511 goto out; 11512 } 11513 11514 if (state->dts_necbs != 0) { 11515 rval = EALREADY; 11516 goto out; 11517 } 11518 11519 state->dts_anon = dtrace_anon_grab(); 11520 ASSERT(state->dts_anon != NULL); 11521 state = state->dts_anon; 11522 11523 /* 11524 * We want "grabanon" to be set in the grabbed state, so we'll 11525 * copy that option value from the grabbing state into the 11526 * grabbed state. 11527 */ 11528 state->dts_options[DTRACEOPT_GRABANON] = 11529 opt[DTRACEOPT_GRABANON]; 11530 11531 *cpu = dtrace_anon.dta_beganon; 11532 11533 /* 11534 * If the anonymous state is active (as it almost certainly 11535 * is if the anonymous enabling ultimately matched anything), 11536 * we don't allow any further option processing -- but we 11537 * don't return failure. 11538 */ 11539 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 11540 goto out; 11541 } 11542 11543 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 11544 opt[DTRACEOPT_AGGSIZE] != 0) { 11545 if (state->dts_aggregations == NULL) { 11546 /* 11547 * We're not going to create an aggregation buffer 11548 * because we don't have any ECBs that contain 11549 * aggregations -- set this option to 0. 11550 */ 11551 opt[DTRACEOPT_AGGSIZE] = 0; 11552 } else { 11553 /* 11554 * If we have an aggregation buffer, we must also have 11555 * a buffer to use as scratch.
11556 */ 11557 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 11558 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 11559 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 11560 } 11561 } 11562 } 11563 11564 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 11565 opt[DTRACEOPT_SPECSIZE] != 0) { 11566 if (!state->dts_speculates) { 11567 /* 11568 * We're not going to create speculation buffers 11569 * because we don't have any ECBs that actually 11570 * speculate -- set the speculation size to 0. 11571 */ 11572 opt[DTRACEOPT_SPECSIZE] = 0; 11573 } 11574 } 11575 11576 /* 11577 * The bare minimum size for any buffer that we're actually going to 11578 * do anything to is sizeof (uint64_t). 11579 */ 11580 sz = sizeof (uint64_t); 11581 11582 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 11583 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 11584 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 11585 /* 11586 * A buffer size has been explicitly set to 0 (or to a size 11587 * that will be adjusted to 0) and we need the space -- we 11588 * need to return failure. We return ENOSPC to differentiate 11589 * it from failing to allocate a buffer due to failure to meet 11590 * the reserve (for which we return E2BIG). 11591 */ 11592 rval = ENOSPC; 11593 goto out; 11594 } 11595 11596 if ((rval = dtrace_state_buffers(state)) != 0) 11597 goto err; 11598 11599 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 11600 sz = dtrace_dstate_defsize; 11601 11602 do { 11603 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 11604 11605 if (rval == 0) 11606 break; 11607 11608 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 11609 goto err; 11610 } while (sz >>= 1); 11611 11612 opt[DTRACEOPT_DYNVARSIZE] = sz; 11613 11614 if (rval != 0) 11615 goto err; 11616 11617 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 11618 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 11619 11620 if (opt[DTRACEOPT_CLEANRATE] == 0) 11621 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 11622 11623 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 11624 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 11625 11626 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 11627 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 11628 11629 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 11630 hdlr.cyh_arg = state; 11631 hdlr.cyh_level = CY_LOW_LEVEL; 11632 11633 when.cyt_when = 0; 11634 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 11635 11636 state->dts_cleaner = cyclic_add(&hdlr, &when); 11637 11638 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 11639 hdlr.cyh_arg = state; 11640 hdlr.cyh_level = CY_LOW_LEVEL; 11641 11642 when.cyt_when = 0; 11643 when.cyt_interval = dtrace_deadman_interval; 11644 11645 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 11646 state->dts_deadman = cyclic_add(&hdlr, &when); 11647 11648 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 11649 11650 /* 11651 * Now it's time to actually fire the BEGIN probe. We need to disable 11652 * interrupts here both to record the CPU on which we fired the BEGIN 11653 * probe (the data from this CPU will be processed first at user 11654 * level) and to manually activate the buffer for this CPU. 
11655 */ 11656 cookie = dtrace_interrupt_disable(); 11657 *cpu = CPU->cpu_id; 11658 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 11659 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 11660 11661 dtrace_probe(dtrace_probeid_begin, 11662 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 11663 dtrace_interrupt_enable(cookie); 11664 /* 11665 * We may have had an exit action from a BEGIN probe; only change our 11666 * state to ACTIVE if we're still in WARMUP. 11667 */ 11668 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 11669 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 11670 11671 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 11672 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 11673 11674 /* 11675 * Regardless of whether or not now we're in ACTIVE or DRAINING, we 11676 * want each CPU to transition its principal buffer out of the 11677 * INACTIVE state. Doing this assures that no CPU will suddenly begin 11678 * processing an ECB halfway down a probe's ECB chain; all CPUs will 11679 * atomically transition from processing none of a state's ECBs to 11680 * processing all of them. 11681 */ 11682 dtrace_xcall(DTRACE_CPUALL, 11683 (dtrace_xcall_t)dtrace_buffer_activate, state); 11684 goto out; 11685 11686 err: 11687 dtrace_buffer_free(state->dts_buffer); 11688 dtrace_buffer_free(state->dts_aggbuffer); 11689 11690 if ((nspec = state->dts_nspeculations) == 0) { 11691 ASSERT(state->dts_speculations == NULL); 11692 goto out; 11693 } 11694 11695 spec = state->dts_speculations; 11696 ASSERT(spec != NULL); 11697 11698 for (i = 0; i < state->dts_nspeculations; i++) { 11699 if ((buf = spec[i].dtsp_buffer) == NULL) 11700 break; 11701 11702 dtrace_buffer_free(buf); 11703 kmem_free(buf, bufsize); 11704 } 11705 11706 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 11707 state->dts_nspeculations = 0; 11708 state->dts_speculations = NULL; 11709 11710 out: 11711 mutex_exit(&dtrace_lock); 11712 mutex_exit(&cpu_lock); 11713 11714 return (rval); 11715 } 11716 11717 static int 11718 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 11719 { 11720 dtrace_icookie_t cookie; 11721 11722 ASSERT(MUTEX_HELD(&dtrace_lock)); 11723 11724 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 11725 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 11726 return (EINVAL); 11727 11728 /* 11729 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 11730 * to be sure that every CPU has seen it. See below for the details 11731 * on why this is done. 11732 */ 11733 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 11734 dtrace_sync(); 11735 11736 /* 11737 * By this point, it is impossible for any CPU to be still processing 11738 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 11739 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 11740 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 11741 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 11742 * iff we're in the END probe. 11743 */ 11744 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 11745 dtrace_sync(); 11746 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 11747 11748 /* 11749 * Finally, we can release the reserve and call the END probe. We 11750 * disable interrupts across calling the END probe to allow us to 11751 * return the CPU on which we actually called the END probe. This 11752 * allows user-land to be sure that this CPU's principal buffer is 11753 * processed last. 
11754 */ 11755 state->dts_reserve = 0; 11756 11757 cookie = dtrace_interrupt_disable(); 11758 *cpu = CPU->cpu_id; 11759 dtrace_probe(dtrace_probeid_end, 11760 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 11761 dtrace_interrupt_enable(cookie); 11762 11763 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 11764 dtrace_sync(); 11765 11766 return (0); 11767 } 11768 11769 static int 11770 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 11771 dtrace_optval_t val) 11772 { 11773 ASSERT(MUTEX_HELD(&dtrace_lock)); 11774 11775 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 11776 return (EBUSY); 11777 11778 if (option >= DTRACEOPT_MAX) 11779 return (EINVAL); 11780 11781 if (option != DTRACEOPT_CPU && val < 0) 11782 return (EINVAL); 11783 11784 switch (option) { 11785 case DTRACEOPT_DESTRUCTIVE: 11786 if (dtrace_destructive_disallow) 11787 return (EACCES); 11788 11789 state->dts_cred.dcr_destructive = 1; 11790 break; 11791 11792 case DTRACEOPT_BUFSIZE: 11793 case DTRACEOPT_DYNVARSIZE: 11794 case DTRACEOPT_AGGSIZE: 11795 case DTRACEOPT_SPECSIZE: 11796 case DTRACEOPT_STRSIZE: 11797 if (val < 0) 11798 return (EINVAL); 11799 11800 if (val >= LONG_MAX) { 11801 /* 11802 * If this is an otherwise negative value, set it to 11803 * the highest multiple of 128m less than LONG_MAX. 11804 * Technically, we're adjusting the size without 11805 * regard to the buffer resizing policy, but in fact, 11806 * this has no effect -- if we set the buffer size to 11807 * ~LONG_MAX and the buffer policy is ultimately set to 11808 * be "manual", the buffer allocation is guaranteed to 11809 * fail, if only because the allocation requires two 11810 * buffers. (We set the size to the highest 11811 * multiple of 128m because it ensures that the size 11812 * will remain a multiple of a megabyte when 11813 * repeatedly halved -- all the way down to 15m.) 11814 */ 11815 val = LONG_MAX - (1 << 27) + 1; 11816 } 11817 } 11818 11819 state->dts_options[option] = val; 11820 11821 return (0); 11822 } 11823 11824 static void 11825 dtrace_state_destroy(dtrace_state_t *state) 11826 { 11827 dtrace_ecb_t *ecb; 11828 dtrace_vstate_t *vstate = &state->dts_vstate; 11829 minor_t minor = getminor(state->dts_dev); 11830 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 11831 dtrace_speculation_t *spec = state->dts_speculations; 11832 int nspec = state->dts_nspeculations; 11833 uint32_t match; 11834 11835 ASSERT(MUTEX_HELD(&dtrace_lock)); 11836 ASSERT(MUTEX_HELD(&cpu_lock)); 11837 11838 /* 11839 * First, retract any retained enablings for this state. 11840 */ 11841 dtrace_enabling_retract(state); 11842 ASSERT(state->dts_nretained == 0); 11843 11844 /* 11845 * Release the credential hold we took in dtrace_state_create(). 11846 */ 11847 if (state->dts_cred.dcr_cred != NULL) 11848 crfree(state->dts_cred.dcr_cred); 11849 11850 /* 11851 * Now we need to disable and destroy any enabled probes. Because any 11852 * DTRACE_PRIV_KERNEL probes may actually be slowing our progress 11853 * (especially if they're all enabled), we take two passes through 11854 * the ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, 11855 * and in the second we disable whatever is left over.
11856 */ 11857 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 11858 for (i = 0; i < state->dts_necbs; i++) { 11859 if ((ecb = state->dts_ecbs[i]) == NULL) 11860 continue; 11861 11862 if (match && ecb->dte_probe != NULL) { 11863 dtrace_probe_t *probe = ecb->dte_probe; 11864 dtrace_provider_t *prov = probe->dtpr_provider; 11865 11866 if (!(prov->dtpv_priv.dtpp_flags & match)) 11867 continue; 11868 } 11869 11870 dtrace_ecb_disable(ecb); 11871 dtrace_ecb_destroy(ecb); 11872 } 11873 11874 if (!match) 11875 break; 11876 } 11877 11878 /* 11879 * Before we free the buffers, perform one more sync to assure that 11880 * every CPU is out of probe context. 11881 */ 11882 dtrace_sync(); 11883 11884 dtrace_buffer_free(state->dts_buffer); 11885 dtrace_buffer_free(state->dts_aggbuffer); 11886 11887 for (i = 0; i < nspec; i++) 11888 dtrace_buffer_free(spec[i].dtsp_buffer); 11889 11890 if (state->dts_cleaner != CYCLIC_NONE) 11891 cyclic_remove(state->dts_cleaner); 11892 11893 if (state->dts_deadman != CYCLIC_NONE) 11894 cyclic_remove(state->dts_deadman); 11895 11896 dtrace_dstate_fini(&vstate->dtvs_dynvars); 11897 dtrace_vstate_fini(vstate); 11898 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 11899 11900 if (state->dts_aggregations != NULL) { 11901 #ifdef DEBUG 11902 for (i = 0; i < state->dts_naggregations; i++) 11903 ASSERT(state->dts_aggregations[i] == NULL); 11904 #endif 11905 ASSERT(state->dts_naggregations > 0); 11906 kmem_free(state->dts_aggregations, 11907 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 11908 } 11909 11910 kmem_free(state->dts_buffer, bufsize); 11911 kmem_free(state->dts_aggbuffer, bufsize); 11912 11913 for (i = 0; i < nspec; i++) 11914 kmem_free(spec[i].dtsp_buffer, bufsize); 11915 11916 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 11917 11918 dtrace_format_destroy(state); 11919 11920 vmem_destroy(state->dts_aggid_arena); 11921 ddi_soft_state_free(dtrace_softstate, minor); 11922 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 11923 } 11924 11925 /* 11926 * DTrace Anonymous Enabling Functions 11927 */ 11928 static dtrace_state_t * 11929 dtrace_anon_grab(void) 11930 { 11931 dtrace_state_t *state; 11932 11933 ASSERT(MUTEX_HELD(&dtrace_lock)); 11934 11935 if ((state = dtrace_anon.dta_state) == NULL) { 11936 ASSERT(dtrace_anon.dta_enabling == NULL); 11937 return (NULL); 11938 } 11939 11940 ASSERT(dtrace_anon.dta_enabling != NULL); 11941 ASSERT(dtrace_retained != NULL); 11942 11943 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 11944 dtrace_anon.dta_enabling = NULL; 11945 dtrace_anon.dta_state = NULL; 11946 11947 return (state); 11948 } 11949 11950 static void 11951 dtrace_anon_property(void) 11952 { 11953 int i, rv; 11954 dtrace_state_t *state; 11955 dof_hdr_t *dof; 11956 char c[32]; /* enough for "dof-data-" + digits */ 11957 11958 ASSERT(MUTEX_HELD(&dtrace_lock)); 11959 ASSERT(MUTEX_HELD(&cpu_lock)); 11960 11961 for (i = 0; ; i++) { 11962 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 11963 11964 dtrace_err_verbose = 1; 11965 11966 if ((dof = dtrace_dof_property(c)) == NULL) { 11967 dtrace_err_verbose = 0; 11968 break; 11969 } 11970 11971 /* 11972 * We want to create anonymous state, so we need to transition 11973 * the kernel debugger to indicate that DTrace is active. If 11974 * this fails (e.g. because the debugger has modified text in 11975 * some way), we won't continue with the processing. 
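 * (The DOF being processed here came from the "dof-data-<n>" properties
 * read above -- the form in which anonymous enablings, e.g. those created
 * with dtrace(1M) -A, are expected to be presented to the driver.)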
11976 */ 11977 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 11978 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 11979 "enabling ignored."); 11980 dtrace_dof_destroy(dof); 11981 break; 11982 } 11983 11984 /* 11985 * If we haven't allocated an anonymous state, we'll do so now. 11986 */ 11987 if ((state = dtrace_anon.dta_state) == NULL) { 11988 state = dtrace_state_create(NULL, NULL); 11989 dtrace_anon.dta_state = state; 11990 11991 if (state == NULL) { 11992 /* 11993 * This basically shouldn't happen: the only 11994 * failure mode from dtrace_state_create() is a 11995 * failure of ddi_soft_state_zalloc() that 11996 * itself should never happen. Still, the 11997 * interface allows for a failure mode, and 11998 * we want to fail as gracefully as possible: 11999 * we'll emit an error message and cease 12000 * processing anonymous state in this case. 12001 */ 12002 cmn_err(CE_WARN, "failed to create " 12003 "anonymous state"); 12004 dtrace_dof_destroy(dof); 12005 break; 12006 } 12007 } 12008 12009 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 12010 &dtrace_anon.dta_enabling, 0, B_TRUE); 12011 12012 if (rv == 0) 12013 rv = dtrace_dof_options(dof, state); 12014 12015 dtrace_err_verbose = 0; 12016 dtrace_dof_destroy(dof); 12017 12018 if (rv != 0) { 12019 /* 12020 * This is malformed DOF; chuck any anonymous state 12021 * that we created. 12022 */ 12023 ASSERT(dtrace_anon.dta_enabling == NULL); 12024 dtrace_state_destroy(state); 12025 dtrace_anon.dta_state = NULL; 12026 break; 12027 } 12028 12029 ASSERT(dtrace_anon.dta_enabling != NULL); 12030 } 12031 12032 if (dtrace_anon.dta_enabling != NULL) { 12033 int rval; 12034 12035 /* 12036 * dtrace_enabling_retain() can only fail because we are 12037 * trying to retain more enablings than are allowed -- but 12038 * we only have one anonymous enabling, and we are guaranteed 12039 * to be allowed at least one retained enabling; we assert 12040 * that dtrace_enabling_retain() returns success. 12041 */ 12042 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 12043 ASSERT(rval == 0); 12044 12045 dtrace_enabling_dump(dtrace_anon.dta_enabling); 12046 } 12047 } 12048 12049 /* 12050 * DTrace Helper Functions 12051 */ 12052 static void 12053 dtrace_helper_trace(dtrace_helper_action_t *helper, 12054 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 12055 { 12056 uint32_t size, next, nnext, i; 12057 dtrace_helptrace_t *ent; 12058 uint16_t flags = cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 12059 12060 if (!dtrace_helptrace_enabled) 12061 return; 12062 12063 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 12064 12065 /* 12066 * What would a tracing framework be without its own tracing 12067 * framework? (Well, a hell of a lot simpler, for starters...) 12068 */ 12069 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 12070 sizeof (uint64_t) - sizeof (uint64_t); 12071 12072 /* 12073 * Iterate until we can allocate a slot in the trace buffer. 12074 */ 12075 do { 12076 next = dtrace_helptrace_next; 12077 12078 if (next + size < dtrace_helptrace_bufsize) { 12079 nnext = next + size; 12080 } else { 12081 nnext = size; 12082 } 12083 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 12084 12085 /* 12086 * We have our slot; fill it in. 
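 * (If nnext wrapped around to size above, the slot actually begins at
 * offset 0 of dtrace_helptrace_buffer -- which is exactly what the
 * adjustment below accounts for.)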
12087 */ 12088 if (nnext == size) 12089 next = 0; 12090 12091 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 12092 ent->dtht_helper = helper; 12093 ent->dtht_where = where; 12094 ent->dtht_nlocals = vstate->dtvs_nlocals; 12095 12096 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 12097 mstate->dtms_fltoffs : -1; 12098 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 12099 ent->dtht_illval = cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 12100 12101 for (i = 0; i < vstate->dtvs_nlocals; i++) { 12102 dtrace_statvar_t *svar; 12103 12104 if ((svar = vstate->dtvs_locals[i]) == NULL) 12105 continue; 12106 12107 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 12108 ent->dtht_locals[i] = 12109 ((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id]; 12110 } 12111 } 12112 12113 static uint64_t 12114 dtrace_helper(int which, dtrace_mstate_t *mstate, 12115 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 12116 { 12117 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 12118 uint64_t sarg0 = mstate->dtms_arg[0]; 12119 uint64_t sarg1 = mstate->dtms_arg[1]; 12120 uint64_t rval; 12121 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 12122 dtrace_helper_action_t *helper; 12123 dtrace_vstate_t *vstate; 12124 dtrace_difo_t *pred; 12125 int i, trace = dtrace_helptrace_enabled; 12126 12127 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 12128 12129 if (helpers == NULL) 12130 return (0); 12131 12132 if ((helper = helpers->dthps_actions[which]) == NULL) 12133 return (0); 12134 12135 vstate = &helpers->dthps_vstate; 12136 mstate->dtms_arg[0] = arg0; 12137 mstate->dtms_arg[1] = arg1; 12138 12139 /* 12140 * Now iterate over each helper. If its predicate evaluates to 'true', 12141 * we'll call the corresponding actions. Note that the below calls 12142 * to dtrace_dif_emulate() may set faults in machine state. This is 12143 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 12144 * the stored DIF offset with its own (which is the desired behavior). 12145 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 12146 * from machine state; this is okay, too. 12147 */ 12148 for (; helper != NULL; helper = helper->dthp_next) { 12149 if ((pred = helper->dthp_predicate) != NULL) { 12150 if (trace) 12151 dtrace_helper_trace(helper, mstate, vstate, 0); 12152 12153 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 12154 goto next; 12155 12156 if (*flags & CPU_DTRACE_FAULT) 12157 goto err; 12158 } 12159 12160 for (i = 0; i < helper->dthp_nactions; i++) { 12161 if (trace) 12162 dtrace_helper_trace(helper, 12163 mstate, vstate, i + 1); 12164 12165 rval = dtrace_dif_emulate(helper->dthp_actions[i], 12166 mstate, vstate, state); 12167 12168 if (*flags & CPU_DTRACE_FAULT) 12169 goto err; 12170 } 12171 12172 next: 12173 if (trace) 12174 dtrace_helper_trace(helper, mstate, vstate, 12175 DTRACE_HELPTRACE_NEXT); 12176 } 12177 12178 if (trace) 12179 dtrace_helper_trace(helper, mstate, vstate, 12180 DTRACE_HELPTRACE_DONE); 12181 12182 /* 12183 * Restore the arg0 that we saved upon entry. 12184 */ 12185 mstate->dtms_arg[0] = sarg0; 12186 mstate->dtms_arg[1] = sarg1; 12187 12188 return (rval); 12189 12190 err: 12191 if (trace) 12192 dtrace_helper_trace(helper, mstate, vstate, 12193 DTRACE_HELPTRACE_ERR); 12194 12195 /* 12196 * Restore the arg0 that we saved upon entry. 
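 * (arg1 is restored as well; both were stashed on entry so that the
 * helper's own arguments don't permanently displace those of the
 * enclosing emulation.)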
12197 */ 12198 mstate->dtms_arg[0] = sarg0; 12199 mstate->dtms_arg[1] = sarg1; 12200 12201 return (NULL); 12202 } 12203 12204 static void 12205 dtrace_helper_destroy(dtrace_helper_action_t *helper, dtrace_vstate_t *vstate) 12206 { 12207 int i; 12208 12209 if (helper->dthp_predicate != NULL) 12210 dtrace_difo_release(helper->dthp_predicate, vstate); 12211 12212 for (i = 0; i < helper->dthp_nactions; i++) { 12213 ASSERT(helper->dthp_actions[i] != NULL); 12214 dtrace_difo_release(helper->dthp_actions[i], vstate); 12215 } 12216 12217 kmem_free(helper->dthp_actions, 12218 helper->dthp_nactions * sizeof (dtrace_difo_t *)); 12219 kmem_free(helper, sizeof (dtrace_helper_action_t)); 12220 } 12221 12222 static int 12223 dtrace_helper_destroygen(int gen) 12224 { 12225 dtrace_helpers_t *help = curproc->p_dtrace_helpers; 12226 dtrace_vstate_t *vstate; 12227 int i; 12228 12229 ASSERT(MUTEX_HELD(&dtrace_lock)); 12230 12231 if (help == NULL || gen > help->dthps_generation) 12232 return (EINVAL); 12233 12234 vstate = &help->dthps_vstate; 12235 12236 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 12237 dtrace_helper_action_t *last = NULL, *h, *next; 12238 12239 for (h = help->dthps_actions[i]; h != NULL; h = next) { 12240 next = h->dthp_next; 12241 12242 if (h->dthp_generation == gen) { 12243 if (last != NULL) { 12244 last->dthp_next = next; 12245 } else { 12246 help->dthps_actions[i] = next; 12247 } 12248 12249 dtrace_helper_destroy(h, vstate); 12250 } else { 12251 last = h; 12252 } 12253 } 12254 } 12255 12256 return (0); 12257 } 12258 12259 static int 12260 dtrace_helper_validate(dtrace_helper_action_t *helper) 12261 { 12262 int err = 0, i; 12263 dtrace_difo_t *dp; 12264 12265 if ((dp = helper->dthp_predicate) != NULL) 12266 err += dtrace_difo_validate_helper(dp); 12267 12268 for (i = 0; i < helper->dthp_nactions; i++) 12269 err += dtrace_difo_validate_helper(helper->dthp_actions[i]); 12270 12271 return (err == 0); 12272 } 12273 12274 static int 12275 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 12276 { 12277 dtrace_helpers_t *help; 12278 dtrace_helper_action_t *helper, *last; 12279 dtrace_actdesc_t *act; 12280 dtrace_vstate_t *vstate; 12281 dtrace_predicate_t *pred; 12282 int count = 0, nactions = 0, i; 12283 12284 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 12285 return (EINVAL); 12286 12287 help = curproc->p_dtrace_helpers; 12288 last = help->dthps_actions[which]; 12289 vstate = &help->dthps_vstate; 12290 12291 for (count = 0; last != NULL; last = last->dthp_next) { 12292 count++; 12293 if (last->dthp_next == NULL) 12294 break; 12295 } 12296 12297 /* 12298 * If we already have dtrace_helper_actions_max helper actions for this 12299 * helper action type, we'll refuse to add a new one. 
12300 */ 12301 if (count >= dtrace_helper_actions_max) 12302 return (ENOSPC); 12303 12304 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 12305 helper->dthp_generation = help->dthps_generation; 12306 12307 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 12308 ASSERT(pred->dtp_difo != NULL); 12309 dtrace_difo_hold(pred->dtp_difo); 12310 helper->dthp_predicate = pred->dtp_difo; 12311 } 12312 12313 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 12314 if (act->dtad_kind != DTRACEACT_DIFEXPR) 12315 goto err; 12316 12317 if (act->dtad_difo == NULL) 12318 goto err; 12319 12320 nactions++; 12321 } 12322 12323 helper->dthp_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 12324 (helper->dthp_nactions = nactions), KM_SLEEP); 12325 12326 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 12327 dtrace_difo_hold(act->dtad_difo); 12328 helper->dthp_actions[i++] = act->dtad_difo; 12329 } 12330 12331 if (!dtrace_helper_validate(helper)) 12332 goto err; 12333 12334 if (last == NULL) { 12335 help->dthps_actions[which] = helper; 12336 } else { 12337 last->dthp_next = helper; 12338 } 12339 12340 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 12341 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 12342 dtrace_helptrace_next = 0; 12343 } 12344 12345 return (0); 12346 err: 12347 dtrace_helper_destroy(helper, vstate); 12348 return (EINVAL); 12349 } 12350 12351 static void 12352 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 12353 dof_helper_t *dofhp) 12354 { 12355 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 12356 12357 mutex_enter(&dtrace_meta_lock); 12358 mutex_enter(&dtrace_lock); 12359 12360 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 12361 /* 12362 * If the dtrace module is loaded but not attached, or if 12363 * there isn't a meta provider registered to deal with 12364 * these provider descriptions, we need to postpone creating 12365 * the actual providers until later. 12366 */ 12367 12368 if (help->dthps_next == NULL && help->dthps_prev == NULL && 12369 dtrace_deferred_pid != help) { 12370 help->dthps_deferred = 1; 12371 help->dthps_pid = p->p_pid; 12372 help->dthps_next = dtrace_deferred_pid; 12373 help->dthps_prev = NULL; 12374 if (dtrace_deferred_pid != NULL) 12375 dtrace_deferred_pid->dthps_prev = help; 12376 dtrace_deferred_pid = help; 12377 } 12378 12379 mutex_exit(&dtrace_lock); 12380 12381 } else if (dofhp != NULL) { 12382 /* 12383 * If the dtrace module is loaded and we have a particular 12384 * helper provider description, pass that off to the 12385 * meta provider. 12386 */ 12387 12388 mutex_exit(&dtrace_lock); 12389 12390 dtrace_helper_provide(dofhp, p->p_pid); 12391 12392 } else { 12393 /* 12394 * Otherwise, just pass all the helper provider descriptions 12395 * off to the meta provider. 12396 */ 12397 12398 int i; 12399 mutex_exit(&dtrace_lock); 12400 12401 for (i = 0; i < help->dthps_nprovs; i++) { 12402 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 12403 p->p_pid); 12404 } 12405 } 12406 12407 mutex_exit(&dtrace_meta_lock); 12408 } 12409 12410 static int 12411 dtrace_helper_provider_add(dof_helper_t *dofhp) 12412 { 12413 dtrace_helpers_t *help; 12414 dtrace_helper_provider_t *hprov, **tmp_provs; 12415 uint_t tmp_nprovs, i; 12416 12417 help = curproc->p_dtrace_helpers; 12418 ASSERT(help != NULL); 12419 12420 /* 12421 * If we already have dtrace_helper_providers_max helper providers, 12422 * we'll refuse to add a new one.
12423 */ 12424 if (help->dthps_nprovs >= dtrace_helper_providers_max) 12425 return (ENOSPC); 12426 12427 /* 12428 * Check to make sure this isn't a duplicate. 12429 */ 12430 for (i = 0; i < help->dthps_nprovs; i++) { 12431 if (dofhp->dofhp_addr == 12432 help->dthps_provs[i]->dthp_prov.dofhp_addr) 12433 return (EALREADY); 12434 } 12435 12436 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 12437 hprov->dthp_prov = *dofhp; 12438 hprov->dthp_ref = 1; 12439 12440 tmp_nprovs = help->dthps_nprovs; 12441 tmp_provs = help->dthps_provs; 12442 help->dthps_nprovs++; 12443 help->dthps_provs = kmem_zalloc(help->dthps_nprovs * 12444 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 12445 12446 help->dthps_provs[tmp_nprovs] = hprov; 12447 if (tmp_provs != NULL) { 12448 bcopy(tmp_provs, help->dthps_provs, tmp_nprovs * 12449 sizeof (dtrace_helper_provider_t *)); 12450 kmem_free(tmp_provs, tmp_nprovs * 12451 sizeof (dtrace_helper_provider_t *)); 12452 } 12453 12454 return (0); 12455 } 12456 12457 static void 12458 dtrace_helper_provider_remove(dtrace_helper_provider_t *hprov) 12459 { 12460 mutex_enter(&dtrace_lock); 12461 12462 if (--hprov->dthp_ref == 0) { 12463 dof_hdr_t *dof; 12464 mutex_exit(&dtrace_lock); 12465 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 12466 dtrace_dof_destroy(dof); 12467 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 12468 } else { 12469 mutex_exit(&dtrace_lock); 12470 } 12471 } 12472 12473 static int 12474 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 12475 { 12476 uintptr_t daddr = (uintptr_t)dof; 12477 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 12478 dof_provider_t *provider; 12479 dof_probe_t *probe; 12480 uint8_t *arg; 12481 char *strtab, *typestr; 12482 dof_stridx_t typeidx; 12483 size_t typesz; 12484 uint_t nprobes, j, k; 12485 12486 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 12487 12488 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 12489 dtrace_dof_error(dof, "misaligned section offset"); 12490 return (-1); 12491 } 12492 12493 /* 12494 * The section needs to be large enough to contain the DOF provider 12495 * structure appropriate for the given version. 12496 */ 12497 if (sec->dofs_size < 12498 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
12499 offsetof(dof_provider_t, dofpv_prenoffs) : 12500 sizeof (dof_provider_t))) { 12501 dtrace_dof_error(dof, "provider section too small"); 12502 return (-1); 12503 } 12504 12505 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 12506 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 12507 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 12508 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 12509 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 12510 12511 if (str_sec == NULL || prb_sec == NULL || 12512 arg_sec == NULL || off_sec == NULL) 12513 return (-1); 12514 12515 enoff_sec = NULL; 12516 12517 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 12518 provider->dofpv_prenoffs != 0 && (enoff_sec = dtrace_dof_sect(dof, 12519 DOF_SECT_PRENOFFS, provider->dofpv_prenoffs)) == NULL) 12520 return (-1); 12521 12522 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 12523 12524 if (provider->dofpv_name >= str_sec->dofs_size || 12525 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 12526 dtrace_dof_error(dof, "invalid provider name"); 12527 return (-1); 12528 } 12529 12530 if (prb_sec->dofs_entsize == 0 || 12531 prb_sec->dofs_entsize > prb_sec->dofs_size) { 12532 dtrace_dof_error(dof, "invalid entry size"); 12533 return (-1); 12534 } 12535 12536 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 12537 dtrace_dof_error(dof, "misaligned entry size"); 12538 return (-1); 12539 } 12540 12541 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 12542 dtrace_dof_error(dof, "invalid entry size"); 12543 return (-1); 12544 } 12545 12546 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 12547 dtrace_dof_error(dof, "misaligned section offset"); 12548 return (-1); 12549 } 12550 12551 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 12552 dtrace_dof_error(dof, "invalid entry size"); 12553 return (-1); 12554 } 12555 12556 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 12557 12558 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 12559 12560 /* 12561 * Take a pass through the probes to check for errors. 12562 */ 12563 for (j = 0; j < nprobes; j++) { 12564 probe = (dof_probe_t *)(uintptr_t)(daddr + 12565 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 12566 12567 if (probe->dofpr_func >= str_sec->dofs_size) { 12568 dtrace_dof_error(dof, "invalid function name"); 12569 return (-1); 12570 } 12571 12572 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 12573 dtrace_dof_error(dof, "function name too long"); 12574 return (-1); 12575 } 12576 12577 if (probe->dofpr_name >= str_sec->dofs_size || 12578 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 12579 dtrace_dof_error(dof, "invalid probe name"); 12580 return (-1); 12581 } 12582 12583 /* 12584 * The offset count must not wrap the index and there must be 12585 * at least one offset. The offsets must also not overflow the 12586 * section's data. 12587 */ 12588 if (probe->dofpr_offidx + probe->dofpr_noffs <= 12589 probe->dofpr_offidx || 12590 (probe->dofpr_offidx + probe->dofpr_noffs) * 12591 off_sec->dofs_entsize > off_sec->dofs_size) { 12592 dtrace_dof_error(dof, "invalid probe offset"); 12593 return (-1); 12594 } 12595 12596 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 12597 /* 12598 * If there's no is-enabled offset section, make sure 12599 * there aren't any is-enabled offsets. 
Otherwise 12600 * perform the same checks as for probe offsets 12601 * (immediately above), except that having zero 12602 * is-enabled offsets is permitted. 12603 */ 12604 if (enoff_sec == NULL) { 12605 if (probe->dofpr_enoffidx != 0 || 12606 probe->dofpr_nenoffs != 0) { 12607 dtrace_dof_error(dof, "is-enabled " 12608 "offsets with null section"); 12609 return (-1); 12610 } 12611 } else if (probe->dofpr_enoffidx + 12612 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 12613 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 12614 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 12615 dtrace_dof_error(dof, "invalid is-enabled " 12616 "offset"); 12617 return (-1); 12618 } 12619 } 12620 12621 if (probe->dofpr_argidx + probe->dofpr_xargc < 12622 probe->dofpr_argidx || 12623 (probe->dofpr_argidx + probe->dofpr_xargc) * 12624 arg_sec->dofs_entsize > arg_sec->dofs_size) { 12625 dtrace_dof_error(dof, "invalid args"); 12626 return (-1); 12627 } 12628 12629 typeidx = probe->dofpr_nargv; 12630 typestr = strtab + probe->dofpr_nargv; 12631 for (k = 0; k < probe->dofpr_nargc; k++) { 12632 if (typeidx >= str_sec->dofs_size) { 12633 dtrace_dof_error(dof, "bad " 12634 "native argument type"); 12635 return (-1); 12636 } 12637 12638 typesz = strlen(typestr) + 1; 12639 if (typesz > DTRACE_ARGTYPELEN) { 12640 dtrace_dof_error(dof, "native " 12641 "argument type too long"); 12642 return (-1); 12643 } 12644 typeidx += typesz; 12645 typestr += typesz; 12646 } 12647 12648 typeidx = probe->dofpr_xargv; 12649 typestr = strtab + probe->dofpr_xargv; 12650 for (k = 0; k < probe->dofpr_xargc; k++) { 12651 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 12652 dtrace_dof_error(dof, "bad " 12653 "native argument index"); 12654 return (-1); 12655 } 12656 12657 if (typeidx >= str_sec->dofs_size) { 12658 dtrace_dof_error(dof, "bad " 12659 "translated argument type"); 12660 return (-1); 12661 } 12662 12663 typesz = strlen(typestr) + 1; 12664 if (typesz > DTRACE_ARGTYPELEN) { 12665 dtrace_dof_error(dof, "translated argument " 12666 "type too long"); 12667 return (-1); 12668 } 12669 12670 typeidx += typesz; 12671 typestr += typesz; 12672 } 12673 } 12674 12675 return (0); 12676 } 12677 12678 static int 12679 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 12680 { 12681 dtrace_helpers_t *help; 12682 dtrace_vstate_t *vstate; 12683 dtrace_enabling_t *enab = NULL; 12684 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 12685 uintptr_t daddr = (uintptr_t)dof; 12686 12687 ASSERT(MUTEX_HELD(&dtrace_lock)); 12688 12689 if ((help = curproc->p_dtrace_helpers) == NULL) 12690 help = dtrace_helpers_create(curproc); 12691 12692 vstate = &help->dthps_vstate; 12693 12694 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 12695 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 12696 dtrace_dof_destroy(dof); 12697 return (rv); 12698 } 12699 12700 /* 12701 * Look for helper providers and validate their descriptions. 12702 */ 12703 if (dhp != NULL) { 12704 for (i = 0; i < dof->dofh_secnum; i++) { 12705 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 12706 dof->dofh_secoff + i * dof->dofh_secsize); 12707 12708 if (sec->dofs_type != DOF_SECT_PROVIDER) 12709 continue; 12710 12711 if (dtrace_helper_provider_validate(dof, sec) != 0) { 12712 dtrace_enabling_destroy(enab); 12713 dtrace_dof_destroy(dof); 12714 return (-1); 12715 } 12716 12717 nprovs++; 12718 } 12719 } 12720 12721 /* 12722 * Now we need to walk through the ECB descriptions in the enabling. 
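 * Only descriptions of the dtrace:helper:ustack probe are added as helper
 * actions; anything else is skipped, and a shortfall in the number of
 * helpers added is reported below as "unmatched helpers".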
12723 */ 12724 for (i = 0; i < enab->dten_ndesc; i++) { 12725 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 12726 dtrace_probedesc_t *desc = &ep->dted_probe; 12727 12728 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 12729 continue; 12730 12731 if (strcmp(desc->dtpd_mod, "helper") != 0) 12732 continue; 12733 12734 if (strcmp(desc->dtpd_func, "ustack") != 0) 12735 continue; 12736 12737 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 12738 ep)) != 0) { 12739 /* 12740 * Adding this helper action failed -- we are now going 12741 * to rip out the entire generation and return failure. 12742 */ 12743 (void) dtrace_helper_destroygen(help->dthps_generation); 12744 dtrace_enabling_destroy(enab); 12745 dtrace_dof_destroy(dof); 12746 return (-1); 12747 } 12748 12749 nhelpers++; 12750 } 12751 12752 if (nhelpers < enab->dten_ndesc) 12753 dtrace_dof_error(dof, "unmatched helpers"); 12754 12755 gen = help->dthps_generation++; 12756 dtrace_enabling_destroy(enab); 12757 12758 if (dhp != NULL && nprovs > 0) { 12759 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 12760 if (dtrace_helper_provider_add(dhp) == 0) { 12761 mutex_exit(&dtrace_lock); 12762 dtrace_helper_provider_register(curproc, help, dhp); 12763 mutex_enter(&dtrace_lock); 12764 12765 destroy = 0; 12766 } 12767 } 12768 12769 if (destroy) 12770 dtrace_dof_destroy(dof); 12771 12772 return (gen); 12773 } 12774 12775 static dtrace_helpers_t * 12776 dtrace_helpers_create(proc_t *p) 12777 { 12778 dtrace_helpers_t *help; 12779 12780 ASSERT(MUTEX_HELD(&dtrace_lock)); 12781 ASSERT(p->p_dtrace_helpers == NULL); 12782 12783 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 12784 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 12785 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 12786 12787 p->p_dtrace_helpers = help; 12788 dtrace_helpers++; 12789 12790 return (help); 12791 } 12792 12793 static void 12794 dtrace_helpers_destroy(void) 12795 { 12796 dtrace_helpers_t *help; 12797 dtrace_vstate_t *vstate; 12798 proc_t *p = curproc; 12799 int i; 12800 12801 mutex_enter(&dtrace_lock); 12802 12803 ASSERT(p->p_dtrace_helpers != NULL); 12804 ASSERT(dtrace_helpers > 0); 12805 12806 help = p->p_dtrace_helpers; 12807 vstate = &help->dthps_vstate; 12808 12809 /* 12810 * We're now going to lose the help from this process. 12811 */ 12812 p->p_dtrace_helpers = NULL; 12813 dtrace_sync(); 12814 12815 /* 12816 * Destroy the helper actions. 12817 */ 12818 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 12819 dtrace_helper_action_t *h, *next; 12820 12821 for (h = help->dthps_actions[i]; h != NULL; h = next) { 12822 next = h->dthp_next; 12823 dtrace_helper_destroy(h, vstate); 12824 h = next; 12825 } 12826 } 12827 12828 mutex_exit(&dtrace_lock); 12829 12830 /* 12831 * Destroy the helper providers. 12832 */ 12833 if (help->dthps_nprovs > 0) { 12834 mutex_enter(&dtrace_meta_lock); 12835 if (dtrace_meta_pid != NULL) { 12836 ASSERT(dtrace_deferred_pid == NULL); 12837 12838 for (i = 0; i < help->dthps_nprovs; i++) { 12839 dtrace_helper_remove( 12840 &help->dthps_provs[i]->dthp_prov, p->p_pid); 12841 } 12842 } else { 12843 mutex_enter(&dtrace_lock); 12844 ASSERT(help->dthps_deferred == 0 || 12845 help->dthps_next != NULL || 12846 help->dthps_prev != NULL || 12847 help == dtrace_deferred_pid); 12848 12849 /* 12850 * Remove the helper from the deferred list.
12851 */ 12852 if (help->dthps_next != NULL) 12853 help->dthps_next->dthps_prev = help->dthps_prev; 12854 if (help->dthps_prev != NULL) 12855 help->dthps_prev->dthps_next = help->dthps_next; 12856 if (dtrace_deferred_pid == help) { 12857 dtrace_deferred_pid = help->dthps_next; 12858 ASSERT(help->dthps_prev == NULL); 12859 } 12860 12861 mutex_exit(&dtrace_lock); 12862 } 12863 12864 mutex_exit(&dtrace_meta_lock); 12865 12866 for (i = 0; i < help->dthps_nprovs; i++) { 12867 dtrace_helper_provider_remove(help->dthps_provs[i]); 12868 } 12869 12870 kmem_free(help->dthps_provs, help->dthps_nprovs * 12871 sizeof (dtrace_helper_provider_t *)); 12872 } 12873 12874 mutex_enter(&dtrace_lock); 12875 12876 dtrace_vstate_fini(&help->dthps_vstate); 12877 kmem_free(help->dthps_actions, 12878 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 12879 kmem_free(help, sizeof (dtrace_helpers_t)); 12880 12881 --dtrace_helpers; 12882 mutex_exit(&dtrace_lock); 12883 } 12884 12885 static void 12886 dtrace_helpers_duplicate(proc_t *from, proc_t *to) 12887 { 12888 dtrace_helpers_t *help, *newhelp; 12889 dtrace_helper_action_t *helper, *new, *last; 12890 dtrace_difo_t *dp; 12891 dtrace_vstate_t *vstate; 12892 int i, j, sz, hasprovs = 0; 12893 12894 mutex_enter(&dtrace_lock); 12895 ASSERT(from->p_dtrace_helpers != NULL); 12896 ASSERT(dtrace_helpers > 0); 12897 12898 help = from->p_dtrace_helpers; 12899 newhelp = dtrace_helpers_create(to); 12900 ASSERT(to->p_dtrace_helpers != NULL); 12901 12902 newhelp->dthps_generation = help->dthps_generation; 12903 vstate = &newhelp->dthps_vstate; 12904 12905 /* 12906 * Duplicate the helper actions. 12907 */ 12908 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 12909 if ((helper = help->dthps_actions[i]) == NULL) 12910 continue; 12911 12912 for (last = NULL; helper != NULL; helper = helper->dthp_next) { 12913 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 12914 KM_SLEEP); 12915 new->dthp_generation = helper->dthp_generation; 12916 12917 if ((dp = helper->dthp_predicate) != NULL) { 12918 dp = dtrace_difo_duplicate(dp, vstate); 12919 new->dthp_predicate = dp; 12920 } 12921 12922 new->dthp_nactions = helper->dthp_nactions; 12923 sz = sizeof (dtrace_difo_t *) * new->dthp_nactions; 12924 new->dthp_actions = kmem_alloc(sz, KM_SLEEP); 12925 12926 for (j = 0; j < new->dthp_nactions; j++) { 12927 dtrace_difo_t *dp = helper->dthp_actions[j]; 12928 12929 ASSERT(dp != NULL); 12930 dp = dtrace_difo_duplicate(dp, vstate); 12931 new->dthp_actions[j] = dp; 12932 } 12933 12934 if (last != NULL) { 12935 last->dthp_next = new; 12936 } else { 12937 newhelp->dthps_actions[i] = new; 12938 } 12939 12940 last = new; 12941 } 12942 } 12943 12944 /* 12945 * Duplicate the helper providers and register them with the 12946 * DTrace framework. 
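 * (The provider descriptions themselves are shared rather than copied:
 * the child's table points at the parent's dtrace_helper_provider_t
 * structures, each of which gains a reference via dthp_ref.)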
12947 */ 12948 if (help->dthps_nprovs > 0) { 12949 newhelp->dthps_nprovs = help->dthps_nprovs; 12950 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs * 12951 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 12952 for (i = 0; i < newhelp->dthps_nprovs; i++) { 12953 newhelp->dthps_provs[i] = help->dthps_provs[i]; 12954 newhelp->dthps_provs[i]->dthp_ref++; 12955 } 12956 12957 hasprovs = 1; 12958 } 12959 12960 mutex_exit(&dtrace_lock); 12961 12962 if (hasprovs) 12963 dtrace_helper_provider_register(to, newhelp, NULL); 12964 } 12965 12966 /* 12967 * DTrace Hook Functions 12968 */ 12969 static void 12970 dtrace_module_loaded(struct modctl *ctl) 12971 { 12972 dtrace_provider_t *prv; 12973 12974 mutex_enter(&dtrace_provider_lock); 12975 mutex_enter(&mod_lock); 12976 12977 ASSERT(ctl->mod_busy); 12978 12979 /* 12980 * We're going to call each provider's per-module provide operation 12981 * specifying only this module. 12982 */ 12983 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) 12984 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 12985 12986 mutex_exit(&mod_lock); 12987 mutex_exit(&dtrace_provider_lock); 12988 12989 /* 12990 * If we have any retained enablings, we need to match against them. 12991 * Enabling probes requires that cpu_lock be held, and we cannot hold 12992 * cpu_lock here -- it is legal for cpu_lock to be held when loading a 12993 * module. (In particular, this happens when loading scheduling 12994 * classes.) So if we have any retained enablings, we need to dispatch 12995 * our task queue to do the match for us. 12996 */ 12997 mutex_enter(&dtrace_lock); 12998 12999 if (dtrace_retained == NULL) { 13000 mutex_exit(&dtrace_lock); 13001 return; 13002 } 13003 13004 (void) taskq_dispatch(dtrace_taskq, 13005 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); 13006 13007 mutex_exit(&dtrace_lock); 13008 13009 /* 13010 * And now, for a little heuristic sleaze: in general, we want to 13011 * match modules as soon as they load. However, we cannot guarantee 13012 * this, because it would lead us to the lock ordering violation 13013 * outlined above. The common case, of course, is that cpu_lock is 13014 * _not_ held -- so we delay here for a clock tick, hoping that that's 13015 * long enough for the task queue to do its work. If it's not, it's 13016 * not a serious problem -- it just means that the module that we 13017 * just loaded may not be immediately instrumentable. 13018 */ 13019 delay(1); 13020 } 13021 13022 static void 13023 dtrace_module_unloaded(struct modctl *ctl) 13024 { 13025 dtrace_probe_t template, *probe, *first, *next; 13026 dtrace_provider_t *prov; 13027 13028 template.dtpr_mod = ctl->mod_modname; 13029 13030 mutex_enter(&dtrace_provider_lock); 13031 mutex_enter(&mod_lock); 13032 mutex_enter(&dtrace_lock); 13033 13034 if (dtrace_bymod == NULL) { 13035 /* 13036 * The DTrace module is loaded (obviously) but not attached; 13037 * we don't have any work to do. 13038 */ 13039 mutex_exit(&dtrace_provider_lock); 13040 mutex_exit(&mod_lock); 13041 mutex_exit(&dtrace_lock); 13042 return; 13043 } 13044 13045 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 13046 probe != NULL; probe = probe->dtpr_nextmod) { 13047 if (probe->dtpr_ecb != NULL) { 13048 mutex_exit(&dtrace_provider_lock); 13049 mutex_exit(&mod_lock); 13050 mutex_exit(&dtrace_lock); 13051 13052 /* 13053 * This shouldn't _actually_ be possible -- we're 13054 * unloading a module that has an enabled probe in it.
13055 * (It's normally up to the provider to make sure that 13056 * this can't happen.) However, because dtps_enable() 13057 * doesn't have a failure mode, there can be an 13058 * enable/unload race. Upshot: we don't want to 13059 * assert, but we're not going to disable the 13060 * probe, either. 13061 */ 13062 if (dtrace_err_verbose) { 13063 cmn_err(CE_WARN, "unloaded module '%s' had " 13064 "enabled probes", ctl->mod_modname); 13065 } 13066 13067 return; 13068 } 13069 } 13070 13071 probe = first; 13072 13073 for (first = NULL; probe != NULL; probe = next) { 13074 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 13075 13076 dtrace_probes[probe->dtpr_id - 1] = NULL; 13077 13078 next = probe->dtpr_nextmod; 13079 dtrace_hash_remove(dtrace_bymod, probe); 13080 dtrace_hash_remove(dtrace_byfunc, probe); 13081 dtrace_hash_remove(dtrace_byname, probe); 13082 13083 if (first == NULL) { 13084 first = probe; 13085 probe->dtpr_nextmod = NULL; 13086 } else { 13087 probe->dtpr_nextmod = first; 13088 first = probe; 13089 } 13090 } 13091 13092 /* 13093 * We've removed all of the module's probes from the hash chains and 13094 * from the probe array. Now issue a dtrace_sync() to be sure that 13095 * everyone has cleared out from any probe array processing. 13096 */ 13097 dtrace_sync(); 13098 13099 for (probe = first; probe != NULL; probe = first) { 13100 first = probe->dtpr_nextmod; 13101 prov = probe->dtpr_provider; 13102 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 13103 probe->dtpr_arg); 13104 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 13105 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 13106 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 13107 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 13108 kmem_free(probe, sizeof (dtrace_probe_t)); 13109 } 13110 13111 mutex_exit(&dtrace_lock); 13112 mutex_exit(&mod_lock); 13113 mutex_exit(&dtrace_provider_lock); 13114 } 13115 13116 void 13117 dtrace_suspend(void) 13118 { 13119 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 13120 } 13121 13122 void 13123 dtrace_resume(void) 13124 { 13125 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 13126 } 13127 13128 static int 13129 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 13130 { 13131 ASSERT(MUTEX_HELD(&cpu_lock)); 13132 mutex_enter(&dtrace_lock); 13133 13134 switch (what) { 13135 case CPU_CONFIG: { 13136 dtrace_state_t *state; 13137 dtrace_optval_t *opt, rs, c; 13138 13139 /* 13140 * For now, we only allocate a new buffer for anonymous state. 13141 */ 13142 if ((state = dtrace_anon.dta_state) == NULL) 13143 break; 13144 13145 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 13146 break; 13147 13148 opt = state->dts_options; 13149 c = opt[DTRACEOPT_CPU]; 13150 13151 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 13152 break; 13153 13154 /* 13155 * Regardless of what the actual policy is, we're going to 13156 * temporarily set our resize policy to be manual. We're 13157 * also going to temporarily set our CPU option to denote 13158 * the newly configured CPU. 13159 */ 13160 rs = opt[DTRACEOPT_BUFRESIZE]; 13161 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 13162 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 13163 13164 (void) dtrace_state_buffers(state); 13165 13166 opt[DTRACEOPT_BUFRESIZE] = rs; 13167 opt[DTRACEOPT_CPU] = c; 13168 13169 break; 13170 } 13171 13172 case CPU_UNCONFIG: 13173 /* 13174 * We don't free the buffer in the CPU_UNCONFIG case. 
(The 13175 * buffer will be freed when the consumer exits.) 13176 */ 13177 break; 13178 13179 default: 13180 break; 13181 } 13182 13183 mutex_exit(&dtrace_lock); 13184 return (0); 13185 } 13186 13187 static void 13188 dtrace_cpu_setup_initial(processorid_t cpu) 13189 { 13190 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 13191 } 13192 13193 static void 13194 dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 13195 { 13196 if (dtrace_toxranges >= dtrace_toxranges_max) { 13197 int osize, nsize; 13198 dtrace_toxrange_t *range; 13199 13200 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 13201 13202 if (osize == 0) { 13203 ASSERT(dtrace_toxrange == NULL); 13204 ASSERT(dtrace_toxranges_max == 0); 13205 dtrace_toxranges_max = 1; 13206 } else { 13207 dtrace_toxranges_max <<= 1; 13208 } 13209 13210 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 13211 range = kmem_zalloc(nsize, KM_SLEEP); 13212 13213 if (dtrace_toxrange != NULL) { 13214 ASSERT(osize != 0); 13215 bcopy(dtrace_toxrange, range, osize); 13216 kmem_free(dtrace_toxrange, osize); 13217 } 13218 13219 dtrace_toxrange = range; 13220 } 13221 13222 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == NULL); 13223 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == NULL); 13224 13225 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 13226 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 13227 dtrace_toxranges++; 13228 } 13229 13230 /* 13231 * DTrace Driver Cookbook Functions 13232 */ 13233 /*ARGSUSED*/ 13234 static int 13235 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 13236 { 13237 dtrace_provider_id_t id; 13238 dtrace_state_t *state = NULL; 13239 dtrace_enabling_t *enab; 13240 13241 mutex_enter(&cpu_lock); 13242 mutex_enter(&dtrace_provider_lock); 13243 mutex_enter(&dtrace_lock); 13244 13245 if (ddi_soft_state_init(&dtrace_softstate, sizeof (dtrace_state_t) + 13246 NCPU * sizeof (dtrace_buffer_t), 0) != 0) { 13247 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 13248 mutex_exit(&cpu_lock); 13249 mutex_exit(&dtrace_provider_lock); 13250 mutex_exit(&dtrace_lock); 13251 return (DDI_FAILURE); 13252 } 13253 13254 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 13255 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 13256 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 13257 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 13258 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 13259 ddi_remove_minor_node(devi, NULL); 13260 ddi_soft_state_fini(&dtrace_softstate); 13261 mutex_exit(&cpu_lock); 13262 mutex_exit(&dtrace_provider_lock); 13263 mutex_exit(&dtrace_lock); 13264 return (DDI_FAILURE); 13265 } 13266 13267 ddi_report_dev(devi); 13268 dtrace_devi = devi; 13269 13270 dtrace_modload = dtrace_module_loaded; 13271 dtrace_modunload = dtrace_module_unloaded; 13272 dtrace_cpu_init = dtrace_cpu_setup_initial; 13273 dtrace_helpers_cleanup = dtrace_helpers_destroy; 13274 dtrace_helpers_fork = dtrace_helpers_duplicate; 13275 dtrace_cpustart_init = dtrace_suspend; 13276 dtrace_cpustart_fini = dtrace_resume; 13277 dtrace_debugger_init = dtrace_suspend; 13278 dtrace_debugger_fini = dtrace_resume; 13279 dtrace_kreloc_init = dtrace_suspend; 13280 dtrace_kreloc_fini = dtrace_resume; 13281 13282 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 13283 13284 ASSERT(MUTEX_HELD(&cpu_lock)); 13285 13286 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 13287 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 13288 dtrace_minor = 
vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 13289 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 13290 VM_SLEEP | VMC_IDENTIFIER); 13291 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 13292 1, INT_MAX, 0); 13293 13294 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 13295 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 13296 NULL, NULL, NULL, NULL, NULL, 0); 13297 13298 ASSERT(MUTEX_HELD(&cpu_lock)); 13299 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 13300 offsetof(dtrace_probe_t, dtpr_nextmod), 13301 offsetof(dtrace_probe_t, dtpr_prevmod)); 13302 13303 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 13304 offsetof(dtrace_probe_t, dtpr_nextfunc), 13305 offsetof(dtrace_probe_t, dtpr_prevfunc)); 13306 13307 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 13308 offsetof(dtrace_probe_t, dtpr_nextname), 13309 offsetof(dtrace_probe_t, dtpr_prevname)); 13310 13311 if (dtrace_retain_max < 1) { 13312 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 13313 "setting to 1", dtrace_retain_max); 13314 dtrace_retain_max = 1; 13315 } 13316 13317 /* 13318 * Now discover our toxic ranges. 13319 */ 13320 dtrace_toxic_ranges(dtrace_toxrange_add); 13321 13322 /* 13323 * Before we register ourselves as a provider to our own framework, 13324 * we would like to assert that dtrace_provider is NULL -- but that's 13325 * not true if we were loaded as a dependency of a DTrace provider. 13326 * Once we've registered, we can assert that dtrace_provider is our 13327 * pseudo provider. 13328 */ 13329 (void) dtrace_register("dtrace", &dtrace_provider_attr, 13330 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 13331 13332 ASSERT(dtrace_provider != NULL); 13333 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 13334 13335 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 13336 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 13337 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 13338 dtrace_provider, NULL, NULL, "END", 0, NULL); 13339 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 13340 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 13341 13342 dtrace_anon_property(); 13343 mutex_exit(&cpu_lock); 13344 13345 /* 13346 * If DTrace helper tracing is enabled, we need to allocate the 13347 * trace buffer and initialize the values. 13348 */ 13349 if (dtrace_helptrace_enabled) { 13350 ASSERT(dtrace_helptrace_buffer == NULL); 13351 dtrace_helptrace_buffer = 13352 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 13353 dtrace_helptrace_next = 0; 13354 } 13355 13356 /* 13357 * If there are already providers, we must ask them to provide their 13358 * probes, and then match any anonymous enabling against them. Note 13359 * that there should be no other retained enablings at this time: 13360 * the only retained enablings at this time should be the anonymous 13361 * enabling. 13362 */ 13363 if (dtrace_anon.dta_enabling != NULL) { 13364 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 13365 13366 dtrace_enabling_provide(NULL); 13367 state = dtrace_anon.dta_state; 13368 13369 /* 13370 * We couldn't hold cpu_lock across the above call to 13371 * dtrace_enabling_provide(), but we must hold it to actually 13372 * enable the probes. We have to drop all of our locks, pick 13373 * up cpu_lock, and regain our locks before matching the 13374 * retained anonymous enabling. 
13375 */ 13376 mutex_exit(&dtrace_lock); 13377 mutex_exit(&dtrace_provider_lock); 13378 13379 mutex_enter(&cpu_lock); 13380 mutex_enter(&dtrace_provider_lock); 13381 mutex_enter(&dtrace_lock); 13382 13383 if ((enab = dtrace_anon.dta_enabling) != NULL) 13384 (void) dtrace_enabling_match(enab, NULL); 13385 13386 mutex_exit(&cpu_lock); 13387 } 13388 13389 mutex_exit(&dtrace_lock); 13390 mutex_exit(&dtrace_provider_lock); 13391 13392 if (state != NULL) { 13393 /* 13394 * If we created any anonymous state, set it going now. 13395 */ 13396 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 13397 } 13398 13399 return (DDI_SUCCESS); 13400 } 13401 13402 /*ARGSUSED*/ 13403 static int 13404 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 13405 { 13406 dtrace_state_t *state; 13407 uint32_t priv; 13408 uid_t uid; 13409 zoneid_t zoneid; 13410 13411 if (getminor(*devp) == DTRACEMNRN_HELPER) 13412 return (0); 13413 13414 /* 13415 * If this wasn't an open with the "helper" minor, then it must be 13416 * the "dtrace" minor. 13417 */ 13418 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE); 13419 13420 /* 13421 * If no DTRACE_PRIV_* bits are set in the credential, then the 13422 * caller lacks sufficient permission to do anything with DTrace. 13423 */ 13424 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 13425 if (priv == DTRACE_PRIV_NONE) 13426 return (EACCES); 13427 13428 /* 13429 * Ask all providers to provide all their probes. 13430 */ 13431 mutex_enter(&dtrace_provider_lock); 13432 dtrace_probe_provide(NULL, NULL); 13433 mutex_exit(&dtrace_provider_lock); 13434 13435 mutex_enter(&cpu_lock); 13436 mutex_enter(&dtrace_lock); 13437 dtrace_opens++; 13438 dtrace_membar_producer(); 13439 13440 /* 13441 * If the kernel debugger is active (that is, if the kernel debugger 13442 * modified text in some way), we won't allow the open. 13443 */ 13444 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 13445 dtrace_opens--; 13446 mutex_exit(&cpu_lock); 13447 mutex_exit(&dtrace_lock); 13448 return (EBUSY); 13449 } 13450 13451 state = dtrace_state_create(devp, cred_p); 13452 mutex_exit(&cpu_lock); 13453 13454 if (state == NULL) { 13455 if (--dtrace_opens == 0) 13456 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 13457 mutex_exit(&dtrace_lock); 13458 return (EAGAIN); 13459 } 13460 13461 mutex_exit(&dtrace_lock); 13462 13463 return (0); 13464 } 13465 13466 /*ARGSUSED*/ 13467 static int 13468 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 13469 { 13470 minor_t minor = getminor(dev); 13471 dtrace_state_t *state; 13472 13473 if (minor == DTRACEMNRN_HELPER) 13474 return (0); 13475 13476 state = ddi_get_soft_state(dtrace_softstate, minor); 13477 13478 mutex_enter(&cpu_lock); 13479 mutex_enter(&dtrace_lock); 13480 13481 if (state->dts_anon) { 13482 /* 13483 * There is anonymous state. Destroy that first. 
13484 */ 13485 ASSERT(dtrace_anon.dta_state == NULL); 13486 dtrace_state_destroy(state->dts_anon); 13487 } 13488 13489 dtrace_state_destroy(state); 13490 ASSERT(dtrace_opens > 0); 13491 if (--dtrace_opens == 0) 13492 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 13493 13494 mutex_exit(&dtrace_lock); 13495 mutex_exit(&cpu_lock); 13496 13497 return (0); 13498 } 13499 13500 /*ARGSUSED*/ 13501 static int 13502 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 13503 { 13504 int rval; 13505 dof_helper_t help, *dhp = NULL; 13506 13507 switch (cmd) { 13508 case DTRACEHIOC_ADDDOF: 13509 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 13510 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 13511 return (EFAULT); 13512 } 13513 13514 dhp = &help; 13515 arg = (intptr_t)help.dofhp_dof; 13516 /*FALLTHROUGH*/ 13517 13518 case DTRACEHIOC_ADD: { 13519 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 13520 13521 if (dof == NULL) 13522 return (rval); 13523 13524 mutex_enter(&dtrace_lock); 13525 13526 /* 13527 * dtrace_helper_slurp() takes responsibility for the dof -- 13528 * it may free it now or it may save it and free it later. 13529 */ 13530 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 13531 *rv = rval; 13532 rval = 0; 13533 } else { 13534 rval = EINVAL; 13535 } 13536 13537 mutex_exit(&dtrace_lock); 13538 return (rval); 13539 } 13540 13541 case DTRACEHIOC_REMOVE: { 13542 mutex_enter(&dtrace_lock); 13543 rval = dtrace_helper_destroygen(arg); 13544 mutex_exit(&dtrace_lock); 13545 13546 return (rval); 13547 } 13548 13549 default: 13550 break; 13551 } 13552 13553 return (ENOTTY); 13554 } 13555 13556 /*ARGSUSED*/ 13557 static int 13558 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 13559 { 13560 minor_t minor = getminor(dev); 13561 dtrace_state_t *state; 13562 int rval; 13563 13564 if (minor == DTRACEMNRN_HELPER) 13565 return (dtrace_ioctl_helper(cmd, arg, rv)); 13566 13567 state = ddi_get_soft_state(dtrace_softstate, minor); 13568 13569 if (state->dts_anon) { 13570 ASSERT(dtrace_anon.dta_state == NULL); 13571 state = state->dts_anon; 13572 } 13573 13574 switch (cmd) { 13575 case DTRACEIOC_PROVIDER: { 13576 dtrace_providerdesc_t pvd; 13577 dtrace_provider_t *pvp; 13578 13579 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 13580 return (EFAULT); 13581 13582 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 13583 mutex_enter(&dtrace_provider_lock); 13584 13585 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 13586 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 13587 break; 13588 } 13589 13590 mutex_exit(&dtrace_provider_lock); 13591 13592 if (pvp == NULL) 13593 return (ESRCH); 13594 13595 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 13596 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 13597 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 13598 return (EFAULT); 13599 13600 return (0); 13601 } 13602 13603 case DTRACEIOC_EPROBE: { 13604 dtrace_eprobedesc_t epdesc; 13605 dtrace_ecb_t *ecb; 13606 dtrace_action_t *act; 13607 void *buf; 13608 size_t size; 13609 uintptr_t dest; 13610 int nrecs; 13611 13612 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 13613 return (EFAULT); 13614 13615 mutex_enter(&dtrace_lock); 13616 13617 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 13618 mutex_exit(&dtrace_lock); 13619 return (EINVAL); 13620 } 13621 13622 if (ecb->dte_probe == NULL) { 13623 mutex_exit(&dtrace_lock); 13624 return (EINVAL); 13625 } 13626 13627 epdesc.dtepd_probeid = 
ecb->dte_probe->dtpr_id; 13628 epdesc.dtepd_uarg = ecb->dte_uarg; 13629 epdesc.dtepd_size = ecb->dte_size; 13630 13631 nrecs = epdesc.dtepd_nrecs; 13632 epdesc.dtepd_nrecs = 0; 13633 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 13634 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 13635 continue; 13636 13637 epdesc.dtepd_nrecs++; 13638 } 13639 13640 /* 13641 * Now that we have the size, we need to allocate a temporary 13642 * buffer in which to store the complete description. We need 13643 * the temporary buffer to be able to drop dtrace_lock() 13644 * across the copyout(), below. 13645 */ 13646 size = sizeof (dtrace_eprobedesc_t) + 13647 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 13648 13649 buf = kmem_alloc(size, KM_SLEEP); 13650 dest = (uintptr_t)buf; 13651 13652 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 13653 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 13654 13655 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 13656 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 13657 continue; 13658 13659 if (nrecs-- == 0) 13660 break; 13661 13662 bcopy(&act->dta_rec, (void *)dest, 13663 sizeof (dtrace_recdesc_t)); 13664 dest += sizeof (dtrace_recdesc_t); 13665 } 13666 13667 mutex_exit(&dtrace_lock); 13668 13669 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 13670 kmem_free(buf, size); 13671 return (EFAULT); 13672 } 13673 13674 kmem_free(buf, size); 13675 return (0); 13676 } 13677 13678 case DTRACEIOC_AGGDESC: { 13679 dtrace_aggdesc_t aggdesc; 13680 dtrace_action_t *act; 13681 dtrace_aggregation_t *agg; 13682 int nrecs; 13683 uint32_t offs; 13684 dtrace_recdesc_t *lrec; 13685 void *buf; 13686 size_t size; 13687 uintptr_t dest; 13688 13689 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 13690 return (EFAULT); 13691 13692 mutex_enter(&dtrace_lock); 13693 13694 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 13695 mutex_exit(&dtrace_lock); 13696 return (EINVAL); 13697 } 13698 13699 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 13700 13701 nrecs = aggdesc.dtagd_nrecs; 13702 aggdesc.dtagd_nrecs = 0; 13703 13704 offs = agg->dtag_base; 13705 lrec = &agg->dtag_action.dta_rec; 13706 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 13707 13708 for (act = agg->dtag_first; ; act = act->dta_next) { 13709 ASSERT(act->dta_intuple || 13710 DTRACEACT_ISAGG(act->dta_kind)); 13711 13712 /* 13713 * If this action has a record size of zero, it 13714 * denotes an argument to the aggregating action. 13715 * Because the presence of this record doesn't (or 13716 * shouldn't) affect the way the data is interpreted, 13717 * we don't copy it out to save user-level the 13718 * confusion of dealing with a zero-length record. 13719 */ 13720 if (act->dta_rec.dtrd_size == 0) { 13721 ASSERT(agg->dtag_hasarg); 13722 continue; 13723 } 13724 13725 aggdesc.dtagd_nrecs++; 13726 13727 if (act == &agg->dtag_action) 13728 break; 13729 } 13730 13731 /* 13732 * Now that we have the size, we need to allocate a temporary 13733 * buffer in which to store the complete description. We need 13734 * the temporary buffer to be able to drop dtrace_lock() 13735 * across the copyout(), below. 
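 * (copyout() may fault on and wait for the user buffer, which we must
 * not do while holding dtrace_lock.)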
13736 */ 13737 size = sizeof (dtrace_aggdesc_t) + 13738 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 13739 13740 buf = kmem_alloc(size, KM_SLEEP); 13741 dest = (uintptr_t)buf; 13742 13743 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 13744 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 13745 13746 for (act = agg->dtag_first; ; act = act->dta_next) { 13747 dtrace_recdesc_t rec = act->dta_rec; 13748 13749 /* 13750 * See the comment in the above loop for why we pass 13751 * over zero-length records. 13752 */ 13753 if (rec.dtrd_size == 0) { 13754 ASSERT(agg->dtag_hasarg); 13755 continue; 13756 } 13757 13758 if (nrecs-- == 0) 13759 break; 13760 13761 rec.dtrd_offset -= offs; 13762 bcopy(&rec, (void *)dest, sizeof (rec)); 13763 dest += sizeof (dtrace_recdesc_t); 13764 13765 if (act == &agg->dtag_action) 13766 break; 13767 } 13768 13769 mutex_exit(&dtrace_lock); 13770 13771 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 13772 kmem_free(buf, size); 13773 return (EFAULT); 13774 } 13775 13776 kmem_free(buf, size); 13777 return (0); 13778 } 13779 13780 case DTRACEIOC_ENABLE: { 13781 dof_hdr_t *dof; 13782 dtrace_enabling_t *enab = NULL; 13783 dtrace_vstate_t *vstate; 13784 int err = 0; 13785 13786 *rv = 0; 13787 13788 /* 13789 * If a NULL argument has been passed, we take this as our 13790 * cue to reevaluate our enablings. 13791 */ 13792 if (arg == NULL) { 13793 mutex_enter(&cpu_lock); 13794 mutex_enter(&dtrace_lock); 13795 err = dtrace_enabling_matchstate(state, rv); 13796 mutex_exit(&dtrace_lock); 13797 mutex_exit(&cpu_lock); 13798 13799 return (err); 13800 } 13801 13802 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 13803 return (rval); 13804 13805 mutex_enter(&cpu_lock); 13806 mutex_enter(&dtrace_lock); 13807 vstate = &state->dts_vstate; 13808 13809 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 13810 mutex_exit(&dtrace_lock); 13811 mutex_exit(&cpu_lock); 13812 dtrace_dof_destroy(dof); 13813 return (EBUSY); 13814 } 13815 13816 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 13817 mutex_exit(&dtrace_lock); 13818 mutex_exit(&cpu_lock); 13819 dtrace_dof_destroy(dof); 13820 return (EINVAL); 13821 } 13822 13823 if ((rval = dtrace_dof_options(dof, state)) != 0) { 13824 dtrace_enabling_destroy(enab); 13825 mutex_exit(&dtrace_lock); 13826 mutex_exit(&cpu_lock); 13827 dtrace_dof_destroy(dof); 13828 return (rval); 13829 } 13830 13831 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 13832 err = dtrace_enabling_retain(enab); 13833 } else { 13834 dtrace_enabling_destroy(enab); 13835 } 13836 13837 mutex_exit(&cpu_lock); 13838 mutex_exit(&dtrace_lock); 13839 dtrace_dof_destroy(dof); 13840 13841 return (err); 13842 } 13843 13844 case DTRACEIOC_REPLICATE: { 13845 dtrace_repldesc_t desc; 13846 dtrace_probedesc_t *match = &desc.dtrpd_match; 13847 dtrace_probedesc_t *create = &desc.dtrpd_create; 13848 int err; 13849 13850 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 13851 return (EFAULT); 13852 13853 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 13854 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 13855 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 13856 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 13857 13858 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 13859 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 13860 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 13861 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 13862 13863 mutex_enter(&dtrace_lock); 13864 err = dtrace_enabling_replicate(state, match, create); 13865 
mutex_exit(&dtrace_lock); 13866 13867 return (err); 13868 } 13869 13870 case DTRACEIOC_PROBEMATCH: 13871 case DTRACEIOC_PROBES: { 13872 dtrace_probe_t *probe = NULL; 13873 dtrace_probedesc_t desc; 13874 dtrace_probekey_t pkey; 13875 dtrace_id_t i; 13876 int m = 0; 13877 uint32_t priv; 13878 uid_t uid; 13879 zoneid_t zoneid; 13880 13881 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 13882 return (EFAULT); 13883 13884 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 13885 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 13886 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 13887 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 13888 13889 /* 13890 * Before we attempt to match this probe, we want to give 13891 * all providers the opportunity to provide it. 13892 */ 13893 if (desc.dtpd_id == DTRACE_IDNONE) { 13894 mutex_enter(&dtrace_provider_lock); 13895 dtrace_probe_provide(&desc, NULL); 13896 mutex_exit(&dtrace_provider_lock); 13897 desc.dtpd_id++; 13898 } 13899 13900 if (cmd == DTRACEIOC_PROBEMATCH) { 13901 dtrace_probekey(&desc, &pkey); 13902 pkey.dtpk_id = DTRACE_IDNONE; 13903 } 13904 13905 dtrace_cred2priv(cr, &priv, &uid, &zoneid); 13906 13907 mutex_enter(&dtrace_lock); 13908 13909 if (cmd == DTRACEIOC_PROBEMATCH) { 13910 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 13911 if ((probe = dtrace_probes[i - 1]) != NULL && 13912 (m = dtrace_match_probe(probe, &pkey, 13913 priv, uid, zoneid)) != 0) 13914 break; 13915 } 13916 13917 if (m < 0) { 13918 mutex_exit(&dtrace_lock); 13919 return (EINVAL); 13920 } 13921 13922 } else { 13923 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 13924 if ((probe = dtrace_probes[i - 1]) != NULL && 13925 dtrace_match_priv(probe, priv, uid, zoneid)) 13926 break; 13927 } 13928 } 13929 13930 if (probe == NULL) { 13931 mutex_exit(&dtrace_lock); 13932 return (ESRCH); 13933 } 13934 13935 dtrace_probe_description(probe, &desc); 13936 mutex_exit(&dtrace_lock); 13937 13938 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 13939 return (EFAULT); 13940 13941 return (0); 13942 } 13943 13944 case DTRACEIOC_PROBEARG: { 13945 dtrace_argdesc_t desc; 13946 dtrace_probe_t *probe; 13947 dtrace_provider_t *prov; 13948 13949 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 13950 return (EFAULT); 13951 13952 if (desc.dtargd_id == DTRACE_IDNONE) 13953 return (EINVAL); 13954 13955 if (desc.dtargd_ndx == DTRACE_ARGNONE) 13956 return (EINVAL); 13957 13958 mutex_enter(&dtrace_provider_lock); 13959 mutex_enter(&mod_lock); 13960 mutex_enter(&dtrace_lock); 13961 13962 if (desc.dtargd_id > dtrace_nprobes) { 13963 mutex_exit(&dtrace_lock); 13964 mutex_exit(&mod_lock); 13965 mutex_exit(&dtrace_provider_lock); 13966 return (EINVAL); 13967 } 13968 13969 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 13970 mutex_exit(&dtrace_lock); 13971 mutex_exit(&mod_lock); 13972 mutex_exit(&dtrace_provider_lock); 13973 return (EINVAL); 13974 } 13975 13976 mutex_exit(&dtrace_lock); 13977 13978 prov = probe->dtpr_provider; 13979 13980 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 13981 /* 13982 * There isn't any typed information for this probe. 13983 * Set the argument number to DTRACE_ARGNONE. 
13984 */ 13985 desc.dtargd_ndx = DTRACE_ARGNONE; 13986 } else { 13987 desc.dtargd_native[0] = '\0'; 13988 desc.dtargd_xlate[0] = '\0'; 13989 desc.dtargd_mapping = desc.dtargd_ndx; 13990 13991 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, 13992 probe->dtpr_id, probe->dtpr_arg, &desc); 13993 } 13994 13995 mutex_exit(&mod_lock); 13996 mutex_exit(&dtrace_provider_lock); 13997 13998 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 13999 return (EFAULT); 14000 14001 return (0); 14002 } 14003 14004 case DTRACEIOC_GO: { 14005 processorid_t cpuid; 14006 rval = dtrace_state_go(state, &cpuid); 14007 14008 if (rval != 0) 14009 return (rval); 14010 14011 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 14012 return (EFAULT); 14013 14014 return (0); 14015 } 14016 14017 case DTRACEIOC_STOP: { 14018 processorid_t cpuid; 14019 14020 mutex_enter(&dtrace_lock); 14021 rval = dtrace_state_stop(state, &cpuid); 14022 mutex_exit(&dtrace_lock); 14023 14024 if (rval != 0) 14025 return (rval); 14026 14027 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 14028 return (EFAULT); 14029 14030 return (0); 14031 } 14032 14033 case DTRACEIOC_DOFGET: { 14034 dof_hdr_t hdr, *dof; 14035 uint64_t len; 14036 14037 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) 14038 return (EFAULT); 14039 14040 mutex_enter(&dtrace_lock); 14041 dof = dtrace_dof_create(state); 14042 mutex_exit(&dtrace_lock); 14043 14044 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); 14045 rval = copyout(dof, (void *)arg, len); 14046 dtrace_dof_destroy(dof); 14047 14048 return (rval == 0 ? 0 : EFAULT); 14049 } 14050 14051 case DTRACEIOC_AGGSNAP: 14052 case DTRACEIOC_BUFSNAP: { 14053 dtrace_bufdesc_t desc; 14054 caddr_t cached; 14055 dtrace_buffer_t *buf; 14056 14057 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 14058 return (EFAULT); 14059 14060 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) 14061 return (EINVAL); 14062 14063 mutex_enter(&dtrace_lock); 14064 14065 if (cmd == DTRACEIOC_BUFSNAP) { 14066 buf = &state->dts_buffer[desc.dtbd_cpu]; 14067 } else { 14068 buf = &state->dts_aggbuffer[desc.dtbd_cpu]; 14069 } 14070 14071 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { 14072 size_t sz = buf->dtb_offset; 14073 14074 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { 14075 mutex_exit(&dtrace_lock); 14076 return (EBUSY); 14077 } 14078 14079 /* 14080 * If this buffer has already been consumed, we're 14081 * going to indicate that there's nothing left here 14082 * to consume. 14083 */ 14084 if (buf->dtb_flags & DTRACEBUF_CONSUMED) { 14085 mutex_exit(&dtrace_lock); 14086 14087 desc.dtbd_size = 0; 14088 desc.dtbd_drops = 0; 14089 desc.dtbd_errors = 0; 14090 desc.dtbd_oldest = 0; 14091 sz = sizeof (desc); 14092 14093 if (copyout(&desc, (void *)arg, sz) != 0) 14094 return (EFAULT); 14095 14096 return (0); 14097 } 14098 14099 /* 14100 * If this is a ring buffer that has wrapped, we want 14101 * to copy the whole thing out. 
14102 */ 14103 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 14104 dtrace_buffer_polish(buf); 14105 sz = buf->dtb_size; 14106 } 14107 14108 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { 14109 mutex_exit(&dtrace_lock); 14110 return (EFAULT); 14111 } 14112 14113 desc.dtbd_size = sz; 14114 desc.dtbd_drops = buf->dtb_drops; 14115 desc.dtbd_errors = buf->dtb_errors; 14116 desc.dtbd_oldest = buf->dtb_xamot_offset; 14117 14118 mutex_exit(&dtrace_lock); 14119 14120 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 14121 return (EFAULT); 14122 14123 buf->dtb_flags |= DTRACEBUF_CONSUMED; 14124 14125 return (0); 14126 } 14127 14128 if (buf->dtb_tomax == NULL) { 14129 ASSERT(buf->dtb_xamot == NULL); 14130 mutex_exit(&dtrace_lock); 14131 return (ENOENT); 14132 } 14133 14134 cached = buf->dtb_tomax; 14135 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 14136 14137 dtrace_xcall(desc.dtbd_cpu, 14138 (dtrace_xcall_t)dtrace_buffer_switch, buf); 14139 14140 state->dts_errors += buf->dtb_xamot_errors; 14141 14142 /* 14143 * If the buffers did not actually switch, then the cross call 14144 * did not take place -- presumably because the given CPU is 14145 * not in the ready set. If this is the case, we'll return 14146 * ENOENT. 14147 */ 14148 if (buf->dtb_tomax == cached) { 14149 ASSERT(buf->dtb_xamot != cached); 14150 mutex_exit(&dtrace_lock); 14151 return (ENOENT); 14152 } 14153 14154 ASSERT(cached == buf->dtb_xamot); 14155 14156 /* 14157 * We have our snapshot; now copy it out. 14158 */ 14159 if (copyout(buf->dtb_xamot, desc.dtbd_data, 14160 buf->dtb_xamot_offset) != 0) { 14161 mutex_exit(&dtrace_lock); 14162 return (EFAULT); 14163 } 14164 14165 desc.dtbd_size = buf->dtb_xamot_offset; 14166 desc.dtbd_drops = buf->dtb_xamot_drops; 14167 desc.dtbd_errors = buf->dtb_xamot_errors; 14168 desc.dtbd_oldest = 0; 14169 14170 mutex_exit(&dtrace_lock); 14171 14172 /* 14173 * Finally, copy out the buffer description. 14174 */ 14175 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 14176 return (EFAULT); 14177 14178 return (0); 14179 } 14180 14181 case DTRACEIOC_CONF: { 14182 dtrace_conf_t conf; 14183 14184 bzero(&conf, sizeof (conf)); 14185 conf.dtc_difversion = DIF_VERSION; 14186 conf.dtc_difintregs = DIF_DIR_NREGS; 14187 conf.dtc_diftupregs = DIF_DTR_NREGS; 14188 conf.dtc_ctfmodel = CTF_MODEL_NATIVE; 14189 14190 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) 14191 return (EFAULT); 14192 14193 return (0); 14194 } 14195 14196 case DTRACEIOC_STATUS: { 14197 dtrace_status_t stat; 14198 dtrace_dstate_t *dstate; 14199 int i, j; 14200 uint64_t nerrs; 14201 14202 /* 14203 * See the comment in dtrace_state_deadman() for the reason 14204 * for setting dts_laststatus to INT64_MAX before setting 14205 * it to the correct value. 
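 * (Briefly: lacking an atomic 64-bit store, we write INT64_MAX first --
 * with a producer barrier in between -- so that dts_laststatus never
 * transiently appears to be a stale, too-old value.)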

	case DTRACEIOC_STATUS: {
		dtrace_status_t stat;
		dtrace_dstate_t *dstate;
		int i, j;
		uint64_t nerrs;

		/*
		 * See the comment in dtrace_state_deadman() for the reason
		 * for setting dts_laststatus to INT64_MAX before setting
		 * it to the correct value.
		 */
		state->dts_laststatus = INT64_MAX;
		dtrace_membar_producer();
		state->dts_laststatus = dtrace_gethrtime();

		bzero(&stat, sizeof (stat));

		mutex_enter(&dtrace_lock);

		if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
			stat.dtst_exiting = 1;

		nerrs = state->dts_errors;
		dstate = &state->dts_vstate.dtvs_dynvars;

		for (i = 0; i < NCPU; i++) {
			dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];

			stat.dtst_dyndrops += dcpu->dtdsc_drops;
			stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
			stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;

			if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
				stat.dtst_filled++;

			nerrs += state->dts_buffer[i].dtb_errors;

			for (j = 0; j < state->dts_nspeculations; j++) {
				dtrace_speculation_t *spec;
				dtrace_buffer_t *buf;

				spec = &state->dts_speculations[j];
				buf = &spec->dtsp_buffer[i];
				stat.dtst_specdrops += buf->dtb_xamot_drops;
			}
		}

		stat.dtst_specdrops_busy = state->dts_speculations_busy;
		stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
		stat.dtst_stkstroverflows = state->dts_stkstroverflows;
		stat.dtst_dblerrors = state->dts_dblerrors;
		stat.dtst_killed =
		    (state->dts_activity == DTRACE_ACTIVITY_KILLED);
		stat.dtst_errors = nerrs;

		mutex_exit(&dtrace_lock);

		if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_FORMAT: {
		dtrace_fmtdesc_t fmt;
		char *str;
		int len;

		if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);

		if (fmt.dtfd_format == 0 ||
		    fmt.dtfd_format > state->dts_nformats) {
			mutex_exit(&dtrace_lock);
			return (EINVAL);
		}

		/*
		 * Format strings are allocated contiguously and they are
		 * never freed; if a format index is less than the number
		 * of formats, we can assert that the format map is non-NULL
		 * and that the format for the specified index is non-NULL.
		 */
		ASSERT(state->dts_formats != NULL);
		str = state->dts_formats[fmt.dtfd_format - 1];
		ASSERT(str != NULL);

		len = strlen(str) + 1;

		if (len > fmt.dtfd_length) {
			fmt.dtfd_length = len;

			if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		} else {
			if (copyout(str, fmt.dtfd_string, len) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		}

		mutex_exit(&dtrace_lock);
		return (0);
	}

	default:
		break;
	}

	return (ENOTTY);
}
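/*
 * For illustration only: the DTRACEIOC_FORMAT case above implements a simple
 * length negotiation -- if the supplied dtfd_length is too small for the
 * format string, the required length is copied back out in the descriptor;
 * otherwise the string itself is copied to dtfd_string.  The hypothetical
 * userland sketch below shows the resulting two-call pattern; it is not part
 * of this driver, and "fd", "format" and the unchecked malloc() are
 * assumptions made for brevity.
 */
#if 0
	dtrace_fmtdesc_t fmt;

	bzero(&fmt, sizeof (fmt));
	fmt.dtfd_format = format;		/* format index from a record */

	(void) ioctl(fd, DTRACEIOC_FORMAT, &fmt);	/* learn the length */

	fmt.dtfd_string = malloc(fmt.dtfd_length);
	(void) ioctl(fd, DTRACEIOC_FORMAT, &fmt);	/* fetch the string */
#endif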

/*ARGSUSED*/
static int
dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	dtrace_state_t *state;

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	mutex_enter(&cpu_lock);
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);

	ASSERT(dtrace_opens == 0);

	if (dtrace_helpers > 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	dtrace_provider = NULL;

	if ((state = dtrace_anon_grab()) != NULL) {
		/*
		 * If there were ECBs on this state, the provider should not
		 * have been allowed to detach; assert that there are none.
		 */
		ASSERT(state->dts_necbs == 0);
		dtrace_state_destroy(state);

		/*
		 * If we're being detached with anonymous state, we need to
		 * indicate to the kernel debugger that DTrace is now inactive.
		 */
		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
	}

	bzero(&dtrace_anon, sizeof (dtrace_anon_t));
	unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
	dtrace_cpu_init = NULL;
	dtrace_helpers_cleanup = NULL;
	dtrace_helpers_fork = NULL;
	dtrace_cpustart_init = NULL;
	dtrace_cpustart_fini = NULL;
	dtrace_debugger_init = NULL;
	dtrace_debugger_fini = NULL;
	dtrace_kreloc_init = NULL;
	dtrace_kreloc_fini = NULL;
	dtrace_modload = NULL;
	dtrace_modunload = NULL;

	mutex_exit(&cpu_lock);

	if (dtrace_helptrace_enabled) {
		kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
		dtrace_helptrace_buffer = NULL;
	}

	kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
	dtrace_probes = NULL;
	dtrace_nprobes = 0;

	dtrace_hash_destroy(dtrace_bymod);
	dtrace_hash_destroy(dtrace_byfunc);
	dtrace_hash_destroy(dtrace_byname);
	dtrace_bymod = NULL;
	dtrace_byfunc = NULL;
	dtrace_byname = NULL;

	kmem_cache_destroy(dtrace_state_cache);
	vmem_destroy(dtrace_minor);
	vmem_destroy(dtrace_arena);

	if (dtrace_toxrange != NULL) {
		kmem_free(dtrace_toxrange,
		    dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
		dtrace_toxrange = NULL;
		dtrace_toxranges = 0;
		dtrace_toxranges_max = 0;
	}

	ddi_remove_minor_node(dtrace_devi, NULL);
	dtrace_devi = NULL;

	ddi_soft_state_fini(&dtrace_softstate);

	ASSERT(dtrace_vtime_references == 0);
	ASSERT(dtrace_opens == 0);
	ASSERT(dtrace_retained == NULL);

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	/*
	 * We don't destroy the task queue until after we have dropped our
	 * locks (taskq_destroy() may block on running tasks). To prevent
	 * attempting to do work after we have effectively detached but before
	 * the task queue has been destroyed, all tasks dispatched via the
	 * task queue must check that DTrace is still attached before
	 * performing any operation.
	 */
	taskq_destroy(dtrace_taskq);
	dtrace_taskq = NULL;

	return (DDI_SUCCESS);
}
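/*
 * For illustration of the constraint described at the end of dtrace_detach():
 * once the locks have been dropped but before taskq_destroy() completes, a
 * task may still run, so any work dispatched on dtrace_taskq must first
 * verify that the driver is still attached.  The sketch below is hypothetical
 * and not part of this driver; the callback name and the use of dtrace_devi
 * as the attach indicator stand in for whatever check a given task actually
 * performs.
 */
#if 0
static void
example_deferred_work(void *arg)
{
	mutex_enter(&dtrace_lock);

	if (dtrace_devi == NULL) {
		/* The driver has detached; there is nothing left to do. */
		mutex_exit(&dtrace_lock);
		return;
	}

	/* ... perform the deferred work under dtrace_lock ... */

	mutex_exit(&dtrace_lock);
}
#endif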

/*ARGSUSED*/
static int
dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)dtrace_devi;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}

static struct cb_ops dtrace_cb_ops = {
	dtrace_open,		/* open */
	dtrace_close,		/* close */
	nulldev,		/* strategy */
	nulldev,		/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	dtrace_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_MP		/* Driver compatibility flag */
};

static struct dev_ops dtrace_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	dtrace_info,		/* get_dev_info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	dtrace_attach,		/* attach */
	dtrace_detach,		/* detach */
	nodev,			/* reset */
	&dtrace_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	nodev			/* dev power */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* module type (this is a pseudo driver) */
	"Dynamic Tracing",	/* name of module */
	&dtrace_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}
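
/*
 * For illustration only: the cb_ops table above routes open(2) and ioctl(2)
 * on the DTrace pseudo-device to dtrace_open() and dtrace_ioctl().  The
 * hypothetical userland sketch below ties the two together by opening the
 * device and checking the advertised DIF version via DTRACEIOC_CONF.  It is
 * not part of this driver; the device path is the conventional one but is an
 * assumption here, and real consumers reach this code through libdtrace.
 */
#if 0
	dtrace_conf_t conf;
	int fd = open("/dev/dtrace/dtrace", O_RDWR);

	if (fd == -1 || ioctl(fd, DTRACEIOC_CONF, &conf) != 0)
		return (-1);			/* cannot talk to the framework */

	if (conf.dtc_difversion < DIF_VERSION_2)
		return (-1);			/* DIF is older than we expect */
#endif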