1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright (c) 2011, Joyent, Inc. All rights reserved. 25 */ 26 27 /* 28 * DTrace - Dynamic Tracing for Solaris 29 * 30 * This is the implementation of the Solaris Dynamic Tracing framework 31 * (DTrace). The user-visible interface to DTrace is described at length in 32 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace 33 * library, the in-kernel DTrace framework, and the DTrace providers are 34 * described in the block comments in the <sys/dtrace.h> header file. The 35 * internal architecture of DTrace is described in the block comments in the 36 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace 37 * implementation very much assume mastery of all of these sources; if one has 38 * an unanswered question about the implementation, one should consult them 39 * first. 40 * 41 * The functions here are ordered roughly as follows: 42 * 43 * - Probe context functions 44 * - Probe hashing functions 45 * - Non-probe context utility functions 46 * - Matching functions 47 * - Provider-to-Framework API functions 48 * - Probe management functions 49 * - DIF object functions 50 * - Format functions 51 * - Predicate functions 52 * - ECB functions 53 * - Buffer functions 54 * - Enabling functions 55 * - DOF functions 56 * - Anonymous enabling functions 57 * - Consumer state functions 58 * - Helper functions 59 * - Hook functions 60 * - Driver cookbook functions 61 * 62 * Each group of functions begins with a block comment labelled the "DTrace 63 * [Group] Functions", allowing one to find each block by searching forward 64 * on capital-f functions. 65 */ 66 #include <sys/errno.h> 67 #include <sys/stat.h> 68 #include <sys/modctl.h> 69 #include <sys/conf.h> 70 #include <sys/systm.h> 71 #include <sys/ddi.h> 72 #include <sys/sunddi.h> 73 #include <sys/cpuvar.h> 74 #include <sys/kmem.h> 75 #include <sys/strsubr.h> 76 #include <sys/sysmacros.h> 77 #include <sys/dtrace_impl.h> 78 #include <sys/atomic.h> 79 #include <sys/cmn_err.h> 80 #include <sys/mutex_impl.h> 81 #include <sys/rwlock_impl.h> 82 #include <sys/ctf_api.h> 83 #include <sys/panic.h> 84 #include <sys/priv_impl.h> 85 #include <sys/policy.h> 86 #include <sys/cred_impl.h> 87 #include <sys/procfs_isa.h> 88 #include <sys/taskq.h> 89 #include <sys/mkdev.h> 90 #include <sys/kdi.h> 91 #include <sys/zone.h> 92 #include <sys/socket.h> 93 #include <netinet/in.h> 94 95 /* 96 * DTrace Tunable Variables 97 * 98 * The following variables may be tuned by adding a line to /etc/system that 99 * includes both the name of the DTrace module ("dtrace") and the name of the 100 * variable. 
For example: 101 * 102 * set dtrace:dtrace_destructive_disallow = 1 103 * 104 * In general, the only variables that one should be tuning this way are those 105 * that affect system-wide DTrace behavior, and for which the default behavior 106 * is undesirable. Most of these variables are tunable on a per-consumer 107 * basis using DTrace options, and need not be tuned on a system-wide basis. 108 * When tuning these variables, avoid pathological values; while some attempt 109 * is made to verify the integrity of these variables, they are not considered 110 * part of the supported interface to DTrace, and they are therefore not 111 * checked comprehensively. Further, these variables should not be tuned 112 * dynamically via "mdb -kw" or other means; they should only be tuned via 113 * /etc/system. 114 */ 115 int dtrace_destructive_disallow = 0; 116 dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024); 117 size_t dtrace_difo_maxsize = (256 * 1024); 118 dtrace_optval_t dtrace_dof_maxsize = (256 * 1024); 119 size_t dtrace_global_maxsize = (16 * 1024); 120 size_t dtrace_actions_max = (16 * 1024); 121 size_t dtrace_retain_max = 1024; 122 dtrace_optval_t dtrace_helper_actions_max = 32; 123 dtrace_optval_t dtrace_helper_providers_max = 32; 124 dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024); 125 size_t dtrace_strsize_default = 256; 126 dtrace_optval_t dtrace_cleanrate_default = 9900990; /* 101 hz */ 127 dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */ 128 dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */ 129 dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */ 130 dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */ 131 dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */ 132 dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */ 133 dtrace_optval_t dtrace_nspec_default = 1; 134 dtrace_optval_t dtrace_specsize_default = 32 * 1024; 135 dtrace_optval_t dtrace_stackframes_default = 20; 136 dtrace_optval_t dtrace_ustackframes_default = 20; 137 dtrace_optval_t dtrace_jstackframes_default = 50; 138 dtrace_optval_t dtrace_jstackstrsize_default = 512; 139 int dtrace_msgdsize_max = 128; 140 hrtime_t dtrace_chill_max = 500 * (NANOSEC / MILLISEC); /* 500 ms */ 141 hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */ 142 int dtrace_devdepth_max = 32; 143 int dtrace_err_verbose; 144 hrtime_t dtrace_deadman_interval = NANOSEC; 145 hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC; 146 hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC; 147 hrtime_t dtrace_unregister_defunct_reap = (hrtime_t)60 * NANOSEC; 148 149 /* 150 * DTrace External Variables 151 * 152 * As dtrace(7D) is a kernel module, any DTrace variables are obviously 153 * available to DTrace consumers via the backtick (`) syntax. One of these, 154 * dtrace_zero, is made deliberately so: it is provided as a source of 155 * well-known, zero-filled memory. While this variable is not documented, 156 * it is used by some translators as an implementation detail. 
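 *
 * Purely as an illustration of that backtick access (the invocation below is
 * a hypothetical example, not taken from any documentation):
 *
 *	# dtrace -qn 'BEGIN { trace(`dtrace_zero[0]); exit(0); }'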
157 */ 158 const char dtrace_zero[256] = { 0 }; /* zero-filled memory */ 159 160 /* 161 * DTrace Internal Variables 162 */ 163 static dev_info_t *dtrace_devi; /* device info */ 164 static vmem_t *dtrace_arena; /* probe ID arena */ 165 static vmem_t *dtrace_minor; /* minor number arena */ 166 static taskq_t *dtrace_taskq; /* task queue */ 167 static dtrace_probe_t **dtrace_probes; /* array of all probes */ 168 static int dtrace_nprobes; /* number of probes */ 169 static dtrace_provider_t *dtrace_provider; /* provider list */ 170 static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */ 171 static int dtrace_opens; /* number of opens */ 172 static int dtrace_helpers; /* number of helpers */ 173 static void *dtrace_softstate; /* softstate pointer */ 174 static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */ 175 static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */ 176 static dtrace_hash_t *dtrace_byname; /* probes hashed by name */ 177 static dtrace_toxrange_t *dtrace_toxrange; /* toxic range array */ 178 static int dtrace_toxranges; /* number of toxic ranges */ 179 static int dtrace_toxranges_max; /* size of toxic range array */ 180 static dtrace_anon_t dtrace_anon; /* anonymous enabling */ 181 static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */ 182 static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */ 183 static kthread_t *dtrace_panicked; /* panicking thread */ 184 static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */ 185 static dtrace_genid_t dtrace_probegen; /* current probe generation */ 186 static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */ 187 static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */ 188 static dtrace_genid_t dtrace_retained_gen; /* current retained enab gen */ 189 static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */ 190 static int dtrace_dynvar_failclean; /* dynvars failed to clean */ 191 192 /* 193 * DTrace Locking 194 * DTrace is protected by three (relatively coarse-grained) locks: 195 * 196 * (1) dtrace_lock is required to manipulate essentially any DTrace state, 197 * including enabling state, probes, ECBs, consumer state, helper state, 198 * etc. Importantly, dtrace_lock is _not_ required when in probe context; 199 * probe context is lock-free -- synchronization is handled via the 200 * dtrace_sync() cross call mechanism. 201 * 202 * (2) dtrace_provider_lock is required when manipulating provider state, or 203 * when provider state must be held constant. 204 * 205 * (3) dtrace_meta_lock is required when manipulating meta provider state, or 206 * when meta provider state must be held constant. 207 * 208 * The lock ordering between these three locks is dtrace_meta_lock before 209 * dtrace_provider_lock before dtrace_lock. (In particular, there are 210 * several places where dtrace_provider_lock is held by the framework as it 211 * calls into the providers -- which then call back into the framework, 212 * grabbing dtrace_lock.) 213 * 214 * There are two other locks in the mix: mod_lock and cpu_lock. With respect 215 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical 216 * role as a coarse-grained lock; it is acquired before both of these locks. 217 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must 218 * be acquired _between_ dtrace_meta_lock and any other DTrace locks. 
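 * Schematically (this is only an illustration of the ordering rules above,
 * not a literal code path), a thread that needed every framework lock would
 * therefore acquire them as:
 *
 *	mutex_enter(&dtrace_meta_lock);
 *	mutex_enter(&cpu_lock);
 *	mutex_enter(&dtrace_provider_lock);
 *	mutex_enter(&dtrace_lock);
 *
 * and release them in the opposite order.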
 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
 * acquired _between_ dtrace_provider_lock and dtrace_lock.
 */
static kmutex_t		dtrace_lock;		/* probe state lock */
static kmutex_t		dtrace_provider_lock;	/* provider state lock */
static kmutex_t		dtrace_meta_lock;	/* meta-provider state lock */

/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t	dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static int
dtrace_enable_nullop(void)
{
	return (0);
}

static dtrace_pops_t	dtrace_provider_ops = {
	(void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop,
	(void (*)(void *, struct modctl *))dtrace_nullop,
	(int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	NULL,
	NULL,
	NULL,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 */
uint32_t dtrace_helptrace_next = 0;
uint32_t dtrace_helptrace_nlocals;
char	*dtrace_helptrace_buffer;
int	dtrace_helptrace_bufsize = 512 * 1024;

#ifdef DEBUG
int	dtrace_helptrace_enabled = 1;
#else
int	dtrace_helptrace_enabled = 0;
#endif

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table. This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation. The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#ifdef DEBUG
static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
static const char	*dtrace_errlast;
static kthread_t	*dtrace_errthread;
static kmutex_t		dtrace_errlock;
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation. There is no real structure to this cpp
 * mishmash -- but is there ever?
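 *
 * One note on the probe hashing macros immediately below: DTRACE_HASHSTR,
 * DTRACE_HASHNEXT and DTRACE_HASHPREV reach their fields through the byte
 * offsets recorded in the dtrace_hash_t (dth_stroffs, dth_nextoffs and
 * dth_prevoffs) rather than through named members -- which is what lets a
 * single hash implementation back the by-module, by-function and by-name
 * probe hashes declared above.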
304 */ 305 #define DTRACE_HASHSTR(hash, probe) \ 306 dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs))) 307 308 #define DTRACE_HASHNEXT(hash, probe) \ 309 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs) 310 311 #define DTRACE_HASHPREV(hash, probe) \ 312 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs) 313 314 #define DTRACE_HASHEQ(hash, lhs, rhs) \ 315 (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \ 316 *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0) 317 318 #define DTRACE_AGGHASHSIZE_SLEW 17 319 320 #define DTRACE_V4MAPPED_OFFSET (sizeof (uint32_t) * 3) 321 322 /* 323 * The key for a thread-local variable consists of the lower 61 bits of the 324 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL. 325 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never 326 * equal to a variable identifier. This is necessary (but not sufficient) to 327 * assure that global associative arrays never collide with thread-local 328 * variables. To guarantee that they cannot collide, we must also define the 329 * order for keying dynamic variables. That order is: 330 * 331 * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ] 332 * 333 * Because the variable-key and the tls-key are in orthogonal spaces, there is 334 * no way for a global variable key signature to match a thread-local key 335 * signature. 336 */ 337 #define DTRACE_TLS_THRKEY(where) { \ 338 uint_t intr = 0; \ 339 uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \ 340 for (; actv; actv >>= 1) \ 341 intr++; \ 342 ASSERT(intr < (1 << 3)); \ 343 (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \ 344 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \ 345 } 346 347 #define DT_BSWAP_8(x) ((x) & 0xff) 348 #define DT_BSWAP_16(x) ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8)) 349 #define DT_BSWAP_32(x) ((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16)) 350 #define DT_BSWAP_64(x) ((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32)) 351 352 #define DT_MASK_LO 0x00000000FFFFFFFFULL 353 354 #define DTRACE_STORE(type, tomax, offset, what) \ 355 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what); 356 357 #ifndef __i386 358 #define DTRACE_ALIGNCHECK(addr, size, flags) \ 359 if (addr & (size - 1)) { \ 360 *flags |= CPU_DTRACE_BADALIGN; \ 361 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \ 362 return (0); \ 363 } 364 #else 365 #define DTRACE_ALIGNCHECK(addr, size, flags) 366 #endif 367 368 /* 369 * Test whether a range of memory starting at testaddr of size testsz falls 370 * within the range of memory described by addr, sz. We take care to avoid 371 * problems with overflow and underflow of the unsigned quantities, and 372 * disallow all negative sizes. Ranges of size 0 are allowed. 373 */ 374 #define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \ 375 ((testaddr) - (baseaddr) < (basesz) && \ 376 (testaddr) + (testsz) - (baseaddr) <= (basesz) && \ 377 (testaddr) + (testsz) >= (testaddr)) 378 379 /* 380 * Test whether alloc_sz bytes will fit in the scratch region. We isolate 381 * alloc_sz on the righthand side of the comparison in order to avoid overflow 382 * or underflow in the comparison with it. This is simpler than the INRANGE 383 * check above, because we know that the dtms_scratch_ptr is valid in the 384 * range. Allocations of size zero are allowed. 
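 *
 * For instance, the naive form of this check,
 *
 *	dtms_scratch_ptr + alloc_sz <= dtms_scratch_base + dtms_scratch_size
 *
 * could wrap around for a very large alloc_sz and erroneously succeed; with
 * alloc_sz isolated on the righthand side, the lefthand side is just the
 * remaining scratch space -- a small, non-negative quantity that cannot
 * wrap -- so no value of alloc_sz can fool the comparison.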
385 */ 386 #define DTRACE_INSCRATCH(mstate, alloc_sz) \ 387 ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \ 388 (mstate)->dtms_scratch_ptr >= (alloc_sz)) 389 390 #define DTRACE_LOADFUNC(bits) \ 391 /*CSTYLED*/ \ 392 uint##bits##_t \ 393 dtrace_load##bits(uintptr_t addr) \ 394 { \ 395 size_t size = bits / NBBY; \ 396 /*CSTYLED*/ \ 397 uint##bits##_t rval; \ 398 int i; \ 399 volatile uint16_t *flags = (volatile uint16_t *) \ 400 &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; \ 401 \ 402 DTRACE_ALIGNCHECK(addr, size, flags); \ 403 \ 404 for (i = 0; i < dtrace_toxranges; i++) { \ 405 if (addr >= dtrace_toxrange[i].dtt_limit) \ 406 continue; \ 407 \ 408 if (addr + size <= dtrace_toxrange[i].dtt_base) \ 409 continue; \ 410 \ 411 /* \ 412 * This address falls within a toxic region; return 0. \ 413 */ \ 414 *flags |= CPU_DTRACE_BADADDR; \ 415 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \ 416 return (0); \ 417 } \ 418 \ 419 *flags |= CPU_DTRACE_NOFAULT; \ 420 /*CSTYLED*/ \ 421 rval = *((volatile uint##bits##_t *)addr); \ 422 *flags &= ~CPU_DTRACE_NOFAULT; \ 423 \ 424 return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \ 425 } 426 427 #ifdef _LP64 428 #define dtrace_loadptr dtrace_load64 429 #else 430 #define dtrace_loadptr dtrace_load32 431 #endif 432 433 #define DTRACE_DYNHASH_FREE 0 434 #define DTRACE_DYNHASH_SINK 1 435 #define DTRACE_DYNHASH_VALID 2 436 437 #define DTRACE_MATCH_FAIL -1 438 #define DTRACE_MATCH_NEXT 0 439 #define DTRACE_MATCH_DONE 1 440 #define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0') 441 #define DTRACE_STATE_ALIGN 64 442 443 #define DTRACE_FLAGS2FLT(flags) \ 444 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \ 445 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \ 446 ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \ 447 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \ 448 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \ 449 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \ 450 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \ 451 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \ 452 ((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK : \ 453 DTRACEFLT_UNKNOWN) 454 455 #define DTRACEACT_ISSTRING(act) \ 456 ((act)->dta_kind == DTRACEACT_DIFEXPR && \ 457 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) 458 459 static size_t dtrace_strlen(const char *, size_t); 460 static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id); 461 static void dtrace_enabling_provide(dtrace_provider_t *); 462 static int dtrace_enabling_match(dtrace_enabling_t *, int *); 463 static void dtrace_enabling_matchall(void); 464 static void dtrace_enabling_reap(void); 465 static dtrace_state_t *dtrace_anon_grab(void); 466 static uint64_t dtrace_helper(int, dtrace_mstate_t *, 467 dtrace_state_t *, uint64_t, uint64_t); 468 static dtrace_helpers_t *dtrace_helpers_create(proc_t *); 469 static void dtrace_buffer_drop(dtrace_buffer_t *); 470 static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when); 471 static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t, 472 dtrace_state_t *, dtrace_mstate_t *); 473 static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t, 474 dtrace_optval_t); 475 static int dtrace_ecb_create_enable(dtrace_probe_t *, void *); 476 static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *); 477 478 /* 479 * DTrace Probe Context Functions 480 * 481 * These functions are called from probe context. 
Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note: not called from probe context."
 */
void
dtrace_panic(const char *format, ...)
{
	va_list alist;

	va_start(alist, format);
	dtrace_vpanic(format, alist);
	va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

	/*
	 * We just need something here that even the most clever compiler
	 * cannot optimize away.
	 */
	return (a[(uintptr_t)f]);
}

/*
 * Atomically increment a specified error counter from probe context.
 */
static void
dtrace_error(uint32_t *counter)
{
	/*
	 * Most counters stored to in probe context are per-CPU counters.
	 * However, there are some error conditions that are sufficiently
	 * arcane that they don't merit per-CPU storage. If these counters
	 * are incremented concurrently on different CPUs, scalability will be
	 * adversely affected -- but we don't expect them to be white-hot in a
	 * correctly constructed enabling...
	 */
	uint32_t oval, nval;

	do {
		oval = *counter;

		if ((nval = oval + 1) == 0) {
			/*
			 * If the counter would wrap, set it to 1 -- assuring
			 * that the counter is never zero when we have seen
			 * errors. (The counter must be 32-bits because we
			 * aren't guaranteed a 64-bit compare&swap operation.)
			 * To save this code both the infamy of being fingered
			 * by a priggish news story and the indignity of being
			 * the target of a neo-puritan witch trial, we're
			 * carefully avoiding any colorful description of the
			 * likelihood of this condition -- but suffice it to
			 * say that it is only slightly more likely than the
			 * overflow of predicate cache IDs, as discussed in
			 * dtrace_predicate_create().
			 */
			nval = 1;
		}
	} while (dtrace_cas32(counter, oval, nval) != oval);
}

/*
 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
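 * Each DTRACE_LOADFUNC(bits) invocation below expands to the definition of a
 * function of the form
 *
 *	uint<bits>_t dtrace_load<bits>(uintptr_t addr);
 *
 * (e.g. dtrace_load32()), which performs the alignment and toxic-range checks
 * shown in the macro above and returns 0 (with the appropriate CPU flag set)
 * rather than faulting. Probe-context code then simply writes, for example:
 *
 *	c = dtrace_load8((uintptr_t)s++);
 *
 * as dtrace_strlen() and dtrace_strncmp() below do.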
559 */ 560 DTRACE_LOADFUNC(8) 561 DTRACE_LOADFUNC(16) 562 DTRACE_LOADFUNC(32) 563 DTRACE_LOADFUNC(64) 564 565 static int 566 dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate) 567 { 568 if (dest < mstate->dtms_scratch_base) 569 return (0); 570 571 if (dest + size < dest) 572 return (0); 573 574 if (dest + size > mstate->dtms_scratch_ptr) 575 return (0); 576 577 return (1); 578 } 579 580 static int 581 dtrace_canstore_statvar(uint64_t addr, size_t sz, 582 dtrace_statvar_t **svars, int nsvars) 583 { 584 int i; 585 586 for (i = 0; i < nsvars; i++) { 587 dtrace_statvar_t *svar = svars[i]; 588 589 if (svar == NULL || svar->dtsv_size == 0) 590 continue; 591 592 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size)) 593 return (1); 594 } 595 596 return (0); 597 } 598 599 /* 600 * Check to see if the address is within a memory region to which a store may 601 * be issued. This includes the DTrace scratch areas, and any DTrace variable 602 * region. The caller of dtrace_canstore() is responsible for performing any 603 * alignment checks that are needed before stores are actually executed. 604 */ 605 static int 606 dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 607 dtrace_vstate_t *vstate) 608 { 609 /* 610 * First, check to see if the address is in scratch space... 611 */ 612 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base, 613 mstate->dtms_scratch_size)) 614 return (1); 615 616 /* 617 * Now check to see if it's a dynamic variable. This check will pick 618 * up both thread-local variables and any global dynamically-allocated 619 * variables. 620 */ 621 if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base, 622 vstate->dtvs_dynvars.dtds_size)) { 623 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 624 uintptr_t base = (uintptr_t)dstate->dtds_base + 625 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t)); 626 uintptr_t chunkoffs; 627 628 /* 629 * Before we assume that we can store here, we need to make 630 * sure that it isn't in our metadata -- storing to our 631 * dynamic variable metadata would corrupt our state. For 632 * the range to not include any dynamic variable metadata, 633 * it must: 634 * 635 * (1) Start above the hash table that is at the base of 636 * the dynamic variable space 637 * 638 * (2) Have a starting chunk offset that is beyond the 639 * dtrace_dynvar_t that is at the base of every chunk 640 * 641 * (3) Not span a chunk boundary 642 * 643 */ 644 if (addr < base) 645 return (0); 646 647 chunkoffs = (addr - base) % dstate->dtds_chunksize; 648 649 if (chunkoffs < sizeof (dtrace_dynvar_t)) 650 return (0); 651 652 if (chunkoffs + sz > dstate->dtds_chunksize) 653 return (0); 654 655 return (1); 656 } 657 658 /* 659 * Finally, check the static local and global variables. These checks 660 * take the longest, so we perform them last. 661 */ 662 if (dtrace_canstore_statvar(addr, sz, 663 vstate->dtvs_locals, vstate->dtvs_nlocals)) 664 return (1); 665 666 if (dtrace_canstore_statvar(addr, sz, 667 vstate->dtvs_globals, vstate->dtvs_nglobals)) 668 return (1); 669 670 return (0); 671 } 672 673 674 /* 675 * Convenience routine to check to see if the address is within a memory 676 * region in which a load may be issued given the user's privilege level; 677 * if not, it sets the appropriate error flags and loads 'addr' into the 678 * illegal value slot. 679 * 680 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement 681 * appropriate memory access protection. 
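 *
 * A typical use has the following shape (a schematic sketch, not an excerpt
 * from any particular subroutine):
 *
 *	if (!dtrace_canload(addr, size, mstate, vstate))
 *		break;		-- the fault state has already been set
 *
 *	val = dtrace_load64(addr);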
682 */ 683 static int 684 dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 685 dtrace_vstate_t *vstate) 686 { 687 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 688 689 /* 690 * If we hold the privilege to read from kernel memory, then 691 * everything is readable. 692 */ 693 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 694 return (1); 695 696 /* 697 * You can obviously read that which you can store. 698 */ 699 if (dtrace_canstore(addr, sz, mstate, vstate)) 700 return (1); 701 702 /* 703 * We're allowed to read from our own string table. 704 */ 705 if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab, 706 mstate->dtms_difo->dtdo_strlen)) 707 return (1); 708 709 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV); 710 *illval = addr; 711 return (0); 712 } 713 714 /* 715 * Convenience routine to check to see if a given string is within a memory 716 * region in which a load may be issued given the user's privilege level; 717 * this exists so that we don't need to issue unnecessary dtrace_strlen() 718 * calls in the event that the user has all privileges. 719 */ 720 static int 721 dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 722 dtrace_vstate_t *vstate) 723 { 724 size_t strsz; 725 726 /* 727 * If we hold the privilege to read from kernel memory, then 728 * everything is readable. 729 */ 730 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 731 return (1); 732 733 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz); 734 if (dtrace_canload(addr, strsz, mstate, vstate)) 735 return (1); 736 737 return (0); 738 } 739 740 /* 741 * Convenience routine to check to see if a given variable is within a memory 742 * region in which a load may be issued given the user's privilege level. 743 */ 744 static int 745 dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate, 746 dtrace_vstate_t *vstate) 747 { 748 size_t sz; 749 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 750 751 /* 752 * If we hold the privilege to read from kernel memory, then 753 * everything is readable. 754 */ 755 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 756 return (1); 757 758 if (type->dtdt_kind == DIF_TYPE_STRING) 759 sz = dtrace_strlen(src, 760 vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1; 761 else 762 sz = type->dtdt_size; 763 764 return (dtrace_canload((uintptr_t)src, sz, mstate, vstate)); 765 } 766 767 /* 768 * Compare two strings using safe loads. 769 */ 770 static int 771 dtrace_strncmp(char *s1, char *s2, size_t limit) 772 { 773 uint8_t c1, c2; 774 volatile uint16_t *flags; 775 776 if (s1 == s2 || limit == 0) 777 return (0); 778 779 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 780 781 do { 782 if (s1 == NULL) { 783 c1 = '\0'; 784 } else { 785 c1 = dtrace_load8((uintptr_t)s1++); 786 } 787 788 if (s2 == NULL) { 789 c2 = '\0'; 790 } else { 791 c2 = dtrace_load8((uintptr_t)s2++); 792 } 793 794 if (c1 != c2) 795 return (c1 - c2); 796 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT)); 797 798 return (0); 799 } 800 801 /* 802 * Compute strlen(s) for a string using safe memory accesses. The additional 803 * len parameter is used to specify a maximum length to ensure completion. 804 */ 805 static size_t 806 dtrace_strlen(const char *s, size_t lim) 807 { 808 uint_t len; 809 810 for (len = 0; len != lim; len++) { 811 if (dtrace_load8((uintptr_t)s++) == '\0') 812 break; 813 } 814 815 return (len); 816 } 817 818 /* 819 * Check if an address falls within a toxic region. 
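 * Both of the comparisons below rely on unsigned wraparound: the first,
 * (kaddr - taddr < tsize), is true exactly when kaddr itself lies inside the
 * toxic range, while the second, (taddr - kaddr < size), is true exactly when
 * the region being checked starts below the toxic range but extends into it.
 * Together they detect any overlap between [kaddr, kaddr + size) and
 * [taddr, taddr + tsize).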
820 */ 821 static int 822 dtrace_istoxic(uintptr_t kaddr, size_t size) 823 { 824 uintptr_t taddr, tsize; 825 int i; 826 827 for (i = 0; i < dtrace_toxranges; i++) { 828 taddr = dtrace_toxrange[i].dtt_base; 829 tsize = dtrace_toxrange[i].dtt_limit - taddr; 830 831 if (kaddr - taddr < tsize) { 832 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 833 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = kaddr; 834 return (1); 835 } 836 837 if (taddr - kaddr < size) { 838 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 839 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = taddr; 840 return (1); 841 } 842 } 843 844 return (0); 845 } 846 847 /* 848 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe 849 * memory specified by the DIF program. The dst is assumed to be safe memory 850 * that we can store to directly because it is managed by DTrace. As with 851 * standard bcopy, overlapping copies are handled properly. 852 */ 853 static void 854 dtrace_bcopy(const void *src, void *dst, size_t len) 855 { 856 if (len != 0) { 857 uint8_t *s1 = dst; 858 const uint8_t *s2 = src; 859 860 if (s1 <= s2) { 861 do { 862 *s1++ = dtrace_load8((uintptr_t)s2++); 863 } while (--len != 0); 864 } else { 865 s2 += len; 866 s1 += len; 867 868 do { 869 *--s1 = dtrace_load8((uintptr_t)--s2); 870 } while (--len != 0); 871 } 872 } 873 } 874 875 /* 876 * Copy src to dst using safe memory accesses, up to either the specified 877 * length, or the point that a nul byte is encountered. The src is assumed to 878 * be unsafe memory specified by the DIF program. The dst is assumed to be 879 * safe memory that we can store to directly because it is managed by DTrace. 880 * Unlike dtrace_bcopy(), overlapping regions are not handled. 881 */ 882 static void 883 dtrace_strcpy(const void *src, void *dst, size_t len) 884 { 885 if (len != 0) { 886 uint8_t *s1 = dst, c; 887 const uint8_t *s2 = src; 888 889 do { 890 *s1++ = c = dtrace_load8((uintptr_t)s2++); 891 } while (--len != 0 && c != '\0'); 892 } 893 } 894 895 /* 896 * Copy src to dst, deriving the size and type from the specified (BYREF) 897 * variable type. The src is assumed to be unsafe memory specified by the DIF 898 * program. The dst is assumed to be DTrace variable memory that is of the 899 * specified type; we assume that we can store to directly. 900 */ 901 static void 902 dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type) 903 { 904 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 905 906 if (type->dtdt_kind == DIF_TYPE_STRING) { 907 dtrace_strcpy(src, dst, type->dtdt_size); 908 } else { 909 dtrace_bcopy(src, dst, type->dtdt_size); 910 } 911 } 912 913 /* 914 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be 915 * unsafe memory specified by the DIF program. The s2 data is assumed to be 916 * safe memory that we can access directly because it is managed by DTrace. 917 */ 918 static int 919 dtrace_bcmp(const void *s1, const void *s2, size_t len) 920 { 921 volatile uint16_t *flags; 922 923 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 924 925 if (s1 == s2) 926 return (0); 927 928 if (s1 == NULL || s2 == NULL) 929 return (1); 930 931 if (s1 != s2 && len != 0) { 932 const uint8_t *ps1 = s1; 933 const uint8_t *ps2 = s2; 934 935 do { 936 if (dtrace_load8((uintptr_t)ps1++) != *ps2++) 937 return (1); 938 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT)); 939 } 940 return (0); 941 } 942 943 /* 944 * Zero the specified region using a simple byte-by-byte loop. Note that this 945 * is for safe DTrace-managed memory only. 
946 */ 947 static void 948 dtrace_bzero(void *dst, size_t len) 949 { 950 uchar_t *cp; 951 952 for (cp = dst; len != 0; len--) 953 *cp++ = 0; 954 } 955 956 static void 957 dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum) 958 { 959 uint64_t result[2]; 960 961 result[0] = addend1[0] + addend2[0]; 962 result[1] = addend1[1] + addend2[1] + 963 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0); 964 965 sum[0] = result[0]; 966 sum[1] = result[1]; 967 } 968 969 /* 970 * Shift the 128-bit value in a by b. If b is positive, shift left. 971 * If b is negative, shift right. 972 */ 973 static void 974 dtrace_shift_128(uint64_t *a, int b) 975 { 976 uint64_t mask; 977 978 if (b == 0) 979 return; 980 981 if (b < 0) { 982 b = -b; 983 if (b >= 64) { 984 a[0] = a[1] >> (b - 64); 985 a[1] = 0; 986 } else { 987 a[0] >>= b; 988 mask = 1LL << (64 - b); 989 mask -= 1; 990 a[0] |= ((a[1] & mask) << (64 - b)); 991 a[1] >>= b; 992 } 993 } else { 994 if (b >= 64) { 995 a[1] = a[0] << (b - 64); 996 a[0] = 0; 997 } else { 998 a[1] <<= b; 999 mask = a[0] >> (64 - b); 1000 a[1] |= mask; 1001 a[0] <<= b; 1002 } 1003 } 1004 } 1005 1006 /* 1007 * The basic idea is to break the 2 64-bit values into 4 32-bit values, 1008 * use native multiplication on those, and then re-combine into the 1009 * resulting 128-bit value. 1010 * 1011 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) = 1012 * hi1 * hi2 << 64 + 1013 * hi1 * lo2 << 32 + 1014 * hi2 * lo1 << 32 + 1015 * lo1 * lo2 1016 */ 1017 static void 1018 dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product) 1019 { 1020 uint64_t hi1, hi2, lo1, lo2; 1021 uint64_t tmp[2]; 1022 1023 hi1 = factor1 >> 32; 1024 hi2 = factor2 >> 32; 1025 1026 lo1 = factor1 & DT_MASK_LO; 1027 lo2 = factor2 & DT_MASK_LO; 1028 1029 product[0] = lo1 * lo2; 1030 product[1] = hi1 * hi2; 1031 1032 tmp[0] = hi1 * lo2; 1033 tmp[1] = 0; 1034 dtrace_shift_128(tmp, 32); 1035 dtrace_add_128(product, tmp, product); 1036 1037 tmp[0] = hi2 * lo1; 1038 tmp[1] = 0; 1039 dtrace_shift_128(tmp, 32); 1040 dtrace_add_128(product, tmp, product); 1041 } 1042 1043 /* 1044 * This privilege check should be used by actions and subroutines to 1045 * verify that the user credentials of the process that enabled the 1046 * invoking ECB match the target credentials 1047 */ 1048 static int 1049 dtrace_priv_proc_common_user(dtrace_state_t *state) 1050 { 1051 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 1052 1053 /* 1054 * We should always have a non-NULL state cred here, since if cred 1055 * is null (anonymous tracing), we fast-path bypass this routine. 1056 */ 1057 ASSERT(s_cr != NULL); 1058 1059 if ((cr = CRED()) != NULL && 1060 s_cr->cr_uid == cr->cr_uid && 1061 s_cr->cr_uid == cr->cr_ruid && 1062 s_cr->cr_uid == cr->cr_suid && 1063 s_cr->cr_gid == cr->cr_gid && 1064 s_cr->cr_gid == cr->cr_rgid && 1065 s_cr->cr_gid == cr->cr_sgid) 1066 return (1); 1067 1068 return (0); 1069 } 1070 1071 /* 1072 * This privilege check should be used by actions and subroutines to 1073 * verify that the zone of the process that enabled the invoking ECB 1074 * matches the target credentials 1075 */ 1076 static int 1077 dtrace_priv_proc_common_zone(dtrace_state_t *state) 1078 { 1079 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 1080 1081 /* 1082 * We should always have a non-NULL state cred here, since if cred 1083 * is null (anonymous tracing), we fast-path bypass this routine. 
1084 */ 1085 ASSERT(s_cr != NULL); 1086 1087 if ((cr = CRED()) != NULL && 1088 s_cr->cr_zone == cr->cr_zone) 1089 return (1); 1090 1091 return (0); 1092 } 1093 1094 /* 1095 * This privilege check should be used by actions and subroutines to 1096 * verify that the process has not setuid or changed credentials. 1097 */ 1098 static int 1099 dtrace_priv_proc_common_nocd() 1100 { 1101 proc_t *proc; 1102 1103 if ((proc = ttoproc(curthread)) != NULL && 1104 !(proc->p_flag & SNOCD)) 1105 return (1); 1106 1107 return (0); 1108 } 1109 1110 static int 1111 dtrace_priv_proc_destructive(dtrace_state_t *state) 1112 { 1113 int action = state->dts_cred.dcr_action; 1114 1115 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) && 1116 dtrace_priv_proc_common_zone(state) == 0) 1117 goto bad; 1118 1119 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) && 1120 dtrace_priv_proc_common_user(state) == 0) 1121 goto bad; 1122 1123 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) && 1124 dtrace_priv_proc_common_nocd() == 0) 1125 goto bad; 1126 1127 return (1); 1128 1129 bad: 1130 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1131 1132 return (0); 1133 } 1134 1135 static int 1136 dtrace_priv_proc_control(dtrace_state_t *state) 1137 { 1138 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL) 1139 return (1); 1140 1141 if (dtrace_priv_proc_common_zone(state) && 1142 dtrace_priv_proc_common_user(state) && 1143 dtrace_priv_proc_common_nocd()) 1144 return (1); 1145 1146 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1147 1148 return (0); 1149 } 1150 1151 static int 1152 dtrace_priv_proc(dtrace_state_t *state) 1153 { 1154 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC) 1155 return (1); 1156 1157 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1158 1159 return (0); 1160 } 1161 1162 static int 1163 dtrace_priv_kernel(dtrace_state_t *state) 1164 { 1165 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL) 1166 return (1); 1167 1168 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 1169 1170 return (0); 1171 } 1172 1173 static int 1174 dtrace_priv_kernel_destructive(dtrace_state_t *state) 1175 { 1176 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE) 1177 return (1); 1178 1179 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 1180 1181 return (0); 1182 } 1183 1184 /* 1185 * Note: not called from probe context. This function is called 1186 * asynchronously (and at a regular interval) from outside of probe context to 1187 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable 1188 * cleaning is explained in detail in <sys/dtrace_impl.h>. 1189 */ 1190 void 1191 dtrace_dynvar_clean(dtrace_dstate_t *dstate) 1192 { 1193 dtrace_dynvar_t *dirty; 1194 dtrace_dstate_percpu_t *dcpu; 1195 dtrace_dynvar_t **rinsep; 1196 int i, j, work = 0; 1197 1198 for (i = 0; i < NCPU; i++) { 1199 dcpu = &dstate->dtds_percpu[i]; 1200 rinsep = &dcpu->dtdsc_rinsing; 1201 1202 /* 1203 * If the dirty list is NULL, there is no dirty work to do. 1204 */ 1205 if (dcpu->dtdsc_dirty == NULL) 1206 continue; 1207 1208 if (dcpu->dtdsc_rinsing != NULL) { 1209 /* 1210 * If the rinsing list is non-NULL, then it is because 1211 * this CPU was selected to accept another CPU's 1212 * dirty list -- and since that time, dirty buffers 1213 * have accumulated. This is a highly unlikely 1214 * condition, but we choose to ignore the dirty 1215 * buffers -- they'll be picked up a future cleanse. 
1216 */ 1217 continue; 1218 } 1219 1220 if (dcpu->dtdsc_clean != NULL) { 1221 /* 1222 * If the clean list is non-NULL, then we're in a 1223 * situation where a CPU has done deallocations (we 1224 * have a non-NULL dirty list) but no allocations (we 1225 * also have a non-NULL clean list). We can't simply 1226 * move the dirty list into the clean list on this 1227 * CPU, yet we also don't want to allow this condition 1228 * to persist, lest a short clean list prevent a 1229 * massive dirty list from being cleaned (which in 1230 * turn could lead to otherwise avoidable dynamic 1231 * drops). To deal with this, we look for some CPU 1232 * with a NULL clean list, NULL dirty list, and NULL 1233 * rinsing list -- and then we borrow this CPU to 1234 * rinse our dirty list. 1235 */ 1236 for (j = 0; j < NCPU; j++) { 1237 dtrace_dstate_percpu_t *rinser; 1238 1239 rinser = &dstate->dtds_percpu[j]; 1240 1241 if (rinser->dtdsc_rinsing != NULL) 1242 continue; 1243 1244 if (rinser->dtdsc_dirty != NULL) 1245 continue; 1246 1247 if (rinser->dtdsc_clean != NULL) 1248 continue; 1249 1250 rinsep = &rinser->dtdsc_rinsing; 1251 break; 1252 } 1253 1254 if (j == NCPU) { 1255 /* 1256 * We were unable to find another CPU that 1257 * could accept this dirty list -- we are 1258 * therefore unable to clean it now. 1259 */ 1260 dtrace_dynvar_failclean++; 1261 continue; 1262 } 1263 } 1264 1265 work = 1; 1266 1267 /* 1268 * Atomically move the dirty list aside. 1269 */ 1270 do { 1271 dirty = dcpu->dtdsc_dirty; 1272 1273 /* 1274 * Before we zap the dirty list, set the rinsing list. 1275 * (This allows for a potential assertion in 1276 * dtrace_dynvar(): if a free dynamic variable appears 1277 * on a hash chain, either the dirty list or the 1278 * rinsing list for some CPU must be non-NULL.) 1279 */ 1280 *rinsep = dirty; 1281 dtrace_membar_producer(); 1282 } while (dtrace_casptr(&dcpu->dtdsc_dirty, 1283 dirty, NULL) != dirty); 1284 } 1285 1286 if (!work) { 1287 /* 1288 * We have no work to do; we can simply return. 1289 */ 1290 return; 1291 } 1292 1293 dtrace_sync(); 1294 1295 for (i = 0; i < NCPU; i++) { 1296 dcpu = &dstate->dtds_percpu[i]; 1297 1298 if (dcpu->dtdsc_rinsing == NULL) 1299 continue; 1300 1301 /* 1302 * We are now guaranteed that no hash chain contains a pointer 1303 * into this dirty list; we can make it clean. 1304 */ 1305 ASSERT(dcpu->dtdsc_clean == NULL); 1306 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing; 1307 dcpu->dtdsc_rinsing = NULL; 1308 } 1309 1310 /* 1311 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make 1312 * sure that all CPUs have seen all of the dtdsc_clean pointers. 1313 * This prevents a race whereby a CPU incorrectly decides that 1314 * the state should be something other than DTRACE_DSTATE_CLEAN 1315 * after dtrace_dynvar_clean() has completed. 1316 */ 1317 dtrace_sync(); 1318 1319 dstate->dtds_state = DTRACE_DSTATE_CLEAN; 1320 } 1321 1322 /* 1323 * Depending on the value of the op parameter, this function looks-up, 1324 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an 1325 * allocation is requested, this function will return a pointer to a 1326 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no 1327 * variable can be allocated. If NULL is returned, the appropriate counter 1328 * will be incremented. 
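 *
 * Schematically, a caller in the DIF engine uses this routine as follows (an
 * illustrative sketch, not a literal excerpt):
 *
 *	dvar = dtrace_dynvar(dstate, nkeys, key, dsize,
 *	    DTRACE_DYNVAR_ALLOC, mstate, vstate);
 *
 *	if (dvar == NULL)
 *		return;		-- dynamic variable drop, already counted
 *
 * with DTRACE_DYNVAR_NOALLOC used for loads that should not create the
 * variable and DTRACE_DYNVAR_DEALLOC used to free it.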
1329 */ 1330 dtrace_dynvar_t * 1331 dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys, 1332 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op, 1333 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) 1334 { 1335 uint64_t hashval = DTRACE_DYNHASH_VALID; 1336 dtrace_dynhash_t *hash = dstate->dtds_hash; 1337 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL; 1338 processorid_t me = CPU->cpu_id, cpu = me; 1339 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me]; 1340 size_t bucket, ksize; 1341 size_t chunksize = dstate->dtds_chunksize; 1342 uintptr_t kdata, lock, nstate; 1343 uint_t i; 1344 1345 ASSERT(nkeys != 0); 1346 1347 /* 1348 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time" 1349 * algorithm. For the by-value portions, we perform the algorithm in 1350 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a 1351 * bit, and seems to have only a minute effect on distribution. For 1352 * the by-reference data, we perform "One-at-a-time" iterating (safely) 1353 * over each referenced byte. It's painful to do this, but it's much 1354 * better than pathological hash distribution. The efficacy of the 1355 * hashing algorithm (and a comparison with other algorithms) may be 1356 * found by running the ::dtrace_dynstat MDB dcmd. 1357 */ 1358 for (i = 0; i < nkeys; i++) { 1359 if (key[i].dttk_size == 0) { 1360 uint64_t val = key[i].dttk_value; 1361 1362 hashval += (val >> 48) & 0xffff; 1363 hashval += (hashval << 10); 1364 hashval ^= (hashval >> 6); 1365 1366 hashval += (val >> 32) & 0xffff; 1367 hashval += (hashval << 10); 1368 hashval ^= (hashval >> 6); 1369 1370 hashval += (val >> 16) & 0xffff; 1371 hashval += (hashval << 10); 1372 hashval ^= (hashval >> 6); 1373 1374 hashval += val & 0xffff; 1375 hashval += (hashval << 10); 1376 hashval ^= (hashval >> 6); 1377 } else { 1378 /* 1379 * This is incredibly painful, but it beats the hell 1380 * out of the alternative. 1381 */ 1382 uint64_t j, size = key[i].dttk_size; 1383 uintptr_t base = (uintptr_t)key[i].dttk_value; 1384 1385 if (!dtrace_canload(base, size, mstate, vstate)) 1386 break; 1387 1388 for (j = 0; j < size; j++) { 1389 hashval += dtrace_load8(base + j); 1390 hashval += (hashval << 10); 1391 hashval ^= (hashval >> 6); 1392 } 1393 } 1394 } 1395 1396 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT)) 1397 return (NULL); 1398 1399 hashval += (hashval << 3); 1400 hashval ^= (hashval >> 11); 1401 hashval += (hashval << 15); 1402 1403 /* 1404 * There is a remote chance (ideally, 1 in 2^31) that our hashval 1405 * comes out to be one of our two sentinel hash values. If this 1406 * actually happens, we set the hashval to be a value known to be a 1407 * non-sentinel value. 1408 */ 1409 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK) 1410 hashval = DTRACE_DYNHASH_VALID; 1411 1412 /* 1413 * Yes, it's painful to do a divide here. If the cycle count becomes 1414 * important here, tricks can be pulled to reduce it. (However, it's 1415 * critical that hash collisions be kept to an absolute minimum; 1416 * they're much more painful than a divide.) It's better to have a 1417 * solution that generates few collisions and still keeps things 1418 * relatively simple. 
1419 */ 1420 bucket = hashval % dstate->dtds_hashsize; 1421 1422 if (op == DTRACE_DYNVAR_DEALLOC) { 1423 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock; 1424 1425 for (;;) { 1426 while ((lock = *lockp) & 1) 1427 continue; 1428 1429 if (dtrace_casptr((void *)lockp, 1430 (void *)lock, (void *)(lock + 1)) == (void *)lock) 1431 break; 1432 } 1433 1434 dtrace_membar_producer(); 1435 } 1436 1437 top: 1438 prev = NULL; 1439 lock = hash[bucket].dtdh_lock; 1440 1441 dtrace_membar_consumer(); 1442 1443 start = hash[bucket].dtdh_chain; 1444 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK || 1445 start->dtdv_hashval != DTRACE_DYNHASH_FREE || 1446 op != DTRACE_DYNVAR_DEALLOC)); 1447 1448 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) { 1449 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple; 1450 dtrace_key_t *dkey = &dtuple->dtt_key[0]; 1451 1452 if (dvar->dtdv_hashval != hashval) { 1453 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) { 1454 /* 1455 * We've reached the sink, and therefore the 1456 * end of the hash chain; we can kick out of 1457 * the loop knowing that we have seen a valid 1458 * snapshot of state. 1459 */ 1460 ASSERT(dvar->dtdv_next == NULL); 1461 ASSERT(dvar == &dtrace_dynhash_sink); 1462 break; 1463 } 1464 1465 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) { 1466 /* 1467 * We've gone off the rails: somewhere along 1468 * the line, one of the members of this hash 1469 * chain was deleted. Note that we could also 1470 * detect this by simply letting this loop run 1471 * to completion, as we would eventually hit 1472 * the end of the dirty list. However, we 1473 * want to avoid running the length of the 1474 * dirty list unnecessarily (it might be quite 1475 * long), so we catch this as early as 1476 * possible by detecting the hash marker. In 1477 * this case, we simply set dvar to NULL and 1478 * break; the conditional after the loop will 1479 * send us back to top. 1480 */ 1481 dvar = NULL; 1482 break; 1483 } 1484 1485 goto next; 1486 } 1487 1488 if (dtuple->dtt_nkeys != nkeys) 1489 goto next; 1490 1491 for (i = 0; i < nkeys; i++, dkey++) { 1492 if (dkey->dttk_size != key[i].dttk_size) 1493 goto next; /* size or type mismatch */ 1494 1495 if (dkey->dttk_size != 0) { 1496 if (dtrace_bcmp( 1497 (void *)(uintptr_t)key[i].dttk_value, 1498 (void *)(uintptr_t)dkey->dttk_value, 1499 dkey->dttk_size)) 1500 goto next; 1501 } else { 1502 if (dkey->dttk_value != key[i].dttk_value) 1503 goto next; 1504 } 1505 } 1506 1507 if (op != DTRACE_DYNVAR_DEALLOC) 1508 return (dvar); 1509 1510 ASSERT(dvar->dtdv_next == NULL || 1511 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE); 1512 1513 if (prev != NULL) { 1514 ASSERT(hash[bucket].dtdh_chain != dvar); 1515 ASSERT(start != dvar); 1516 ASSERT(prev->dtdv_next == dvar); 1517 prev->dtdv_next = dvar->dtdv_next; 1518 } else { 1519 if (dtrace_casptr(&hash[bucket].dtdh_chain, 1520 start, dvar->dtdv_next) != start) { 1521 /* 1522 * We have failed to atomically swing the 1523 * hash table head pointer, presumably because 1524 * of a conflicting allocation on another CPU. 1525 * We need to reread the hash chain and try 1526 * again. 1527 */ 1528 goto top; 1529 } 1530 } 1531 1532 dtrace_membar_producer(); 1533 1534 /* 1535 * Now set the hash value to indicate that it's free. 
1536 */ 1537 ASSERT(hash[bucket].dtdh_chain != dvar); 1538 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1539 1540 dtrace_membar_producer(); 1541 1542 /* 1543 * Set the next pointer to point at the dirty list, and 1544 * atomically swing the dirty pointer to the newly freed dvar. 1545 */ 1546 do { 1547 next = dcpu->dtdsc_dirty; 1548 dvar->dtdv_next = next; 1549 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next); 1550 1551 /* 1552 * Finally, unlock this hash bucket. 1553 */ 1554 ASSERT(hash[bucket].dtdh_lock == lock); 1555 ASSERT(lock & 1); 1556 hash[bucket].dtdh_lock++; 1557 1558 return (NULL); 1559 next: 1560 prev = dvar; 1561 continue; 1562 } 1563 1564 if (dvar == NULL) { 1565 /* 1566 * If dvar is NULL, it is because we went off the rails: 1567 * one of the elements that we traversed in the hash chain 1568 * was deleted while we were traversing it. In this case, 1569 * we assert that we aren't doing a dealloc (deallocs lock 1570 * the hash bucket to prevent themselves from racing with 1571 * one another), and retry the hash chain traversal. 1572 */ 1573 ASSERT(op != DTRACE_DYNVAR_DEALLOC); 1574 goto top; 1575 } 1576 1577 if (op != DTRACE_DYNVAR_ALLOC) { 1578 /* 1579 * If we are not to allocate a new variable, we want to 1580 * return NULL now. Before we return, check that the value 1581 * of the lock word hasn't changed. If it has, we may have 1582 * seen an inconsistent snapshot. 1583 */ 1584 if (op == DTRACE_DYNVAR_NOALLOC) { 1585 if (hash[bucket].dtdh_lock != lock) 1586 goto top; 1587 } else { 1588 ASSERT(op == DTRACE_DYNVAR_DEALLOC); 1589 ASSERT(hash[bucket].dtdh_lock == lock); 1590 ASSERT(lock & 1); 1591 hash[bucket].dtdh_lock++; 1592 } 1593 1594 return (NULL); 1595 } 1596 1597 /* 1598 * We need to allocate a new dynamic variable. The size we need is the 1599 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the 1600 * size of any auxiliary key data (rounded up to 8-byte alignment) plus 1601 * the size of any referred-to data (dsize). We then round the final 1602 * size up to the chunksize for allocation. 1603 */ 1604 for (ksize = 0, i = 0; i < nkeys; i++) 1605 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 1606 1607 /* 1608 * This should be pretty much impossible, but could happen if, say, 1609 * strange DIF specified the tuple. Ideally, this should be an 1610 * assertion and not an error condition -- but that requires that the 1611 * chunksize calculation in dtrace_difo_chunksize() be absolutely 1612 * bullet-proof. (That is, it must not be able to be fooled by 1613 * malicious DIF.) Given the lack of backwards branches in DIF, 1614 * solving this would presumably not amount to solving the Halting 1615 * Problem -- but it still seems awfully hard. 1616 */ 1617 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) + 1618 ksize + dsize > chunksize) { 1619 dcpu->dtdsc_drops++; 1620 return (NULL); 1621 } 1622 1623 nstate = DTRACE_DSTATE_EMPTY; 1624 1625 do { 1626 retry: 1627 free = dcpu->dtdsc_free; 1628 1629 if (free == NULL) { 1630 dtrace_dynvar_t *clean = dcpu->dtdsc_clean; 1631 void *rval; 1632 1633 if (clean == NULL) { 1634 /* 1635 * We're out of dynamic variable space on 1636 * this CPU. Unless we have tried all CPUs, 1637 * we'll try to allocate from a different 1638 * CPU. 
1639 */ 1640 switch (dstate->dtds_state) { 1641 case DTRACE_DSTATE_CLEAN: { 1642 void *sp = &dstate->dtds_state; 1643 1644 if (++cpu >= NCPU) 1645 cpu = 0; 1646 1647 if (dcpu->dtdsc_dirty != NULL && 1648 nstate == DTRACE_DSTATE_EMPTY) 1649 nstate = DTRACE_DSTATE_DIRTY; 1650 1651 if (dcpu->dtdsc_rinsing != NULL) 1652 nstate = DTRACE_DSTATE_RINSING; 1653 1654 dcpu = &dstate->dtds_percpu[cpu]; 1655 1656 if (cpu != me) 1657 goto retry; 1658 1659 (void) dtrace_cas32(sp, 1660 DTRACE_DSTATE_CLEAN, nstate); 1661 1662 /* 1663 * To increment the correct bean 1664 * counter, take another lap. 1665 */ 1666 goto retry; 1667 } 1668 1669 case DTRACE_DSTATE_DIRTY: 1670 dcpu->dtdsc_dirty_drops++; 1671 break; 1672 1673 case DTRACE_DSTATE_RINSING: 1674 dcpu->dtdsc_rinsing_drops++; 1675 break; 1676 1677 case DTRACE_DSTATE_EMPTY: 1678 dcpu->dtdsc_drops++; 1679 break; 1680 } 1681 1682 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP); 1683 return (NULL); 1684 } 1685 1686 /* 1687 * The clean list appears to be non-empty. We want to 1688 * move the clean list to the free list; we start by 1689 * moving the clean pointer aside. 1690 */ 1691 if (dtrace_casptr(&dcpu->dtdsc_clean, 1692 clean, NULL) != clean) { 1693 /* 1694 * We are in one of two situations: 1695 * 1696 * (a) The clean list was switched to the 1697 * free list by another CPU. 1698 * 1699 * (b) The clean list was added to by the 1700 * cleansing cyclic. 1701 * 1702 * In either of these situations, we can 1703 * just reattempt the free list allocation. 1704 */ 1705 goto retry; 1706 } 1707 1708 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE); 1709 1710 /* 1711 * Now we'll move the clean list to our free list. 1712 * It's impossible for this to fail: the only way 1713 * the free list can be updated is through this 1714 * code path, and only one CPU can own the clean list. 1715 * Thus, it would only be possible for this to fail if 1716 * this code were racing with dtrace_dynvar_clean(). 1717 * (That is, if dtrace_dynvar_clean() updated the clean 1718 * list, and we ended up racing to update the free 1719 * list.) This race is prevented by the dtrace_sync() 1720 * in dtrace_dynvar_clean() -- which flushes the 1721 * owners of the clean lists out before resetting 1722 * the clean lists. 1723 */ 1724 dcpu = &dstate->dtds_percpu[me]; 1725 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean); 1726 ASSERT(rval == NULL); 1727 goto retry; 1728 } 1729 1730 dvar = free; 1731 new_free = dvar->dtdv_next; 1732 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free); 1733 1734 /* 1735 * We have now allocated a new chunk. We copy the tuple keys into the 1736 * tuple array and copy any referenced key data into the data space 1737 * following the tuple array. As we do this, we relocate dttk_value 1738 * in the final tuple to point to the key data address in the chunk. 
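 *
 * Schematically, each chunk ends up laid out as:
 *
 *	[ dtrace_dynvar_t | dtt_key[0 .. nkeys - 1] | key data | value data ]
 *	                                              ^ kdata    ^ dtdv_data
 *
 * with each by-reference key rounded up to 8-byte alignment within the key
 * data region, and dtdv_data beginning ksize bytes past kdata.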
1739 */ 1740 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys]; 1741 dvar->dtdv_data = (void *)(kdata + ksize); 1742 dvar->dtdv_tuple.dtt_nkeys = nkeys; 1743 1744 for (i = 0; i < nkeys; i++) { 1745 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i]; 1746 size_t kesize = key[i].dttk_size; 1747 1748 if (kesize != 0) { 1749 dtrace_bcopy( 1750 (const void *)(uintptr_t)key[i].dttk_value, 1751 (void *)kdata, kesize); 1752 dkey->dttk_value = kdata; 1753 kdata += P2ROUNDUP(kesize, sizeof (uint64_t)); 1754 } else { 1755 dkey->dttk_value = key[i].dttk_value; 1756 } 1757 1758 dkey->dttk_size = kesize; 1759 } 1760 1761 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE); 1762 dvar->dtdv_hashval = hashval; 1763 dvar->dtdv_next = start; 1764 1765 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start) 1766 return (dvar); 1767 1768 /* 1769 * The cas has failed. Either another CPU is adding an element to 1770 * this hash chain, or another CPU is deleting an element from this 1771 * hash chain. The simplest way to deal with both of these cases 1772 * (though not necessarily the most efficient) is to free our 1773 * allocated block and tail-call ourselves. Note that the free is 1774 * to the dirty list and _not_ to the free list. This is to prevent 1775 * races with allocators, above. 1776 */ 1777 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1778 1779 dtrace_membar_producer(); 1780 1781 do { 1782 free = dcpu->dtdsc_dirty; 1783 dvar->dtdv_next = free; 1784 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); 1785 1786 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate)); 1787 } 1788 1789 /*ARGSUSED*/ 1790 static void 1791 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) 1792 { 1793 if ((int64_t)nval < (int64_t)*oval) 1794 *oval = nval; 1795 } 1796 1797 /*ARGSUSED*/ 1798 static void 1799 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) 1800 { 1801 if ((int64_t)nval > (int64_t)*oval) 1802 *oval = nval; 1803 } 1804 1805 static void 1806 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) 1807 { 1808 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; 1809 int64_t val = (int64_t)nval; 1810 1811 if (val < 0) { 1812 for (i = 0; i < zero; i++) { 1813 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { 1814 quanta[i] += incr; 1815 return; 1816 } 1817 } 1818 } else { 1819 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { 1820 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { 1821 quanta[i - 1] += incr; 1822 return; 1823 } 1824 } 1825 1826 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; 1827 return; 1828 } 1829 1830 ASSERT(0); 1831 } 1832 1833 static void 1834 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) 1835 { 1836 uint64_t arg = *lquanta++; 1837 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 1838 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 1839 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 1840 int32_t val = (int32_t)nval, level; 1841 1842 ASSERT(step != 0); 1843 ASSERT(levels != 0); 1844 1845 if (val < base) { 1846 /* 1847 * This is an underflow. 1848 */ 1849 lquanta[0] += incr; 1850 return; 1851 } 1852 1853 level = (val - base) / step; 1854 1855 if (level < levels) { 1856 lquanta[level + 1] += incr; 1857 return; 1858 } 1859 1860 /* 1861 * This is an overflow. 
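 * As an arbitrary illustrative parameterization: with a base of 0, a step of
 * 10 and 10 levels, a value of -5 increments the underflow bucket lquanta[0],
 * a value of 37 falls in level 3 and increments lquanta[4], and a value of
 * 200 lands here, in the overflow bucket lquanta[levels + 1].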
1862 */ 1863 lquanta[levels + 1] += incr; 1864 } 1865 1866 static int 1867 dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low, 1868 uint16_t high, uint16_t nsteps, int64_t value) 1869 { 1870 int64_t this = 1, last, next; 1871 int base = 1, order; 1872 1873 ASSERT(factor <= nsteps); 1874 ASSERT(nsteps % factor == 0); 1875 1876 for (order = 0; order < low; order++) 1877 this *= factor; 1878 1879 /* 1880 * If our value is less than our factor taken to the power of the 1881 * low order of magnitude, it goes into the zeroth bucket. 1882 */ 1883 if (value < (last = this)) 1884 return (0); 1885 1886 for (this *= factor; order <= high; order++) { 1887 int nbuckets = this > nsteps ? nsteps : this; 1888 1889 if ((next = this * factor) < this) { 1890 /* 1891 * We should not generally get log/linear quantizations 1892 * with a high magnitude that allows 64-bits to 1893 * overflow, but we nonetheless protect against this 1894 * by explicitly checking for overflow, and clamping 1895 * our value accordingly. 1896 */ 1897 value = this - 1; 1898 } 1899 1900 if (value < this) { 1901 /* 1902 * If our value lies within this order of magnitude, 1903 * determine its position by taking the offset within 1904 * the order of magnitude, dividing by the bucket 1905 * width, and adding to our (accumulated) base. 1906 */ 1907 return (base + (value - last) / (this / nbuckets)); 1908 } 1909 1910 base += nbuckets - (nbuckets / factor); 1911 last = this; 1912 this = next; 1913 } 1914 1915 /* 1916 * Our value is greater than or equal to our factor taken to the 1917 * power of one plus the high magnitude -- return the top bucket. 1918 */ 1919 return (base); 1920 } 1921 1922 static void 1923 dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr) 1924 { 1925 uint64_t arg = *llquanta++; 1926 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg); 1927 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg); 1928 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg); 1929 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg); 1930 1931 llquanta[dtrace_aggregate_llquantize_bucket(factor, 1932 low, high, nsteps, nval)] += incr; 1933 } 1934 1935 /*ARGSUSED*/ 1936 static void 1937 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg) 1938 { 1939 data[0]++; 1940 data[1] += nval; 1941 } 1942 1943 /*ARGSUSED*/ 1944 static void 1945 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg) 1946 { 1947 int64_t snval = (int64_t)nval; 1948 uint64_t tmp[2]; 1949 1950 data[0]++; 1951 data[1] += nval; 1952 1953 /* 1954 * What we want to say here is: 1955 * 1956 * data[2] += nval * nval; 1957 * 1958 * But given that nval is 64-bit, we could easily overflow, so 1959 * we do this as 128-bit arithmetic. 1960 */ 1961 if (snval < 0) 1962 snval = -snval; 1963 1964 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp); 1965 dtrace_add_128(data + 2, tmp, data + 2); 1966 } 1967 1968 /*ARGSUSED*/ 1969 static void 1970 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg) 1971 { 1972 *oval = *oval + 1; 1973 } 1974 1975 /*ARGSUSED*/ 1976 static void 1977 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) 1978 { 1979 *oval += nval; 1980 } 1981 1982 /* 1983 * Aggregate given the tuple in the principal data buffer, and the aggregating 1984 * action denoted by the specified dtrace_aggregation_t. The aggregation 1985 * buffer is specified as the buf parameter. 
This routine does not return 1986 * failure; if there is no space in the aggregation buffer, the data will be 1987 * dropped, and a corresponding counter incremented. 1988 */ 1989 static void 1990 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf, 1991 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg) 1992 { 1993 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec; 1994 uint32_t i, ndx, size, fsize; 1995 uint32_t align = sizeof (uint64_t) - 1; 1996 dtrace_aggbuffer_t *agb; 1997 dtrace_aggkey_t *key; 1998 uint32_t hashval = 0, limit, isstr; 1999 caddr_t tomax, data, kdata; 2000 dtrace_actkind_t action; 2001 dtrace_action_t *act; 2002 uintptr_t offs; 2003 2004 if (buf == NULL) 2005 return; 2006 2007 if (!agg->dtag_hasarg) { 2008 /* 2009 * Currently, only quantize() and lquantize() take additional 2010 * arguments, and they have the same semantics: an increment 2011 * value that defaults to 1 when not present. If additional 2012 * aggregating actions take arguments, the setting of the 2013 * default argument value will presumably have to become more 2014 * sophisticated... 2015 */ 2016 arg = 1; 2017 } 2018 2019 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION; 2020 size = rec->dtrd_offset - agg->dtag_base; 2021 fsize = size + rec->dtrd_size; 2022 2023 ASSERT(dbuf->dtb_tomax != NULL); 2024 data = dbuf->dtb_tomax + offset + agg->dtag_base; 2025 2026 if ((tomax = buf->dtb_tomax) == NULL) { 2027 dtrace_buffer_drop(buf); 2028 return; 2029 } 2030 2031 /* 2032 * The metastructure is always at the bottom of the buffer. 2033 */ 2034 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size - 2035 sizeof (dtrace_aggbuffer_t)); 2036 2037 if (buf->dtb_offset == 0) { 2038 /* 2039 * We just kludge up approximately 1/8th of the size to be 2040 * buckets. If this guess ends up being routinely 2041 * off-the-mark, we may need to dynamically readjust this 2042 * based on past performance. 2043 */ 2044 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t); 2045 2046 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) < 2047 (uintptr_t)tomax || hashsize == 0) { 2048 /* 2049 * We've been given a ludicrously small buffer; 2050 * increment our drop count and leave. 2051 */ 2052 dtrace_buffer_drop(buf); 2053 return; 2054 } 2055 2056 /* 2057 * And now, a pathetic attempt to try to get an odd (or 2058 * perchance, a prime) hash size for better hash distribution. 2059 */ 2060 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3)) 2061 hashsize -= DTRACE_AGGHASHSIZE_SLEW; 2062 2063 agb->dtagb_hashsize = hashsize; 2064 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb - 2065 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *)); 2066 agb->dtagb_free = (uintptr_t)agb->dtagb_hash; 2067 2068 for (i = 0; i < agb->dtagb_hashsize; i++) 2069 agb->dtagb_hash[i] = NULL; 2070 } 2071 2072 ASSERT(agg->dtag_first != NULL); 2073 ASSERT(agg->dtag_first->dta_intuple); 2074 2075 /* 2076 * Calculate the hash value based on the key. Note that we _don't_ 2077 * include the aggid in the hashing (but we will store it as part of 2078 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time" 2079 * algorithm: a simple, quick algorithm that has no known funnels, and 2080 * gets good distribution in practice. The efficacy of the hashing 2081 * algorithm (and a comparison with other algorithms) may be found by 2082 * running the ::dtrace_aggstat MDB dcmd.
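 * In isolation -- stripped of the tuple-walking machinery below -- the
 * mixing amounts to the standard one-at-a-time hash (a reference sketch
 * only, not additional code):
 *
 *      for (each key byte b) {
 *              hashval += b;
 *              hashval += (hashval << 10);
 *              hashval ^= (hashval >> 6);
 *      }
 *      hashval += (hashval << 3);
 *      hashval ^= (hashval >> 11);
 *      hashval += (hashval << 15);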
2083 */ 2084 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2085 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2086 limit = i + act->dta_rec.dtrd_size; 2087 ASSERT(limit <= size); 2088 isstr = DTRACEACT_ISSTRING(act); 2089 2090 for (; i < limit; i++) { 2091 hashval += data[i]; 2092 hashval += (hashval << 10); 2093 hashval ^= (hashval >> 6); 2094 2095 if (isstr && data[i] == '\0') 2096 break; 2097 } 2098 } 2099 2100 hashval += (hashval << 3); 2101 hashval ^= (hashval >> 11); 2102 hashval += (hashval << 15); 2103 2104 /* 2105 * Yes, the divide here is expensive -- but it's generally the least 2106 * of the performance issues given the amount of data that we iterate 2107 * over to compute hash values, compare data, etc. 2108 */ 2109 ndx = hashval % agb->dtagb_hashsize; 2110 2111 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 2112 ASSERT((caddr_t)key >= tomax); 2113 ASSERT((caddr_t)key < tomax + buf->dtb_size); 2114 2115 if (hashval != key->dtak_hashval || key->dtak_size != size) 2116 continue; 2117 2118 kdata = key->dtak_data; 2119 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 2120 2121 for (act = agg->dtag_first; act->dta_intuple; 2122 act = act->dta_next) { 2123 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2124 limit = i + act->dta_rec.dtrd_size; 2125 ASSERT(limit <= size); 2126 isstr = DTRACEACT_ISSTRING(act); 2127 2128 for (; i < limit; i++) { 2129 if (kdata[i] != data[i]) 2130 goto next; 2131 2132 if (isstr && data[i] == '\0') 2133 break; 2134 } 2135 } 2136 2137 if (action != key->dtak_action) { 2138 /* 2139 * We are aggregating on the same value in the same 2140 * aggregation with two different aggregating actions. 2141 * (This should have been picked up in the compiler, 2142 * so we may be dealing with errant or devious DIF.) 2143 * This is an error condition; we indicate as much, 2144 * and return. 2145 */ 2146 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2147 return; 2148 } 2149 2150 /* 2151 * This is a hit: we need to apply the aggregator to 2152 * the value at this key. 2153 */ 2154 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 2155 return; 2156 next: 2157 continue; 2158 } 2159 2160 /* 2161 * We didn't find it. We need to allocate some zero-filled space, 2162 * link it into the hash table appropriately, and apply the aggregator 2163 * to the (zero-filled) value. 2164 */ 2165 offs = buf->dtb_offset; 2166 while (offs & (align - 1)) 2167 offs += sizeof (uint32_t); 2168 2169 /* 2170 * If we don't have enough room to both allocate a new key _and_ 2171 * its associated data, increment the drop count and return. 2172 */ 2173 if ((uintptr_t)tomax + offs + fsize > 2174 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 2175 dtrace_buffer_drop(buf); 2176 return; 2177 } 2178 2179 /*CONSTCOND*/ 2180 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 2181 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 2182 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 2183 2184 key->dtak_data = kdata = tomax + offs; 2185 buf->dtb_offset = offs + fsize; 2186 2187 /* 2188 * Now copy the data across. 2189 */ 2190 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 2191 2192 for (i = sizeof (dtrace_aggid_t); i < size; i++) 2193 kdata[i] = data[i]; 2194 2195 /* 2196 * Because strings are not zeroed out by default, we need to iterate 2197 * looking for actions that store strings, and we need to explicitly 2198 * pad these strings out with zeroes. 
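 * (To make the effect concrete: if the record bytes for a string key are
 * 's', 'h', '\0' followed by stale bytes from an earlier firing, the copy
 * above brings all of those bytes across verbatim; the loop below then
 * rewrites every byte after the first '\0' to '\0', so that equal strings
 * always yield byte-for-byte identical stored keys.)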
2199 */ 2200 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2201 int nul; 2202 2203 if (!DTRACEACT_ISSTRING(act)) 2204 continue; 2205 2206 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2207 limit = i + act->dta_rec.dtrd_size; 2208 ASSERT(limit <= size); 2209 2210 for (nul = 0; i < limit; i++) { 2211 if (nul) { 2212 kdata[i] = '\0'; 2213 continue; 2214 } 2215 2216 if (data[i] != '\0') 2217 continue; 2218 2219 nul = 1; 2220 } 2221 } 2222 2223 for (i = size; i < fsize; i++) 2224 kdata[i] = 0; 2225 2226 key->dtak_hashval = hashval; 2227 key->dtak_size = size; 2228 key->dtak_action = action; 2229 key->dtak_next = agb->dtagb_hash[ndx]; 2230 agb->dtagb_hash[ndx] = key; 2231 2232 /* 2233 * Finally, apply the aggregator. 2234 */ 2235 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 2236 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 2237 } 2238 2239 /* 2240 * Given consumer state, this routine finds a speculation in the INACTIVE 2241 * state and transitions it into the ACTIVE state. If there is no speculation 2242 * in the INACTIVE state, 0 is returned. In this case, no error counter is 2243 * incremented -- it is up to the caller to take appropriate action. 2244 */ 2245 static int 2246 dtrace_speculation(dtrace_state_t *state) 2247 { 2248 int i = 0; 2249 dtrace_speculation_state_t current; 2250 uint32_t *stat = &state->dts_speculations_unavail, count; 2251 2252 while (i < state->dts_nspeculations) { 2253 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2254 2255 current = spec->dtsp_state; 2256 2257 if (current != DTRACESPEC_INACTIVE) { 2258 if (current == DTRACESPEC_COMMITTINGMANY || 2259 current == DTRACESPEC_COMMITTING || 2260 current == DTRACESPEC_DISCARDING) 2261 stat = &state->dts_speculations_busy; 2262 i++; 2263 continue; 2264 } 2265 2266 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2267 current, DTRACESPEC_ACTIVE) == current) 2268 return (i + 1); 2269 } 2270 2271 /* 2272 * We couldn't find a speculation. If we found as much as a single 2273 * busy speculation buffer, we'll attribute this failure as "busy" 2274 * instead of "unavail". 2275 */ 2276 do { 2277 count = *stat; 2278 } while (dtrace_cas32(stat, count, count + 1) != count); 2279 2280 return (0); 2281 } 2282 2283 /* 2284 * This routine commits an active speculation. If the specified speculation 2285 * is not in a valid state to perform a commit(), this routine will silently do 2286 * nothing. 
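 * (For orientation only -- a hypothetical consumer-level sketch, not part
 * of this interface -- the sequence that ultimately drives this routine
 * is a D commit() of an earlier speculation:
 *
 *      syscall::open:entry  { self->spec = speculation(); }
 *      syscall::open:entry  /self->spec/ { speculate(self->spec);
 *          printf("%s", copyinstr(arg0)); }
 *      syscall::open:return /self->spec && errno != 0/
 *          { commit(self->spec); self->spec = 0; }
 *
 * It is that commit() which ultimately drives this routine.)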
The state of the specified speculation is transitioned according 2287 * to the state transition diagram outlined in <sys/dtrace_impl.h> 2288 */ 2289 static void 2290 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, 2291 dtrace_specid_t which) 2292 { 2293 dtrace_speculation_t *spec; 2294 dtrace_buffer_t *src, *dest; 2295 uintptr_t daddr, saddr, dlimit; 2296 dtrace_speculation_state_t current, new; 2297 intptr_t offs; 2298 2299 if (which == 0) 2300 return; 2301 2302 if (which > state->dts_nspeculations) { 2303 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2304 return; 2305 } 2306 2307 spec = &state->dts_speculations[which - 1]; 2308 src = &spec->dtsp_buffer[cpu]; 2309 dest = &state->dts_buffer[cpu]; 2310 2311 do { 2312 current = spec->dtsp_state; 2313 2314 if (current == DTRACESPEC_COMMITTINGMANY) 2315 break; 2316 2317 switch (current) { 2318 case DTRACESPEC_INACTIVE: 2319 case DTRACESPEC_DISCARDING: 2320 return; 2321 2322 case DTRACESPEC_COMMITTING: 2323 /* 2324 * This is only possible if we are (a) commit()'ing 2325 * without having done a prior speculate() on this CPU 2326 * and (b) racing with another commit() on a different 2327 * CPU. There's nothing to do -- we just assert that 2328 * our offset is 0. 2329 */ 2330 ASSERT(src->dtb_offset == 0); 2331 return; 2332 2333 case DTRACESPEC_ACTIVE: 2334 new = DTRACESPEC_COMMITTING; 2335 break; 2336 2337 case DTRACESPEC_ACTIVEONE: 2338 /* 2339 * This speculation is active on one CPU. If our 2340 * buffer offset is non-zero, we know that the one CPU 2341 * must be us. Otherwise, we are committing on a 2342 * different CPU from the speculate(), and we must 2343 * rely on being asynchronously cleaned. 2344 */ 2345 if (src->dtb_offset != 0) { 2346 new = DTRACESPEC_COMMITTING; 2347 break; 2348 } 2349 /*FALLTHROUGH*/ 2350 2351 case DTRACESPEC_ACTIVEMANY: 2352 new = DTRACESPEC_COMMITTINGMANY; 2353 break; 2354 2355 default: 2356 ASSERT(0); 2357 } 2358 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2359 current, new) != current); 2360 2361 /* 2362 * We have set the state to indicate that we are committing this 2363 * speculation. Now reserve the necessary space in the destination 2364 * buffer. 2365 */ 2366 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset, 2367 sizeof (uint64_t), state, NULL)) < 0) { 2368 dtrace_buffer_drop(dest); 2369 goto out; 2370 } 2371 2372 /* 2373 * We have the space; copy the buffer across. (Note that this is a 2374 * highly subobtimal bcopy(); in the unlikely event that this becomes 2375 * a serious performance issue, a high-performance DTrace-specific 2376 * bcopy() should obviously be invented.) 2377 */ 2378 daddr = (uintptr_t)dest->dtb_tomax + offs; 2379 dlimit = daddr + src->dtb_offset; 2380 saddr = (uintptr_t)src->dtb_tomax; 2381 2382 /* 2383 * First, the aligned portion. 2384 */ 2385 while (dlimit - daddr >= sizeof (uint64_t)) { 2386 *((uint64_t *)daddr) = *((uint64_t *)saddr); 2387 2388 daddr += sizeof (uint64_t); 2389 saddr += sizeof (uint64_t); 2390 } 2391 2392 /* 2393 * Now any left-over bit... 2394 */ 2395 while (dlimit - daddr) 2396 *((uint8_t *)daddr++) = *((uint8_t *)saddr++); 2397 2398 /* 2399 * Finally, commit the reserved space in the destination buffer. 2400 */ 2401 dest->dtb_offset = offs + src->dtb_offset; 2402 2403 out: 2404 /* 2405 * If we're lucky enough to be the only active CPU on this speculation 2406 * buffer, we can just set the state back to DTRACESPEC_INACTIVE. 
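 * (Summarizing the transitions effected above, assuming no other CPU
 * intervenes:
 *
 *      ACTIVE     -> COMMITTING     -> INACTIVE   (reset immediately below)
 *      ACTIVEONE  -> COMMITTING     -> INACTIVE   (we were the one CPU)
 *      ACTIVEONE  -> COMMITTINGMANY               (speculated on another CPU)
 *      ACTIVEMANY -> COMMITTINGMANY               (cleaned asynchronously)
 *
 * The COMMITTINGMANY cases are returned to INACTIVE later, by
 * dtrace_speculation_clean().)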
2407 */ 2408 if (current == DTRACESPEC_ACTIVE || 2409 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2410 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2411 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2412 2413 ASSERT(rval == DTRACESPEC_COMMITTING); 2414 } 2415 2416 src->dtb_offset = 0; 2417 src->dtb_xamot_drops += src->dtb_drops; 2418 src->dtb_drops = 0; 2419 } 2420 2421 /* 2422 * This routine discards an active speculation. If the specified speculation 2423 * is not in a valid state to perform a discard(), this routine will silently 2424 * do nothing. The state of the specified speculation is transitioned 2425 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 2426 */ 2427 static void 2428 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2429 dtrace_specid_t which) 2430 { 2431 dtrace_speculation_t *spec; 2432 dtrace_speculation_state_t current, new; 2433 dtrace_buffer_t *buf; 2434 2435 if (which == 0) 2436 return; 2437 2438 if (which > state->dts_nspeculations) { 2439 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2440 return; 2441 } 2442 2443 spec = &state->dts_speculations[which - 1]; 2444 buf = &spec->dtsp_buffer[cpu]; 2445 2446 do { 2447 current = spec->dtsp_state; 2448 2449 switch (current) { 2450 case DTRACESPEC_INACTIVE: 2451 case DTRACESPEC_COMMITTINGMANY: 2452 case DTRACESPEC_COMMITTING: 2453 case DTRACESPEC_DISCARDING: 2454 return; 2455 2456 case DTRACESPEC_ACTIVE: 2457 case DTRACESPEC_ACTIVEMANY: 2458 new = DTRACESPEC_DISCARDING; 2459 break; 2460 2461 case DTRACESPEC_ACTIVEONE: 2462 if (buf->dtb_offset != 0) { 2463 new = DTRACESPEC_INACTIVE; 2464 } else { 2465 new = DTRACESPEC_DISCARDING; 2466 } 2467 break; 2468 2469 default: 2470 ASSERT(0); 2471 } 2472 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2473 current, new) != current); 2474 2475 buf->dtb_offset = 0; 2476 buf->dtb_drops = 0; 2477 } 2478 2479 /* 2480 * Note: not called from probe context. This function is called 2481 * asynchronously from cross call context to clean any speculations that are 2482 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 2483 * transitioned back to the INACTIVE state until all CPUs have cleaned the 2484 * speculation. 2485 */ 2486 static void 2487 dtrace_speculation_clean_here(dtrace_state_t *state) 2488 { 2489 dtrace_icookie_t cookie; 2490 processorid_t cpu = CPU->cpu_id; 2491 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 2492 dtrace_specid_t i; 2493 2494 cookie = dtrace_interrupt_disable(); 2495 2496 if (dest->dtb_tomax == NULL) { 2497 dtrace_interrupt_enable(cookie); 2498 return; 2499 } 2500 2501 for (i = 0; i < state->dts_nspeculations; i++) { 2502 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2503 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 2504 2505 if (src->dtb_tomax == NULL) 2506 continue; 2507 2508 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 2509 src->dtb_offset = 0; 2510 continue; 2511 } 2512 2513 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2514 continue; 2515 2516 if (src->dtb_offset == 0) 2517 continue; 2518 2519 dtrace_speculation_commit(state, cpu, i + 1); 2520 } 2521 2522 dtrace_interrupt_enable(cookie); 2523 } 2524 2525 /* 2526 * Note: not called from probe context. This function is called 2527 * asynchronously (and at a regular interval) to clean any speculations that 2528 * are in the COMMITTINGMANY or DISCARDING states. 
If it discovers that there 2529 * is work to be done, it cross calls all CPUs to perform that work; 2530 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the 2531 * INACTIVE state until they have been cleaned by all CPUs. 2532 */ 2533 static void 2534 dtrace_speculation_clean(dtrace_state_t *state) 2535 { 2536 int work = 0, rv; 2537 dtrace_specid_t i; 2538 2539 for (i = 0; i < state->dts_nspeculations; i++) { 2540 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2541 2542 ASSERT(!spec->dtsp_cleaning); 2543 2544 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 2545 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2546 continue; 2547 2548 work++; 2549 spec->dtsp_cleaning = 1; 2550 } 2551 2552 if (!work) 2553 return; 2554 2555 dtrace_xcall(DTRACE_CPUALL, 2556 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 2557 2558 /* 2559 * We now know that all CPUs have committed or discarded their 2560 * speculation buffers, as appropriate. We can now set the state 2561 * to inactive. 2562 */ 2563 for (i = 0; i < state->dts_nspeculations; i++) { 2564 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2565 dtrace_speculation_state_t current, new; 2566 2567 if (!spec->dtsp_cleaning) 2568 continue; 2569 2570 current = spec->dtsp_state; 2571 ASSERT(current == DTRACESPEC_DISCARDING || 2572 current == DTRACESPEC_COMMITTINGMANY); 2573 2574 new = DTRACESPEC_INACTIVE; 2575 2576 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 2577 ASSERT(rv == current); 2578 spec->dtsp_cleaning = 0; 2579 } 2580 } 2581 2582 /* 2583 * Called as part of a speculate() to get the speculative buffer associated 2584 * with a given speculation. Returns NULL if the specified speculation is not 2585 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 2586 * the active CPU is not the specified CPU -- the speculation will be 2587 * atomically transitioned into the ACTIVEMANY state. 2588 */ 2589 static dtrace_buffer_t * 2590 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 2591 dtrace_specid_t which) 2592 { 2593 dtrace_speculation_t *spec; 2594 dtrace_speculation_state_t current, new; 2595 dtrace_buffer_t *buf; 2596 2597 if (which == 0) 2598 return (NULL); 2599 2600 if (which > state->dts_nspeculations) { 2601 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2602 return (NULL); 2603 } 2604 2605 spec = &state->dts_speculations[which - 1]; 2606 buf = &spec->dtsp_buffer[cpuid]; 2607 2608 do { 2609 current = spec->dtsp_state; 2610 2611 switch (current) { 2612 case DTRACESPEC_INACTIVE: 2613 case DTRACESPEC_COMMITTINGMANY: 2614 case DTRACESPEC_DISCARDING: 2615 return (NULL); 2616 2617 case DTRACESPEC_COMMITTING: 2618 ASSERT(buf->dtb_offset == 0); 2619 return (NULL); 2620 2621 case DTRACESPEC_ACTIVEONE: 2622 /* 2623 * This speculation is currently active on one CPU. 2624 * Check the offset in the buffer; if it's non-zero, 2625 * that CPU must be us (and we leave the state alone). 2626 * If it's zero, assume that we're starting on a new 2627 * CPU -- and change the state to indicate that the 2628 * speculation is active on more than one CPU.
2629 */ 2630 if (buf->dtb_offset != 0) 2631 return (buf); 2632 2633 new = DTRACESPEC_ACTIVEMANY; 2634 break; 2635 2636 case DTRACESPEC_ACTIVEMANY: 2637 return (buf); 2638 2639 case DTRACESPEC_ACTIVE: 2640 new = DTRACESPEC_ACTIVEONE; 2641 break; 2642 2643 default: 2644 ASSERT(0); 2645 } 2646 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2647 current, new) != current); 2648 2649 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 2650 return (buf); 2651 } 2652 2653 /* 2654 * Return a string. In the event that the user lacks the privilege to access 2655 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2656 * don't fail access checking. 2657 * 2658 * dtrace_dif_variable() uses this routine as a helper for various 2659 * builtin values such as 'execname' and 'probefunc.' 2660 */ 2661 uintptr_t 2662 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state, 2663 dtrace_mstate_t *mstate) 2664 { 2665 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2666 uintptr_t ret; 2667 size_t strsz; 2668 2669 /* 2670 * The easy case: this probe is allowed to read all of memory, so 2671 * we can just return this as a vanilla pointer. 2672 */ 2673 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 2674 return (addr); 2675 2676 /* 2677 * This is the tougher case: we copy the string in question from 2678 * kernel memory into scratch memory and return it that way: this 2679 * ensures that we won't trip up when access checking tests the 2680 * BYREF return value. 2681 */ 2682 strsz = dtrace_strlen((char *)addr, size) + 1; 2683 2684 if (mstate->dtms_scratch_ptr + strsz > 2685 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2686 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2687 return (NULL); 2688 } 2689 2690 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2691 strsz); 2692 ret = mstate->dtms_scratch_ptr; 2693 mstate->dtms_scratch_ptr += strsz; 2694 return (ret); 2695 } 2696 2697 /* 2698 * This function implements the DIF emulator's variable lookups. The emulator 2699 * passes a reserved variable identifier and optional built-in array index. 2700 */ 2701 static uint64_t 2702 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 2703 uint64_t ndx) 2704 { 2705 /* 2706 * If we're accessing one of the uncached arguments, we'll turn this 2707 * into a reference in the args array. 2708 */ 2709 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 2710 ndx = v - DIF_VAR_ARG0; 2711 v = DIF_VAR_ARGS; 2712 } 2713 2714 switch (v) { 2715 case DIF_VAR_ARGS: 2716 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 2717 if (ndx >= sizeof (mstate->dtms_arg) / 2718 sizeof (mstate->dtms_arg[0])) { 2719 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2720 dtrace_provider_t *pv; 2721 uint64_t val; 2722 2723 pv = mstate->dtms_probe->dtpr_provider; 2724 if (pv->dtpv_pops.dtps_getargval != NULL) 2725 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 2726 mstate->dtms_probe->dtpr_id, 2727 mstate->dtms_probe->dtpr_arg, ndx, aframes); 2728 else 2729 val = dtrace_getarg(ndx, aframes); 2730 2731 /* 2732 * This is regrettably required to keep the compiler 2733 * from tail-optimizing the call to dtrace_getarg(). 2734 * The condition always evaluates to true, but the 2735 * compiler has no way of figuring that out a priori. 2736 * (None of this would be necessary if the compiler 2737 * could be relied upon to _always_ tail-optimize 2738 * the call to dtrace_getarg() -- but it can't.) 
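 * (The presumable reason -- an inference from the surrounding code, not
 * a documented guarantee -- is that dtrace_getarg() finds the argument by
 * walking a fixed number of artificial frames, computed above as aframes;
 * a tail-optimized call would remove this frame from the stack and throw
 * that count off.)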
2739 */ 2740 if (mstate->dtms_probe != NULL) 2741 return (val); 2742 2743 ASSERT(0); 2744 } 2745 2746 return (mstate->dtms_arg[ndx]); 2747 2748 case DIF_VAR_UREGS: { 2749 klwp_t *lwp; 2750 2751 if (!dtrace_priv_proc(state)) 2752 return (0); 2753 2754 if ((lwp = curthread->t_lwp) == NULL) { 2755 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2756 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = NULL; 2757 return (0); 2758 } 2759 2760 return (dtrace_getreg(lwp->lwp_regs, ndx)); 2761 } 2762 2763 case DIF_VAR_VMREGS: { 2764 uint64_t rval; 2765 2766 if (!dtrace_priv_kernel(state)) 2767 return (0); 2768 2769 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2770 2771 rval = dtrace_getvmreg(ndx, 2772 &cpu_core[CPU->cpu_id].cpuc_dtrace_flags); 2773 2774 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2775 2776 return (rval); 2777 } 2778 2779 case DIF_VAR_CURTHREAD: 2780 if (!dtrace_priv_kernel(state)) 2781 return (0); 2782 return ((uint64_t)(uintptr_t)curthread); 2783 2784 case DIF_VAR_TIMESTAMP: 2785 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 2786 mstate->dtms_timestamp = dtrace_gethrtime(); 2787 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 2788 } 2789 return (mstate->dtms_timestamp); 2790 2791 case DIF_VAR_VTIMESTAMP: 2792 ASSERT(dtrace_vtime_references != 0); 2793 return (curthread->t_dtrace_vtime); 2794 2795 case DIF_VAR_WALLTIMESTAMP: 2796 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 2797 mstate->dtms_walltimestamp = dtrace_gethrestime(); 2798 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 2799 } 2800 return (mstate->dtms_walltimestamp); 2801 2802 case DIF_VAR_IPL: 2803 if (!dtrace_priv_kernel(state)) 2804 return (0); 2805 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 2806 mstate->dtms_ipl = dtrace_getipl(); 2807 mstate->dtms_present |= DTRACE_MSTATE_IPL; 2808 } 2809 return (mstate->dtms_ipl); 2810 2811 case DIF_VAR_EPID: 2812 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 2813 return (mstate->dtms_epid); 2814 2815 case DIF_VAR_ID: 2816 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2817 return (mstate->dtms_probe->dtpr_id); 2818 2819 case DIF_VAR_STACKDEPTH: 2820 if (!dtrace_priv_kernel(state)) 2821 return (0); 2822 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 2823 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2824 2825 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 2826 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 2827 } 2828 return (mstate->dtms_stackdepth); 2829 2830 case DIF_VAR_USTACKDEPTH: 2831 if (!dtrace_priv_proc(state)) 2832 return (0); 2833 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 2834 /* 2835 * See comment in DIF_VAR_PID. 2836 */ 2837 if (DTRACE_ANCHORED(mstate->dtms_probe) && 2838 CPU_ON_INTR(CPU)) { 2839 mstate->dtms_ustackdepth = 0; 2840 } else { 2841 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2842 mstate->dtms_ustackdepth = 2843 dtrace_getustackdepth(); 2844 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2845 } 2846 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 2847 } 2848 return (mstate->dtms_ustackdepth); 2849 2850 case DIF_VAR_CALLER: 2851 if (!dtrace_priv_kernel(state)) 2852 return (0); 2853 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 2854 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2855 2856 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 2857 /* 2858 * If this is an unanchored probe, we are 2859 * required to go through the slow path: 2860 * dtrace_caller() only guarantees correct 2861 * results for anchored probes. 
2862 */ 2863 pc_t caller[2]; 2864 2865 dtrace_getpcstack(caller, 2, aframes, 2866 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 2867 mstate->dtms_caller = caller[1]; 2868 } else if ((mstate->dtms_caller = 2869 dtrace_caller(aframes)) == -1) { 2870 /* 2871 * We have failed to do this the quick way; 2872 * we must resort to the slower approach of 2873 * calling dtrace_getpcstack(). 2874 */ 2875 pc_t caller; 2876 2877 dtrace_getpcstack(&caller, 1, aframes, NULL); 2878 mstate->dtms_caller = caller; 2879 } 2880 2881 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 2882 } 2883 return (mstate->dtms_caller); 2884 2885 case DIF_VAR_UCALLER: 2886 if (!dtrace_priv_proc(state)) 2887 return (0); 2888 2889 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 2890 uint64_t ustack[3]; 2891 2892 /* 2893 * dtrace_getupcstack() fills in the first uint64_t 2894 * with the current PID. The second uint64_t will 2895 * be the program counter at user-level. The third 2896 * uint64_t will contain the caller, which is what 2897 * we're after. 2898 */ 2899 ustack[2] = NULL; 2900 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2901 dtrace_getupcstack(ustack, 3); 2902 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2903 mstate->dtms_ucaller = ustack[2]; 2904 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 2905 } 2906 2907 return (mstate->dtms_ucaller); 2908 2909 case DIF_VAR_PROBEPROV: 2910 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2911 return (dtrace_dif_varstr( 2912 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 2913 state, mstate)); 2914 2915 case DIF_VAR_PROBEMOD: 2916 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2917 return (dtrace_dif_varstr( 2918 (uintptr_t)mstate->dtms_probe->dtpr_mod, 2919 state, mstate)); 2920 2921 case DIF_VAR_PROBEFUNC: 2922 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2923 return (dtrace_dif_varstr( 2924 (uintptr_t)mstate->dtms_probe->dtpr_func, 2925 state, mstate)); 2926 2927 case DIF_VAR_PROBENAME: 2928 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2929 return (dtrace_dif_varstr( 2930 (uintptr_t)mstate->dtms_probe->dtpr_name, 2931 state, mstate)); 2932 2933 case DIF_VAR_PID: 2934 if (!dtrace_priv_proc(state)) 2935 return (0); 2936 2937 /* 2938 * Note that we are assuming that an unanchored probe is 2939 * always due to a high-level interrupt. (And we're assuming 2940 * that there is only a single high level interrupt.) 2941 */ 2942 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2943 return (pid0.pid_id); 2944 2945 /* 2946 * It is always safe to dereference one's own t_procp pointer: 2947 * it always points to a valid, allocated proc structure. 2948 * Further, it is always safe to dereference the p_pidp member 2949 * of one's own proc structure. (These are truisms becuase 2950 * threads and processes don't clean up their own state -- 2951 * they leave that task to whomever reaps them.) 2952 */ 2953 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 2954 2955 case DIF_VAR_PPID: 2956 if (!dtrace_priv_proc(state)) 2957 return (0); 2958 2959 /* 2960 * See comment in DIF_VAR_PID. 2961 */ 2962 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2963 return (pid0.pid_id); 2964 2965 /* 2966 * It is always safe to dereference one's own t_procp pointer: 2967 * it always points to a valid, allocated proc structure. 2968 * (This is true because threads don't clean up their own 2969 * state -- they leave that task to whomever reaps them.) 
2970 */ 2971 return ((uint64_t)curthread->t_procp->p_ppid); 2972 2973 case DIF_VAR_TID: 2974 /* 2975 * See comment in DIF_VAR_PID. 2976 */ 2977 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2978 return (0); 2979 2980 return ((uint64_t)curthread->t_tid); 2981 2982 case DIF_VAR_EXECNAME: 2983 if (!dtrace_priv_proc(state)) 2984 return (0); 2985 2986 /* 2987 * See comment in DIF_VAR_PID. 2988 */ 2989 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2990 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 2991 2992 /* 2993 * It is always safe to dereference one's own t_procp pointer: 2994 * it always points to a valid, allocated proc structure. 2995 * (This is true because threads don't clean up their own 2996 * state -- they leave that task to whomever reaps them.) 2997 */ 2998 return (dtrace_dif_varstr( 2999 (uintptr_t)curthread->t_procp->p_user.u_comm, 3000 state, mstate)); 3001 3002 case DIF_VAR_ZONENAME: 3003 if (!dtrace_priv_proc(state)) 3004 return (0); 3005 3006 /* 3007 * See comment in DIF_VAR_PID. 3008 */ 3009 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3010 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 3011 3012 /* 3013 * It is always safe to dereference one's own t_procp pointer: 3014 * it always points to a valid, allocated proc structure. 3015 * (This is true because threads don't clean up their own 3016 * state -- they leave that task to whomever reaps them.) 3017 */ 3018 return (dtrace_dif_varstr( 3019 (uintptr_t)curthread->t_procp->p_zone->zone_name, 3020 state, mstate)); 3021 3022 case DIF_VAR_UID: 3023 if (!dtrace_priv_proc(state)) 3024 return (0); 3025 3026 /* 3027 * See comment in DIF_VAR_PID. 3028 */ 3029 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3030 return ((uint64_t)p0.p_cred->cr_uid); 3031 3032 /* 3033 * It is always safe to dereference one's own t_procp pointer: 3034 * it always points to a valid, allocated proc structure. 3035 * (This is true because threads don't clean up their own 3036 * state -- they leave that task to whomever reaps them.) 3037 * 3038 * Additionally, it is safe to dereference one's own process 3039 * credential, since this is never NULL after process birth. 3040 */ 3041 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 3042 3043 case DIF_VAR_GID: 3044 if (!dtrace_priv_proc(state)) 3045 return (0); 3046 3047 /* 3048 * See comment in DIF_VAR_PID. 3049 */ 3050 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3051 return ((uint64_t)p0.p_cred->cr_gid); 3052 3053 /* 3054 * It is always safe to dereference one's own t_procp pointer: 3055 * it always points to a valid, allocated proc structure. 3056 * (This is true because threads don't clean up their own 3057 * state -- they leave that task to whomever reaps them.) 3058 * 3059 * Additionally, it is safe to dereference one's own process 3060 * credential, since this is never NULL after process birth. 3061 */ 3062 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 3063 3064 case DIF_VAR_ERRNO: { 3065 klwp_t *lwp; 3066 if (!dtrace_priv_proc(state)) 3067 return (0); 3068 3069 /* 3070 * See comment in DIF_VAR_PID. 3071 */ 3072 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3073 return (0); 3074 3075 /* 3076 * It is always safe to dereference one's own t_lwp pointer in 3077 * the event that this pointer is non-NULL. (This is true 3078 * because threads and lwps don't clean up their own state -- 3079 * they leave that task to whomever reaps them.) 
3080 */ 3081 if ((lwp = curthread->t_lwp) == NULL) 3082 return (0); 3083 3084 return ((uint64_t)lwp->lwp_errno); 3085 } 3086 default: 3087 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 3088 return (0); 3089 } 3090 } 3091 3092 /* 3093 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 3094 * Notice that we don't bother validating the proper number of arguments or 3095 * their types in the tuple stack. This isn't needed because all argument 3096 * interpretation is safe because of our load safety -- the worst that can 3097 * happen is that a bogus program can obtain bogus results. 3098 */ 3099 static void 3100 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 3101 dtrace_key_t *tupregs, int nargs, 3102 dtrace_mstate_t *mstate, dtrace_state_t *state) 3103 { 3104 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 3105 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 3106 dtrace_vstate_t *vstate = &state->dts_vstate; 3107 3108 union { 3109 mutex_impl_t mi; 3110 uint64_t mx; 3111 } m; 3112 3113 union { 3114 krwlock_t ri; 3115 uintptr_t rw; 3116 } r; 3117 3118 switch (subr) { 3119 case DIF_SUBR_RAND: 3120 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 3121 break; 3122 3123 case DIF_SUBR_MUTEX_OWNED: 3124 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3125 mstate, vstate)) { 3126 regs[rd] = NULL; 3127 break; 3128 } 3129 3130 m.mx = dtrace_load64(tupregs[0].dttk_value); 3131 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 3132 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 3133 else 3134 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 3135 break; 3136 3137 case DIF_SUBR_MUTEX_OWNER: 3138 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3139 mstate, vstate)) { 3140 regs[rd] = NULL; 3141 break; 3142 } 3143 3144 m.mx = dtrace_load64(tupregs[0].dttk_value); 3145 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 3146 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 3147 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 3148 else 3149 regs[rd] = 0; 3150 break; 3151 3152 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3153 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3154 mstate, vstate)) { 3155 regs[rd] = NULL; 3156 break; 3157 } 3158 3159 m.mx = dtrace_load64(tupregs[0].dttk_value); 3160 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 3161 break; 3162 3163 case DIF_SUBR_MUTEX_TYPE_SPIN: 3164 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3165 mstate, vstate)) { 3166 regs[rd] = NULL; 3167 break; 3168 } 3169 3170 m.mx = dtrace_load64(tupregs[0].dttk_value); 3171 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 3172 break; 3173 3174 case DIF_SUBR_RW_READ_HELD: { 3175 uintptr_t tmp; 3176 3177 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3178 mstate, vstate)) { 3179 regs[rd] = NULL; 3180 break; 3181 } 3182 3183 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3184 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 3185 break; 3186 } 3187 3188 case DIF_SUBR_RW_WRITE_HELD: 3189 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3190 mstate, vstate)) { 3191 regs[rd] = NULL; 3192 break; 3193 } 3194 3195 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3196 regs[rd] = _RW_WRITE_HELD(&r.ri); 3197 break; 3198 3199 case DIF_SUBR_RW_ISWRITER: 3200 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3201 mstate, vstate)) { 3202 regs[rd] = NULL; 3203 break; 3204 } 3205 3206 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3207 regs[rd] = _RW_ISWRITER(&r.ri); 3208 break; 3209 3210 case DIF_SUBR_BCOPY: { 3211 /* 3212 * We need to be sure that the 
destination is in the scratch 3213 * region -- no other region is allowed. 3214 */ 3215 uintptr_t src = tupregs[0].dttk_value; 3216 uintptr_t dest = tupregs[1].dttk_value; 3217 size_t size = tupregs[2].dttk_value; 3218 3219 if (!dtrace_inscratch(dest, size, mstate)) { 3220 *flags |= CPU_DTRACE_BADADDR; 3221 *illval = regs[rd]; 3222 break; 3223 } 3224 3225 if (!dtrace_canload(src, size, mstate, vstate)) { 3226 regs[rd] = NULL; 3227 break; 3228 } 3229 3230 dtrace_bcopy((void *)src, (void *)dest, size); 3231 break; 3232 } 3233 3234 case DIF_SUBR_ALLOCA: 3235 case DIF_SUBR_COPYIN: { 3236 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 3237 uint64_t size = 3238 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 3239 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 3240 3241 /* 3242 * This action doesn't require any credential checks since 3243 * probes will not activate in user contexts to which the 3244 * enabling user does not have permissions. 3245 */ 3246 3247 /* 3248 * Rounding up the user allocation size could have overflowed 3249 * a large, bogus allocation (like -1ULL) to 0. 3250 */ 3251 if (scratch_size < size || 3252 !DTRACE_INSCRATCH(mstate, scratch_size)) { 3253 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3254 regs[rd] = NULL; 3255 break; 3256 } 3257 3258 if (subr == DIF_SUBR_COPYIN) { 3259 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3260 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3261 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3262 } 3263 3264 mstate->dtms_scratch_ptr += scratch_size; 3265 regs[rd] = dest; 3266 break; 3267 } 3268 3269 case DIF_SUBR_COPYINTO: { 3270 uint64_t size = tupregs[1].dttk_value; 3271 uintptr_t dest = tupregs[2].dttk_value; 3272 3273 /* 3274 * This action doesn't require any credential checks since 3275 * probes will not activate in user contexts to which the 3276 * enabling user does not have permissions. 3277 */ 3278 if (!dtrace_inscratch(dest, size, mstate)) { 3279 *flags |= CPU_DTRACE_BADADDR; 3280 *illval = regs[rd]; 3281 break; 3282 } 3283 3284 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3285 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3286 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3287 break; 3288 } 3289 3290 case DIF_SUBR_COPYINSTR: { 3291 uintptr_t dest = mstate->dtms_scratch_ptr; 3292 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3293 3294 if (nargs > 1 && tupregs[1].dttk_value < size) 3295 size = tupregs[1].dttk_value + 1; 3296 3297 /* 3298 * This action doesn't require any credential checks since 3299 * probes will not activate in user contexts to which the 3300 * enabling user does not have permissions. 
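 * (As a worked example of the size clamp above: copyinstr(addr, 8)
 * arrives here with a second argument of 8, so size becomes 9; at most
 * nine bytes are copied into scratch and dest[8] is forced to '\0' below,
 * yielding at most eight characters regardless of the source's length.)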
3301 */ 3302 if (!DTRACE_INSCRATCH(mstate, size)) { 3303 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3304 regs[rd] = NULL; 3305 break; 3306 } 3307 3308 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3309 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); 3310 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3311 3312 ((char *)dest)[size - 1] = '\0'; 3313 mstate->dtms_scratch_ptr += size; 3314 regs[rd] = dest; 3315 break; 3316 } 3317 3318 case DIF_SUBR_MSGSIZE: 3319 case DIF_SUBR_MSGDSIZE: { 3320 uintptr_t baddr = tupregs[0].dttk_value, daddr; 3321 uintptr_t wptr, rptr; 3322 size_t count = 0; 3323 int cont = 0; 3324 3325 while (baddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 3326 3327 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, 3328 vstate)) { 3329 regs[rd] = NULL; 3330 break; 3331 } 3332 3333 wptr = dtrace_loadptr(baddr + 3334 offsetof(mblk_t, b_wptr)); 3335 3336 rptr = dtrace_loadptr(baddr + 3337 offsetof(mblk_t, b_rptr)); 3338 3339 if (wptr < rptr) { 3340 *flags |= CPU_DTRACE_BADADDR; 3341 *illval = tupregs[0].dttk_value; 3342 break; 3343 } 3344 3345 daddr = dtrace_loadptr(baddr + 3346 offsetof(mblk_t, b_datap)); 3347 3348 baddr = dtrace_loadptr(baddr + 3349 offsetof(mblk_t, b_cont)); 3350 3351 /* 3352 * We want to prevent against denial-of-service here, 3353 * so we're only going to search the list for 3354 * dtrace_msgdsize_max mblks. 3355 */ 3356 if (cont++ > dtrace_msgdsize_max) { 3357 *flags |= CPU_DTRACE_ILLOP; 3358 break; 3359 } 3360 3361 if (subr == DIF_SUBR_MSGDSIZE) { 3362 if (dtrace_load8(daddr + 3363 offsetof(dblk_t, db_type)) != M_DATA) 3364 continue; 3365 } 3366 3367 count += wptr - rptr; 3368 } 3369 3370 if (!(*flags & CPU_DTRACE_FAULT)) 3371 regs[rd] = count; 3372 3373 break; 3374 } 3375 3376 case DIF_SUBR_PROGENYOF: { 3377 pid_t pid = tupregs[0].dttk_value; 3378 proc_t *p; 3379 int rval = 0; 3380 3381 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3382 3383 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 3384 if (p->p_pidp->pid_id == pid) { 3385 rval = 1; 3386 break; 3387 } 3388 } 3389 3390 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3391 3392 regs[rd] = rval; 3393 break; 3394 } 3395 3396 case DIF_SUBR_SPECULATION: 3397 regs[rd] = dtrace_speculation(state); 3398 break; 3399 3400 case DIF_SUBR_COPYOUT: { 3401 uintptr_t kaddr = tupregs[0].dttk_value; 3402 uintptr_t uaddr = tupregs[1].dttk_value; 3403 uint64_t size = tupregs[2].dttk_value; 3404 3405 if (!dtrace_destructive_disallow && 3406 dtrace_priv_proc_control(state) && 3407 !dtrace_istoxic(kaddr, size)) { 3408 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3409 dtrace_copyout(kaddr, uaddr, size, flags); 3410 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3411 } 3412 break; 3413 } 3414 3415 case DIF_SUBR_COPYOUTSTR: { 3416 uintptr_t kaddr = tupregs[0].dttk_value; 3417 uintptr_t uaddr = tupregs[1].dttk_value; 3418 uint64_t size = tupregs[2].dttk_value; 3419 3420 if (!dtrace_destructive_disallow && 3421 dtrace_priv_proc_control(state) && 3422 !dtrace_istoxic(kaddr, size)) { 3423 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3424 dtrace_copyoutstr(kaddr, uaddr, size, flags); 3425 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3426 } 3427 break; 3428 } 3429 3430 case DIF_SUBR_STRLEN: { 3431 size_t sz; 3432 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 3433 sz = dtrace_strlen((char *)addr, 3434 state->dts_options[DTRACEOPT_STRSIZE]); 3435 3436 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) { 3437 regs[rd] = NULL; 3438 break; 3439 } 3440 3441 regs[rd] = sz; 3442 3443 break; 3444 } 3445 3446 case DIF_SUBR_STRCHR: 3447 case 
DIF_SUBR_STRRCHR: { 3448 /* 3449 * We're going to iterate over the string looking for the 3450 * specified character. We will iterate until we have reached 3451 * the string length or we have found the character. If this 3452 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 3453 * of the specified character instead of the first. 3454 */ 3455 uintptr_t saddr = tupregs[0].dttk_value; 3456 uintptr_t addr = tupregs[0].dttk_value; 3457 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 3458 char c, target = (char)tupregs[1].dttk_value; 3459 3460 for (regs[rd] = NULL; addr < limit; addr++) { 3461 if ((c = dtrace_load8(addr)) == target) { 3462 regs[rd] = addr; 3463 3464 if (subr == DIF_SUBR_STRCHR) 3465 break; 3466 } 3467 3468 if (c == '\0') 3469 break; 3470 } 3471 3472 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) { 3473 regs[rd] = NULL; 3474 break; 3475 } 3476 3477 break; 3478 } 3479 3480 case DIF_SUBR_STRSTR: 3481 case DIF_SUBR_INDEX: 3482 case DIF_SUBR_RINDEX: { 3483 /* 3484 * We're going to iterate over the string looking for the 3485 * specified string. We will iterate until we have reached 3486 * the string length or we have found the string. (Yes, this 3487 * is done in the most naive way possible -- but considering 3488 * that the string we're searching for is likely to be 3489 * relatively short, the complexity of Rabin-Karp or similar 3490 * hardly seems merited.) 3491 */ 3492 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 3493 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 3494 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3495 size_t len = dtrace_strlen(addr, size); 3496 size_t sublen = dtrace_strlen(substr, size); 3497 char *limit = addr + len, *orig = addr; 3498 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 3499 int inc = 1; 3500 3501 regs[rd] = notfound; 3502 3503 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 3504 regs[rd] = NULL; 3505 break; 3506 } 3507 3508 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 3509 vstate)) { 3510 regs[rd] = NULL; 3511 break; 3512 } 3513 3514 /* 3515 * strstr() and index()/rindex() have similar semantics if 3516 * both strings are the empty string: strstr() returns a 3517 * pointer to the (empty) string, and index() and rindex() 3518 * both return index 0 (regardless of any position argument). 3519 */ 3520 if (sublen == 0 && len == 0) { 3521 if (subr == DIF_SUBR_STRSTR) 3522 regs[rd] = (uintptr_t)addr; 3523 else 3524 regs[rd] = 0; 3525 break; 3526 } 3527 3528 if (subr != DIF_SUBR_STRSTR) { 3529 if (subr == DIF_SUBR_RINDEX) { 3530 limit = orig - 1; 3531 addr += len; 3532 inc = -1; 3533 } 3534 3535 /* 3536 * Both index() and rindex() take an optional position 3537 * argument that denotes the starting position. 3538 */ 3539 if (nargs == 3) { 3540 int64_t pos = (int64_t)tupregs[2].dttk_value; 3541 3542 /* 3543 * If the position argument to index() is 3544 * negative, Perl implicitly clamps it at 3545 * zero. This semantic is a little surprising 3546 * given the special meaning of negative 3547 * positions to similar Perl functions like 3548 * substr(), but it appears to reflect a 3549 * notion that index() can start from a 3550 * negative index and increment its way up to 3551 * the string. Given this notion, Perl's 3552 * rindex() is at least self-consistent in 3553 * that it implicitly clamps positions greater 3554 * than the string length to be the string 3555 * length. 
Where Perl completely loses 3556 * coherence, however, is when the specified 3557 * substring is the empty string (""). In 3558 * this case, even if the position is 3559 * negative, rindex() returns 0 -- and even if 3560 * the position is greater than the length, 3561 * index() returns the string length. These 3562 * semantics violate the notion that index() 3563 * should never return a value less than the 3564 * specified position and that rindex() should 3565 * never return a value greater than the 3566 * specified position. (One assumes that 3567 * these semantics are artifacts of Perl's 3568 * implementation and not the results of 3569 * deliberate design -- it beggars belief that 3570 * even Larry Wall could desire such oddness.) 3571 * While in the abstract one would wish for 3572 * consistent position semantics across 3573 * substr(), index() and rindex() -- or at the 3574 * very least self-consistent position 3575 * semantics for index() and rindex() -- we 3576 * instead opt to keep with the extant Perl 3577 * semantics, in all their broken glory. (Do 3578 * we have more desire to maintain Perl's 3579 * semantics than Perl does? Probably.) 3580 */ 3581 if (subr == DIF_SUBR_RINDEX) { 3582 if (pos < 0) { 3583 if (sublen == 0) 3584 regs[rd] = 0; 3585 break; 3586 } 3587 3588 if (pos > len) 3589 pos = len; 3590 } else { 3591 if (pos < 0) 3592 pos = 0; 3593 3594 if (pos >= len) { 3595 if (sublen == 0) 3596 regs[rd] = len; 3597 break; 3598 } 3599 } 3600 3601 addr = orig + pos; 3602 } 3603 } 3604 3605 for (regs[rd] = notfound; addr != limit; addr += inc) { 3606 if (dtrace_strncmp(addr, substr, sublen) == 0) { 3607 if (subr != DIF_SUBR_STRSTR) { 3608 /* 3609 * As D index() and rindex() are 3610 * modeled on Perl (and not on awk), 3611 * we return a zero-based (and not a 3612 * one-based) index. (For you Perl 3613 * weenies: no, we're not going to add 3614 * $[ -- and shouldn't you be at a con 3615 * or something?) 3616 */ 3617 regs[rd] = (uintptr_t)(addr - orig); 3618 break; 3619 } 3620 3621 ASSERT(subr == DIF_SUBR_STRSTR); 3622 regs[rd] = (uintptr_t)addr; 3623 break; 3624 } 3625 } 3626 3627 break; 3628 } 3629 3630 case DIF_SUBR_STRTOK: { 3631 uintptr_t addr = tupregs[0].dttk_value; 3632 uintptr_t tokaddr = tupregs[1].dttk_value; 3633 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3634 uintptr_t limit, toklimit = tokaddr + size; 3635 uint8_t c, tokmap[32]; /* 256 / 8 */ 3636 char *dest = (char *)mstate->dtms_scratch_ptr; 3637 int i; 3638 3639 /* 3640 * Check both the token buffer and (later) the input buffer, 3641 * since both could be non-scratch addresses. 3642 */ 3643 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) { 3644 regs[rd] = NULL; 3645 break; 3646 } 3647 3648 if (!DTRACE_INSCRATCH(mstate, size)) { 3649 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3650 regs[rd] = NULL; 3651 break; 3652 } 3653 3654 if (addr == NULL) { 3655 /* 3656 * If the address specified is NULL, we use our saved 3657 * strtok pointer from the mstate. Note that this 3658 * means that the saved strtok pointer is _only_ 3659 * valid within multiple enablings of the same probe -- 3660 * it behaves like an implicit clause-local variable. 3661 */ 3662 addr = mstate->dtms_strtok; 3663 } else { 3664 /* 3665 * If the user-specified address is non-NULL we must 3666 * access check it. 
This is the only time we have 3667 * a chance to do so, since this address may reside 3668 * in the string table of this clause-- future calls 3669 * (when we fetch addr from mstate->dtms_strtok) 3670 * would fail this access check. 3671 */ 3672 if (!dtrace_strcanload(addr, size, mstate, vstate)) { 3673 regs[rd] = NULL; 3674 break; 3675 } 3676 } 3677 3678 /* 3679 * First, zero the token map, and then process the token 3680 * string -- setting a bit in the map for every character 3681 * found in the token string. 3682 */ 3683 for (i = 0; i < sizeof (tokmap); i++) 3684 tokmap[i] = 0; 3685 3686 for (; tokaddr < toklimit; tokaddr++) { 3687 if ((c = dtrace_load8(tokaddr)) == '\0') 3688 break; 3689 3690 ASSERT((c >> 3) < sizeof (tokmap)); 3691 tokmap[c >> 3] |= (1 << (c & 0x7)); 3692 } 3693 3694 for (limit = addr + size; addr < limit; addr++) { 3695 /* 3696 * We're looking for a character that is _not_ contained 3697 * in the token string. 3698 */ 3699 if ((c = dtrace_load8(addr)) == '\0') 3700 break; 3701 3702 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 3703 break; 3704 } 3705 3706 if (c == '\0') { 3707 /* 3708 * We reached the end of the string without finding 3709 * any character that was not in the token string. 3710 * We return NULL in this case, and we set the saved 3711 * address to NULL as well. 3712 */ 3713 regs[rd] = NULL; 3714 mstate->dtms_strtok = NULL; 3715 break; 3716 } 3717 3718 /* 3719 * From here on, we're copying into the destination string. 3720 */ 3721 for (i = 0; addr < limit && i < size - 1; addr++) { 3722 if ((c = dtrace_load8(addr)) == '\0') 3723 break; 3724 3725 if (tokmap[c >> 3] & (1 << (c & 0x7))) 3726 break; 3727 3728 ASSERT(i < size); 3729 dest[i++] = c; 3730 } 3731 3732 ASSERT(i < size); 3733 dest[i] = '\0'; 3734 regs[rd] = (uintptr_t)dest; 3735 mstate->dtms_scratch_ptr += size; 3736 mstate->dtms_strtok = addr; 3737 break; 3738 } 3739 3740 case DIF_SUBR_SUBSTR: { 3741 uintptr_t s = tupregs[0].dttk_value; 3742 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3743 char *d = (char *)mstate->dtms_scratch_ptr; 3744 int64_t index = (int64_t)tupregs[1].dttk_value; 3745 int64_t remaining = (int64_t)tupregs[2].dttk_value; 3746 size_t len = dtrace_strlen((char *)s, size); 3747 int64_t i; 3748 3749 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 3750 regs[rd] = NULL; 3751 break; 3752 } 3753 3754 if (!DTRACE_INSCRATCH(mstate, size)) { 3755 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3756 regs[rd] = NULL; 3757 break; 3758 } 3759 3760 if (nargs <= 2) 3761 remaining = (int64_t)size; 3762 3763 if (index < 0) { 3764 index += len; 3765 3766 if (index < 0 && index + remaining > 0) { 3767 remaining += index; 3768 index = 0; 3769 } 3770 } 3771 3772 if (index >= len || index < 0) { 3773 remaining = 0; 3774 } else if (remaining < 0) { 3775 remaining += len - index; 3776 } else if (index + remaining > size) { 3777 remaining = size - index; 3778 } 3779 3780 for (i = 0; i < remaining; i++) { 3781 if ((d[i] = dtrace_load8(s + index + i)) == '\0') 3782 break; 3783 } 3784 3785 d[i] = '\0'; 3786 3787 mstate->dtms_scratch_ptr += size; 3788 regs[rd] = (uintptr_t)d; 3789 break; 3790 } 3791 3792 case DIF_SUBR_GETMAJOR: 3793 #ifdef _LP64 3794 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 3795 #else 3796 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 3797 #endif 3798 break; 3799 3800 case DIF_SUBR_GETMINOR: 3801 #ifdef _LP64 3802 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 3803 #else 3804 regs[rd] = tupregs[0].dttk_value & MAXMIN; 3805 #endif 3806 break; 3807 3808 
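/*
 * (A worked example for the DIF_SUBR_SUBSTR case above, using the default
 * string size: substr("coffee", -3, 2) normalizes the negative index to
 * 6 - 3 = 3 and copies two bytes, yielding "fe"; substr("coffee", 2),
 * with no length argument, copies from index 2 through the terminating
 * NUL, yielding "ffee".)
 */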
case DIF_SUBR_DDI_PATHNAME: { 3809 /* 3810 * This one is a galactic mess. We are going to roughly 3811 * emulate ddi_pathname(), but it's made more complicated 3812 * by the fact that we (a) want to include the minor name and 3813 * (b) must proceed iteratively instead of recursively. 3814 */ 3815 uintptr_t dest = mstate->dtms_scratch_ptr; 3816 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3817 char *start = (char *)dest, *end = start + size - 1; 3818 uintptr_t daddr = tupregs[0].dttk_value; 3819 int64_t minor = (int64_t)tupregs[1].dttk_value; 3820 char *s; 3821 int i, len, depth = 0; 3822 3823 /* 3824 * Due to all the pointer jumping we do and context we must 3825 * rely upon, we just mandate that the user must have kernel 3826 * read privileges to use this routine. 3827 */ 3828 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 3829 *flags |= CPU_DTRACE_KPRIV; 3830 *illval = daddr; 3831 regs[rd] = NULL; 3832 } 3833 3834 if (!DTRACE_INSCRATCH(mstate, size)) { 3835 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3836 regs[rd] = NULL; 3837 break; 3838 } 3839 3840 *end = '\0'; 3841 3842 /* 3843 * We want to have a name for the minor. In order to do this, 3844 * we need to walk the minor list from the devinfo. We want 3845 * to be sure that we don't infinitely walk a circular list, 3846 * so we check for circularity by sending a scout pointer 3847 * ahead two elements for every element that we iterate over; 3848 * if the list is circular, these will ultimately point to the 3849 * same element. You may recognize this little trick as the 3850 * answer to a stupid interview question -- one that always 3851 * seems to be asked by those who had to have it laboriously 3852 * explained to them, and who can't even concisely describe 3853 * the conditions under which one would be forced to resort to 3854 * this technique. Needless to say, those conditions are 3855 * found here -- and probably only here. Is this the only use 3856 * of this infamous trick in shipping, production code? If it 3857 * isn't, it probably should be... 3858 */ 3859 if (minor != -1) { 3860 uintptr_t maddr = dtrace_loadptr(daddr + 3861 offsetof(struct dev_info, devi_minor)); 3862 3863 uintptr_t next = offsetof(struct ddi_minor_data, next); 3864 uintptr_t name = offsetof(struct ddi_minor_data, 3865 d_minor) + offsetof(struct ddi_minor, name); 3866 uintptr_t dev = offsetof(struct ddi_minor_data, 3867 d_minor) + offsetof(struct ddi_minor, dev); 3868 uintptr_t scout; 3869 3870 if (maddr != NULL) 3871 scout = dtrace_loadptr(maddr + next); 3872 3873 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 3874 uint64_t m; 3875 #ifdef _LP64 3876 m = dtrace_load64(maddr + dev) & MAXMIN64; 3877 #else 3878 m = dtrace_load32(maddr + dev) & MAXMIN; 3879 #endif 3880 if (m != minor) { 3881 maddr = dtrace_loadptr(maddr + next); 3882 3883 if (scout == NULL) 3884 continue; 3885 3886 scout = dtrace_loadptr(scout + next); 3887 3888 if (scout == NULL) 3889 continue; 3890 3891 scout = dtrace_loadptr(scout + next); 3892 3893 if (scout == NULL) 3894 continue; 3895 3896 if (scout == maddr) { 3897 *flags |= CPU_DTRACE_ILLOP; 3898 break; 3899 } 3900 3901 continue; 3902 } 3903 3904 /* 3905 * We have the minor data. Now we need to 3906 * copy the minor's name into the end of the 3907 * pathname. 
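 * (To make the back-to-front construction concrete -- the path shown is
 * hypothetical -- a result such as /devices/pci@0,0/sd@0,0:a is assembled
 * from the right: the ":a" minor suffix first, then each node as
 * "/name@addr" while walking up through devi_parent, and finally the
 * synthetic root component "/devices".)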
3908 */ 3909 s = (char *)dtrace_loadptr(maddr + name); 3910 len = dtrace_strlen(s, size); 3911 3912 if (*flags & CPU_DTRACE_FAULT) 3913 break; 3914 3915 if (len != 0) { 3916 if ((end -= (len + 1)) < start) 3917 break; 3918 3919 *end = ':'; 3920 } 3921 3922 for (i = 1; i <= len; i++) 3923 end[i] = dtrace_load8((uintptr_t)s++); 3924 break; 3925 } 3926 } 3927 3928 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 3929 ddi_node_state_t devi_state; 3930 3931 devi_state = dtrace_load32(daddr + 3932 offsetof(struct dev_info, devi_node_state)); 3933 3934 if (*flags & CPU_DTRACE_FAULT) 3935 break; 3936 3937 if (devi_state >= DS_INITIALIZED) { 3938 s = (char *)dtrace_loadptr(daddr + 3939 offsetof(struct dev_info, devi_addr)); 3940 len = dtrace_strlen(s, size); 3941 3942 if (*flags & CPU_DTRACE_FAULT) 3943 break; 3944 3945 if (len != 0) { 3946 if ((end -= (len + 1)) < start) 3947 break; 3948 3949 *end = '@'; 3950 } 3951 3952 for (i = 1; i <= len; i++) 3953 end[i] = dtrace_load8((uintptr_t)s++); 3954 } 3955 3956 /* 3957 * Now for the node name... 3958 */ 3959 s = (char *)dtrace_loadptr(daddr + 3960 offsetof(struct dev_info, devi_node_name)); 3961 3962 daddr = dtrace_loadptr(daddr + 3963 offsetof(struct dev_info, devi_parent)); 3964 3965 /* 3966 * If our parent is NULL (that is, if we're the root 3967 * node), we're going to use the special path 3968 * "devices". 3969 */ 3970 if (daddr == NULL) 3971 s = "devices"; 3972 3973 len = dtrace_strlen(s, size); 3974 if (*flags & CPU_DTRACE_FAULT) 3975 break; 3976 3977 if ((end -= (len + 1)) < start) 3978 break; 3979 3980 for (i = 1; i <= len; i++) 3981 end[i] = dtrace_load8((uintptr_t)s++); 3982 *end = '/'; 3983 3984 if (depth++ > dtrace_devdepth_max) { 3985 *flags |= CPU_DTRACE_ILLOP; 3986 break; 3987 } 3988 } 3989 3990 if (end < start) 3991 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3992 3993 if (daddr == NULL) { 3994 regs[rd] = (uintptr_t)end; 3995 mstate->dtms_scratch_ptr += size; 3996 } 3997 3998 break; 3999 } 4000 4001 case DIF_SUBR_STRJOIN: { 4002 char *d = (char *)mstate->dtms_scratch_ptr; 4003 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4004 uintptr_t s1 = tupregs[0].dttk_value; 4005 uintptr_t s2 = tupregs[1].dttk_value; 4006 int i = 0; 4007 4008 if (!dtrace_strcanload(s1, size, mstate, vstate) || 4009 !dtrace_strcanload(s2, size, mstate, vstate)) { 4010 regs[rd] = NULL; 4011 break; 4012 } 4013 4014 if (!DTRACE_INSCRATCH(mstate, size)) { 4015 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4016 regs[rd] = NULL; 4017 break; 4018 } 4019 4020 for (;;) { 4021 if (i >= size) { 4022 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4023 regs[rd] = NULL; 4024 break; 4025 } 4026 4027 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 4028 i--; 4029 break; 4030 } 4031 } 4032 4033 for (;;) { 4034 if (i >= size) { 4035 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4036 regs[rd] = NULL; 4037 break; 4038 } 4039 4040 if ((d[i++] = dtrace_load8(s2++)) == '\0') 4041 break; 4042 } 4043 4044 if (i < size) { 4045 mstate->dtms_scratch_ptr += i; 4046 regs[rd] = (uintptr_t)d; 4047 } 4048 4049 break; 4050 } 4051 4052 case DIF_SUBR_LLTOSTR: { 4053 int64_t i = (int64_t)tupregs[0].dttk_value; 4054 int64_t val = i < 0 ? 
i * -1 : i; 4055 uint64_t size = 22; /* enough room for 2^64 in decimal */ 4056 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 4057 4058 if (!DTRACE_INSCRATCH(mstate, size)) { 4059 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4060 regs[rd] = NULL; 4061 break; 4062 } 4063 4064 for (*end-- = '\0'; val; val /= 10) 4065 *end-- = '0' + (val % 10); 4066 4067 if (i == 0) 4068 *end-- = '0'; 4069 4070 if (i < 0) 4071 *end-- = '-'; 4072 4073 regs[rd] = (uintptr_t)end + 1; 4074 mstate->dtms_scratch_ptr += size; 4075 break; 4076 } 4077 4078 case DIF_SUBR_HTONS: 4079 case DIF_SUBR_NTOHS: 4080 #ifdef _BIG_ENDIAN 4081 regs[rd] = (uint16_t)tupregs[0].dttk_value; 4082 #else 4083 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 4084 #endif 4085 break; 4086 4087 4088 case DIF_SUBR_HTONL: 4089 case DIF_SUBR_NTOHL: 4090 #ifdef _BIG_ENDIAN 4091 regs[rd] = (uint32_t)tupregs[0].dttk_value; 4092 #else 4093 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 4094 #endif 4095 break; 4096 4097 4098 case DIF_SUBR_HTONLL: 4099 case DIF_SUBR_NTOHLL: 4100 #ifdef _BIG_ENDIAN 4101 regs[rd] = (uint64_t)tupregs[0].dttk_value; 4102 #else 4103 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 4104 #endif 4105 break; 4106 4107 4108 case DIF_SUBR_DIRNAME: 4109 case DIF_SUBR_BASENAME: { 4110 char *dest = (char *)mstate->dtms_scratch_ptr; 4111 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4112 uintptr_t src = tupregs[0].dttk_value; 4113 int i, j, len = dtrace_strlen((char *)src, size); 4114 int lastbase = -1, firstbase = -1, lastdir = -1; 4115 int start, end; 4116 4117 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 4118 regs[rd] = NULL; 4119 break; 4120 } 4121 4122 if (!DTRACE_INSCRATCH(mstate, size)) { 4123 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4124 regs[rd] = NULL; 4125 break; 4126 } 4127 4128 /* 4129 * The basename and dirname for a zero-length string is 4130 * defined to be "." 4131 */ 4132 if (len == 0) { 4133 len = 1; 4134 src = (uintptr_t)"."; 4135 } 4136 4137 /* 4138 * Start from the back of the string, moving back toward the 4139 * front until we see a character that isn't a slash. That 4140 * character is the last character in the basename. 4141 */ 4142 for (i = len - 1; i >= 0; i--) { 4143 if (dtrace_load8(src + i) != '/') 4144 break; 4145 } 4146 4147 if (i >= 0) 4148 lastbase = i; 4149 4150 /* 4151 * Starting from the last character in the basename, move 4152 * towards the front until we find a slash. The character 4153 * that we processed immediately before that is the first 4154 * character in the basename. 4155 */ 4156 for (; i >= 0; i--) { 4157 if (dtrace_load8(src + i) == '/') 4158 break; 4159 } 4160 4161 if (i >= 0) 4162 firstbase = i + 1; 4163 4164 /* 4165 * Now keep going until we find a non-slash character. That 4166 * character is the last character in the dirname. 4167 */ 4168 for (; i >= 0; i--) { 4169 if (dtrace_load8(src + i) != '/') 4170 break; 4171 } 4172 4173 if (i >= 0) 4174 lastdir = i; 4175 4176 ASSERT(!(lastbase == -1 && firstbase != -1)); 4177 ASSERT(!(firstbase == -1 && lastdir != -1)); 4178 4179 if (lastbase == -1) { 4180 /* 4181 * We didn't find a non-slash character. We know that 4182 * the length is non-zero, so the whole string must be 4183 * slashes. In either the dirname or the basename 4184 * case, we return '/'. 4185 */ 4186 ASSERT(firstbase == -1); 4187 firstbase = lastbase = lastdir = 0; 4188 } 4189 4190 if (firstbase == -1) { 4191 /* 4192 * The entire string consists only of a basename 4193 * component. 
If we're looking for dirname, we need 4194 * to change our string to be just "."; if we're 4195 * looking for a basename, we'll just set the first 4196 * character of the basename to be 0. 4197 */ 4198 if (subr == DIF_SUBR_DIRNAME) { 4199 ASSERT(lastdir == -1); 4200 src = (uintptr_t)"."; 4201 lastdir = 0; 4202 } else { 4203 firstbase = 0; 4204 } 4205 } 4206 4207 if (subr == DIF_SUBR_DIRNAME) { 4208 if (lastdir == -1) { 4209 /* 4210 * We know that we have a slash in the name -- 4211 * or lastdir would be set to 0, above. And 4212 * because lastdir is -1, we know that this 4213 * slash must be the first character. (That 4214 * is, the full string must be of the form 4215 * "/basename".) In this case, the last 4216 * character of the directory name is 0. 4217 */ 4218 lastdir = 0; 4219 } 4220 4221 start = 0; 4222 end = lastdir; 4223 } else { 4224 ASSERT(subr == DIF_SUBR_BASENAME); 4225 ASSERT(firstbase != -1 && lastbase != -1); 4226 start = firstbase; 4227 end = lastbase; 4228 } 4229 4230 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 4231 dest[j] = dtrace_load8(src + i); 4232 4233 dest[j] = '\0'; 4234 regs[rd] = (uintptr_t)dest; 4235 mstate->dtms_scratch_ptr += size; 4236 break; 4237 } 4238 4239 case DIF_SUBR_CLEANPATH: { 4240 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4241 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4242 uintptr_t src = tupregs[0].dttk_value; 4243 int i = 0, j = 0; 4244 4245 if (!dtrace_strcanload(src, size, mstate, vstate)) { 4246 regs[rd] = NULL; 4247 break; 4248 } 4249 4250 if (!DTRACE_INSCRATCH(mstate, size)) { 4251 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4252 regs[rd] = NULL; 4253 break; 4254 } 4255 4256 /* 4257 * Move forward, loading each character. 4258 */ 4259 do { 4260 c = dtrace_load8(src + i++); 4261 next: 4262 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 4263 break; 4264 4265 if (c != '/') { 4266 dest[j++] = c; 4267 continue; 4268 } 4269 4270 c = dtrace_load8(src + i++); 4271 4272 if (c == '/') { 4273 /* 4274 * We have two slashes -- we can just advance 4275 * to the next character. 4276 */ 4277 goto next; 4278 } 4279 4280 if (c != '.') { 4281 /* 4282 * This is not "." and it's not ".." -- we can 4283 * just store the "/" and this character and 4284 * drive on. 4285 */ 4286 dest[j++] = '/'; 4287 dest[j++] = c; 4288 continue; 4289 } 4290 4291 c = dtrace_load8(src + i++); 4292 4293 if (c == '/') { 4294 /* 4295 * This is a "/./" component. We're not going 4296 * to store anything in the destination buffer; 4297 * we're just going to go to the next component. 4298 */ 4299 goto next; 4300 } 4301 4302 if (c != '.') { 4303 /* 4304 * This is not ".." -- we can just store the 4305 * "/." and this character and continue 4306 * processing. 4307 */ 4308 dest[j++] = '/'; 4309 dest[j++] = '.'; 4310 dest[j++] = c; 4311 continue; 4312 } 4313 4314 c = dtrace_load8(src + i++); 4315 4316 if (c != '/' && c != '\0') { 4317 /* 4318 * This is not ".." -- it's "..[mumble]". 4319 * We'll store the "/.." and this character 4320 * and continue processing. 4321 */ 4322 dest[j++] = '/'; 4323 dest[j++] = '.'; 4324 dest[j++] = '.'; 4325 dest[j++] = c; 4326 continue; 4327 } 4328 4329 /* 4330 * This is "/../" or "/..\0". We need to back up 4331 * our destination pointer until we find a "/". 
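			 *
			 * Taken together with the cases above, the effect of
			 * this pass is that, for example, an input of
			 *
			 *	/dev/../devices//sd@0,0/./x
			 *
			 * is emitted as "/devices/sd@0,0/x":  "//" and "/./"
			 * components are dropped, and each "/../" discards
			 * the component that precedes it.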
4332 */ 4333 i--; 4334 while (j != 0 && dest[--j] != '/') 4335 continue; 4336 4337 if (c == '\0') 4338 dest[++j] = '/'; 4339 } while (c != '\0'); 4340 4341 dest[j] = '\0'; 4342 regs[rd] = (uintptr_t)dest; 4343 mstate->dtms_scratch_ptr += size; 4344 break; 4345 } 4346 4347 case DIF_SUBR_INET_NTOA: 4348 case DIF_SUBR_INET_NTOA6: 4349 case DIF_SUBR_INET_NTOP: { 4350 size_t size; 4351 int af, argi, i; 4352 char *base, *end; 4353 4354 if (subr == DIF_SUBR_INET_NTOP) { 4355 af = (int)tupregs[0].dttk_value; 4356 argi = 1; 4357 } else { 4358 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; 4359 argi = 0; 4360 } 4361 4362 if (af == AF_INET) { 4363 ipaddr_t ip4; 4364 uint8_t *ptr8, val; 4365 4366 /* 4367 * Safely load the IPv4 address. 4368 */ 4369 ip4 = dtrace_load32(tupregs[argi].dttk_value); 4370 4371 /* 4372 * Check an IPv4 string will fit in scratch. 4373 */ 4374 size = INET_ADDRSTRLEN; 4375 if (!DTRACE_INSCRATCH(mstate, size)) { 4376 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4377 regs[rd] = NULL; 4378 break; 4379 } 4380 base = (char *)mstate->dtms_scratch_ptr; 4381 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4382 4383 /* 4384 * Stringify as a dotted decimal quad. 4385 */ 4386 *end-- = '\0'; 4387 ptr8 = (uint8_t *)&ip4; 4388 for (i = 3; i >= 0; i--) { 4389 val = ptr8[i]; 4390 4391 if (val == 0) { 4392 *end-- = '0'; 4393 } else { 4394 for (; val; val /= 10) { 4395 *end-- = '0' + (val % 10); 4396 } 4397 } 4398 4399 if (i > 0) 4400 *end-- = '.'; 4401 } 4402 ASSERT(end + 1 >= base); 4403 4404 } else if (af == AF_INET6) { 4405 struct in6_addr ip6; 4406 int firstzero, tryzero, numzero, v6end; 4407 uint16_t val; 4408 const char digits[] = "0123456789abcdef"; 4409 4410 /* 4411 * Stringify using RFC 1884 convention 2 - 16 bit 4412 * hexadecimal values with a zero-run compression. 4413 * Lower case hexadecimal digits are used. 4414 * eg, fe80::214:4fff:fe0b:76c8. 4415 * The IPv4 embedded form is returned for inet_ntop, 4416 * just the IPv4 string is returned for inet_ntoa6. 4417 */ 4418 4419 /* 4420 * Safely load the IPv6 address. 4421 */ 4422 dtrace_bcopy( 4423 (void *)(uintptr_t)tupregs[argi].dttk_value, 4424 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); 4425 4426 /* 4427 * Check an IPv6 string will fit in scratch. 4428 */ 4429 size = INET6_ADDRSTRLEN; 4430 if (!DTRACE_INSCRATCH(mstate, size)) { 4431 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4432 regs[rd] = NULL; 4433 break; 4434 } 4435 base = (char *)mstate->dtms_scratch_ptr; 4436 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4437 *end-- = '\0'; 4438 4439 /* 4440 * Find the longest run of 16 bit zero values 4441 * for the single allowed zero compression - "::". 4442 */ 4443 firstzero = -1; 4444 tryzero = -1; 4445 numzero = 1; 4446 for (i = 0; i < sizeof (struct in6_addr); i++) { 4447 if (ip6._S6_un._S6_u8[i] == 0 && 4448 tryzero == -1 && i % 2 == 0) { 4449 tryzero = i; 4450 continue; 4451 } 4452 4453 if (tryzero != -1 && 4454 (ip6._S6_un._S6_u8[i] != 0 || 4455 i == sizeof (struct in6_addr) - 1)) { 4456 4457 if (i - tryzero <= numzero) { 4458 tryzero = -1; 4459 continue; 4460 } 4461 4462 firstzero = tryzero; 4463 numzero = i - i % 2 - tryzero; 4464 tryzero = -1; 4465 4466 if (ip6._S6_un._S6_u8[i] == 0 && 4467 i == sizeof (struct in6_addr) - 1) 4468 numzero += 2; 4469 } 4470 } 4471 ASSERT(firstzero + numzero <= sizeof (struct in6_addr)); 4472 4473 /* 4474 * Check for an IPv4 embedded address. 
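			 *
			 * For example, both the IPv4-mapped address
			 * ::ffff:10.1.2.3 and the IPv4-compatible address
			 * ::10.1.2.3 have their final four octets rendered as
			 * a dotted quad here:  inet_ntoa6() returns just that
			 * quad, while inet_ntop() continues below to prepend
			 * the "::ffff:" or "::" prefix.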
4475 */ 4476 v6end = sizeof (struct in6_addr) - 2; 4477 if (IN6_IS_ADDR_V4MAPPED(&ip6) || 4478 IN6_IS_ADDR_V4COMPAT(&ip6)) { 4479 for (i = sizeof (struct in6_addr) - 1; 4480 i >= DTRACE_V4MAPPED_OFFSET; i--) { 4481 ASSERT(end >= base); 4482 4483 val = ip6._S6_un._S6_u8[i]; 4484 4485 if (val == 0) { 4486 *end-- = '0'; 4487 } else { 4488 for (; val; val /= 10) { 4489 *end-- = '0' + val % 10; 4490 } 4491 } 4492 4493 if (i > DTRACE_V4MAPPED_OFFSET) 4494 *end-- = '.'; 4495 } 4496 4497 if (subr == DIF_SUBR_INET_NTOA6) 4498 goto inetout; 4499 4500 /* 4501 * Set v6end to skip the IPv4 address that 4502 * we have already stringified. 4503 */ 4504 v6end = 10; 4505 } 4506 4507 /* 4508 * Build the IPv6 string by working through the 4509 * address in reverse. 4510 */ 4511 for (i = v6end; i >= 0; i -= 2) { 4512 ASSERT(end >= base); 4513 4514 if (i == firstzero + numzero - 2) { 4515 *end-- = ':'; 4516 *end-- = ':'; 4517 i -= numzero - 2; 4518 continue; 4519 } 4520 4521 if (i < 14 && i != firstzero - 2) 4522 *end-- = ':'; 4523 4524 val = (ip6._S6_un._S6_u8[i] << 8) + 4525 ip6._S6_un._S6_u8[i + 1]; 4526 4527 if (val == 0) { 4528 *end-- = '0'; 4529 } else { 4530 for (; val; val /= 16) { 4531 *end-- = digits[val % 16]; 4532 } 4533 } 4534 } 4535 ASSERT(end + 1 >= base); 4536 4537 } else { 4538 /* 4539 * The user didn't use AF_INET or AF_INET6. 4540 */ 4541 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4542 regs[rd] = NULL; 4543 break; 4544 } 4545 4546 inetout: regs[rd] = (uintptr_t)end + 1; 4547 mstate->dtms_scratch_ptr += size; 4548 break; 4549 } 4550 4551 } 4552 } 4553 4554 /* 4555 * Emulate the execution of DTrace IR instructions specified by the given 4556 * DIF object. This function is deliberately void of assertions as all of 4557 * the necessary checks are handled by a call to dtrace_difo_validate(). 4558 */ 4559 static uint64_t 4560 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 4561 dtrace_vstate_t *vstate, dtrace_state_t *state) 4562 { 4563 const dif_instr_t *text = difo->dtdo_buf; 4564 const uint_t textlen = difo->dtdo_len; 4565 const char *strtab = difo->dtdo_strtab; 4566 const uint64_t *inttab = difo->dtdo_inttab; 4567 4568 uint64_t rval = 0; 4569 dtrace_statvar_t *svar; 4570 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 4571 dtrace_difv_t *v; 4572 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 4573 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 4574 4575 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 4576 uint64_t regs[DIF_DIR_NREGS]; 4577 uint64_t *tmp; 4578 4579 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 4580 int64_t cc_r; 4581 uint_t pc = 0, id, opc; 4582 uint8_t ttop = 0; 4583 dif_instr_t instr; 4584 uint_t r1, r2, rd; 4585 4586 /* 4587 * We stash the current DIF object into the machine state: we need it 4588 * for subsequent access checking.
4589 */ 4590 mstate->dtms_difo = difo; 4591 4592 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 4593 4594 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 4595 opc = pc; 4596 4597 instr = text[pc++]; 4598 r1 = DIF_INSTR_R1(instr); 4599 r2 = DIF_INSTR_R2(instr); 4600 rd = DIF_INSTR_RD(instr); 4601 4602 switch (DIF_INSTR_OP(instr)) { 4603 case DIF_OP_OR: 4604 regs[rd] = regs[r1] | regs[r2]; 4605 break; 4606 case DIF_OP_XOR: 4607 regs[rd] = regs[r1] ^ regs[r2]; 4608 break; 4609 case DIF_OP_AND: 4610 regs[rd] = regs[r1] & regs[r2]; 4611 break; 4612 case DIF_OP_SLL: 4613 regs[rd] = regs[r1] << regs[r2]; 4614 break; 4615 case DIF_OP_SRL: 4616 regs[rd] = regs[r1] >> regs[r2]; 4617 break; 4618 case DIF_OP_SUB: 4619 regs[rd] = regs[r1] - regs[r2]; 4620 break; 4621 case DIF_OP_ADD: 4622 regs[rd] = regs[r1] + regs[r2]; 4623 break; 4624 case DIF_OP_MUL: 4625 regs[rd] = regs[r1] * regs[r2]; 4626 break; 4627 case DIF_OP_SDIV: 4628 if (regs[r2] == 0) { 4629 regs[rd] = 0; 4630 *flags |= CPU_DTRACE_DIVZERO; 4631 } else { 4632 regs[rd] = (int64_t)regs[r1] / 4633 (int64_t)regs[r2]; 4634 } 4635 break; 4636 4637 case DIF_OP_UDIV: 4638 if (regs[r2] == 0) { 4639 regs[rd] = 0; 4640 *flags |= CPU_DTRACE_DIVZERO; 4641 } else { 4642 regs[rd] = regs[r1] / regs[r2]; 4643 } 4644 break; 4645 4646 case DIF_OP_SREM: 4647 if (regs[r2] == 0) { 4648 regs[rd] = 0; 4649 *flags |= CPU_DTRACE_DIVZERO; 4650 } else { 4651 regs[rd] = (int64_t)regs[r1] % 4652 (int64_t)regs[r2]; 4653 } 4654 break; 4655 4656 case DIF_OP_UREM: 4657 if (regs[r2] == 0) { 4658 regs[rd] = 0; 4659 *flags |= CPU_DTRACE_DIVZERO; 4660 } else { 4661 regs[rd] = regs[r1] % regs[r2]; 4662 } 4663 break; 4664 4665 case DIF_OP_NOT: 4666 regs[rd] = ~regs[r1]; 4667 break; 4668 case DIF_OP_MOV: 4669 regs[rd] = regs[r1]; 4670 break; 4671 case DIF_OP_CMP: 4672 cc_r = regs[r1] - regs[r2]; 4673 cc_n = cc_r < 0; 4674 cc_z = cc_r == 0; 4675 cc_v = 0; 4676 cc_c = regs[r1] < regs[r2]; 4677 break; 4678 case DIF_OP_TST: 4679 cc_n = cc_v = cc_c = 0; 4680 cc_z = regs[r1] == 0; 4681 break; 4682 case DIF_OP_BA: 4683 pc = DIF_INSTR_LABEL(instr); 4684 break; 4685 case DIF_OP_BE: 4686 if (cc_z) 4687 pc = DIF_INSTR_LABEL(instr); 4688 break; 4689 case DIF_OP_BNE: 4690 if (cc_z == 0) 4691 pc = DIF_INSTR_LABEL(instr); 4692 break; 4693 case DIF_OP_BG: 4694 if ((cc_z | (cc_n ^ cc_v)) == 0) 4695 pc = DIF_INSTR_LABEL(instr); 4696 break; 4697 case DIF_OP_BGU: 4698 if ((cc_c | cc_z) == 0) 4699 pc = DIF_INSTR_LABEL(instr); 4700 break; 4701 case DIF_OP_BGE: 4702 if ((cc_n ^ cc_v) == 0) 4703 pc = DIF_INSTR_LABEL(instr); 4704 break; 4705 case DIF_OP_BGEU: 4706 if (cc_c == 0) 4707 pc = DIF_INSTR_LABEL(instr); 4708 break; 4709 case DIF_OP_BL: 4710 if (cc_n ^ cc_v) 4711 pc = DIF_INSTR_LABEL(instr); 4712 break; 4713 case DIF_OP_BLU: 4714 if (cc_c) 4715 pc = DIF_INSTR_LABEL(instr); 4716 break; 4717 case DIF_OP_BLE: 4718 if (cc_z | (cc_n ^ cc_v)) 4719 pc = DIF_INSTR_LABEL(instr); 4720 break; 4721 case DIF_OP_BLEU: 4722 if (cc_c | cc_z) 4723 pc = DIF_INSTR_LABEL(instr); 4724 break; 4725 case DIF_OP_RLDSB: 4726 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 4727 *flags |= CPU_DTRACE_KPRIV; 4728 *illval = regs[r1]; 4729 break; 4730 } 4731 /*FALLTHROUGH*/ 4732 case DIF_OP_LDSB: 4733 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 4734 break; 4735 case DIF_OP_RLDSH: 4736 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 4737 *flags |= CPU_DTRACE_KPRIV; 4738 *illval = regs[r1]; 4739 break; 4740 } 4741 /*FALLTHROUGH*/ 4742 case DIF_OP_LDSH: 4743 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 4744 break; 
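		/*
		 * The R-prefixed loads above and below all follow one
		 * pattern:  the address is first vetted with dtrace_canstore()
		 * to confirm that it refers to memory owned by DTrace itself
		 * (scratch, statically-allocated variables or dynamic variable
		 * space); if it does not, CPU_DTRACE_KPRIV is raised and the
		 * load is skipped.  Otherwise control falls through to the
		 * corresponding unchecked load.  Schematically (widths and
		 * opcode names elided):
		 *
		 *	case DIF_OP_RLDxx:
		 *		if (!dtrace_canstore(regs[r1], width, ...)) {
		 *			*flags |= CPU_DTRACE_KPRIV;
		 *			*illval = regs[r1];
		 *			break;
		 *		}
		 *		(FALLTHROUGH)
		 *	case DIF_OP_LDxx:
		 *		regs[rd] = dtrace_loadxx(regs[r1]);
		 *		break;
		 */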
4745 case DIF_OP_RLDSW: 4746 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 4747 *flags |= CPU_DTRACE_KPRIV; 4748 *illval = regs[r1]; 4749 break; 4750 } 4751 /*FALLTHROUGH*/ 4752 case DIF_OP_LDSW: 4753 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 4754 break; 4755 case DIF_OP_RLDUB: 4756 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 4757 *flags |= CPU_DTRACE_KPRIV; 4758 *illval = regs[r1]; 4759 break; 4760 } 4761 /*FALLTHROUGH*/ 4762 case DIF_OP_LDUB: 4763 regs[rd] = dtrace_load8(regs[r1]); 4764 break; 4765 case DIF_OP_RLDUH: 4766 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 4767 *flags |= CPU_DTRACE_KPRIV; 4768 *illval = regs[r1]; 4769 break; 4770 } 4771 /*FALLTHROUGH*/ 4772 case DIF_OP_LDUH: 4773 regs[rd] = dtrace_load16(regs[r1]); 4774 break; 4775 case DIF_OP_RLDUW: 4776 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 4777 *flags |= CPU_DTRACE_KPRIV; 4778 *illval = regs[r1]; 4779 break; 4780 } 4781 /*FALLTHROUGH*/ 4782 case DIF_OP_LDUW: 4783 regs[rd] = dtrace_load32(regs[r1]); 4784 break; 4785 case DIF_OP_RLDX: 4786 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) { 4787 *flags |= CPU_DTRACE_KPRIV; 4788 *illval = regs[r1]; 4789 break; 4790 } 4791 /*FALLTHROUGH*/ 4792 case DIF_OP_LDX: 4793 regs[rd] = dtrace_load64(regs[r1]); 4794 break; 4795 case DIF_OP_ULDSB: 4796 regs[rd] = (int8_t) 4797 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 4798 break; 4799 case DIF_OP_ULDSH: 4800 regs[rd] = (int16_t) 4801 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 4802 break; 4803 case DIF_OP_ULDSW: 4804 regs[rd] = (int32_t) 4805 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 4806 break; 4807 case DIF_OP_ULDUB: 4808 regs[rd] = 4809 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 4810 break; 4811 case DIF_OP_ULDUH: 4812 regs[rd] = 4813 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 4814 break; 4815 case DIF_OP_ULDUW: 4816 regs[rd] = 4817 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 4818 break; 4819 case DIF_OP_ULDX: 4820 regs[rd] = 4821 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 4822 break; 4823 case DIF_OP_RET: 4824 rval = regs[rd]; 4825 pc = textlen; 4826 break; 4827 case DIF_OP_NOP: 4828 break; 4829 case DIF_OP_SETX: 4830 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 4831 break; 4832 case DIF_OP_SETS: 4833 regs[rd] = (uint64_t)(uintptr_t) 4834 (strtab + DIF_INSTR_STRING(instr)); 4835 break; 4836 case DIF_OP_SCMP: { 4837 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 4838 uintptr_t s1 = regs[r1]; 4839 uintptr_t s2 = regs[r2]; 4840 4841 if (s1 != NULL && 4842 !dtrace_strcanload(s1, sz, mstate, vstate)) 4843 break; 4844 if (s2 != NULL && 4845 !dtrace_strcanload(s2, sz, mstate, vstate)) 4846 break; 4847 4848 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz); 4849 4850 cc_n = cc_r < 0; 4851 cc_z = cc_r == 0; 4852 cc_v = cc_c = 0; 4853 break; 4854 } 4855 case DIF_OP_LDGA: 4856 regs[rd] = dtrace_dif_variable(mstate, state, 4857 r1, regs[r2]); 4858 break; 4859 case DIF_OP_LDGS: 4860 id = DIF_INSTR_VAR(instr); 4861 4862 if (id >= DIF_VAR_OTHER_UBASE) { 4863 uintptr_t a; 4864 4865 id -= DIF_VAR_OTHER_UBASE; 4866 svar = vstate->dtvs_globals[id]; 4867 ASSERT(svar != NULL); 4868 v = &svar->dtsv_var; 4869 4870 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 4871 regs[rd] = svar->dtsv_data; 4872 break; 4873 } 4874 4875 a = (uintptr_t)svar->dtsv_data; 4876 4877 if (*(uint8_t *)a == UINT8_MAX) { 4878 /* 4879 * If the 0th byte is set to UINT8_MAX 4880 * then this is to be treated as a 4881 * reference to a NULL variable. 
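				 *
				 * That is, each by-ref static is laid out with
				 * a word of metadata ahead of the data itself:
				 *
				 *	+-------------------+----------------
				 *	| is-NULL flag byte |  variable data
				 *	| (rest unused)     |  (dtdt_size bytes)
				 *	+-------------------+----------------
				 *	a                   a + sizeof (uint64_t)
				 *
				 * The matching store (DIF_OP_STGS, below) sets
				 * the flag byte to UINT8_MAX when NULL is
				 * stored and clears it otherwise.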
4882 */ 4883 regs[rd] = NULL; 4884 } else { 4885 regs[rd] = a + sizeof (uint64_t); 4886 } 4887 4888 break; 4889 } 4890 4891 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 4892 break; 4893 4894 case DIF_OP_STGS: 4895 id = DIF_INSTR_VAR(instr); 4896 4897 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4898 id -= DIF_VAR_OTHER_UBASE; 4899 4900 svar = vstate->dtvs_globals[id]; 4901 ASSERT(svar != NULL); 4902 v = &svar->dtsv_var; 4903 4904 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4905 uintptr_t a = (uintptr_t)svar->dtsv_data; 4906 4907 ASSERT(a != NULL); 4908 ASSERT(svar->dtsv_size != 0); 4909 4910 if (regs[rd] == NULL) { 4911 *(uint8_t *)a = UINT8_MAX; 4912 break; 4913 } else { 4914 *(uint8_t *)a = 0; 4915 a += sizeof (uint64_t); 4916 } 4917 if (!dtrace_vcanload( 4918 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 4919 mstate, vstate)) 4920 break; 4921 4922 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4923 (void *)a, &v->dtdv_type); 4924 break; 4925 } 4926 4927 svar->dtsv_data = regs[rd]; 4928 break; 4929 4930 case DIF_OP_LDTA: 4931 /* 4932 * There are no DTrace built-in thread-local arrays at 4933 * present. This opcode is saved for future work. 4934 */ 4935 *flags |= CPU_DTRACE_ILLOP; 4936 regs[rd] = 0; 4937 break; 4938 4939 case DIF_OP_LDLS: 4940 id = DIF_INSTR_VAR(instr); 4941 4942 if (id < DIF_VAR_OTHER_UBASE) { 4943 /* 4944 * For now, this has no meaning. 4945 */ 4946 regs[rd] = 0; 4947 break; 4948 } 4949 4950 id -= DIF_VAR_OTHER_UBASE; 4951 4952 ASSERT(id < vstate->dtvs_nlocals); 4953 ASSERT(vstate->dtvs_locals != NULL); 4954 4955 svar = vstate->dtvs_locals[id]; 4956 ASSERT(svar != NULL); 4957 v = &svar->dtsv_var; 4958 4959 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4960 uintptr_t a = (uintptr_t)svar->dtsv_data; 4961 size_t sz = v->dtdv_type.dtdt_size; 4962 4963 sz += sizeof (uint64_t); 4964 ASSERT(svar->dtsv_size == NCPU * sz); 4965 a += CPU->cpu_id * sz; 4966 4967 if (*(uint8_t *)a == UINT8_MAX) { 4968 /* 4969 * If the 0th byte is set to UINT8_MAX 4970 * then this is to be treated as a 4971 * reference to a NULL variable. 
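				 *
				 * Per-CPU locals use the same flag-word
				 * encoding as the globals above, except that
				 * dtsv_data points at an array of NCPU such
				 * slots; the slot for the current CPU begins
				 * at
				 *
				 *	a = dtsv_data + CPU->cpu_id *
				 *	    (dtdv_type.dtdt_size + sizeof (uint64_t));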
4972 */ 4973 regs[rd] = NULL; 4974 } else { 4975 regs[rd] = a + sizeof (uint64_t); 4976 } 4977 4978 break; 4979 } 4980 4981 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 4982 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 4983 regs[rd] = tmp[CPU->cpu_id]; 4984 break; 4985 4986 case DIF_OP_STLS: 4987 id = DIF_INSTR_VAR(instr); 4988 4989 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4990 id -= DIF_VAR_OTHER_UBASE; 4991 ASSERT(id < vstate->dtvs_nlocals); 4992 4993 ASSERT(vstate->dtvs_locals != NULL); 4994 svar = vstate->dtvs_locals[id]; 4995 ASSERT(svar != NULL); 4996 v = &svar->dtsv_var; 4997 4998 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4999 uintptr_t a = (uintptr_t)svar->dtsv_data; 5000 size_t sz = v->dtdv_type.dtdt_size; 5001 5002 sz += sizeof (uint64_t); 5003 ASSERT(svar->dtsv_size == NCPU * sz); 5004 a += CPU->cpu_id * sz; 5005 5006 if (regs[rd] == NULL) { 5007 *(uint8_t *)a = UINT8_MAX; 5008 break; 5009 } else { 5010 *(uint8_t *)a = 0; 5011 a += sizeof (uint64_t); 5012 } 5013 5014 if (!dtrace_vcanload( 5015 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5016 mstate, vstate)) 5017 break; 5018 5019 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5020 (void *)a, &v->dtdv_type); 5021 break; 5022 } 5023 5024 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5025 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5026 tmp[CPU->cpu_id] = regs[rd]; 5027 break; 5028 5029 case DIF_OP_LDTS: { 5030 dtrace_dynvar_t *dvar; 5031 dtrace_key_t *key; 5032 5033 id = DIF_INSTR_VAR(instr); 5034 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5035 id -= DIF_VAR_OTHER_UBASE; 5036 v = &vstate->dtvs_tlocals[id]; 5037 5038 key = &tupregs[DIF_DTR_NREGS]; 5039 key[0].dttk_value = (uint64_t)id; 5040 key[0].dttk_size = 0; 5041 DTRACE_TLS_THRKEY(key[1].dttk_value); 5042 key[1].dttk_size = 0; 5043 5044 dvar = dtrace_dynvar(dstate, 2, key, 5045 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 5046 mstate, vstate); 5047 5048 if (dvar == NULL) { 5049 regs[rd] = 0; 5050 break; 5051 } 5052 5053 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5054 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5055 } else { 5056 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5057 } 5058 5059 break; 5060 } 5061 5062 case DIF_OP_STTS: { 5063 dtrace_dynvar_t *dvar; 5064 dtrace_key_t *key; 5065 5066 id = DIF_INSTR_VAR(instr); 5067 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5068 id -= DIF_VAR_OTHER_UBASE; 5069 5070 key = &tupregs[DIF_DTR_NREGS]; 5071 key[0].dttk_value = (uint64_t)id; 5072 key[0].dttk_size = 0; 5073 DTRACE_TLS_THRKEY(key[1].dttk_value); 5074 key[1].dttk_size = 0; 5075 v = &vstate->dtvs_tlocals[id]; 5076 5077 dvar = dtrace_dynvar(dstate, 2, key, 5078 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5079 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5080 regs[rd] ? DTRACE_DYNVAR_ALLOC : 5081 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5082 5083 /* 5084 * Given that we're storing to thread-local data, 5085 * we need to flush our predicate cache. 
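			 *
			 * The cache in question is the one consulted on probe
			 * entry, which in simplified form reads:
			 *
			 *	if (probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
			 *	    probe->dtpr_predcache == curthread->t_predcache)
			 *		return;		(predicate known false)
			 *
			 * Because a store to thread-local data may change the
			 * outcome of such a cached predicate for this thread,
			 * the cached identifier must be invalidated here.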
5086 */ 5087 curthread->t_predcache = NULL; 5088 5089 if (dvar == NULL) 5090 break; 5091 5092 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5093 if (!dtrace_vcanload( 5094 (void *)(uintptr_t)regs[rd], 5095 &v->dtdv_type, mstate, vstate)) 5096 break; 5097 5098 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5099 dvar->dtdv_data, &v->dtdv_type); 5100 } else { 5101 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5102 } 5103 5104 break; 5105 } 5106 5107 case DIF_OP_SRA: 5108 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 5109 break; 5110 5111 case DIF_OP_CALL: 5112 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 5113 regs, tupregs, ttop, mstate, state); 5114 break; 5115 5116 case DIF_OP_PUSHTR: 5117 if (ttop == DIF_DTR_NREGS) { 5118 *flags |= CPU_DTRACE_TUPOFLOW; 5119 break; 5120 } 5121 5122 if (r1 == DIF_TYPE_STRING) { 5123 /* 5124 * If this is a string type and the size is 0, 5125 * we'll use the system-wide default string 5126 * size. Note that we are _not_ looking at 5127 * the value of the DTRACEOPT_STRSIZE option; 5128 * had this been set, we would expect to have 5129 * a non-zero size value in the "pushtr". 5130 */ 5131 tupregs[ttop].dttk_size = 5132 dtrace_strlen((char *)(uintptr_t)regs[rd], 5133 regs[r2] ? regs[r2] : 5134 dtrace_strsize_default) + 1; 5135 } else { 5136 tupregs[ttop].dttk_size = regs[r2]; 5137 } 5138 5139 tupregs[ttop++].dttk_value = regs[rd]; 5140 break; 5141 5142 case DIF_OP_PUSHTV: 5143 if (ttop == DIF_DTR_NREGS) { 5144 *flags |= CPU_DTRACE_TUPOFLOW; 5145 break; 5146 } 5147 5148 tupregs[ttop].dttk_value = regs[rd]; 5149 tupregs[ttop++].dttk_size = 0; 5150 break; 5151 5152 case DIF_OP_POPTS: 5153 if (ttop != 0) 5154 ttop--; 5155 break; 5156 5157 case DIF_OP_FLUSHTS: 5158 ttop = 0; 5159 break; 5160 5161 case DIF_OP_LDGAA: 5162 case DIF_OP_LDTAA: { 5163 dtrace_dynvar_t *dvar; 5164 dtrace_key_t *key = tupregs; 5165 uint_t nkeys = ttop; 5166 5167 id = DIF_INSTR_VAR(instr); 5168 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5169 id -= DIF_VAR_OTHER_UBASE; 5170 5171 key[nkeys].dttk_value = (uint64_t)id; 5172 key[nkeys++].dttk_size = 0; 5173 5174 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 5175 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5176 key[nkeys++].dttk_size = 0; 5177 v = &vstate->dtvs_tlocals[id]; 5178 } else { 5179 v = &vstate->dtvs_globals[id]->dtsv_var; 5180 } 5181 5182 dvar = dtrace_dynvar(dstate, nkeys, key, 5183 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5184 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5185 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 5186 5187 if (dvar == NULL) { 5188 regs[rd] = 0; 5189 break; 5190 } 5191 5192 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5193 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5194 } else { 5195 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5196 } 5197 5198 break; 5199 } 5200 5201 case DIF_OP_STGAA: 5202 case DIF_OP_STTAA: { 5203 dtrace_dynvar_t *dvar; 5204 dtrace_key_t *key = tupregs; 5205 uint_t nkeys = ttop; 5206 5207 id = DIF_INSTR_VAR(instr); 5208 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5209 id -= DIF_VAR_OTHER_UBASE; 5210 5211 key[nkeys].dttk_value = (uint64_t)id; 5212 key[nkeys++].dttk_size = 0; 5213 5214 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 5215 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5216 key[nkeys++].dttk_size = 0; 5217 v = &vstate->dtvs_tlocals[id]; 5218 } else { 5219 v = &vstate->dtvs_globals[id]->dtsv_var; 5220 } 5221 5222 dvar = dtrace_dynvar(dstate, nkeys, key, 5223 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5224 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5225 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 5226 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5227 5228 if (dvar == NULL) 5229 break; 5230 5231 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5232 if (!dtrace_vcanload( 5233 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5234 mstate, vstate)) 5235 break; 5236 5237 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5238 dvar->dtdv_data, &v->dtdv_type); 5239 } else { 5240 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5241 } 5242 5243 break; 5244 } 5245 5246 case DIF_OP_ALLOCS: { 5247 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5248 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 5249 5250 /* 5251 * Rounding up the user allocation size could have 5252 * overflowed large, bogus allocations (like -1ULL) to 5253 * 0. 5254 */ 5255 if (size < regs[r1] || 5256 !DTRACE_INSCRATCH(mstate, size)) { 5257 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5258 regs[rd] = NULL; 5259 break; 5260 } 5261 5262 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 5263 mstate->dtms_scratch_ptr += size; 5264 regs[rd] = ptr; 5265 break; 5266 } 5267 5268 case DIF_OP_COPYS: 5269 if (!dtrace_canstore(regs[rd], regs[r2], 5270 mstate, vstate)) { 5271 *flags |= CPU_DTRACE_BADADDR; 5272 *illval = regs[rd]; 5273 break; 5274 } 5275 5276 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 5277 break; 5278 5279 dtrace_bcopy((void *)(uintptr_t)regs[r1], 5280 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 5281 break; 5282 5283 case DIF_OP_STB: 5284 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 5285 *flags |= CPU_DTRACE_BADADDR; 5286 *illval = regs[rd]; 5287 break; 5288 } 5289 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 5290 break; 5291 5292 case DIF_OP_STH: 5293 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 5294 *flags |= CPU_DTRACE_BADADDR; 5295 *illval = regs[rd]; 5296 break; 5297 } 5298 if (regs[rd] & 1) { 5299 *flags |= CPU_DTRACE_BADALIGN; 5300 *illval = regs[rd]; 5301 break; 5302 } 5303 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 5304 break; 5305 5306 case DIF_OP_STW: 5307 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 5308 *flags |= CPU_DTRACE_BADADDR; 5309 *illval = regs[rd]; 5310 break; 5311 } 5312 if (regs[rd] & 3) { 5313 *flags |= CPU_DTRACE_BADALIGN; 5314 *illval = regs[rd]; 5315 break; 5316 } 5317 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 5318 break; 5319 5320 case DIF_OP_STX: 5321 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 5322 *flags |= CPU_DTRACE_BADADDR; 5323 *illval = regs[rd]; 5324 break; 5325 } 5326 if (regs[rd] & 7) { 5327 *flags |= CPU_DTRACE_BADALIGN; 5328 *illval = regs[rd]; 5329 break; 5330 } 5331 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 5332 break; 5333 } 5334 } 5335 5336 if (!(*flags & CPU_DTRACE_FAULT)) 5337 return (rval); 5338 5339 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 5340 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 5341 5342 return (0); 5343 } 5344 5345 static void 5346 dtrace_action_breakpoint(dtrace_ecb_t *ecb) 5347 { 5348 dtrace_probe_t *probe = ecb->dte_probe; 5349 dtrace_provider_t *prov = probe->dtpr_provider; 5350 char c[DTRACE_FULLNAMELEN + 80], *str; 5351 char *msg = "dtrace: breakpoint action at probe "; 5352 char *ecbmsg = " (ecb "; 5353 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 5354 uintptr_t val = (uintptr_t)ecb; 5355 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 5356 5357 if (dtrace_destructive_disallow) 5358 return; 5359 5360 /* 5361 * It's impossible to be taking action on the NULL probe. 
5362 */ 5363 ASSERT(probe != NULL); 5364 5365 /* 5366 * This is a poor man's (destitute man's?) sprintf(): we want to 5367 * print the provider name, module name, function name and name of 5368 * the probe, along with the hex address of the ECB with the breakpoint 5369 * action -- all of which we must place in the character buffer by 5370 * hand. 5371 */ 5372 while (*msg != '\0') 5373 c[i++] = *msg++; 5374 5375 for (str = prov->dtpv_name; *str != '\0'; str++) 5376 c[i++] = *str; 5377 c[i++] = ':'; 5378 5379 for (str = probe->dtpr_mod; *str != '\0'; str++) 5380 c[i++] = *str; 5381 c[i++] = ':'; 5382 5383 for (str = probe->dtpr_func; *str != '\0'; str++) 5384 c[i++] = *str; 5385 c[i++] = ':'; 5386 5387 for (str = probe->dtpr_name; *str != '\0'; str++) 5388 c[i++] = *str; 5389 5390 while (*ecbmsg != '\0') 5391 c[i++] = *ecbmsg++; 5392 5393 while (shift >= 0) { 5394 mask = (uintptr_t)0xf << shift; 5395 5396 if (val >= ((uintptr_t)1 << shift)) 5397 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 5398 shift -= 4; 5399 } 5400 5401 c[i++] = ')'; 5402 c[i] = '\0'; 5403 5404 debug_enter(c); 5405 } 5406 5407 static void 5408 dtrace_action_panic(dtrace_ecb_t *ecb) 5409 { 5410 dtrace_probe_t *probe = ecb->dte_probe; 5411 5412 /* 5413 * It's impossible to be taking action on the NULL probe. 5414 */ 5415 ASSERT(probe != NULL); 5416 5417 if (dtrace_destructive_disallow) 5418 return; 5419 5420 if (dtrace_panicked != NULL) 5421 return; 5422 5423 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 5424 return; 5425 5426 /* 5427 * We won the right to panic. (We want to be sure that only one 5428 * thread calls panic() from dtrace_probe(), and that panic() is 5429 * called exactly once.) 5430 */ 5431 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 5432 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 5433 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 5434 } 5435 5436 static void 5437 dtrace_action_raise(uint64_t sig) 5438 { 5439 if (dtrace_destructive_disallow) 5440 return; 5441 5442 if (sig >= NSIG) { 5443 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 5444 return; 5445 } 5446 5447 /* 5448 * raise() has a queue depth of 1 -- we ignore all subsequent 5449 * invocations of the raise() action. 5450 */ 5451 if (curthread->t_dtrace_sig == 0) 5452 curthread->t_dtrace_sig = (uint8_t)sig; 5453 5454 curthread->t_sig_check = 1; 5455 aston(curthread); 5456 } 5457 5458 static void 5459 dtrace_action_stop(void) 5460 { 5461 if (dtrace_destructive_disallow) 5462 return; 5463 5464 if (!curthread->t_dtrace_stop) { 5465 curthread->t_dtrace_stop = 1; 5466 curthread->t_sig_check = 1; 5467 aston(curthread); 5468 } 5469 } 5470 5471 static void 5472 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 5473 { 5474 hrtime_t now; 5475 volatile uint16_t *flags; 5476 cpu_t *cpu = CPU; 5477 5478 if (dtrace_destructive_disallow) 5479 return; 5480 5481 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 5482 5483 now = dtrace_gethrtime(); 5484 5485 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 5486 /* 5487 * We need to advance the mark to the current time. 5488 */ 5489 cpu->cpu_dtrace_chillmark = now; 5490 cpu->cpu_dtrace_chilled = 0; 5491 } 5492 5493 /* 5494 * Now check to see if the requested chill time would take us over 5495 * the maximum amount of time allowed in the chill interval. (Or 5496 * worse, if the calculation itself induces overflow.) 
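	 *
	 * The second half of the check below is the usual wraparound test:
	 * if the sum cpu_dtrace_chilled + val comes out less than
	 * cpu_dtrace_chilled, the addition must have overflowed -- so a
	 * pathologically large val is rejected even though the wrapped sum
	 * might otherwise compare below dtrace_chill_max.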
5497 */ 5498 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 5499 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 5500 *flags |= CPU_DTRACE_ILLOP; 5501 return; 5502 } 5503 5504 while (dtrace_gethrtime() - now < val) 5505 continue; 5506 5507 /* 5508 * Normally, we assure that the value of the variable "timestamp" does 5509 * not change within an ECB. The presence of chill() represents an 5510 * exception to this rule, however. 5511 */ 5512 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 5513 cpu->cpu_dtrace_chilled += val; 5514 } 5515 5516 static void 5517 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 5518 uint64_t *buf, uint64_t arg) 5519 { 5520 int nframes = DTRACE_USTACK_NFRAMES(arg); 5521 int strsize = DTRACE_USTACK_STRSIZE(arg); 5522 uint64_t *pcs = &buf[1], *fps; 5523 char *str = (char *)&pcs[nframes]; 5524 int size, offs = 0, i, j; 5525 uintptr_t old = mstate->dtms_scratch_ptr, saved; 5526 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 5527 char *sym; 5528 5529 /* 5530 * Should be taking a faster path if string space has not been 5531 * allocated. 5532 */ 5533 ASSERT(strsize != 0); 5534 5535 /* 5536 * We will first allocate some temporary space for the frame pointers. 5537 */ 5538 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5539 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 5540 (nframes * sizeof (uint64_t)); 5541 5542 if (!DTRACE_INSCRATCH(mstate, size)) { 5543 /* 5544 * Not enough room for our frame pointers -- need to indicate 5545 * that we ran out of scratch space. 5546 */ 5547 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5548 return; 5549 } 5550 5551 mstate->dtms_scratch_ptr += size; 5552 saved = mstate->dtms_scratch_ptr; 5553 5554 /* 5555 * Now get a stack with both program counters and frame pointers. 5556 */ 5557 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5558 dtrace_getufpstack(buf, fps, nframes + 1); 5559 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5560 5561 /* 5562 * If that faulted, we're cooked. 5563 */ 5564 if (*flags & CPU_DTRACE_FAULT) 5565 goto out; 5566 5567 /* 5568 * Now we want to walk up the stack, calling the USTACK helper. For 5569 * each iteration, we restore the scratch pointer. 5570 */ 5571 for (i = 0; i < nframes; i++) { 5572 mstate->dtms_scratch_ptr = saved; 5573 5574 if (offs >= strsize) 5575 break; 5576 5577 sym = (char *)(uintptr_t)dtrace_helper( 5578 DTRACE_HELPER_ACTION_USTACK, 5579 mstate, state, pcs[i], fps[i]); 5580 5581 /* 5582 * If we faulted while running the helper, we're going to 5583 * clear the fault and null out the corresponding string. 5584 */ 5585 if (*flags & CPU_DTRACE_FAULT) { 5586 *flags &= ~CPU_DTRACE_FAULT; 5587 str[offs++] = '\0'; 5588 continue; 5589 } 5590 5591 if (sym == NULL) { 5592 str[offs++] = '\0'; 5593 continue; 5594 } 5595 5596 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5597 5598 /* 5599 * Now copy in the string that the helper returned to us. 5600 */ 5601 for (j = 0; offs + j < strsize; j++) { 5602 if ((str[offs + j] = sym[j]) == '\0') 5603 break; 5604 } 5605 5606 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5607 5608 offs += j + 1; 5609 } 5610 5611 if (offs >= strsize) { 5612 /* 5613 * If we didn't have room for all of the strings, we don't 5614 * abort processing -- this needn't be a fatal error -- but we 5615 * still want to increment a counter (dts_stkstroverflows) to 5616 * allow this condition to be warned about. (If this is from 5617 * a jstack() action, it is easily tuned via jstackstrsize.) 
5618 */ 5619 dtrace_error(&state->dts_stkstroverflows); 5620 } 5621 5622 while (offs < strsize) 5623 str[offs++] = '\0'; 5624 5625 out: 5626 mstate->dtms_scratch_ptr = old; 5627 } 5628 5629 /* 5630 * If you're looking for the epicenter of DTrace, you just found it. This 5631 * is the function called by the provider to fire a probe -- from which all 5632 * subsequent probe-context DTrace activity emanates. 5633 */ 5634 void 5635 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 5636 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 5637 { 5638 processorid_t cpuid; 5639 dtrace_icookie_t cookie; 5640 dtrace_probe_t *probe; 5641 dtrace_mstate_t mstate; 5642 dtrace_ecb_t *ecb; 5643 dtrace_action_t *act; 5644 intptr_t offs; 5645 size_t size; 5646 int vtime, onintr; 5647 volatile uint16_t *flags; 5648 hrtime_t now; 5649 5650 /* 5651 * Kick out immediately if this CPU is still being born (in which case 5652 * curthread will be set to -1) or the current thread can't allow 5653 * probes in its current context. 5654 */ 5655 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) 5656 return; 5657 5658 cookie = dtrace_interrupt_disable(); 5659 probe = dtrace_probes[id - 1]; 5660 cpuid = CPU->cpu_id; 5661 onintr = CPU_ON_INTR(CPU); 5662 5663 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 5664 probe->dtpr_predcache == curthread->t_predcache) { 5665 /* 5666 * We have hit in the predicate cache; we know that 5667 * this predicate would evaluate to be false. 5668 */ 5669 dtrace_interrupt_enable(cookie); 5670 return; 5671 } 5672 5673 if (panic_quiesce) { 5674 /* 5675 * We don't trace anything if we're panicking. 5676 */ 5677 dtrace_interrupt_enable(cookie); 5678 return; 5679 } 5680 5681 now = dtrace_gethrtime(); 5682 vtime = dtrace_vtime_references != 0; 5683 5684 if (vtime && curthread->t_dtrace_start) 5685 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 5686 5687 mstate.dtms_difo = NULL; 5688 mstate.dtms_probe = probe; 5689 mstate.dtms_strtok = NULL; 5690 mstate.dtms_arg[0] = arg0; 5691 mstate.dtms_arg[1] = arg1; 5692 mstate.dtms_arg[2] = arg2; 5693 mstate.dtms_arg[3] = arg3; 5694 mstate.dtms_arg[4] = arg4; 5695 5696 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 5697 5698 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 5699 dtrace_predicate_t *pred = ecb->dte_predicate; 5700 dtrace_state_t *state = ecb->dte_state; 5701 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 5702 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 5703 dtrace_vstate_t *vstate = &state->dts_vstate; 5704 dtrace_provider_t *prov = probe->dtpr_provider; 5705 int committed = 0; 5706 caddr_t tomax; 5707 5708 /* 5709 * A little subtlety with the following (seemingly innocuous) 5710 * declaration of the automatic 'val': by looking at the 5711 * code, you might think that it could be declared in the 5712 * action processing loop, below. (That is, it's only used in 5713 * the action processing loop.) However, it must be declared 5714 * out of that scope because in the case of DIF expression 5715 * arguments to aggregating actions, one iteration of the 5716 * action loop will use the last iteration's value. 
5717 */ 5718 #ifdef lint 5719 uint64_t val = 0; 5720 #else 5721 uint64_t val; 5722 #endif 5723 5724 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 5725 *flags &= ~CPU_DTRACE_ERROR; 5726 5727 if (prov == dtrace_provider) { 5728 /* 5729 * If dtrace itself is the provider of this probe, 5730 * we're only going to continue processing the ECB if 5731 * arg0 (the dtrace_state_t) is equal to the ECB's 5732 * creating state. (This prevents disjoint consumers 5733 * from seeing one another's metaprobes.) 5734 */ 5735 if (arg0 != (uint64_t)(uintptr_t)state) 5736 continue; 5737 } 5738 5739 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 5740 /* 5741 * We're not currently active. If our provider isn't 5742 * the dtrace pseudo provider, we're not interested. 5743 */ 5744 if (prov != dtrace_provider) 5745 continue; 5746 5747 /* 5748 * Now we must further check if we are in the BEGIN 5749 * probe. If we are, we will only continue processing 5750 * if we're still in WARMUP -- if one BEGIN enabling 5751 * has invoked the exit() action, we don't want to 5752 * evaluate subsequent BEGIN enablings. 5753 */ 5754 if (probe->dtpr_id == dtrace_probeid_begin && 5755 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 5756 ASSERT(state->dts_activity == 5757 DTRACE_ACTIVITY_DRAINING); 5758 continue; 5759 } 5760 } 5761 5762 if (ecb->dte_cond) { 5763 /* 5764 * If the dte_cond bits indicate that this 5765 * consumer is only allowed to see user-mode firings 5766 * of this probe, call the provider's dtps_usermode() 5767 * entry point to check that the probe was fired 5768 * while in a user context. Skip this ECB if that's 5769 * not the case. 5770 */ 5771 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 5772 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 5773 probe->dtpr_id, probe->dtpr_arg) == 0) 5774 continue; 5775 5776 /* 5777 * This is more subtle than it looks. We have to be 5778 * absolutely certain that CRED() isn't going to 5779 * change out from under us so it's only legit to 5780 * examine that structure if we're in constrained 5781 * situations. Currently, the only times we'll this 5782 * check is if a non-super-user has enabled the 5783 * profile or syscall providers -- providers that 5784 * allow visibility of all processes. For the 5785 * profile case, the check above will ensure that 5786 * we're examining a user context. 5787 */ 5788 if (ecb->dte_cond & DTRACE_COND_OWNER) { 5789 cred_t *cr; 5790 cred_t *s_cr = 5791 ecb->dte_state->dts_cred.dcr_cred; 5792 proc_t *proc; 5793 5794 ASSERT(s_cr != NULL); 5795 5796 if ((cr = CRED()) == NULL || 5797 s_cr->cr_uid != cr->cr_uid || 5798 s_cr->cr_uid != cr->cr_ruid || 5799 s_cr->cr_uid != cr->cr_suid || 5800 s_cr->cr_gid != cr->cr_gid || 5801 s_cr->cr_gid != cr->cr_rgid || 5802 s_cr->cr_gid != cr->cr_sgid || 5803 (proc = ttoproc(curthread)) == NULL || 5804 (proc->p_flag & SNOCD)) 5805 continue; 5806 } 5807 5808 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 5809 cred_t *cr; 5810 cred_t *s_cr = 5811 ecb->dte_state->dts_cred.dcr_cred; 5812 5813 ASSERT(s_cr != NULL); 5814 5815 if ((cr = CRED()) == NULL || 5816 s_cr->cr_zone->zone_id != 5817 cr->cr_zone->zone_id) 5818 continue; 5819 } 5820 } 5821 5822 if (now - state->dts_alive > dtrace_deadman_timeout) { 5823 /* 5824 * We seem to be dead. 
Unless we (a) have kernel 5825 * destructive permissions, (b) have explicitly enabled 5826 * destructive actions, and (c) destructive actions have 5827 * not been disabled, we're going to transition into 5828 * the KILLED state, from which no further processing 5829 * on this state will be performed. 5830 */ 5831 if (!dtrace_priv_kernel_destructive(state) || 5832 !state->dts_cred.dcr_destructive || 5833 dtrace_destructive_disallow) { 5834 void *activity = &state->dts_activity; 5835 dtrace_activity_t current; 5836 5837 do { 5838 current = state->dts_activity; 5839 } while (dtrace_cas32(activity, current, 5840 DTRACE_ACTIVITY_KILLED) != current); 5841 5842 continue; 5843 } 5844 } 5845 5846 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 5847 ecb->dte_alignment, state, &mstate)) < 0) 5848 continue; 5849 5850 tomax = buf->dtb_tomax; 5851 ASSERT(tomax != NULL); 5852 5853 if (ecb->dte_size != 0) 5854 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid); 5855 5856 mstate.dtms_epid = ecb->dte_epid; 5857 mstate.dtms_present |= DTRACE_MSTATE_EPID; 5858 5859 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) 5860 mstate.dtms_access = DTRACE_ACCESS_KERNEL; 5861 else 5862 mstate.dtms_access = 0; 5863 5864 if (pred != NULL) { 5865 dtrace_difo_t *dp = pred->dtp_difo; 5866 int rval; 5867 5868 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 5869 5870 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 5871 dtrace_cacheid_t cid = probe->dtpr_predcache; 5872 5873 if (cid != DTRACE_CACHEIDNONE && !onintr) { 5874 /* 5875 * Update the predicate cache... 5876 */ 5877 ASSERT(cid == pred->dtp_cacheid); 5878 curthread->t_predcache = cid; 5879 } 5880 5881 continue; 5882 } 5883 } 5884 5885 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 5886 act != NULL; act = act->dta_next) { 5887 size_t valoffs; 5888 dtrace_difo_t *dp; 5889 dtrace_recdesc_t *rec = &act->dta_rec; 5890 5891 size = rec->dtrd_size; 5892 valoffs = offs + rec->dtrd_offset; 5893 5894 if (DTRACEACT_ISAGG(act->dta_kind)) { 5895 uint64_t v = 0xbad; 5896 dtrace_aggregation_t *agg; 5897 5898 agg = (dtrace_aggregation_t *)act; 5899 5900 if ((dp = act->dta_difo) != NULL) 5901 v = dtrace_dif_emulate(dp, 5902 &mstate, vstate, state); 5903 5904 if (*flags & CPU_DTRACE_ERROR) 5905 continue; 5906 5907 /* 5908 * Note that we always pass the expression 5909 * value from the previous iteration of the 5910 * action loop. This value will only be used 5911 * if there is an expression argument to the 5912 * aggregating action, denoted by the 5913 * dtag_hasarg field. 5914 */ 5915 dtrace_aggregate(agg, buf, 5916 offs, aggbuf, v, val); 5917 continue; 5918 } 5919 5920 switch (act->dta_kind) { 5921 case DTRACEACT_STOP: 5922 if (dtrace_priv_proc_destructive(state)) 5923 dtrace_action_stop(); 5924 continue; 5925 5926 case DTRACEACT_BREAKPOINT: 5927 if (dtrace_priv_kernel_destructive(state)) 5928 dtrace_action_breakpoint(ecb); 5929 continue; 5930 5931 case DTRACEACT_PANIC: 5932 if (dtrace_priv_kernel_destructive(state)) 5933 dtrace_action_panic(ecb); 5934 continue; 5935 5936 case DTRACEACT_STACK: 5937 if (!dtrace_priv_kernel(state)) 5938 continue; 5939 5940 dtrace_getpcstack((pc_t *)(tomax + valoffs), 5941 size / sizeof (pc_t), probe->dtpr_aframes, 5942 DTRACE_ANCHORED(probe) ? NULL : 5943 (uint32_t *)arg0); 5944 5945 continue; 5946 5947 case DTRACEACT_JSTACK: 5948 case DTRACEACT_USTACK: 5949 if (!dtrace_priv_proc(state)) 5950 continue; 5951 5952 /* 5953 * See comment in DIF_VAR_PID.
5954 */ 5955 if (DTRACE_ANCHORED(mstate.dtms_probe) && 5956 CPU_ON_INTR(CPU)) { 5957 int depth = DTRACE_USTACK_NFRAMES( 5958 rec->dtrd_arg) + 1; 5959 5960 dtrace_bzero((void *)(tomax + valoffs), 5961 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 5962 + depth * sizeof (uint64_t)); 5963 5964 continue; 5965 } 5966 5967 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 5968 curproc->p_dtrace_helpers != NULL) { 5969 /* 5970 * This is the slow path -- we have 5971 * allocated string space, and we're 5972 * getting the stack of a process that 5973 * has helpers. Call into a separate 5974 * routine to perform this processing. 5975 */ 5976 dtrace_action_ustack(&mstate, state, 5977 (uint64_t *)(tomax + valoffs), 5978 rec->dtrd_arg); 5979 continue; 5980 } 5981 5982 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5983 dtrace_getupcstack((uint64_t *) 5984 (tomax + valoffs), 5985 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 5986 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5987 continue; 5988 5989 default: 5990 break; 5991 } 5992 5993 dp = act->dta_difo; 5994 ASSERT(dp != NULL); 5995 5996 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 5997 5998 if (*flags & CPU_DTRACE_ERROR) 5999 continue; 6000 6001 switch (act->dta_kind) { 6002 case DTRACEACT_SPECULATE: 6003 ASSERT(buf == &state->dts_buffer[cpuid]); 6004 buf = dtrace_speculation_buffer(state, 6005 cpuid, val); 6006 6007 if (buf == NULL) { 6008 *flags |= CPU_DTRACE_DROP; 6009 continue; 6010 } 6011 6012 offs = dtrace_buffer_reserve(buf, 6013 ecb->dte_needed, ecb->dte_alignment, 6014 state, NULL); 6015 6016 if (offs < 0) { 6017 *flags |= CPU_DTRACE_DROP; 6018 continue; 6019 } 6020 6021 tomax = buf->dtb_tomax; 6022 ASSERT(tomax != NULL); 6023 6024 if (ecb->dte_size != 0) 6025 DTRACE_STORE(uint32_t, tomax, offs, 6026 ecb->dte_epid); 6027 continue; 6028 6029 case DTRACEACT_CHILL: 6030 if (dtrace_priv_kernel_destructive(state)) 6031 dtrace_action_chill(&mstate, val); 6032 continue; 6033 6034 case DTRACEACT_RAISE: 6035 if (dtrace_priv_proc_destructive(state)) 6036 dtrace_action_raise(val); 6037 continue; 6038 6039 case DTRACEACT_COMMIT: 6040 ASSERT(!committed); 6041 6042 /* 6043 * We need to commit our buffer state. 6044 */ 6045 if (ecb->dte_size) 6046 buf->dtb_offset = offs + ecb->dte_size; 6047 buf = &state->dts_buffer[cpuid]; 6048 dtrace_speculation_commit(state, cpuid, val); 6049 committed = 1; 6050 continue; 6051 6052 case DTRACEACT_DISCARD: 6053 dtrace_speculation_discard(state, cpuid, val); 6054 continue; 6055 6056 case DTRACEACT_DIFEXPR: 6057 case DTRACEACT_LIBACT: 6058 case DTRACEACT_PRINTF: 6059 case DTRACEACT_PRINTA: 6060 case DTRACEACT_SYSTEM: 6061 case DTRACEACT_FREOPEN: 6062 break; 6063 6064 case DTRACEACT_SYM: 6065 case DTRACEACT_MOD: 6066 if (!dtrace_priv_kernel(state)) 6067 continue; 6068 break; 6069 6070 case DTRACEACT_USYM: 6071 case DTRACEACT_UMOD: 6072 case DTRACEACT_UADDR: { 6073 struct pid *pid = curthread->t_procp->p_pidp; 6074 6075 if (!dtrace_priv_proc(state)) 6076 continue; 6077 6078 DTRACE_STORE(uint64_t, tomax, 6079 valoffs, (uint64_t)pid->pid_id); 6080 DTRACE_STORE(uint64_t, tomax, 6081 valoffs + sizeof (uint64_t), val); 6082 6083 continue; 6084 } 6085 6086 case DTRACEACT_EXIT: { 6087 /* 6088 * For the exit action, we are going to attempt 6089 * to atomically set our activity to be 6090 * draining. If this fails (either because 6091 * another CPU has beat us to the exit action, 6092 * or because our current activity is something 6093 * other than ACTIVE or WARMUP), we will 6094 * continue. 
This assures that the exit action 6095 * can be successfully recorded at most once 6096 * when we're in the ACTIVE state. If we're 6097 * encountering the exit() action while in 6098 * COOLDOWN, however, we want to honor the new 6099 * status code. (We know that we're the only 6100 * thread in COOLDOWN, so there is no race.) 6101 */ 6102 void *activity = &state->dts_activity; 6103 dtrace_activity_t current = state->dts_activity; 6104 6105 if (current == DTRACE_ACTIVITY_COOLDOWN) 6106 break; 6107 6108 if (current != DTRACE_ACTIVITY_WARMUP) 6109 current = DTRACE_ACTIVITY_ACTIVE; 6110 6111 if (dtrace_cas32(activity, current, 6112 DTRACE_ACTIVITY_DRAINING) != current) { 6113 *flags |= CPU_DTRACE_DROP; 6114 continue; 6115 } 6116 6117 break; 6118 } 6119 6120 default: 6121 ASSERT(0); 6122 } 6123 6124 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 6125 uintptr_t end = valoffs + size; 6126 6127 if (!dtrace_vcanload((void *)(uintptr_t)val, 6128 &dp->dtdo_rtype, &mstate, vstate)) 6129 continue; 6130 6131 /* 6132 * If this is a string, we're going to only 6133 * load until we find the zero byte -- after 6134 * which we'll store zero bytes. 6135 */ 6136 if (dp->dtdo_rtype.dtdt_kind == 6137 DIF_TYPE_STRING) { 6138 char c = '\0' + 1; 6139 int intuple = act->dta_intuple; 6140 size_t s; 6141 6142 for (s = 0; s < size; s++) { 6143 if (c != '\0') 6144 c = dtrace_load8(val++); 6145 6146 DTRACE_STORE(uint8_t, tomax, 6147 valoffs++, c); 6148 6149 if (c == '\0' && intuple) 6150 break; 6151 } 6152 6153 continue; 6154 } 6155 6156 while (valoffs < end) { 6157 DTRACE_STORE(uint8_t, tomax, valoffs++, 6158 dtrace_load8(val++)); 6159 } 6160 6161 continue; 6162 } 6163 6164 switch (size) { 6165 case 0: 6166 break; 6167 6168 case sizeof (uint8_t): 6169 DTRACE_STORE(uint8_t, tomax, valoffs, val); 6170 break; 6171 case sizeof (uint16_t): 6172 DTRACE_STORE(uint16_t, tomax, valoffs, val); 6173 break; 6174 case sizeof (uint32_t): 6175 DTRACE_STORE(uint32_t, tomax, valoffs, val); 6176 break; 6177 case sizeof (uint64_t): 6178 DTRACE_STORE(uint64_t, tomax, valoffs, val); 6179 break; 6180 default: 6181 /* 6182 * Any other size should have been returned by 6183 * reference, not by value. 6184 */ 6185 ASSERT(0); 6186 break; 6187 } 6188 } 6189 6190 if (*flags & CPU_DTRACE_DROP) 6191 continue; 6192 6193 if (*flags & CPU_DTRACE_FAULT) { 6194 int ndx; 6195 dtrace_action_t *err; 6196 6197 buf->dtb_errors++; 6198 6199 if (probe->dtpr_id == dtrace_probeid_error) { 6200 /* 6201 * There's nothing we can do -- we had an 6202 * error on the error probe. We bump an 6203 * error counter to at least indicate that 6204 * this condition happened. 6205 */ 6206 dtrace_error(&state->dts_dblerrors); 6207 continue; 6208 } 6209 6210 if (vtime) { 6211 /* 6212 * Before recursing on dtrace_probe(), we 6213 * need to explicitly clear out our start 6214 * time to prevent it from being accumulated 6215 * into t_dtrace_vtime. 6216 */ 6217 curthread->t_dtrace_start = 0; 6218 } 6219 6220 /* 6221 * Iterate over the actions to figure out which action 6222 * we were processing when we experienced the error. 6223 * Note that act points _past_ the faulting action; if 6224 * act is ecb->dte_action, the fault was in the 6225 * predicate, if it's ecb->dte_action->dta_next it's 6226 * in action #1, and so on. 6227 */ 6228 for (err = ecb->dte_action, ndx = 0; 6229 err != act; err = err->dta_next, ndx++) 6230 continue; 6231 6232 dtrace_probe_error(state, ecb->dte_epid, ndx, 6233 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 
6234 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 6235 cpu_core[cpuid].cpuc_dtrace_illval); 6236 6237 continue; 6238 } 6239 6240 if (!committed) 6241 buf->dtb_offset = offs + ecb->dte_size; 6242 } 6243 6244 if (vtime) 6245 curthread->t_dtrace_start = dtrace_gethrtime(); 6246 6247 dtrace_interrupt_enable(cookie); 6248 } 6249 6250 /* 6251 * DTrace Probe Hashing Functions 6252 * 6253 * The functions in this section (and indeed, the functions in remaining 6254 * sections) are not _called_ from probe context. (Any exceptions to this are 6255 * marked with a "Note:".) Rather, they are called from elsewhere in the 6256 * DTrace framework to look-up probes in, add probes to and remove probes from 6257 * the DTrace probe hashes. (Each probe is hashed by each element of the 6258 * probe tuple -- allowing for fast lookups, regardless of what was 6259 * specified.) 6260 */ 6261 static uint_t 6262 dtrace_hash_str(char *p) 6263 { 6264 unsigned int g; 6265 uint_t hval = 0; 6266 6267 while (*p) { 6268 hval = (hval << 4) + *p++; 6269 if ((g = (hval & 0xf0000000)) != 0) 6270 hval ^= g >> 24; 6271 hval &= ~g; 6272 } 6273 return (hval); 6274 } 6275 6276 static dtrace_hash_t * 6277 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 6278 { 6279 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 6280 6281 hash->dth_stroffs = stroffs; 6282 hash->dth_nextoffs = nextoffs; 6283 hash->dth_prevoffs = prevoffs; 6284 6285 hash->dth_size = 1; 6286 hash->dth_mask = hash->dth_size - 1; 6287 6288 hash->dth_tab = kmem_zalloc(hash->dth_size * 6289 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 6290 6291 return (hash); 6292 } 6293 6294 static void 6295 dtrace_hash_destroy(dtrace_hash_t *hash) 6296 { 6297 #ifdef DEBUG 6298 int i; 6299 6300 for (i = 0; i < hash->dth_size; i++) 6301 ASSERT(hash->dth_tab[i] == NULL); 6302 #endif 6303 6304 kmem_free(hash->dth_tab, 6305 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 6306 kmem_free(hash, sizeof (dtrace_hash_t)); 6307 } 6308 6309 static void 6310 dtrace_hash_resize(dtrace_hash_t *hash) 6311 { 6312 int size = hash->dth_size, i, ndx; 6313 int new_size = hash->dth_size << 1; 6314 int new_mask = new_size - 1; 6315 dtrace_hashbucket_t **new_tab, *bucket, *next; 6316 6317 ASSERT((new_size & new_mask) == 0); 6318 6319 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 6320 6321 for (i = 0; i < size; i++) { 6322 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 6323 dtrace_probe_t *probe = bucket->dthb_chain; 6324 6325 ASSERT(probe != NULL); 6326 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 6327 6328 next = bucket->dthb_next; 6329 bucket->dthb_next = new_tab[ndx]; 6330 new_tab[ndx] = bucket; 6331 } 6332 } 6333 6334 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 6335 hash->dth_tab = new_tab; 6336 hash->dth_size = new_size; 6337 hash->dth_mask = new_mask; 6338 } 6339 6340 static void 6341 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 6342 { 6343 int hashval = DTRACE_HASHSTR(hash, new); 6344 int ndx = hashval & hash->dth_mask; 6345 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6346 dtrace_probe_t **nextp, **prevp; 6347 6348 for (; bucket != NULL; bucket = bucket->dthb_next) { 6349 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 6350 goto add; 6351 } 6352 6353 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 6354 dtrace_hash_resize(hash); 6355 dtrace_hash_add(hash, new); 6356 return; 6357 } 6358 6359 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 6360 bucket->dthb_next = 
hash->dth_tab[ndx]; 6361 hash->dth_tab[ndx] = bucket; 6362 hash->dth_nbuckets++; 6363 6364 add: 6365 nextp = DTRACE_HASHNEXT(hash, new); 6366 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 6367 *nextp = bucket->dthb_chain; 6368 6369 if (bucket->dthb_chain != NULL) { 6370 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 6371 ASSERT(*prevp == NULL); 6372 *prevp = new; 6373 } 6374 6375 bucket->dthb_chain = new; 6376 bucket->dthb_len++; 6377 } 6378 6379 static dtrace_probe_t * 6380 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 6381 { 6382 int hashval = DTRACE_HASHSTR(hash, template); 6383 int ndx = hashval & hash->dth_mask; 6384 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6385 6386 for (; bucket != NULL; bucket = bucket->dthb_next) { 6387 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6388 return (bucket->dthb_chain); 6389 } 6390 6391 return (NULL); 6392 } 6393 6394 static int 6395 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 6396 { 6397 int hashval = DTRACE_HASHSTR(hash, template); 6398 int ndx = hashval & hash->dth_mask; 6399 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6400 6401 for (; bucket != NULL; bucket = bucket->dthb_next) { 6402 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6403 return (bucket->dthb_len); 6404 } 6405 6406 return (NULL); 6407 } 6408 6409 static void 6410 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 6411 { 6412 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 6413 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6414 6415 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 6416 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 6417 6418 /* 6419 * Find the bucket that we're removing this probe from. 6420 */ 6421 for (; bucket != NULL; bucket = bucket->dthb_next) { 6422 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 6423 break; 6424 } 6425 6426 ASSERT(bucket != NULL); 6427 6428 if (*prevp == NULL) { 6429 if (*nextp == NULL) { 6430 /* 6431 * The removed probe was the only probe on this 6432 * bucket; we need to remove the bucket. 6433 */ 6434 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 6435 6436 ASSERT(bucket->dthb_chain == probe); 6437 ASSERT(b != NULL); 6438 6439 if (b == bucket) { 6440 hash->dth_tab[ndx] = bucket->dthb_next; 6441 } else { 6442 while (b->dthb_next != bucket) 6443 b = b->dthb_next; 6444 b->dthb_next = bucket->dthb_next; 6445 } 6446 6447 ASSERT(hash->dth_nbuckets > 0); 6448 hash->dth_nbuckets--; 6449 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 6450 return; 6451 } 6452 6453 bucket->dthb_chain = *nextp; 6454 } else { 6455 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 6456 } 6457 6458 if (*nextp != NULL) 6459 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 6460 } 6461 6462 /* 6463 * DTrace Utility Functions 6464 * 6465 * These are random utility functions that are _not_ called from probe context. 6466 */ 6467 static int 6468 dtrace_badattr(const dtrace_attribute_t *a) 6469 { 6470 return (a->dtat_name > DTRACE_STABILITY_MAX || 6471 a->dtat_data > DTRACE_STABILITY_MAX || 6472 a->dtat_class > DTRACE_CLASS_MAX); 6473 } 6474 6475 /* 6476 * Return a duplicate copy of a string. If the specified string is NULL, 6477 * this function returns a zero-length string. 6478 */ 6479 static char * 6480 dtrace_strdup(const char *str) 6481 { 6482 char *new = kmem_zalloc((str != NULL ? 
strlen(str) : 0) + 1, KM_SLEEP); 6483 6484 if (str != NULL) 6485 (void) strcpy(new, str); 6486 6487 return (new); 6488 } 6489 6490 #define DTRACE_ISALPHA(c) \ 6491 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 6492 6493 static int 6494 dtrace_badname(const char *s) 6495 { 6496 char c; 6497 6498 if (s == NULL || (c = *s++) == '\0') 6499 return (0); 6500 6501 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 6502 return (1); 6503 6504 while ((c = *s++) != '\0') { 6505 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 6506 c != '-' && c != '_' && c != '.' && c != '`') 6507 return (1); 6508 } 6509 6510 return (0); 6511 } 6512 6513 static void 6514 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 6515 { 6516 uint32_t priv; 6517 6518 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 6519 /* 6520 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 6521 */ 6522 priv = DTRACE_PRIV_ALL; 6523 } else { 6524 *uidp = crgetuid(cr); 6525 *zoneidp = crgetzoneid(cr); 6526 6527 priv = 0; 6528 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 6529 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 6530 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 6531 priv |= DTRACE_PRIV_USER; 6532 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 6533 priv |= DTRACE_PRIV_PROC; 6534 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 6535 priv |= DTRACE_PRIV_OWNER; 6536 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 6537 priv |= DTRACE_PRIV_ZONEOWNER; 6538 } 6539 6540 *privp = priv; 6541 } 6542 6543 #ifdef DTRACE_ERRDEBUG 6544 static void 6545 dtrace_errdebug(const char *str) 6546 { 6547 int hval = dtrace_hash_str((char *)str) % DTRACE_ERRHASHSZ; 6548 int occupied = 0; 6549 6550 mutex_enter(&dtrace_errlock); 6551 dtrace_errlast = str; 6552 dtrace_errthread = curthread; 6553 6554 while (occupied++ < DTRACE_ERRHASHSZ) { 6555 if (dtrace_errhash[hval].dter_msg == str) { 6556 dtrace_errhash[hval].dter_count++; 6557 goto out; 6558 } 6559 6560 if (dtrace_errhash[hval].dter_msg != NULL) { 6561 hval = (hval + 1) % DTRACE_ERRHASHSZ; 6562 continue; 6563 } 6564 6565 dtrace_errhash[hval].dter_msg = str; 6566 dtrace_errhash[hval].dter_count = 1; 6567 goto out; 6568 } 6569 6570 panic("dtrace: undersized error hash"); 6571 out: 6572 mutex_exit(&dtrace_errlock); 6573 } 6574 #endif 6575 6576 /* 6577 * DTrace Matching Functions 6578 * 6579 * These functions are used to match groups of probes, given some elements of 6580 * a probe tuple, or some globbed expressions for elements of a probe tuple. 6581 */ 6582 static int 6583 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 6584 zoneid_t zoneid) 6585 { 6586 if (priv != DTRACE_PRIV_ALL) { 6587 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 6588 uint32_t match = priv & ppriv; 6589 6590 /* 6591 * No PRIV_DTRACE_* privileges... 6592 */ 6593 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 6594 DTRACE_PRIV_KERNEL)) == 0) 6595 return (0); 6596 6597 /* 6598 * No matching bits, but there were bits to match... 6599 */ 6600 if (match == 0 && ppriv != 0) 6601 return (0); 6602 6603 /* 6604 * Need to have permissions to the process, but don't... 6605 */ 6606 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 6607 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 6608 return (0); 6609 } 6610 6611 /* 6612 * Need to be in the same zone unless we possess the 6613 * privilege to examine all zones. 
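 *
 * (Illustration, not drawn from the code below: a consumer holding
 * dtrace_proc but lacking the proc_zone privilege is not granted
 * DTRACE_PRIV_ZONEOWNER above, so it can only match such a probe when
 * its zone matches the zone in which the provider was registered.)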
6614 */ 6615 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 6616 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 6617 return (0); 6618 } 6619 } 6620 6621 return (1); 6622 } 6623 6624 /* 6625 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 6626 * consists of input pattern strings and an ops-vector to evaluate them. 6627 * This function returns >0 for match, 0 for no match, and <0 for error. 6628 */ 6629 static int 6630 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 6631 uint32_t priv, uid_t uid, zoneid_t zoneid) 6632 { 6633 dtrace_provider_t *pvp = prp->dtpr_provider; 6634 int rv; 6635 6636 if (pvp->dtpv_defunct) 6637 return (0); 6638 6639 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 6640 return (rv); 6641 6642 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 6643 return (rv); 6644 6645 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 6646 return (rv); 6647 6648 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 6649 return (rv); 6650 6651 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 6652 return (0); 6653 6654 return (rv); 6655 } 6656 6657 /* 6658 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 6659 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 6660 * libc's version, the kernel version only applies to 8-bit ASCII strings. 6661 * In addition, all of the recursion cases except for '*' matching have been 6662 * unwound. For '*', we still implement recursive evaluation, but a depth 6663 * counter is maintained and matching is aborted if we recurse too deep. 6664 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 6665 */ 6666 static int 6667 dtrace_match_glob(const char *s, const char *p, int depth) 6668 { 6669 const char *olds; 6670 char s1, c; 6671 int gs; 6672 6673 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 6674 return (-1); 6675 6676 if (s == NULL) 6677 s = ""; /* treat NULL as empty string */ 6678 6679 top: 6680 olds = s; 6681 s1 = *s++; 6682 6683 if (p == NULL) 6684 return (0); 6685 6686 if ((c = *p++) == '\0') 6687 return (s1 == '\0'); 6688 6689 switch (c) { 6690 case '[': { 6691 int ok = 0, notflag = 0; 6692 char lc = '\0'; 6693 6694 if (s1 == '\0') 6695 return (0); 6696 6697 if (*p == '!') { 6698 notflag = 1; 6699 p++; 6700 } 6701 6702 if ((c = *p++) == '\0') 6703 return (0); 6704 6705 do { 6706 if (c == '-' && lc != '\0' && *p != ']') { 6707 if ((c = *p++) == '\0') 6708 return (0); 6709 if (c == '\\' && (c = *p++) == '\0') 6710 return (0); 6711 6712 if (notflag) { 6713 if (s1 < lc || s1 > c) 6714 ok++; 6715 else 6716 return (0); 6717 } else if (lc <= s1 && s1 <= c) 6718 ok++; 6719 6720 } else if (c == '\\' && (c = *p++) == '\0') 6721 return (0); 6722 6723 lc = c; /* save left-hand 'c' for next iteration */ 6724 6725 if (notflag) { 6726 if (s1 != c) 6727 ok++; 6728 else 6729 return (0); 6730 } else if (s1 == c) 6731 ok++; 6732 6733 if ((c = *p++) == '\0') 6734 return (0); 6735 6736 } while (c != ']'); 6737 6738 if (ok) 6739 goto top; 6740 6741 return (0); 6742 } 6743 6744 case '\\': 6745 if ((c = *p++) == '\0') 6746 return (0); 6747 /*FALLTHRU*/ 6748 6749 default: 6750 if (c != s1) 6751 return (0); 6752 /*FALLTHRU*/ 6753 6754 case '?': 6755 if (s1 != '\0') 6756 goto top; 6757 return (0); 6758 6759 case '*': 6760 while (*p == '*') 6761 p++; /* consecutive *'s are identical to a single one */ 6762 6763 if (*p == '\0') 6764 return (1); 6765 6766 for (s = 
olds; *s != '\0'; s++) { 6767 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 6768 return (gs); 6769 } 6770 6771 return (0); 6772 } 6773 } 6774 6775 /*ARGSUSED*/ 6776 static int 6777 dtrace_match_string(const char *s, const char *p, int depth) 6778 { 6779 return (s != NULL && strcmp(s, p) == 0); 6780 } 6781 6782 /*ARGSUSED*/ 6783 static int 6784 dtrace_match_nul(const char *s, const char *p, int depth) 6785 { 6786 return (1); /* always match the empty pattern */ 6787 } 6788 6789 /*ARGSUSED*/ 6790 static int 6791 dtrace_match_nonzero(const char *s, const char *p, int depth) 6792 { 6793 return (s != NULL && s[0] != '\0'); 6794 } 6795 6796 static int 6797 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 6798 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 6799 { 6800 dtrace_probe_t template, *probe; 6801 dtrace_hash_t *hash = NULL; 6802 int len, rc, best = INT_MAX, nmatched = 0; 6803 dtrace_id_t i; 6804 6805 ASSERT(MUTEX_HELD(&dtrace_lock)); 6806 6807 /* 6808 * If the probe ID is specified in the key, just lookup by ID and 6809 * invoke the match callback once if a matching probe is found. 6810 */ 6811 if (pkp->dtpk_id != DTRACE_IDNONE) { 6812 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 6813 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 6814 if ((*matched)(probe, arg) == DTRACE_MATCH_FAIL) 6815 return (DTRACE_MATCH_FAIL); 6816 nmatched++; 6817 } 6818 return (nmatched); 6819 } 6820 6821 template.dtpr_mod = (char *)pkp->dtpk_mod; 6822 template.dtpr_func = (char *)pkp->dtpk_func; 6823 template.dtpr_name = (char *)pkp->dtpk_name; 6824 6825 /* 6826 * We want to find the most distinct of the module name, function 6827 * name, and name. So for each one that is not a glob pattern or 6828 * empty string, we perform a lookup in the corresponding hash and 6829 * use the hash table with the fewest collisions to do our search. 6830 */ 6831 if (pkp->dtpk_mmatch == &dtrace_match_string && 6832 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 6833 best = len; 6834 hash = dtrace_bymod; 6835 } 6836 6837 if (pkp->dtpk_fmatch == &dtrace_match_string && 6838 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 6839 best = len; 6840 hash = dtrace_byfunc; 6841 } 6842 6843 if (pkp->dtpk_nmatch == &dtrace_match_string && 6844 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 6845 best = len; 6846 hash = dtrace_byname; 6847 } 6848 6849 /* 6850 * If we did not select a hash table, iterate over every probe and 6851 * invoke our callback for each one that matches our input probe key. 6852 */ 6853 if (hash == NULL) { 6854 for (i = 0; i < dtrace_nprobes; i++) { 6855 if ((probe = dtrace_probes[i]) == NULL || 6856 dtrace_match_probe(probe, pkp, priv, uid, 6857 zoneid) <= 0) 6858 continue; 6859 6860 nmatched++; 6861 6862 if ((rc = (*matched)(probe, arg)) != 6863 DTRACE_MATCH_NEXT) { 6864 if (rc == DTRACE_MATCH_FAIL) 6865 return (DTRACE_MATCH_FAIL); 6866 break; 6867 } 6868 } 6869 6870 return (nmatched); 6871 } 6872 6873 /* 6874 * If we selected a hash table, iterate over each probe of the same key 6875 * name and invoke the callback for every probe that matches the other 6876 * attributes of our input probe key. 
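 *
 * As an illustrative example (the probe description here is purely for
 * illustration): for a description such as syscall::open:entry, both the
 * function ("open") and name ("entry") components are plain strings, so
 * the function hash and the name hash are both candidates above;
 * whichever reported fewer collisions is the chain walked below, and
 * every candidate probe on that chain is still vetted by
 * dtrace_match_probe() against the full key (including the globbed or
 * empty components).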
6877 */ 6878 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 6879 probe = *(DTRACE_HASHNEXT(hash, probe))) { 6880 6881 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 6882 continue; 6883 6884 nmatched++; 6885 6886 if ((rc = (*matched)(probe, arg)) != DTRACE_MATCH_NEXT) { 6887 if (rc == DTRACE_MATCH_FAIL) 6888 return (DTRACE_MATCH_FAIL); 6889 break; 6890 } 6891 } 6892 6893 return (nmatched); 6894 } 6895 6896 /* 6897 * Return the function pointer dtrace_probecmp() should use to compare the 6898 * specified pattern with a string. For NULL or empty patterns, we select 6899 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 6900 * For non-empty non-glob strings, we use dtrace_match_string(). 6901 */ 6902 static dtrace_probekey_f * 6903 dtrace_probekey_func(const char *p) 6904 { 6905 char c; 6906 6907 if (p == NULL || *p == '\0') 6908 return (&dtrace_match_nul); 6909 6910 while ((c = *p++) != '\0') { 6911 if (c == '[' || c == '?' || c == '*' || c == '\\') 6912 return (&dtrace_match_glob); 6913 } 6914 6915 return (&dtrace_match_string); 6916 } 6917 6918 /* 6919 * Build a probe comparison key for use with dtrace_match_probe() from the 6920 * given probe description. By convention, a null key only matches anchored 6921 * probes: if each field is the empty string, reset dtpk_fmatch to 6922 * dtrace_match_nonzero(). 6923 */ 6924 static void 6925 dtrace_probekey(const dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 6926 { 6927 pkp->dtpk_prov = pdp->dtpd_provider; 6928 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 6929 6930 pkp->dtpk_mod = pdp->dtpd_mod; 6931 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 6932 6933 pkp->dtpk_func = pdp->dtpd_func; 6934 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 6935 6936 pkp->dtpk_name = pdp->dtpd_name; 6937 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 6938 6939 pkp->dtpk_id = pdp->dtpd_id; 6940 6941 if (pkp->dtpk_id == DTRACE_IDNONE && 6942 pkp->dtpk_pmatch == &dtrace_match_nul && 6943 pkp->dtpk_mmatch == &dtrace_match_nul && 6944 pkp->dtpk_fmatch == &dtrace_match_nul && 6945 pkp->dtpk_nmatch == &dtrace_match_nul) 6946 pkp->dtpk_fmatch = &dtrace_match_nonzero; 6947 } 6948 6949 /* 6950 * DTrace Provider-to-Framework API Functions 6951 * 6952 * These functions implement much of the Provider-to-Framework API, as 6953 * described in <sys/dtrace.h>. The parts of the API not in this section are 6954 * the functions in the API for probe management (found below), and 6955 * dtrace_probe() itself (found above). 6956 */ 6957 6958 /* 6959 * Register the calling provider with the DTrace framework. This should 6960 * generally be called by DTrace providers in their attach(9E) entry point. 6961 */ 6962 int 6963 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 6964 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 6965 { 6966 dtrace_provider_t *provider; 6967 6968 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 6969 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6970 "arguments", name ? 
name : "<NULL>"); 6971 return (EINVAL); 6972 } 6973 6974 if (name[0] == '\0' || dtrace_badname(name)) { 6975 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6976 "provider name", name); 6977 return (EINVAL); 6978 } 6979 6980 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 6981 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 6982 pops->dtps_destroy == NULL || 6983 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 6984 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6985 "provider ops", name); 6986 return (EINVAL); 6987 } 6988 6989 if (dtrace_badattr(&pap->dtpa_provider) || 6990 dtrace_badattr(&pap->dtpa_mod) || 6991 dtrace_badattr(&pap->dtpa_func) || 6992 dtrace_badattr(&pap->dtpa_name) || 6993 dtrace_badattr(&pap->dtpa_args)) { 6994 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6995 "provider attributes", name); 6996 return (EINVAL); 6997 } 6998 6999 if (priv & ~DTRACE_PRIV_ALL) { 7000 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7001 "privilege attributes", name); 7002 return (EINVAL); 7003 } 7004 7005 if ((priv & DTRACE_PRIV_KERNEL) && 7006 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 7007 pops->dtps_usermode == NULL) { 7008 cmn_err(CE_WARN, "failed to register provider '%s': need " 7009 "dtps_usermode() op for given privilege attributes", name); 7010 return (EINVAL); 7011 } 7012 7013 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 7014 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 7015 (void) strcpy(provider->dtpv_name, name); 7016 7017 provider->dtpv_attr = *pap; 7018 provider->dtpv_priv.dtpp_flags = priv; 7019 if (cr != NULL) { 7020 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 7021 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 7022 } 7023 provider->dtpv_pops = *pops; 7024 7025 if (pops->dtps_provide == NULL) { 7026 ASSERT(pops->dtps_provide_module != NULL); 7027 provider->dtpv_pops.dtps_provide = 7028 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop; 7029 } 7030 7031 if (pops->dtps_provide_module == NULL) { 7032 ASSERT(pops->dtps_provide != NULL); 7033 provider->dtpv_pops.dtps_provide_module = 7034 (void (*)(void *, struct modctl *))dtrace_nullop; 7035 } 7036 7037 if (pops->dtps_suspend == NULL) { 7038 ASSERT(pops->dtps_resume == NULL); 7039 provider->dtpv_pops.dtps_suspend = 7040 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7041 provider->dtpv_pops.dtps_resume = 7042 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7043 } 7044 7045 provider->dtpv_arg = arg; 7046 *idp = (dtrace_provider_id_t)provider; 7047 7048 if (pops == &dtrace_provider_ops) { 7049 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7050 ASSERT(MUTEX_HELD(&dtrace_lock)); 7051 ASSERT(dtrace_anon.dta_enabling == NULL); 7052 7053 /* 7054 * We make sure that the DTrace provider is at the head of 7055 * the provider chain. 7056 */ 7057 provider->dtpv_next = dtrace_provider; 7058 dtrace_provider = provider; 7059 return (0); 7060 } 7061 7062 mutex_enter(&dtrace_provider_lock); 7063 mutex_enter(&dtrace_lock); 7064 7065 /* 7066 * If there is at least one provider registered, we'll add this 7067 * provider after the first provider. 
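 *
 * (A hypothetical illustration: if providers A and then B register after
 * the framework itself, the chain reads dtrace -> B -> A -- each new
 * provider is linked in immediately behind the DTrace provider, which
 * stays at the head of the chain.)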
7068 */ 7069 if (dtrace_provider != NULL) { 7070 provider->dtpv_next = dtrace_provider->dtpv_next; 7071 dtrace_provider->dtpv_next = provider; 7072 } else { 7073 dtrace_provider = provider; 7074 } 7075 7076 if (dtrace_retained != NULL) { 7077 dtrace_enabling_provide(provider); 7078 7079 /* 7080 * Now we need to call dtrace_enabling_matchall() -- which 7081 * will acquire cpu_lock and dtrace_lock. We therefore need 7082 * to drop all of our locks before calling into it... 7083 */ 7084 mutex_exit(&dtrace_lock); 7085 mutex_exit(&dtrace_provider_lock); 7086 dtrace_enabling_matchall(); 7087 7088 return (0); 7089 } 7090 7091 mutex_exit(&dtrace_lock); 7092 mutex_exit(&dtrace_provider_lock); 7093 7094 return (0); 7095 } 7096 7097 /* 7098 * Unregister the specified provider from the DTrace framework. This should 7099 * generally be called by DTrace providers in their detach(9E) entry point. 7100 */ 7101 int 7102 dtrace_unregister(dtrace_provider_id_t id) 7103 { 7104 dtrace_provider_t *old = (dtrace_provider_t *)id; 7105 dtrace_provider_t *prev = NULL; 7106 int i, self = 0, noreap = 0; 7107 dtrace_probe_t *probe, *first = NULL; 7108 7109 if (old->dtpv_pops.dtps_enable == 7110 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop) { 7111 /* 7112 * If DTrace itself is the provider, we're called with locks 7113 * already held. 7114 */ 7115 ASSERT(old == dtrace_provider); 7116 ASSERT(dtrace_devi != NULL); 7117 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7118 ASSERT(MUTEX_HELD(&dtrace_lock)); 7119 self = 1; 7120 7121 if (dtrace_provider->dtpv_next != NULL) { 7122 /* 7123 * There's another provider here; return failure. 7124 */ 7125 return (EBUSY); 7126 } 7127 } else { 7128 mutex_enter(&dtrace_provider_lock); 7129 mutex_enter(&mod_lock); 7130 mutex_enter(&dtrace_lock); 7131 } 7132 7133 /* 7134 * If anyone has /dev/dtrace open, or if there are anonymous enabled 7135 * probes, we refuse to let providers slither away, unless this 7136 * provider has already been explicitly invalidated. 7137 */ 7138 if (!old->dtpv_defunct && 7139 (dtrace_opens || (dtrace_anon.dta_state != NULL && 7140 dtrace_anon.dta_state->dts_necbs > 0))) { 7141 if (!self) { 7142 mutex_exit(&dtrace_lock); 7143 mutex_exit(&mod_lock); 7144 mutex_exit(&dtrace_provider_lock); 7145 } 7146 return (EBUSY); 7147 } 7148 7149 /* 7150 * Attempt to destroy the probes associated with this provider. 7151 */ 7152 for (i = 0; i < dtrace_nprobes; i++) { 7153 if ((probe = dtrace_probes[i]) == NULL) 7154 continue; 7155 7156 if (probe->dtpr_provider != old) 7157 continue; 7158 7159 if (probe->dtpr_ecb == NULL) 7160 continue; 7161 7162 /* 7163 * If we are trying to unregister a defunct provider, and the 7164 * provider was made defunct within the interval dictated by 7165 * dtrace_unregister_defunct_reap, we'll (asynchronously) 7166 * attempt to reap our enablings. To denote that the provider 7167 * should reattempt to unregister itself at some point in the 7168 * future, we will return a differentiable error code (EAGAIN 7169 * instead of EBUSY) in this case. 
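 *
 * As a sketch of the expected caller behavior (hypothetical provider
 * code, not part of this file), a provider's detach(9E) routine would
 * typically treat either return value as a failed detach and simply
 * retry on a later detach attempt:
 *
 *	if (dtrace_unregister(example_provider_id) != 0)
 *		return (DDI_FAILURE);	-- EBUSY or EAGAIN: retry later
 *
 *	return (DDI_SUCCESS);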
7170 */ 7171 if (dtrace_gethrtime() - old->dtpv_defunct > 7172 dtrace_unregister_defunct_reap) 7173 noreap = 1; 7174 7175 if (!self) { 7176 mutex_exit(&dtrace_lock); 7177 mutex_exit(&mod_lock); 7178 mutex_exit(&dtrace_provider_lock); 7179 } 7180 7181 if (noreap) 7182 return (EBUSY); 7183 7184 (void) taskq_dispatch(dtrace_taskq, 7185 (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP); 7186 7187 return (EAGAIN); 7188 } 7189 7190 /* 7191 * All of the probes for this provider are disabled; we can safely 7192 * remove all of them from their hash chains and from the probe array. 7193 */ 7194 for (i = 0; i < dtrace_nprobes; i++) { 7195 if ((probe = dtrace_probes[i]) == NULL) 7196 continue; 7197 7198 if (probe->dtpr_provider != old) 7199 continue; 7200 7201 dtrace_probes[i] = NULL; 7202 7203 dtrace_hash_remove(dtrace_bymod, probe); 7204 dtrace_hash_remove(dtrace_byfunc, probe); 7205 dtrace_hash_remove(dtrace_byname, probe); 7206 7207 if (first == NULL) { 7208 first = probe; 7209 probe->dtpr_nextmod = NULL; 7210 } else { 7211 probe->dtpr_nextmod = first; 7212 first = probe; 7213 } 7214 } 7215 7216 /* 7217 * The provider's probes have been removed from the hash chains and 7218 * from the probe array. Now issue a dtrace_sync() to be sure that 7219 * everyone has cleared out from any probe array processing. 7220 */ 7221 dtrace_sync(); 7222 7223 for (probe = first; probe != NULL; probe = first) { 7224 first = probe->dtpr_nextmod; 7225 7226 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 7227 probe->dtpr_arg); 7228 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7229 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7230 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7231 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 7232 kmem_free(probe, sizeof (dtrace_probe_t)); 7233 } 7234 7235 if ((prev = dtrace_provider) == old) { 7236 ASSERT(self || dtrace_devi == NULL); 7237 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 7238 dtrace_provider = old->dtpv_next; 7239 } else { 7240 while (prev != NULL && prev->dtpv_next != old) 7241 prev = prev->dtpv_next; 7242 7243 if (prev == NULL) { 7244 panic("attempt to unregister non-existent " 7245 "dtrace provider %p\n", (void *)id); 7246 } 7247 7248 prev->dtpv_next = old->dtpv_next; 7249 } 7250 7251 if (!self) { 7252 mutex_exit(&dtrace_lock); 7253 mutex_exit(&mod_lock); 7254 mutex_exit(&dtrace_provider_lock); 7255 } 7256 7257 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 7258 kmem_free(old, sizeof (dtrace_provider_t)); 7259 7260 return (0); 7261 } 7262 7263 /* 7264 * Invalidate the specified provider. All subsequent probe lookups for the 7265 * specified provider will fail, but its probes will not be removed. 7266 */ 7267 void 7268 dtrace_invalidate(dtrace_provider_id_t id) 7269 { 7270 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 7271 7272 ASSERT(pvp->dtpv_pops.dtps_enable != 7273 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop); 7274 7275 mutex_enter(&dtrace_provider_lock); 7276 mutex_enter(&dtrace_lock); 7277 7278 pvp->dtpv_defunct = dtrace_gethrtime(); 7279 7280 mutex_exit(&dtrace_lock); 7281 mutex_exit(&dtrace_provider_lock); 7282 } 7283 7284 /* 7285 * Indicate whether or not DTrace has attached. 7286 */ 7287 int 7288 dtrace_attached(void) 7289 { 7290 /* 7291 * dtrace_provider will be non-NULL iff the DTrace driver has 7292 * attached. (It's non-NULL because DTrace is always itself a 7293 * provider.) 
7294 */ 7295 return (dtrace_provider != NULL); 7296 } 7297 7298 /* 7299 * Remove all the unenabled probes for the given provider. This function is 7300 * not unlike dtrace_unregister(), except that it doesn't remove the provider 7301 * -- just as many of its associated probes as it can. 7302 */ 7303 int 7304 dtrace_condense(dtrace_provider_id_t id) 7305 { 7306 dtrace_provider_t *prov = (dtrace_provider_t *)id; 7307 int i; 7308 dtrace_probe_t *probe; 7309 7310 /* 7311 * Make sure this isn't the dtrace provider itself. 7312 */ 7313 ASSERT(prov->dtpv_pops.dtps_enable != 7314 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop); 7315 7316 mutex_enter(&dtrace_provider_lock); 7317 mutex_enter(&dtrace_lock); 7318 7319 /* 7320 * Attempt to destroy the probes associated with this provider. 7321 */ 7322 for (i = 0; i < dtrace_nprobes; i++) { 7323 if ((probe = dtrace_probes[i]) == NULL) 7324 continue; 7325 7326 if (probe->dtpr_provider != prov) 7327 continue; 7328 7329 if (probe->dtpr_ecb != NULL) 7330 continue; 7331 7332 dtrace_probes[i] = NULL; 7333 7334 dtrace_hash_remove(dtrace_bymod, probe); 7335 dtrace_hash_remove(dtrace_byfunc, probe); 7336 dtrace_hash_remove(dtrace_byname, probe); 7337 7338 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 7339 probe->dtpr_arg); 7340 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7341 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7342 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7343 kmem_free(probe, sizeof (dtrace_probe_t)); 7344 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 7345 } 7346 7347 mutex_exit(&dtrace_lock); 7348 mutex_exit(&dtrace_provider_lock); 7349 7350 return (0); 7351 } 7352 7353 /* 7354 * DTrace Probe Management Functions 7355 * 7356 * The functions in this section perform the DTrace probe management, 7357 * including functions to create probes, look-up probes, and call into the 7358 * providers to request that probes be provided. Some of these functions are 7359 * in the Provider-to-Framework API; these functions can be identified by the 7360 * fact that they are not declared "static". 7361 */ 7362 7363 /* 7364 * Create a probe with the specified module name, function name, and name. 
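 *
 * For example (purely illustrative; the provider id and strings are
 * hypothetical), a provider's dtps_provide() implementation might create
 * a probe like this:
 *
 *	(void) dtrace_probe_create(example_provider_id, "genunix",
 *	    "kmem_alloc", "entry", 0, NULL);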
7365 */ 7366 dtrace_id_t 7367 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 7368 const char *func, const char *name, int aframes, void *arg) 7369 { 7370 dtrace_probe_t *probe, **probes; 7371 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 7372 dtrace_id_t id; 7373 7374 if (provider == dtrace_provider) { 7375 ASSERT(MUTEX_HELD(&dtrace_lock)); 7376 } else { 7377 mutex_enter(&dtrace_lock); 7378 } 7379 7380 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 7381 VM_BESTFIT | VM_SLEEP); 7382 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 7383 7384 probe->dtpr_id = id; 7385 probe->dtpr_gen = dtrace_probegen++; 7386 probe->dtpr_mod = dtrace_strdup(mod); 7387 probe->dtpr_func = dtrace_strdup(func); 7388 probe->dtpr_name = dtrace_strdup(name); 7389 probe->dtpr_arg = arg; 7390 probe->dtpr_aframes = aframes; 7391 probe->dtpr_provider = provider; 7392 7393 dtrace_hash_add(dtrace_bymod, probe); 7394 dtrace_hash_add(dtrace_byfunc, probe); 7395 dtrace_hash_add(dtrace_byname, probe); 7396 7397 if (id - 1 >= dtrace_nprobes) { 7398 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 7399 size_t nsize = osize << 1; 7400 7401 if (nsize == 0) { 7402 ASSERT(osize == 0); 7403 ASSERT(dtrace_probes == NULL); 7404 nsize = sizeof (dtrace_probe_t *); 7405 } 7406 7407 probes = kmem_zalloc(nsize, KM_SLEEP); 7408 7409 if (dtrace_probes == NULL) { 7410 ASSERT(osize == 0); 7411 dtrace_probes = probes; 7412 dtrace_nprobes = 1; 7413 } else { 7414 dtrace_probe_t **oprobes = dtrace_probes; 7415 7416 bcopy(oprobes, probes, osize); 7417 dtrace_membar_producer(); 7418 dtrace_probes = probes; 7419 7420 dtrace_sync(); 7421 7422 /* 7423 * All CPUs are now seeing the new probes array; we can 7424 * safely free the old array. 7425 */ 7426 kmem_free(oprobes, osize); 7427 dtrace_nprobes <<= 1; 7428 } 7429 7430 ASSERT(id - 1 < dtrace_nprobes); 7431 } 7432 7433 ASSERT(dtrace_probes[id - 1] == NULL); 7434 dtrace_probes[id - 1] = probe; 7435 7436 if (provider != dtrace_provider) 7437 mutex_exit(&dtrace_lock); 7438 7439 return (id); 7440 } 7441 7442 static dtrace_probe_t * 7443 dtrace_probe_lookup_id(dtrace_id_t id) 7444 { 7445 ASSERT(MUTEX_HELD(&dtrace_lock)); 7446 7447 if (id == 0 || id > dtrace_nprobes) 7448 return (NULL); 7449 7450 return (dtrace_probes[id - 1]); 7451 } 7452 7453 static int 7454 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 7455 { 7456 *((dtrace_id_t *)arg) = probe->dtpr_id; 7457 7458 return (DTRACE_MATCH_DONE); 7459 } 7460 7461 /* 7462 * Look up a probe based on provider and one or more of module name, function 7463 * name and probe name. 7464 */ 7465 dtrace_id_t 7466 dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod, 7467 const char *func, const char *name) 7468 { 7469 dtrace_probekey_t pkey; 7470 dtrace_id_t id; 7471 int match; 7472 7473 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 7474 pkey.dtpk_pmatch = &dtrace_match_string; 7475 pkey.dtpk_mod = mod; 7476 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 7477 pkey.dtpk_func = func; 7478 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 7479 pkey.dtpk_name = name; 7480 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul; 7481 pkey.dtpk_id = DTRACE_IDNONE; 7482 7483 mutex_enter(&dtrace_lock); 7484 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, 7485 dtrace_probe_lookup_match, &id); 7486 mutex_exit(&dtrace_lock); 7487 7488 ASSERT(match == 1 || match == 0); 7489 return (match ? 
id : 0); 7490 } 7491 7492 /* 7493 * Returns the probe argument associated with the specified probe. 7494 */ 7495 void * 7496 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) 7497 { 7498 dtrace_probe_t *probe; 7499 void *rval = NULL; 7500 7501 mutex_enter(&dtrace_lock); 7502 7503 if ((probe = dtrace_probe_lookup_id(pid)) != NULL && 7504 probe->dtpr_provider == (dtrace_provider_t *)id) 7505 rval = probe->dtpr_arg; 7506 7507 mutex_exit(&dtrace_lock); 7508 7509 return (rval); 7510 } 7511 7512 /* 7513 * Copy a probe into a probe description. 7514 */ 7515 static void 7516 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) 7517 { 7518 bzero(pdp, sizeof (dtrace_probedesc_t)); 7519 pdp->dtpd_id = prp->dtpr_id; 7520 7521 (void) strncpy(pdp->dtpd_provider, 7522 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); 7523 7524 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); 7525 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); 7526 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); 7527 } 7528 7529 /* 7530 * Called to indicate that a probe -- or probes -- should be provided by a 7531 * specified provider. If the specified description is NULL, the provider will 7532 * be told to provide all of its probes. (This is done whenever a new 7533 * consumer comes along, or whenever a retained enabling is to be matched.) If 7534 * the specified description is non-NULL, the provider is given the 7535 * opportunity to dynamically provide the specified probe, allowing providers 7536 * to support the creation of probes on-the-fly. (So-called _autocreated_ 7537 * probes.) If the provider is NULL, the operations will be applied to all 7538 * providers; if the provider is non-NULL the operations will only be applied 7539 * to the specified provider. The dtrace_provider_lock must be held, and the 7540 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 7541 * will need to grab the dtrace_lock when it reenters the framework through 7542 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 7543 */ 7544 static void 7545 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 7546 { 7547 struct modctl *ctl; 7548 int all = 0; 7549 7550 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7551 7552 if (prv == NULL) { 7553 all = 1; 7554 prv = dtrace_provider; 7555 } 7556 7557 do { 7558 /* 7559 * First, call the blanket provide operation. 7560 */ 7561 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 7562 7563 /* 7564 * Now call the per-module provide operation. We will grab 7565 * mod_lock to prevent the list from being modified. Note 7566 * that this also prevents the mod_busy bits from changing. 7567 * (mod_busy can only be changed with mod_lock held.) 7568 */ 7569 mutex_enter(&mod_lock); 7570 7571 ctl = &modules; 7572 do { 7573 if (ctl->mod_busy || ctl->mod_mp == NULL) 7574 continue; 7575 7576 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 7577 7578 } while ((ctl = ctl->mod_next) != &modules); 7579 7580 mutex_exit(&mod_lock); 7581 } while (all && (prv = prv->dtpv_next) != NULL); 7582 } 7583 7584 /* 7585 * Iterate over each probe, and call the Framework-to-Provider API function 7586 * denoted by offs. 7587 */ 7588 static void 7589 dtrace_probe_foreach(uintptr_t offs) 7590 { 7591 dtrace_provider_t *prov; 7592 void (*func)(void *, dtrace_id_t, void *); 7593 dtrace_probe_t *probe; 7594 dtrace_icookie_t cookie; 7595 int i; 7596 7597 /* 7598 * We disable interrupts to walk through the probe array.
This is 7599 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 7600 * won't see stale data. 7601 */ 7602 cookie = dtrace_interrupt_disable(); 7603 7604 for (i = 0; i < dtrace_nprobes; i++) { 7605 if ((probe = dtrace_probes[i]) == NULL) 7606 continue; 7607 7608 if (probe->dtpr_ecb == NULL) { 7609 /* 7610 * This probe isn't enabled -- don't call the function. 7611 */ 7612 continue; 7613 } 7614 7615 prov = probe->dtpr_provider; 7616 func = *((void(**)(void *, dtrace_id_t, void *)) 7617 ((uintptr_t)&prov->dtpv_pops + offs)); 7618 7619 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 7620 } 7621 7622 dtrace_interrupt_enable(cookie); 7623 } 7624 7625 static int 7626 dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 7627 { 7628 dtrace_probekey_t pkey; 7629 uint32_t priv; 7630 uid_t uid; 7631 zoneid_t zoneid; 7632 7633 ASSERT(MUTEX_HELD(&dtrace_lock)); 7634 dtrace_ecb_create_cache = NULL; 7635 7636 if (desc == NULL) { 7637 /* 7638 * If we're passed a NULL description, we're being asked to 7639 * create an ECB with a NULL probe. 7640 */ 7641 (void) dtrace_ecb_create_enable(NULL, enab); 7642 return (0); 7643 } 7644 7645 dtrace_probekey(desc, &pkey); 7646 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 7647 &priv, &uid, &zoneid); 7648 7649 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 7650 enab)); 7651 } 7652 7653 /* 7654 * DTrace Helper Provider Functions 7655 */ 7656 static void 7657 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 7658 { 7659 attr->dtat_name = DOF_ATTR_NAME(dofattr); 7660 attr->dtat_data = DOF_ATTR_DATA(dofattr); 7661 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 7662 } 7663 7664 static void 7665 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 7666 const dof_provider_t *dofprov, char *strtab) 7667 { 7668 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 7669 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 7670 dofprov->dofpv_provattr); 7671 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 7672 dofprov->dofpv_modattr); 7673 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 7674 dofprov->dofpv_funcattr); 7675 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 7676 dofprov->dofpv_nameattr); 7677 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 7678 dofprov->dofpv_argsattr); 7679 } 7680 7681 static void 7682 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 7683 { 7684 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 7685 dof_hdr_t *dof = (dof_hdr_t *)daddr; 7686 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 7687 dof_provider_t *provider; 7688 dof_probe_t *probe; 7689 uint32_t *off, *enoff; 7690 uint8_t *arg; 7691 char *strtab; 7692 uint_t i, nprobes; 7693 dtrace_helper_provdesc_t dhpv; 7694 dtrace_helper_probedesc_t dhpb; 7695 dtrace_meta_t *meta = dtrace_meta_pid; 7696 dtrace_mops_t *mops = &meta->dtm_mops; 7697 void *parg; 7698 7699 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 7700 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7701 provider->dofpv_strtab * dof->dofh_secsize); 7702 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7703 provider->dofpv_probes * dof->dofh_secsize); 7704 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7705 provider->dofpv_prargs * dof->dofh_secsize); 7706 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7707 provider->dofpv_proffs * dof->dofh_secsize); 7708 7709 strtab = (char *)(uintptr_t)(daddr + 
str_sec->dofs_offset); 7710 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 7711 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 7712 enoff = NULL; 7713 7714 /* 7715 * See dtrace_helper_provider_validate(). 7716 */ 7717 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 7718 provider->dofpv_prenoffs != DOF_SECT_NONE) { 7719 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7720 provider->dofpv_prenoffs * dof->dofh_secsize); 7721 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 7722 } 7723 7724 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 7725 7726 /* 7727 * Create the provider. 7728 */ 7729 dtrace_dofprov2hprov(&dhpv, provider, strtab); 7730 7731 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 7732 return; 7733 7734 meta->dtm_count++; 7735 7736 /* 7737 * Create the probes. 7738 */ 7739 for (i = 0; i < nprobes; i++) { 7740 probe = (dof_probe_t *)(uintptr_t)(daddr + 7741 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 7742 7743 dhpb.dthpb_mod = dhp->dofhp_mod; 7744 dhpb.dthpb_func = strtab + probe->dofpr_func; 7745 dhpb.dthpb_name = strtab + probe->dofpr_name; 7746 dhpb.dthpb_base = probe->dofpr_addr; 7747 dhpb.dthpb_offs = off + probe->dofpr_offidx; 7748 dhpb.dthpb_noffs = probe->dofpr_noffs; 7749 if (enoff != NULL) { 7750 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 7751 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 7752 } else { 7753 dhpb.dthpb_enoffs = NULL; 7754 dhpb.dthpb_nenoffs = 0; 7755 } 7756 dhpb.dthpb_args = arg + probe->dofpr_argidx; 7757 dhpb.dthpb_nargc = probe->dofpr_nargc; 7758 dhpb.dthpb_xargc = probe->dofpr_xargc; 7759 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 7760 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 7761 7762 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 7763 } 7764 } 7765 7766 static void 7767 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 7768 { 7769 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 7770 dof_hdr_t *dof = (dof_hdr_t *)daddr; 7771 int i; 7772 7773 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 7774 7775 for (i = 0; i < dof->dofh_secnum; i++) { 7776 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 7777 dof->dofh_secoff + i * dof->dofh_secsize); 7778 7779 if (sec->dofs_type != DOF_SECT_PROVIDER) 7780 continue; 7781 7782 dtrace_helper_provide_one(dhp, sec, pid); 7783 } 7784 7785 /* 7786 * We may have just created probes, so we must now rematch against 7787 * any retained enablings. Note that this call will acquire both 7788 * cpu_lock and dtrace_lock; the fact that we are holding 7789 * dtrace_meta_lock now is what defines the ordering with respect to 7790 * these three locks. 7791 */ 7792 dtrace_enabling_matchall(); 7793 } 7794 7795 static void 7796 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 7797 { 7798 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 7799 dof_hdr_t *dof = (dof_hdr_t *)daddr; 7800 dof_sec_t *str_sec; 7801 dof_provider_t *provider; 7802 char *strtab; 7803 dtrace_helper_provdesc_t dhpv; 7804 dtrace_meta_t *meta = dtrace_meta_pid; 7805 dtrace_mops_t *mops = &meta->dtm_mops; 7806 7807 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 7808 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7809 provider->dofpv_strtab * dof->dofh_secsize); 7810 7811 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 7812 7813 /* 7814 * Create the provider. 
7815 */ 7816 dtrace_dofprov2hprov(&dhpv, provider, strtab); 7817 7818 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 7819 7820 meta->dtm_count--; 7821 } 7822 7823 static void 7824 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 7825 { 7826 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 7827 dof_hdr_t *dof = (dof_hdr_t *)daddr; 7828 int i; 7829 7830 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 7831 7832 for (i = 0; i < dof->dofh_secnum; i++) { 7833 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 7834 dof->dofh_secoff + i * dof->dofh_secsize); 7835 7836 if (sec->dofs_type != DOF_SECT_PROVIDER) 7837 continue; 7838 7839 dtrace_helper_provider_remove_one(dhp, sec, pid); 7840 } 7841 } 7842 7843 /* 7844 * DTrace Meta Provider-to-Framework API Functions 7845 * 7846 * These functions implement the Meta Provider-to-Framework API, as described 7847 * in <sys/dtrace.h>. 7848 */ 7849 int 7850 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 7851 dtrace_meta_provider_id_t *idp) 7852 { 7853 dtrace_meta_t *meta; 7854 dtrace_helpers_t *help, *next; 7855 int i; 7856 7857 *idp = DTRACE_METAPROVNONE; 7858 7859 /* 7860 * We strictly don't need the name, but we hold onto it for 7861 * debuggability. All hail error queues! 7862 */ 7863 if (name == NULL) { 7864 cmn_err(CE_WARN, "failed to register meta-provider: " 7865 "invalid name"); 7866 return (EINVAL); 7867 } 7868 7869 if (mops == NULL || 7870 mops->dtms_create_probe == NULL || 7871 mops->dtms_provide_pid == NULL || 7872 mops->dtms_remove_pid == NULL) { 7873 cmn_err(CE_WARN, "failed to register meta-provider %s: " 7874 "invalid ops", name); 7875 return (EINVAL); 7876 } 7877 7878 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 7879 meta->dtm_mops = *mops; 7880 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 7881 (void) strcpy(meta->dtm_name, name); 7882 meta->dtm_arg = arg; 7883 7884 mutex_enter(&dtrace_meta_lock); 7885 mutex_enter(&dtrace_lock); 7886 7887 if (dtrace_meta_pid != NULL) { 7888 mutex_exit(&dtrace_lock); 7889 mutex_exit(&dtrace_meta_lock); 7890 cmn_err(CE_WARN, "failed to register meta-provider %s: " 7891 "user-land meta-provider exists", name); 7892 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 7893 kmem_free(meta, sizeof (dtrace_meta_t)); 7894 return (EINVAL); 7895 } 7896 7897 dtrace_meta_pid = meta; 7898 *idp = (dtrace_meta_provider_id_t)meta; 7899 7900 /* 7901 * If there are providers and probes ready to go, pass them 7902 * off to the new meta provider now.
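 *
 * (These are helper providers whose DOF arrived before any meta
 * provider had registered; they were parked on dtrace_deferred_pid and
 * are replayed into the newly registered meta provider below.)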
7903 */ 7904 7905 help = dtrace_deferred_pid; 7906 dtrace_deferred_pid = NULL; 7907 7908 mutex_exit(&dtrace_lock); 7909 7910 while (help != NULL) { 7911 for (i = 0; i < help->dthps_nprovs; i++) { 7912 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 7913 help->dthps_pid); 7914 } 7915 7916 next = help->dthps_next; 7917 help->dthps_next = NULL; 7918 help->dthps_prev = NULL; 7919 help->dthps_deferred = 0; 7920 help = next; 7921 } 7922 7923 mutex_exit(&dtrace_meta_lock); 7924 7925 return (0); 7926 } 7927 7928 int 7929 dtrace_meta_unregister(dtrace_meta_provider_id_t id) 7930 { 7931 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 7932 7933 mutex_enter(&dtrace_meta_lock); 7934 mutex_enter(&dtrace_lock); 7935 7936 if (old == dtrace_meta_pid) { 7937 pp = &dtrace_meta_pid; 7938 } else { 7939 panic("attempt to unregister non-existent " 7940 "dtrace meta-provider %p\n", (void *)old); 7941 } 7942 7943 if (old->dtm_count != 0) { 7944 mutex_exit(&dtrace_lock); 7945 mutex_exit(&dtrace_meta_lock); 7946 return (EBUSY); 7947 } 7948 7949 *pp = NULL; 7950 7951 mutex_exit(&dtrace_lock); 7952 mutex_exit(&dtrace_meta_lock); 7953 7954 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 7955 kmem_free(old, sizeof (dtrace_meta_t)); 7956 7957 return (0); 7958 } 7959 7960 7961 /* 7962 * DTrace DIF Object Functions 7963 */ 7964 static int 7965 dtrace_difo_err(uint_t pc, const char *format, ...) 7966 { 7967 if (dtrace_err_verbose) { 7968 va_list alist; 7969 7970 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 7971 va_start(alist, format); 7972 (void) vuprintf(format, alist); 7973 va_end(alist); 7974 } 7975 7976 #ifdef DTRACE_ERRDEBUG 7977 dtrace_errdebug(format); 7978 #endif 7979 return (1); 7980 } 7981 7982 /* 7983 * Validate a DTrace DIF object by checking the IR instructions. The following 7984 * rules are currently enforced by dtrace_difo_validate(): 7985 * 7986 * 1. Each instruction must have a valid opcode 7987 * 2. Each register, string, variable, or subroutine reference must be valid 7988 * 3. No instruction can modify register %r0 (must be zero) 7989 * 4. All instruction reserved bits must be set to zero 7990 * 5. The last instruction must be a "ret" instruction 7991 * 6. All branch targets must reference a valid instruction _after_ the branch 7992 */ 7993 static int 7994 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 7995 cred_t *cr) 7996 { 7997 int err = 0, i; 7998 int (*efunc)(uint_t pc, const char *, ...) 
= dtrace_difo_err; 7999 int kcheckload; 8000 uint_t pc; 8001 8002 kcheckload = cr == NULL || 8003 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 8004 8005 dp->dtdo_destructive = 0; 8006 8007 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 8008 dif_instr_t instr = dp->dtdo_buf[pc]; 8009 8010 uint_t r1 = DIF_INSTR_R1(instr); 8011 uint_t r2 = DIF_INSTR_R2(instr); 8012 uint_t rd = DIF_INSTR_RD(instr); 8013 uint_t rs = DIF_INSTR_RS(instr); 8014 uint_t label = DIF_INSTR_LABEL(instr); 8015 uint_t v = DIF_INSTR_VAR(instr); 8016 uint_t subr = DIF_INSTR_SUBR(instr); 8017 uint_t type = DIF_INSTR_TYPE(instr); 8018 uint_t op = DIF_INSTR_OP(instr); 8019 8020 switch (op) { 8021 case DIF_OP_OR: 8022 case DIF_OP_XOR: 8023 case DIF_OP_AND: 8024 case DIF_OP_SLL: 8025 case DIF_OP_SRL: 8026 case DIF_OP_SRA: 8027 case DIF_OP_SUB: 8028 case DIF_OP_ADD: 8029 case DIF_OP_MUL: 8030 case DIF_OP_SDIV: 8031 case DIF_OP_UDIV: 8032 case DIF_OP_SREM: 8033 case DIF_OP_UREM: 8034 case DIF_OP_COPYS: 8035 if (r1 >= nregs) 8036 err += efunc(pc, "invalid register %u\n", r1); 8037 if (r2 >= nregs) 8038 err += efunc(pc, "invalid register %u\n", r2); 8039 if (rd >= nregs) 8040 err += efunc(pc, "invalid register %u\n", rd); 8041 if (rd == 0) 8042 err += efunc(pc, "cannot write to %r0\n"); 8043 break; 8044 case DIF_OP_NOT: 8045 case DIF_OP_MOV: 8046 case DIF_OP_ALLOCS: 8047 if (r1 >= nregs) 8048 err += efunc(pc, "invalid register %u\n", r1); 8049 if (r2 != 0) 8050 err += efunc(pc, "non-zero reserved bits\n"); 8051 if (rd >= nregs) 8052 err += efunc(pc, "invalid register %u\n", rd); 8053 if (rd == 0) 8054 err += efunc(pc, "cannot write to %r0\n"); 8055 break; 8056 case DIF_OP_LDSB: 8057 case DIF_OP_LDSH: 8058 case DIF_OP_LDSW: 8059 case DIF_OP_LDUB: 8060 case DIF_OP_LDUH: 8061 case DIF_OP_LDUW: 8062 case DIF_OP_LDX: 8063 if (r1 >= nregs) 8064 err += efunc(pc, "invalid register %u\n", r1); 8065 if (r2 != 0) 8066 err += efunc(pc, "non-zero reserved bits\n"); 8067 if (rd >= nregs) 8068 err += efunc(pc, "invalid register %u\n", rd); 8069 if (rd == 0) 8070 err += efunc(pc, "cannot write to %r0\n"); 8071 if (kcheckload) 8072 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 8073 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 8074 break; 8075 case DIF_OP_RLDSB: 8076 case DIF_OP_RLDSH: 8077 case DIF_OP_RLDSW: 8078 case DIF_OP_RLDUB: 8079 case DIF_OP_RLDUH: 8080 case DIF_OP_RLDUW: 8081 case DIF_OP_RLDX: 8082 if (r1 >= nregs) 8083 err += efunc(pc, "invalid register %u\n", r1); 8084 if (r2 != 0) 8085 err += efunc(pc, "non-zero reserved bits\n"); 8086 if (rd >= nregs) 8087 err += efunc(pc, "invalid register %u\n", rd); 8088 if (rd == 0) 8089 err += efunc(pc, "cannot write to %r0\n"); 8090 break; 8091 case DIF_OP_ULDSB: 8092 case DIF_OP_ULDSH: 8093 case DIF_OP_ULDSW: 8094 case DIF_OP_ULDUB: 8095 case DIF_OP_ULDUH: 8096 case DIF_OP_ULDUW: 8097 case DIF_OP_ULDX: 8098 if (r1 >= nregs) 8099 err += efunc(pc, "invalid register %u\n", r1); 8100 if (r2 != 0) 8101 err += efunc(pc, "non-zero reserved bits\n"); 8102 if (rd >= nregs) 8103 err += efunc(pc, "invalid register %u\n", rd); 8104 if (rd == 0) 8105 err += efunc(pc, "cannot write to %r0\n"); 8106 break; 8107 case DIF_OP_STB: 8108 case DIF_OP_STH: 8109 case DIF_OP_STW: 8110 case DIF_OP_STX: 8111 if (r1 >= nregs) 8112 err += efunc(pc, "invalid register %u\n", r1); 8113 if (r2 != 0) 8114 err += efunc(pc, "non-zero reserved bits\n"); 8115 if (rd >= nregs) 8116 err += efunc(pc, "invalid register %u\n", rd); 8117 if (rd == 0) 8118 err += efunc(pc, "cannot write to 0 address\n"); 8119 break; 8120 case 
DIF_OP_CMP: 8121 case DIF_OP_SCMP: 8122 if (r1 >= nregs) 8123 err += efunc(pc, "invalid register %u\n", r1); 8124 if (r2 >= nregs) 8125 err += efunc(pc, "invalid register %u\n", r2); 8126 if (rd != 0) 8127 err += efunc(pc, "non-zero reserved bits\n"); 8128 break; 8129 case DIF_OP_TST: 8130 if (r1 >= nregs) 8131 err += efunc(pc, "invalid register %u\n", r1); 8132 if (r2 != 0 || rd != 0) 8133 err += efunc(pc, "non-zero reserved bits\n"); 8134 break; 8135 case DIF_OP_BA: 8136 case DIF_OP_BE: 8137 case DIF_OP_BNE: 8138 case DIF_OP_BG: 8139 case DIF_OP_BGU: 8140 case DIF_OP_BGE: 8141 case DIF_OP_BGEU: 8142 case DIF_OP_BL: 8143 case DIF_OP_BLU: 8144 case DIF_OP_BLE: 8145 case DIF_OP_BLEU: 8146 if (label >= dp->dtdo_len) { 8147 err += efunc(pc, "invalid branch target %u\n", 8148 label); 8149 } 8150 if (label <= pc) { 8151 err += efunc(pc, "backward branch to %u\n", 8152 label); 8153 } 8154 break; 8155 case DIF_OP_RET: 8156 if (r1 != 0 || r2 != 0) 8157 err += efunc(pc, "non-zero reserved bits\n"); 8158 if (rd >= nregs) 8159 err += efunc(pc, "invalid register %u\n", rd); 8160 break; 8161 case DIF_OP_NOP: 8162 case DIF_OP_POPTS: 8163 case DIF_OP_FLUSHTS: 8164 if (r1 != 0 || r2 != 0 || rd != 0) 8165 err += efunc(pc, "non-zero reserved bits\n"); 8166 break; 8167 case DIF_OP_SETX: 8168 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 8169 err += efunc(pc, "invalid integer ref %u\n", 8170 DIF_INSTR_INTEGER(instr)); 8171 } 8172 if (rd >= nregs) 8173 err += efunc(pc, "invalid register %u\n", rd); 8174 if (rd == 0) 8175 err += efunc(pc, "cannot write to %r0\n"); 8176 break; 8177 case DIF_OP_SETS: 8178 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 8179 err += efunc(pc, "invalid string ref %u\n", 8180 DIF_INSTR_STRING(instr)); 8181 } 8182 if (rd >= nregs) 8183 err += efunc(pc, "invalid register %u\n", rd); 8184 if (rd == 0) 8185 err += efunc(pc, "cannot write to %r0\n"); 8186 break; 8187 case DIF_OP_LDGA: 8188 case DIF_OP_LDTA: 8189 if (r1 > DIF_VAR_ARRAY_MAX) 8190 err += efunc(pc, "invalid array %u\n", r1); 8191 if (r2 >= nregs) 8192 err += efunc(pc, "invalid register %u\n", r2); 8193 if (rd >= nregs) 8194 err += efunc(pc, "invalid register %u\n", rd); 8195 if (rd == 0) 8196 err += efunc(pc, "cannot write to %r0\n"); 8197 break; 8198 case DIF_OP_LDGS: 8199 case DIF_OP_LDTS: 8200 case DIF_OP_LDLS: 8201 case DIF_OP_LDGAA: 8202 case DIF_OP_LDTAA: 8203 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 8204 err += efunc(pc, "invalid variable %u\n", v); 8205 if (rd >= nregs) 8206 err += efunc(pc, "invalid register %u\n", rd); 8207 if (rd == 0) 8208 err += efunc(pc, "cannot write to %r0\n"); 8209 break; 8210 case DIF_OP_STGS: 8211 case DIF_OP_STTS: 8212 case DIF_OP_STLS: 8213 case DIF_OP_STGAA: 8214 case DIF_OP_STTAA: 8215 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 8216 err += efunc(pc, "invalid variable %u\n", v); 8217 if (rs >= nregs) 8218 err += efunc(pc, "invalid register %u\n", rs); 8219 break; 8220 case DIF_OP_CALL: 8221 if (subr > DIF_SUBR_MAX) 8222 err += efunc(pc, "invalid subr %u\n", subr); 8223 if (rd >= nregs) 8224 err += efunc(pc, "invalid register %u\n", rd); 8225 if (rd == 0) 8226 err += efunc(pc, "cannot write to %r0\n"); 8227 8228 if (subr == DIF_SUBR_COPYOUT || 8229 subr == DIF_SUBR_COPYOUTSTR) { 8230 dp->dtdo_destructive = 1; 8231 } 8232 break; 8233 case DIF_OP_PUSHTR: 8234 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 8235 err += efunc(pc, "invalid ref type %u\n", type); 8236 if (r2 >= nregs) 8237 err += efunc(pc, "invalid register %u\n", r2); 8238 if (rs >= nregs)
8239 err += efunc(pc, "invalid register %u\n", rs); 8240 break; 8241 case DIF_OP_PUSHTV: 8242 if (type != DIF_TYPE_CTF) 8243 err += efunc(pc, "invalid val type %u\n", type); 8244 if (r2 >= nregs) 8245 err += efunc(pc, "invalid register %u\n", r2); 8246 if (rs >= nregs) 8247 err += efunc(pc, "invalid register %u\n", rs); 8248 break; 8249 default: 8250 err += efunc(pc, "invalid opcode %u\n", 8251 DIF_INSTR_OP(instr)); 8252 } 8253 } 8254 8255 if (dp->dtdo_len != 0 && 8256 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 8257 err += efunc(dp->dtdo_len - 1, 8258 "expected 'ret' as last DIF instruction\n"); 8259 } 8260 8261 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 8262 /* 8263 * If we're not returning by reference, the size must be either 8264 * 0 or the size of one of the base types. 8265 */ 8266 switch (dp->dtdo_rtype.dtdt_size) { 8267 case 0: 8268 case sizeof (uint8_t): 8269 case sizeof (uint16_t): 8270 case sizeof (uint32_t): 8271 case sizeof (uint64_t): 8272 break; 8273 8274 default: 8275 err += efunc(dp->dtdo_len - 1, "bad return size\n"); 8276 } 8277 } 8278 8279 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 8280 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 8281 dtrace_diftype_t *vt, *et; 8282 uint_t id, ndx; 8283 8284 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 8285 v->dtdv_scope != DIFV_SCOPE_THREAD && 8286 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 8287 err += efunc(i, "unrecognized variable scope %d\n", 8288 v->dtdv_scope); 8289 break; 8290 } 8291 8292 if (v->dtdv_kind != DIFV_KIND_ARRAY && 8293 v->dtdv_kind != DIFV_KIND_SCALAR) { 8294 err += efunc(i, "unrecognized variable type %d\n", 8295 v->dtdv_kind); 8296 break; 8297 } 8298 8299 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 8300 err += efunc(i, "%d exceeds variable id limit\n", id); 8301 break; 8302 } 8303 8304 if (id < DIF_VAR_OTHER_UBASE) 8305 continue; 8306 8307 /* 8308 * For user-defined variables, we need to check that this 8309 * definition is identical to any previous definition that we 8310 * encountered. 
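 *
 * As an illustration (a hypothetical case, not one the D compiler would
 * ordinarily emit): if an earlier DIFO in this vstate defined a user
 * variable as a by-ref string and this DIFO defines the same variable
 * id in the same scope as a plain scalar, the kind, type-flag and size
 * comparisons below will catch the mismatch and fail validation.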
8311 */ 8312 ndx = id - DIF_VAR_OTHER_UBASE; 8313 8314 switch (v->dtdv_scope) { 8315 case DIFV_SCOPE_GLOBAL: 8316 if (ndx < vstate->dtvs_nglobals) { 8317 dtrace_statvar_t *svar; 8318 8319 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 8320 existing = &svar->dtsv_var; 8321 } 8322 8323 break; 8324 8325 case DIFV_SCOPE_THREAD: 8326 if (ndx < vstate->dtvs_ntlocals) 8327 existing = &vstate->dtvs_tlocals[ndx]; 8328 break; 8329 8330 case DIFV_SCOPE_LOCAL: 8331 if (ndx < vstate->dtvs_nlocals) { 8332 dtrace_statvar_t *svar; 8333 8334 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 8335 existing = &svar->dtsv_var; 8336 } 8337 8338 break; 8339 } 8340 8341 vt = &v->dtdv_type; 8342 8343 if (vt->dtdt_flags & DIF_TF_BYREF) { 8344 if (vt->dtdt_size == 0) { 8345 err += efunc(i, "zero-sized variable\n"); 8346 break; 8347 } 8348 8349 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL && 8350 vt->dtdt_size > dtrace_global_maxsize) { 8351 err += efunc(i, "oversized by-ref global\n"); 8352 break; 8353 } 8354 } 8355 8356 if (existing == NULL || existing->dtdv_id == 0) 8357 continue; 8358 8359 ASSERT(existing->dtdv_id == v->dtdv_id); 8360 ASSERT(existing->dtdv_scope == v->dtdv_scope); 8361 8362 if (existing->dtdv_kind != v->dtdv_kind) 8363 err += efunc(i, "%d changed variable kind\n", id); 8364 8365 et = &existing->dtdv_type; 8366 8367 if (vt->dtdt_flags != et->dtdt_flags) { 8368 err += efunc(i, "%d changed variable type flags\n", id); 8369 break; 8370 } 8371 8372 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 8373 err += efunc(i, "%d changed variable type size\n", id); 8374 break; 8375 } 8376 } 8377 8378 return (err); 8379 } 8380 8381 /* 8382 * Validate a DTrace DIF object that it is to be used as a helper. Helpers 8383 * are much more constrained than normal DIFOs. Specifically, they may 8384 * not: 8385 * 8386 * 1. Make calls to subroutines other than copyin(), copyinstr() or 8387 * miscellaneous string routines 8388 * 2. Access DTrace variables other than the args[] array, and the 8389 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables. 8390 * 3. Have thread-local variables. 8391 * 4. Have dynamic variables. 8392 */ 8393 static int 8394 dtrace_difo_validate_helper(dtrace_difo_t *dp) 8395 { 8396 int (*efunc)(uint_t pc, const char *, ...) 
= dtrace_difo_err; 8397 int err = 0; 8398 uint_t pc; 8399 8400 for (pc = 0; pc < dp->dtdo_len; pc++) { 8401 dif_instr_t instr = dp->dtdo_buf[pc]; 8402 8403 uint_t v = DIF_INSTR_VAR(instr); 8404 uint_t subr = DIF_INSTR_SUBR(instr); 8405 uint_t op = DIF_INSTR_OP(instr); 8406 8407 switch (op) { 8408 case DIF_OP_OR: 8409 case DIF_OP_XOR: 8410 case DIF_OP_AND: 8411 case DIF_OP_SLL: 8412 case DIF_OP_SRL: 8413 case DIF_OP_SRA: 8414 case DIF_OP_SUB: 8415 case DIF_OP_ADD: 8416 case DIF_OP_MUL: 8417 case DIF_OP_SDIV: 8418 case DIF_OP_UDIV: 8419 case DIF_OP_SREM: 8420 case DIF_OP_UREM: 8421 case DIF_OP_COPYS: 8422 case DIF_OP_NOT: 8423 case DIF_OP_MOV: 8424 case DIF_OP_RLDSB: 8425 case DIF_OP_RLDSH: 8426 case DIF_OP_RLDSW: 8427 case DIF_OP_RLDUB: 8428 case DIF_OP_RLDUH: 8429 case DIF_OP_RLDUW: 8430 case DIF_OP_RLDX: 8431 case DIF_OP_ULDSB: 8432 case DIF_OP_ULDSH: 8433 case DIF_OP_ULDSW: 8434 case DIF_OP_ULDUB: 8435 case DIF_OP_ULDUH: 8436 case DIF_OP_ULDUW: 8437 case DIF_OP_ULDX: 8438 case DIF_OP_STB: 8439 case DIF_OP_STH: 8440 case DIF_OP_STW: 8441 case DIF_OP_STX: 8442 case DIF_OP_ALLOCS: 8443 case DIF_OP_CMP: 8444 case DIF_OP_SCMP: 8445 case DIF_OP_TST: 8446 case DIF_OP_BA: 8447 case DIF_OP_BE: 8448 case DIF_OP_BNE: 8449 case DIF_OP_BG: 8450 case DIF_OP_BGU: 8451 case DIF_OP_BGE: 8452 case DIF_OP_BGEU: 8453 case DIF_OP_BL: 8454 case DIF_OP_BLU: 8455 case DIF_OP_BLE: 8456 case DIF_OP_BLEU: 8457 case DIF_OP_RET: 8458 case DIF_OP_NOP: 8459 case DIF_OP_POPTS: 8460 case DIF_OP_FLUSHTS: 8461 case DIF_OP_SETX: 8462 case DIF_OP_SETS: 8463 case DIF_OP_LDGA: 8464 case DIF_OP_LDLS: 8465 case DIF_OP_STGS: 8466 case DIF_OP_STLS: 8467 case DIF_OP_PUSHTR: 8468 case DIF_OP_PUSHTV: 8469 break; 8470 8471 case DIF_OP_LDGS: 8472 if (v >= DIF_VAR_OTHER_UBASE) 8473 break; 8474 8475 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 8476 break; 8477 8478 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 8479 v == DIF_VAR_PPID || v == DIF_VAR_TID || 8480 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 8481 v == DIF_VAR_UID || v == DIF_VAR_GID) 8482 break; 8483 8484 err += efunc(pc, "illegal variable %u\n", v); 8485 break; 8486 8487 case DIF_OP_LDTA: 8488 case DIF_OP_LDTS: 8489 case DIF_OP_LDGAA: 8490 case DIF_OP_LDTAA: 8491 err += efunc(pc, "illegal dynamic variable load\n"); 8492 break; 8493 8494 case DIF_OP_STTS: 8495 case DIF_OP_STGAA: 8496 case DIF_OP_STTAA: 8497 err += efunc(pc, "illegal dynamic variable store\n"); 8498 break; 8499 8500 case DIF_OP_CALL: 8501 if (subr == DIF_SUBR_ALLOCA || 8502 subr == DIF_SUBR_BCOPY || 8503 subr == DIF_SUBR_COPYIN || 8504 subr == DIF_SUBR_COPYINTO || 8505 subr == DIF_SUBR_COPYINSTR || 8506 subr == DIF_SUBR_INDEX || 8507 subr == DIF_SUBR_INET_NTOA || 8508 subr == DIF_SUBR_INET_NTOA6 || 8509 subr == DIF_SUBR_INET_NTOP || 8510 subr == DIF_SUBR_LLTOSTR || 8511 subr == DIF_SUBR_RINDEX || 8512 subr == DIF_SUBR_STRCHR || 8513 subr == DIF_SUBR_STRJOIN || 8514 subr == DIF_SUBR_STRRCHR || 8515 subr == DIF_SUBR_STRSTR || 8516 subr == DIF_SUBR_HTONS || 8517 subr == DIF_SUBR_HTONL || 8518 subr == DIF_SUBR_HTONLL || 8519 subr == DIF_SUBR_NTOHS || 8520 subr == DIF_SUBR_NTOHL || 8521 subr == DIF_SUBR_NTOHLL) 8522 break; 8523 8524 err += efunc(pc, "invalid subr %u\n", subr); 8525 break; 8526 8527 default: 8528 err += efunc(pc, "invalid opcode %u\n", 8529 DIF_INSTR_OP(instr)); 8530 } 8531 } 8532 8533 return (err); 8534 } 8535 8536 /* 8537 * Returns 1 if the expression in the DIF object can be cached on a per-thread 8538 * basis; 0 if not. 
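 *
 * For example, a predicate such as /execname == "foo"/ refers only to
 * DIF_VAR_EXECNAME and performs no memory loads, so its result may be
 * cached for a given thread; a predicate that depends on probe
 * arguments or on loaded memory must be re-evaluated on every firing,
 * and the checks below reject it.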
8539 */ 8540 static int 8541 dtrace_difo_cacheable(dtrace_difo_t *dp) 8542 { 8543 int i; 8544 8545 if (dp == NULL) 8546 return (0); 8547 8548 for (i = 0; i < dp->dtdo_varlen; i++) { 8549 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8550 8551 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 8552 continue; 8553 8554 switch (v->dtdv_id) { 8555 case DIF_VAR_CURTHREAD: 8556 case DIF_VAR_PID: 8557 case DIF_VAR_TID: 8558 case DIF_VAR_EXECNAME: 8559 case DIF_VAR_ZONENAME: 8560 break; 8561 8562 default: 8563 return (0); 8564 } 8565 } 8566 8567 /* 8568 * This DIF object may be cacheable. Now we need to look for any 8569 * array loading instructions, any memory loading instructions, or 8570 * any stores to thread-local variables. 8571 */ 8572 for (i = 0; i < dp->dtdo_len; i++) { 8573 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 8574 8575 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 8576 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 8577 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 8578 op == DIF_OP_LDGA || op == DIF_OP_STTS) 8579 return (0); 8580 } 8581 8582 return (1); 8583 } 8584 8585 static void 8586 dtrace_difo_hold(dtrace_difo_t *dp) 8587 { 8588 int i; 8589 8590 ASSERT(MUTEX_HELD(&dtrace_lock)); 8591 8592 dp->dtdo_refcnt++; 8593 ASSERT(dp->dtdo_refcnt != 0); 8594 8595 /* 8596 * We need to check this DIF object for references to the variable 8597 * DIF_VAR_VTIMESTAMP. 8598 */ 8599 for (i = 0; i < dp->dtdo_varlen; i++) { 8600 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8601 8602 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 8603 continue; 8604 8605 if (dtrace_vtime_references++ == 0) 8606 dtrace_vtime_enable(); 8607 } 8608 } 8609 8610 /* 8611 * This routine calculates the dynamic variable chunksize for a given DIF 8612 * object. The calculation is not fool-proof, and can probably be tricked by 8613 * malicious DIF -- but it works for all compiler-generated DIF. Because this 8614 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 8615 * if a dynamic variable size exceeds the chunksize. 
8616 */ 8617 static void 8618 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8619 { 8620 uint64_t sval; 8621 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 8622 const dif_instr_t *text = dp->dtdo_buf; 8623 uint_t pc, srd = 0; 8624 uint_t ttop = 0; 8625 size_t size, ksize; 8626 uint_t id, i; 8627 8628 for (pc = 0; pc < dp->dtdo_len; pc++) { 8629 dif_instr_t instr = text[pc]; 8630 uint_t op = DIF_INSTR_OP(instr); 8631 uint_t rd = DIF_INSTR_RD(instr); 8632 uint_t r1 = DIF_INSTR_R1(instr); 8633 uint_t nkeys = 0; 8634 uchar_t scope; 8635 8636 dtrace_key_t *key = tupregs; 8637 8638 switch (op) { 8639 case DIF_OP_SETX: 8640 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 8641 srd = rd; 8642 continue; 8643 8644 case DIF_OP_STTS: 8645 key = &tupregs[DIF_DTR_NREGS]; 8646 key[0].dttk_size = 0; 8647 key[1].dttk_size = 0; 8648 nkeys = 2; 8649 scope = DIFV_SCOPE_THREAD; 8650 break; 8651 8652 case DIF_OP_STGAA: 8653 case DIF_OP_STTAA: 8654 nkeys = ttop; 8655 8656 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 8657 key[nkeys++].dttk_size = 0; 8658 8659 key[nkeys++].dttk_size = 0; 8660 8661 if (op == DIF_OP_STTAA) { 8662 scope = DIFV_SCOPE_THREAD; 8663 } else { 8664 scope = DIFV_SCOPE_GLOBAL; 8665 } 8666 8667 break; 8668 8669 case DIF_OP_PUSHTR: 8670 if (ttop == DIF_DTR_NREGS) 8671 return; 8672 8673 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 8674 /* 8675 * If the register for the size of the "pushtr" 8676 * is %r0 (or the value is 0) and the type is 8677 * a string, we'll use the system-wide default 8678 * string size. 8679 */ 8680 tupregs[ttop++].dttk_size = 8681 dtrace_strsize_default; 8682 } else { 8683 if (srd == 0) 8684 return; 8685 8686 tupregs[ttop++].dttk_size = sval; 8687 } 8688 8689 break; 8690 8691 case DIF_OP_PUSHTV: 8692 if (ttop == DIF_DTR_NREGS) 8693 return; 8694 8695 tupregs[ttop++].dttk_size = 0; 8696 break; 8697 8698 case DIF_OP_FLUSHTS: 8699 ttop = 0; 8700 break; 8701 8702 case DIF_OP_POPTS: 8703 if (ttop != 0) 8704 ttop--; 8705 break; 8706 } 8707 8708 sval = 0; 8709 srd = 0; 8710 8711 if (nkeys == 0) 8712 continue; 8713 8714 /* 8715 * We have a dynamic variable allocation; calculate its size. 8716 */ 8717 for (ksize = 0, i = 0; i < nkeys; i++) 8718 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 8719 8720 size = sizeof (dtrace_dynvar_t); 8721 size += sizeof (dtrace_key_t) * (nkeys - 1); 8722 size += ksize; 8723 8724 /* 8725 * Now we need to determine the size of the stored data. 8726 */ 8727 id = DIF_INSTR_VAR(instr); 8728 8729 for (i = 0; i < dp->dtdo_varlen; i++) { 8730 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8731 8732 if (v->dtdv_id == id && v->dtdv_scope == scope) { 8733 size += v->dtdv_type.dtdt_size; 8734 break; 8735 } 8736 } 8737 8738 if (i == dp->dtdo_varlen) 8739 return; 8740 8741 /* 8742 * We have the size. If this is larger than the chunk size 8743 * for our dynamic variable state, reset the chunk size. 
8744 */ 8745 size = P2ROUNDUP(size, sizeof (uint64_t)); 8746 8747 if (size > vstate->dtvs_dynvars.dtds_chunksize) 8748 vstate->dtvs_dynvars.dtds_chunksize = size; 8749 } 8750 } 8751 8752 static void 8753 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8754 { 8755 int i, oldsvars, osz, nsz, otlocals, ntlocals; 8756 uint_t id; 8757 8758 ASSERT(MUTEX_HELD(&dtrace_lock)); 8759 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 8760 8761 for (i = 0; i < dp->dtdo_varlen; i++) { 8762 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8763 dtrace_statvar_t *svar, ***svarp; 8764 size_t dsize = 0; 8765 uint8_t scope = v->dtdv_scope; 8766 int *np; 8767 8768 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 8769 continue; 8770 8771 id -= DIF_VAR_OTHER_UBASE; 8772 8773 switch (scope) { 8774 case DIFV_SCOPE_THREAD: 8775 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 8776 dtrace_difv_t *tlocals; 8777 8778 if ((ntlocals = (otlocals << 1)) == 0) 8779 ntlocals = 1; 8780 8781 osz = otlocals * sizeof (dtrace_difv_t); 8782 nsz = ntlocals * sizeof (dtrace_difv_t); 8783 8784 tlocals = kmem_zalloc(nsz, KM_SLEEP); 8785 8786 if (osz != 0) { 8787 bcopy(vstate->dtvs_tlocals, 8788 tlocals, osz); 8789 kmem_free(vstate->dtvs_tlocals, osz); 8790 } 8791 8792 vstate->dtvs_tlocals = tlocals; 8793 vstate->dtvs_ntlocals = ntlocals; 8794 } 8795 8796 vstate->dtvs_tlocals[id] = *v; 8797 continue; 8798 8799 case DIFV_SCOPE_LOCAL: 8800 np = &vstate->dtvs_nlocals; 8801 svarp = &vstate->dtvs_locals; 8802 8803 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 8804 dsize = NCPU * (v->dtdv_type.dtdt_size + 8805 sizeof (uint64_t)); 8806 else 8807 dsize = NCPU * sizeof (uint64_t); 8808 8809 break; 8810 8811 case DIFV_SCOPE_GLOBAL: 8812 np = &vstate->dtvs_nglobals; 8813 svarp = &vstate->dtvs_globals; 8814 8815 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 8816 dsize = v->dtdv_type.dtdt_size + 8817 sizeof (uint64_t); 8818 8819 break; 8820 8821 default: 8822 ASSERT(0); 8823 } 8824 8825 while (id >= (oldsvars = *np)) { 8826 dtrace_statvar_t **statics; 8827 int newsvars, oldsize, newsize; 8828 8829 if ((newsvars = (oldsvars << 1)) == 0) 8830 newsvars = 1; 8831 8832 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 8833 newsize = newsvars * sizeof (dtrace_statvar_t *); 8834 8835 statics = kmem_zalloc(newsize, KM_SLEEP); 8836 8837 if (oldsize != 0) { 8838 bcopy(*svarp, statics, oldsize); 8839 kmem_free(*svarp, oldsize); 8840 } 8841 8842 *svarp = statics; 8843 *np = newsvars; 8844 } 8845 8846 if ((svar = (*svarp)[id]) == NULL) { 8847 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 8848 svar->dtsv_var = *v; 8849 8850 if ((svar->dtsv_size = dsize) != 0) { 8851 svar->dtsv_data = (uint64_t)(uintptr_t) 8852 kmem_zalloc(dsize, KM_SLEEP); 8853 } 8854 8855 (*svarp)[id] = svar; 8856 } 8857 8858 svar->dtsv_refcnt++; 8859 } 8860 8861 dtrace_difo_chunksize(dp, vstate); 8862 dtrace_difo_hold(dp); 8863 } 8864 8865 static dtrace_difo_t * 8866 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8867 { 8868 dtrace_difo_t *new; 8869 size_t sz; 8870 8871 ASSERT(dp->dtdo_buf != NULL); 8872 ASSERT(dp->dtdo_refcnt != 0); 8873 8874 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 8875 8876 ASSERT(dp->dtdo_buf != NULL); 8877 sz = dp->dtdo_len * sizeof (dif_instr_t); 8878 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 8879 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 8880 new->dtdo_len = dp->dtdo_len; 8881 8882 if (dp->dtdo_strtab != NULL) { 8883 ASSERT(dp->dtdo_strlen != 0); 8884 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 8885 
bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 8886 new->dtdo_strlen = dp->dtdo_strlen; 8887 } 8888 8889 if (dp->dtdo_inttab != NULL) { 8890 ASSERT(dp->dtdo_intlen != 0); 8891 sz = dp->dtdo_intlen * sizeof (uint64_t); 8892 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 8893 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 8894 new->dtdo_intlen = dp->dtdo_intlen; 8895 } 8896 8897 if (dp->dtdo_vartab != NULL) { 8898 ASSERT(dp->dtdo_varlen != 0); 8899 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 8900 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 8901 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 8902 new->dtdo_varlen = dp->dtdo_varlen; 8903 } 8904 8905 dtrace_difo_init(new, vstate); 8906 return (new); 8907 } 8908 8909 static void 8910 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8911 { 8912 int i; 8913 8914 ASSERT(dp->dtdo_refcnt == 0); 8915 8916 for (i = 0; i < dp->dtdo_varlen; i++) { 8917 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8918 dtrace_statvar_t *svar, **svarp; 8919 uint_t id; 8920 uint8_t scope = v->dtdv_scope; 8921 int *np; 8922 8923 switch (scope) { 8924 case DIFV_SCOPE_THREAD: 8925 continue; 8926 8927 case DIFV_SCOPE_LOCAL: 8928 np = &vstate->dtvs_nlocals; 8929 svarp = vstate->dtvs_locals; 8930 break; 8931 8932 case DIFV_SCOPE_GLOBAL: 8933 np = &vstate->dtvs_nglobals; 8934 svarp = vstate->dtvs_globals; 8935 break; 8936 8937 default: 8938 ASSERT(0); 8939 } 8940 8941 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 8942 continue; 8943 8944 id -= DIF_VAR_OTHER_UBASE; 8945 ASSERT(id < *np); 8946 8947 svar = svarp[id]; 8948 ASSERT(svar != NULL); 8949 ASSERT(svar->dtsv_refcnt > 0); 8950 8951 if (--svar->dtsv_refcnt > 0) 8952 continue; 8953 8954 if (svar->dtsv_size != 0) { 8955 ASSERT(svar->dtsv_data != NULL); 8956 kmem_free((void *)(uintptr_t)svar->dtsv_data, 8957 svar->dtsv_size); 8958 } 8959 8960 kmem_free(svar, sizeof (dtrace_statvar_t)); 8961 svarp[id] = NULL; 8962 } 8963 8964 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 8965 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 8966 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 8967 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 8968 8969 kmem_free(dp, sizeof (dtrace_difo_t)); 8970 } 8971 8972 static void 8973 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8974 { 8975 int i; 8976 8977 ASSERT(MUTEX_HELD(&dtrace_lock)); 8978 ASSERT(dp->dtdo_refcnt != 0); 8979 8980 for (i = 0; i < dp->dtdo_varlen; i++) { 8981 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8982 8983 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 8984 continue; 8985 8986 ASSERT(dtrace_vtime_references > 0); 8987 if (--dtrace_vtime_references == 0) 8988 dtrace_vtime_disable(); 8989 } 8990 8991 if (--dp->dtdo_refcnt == 0) 8992 dtrace_difo_destroy(dp, vstate); 8993 } 8994 8995 /* 8996 * DTrace Format Functions 8997 */ 8998 static uint16_t 8999 dtrace_format_add(dtrace_state_t *state, char *str) 9000 { 9001 char *fmt, **new; 9002 uint16_t ndx, len = strlen(str) + 1; 9003 9004 fmt = kmem_zalloc(len, KM_SLEEP); 9005 bcopy(str, fmt, len); 9006 9007 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 9008 if (state->dts_formats[ndx] == NULL) { 9009 state->dts_formats[ndx] = fmt; 9010 return (ndx + 1); 9011 } 9012 } 9013 9014 if (state->dts_nformats == USHRT_MAX) { 9015 /* 9016 * This is only likely if a denial-of-service attack is being 9017 * attempted. As such, it's okay to fail silently here. 
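 * Returning 0 is a safe way to do that: format handles are 1-based
 * (note the "ndx + 1" return values in this function), so 0 can never
 * be confused with a valid handle.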
9018 */ 9019 kmem_free(fmt, len); 9020 return (0); 9021 } 9022 9023 /* 9024 * For simplicity, we always resize the formats array to be exactly the 9025 * number of formats. 9026 */ 9027 ndx = state->dts_nformats++; 9028 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 9029 9030 if (state->dts_formats != NULL) { 9031 ASSERT(ndx != 0); 9032 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 9033 kmem_free(state->dts_formats, ndx * sizeof (char *)); 9034 } 9035 9036 state->dts_formats = new; 9037 state->dts_formats[ndx] = fmt; 9038 9039 return (ndx + 1); 9040 } 9041 9042 static void 9043 dtrace_format_remove(dtrace_state_t *state, uint16_t format) 9044 { 9045 char *fmt; 9046 9047 ASSERT(state->dts_formats != NULL); 9048 ASSERT(format <= state->dts_nformats); 9049 ASSERT(state->dts_formats[format - 1] != NULL); 9050 9051 fmt = state->dts_formats[format - 1]; 9052 kmem_free(fmt, strlen(fmt) + 1); 9053 state->dts_formats[format - 1] = NULL; 9054 } 9055 9056 static void 9057 dtrace_format_destroy(dtrace_state_t *state) 9058 { 9059 int i; 9060 9061 if (state->dts_nformats == 0) { 9062 ASSERT(state->dts_formats == NULL); 9063 return; 9064 } 9065 9066 ASSERT(state->dts_formats != NULL); 9067 9068 for (i = 0; i < state->dts_nformats; i++) { 9069 char *fmt = state->dts_formats[i]; 9070 9071 if (fmt == NULL) 9072 continue; 9073 9074 kmem_free(fmt, strlen(fmt) + 1); 9075 } 9076 9077 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 9078 state->dts_nformats = 0; 9079 state->dts_formats = NULL; 9080 } 9081 9082 /* 9083 * DTrace Predicate Functions 9084 */ 9085 static dtrace_predicate_t * 9086 dtrace_predicate_create(dtrace_difo_t *dp) 9087 { 9088 dtrace_predicate_t *pred; 9089 9090 ASSERT(MUTEX_HELD(&dtrace_lock)); 9091 ASSERT(dp->dtdo_refcnt != 0); 9092 9093 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 9094 pred->dtp_difo = dp; 9095 pred->dtp_refcnt = 1; 9096 9097 if (!dtrace_difo_cacheable(dp)) 9098 return (pred); 9099 9100 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 9101 /* 9102 * This is only theoretically possible -- we have had 2^32 9103 * cacheable predicates on this machine. We cannot allow any 9104 * more predicates to become cacheable: as unlikely as it is, 9105 * there may be a thread caching a (now stale) predicate cache 9106 * ID. 
(N.B.: the temptation is being successfully resisted to 9107 * have this cmn_err() "Holy shit -- we executed this code!") 9108 */ 9109 return (pred); 9110 } 9111 9112 pred->dtp_cacheid = dtrace_predcache_id++; 9113 9114 return (pred); 9115 } 9116 9117 static void 9118 dtrace_predicate_hold(dtrace_predicate_t *pred) 9119 { 9120 ASSERT(MUTEX_HELD(&dtrace_lock)); 9121 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 9122 ASSERT(pred->dtp_refcnt > 0); 9123 9124 pred->dtp_refcnt++; 9125 } 9126 9127 static void 9128 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 9129 { 9130 dtrace_difo_t *dp = pred->dtp_difo; 9131 9132 ASSERT(MUTEX_HELD(&dtrace_lock)); 9133 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 9134 ASSERT(pred->dtp_refcnt > 0); 9135 9136 if (--pred->dtp_refcnt == 0) { 9137 dtrace_difo_release(pred->dtp_difo, vstate); 9138 kmem_free(pred, sizeof (dtrace_predicate_t)); 9139 } 9140 } 9141 9142 /* 9143 * DTrace Action Description Functions 9144 */ 9145 static dtrace_actdesc_t * 9146 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 9147 uint64_t uarg, uint64_t arg) 9148 { 9149 dtrace_actdesc_t *act; 9150 9151 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 9152 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 9153 9154 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 9155 act->dtad_kind = kind; 9156 act->dtad_ntuple = ntuple; 9157 act->dtad_uarg = uarg; 9158 act->dtad_arg = arg; 9159 act->dtad_refcnt = 1; 9160 9161 return (act); 9162 } 9163 9164 static void 9165 dtrace_actdesc_hold(dtrace_actdesc_t *act) 9166 { 9167 ASSERT(act->dtad_refcnt >= 1); 9168 act->dtad_refcnt++; 9169 } 9170 9171 static void 9172 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 9173 { 9174 dtrace_actkind_t kind = act->dtad_kind; 9175 dtrace_difo_t *dp; 9176 9177 ASSERT(act->dtad_refcnt >= 1); 9178 9179 if (--act->dtad_refcnt != 0) 9180 return; 9181 9182 if ((dp = act->dtad_difo) != NULL) 9183 dtrace_difo_release(dp, vstate); 9184 9185 if (DTRACEACT_ISPRINTFLIKE(kind)) { 9186 char *str = (char *)(uintptr_t)act->dtad_arg; 9187 9188 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 9189 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 9190 9191 if (str != NULL) 9192 kmem_free(str, strlen(str) + 1); 9193 } 9194 9195 kmem_free(act, sizeof (dtrace_actdesc_t)); 9196 } 9197 9198 /* 9199 * DTrace ECB Functions 9200 */ 9201 static dtrace_ecb_t * 9202 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 9203 { 9204 dtrace_ecb_t *ecb; 9205 dtrace_epid_t epid; 9206 9207 ASSERT(MUTEX_HELD(&dtrace_lock)); 9208 9209 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 9210 ecb->dte_predicate = NULL; 9211 ecb->dte_probe = probe; 9212 9213 /* 9214 * The default size is the size of the default action: recording 9215 * the epid. 
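 * Every record this ECB writes to the principal buffer begins with its
 * EPID (a dtrace_epid_t, nominally four bytes); consumers read the
 * EPID first and use it to look up the ECB that describes the rest of
 * the record.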
9216 */ 9217 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9218 ecb->dte_alignment = sizeof (dtrace_epid_t); 9219 9220 epid = state->dts_epid++; 9221 9222 if (epid - 1 >= state->dts_necbs) { 9223 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 9224 int necbs = state->dts_necbs << 1; 9225 9226 ASSERT(epid == state->dts_necbs + 1); 9227 9228 if (necbs == 0) { 9229 ASSERT(oecbs == NULL); 9230 necbs = 1; 9231 } 9232 9233 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 9234 9235 if (oecbs != NULL) 9236 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 9237 9238 dtrace_membar_producer(); 9239 state->dts_ecbs = ecbs; 9240 9241 if (oecbs != NULL) { 9242 /* 9243 * If this state is active, we must dtrace_sync() 9244 * before we can free the old dts_ecbs array: we're 9245 * coming in hot, and there may be active ring 9246 * buffer processing (which indexes into the dts_ecbs 9247 * array) on another CPU. 9248 */ 9249 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 9250 dtrace_sync(); 9251 9252 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 9253 } 9254 9255 dtrace_membar_producer(); 9256 state->dts_necbs = necbs; 9257 } 9258 9259 ecb->dte_state = state; 9260 9261 ASSERT(state->dts_ecbs[epid - 1] == NULL); 9262 dtrace_membar_producer(); 9263 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 9264 9265 return (ecb); 9266 } 9267 9268 static int 9269 dtrace_ecb_enable(dtrace_ecb_t *ecb) 9270 { 9271 dtrace_probe_t *probe = ecb->dte_probe; 9272 9273 ASSERT(MUTEX_HELD(&cpu_lock)); 9274 ASSERT(MUTEX_HELD(&dtrace_lock)); 9275 ASSERT(ecb->dte_next == NULL); 9276 9277 if (probe == NULL) { 9278 /* 9279 * This is the NULL probe -- there's nothing to do. 9280 */ 9281 return (0); 9282 } 9283 9284 if (probe->dtpr_ecb == NULL) { 9285 dtrace_provider_t *prov = probe->dtpr_provider; 9286 9287 /* 9288 * We're the first ECB on this probe. 9289 */ 9290 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 9291 9292 if (ecb->dte_predicate != NULL) 9293 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 9294 9295 return (prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 9296 probe->dtpr_id, probe->dtpr_arg)); 9297 } else { 9298 /* 9299 * This probe is already active. Swing the last pointer to 9300 * point to the new ECB, and issue a dtrace_sync() to assure 9301 * that all CPUs have seen the change. 9302 */ 9303 ASSERT(probe->dtpr_ecb_last != NULL); 9304 probe->dtpr_ecb_last->dte_next = ecb; 9305 probe->dtpr_ecb_last = ecb; 9306 probe->dtpr_predcache = 0; 9307 9308 dtrace_sync(); 9309 return (0); 9310 } 9311 } 9312 9313 static void 9314 dtrace_ecb_resize(dtrace_ecb_t *ecb) 9315 { 9316 uint32_t maxalign = sizeof (dtrace_epid_t); 9317 uint32_t align = sizeof (uint8_t), offs, diff; 9318 dtrace_action_t *act; 9319 int wastuple = 0; 9320 uint32_t aggbase = UINT32_MAX; 9321 dtrace_state_t *state = ecb->dte_state; 9322 9323 /* 9324 * If we record anything, we always record the epid. (And we always 9325 * record it first.) 9326 */ 9327 offs = sizeof (dtrace_epid_t); 9328 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9329 9330 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9331 dtrace_recdesc_t *rec = &act->dta_rec; 9332 9333 if ((align = rec->dtrd_alignment) > maxalign) 9334 maxalign = align; 9335 9336 if (!wastuple && act->dta_intuple) { 9337 /* 9338 * This is the first record in a tuple. Align the 9339 * offset to be at offset 4 in an 8-byte aligned 9340 * block. 
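 * (For example, assuming a 4-byte dtrace_aggid_t: if offs is currently
 * 8, diff becomes 12, (12 & 7) is 4, and offs is advanced by 4 to 12,
 * which is offset 4 within an 8-byte aligned block; aggbase is then 8,
 * itself 8-byte aligned.)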
9341 */ 9342 diff = offs + sizeof (dtrace_aggid_t); 9343 9344 if (diff = (diff & (sizeof (uint64_t) - 1))) 9345 offs += sizeof (uint64_t) - diff; 9346 9347 aggbase = offs - sizeof (dtrace_aggid_t); 9348 ASSERT(!(aggbase & (sizeof (uint64_t) - 1))); 9349 } 9350 9351 /*LINTED*/ 9352 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) { 9353 /* 9354 * The current offset is not properly aligned; align it. 9355 */ 9356 offs += align - diff; 9357 } 9358 9359 rec->dtrd_offset = offs; 9360 9361 if (offs + rec->dtrd_size > ecb->dte_needed) { 9362 ecb->dte_needed = offs + rec->dtrd_size; 9363 9364 if (ecb->dte_needed > state->dts_needed) 9365 state->dts_needed = ecb->dte_needed; 9366 } 9367 9368 if (DTRACEACT_ISAGG(act->dta_kind)) { 9369 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9370 dtrace_action_t *first = agg->dtag_first, *prev; 9371 9372 ASSERT(rec->dtrd_size != 0 && first != NULL); 9373 ASSERT(wastuple); 9374 ASSERT(aggbase != UINT32_MAX); 9375 9376 agg->dtag_base = aggbase; 9377 9378 while ((prev = first->dta_prev) != NULL && 9379 DTRACEACT_ISAGG(prev->dta_kind)) { 9380 agg = (dtrace_aggregation_t *)prev; 9381 first = agg->dtag_first; 9382 } 9383 9384 if (prev != NULL) { 9385 offs = prev->dta_rec.dtrd_offset + 9386 prev->dta_rec.dtrd_size; 9387 } else { 9388 offs = sizeof (dtrace_epid_t); 9389 } 9390 wastuple = 0; 9391 } else { 9392 if (!act->dta_intuple) 9393 ecb->dte_size = offs + rec->dtrd_size; 9394 9395 offs += rec->dtrd_size; 9396 } 9397 9398 wastuple = act->dta_intuple; 9399 } 9400 9401 if ((act = ecb->dte_action) != NULL && 9402 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 9403 ecb->dte_size == sizeof (dtrace_epid_t)) { 9404 /* 9405 * If the size is still sizeof (dtrace_epid_t), then all 9406 * actions store no data; set the size to 0. 9407 */ 9408 ecb->dte_alignment = maxalign; 9409 ecb->dte_size = 0; 9410 9411 /* 9412 * If the needed space is still sizeof (dtrace_epid_t), then 9413 * all actions need no additional space; set the needed 9414 * size to 0. 9415 */ 9416 if (ecb->dte_needed == sizeof (dtrace_epid_t)) 9417 ecb->dte_needed = 0; 9418 9419 return; 9420 } 9421 9422 /* 9423 * Set our alignment, and make sure that the dte_size and dte_needed 9424 * are aligned to the size of an EPID. 
9425 */ 9426 ecb->dte_alignment = maxalign; 9427 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) & 9428 ~(sizeof (dtrace_epid_t) - 1); 9429 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) & 9430 ~(sizeof (dtrace_epid_t) - 1); 9431 ASSERT(ecb->dte_size <= ecb->dte_needed); 9432 } 9433 9434 static dtrace_action_t * 9435 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9436 { 9437 dtrace_aggregation_t *agg; 9438 size_t size = sizeof (uint64_t); 9439 int ntuple = desc->dtad_ntuple; 9440 dtrace_action_t *act; 9441 dtrace_recdesc_t *frec; 9442 dtrace_aggid_t aggid; 9443 dtrace_state_t *state = ecb->dte_state; 9444 9445 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 9446 agg->dtag_ecb = ecb; 9447 9448 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 9449 9450 switch (desc->dtad_kind) { 9451 case DTRACEAGG_MIN: 9452 agg->dtag_initial = INT64_MAX; 9453 agg->dtag_aggregate = dtrace_aggregate_min; 9454 break; 9455 9456 case DTRACEAGG_MAX: 9457 agg->dtag_initial = INT64_MIN; 9458 agg->dtag_aggregate = dtrace_aggregate_max; 9459 break; 9460 9461 case DTRACEAGG_COUNT: 9462 agg->dtag_aggregate = dtrace_aggregate_count; 9463 break; 9464 9465 case DTRACEAGG_QUANTIZE: 9466 agg->dtag_aggregate = dtrace_aggregate_quantize; 9467 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 9468 sizeof (uint64_t); 9469 break; 9470 9471 case DTRACEAGG_LQUANTIZE: { 9472 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 9473 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 9474 9475 agg->dtag_initial = desc->dtad_arg; 9476 agg->dtag_aggregate = dtrace_aggregate_lquantize; 9477 9478 if (step == 0 || levels == 0) 9479 goto err; 9480 9481 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 9482 break; 9483 } 9484 9485 case DTRACEAGG_LLQUANTIZE: { 9486 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg); 9487 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg); 9488 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg); 9489 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg); 9490 int64_t v; 9491 9492 agg->dtag_initial = desc->dtad_arg; 9493 agg->dtag_aggregate = dtrace_aggregate_llquantize; 9494 9495 if (factor < 2 || low >= high || nsteps < factor) 9496 goto err; 9497 9498 /* 9499 * Now check that the number of steps evenly divides a power 9500 * of the factor. (This assures both integer bucket size and 9501 * linearity within each magnitude.) 9502 */ 9503 for (v = factor; v < nsteps; v *= factor) 9504 continue; 9505 9506 if ((v % nsteps) || (nsteps % factor)) 9507 goto err; 9508 9509 size = (dtrace_aggregate_llquantize_bucket(factor, 9510 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t); 9511 break; 9512 } 9513 9514 case DTRACEAGG_AVG: 9515 agg->dtag_aggregate = dtrace_aggregate_avg; 9516 size = sizeof (uint64_t) * 2; 9517 break; 9518 9519 case DTRACEAGG_STDDEV: 9520 agg->dtag_aggregate = dtrace_aggregate_stddev; 9521 size = sizeof (uint64_t) * 4; 9522 break; 9523 9524 case DTRACEAGG_SUM: 9525 agg->dtag_aggregate = dtrace_aggregate_sum; 9526 break; 9527 9528 default: 9529 goto err; 9530 } 9531 9532 agg->dtag_action.dta_rec.dtrd_size = size; 9533 9534 if (ntuple == 0) 9535 goto err; 9536 9537 /* 9538 * We must make sure that we have enough actions for the n-tuple. 9539 */ 9540 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 9541 if (DTRACEACT_ISAGG(act->dta_kind)) 9542 break; 9543 9544 if (--ntuple == 0) { 9545 /* 9546 * This is the action with which our n-tuple begins. 
9547 */ 9548 agg->dtag_first = act; 9549 goto success; 9550 } 9551 } 9552 9553 /* 9554 * This n-tuple is short by ntuple elements. Return failure. 9555 */ 9556 ASSERT(ntuple != 0); 9557 err: 9558 kmem_free(agg, sizeof (dtrace_aggregation_t)); 9559 return (NULL); 9560 9561 success: 9562 /* 9563 * If the last action in the tuple has a size of zero, it's actually 9564 * an expression argument for the aggregating action. 9565 */ 9566 ASSERT(ecb->dte_action_last != NULL); 9567 act = ecb->dte_action_last; 9568 9569 if (act->dta_kind == DTRACEACT_DIFEXPR) { 9570 ASSERT(act->dta_difo != NULL); 9571 9572 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 9573 agg->dtag_hasarg = 1; 9574 } 9575 9576 /* 9577 * We need to allocate an id for this aggregation. 9578 */ 9579 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 9580 VM_BESTFIT | VM_SLEEP); 9581 9582 if (aggid - 1 >= state->dts_naggregations) { 9583 dtrace_aggregation_t **oaggs = state->dts_aggregations; 9584 dtrace_aggregation_t **aggs; 9585 int naggs = state->dts_naggregations << 1; 9586 int onaggs = state->dts_naggregations; 9587 9588 ASSERT(aggid == state->dts_naggregations + 1); 9589 9590 if (naggs == 0) { 9591 ASSERT(oaggs == NULL); 9592 naggs = 1; 9593 } 9594 9595 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 9596 9597 if (oaggs != NULL) { 9598 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 9599 kmem_free(oaggs, onaggs * sizeof (*aggs)); 9600 } 9601 9602 state->dts_aggregations = aggs; 9603 state->dts_naggregations = naggs; 9604 } 9605 9606 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 9607 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 9608 9609 frec = &agg->dtag_first->dta_rec; 9610 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 9611 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 9612 9613 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 9614 ASSERT(!act->dta_intuple); 9615 act->dta_intuple = 1; 9616 } 9617 9618 return (&agg->dtag_action); 9619 } 9620 9621 static void 9622 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 9623 { 9624 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9625 dtrace_state_t *state = ecb->dte_state; 9626 dtrace_aggid_t aggid = agg->dtag_id; 9627 9628 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 9629 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 9630 9631 ASSERT(state->dts_aggregations[aggid - 1] == agg); 9632 state->dts_aggregations[aggid - 1] = NULL; 9633 9634 kmem_free(agg, sizeof (dtrace_aggregation_t)); 9635 } 9636 9637 static int 9638 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9639 { 9640 dtrace_action_t *action, *last; 9641 dtrace_difo_t *dp = desc->dtad_difo; 9642 uint32_t size = 0, align = sizeof (uint8_t), mask; 9643 uint16_t format = 0; 9644 dtrace_recdesc_t *rec; 9645 dtrace_state_t *state = ecb->dte_state; 9646 dtrace_optval_t *opt = state->dts_options, nframes, strsize; 9647 uint64_t arg = desc->dtad_arg; 9648 9649 ASSERT(MUTEX_HELD(&dtrace_lock)); 9650 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 9651 9652 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 9653 /* 9654 * If this is an aggregating action, there must be neither 9655 * a speculate nor a commit on the action chain. 
9656 */ 9657 dtrace_action_t *act; 9658 9659 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9660 if (act->dta_kind == DTRACEACT_COMMIT) 9661 return (EINVAL); 9662 9663 if (act->dta_kind == DTRACEACT_SPECULATE) 9664 return (EINVAL); 9665 } 9666 9667 action = dtrace_ecb_aggregation_create(ecb, desc); 9668 9669 if (action == NULL) 9670 return (EINVAL); 9671 } else { 9672 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 9673 (desc->dtad_kind == DTRACEACT_DIFEXPR && 9674 dp != NULL && dp->dtdo_destructive)) { 9675 state->dts_destructive = 1; 9676 } 9677 9678 switch (desc->dtad_kind) { 9679 case DTRACEACT_PRINTF: 9680 case DTRACEACT_PRINTA: 9681 case DTRACEACT_SYSTEM: 9682 case DTRACEACT_FREOPEN: 9683 /* 9684 * We know that our arg is a string -- turn it into a 9685 * format. 9686 */ 9687 if (arg == NULL) { 9688 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA); 9689 format = 0; 9690 } else { 9691 ASSERT(arg != NULL); 9692 ASSERT(arg > KERNELBASE); 9693 format = dtrace_format_add(state, 9694 (char *)(uintptr_t)arg); 9695 } 9696 9697 /*FALLTHROUGH*/ 9698 case DTRACEACT_LIBACT: 9699 case DTRACEACT_DIFEXPR: 9700 if (dp == NULL) 9701 return (EINVAL); 9702 9703 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 9704 break; 9705 9706 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 9707 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 9708 return (EINVAL); 9709 9710 size = opt[DTRACEOPT_STRSIZE]; 9711 } 9712 9713 break; 9714 9715 case DTRACEACT_STACK: 9716 if ((nframes = arg) == 0) { 9717 nframes = opt[DTRACEOPT_STACKFRAMES]; 9718 ASSERT(nframes > 0); 9719 arg = nframes; 9720 } 9721 9722 size = nframes * sizeof (pc_t); 9723 break; 9724 9725 case DTRACEACT_JSTACK: 9726 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 9727 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 9728 9729 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 9730 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 9731 9732 arg = DTRACE_USTACK_ARG(nframes, strsize); 9733 9734 /*FALLTHROUGH*/ 9735 case DTRACEACT_USTACK: 9736 if (desc->dtad_kind != DTRACEACT_JSTACK && 9737 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 9738 strsize = DTRACE_USTACK_STRSIZE(arg); 9739 nframes = opt[DTRACEOPT_USTACKFRAMES]; 9740 ASSERT(nframes > 0); 9741 arg = DTRACE_USTACK_ARG(nframes, strsize); 9742 } 9743 9744 /* 9745 * Save a slot for the pid. 9746 */ 9747 size = (nframes + 1) * sizeof (uint64_t); 9748 size += DTRACE_USTACK_STRSIZE(arg); 9749 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 9750 9751 break; 9752 9753 case DTRACEACT_SYM: 9754 case DTRACEACT_MOD: 9755 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 9756 sizeof (uint64_t)) || 9757 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 9758 return (EINVAL); 9759 break; 9760 9761 case DTRACEACT_USYM: 9762 case DTRACEACT_UMOD: 9763 case DTRACEACT_UADDR: 9764 if (dp == NULL || 9765 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 9766 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 9767 return (EINVAL); 9768 9769 /* 9770 * We have a slot for the pid, plus a slot for the 9771 * argument. To keep things simple (aligned with 9772 * bitness-neutral sizing), we store each as a 64-bit 9773 * quantity. 
9774 */ 9775 size = 2 * sizeof (uint64_t); 9776 break; 9777 9778 case DTRACEACT_STOP: 9779 case DTRACEACT_BREAKPOINT: 9780 case DTRACEACT_PANIC: 9781 break; 9782 9783 case DTRACEACT_CHILL: 9784 case DTRACEACT_DISCARD: 9785 case DTRACEACT_RAISE: 9786 if (dp == NULL) 9787 return (EINVAL); 9788 break; 9789 9790 case DTRACEACT_EXIT: 9791 if (dp == NULL || 9792 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 9793 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 9794 return (EINVAL); 9795 break; 9796 9797 case DTRACEACT_SPECULATE: 9798 if (ecb->dte_size > sizeof (dtrace_epid_t)) 9799 return (EINVAL); 9800 9801 if (dp == NULL) 9802 return (EINVAL); 9803 9804 state->dts_speculates = 1; 9805 break; 9806 9807 case DTRACEACT_COMMIT: { 9808 dtrace_action_t *act = ecb->dte_action; 9809 9810 for (; act != NULL; act = act->dta_next) { 9811 if (act->dta_kind == DTRACEACT_COMMIT) 9812 return (EINVAL); 9813 } 9814 9815 if (dp == NULL) 9816 return (EINVAL); 9817 break; 9818 } 9819 9820 default: 9821 return (EINVAL); 9822 } 9823 9824 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 9825 /* 9826 * If this is a data-storing action or a speculate, 9827 * we must be sure that there isn't a commit on the 9828 * action chain. 9829 */ 9830 dtrace_action_t *act = ecb->dte_action; 9831 9832 for (; act != NULL; act = act->dta_next) { 9833 if (act->dta_kind == DTRACEACT_COMMIT) 9834 return (EINVAL); 9835 } 9836 } 9837 9838 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 9839 action->dta_rec.dtrd_size = size; 9840 } 9841 9842 action->dta_refcnt = 1; 9843 rec = &action->dta_rec; 9844 size = rec->dtrd_size; 9845 9846 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 9847 if (!(size & mask)) { 9848 align = mask + 1; 9849 break; 9850 } 9851 } 9852 9853 action->dta_kind = desc->dtad_kind; 9854 9855 if ((action->dta_difo = dp) != NULL) 9856 dtrace_difo_hold(dp); 9857 9858 rec->dtrd_action = action->dta_kind; 9859 rec->dtrd_arg = arg; 9860 rec->dtrd_uarg = desc->dtad_uarg; 9861 rec->dtrd_alignment = (uint16_t)align; 9862 rec->dtrd_format = format; 9863 9864 if ((last = ecb->dte_action_last) != NULL) { 9865 ASSERT(ecb->dte_action != NULL); 9866 action->dta_prev = last; 9867 last->dta_next = action; 9868 } else { 9869 ASSERT(ecb->dte_action == NULL); 9870 ecb->dte_action = action; 9871 } 9872 9873 ecb->dte_action_last = action; 9874 9875 return (0); 9876 } 9877 9878 static void 9879 dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 9880 { 9881 dtrace_action_t *act = ecb->dte_action, *next; 9882 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 9883 dtrace_difo_t *dp; 9884 uint16_t format; 9885 9886 if (act != NULL && act->dta_refcnt > 1) { 9887 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 9888 act->dta_refcnt--; 9889 } else { 9890 for (; act != NULL; act = next) { 9891 next = act->dta_next; 9892 ASSERT(next != NULL || act == ecb->dte_action_last); 9893 ASSERT(act->dta_refcnt == 1); 9894 9895 if ((format = act->dta_rec.dtrd_format) != 0) 9896 dtrace_format_remove(ecb->dte_state, format); 9897 9898 if ((dp = act->dta_difo) != NULL) 9899 dtrace_difo_release(dp, vstate); 9900 9901 if (DTRACEACT_ISAGG(act->dta_kind)) { 9902 dtrace_ecb_aggregation_destroy(ecb, act); 9903 } else { 9904 kmem_free(act, sizeof (dtrace_action_t)); 9905 } 9906 } 9907 } 9908 9909 ecb->dte_action = NULL; 9910 ecb->dte_action_last = NULL; 9911 ecb->dte_size = sizeof (dtrace_epid_t); 9912 } 9913 9914 static void 9915 dtrace_ecb_disable(dtrace_ecb_t *ecb) 9916 { 9917 /* 9918 * We disable the ECB by 
removing it from its probe. 9919 */ 9920 dtrace_ecb_t *pecb, *prev = NULL; 9921 dtrace_probe_t *probe = ecb->dte_probe; 9922 9923 ASSERT(MUTEX_HELD(&dtrace_lock)); 9924 9925 if (probe == NULL) { 9926 /* 9927 * This is the NULL probe; there is nothing to disable. 9928 */ 9929 return; 9930 } 9931 9932 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 9933 if (pecb == ecb) 9934 break; 9935 prev = pecb; 9936 } 9937 9938 ASSERT(pecb != NULL); 9939 9940 if (prev == NULL) { 9941 probe->dtpr_ecb = ecb->dte_next; 9942 } else { 9943 prev->dte_next = ecb->dte_next; 9944 } 9945 9946 if (ecb == probe->dtpr_ecb_last) { 9947 ASSERT(ecb->dte_next == NULL); 9948 probe->dtpr_ecb_last = prev; 9949 } 9950 9951 /* 9952 * The ECB has been disconnected from the probe; now sync to assure 9953 * that all CPUs have seen the change before returning. 9954 */ 9955 dtrace_sync(); 9956 9957 if (probe->dtpr_ecb == NULL) { 9958 /* 9959 * That was the last ECB on the probe; clear the predicate 9960 * cache ID for the probe, disable it and sync one more time 9961 * to assure that we'll never hit it again. 9962 */ 9963 dtrace_provider_t *prov = probe->dtpr_provider; 9964 9965 ASSERT(ecb->dte_next == NULL); 9966 ASSERT(probe->dtpr_ecb_last == NULL); 9967 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 9968 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 9969 probe->dtpr_id, probe->dtpr_arg); 9970 dtrace_sync(); 9971 } else { 9972 /* 9973 * There is at least one ECB remaining on the probe. If there 9974 * is _exactly_ one, set the probe's predicate cache ID to be 9975 * the predicate cache ID of the remaining ECB. 9976 */ 9977 ASSERT(probe->dtpr_ecb_last != NULL); 9978 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 9979 9980 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 9981 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 9982 9983 ASSERT(probe->dtpr_ecb->dte_next == NULL); 9984 9985 if (p != NULL) 9986 probe->dtpr_predcache = p->dtp_cacheid; 9987 } 9988 9989 ecb->dte_next = NULL; 9990 } 9991 } 9992 9993 static void 9994 dtrace_ecb_destroy(dtrace_ecb_t *ecb) 9995 { 9996 dtrace_state_t *state = ecb->dte_state; 9997 dtrace_vstate_t *vstate = &state->dts_vstate; 9998 dtrace_predicate_t *pred; 9999 dtrace_epid_t epid = ecb->dte_epid; 10000 10001 ASSERT(MUTEX_HELD(&dtrace_lock)); 10002 ASSERT(ecb->dte_next == NULL); 10003 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 10004 10005 if ((pred = ecb->dte_predicate) != NULL) 10006 dtrace_predicate_release(pred, vstate); 10007 10008 dtrace_ecb_action_remove(ecb); 10009 10010 ASSERT(state->dts_ecbs[epid - 1] == ecb); 10011 state->dts_ecbs[epid - 1] = NULL; 10012 10013 kmem_free(ecb, sizeof (dtrace_ecb_t)); 10014 } 10015 10016 static dtrace_ecb_t * 10017 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 10018 dtrace_enabling_t *enab) 10019 { 10020 dtrace_ecb_t *ecb; 10021 dtrace_predicate_t *pred; 10022 dtrace_actdesc_t *act; 10023 dtrace_provider_t *prov; 10024 dtrace_ecbdesc_t *desc = enab->dten_current; 10025 10026 ASSERT(MUTEX_HELD(&dtrace_lock)); 10027 ASSERT(state != NULL); 10028 10029 ecb = dtrace_ecb_add(state, probe); 10030 ecb->dte_uarg = desc->dted_uarg; 10031 10032 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 10033 dtrace_predicate_hold(pred); 10034 ecb->dte_predicate = pred; 10035 } 10036 10037 if (probe != NULL) { 10038 /* 10039 * If the provider shows more leg than the consumer is old 10040 * enough to see, we need to enable the appropriate implicit 10041 * predicate bits to prevent the ecb from 
activating at 10042 * revealing times. 10043 * 10044 * Providers specifying DTRACE_PRIV_USER at register time 10045 * are stating that they need the /proc-style privilege 10046 * model to be enforced, and this is what DTRACE_COND_OWNER 10047 * and DTRACE_COND_ZONEOWNER will then do at probe time. 10048 */ 10049 prov = probe->dtpr_provider; 10050 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 10051 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10052 ecb->dte_cond |= DTRACE_COND_OWNER; 10053 10054 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 10055 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10056 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 10057 10058 /* 10059 * If the provider shows us kernel innards and the user 10060 * is lacking sufficient privilege, enable the 10061 * DTRACE_COND_USERMODE implicit predicate. 10062 */ 10063 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 10064 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 10065 ecb->dte_cond |= DTRACE_COND_USERMODE; 10066 } 10067 10068 if (dtrace_ecb_create_cache != NULL) { 10069 /* 10070 * If we have a cached ecb, we'll use its action list instead 10071 * of creating our own (saving both time and space). 10072 */ 10073 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 10074 dtrace_action_t *act = cached->dte_action; 10075 10076 if (act != NULL) { 10077 ASSERT(act->dta_refcnt > 0); 10078 act->dta_refcnt++; 10079 ecb->dte_action = act; 10080 ecb->dte_action_last = cached->dte_action_last; 10081 ecb->dte_needed = cached->dte_needed; 10082 ecb->dte_size = cached->dte_size; 10083 ecb->dte_alignment = cached->dte_alignment; 10084 } 10085 10086 return (ecb); 10087 } 10088 10089 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 10090 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 10091 dtrace_ecb_destroy(ecb); 10092 return (NULL); 10093 } 10094 } 10095 10096 dtrace_ecb_resize(ecb); 10097 10098 return (dtrace_ecb_create_cache = ecb); 10099 } 10100 10101 static int 10102 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 10103 { 10104 dtrace_ecb_t *ecb; 10105 dtrace_enabling_t *enab = arg; 10106 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 10107 10108 ASSERT(state != NULL); 10109 10110 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 10111 /* 10112 * This probe was created in a generation for which this 10113 * enabling has previously created ECBs; we don't want to 10114 * enable it again, so just kick out. 
10115 */ 10116 return (DTRACE_MATCH_NEXT); 10117 } 10118 10119 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 10120 return (DTRACE_MATCH_DONE); 10121 10122 if (dtrace_ecb_enable(ecb) < 0) 10123 return (DTRACE_MATCH_FAIL); 10124 10125 return (DTRACE_MATCH_NEXT); 10126 } 10127 10128 static dtrace_ecb_t * 10129 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 10130 { 10131 dtrace_ecb_t *ecb; 10132 10133 ASSERT(MUTEX_HELD(&dtrace_lock)); 10134 10135 if (id == 0 || id > state->dts_necbs) 10136 return (NULL); 10137 10138 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 10139 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 10140 10141 return (state->dts_ecbs[id - 1]); 10142 } 10143 10144 static dtrace_aggregation_t * 10145 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 10146 { 10147 dtrace_aggregation_t *agg; 10148 10149 ASSERT(MUTEX_HELD(&dtrace_lock)); 10150 10151 if (id == 0 || id > state->dts_naggregations) 10152 return (NULL); 10153 10154 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 10155 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 10156 agg->dtag_id == id); 10157 10158 return (state->dts_aggregations[id - 1]); 10159 } 10160 10161 /* 10162 * DTrace Buffer Functions 10163 * 10164 * The following functions manipulate DTrace buffers. Most of these functions 10165 * are called in the context of establishing or processing consumer state; 10166 * exceptions are explicitly noted. 10167 */ 10168 10169 /* 10170 * Note: called from cross call context. This function switches the two 10171 * buffers on a given CPU. The atomicity of this operation is assured by 10172 * disabling interrupts while the actual switch takes place; the disabling of 10173 * interrupts serializes the execution with any execution of dtrace_probe() on 10174 * the same CPU. 10175 */ 10176 static void 10177 dtrace_buffer_switch(dtrace_buffer_t *buf) 10178 { 10179 caddr_t tomax = buf->dtb_tomax; 10180 caddr_t xamot = buf->dtb_xamot; 10181 dtrace_icookie_t cookie; 10182 hrtime_t now = dtrace_gethrtime(); 10183 10184 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 10185 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 10186 10187 cookie = dtrace_interrupt_disable(); 10188 buf->dtb_tomax = xamot; 10189 buf->dtb_xamot = tomax; 10190 buf->dtb_xamot_drops = buf->dtb_drops; 10191 buf->dtb_xamot_offset = buf->dtb_offset; 10192 buf->dtb_xamot_errors = buf->dtb_errors; 10193 buf->dtb_xamot_flags = buf->dtb_flags; 10194 buf->dtb_offset = 0; 10195 buf->dtb_drops = 0; 10196 buf->dtb_errors = 0; 10197 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 10198 buf->dtb_interval = now - buf->dtb_switched; 10199 buf->dtb_switched = now; 10200 dtrace_interrupt_enable(cookie); 10201 } 10202 10203 /* 10204 * Note: called from cross call context. This function activates a buffer 10205 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 10206 * is guaranteed by the disabling of interrupts. 10207 */ 10208 static void 10209 dtrace_buffer_activate(dtrace_state_t *state) 10210 { 10211 dtrace_buffer_t *buf; 10212 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 10213 10214 buf = &state->dts_buffer[CPU->cpu_id]; 10215 10216 if (buf->dtb_tomax != NULL) { 10217 /* 10218 * We might like to assert that the buffer is marked inactive, 10219 * but this isn't necessarily true: the buffer for the CPU 10220 * that processes the BEGIN probe has its buffer activated 10221 * manually. 
In this case, we take the (harmless) action 10222 * re-clearing the bit INACTIVE bit. 10223 */ 10224 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 10225 } 10226 10227 dtrace_interrupt_enable(cookie); 10228 } 10229 10230 static int 10231 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 10232 processorid_t cpu) 10233 { 10234 cpu_t *cp; 10235 dtrace_buffer_t *buf; 10236 10237 ASSERT(MUTEX_HELD(&cpu_lock)); 10238 ASSERT(MUTEX_HELD(&dtrace_lock)); 10239 10240 if (size > dtrace_nonroot_maxsize && 10241 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 10242 return (EFBIG); 10243 10244 cp = cpu_list; 10245 10246 do { 10247 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10248 continue; 10249 10250 buf = &bufs[cp->cpu_id]; 10251 10252 /* 10253 * If there is already a buffer allocated for this CPU, it 10254 * is only possible that this is a DR event. In this case, 10255 * the buffer size must match our specified size. 10256 */ 10257 if (buf->dtb_tomax != NULL) { 10258 ASSERT(buf->dtb_size == size); 10259 continue; 10260 } 10261 10262 ASSERT(buf->dtb_xamot == NULL); 10263 10264 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10265 goto err; 10266 10267 buf->dtb_size = size; 10268 buf->dtb_flags = flags; 10269 buf->dtb_offset = 0; 10270 buf->dtb_drops = 0; 10271 10272 if (flags & DTRACEBUF_NOSWITCH) 10273 continue; 10274 10275 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10276 goto err; 10277 } while ((cp = cp->cpu_next) != cpu_list); 10278 10279 return (0); 10280 10281 err: 10282 cp = cpu_list; 10283 10284 do { 10285 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10286 continue; 10287 10288 buf = &bufs[cp->cpu_id]; 10289 10290 if (buf->dtb_xamot != NULL) { 10291 ASSERT(buf->dtb_tomax != NULL); 10292 ASSERT(buf->dtb_size == size); 10293 kmem_free(buf->dtb_xamot, size); 10294 } 10295 10296 if (buf->dtb_tomax != NULL) { 10297 ASSERT(buf->dtb_size == size); 10298 kmem_free(buf->dtb_tomax, size); 10299 } 10300 10301 buf->dtb_tomax = NULL; 10302 buf->dtb_xamot = NULL; 10303 buf->dtb_size = 0; 10304 } while ((cp = cp->cpu_next) != cpu_list); 10305 10306 return (ENOMEM); 10307 } 10308 10309 /* 10310 * Note: called from probe context. This function just increments the drop 10311 * count on a buffer. It has been made a function to allow for the 10312 * possibility of understanding the source of mysterious drop counts. (A 10313 * problem for which one may be particularly disappointed that DTrace cannot 10314 * be used to understand DTrace.) 10315 */ 10316 static void 10317 dtrace_buffer_drop(dtrace_buffer_t *buf) 10318 { 10319 buf->dtb_drops++; 10320 } 10321 10322 /* 10323 * Note: called from probe context. This function is called to reserve space 10324 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 10325 * mstate. Returns the new offset in the buffer, or a negative value if an 10326 * error has occurred. 
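 * The expected calling pattern is, in sketch (error handling and the
 * zero-sized record case elided; this roughly mirrors the caller in
 * dtrace_probe()):
 *
 *	if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
 *	    ecb->dte_alignment, state, &mstate)) < 0)
 *		continue;
 *
 *	DTRACE_STORE(uint32_t, buf->dtb_tomax, offs, ecb->dte_epid);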
10327 */ 10328 static intptr_t 10329 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 10330 dtrace_state_t *state, dtrace_mstate_t *mstate) 10331 { 10332 intptr_t offs = buf->dtb_offset, soffs; 10333 intptr_t woffs; 10334 caddr_t tomax; 10335 size_t total; 10336 10337 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 10338 return (-1); 10339 10340 if ((tomax = buf->dtb_tomax) == NULL) { 10341 dtrace_buffer_drop(buf); 10342 return (-1); 10343 } 10344 10345 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 10346 while (offs & (align - 1)) { 10347 /* 10348 * Assert that our alignment is off by a number which 10349 * is itself sizeof (uint32_t) aligned. 10350 */ 10351 ASSERT(!((align - (offs & (align - 1))) & 10352 (sizeof (uint32_t) - 1))); 10353 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 10354 offs += sizeof (uint32_t); 10355 } 10356 10357 if ((soffs = offs + needed) > buf->dtb_size) { 10358 dtrace_buffer_drop(buf); 10359 return (-1); 10360 } 10361 10362 if (mstate == NULL) 10363 return (offs); 10364 10365 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 10366 mstate->dtms_scratch_size = buf->dtb_size - soffs; 10367 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 10368 10369 return (offs); 10370 } 10371 10372 if (buf->dtb_flags & DTRACEBUF_FILL) { 10373 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 10374 (buf->dtb_flags & DTRACEBUF_FULL)) 10375 return (-1); 10376 goto out; 10377 } 10378 10379 total = needed + (offs & (align - 1)); 10380 10381 /* 10382 * For a ring buffer, life is quite a bit more complicated. Before 10383 * we can store any padding, we need to adjust our wrapping offset. 10384 * (If we've never before wrapped or we're not about to, no adjustment 10385 * is required.) 10386 */ 10387 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 10388 offs + total > buf->dtb_size) { 10389 woffs = buf->dtb_xamot_offset; 10390 10391 if (offs + total > buf->dtb_size) { 10392 /* 10393 * We can't fit in the end of the buffer. First, a 10394 * sanity check that we can fit in the buffer at all. 10395 */ 10396 if (total > buf->dtb_size) { 10397 dtrace_buffer_drop(buf); 10398 return (-1); 10399 } 10400 10401 /* 10402 * We're going to be storing at the top of the buffer, 10403 * so now we need to deal with the wrapped offset. We 10404 * only reset our wrapped offset to 0 if it is 10405 * currently greater than the current offset. If it 10406 * is less than the current offset, it is because a 10407 * previous allocation induced a wrap -- but the 10408 * allocation didn't subsequently take the space due 10409 * to an error or false predicate evaluation. In this 10410 * case, we'll just leave the wrapped offset alone: if 10411 * the wrapped offset hasn't been advanced far enough 10412 * for this allocation, it will be adjusted in the 10413 * lower loop. 10414 */ 10415 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 10416 if (woffs >= offs) 10417 woffs = 0; 10418 } else { 10419 woffs = 0; 10420 } 10421 10422 /* 10423 * Now we know that we're going to be storing to the 10424 * top of the buffer and that there is room for us 10425 * there. We need to clear the buffer from the current 10426 * offset to the end (there may be old gunk there). 10427 */ 10428 while (offs < buf->dtb_size) 10429 tomax[offs++] = 0; 10430 10431 /* 10432 * We need to set our offset to zero. And because we 10433 * are wrapping, we need to set the bit indicating as 10434 * much. 
We can also adjust our needed space back 10435 * down to the space required by the ECB -- we know 10436 * that the top of the buffer is aligned. 10437 */ 10438 offs = 0; 10439 total = needed; 10440 buf->dtb_flags |= DTRACEBUF_WRAPPED; 10441 } else { 10442 /* 10443 * There is room for us in the buffer, so we simply 10444 * need to check the wrapped offset. 10445 */ 10446 if (woffs < offs) { 10447 /* 10448 * The wrapped offset is less than the offset. 10449 * This can happen if we allocated buffer space 10450 * that induced a wrap, but then we didn't 10451 * subsequently take the space due to an error 10452 * or false predicate evaluation. This is 10453 * okay; we know that _this_ allocation isn't 10454 * going to induce a wrap. We still can't 10455 * reset the wrapped offset to be zero, 10456 * however: the space may have been trashed in 10457 * the previous failed probe attempt. But at 10458 * least the wrapped offset doesn't need to 10459 * be adjusted at all... 10460 */ 10461 goto out; 10462 } 10463 } 10464 10465 while (offs + total > woffs) { 10466 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 10467 size_t size; 10468 10469 if (epid == DTRACE_EPIDNONE) { 10470 size = sizeof (uint32_t); 10471 } else { 10472 ASSERT(epid <= state->dts_necbs); 10473 ASSERT(state->dts_ecbs[epid - 1] != NULL); 10474 10475 size = state->dts_ecbs[epid - 1]->dte_size; 10476 } 10477 10478 ASSERT(woffs + size <= buf->dtb_size); 10479 ASSERT(size != 0); 10480 10481 if (woffs + size == buf->dtb_size) { 10482 /* 10483 * We've reached the end of the buffer; we want 10484 * to set the wrapped offset to 0 and break 10485 * out. However, if the offs is 0, then we're 10486 * in a strange edge-condition: the amount of 10487 * space that we want to reserve plus the size 10488 * of the record that we're overwriting is 10489 * greater than the size of the buffer. This 10490 * is problematic because if we reserve the 10491 * space but subsequently don't consume it (due 10492 * to a failed predicate or error) the wrapped 10493 * offset will be 0 -- yet the EPID at offset 0 10494 * will not be committed. This situation is 10495 * relatively easy to deal with: if we're in 10496 * this case, the buffer is indistinguishable 10497 * from one that hasn't wrapped; we need only 10498 * finish the job by clearing the wrapped bit, 10499 * explicitly setting the offset to be 0, and 10500 * zero'ing out the old data in the buffer. 10501 */ 10502 if (offs == 0) { 10503 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 10504 buf->dtb_offset = 0; 10505 woffs = total; 10506 10507 while (woffs < buf->dtb_size) 10508 tomax[woffs++] = 0; 10509 } 10510 10511 woffs = 0; 10512 break; 10513 } 10514 10515 woffs += size; 10516 } 10517 10518 /* 10519 * We have a wrapped offset. It may be that the wrapped offset 10520 * has become zero -- that's okay. 10521 */ 10522 buf->dtb_xamot_offset = woffs; 10523 } 10524 10525 out: 10526 /* 10527 * Now we can plow the buffer with any necessary padding. 10528 */ 10529 while (offs & (align - 1)) { 10530 /* 10531 * Assert that our alignment is off by a number which 10532 * is itself sizeof (uint32_t) aligned. 
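 * (For example: if align were 8 and offs were 13, the pad would be
 * three bytes -- not a multiple of four -- and this assertion would
 * fire. The expectation is that offsets only ever drift by multiples
 * of sizeof (uint32_t), so padding can always be laid down as whole
 * DTRACE_EPIDNONE words.)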
10533 */ 10534 ASSERT(!((align - (offs & (align - 1))) & 10535 (sizeof (uint32_t) - 1))); 10536 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 10537 offs += sizeof (uint32_t); 10538 } 10539 10540 if (buf->dtb_flags & DTRACEBUF_FILL) { 10541 if (offs + needed > buf->dtb_size - state->dts_reserve) { 10542 buf->dtb_flags |= DTRACEBUF_FULL; 10543 return (-1); 10544 } 10545 } 10546 10547 if (mstate == NULL) 10548 return (offs); 10549 10550 /* 10551 * For ring buffers and fill buffers, the scratch space is always 10552 * the inactive buffer. 10553 */ 10554 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 10555 mstate->dtms_scratch_size = buf->dtb_size; 10556 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 10557 10558 return (offs); 10559 } 10560 10561 static void 10562 dtrace_buffer_polish(dtrace_buffer_t *buf) 10563 { 10564 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 10565 ASSERT(MUTEX_HELD(&dtrace_lock)); 10566 10567 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 10568 return; 10569 10570 /* 10571 * We need to polish the ring buffer. There are three cases: 10572 * 10573 * - The first (and presumably most common) is that there is no gap 10574 * between the buffer offset and the wrapped offset. In this case, 10575 * there is nothing in the buffer that isn't valid data; we can 10576 * mark the buffer as polished and return. 10577 * 10578 * - The second (less common than the first but still more common 10579 * than the third) is that there is a gap between the buffer offset 10580 * and the wrapped offset, and the wrapped offset is larger than the 10581 * buffer offset. This can happen because of an alignment issue, or 10582 * can happen because of a call to dtrace_buffer_reserve() that 10583 * didn't subsequently consume the buffer space. In this case, 10584 * we need to zero the data from the buffer offset to the wrapped 10585 * offset. 10586 * 10587 * - The third (and least common) is that there is a gap between the 10588 * buffer offset and the wrapped offset, but the wrapped offset is 10589 * _less_ than the buffer offset. This can only happen because a 10590 * call to dtrace_buffer_reserve() induced a wrap, but the space 10591 * was not subsequently consumed. In this case, we need to zero the 10592 * space from the offset to the end of the buffer _and_ from the 10593 * top of the buffer to the wrapped offset. 10594 */ 10595 if (buf->dtb_offset < buf->dtb_xamot_offset) { 10596 bzero(buf->dtb_tomax + buf->dtb_offset, 10597 buf->dtb_xamot_offset - buf->dtb_offset); 10598 } 10599 10600 if (buf->dtb_offset > buf->dtb_xamot_offset) { 10601 bzero(buf->dtb_tomax + buf->dtb_offset, 10602 buf->dtb_size - buf->dtb_offset); 10603 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 10604 } 10605 } 10606 10607 /* 10608 * This routine determines if data generated at the specified time has likely 10609 * been entirely consumed at user-level. This routine is called to determine 10610 * if an ECB on a defunct probe (but for an active enabling) can be safely 10611 * disabled and destroyed. 
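 *
 * To sketch the check below with hypothetical values: if a buffer was
 * last switched at time T and the interval back to the previous switch
 * was I, anything generated at or before T - I has survived two
 * switches and must have been consumed; data generated at "when" is
 * therefore considered consumed only if when <= T - I on every CPU
 * (ring buffers and unswitched, non-empty buffers never qualify).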
10612 */ 10613 static int 10614 dtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when) 10615 { 10616 int i; 10617 10618 for (i = 0; i < NCPU; i++) { 10619 dtrace_buffer_t *buf = &bufs[i]; 10620 10621 if (buf->dtb_size == 0) 10622 continue; 10623 10624 if (buf->dtb_flags & DTRACEBUF_RING) 10625 return (0); 10626 10627 if (!buf->dtb_switched && buf->dtb_offset != 0) 10628 return (0); 10629 10630 if (buf->dtb_switched - buf->dtb_interval < when) 10631 return (0); 10632 } 10633 10634 return (1); 10635 } 10636 10637 static void 10638 dtrace_buffer_free(dtrace_buffer_t *bufs) 10639 { 10640 int i; 10641 10642 for (i = 0; i < NCPU; i++) { 10643 dtrace_buffer_t *buf = &bufs[i]; 10644 10645 if (buf->dtb_tomax == NULL) { 10646 ASSERT(buf->dtb_xamot == NULL); 10647 ASSERT(buf->dtb_size == 0); 10648 continue; 10649 } 10650 10651 if (buf->dtb_xamot != NULL) { 10652 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 10653 kmem_free(buf->dtb_xamot, buf->dtb_size); 10654 } 10655 10656 kmem_free(buf->dtb_tomax, buf->dtb_size); 10657 buf->dtb_size = 0; 10658 buf->dtb_tomax = NULL; 10659 buf->dtb_xamot = NULL; 10660 } 10661 } 10662 10663 /* 10664 * DTrace Enabling Functions 10665 */ 10666 static dtrace_enabling_t * 10667 dtrace_enabling_create(dtrace_vstate_t *vstate) 10668 { 10669 dtrace_enabling_t *enab; 10670 10671 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 10672 enab->dten_vstate = vstate; 10673 10674 return (enab); 10675 } 10676 10677 static void 10678 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 10679 { 10680 dtrace_ecbdesc_t **ndesc; 10681 size_t osize, nsize; 10682 10683 /* 10684 * We can't add to enablings after we've enabled them, or after we've 10685 * retained them. 10686 */ 10687 ASSERT(enab->dten_probegen == 0); 10688 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 10689 10690 if (enab->dten_ndesc < enab->dten_maxdesc) { 10691 enab->dten_desc[enab->dten_ndesc++] = ecb; 10692 return; 10693 } 10694 10695 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 10696 10697 if (enab->dten_maxdesc == 0) { 10698 enab->dten_maxdesc = 1; 10699 } else { 10700 enab->dten_maxdesc <<= 1; 10701 } 10702 10703 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 10704 10705 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 10706 ndesc = kmem_zalloc(nsize, KM_SLEEP); 10707 bcopy(enab->dten_desc, ndesc, osize); 10708 kmem_free(enab->dten_desc, osize); 10709 10710 enab->dten_desc = ndesc; 10711 enab->dten_desc[enab->dten_ndesc++] = ecb; 10712 } 10713 10714 static void 10715 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 10716 dtrace_probedesc_t *pd) 10717 { 10718 dtrace_ecbdesc_t *new; 10719 dtrace_predicate_t *pred; 10720 dtrace_actdesc_t *act; 10721 10722 /* 10723 * We're going to create a new ECB description that matches the 10724 * specified ECB in every way, but has the specified probe description. 
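 *
 * For example (hypothetical descriptions): given a source ECB that
 * enables syscall::read:entry with predicate P and action list A,
 * passing a probe description of syscall::write:entry yields a second
 * ECB description that shares P and A (with holds taken on each) but
 * matches write rather than read.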
10725 */ 10726 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 10727 10728 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 10729 dtrace_predicate_hold(pred); 10730 10731 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 10732 dtrace_actdesc_hold(act); 10733 10734 new->dted_action = ecb->dted_action; 10735 new->dted_pred = ecb->dted_pred; 10736 new->dted_probe = *pd; 10737 new->dted_uarg = ecb->dted_uarg; 10738 10739 dtrace_enabling_add(enab, new); 10740 } 10741 10742 static void 10743 dtrace_enabling_dump(dtrace_enabling_t *enab) 10744 { 10745 int i; 10746 10747 for (i = 0; i < enab->dten_ndesc; i++) { 10748 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 10749 10750 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 10751 desc->dtpd_provider, desc->dtpd_mod, 10752 desc->dtpd_func, desc->dtpd_name); 10753 } 10754 } 10755 10756 static void 10757 dtrace_enabling_destroy(dtrace_enabling_t *enab) 10758 { 10759 int i; 10760 dtrace_ecbdesc_t *ep; 10761 dtrace_vstate_t *vstate = enab->dten_vstate; 10762 10763 ASSERT(MUTEX_HELD(&dtrace_lock)); 10764 10765 for (i = 0; i < enab->dten_ndesc; i++) { 10766 dtrace_actdesc_t *act, *next; 10767 dtrace_predicate_t *pred; 10768 10769 ep = enab->dten_desc[i]; 10770 10771 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 10772 dtrace_predicate_release(pred, vstate); 10773 10774 for (act = ep->dted_action; act != NULL; act = next) { 10775 next = act->dtad_next; 10776 dtrace_actdesc_release(act, vstate); 10777 } 10778 10779 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 10780 } 10781 10782 kmem_free(enab->dten_desc, 10783 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 10784 10785 /* 10786 * If this was a retained enabling, decrement the dts_nretained count 10787 * and take it off of the dtrace_retained list. 10788 */ 10789 if (enab->dten_prev != NULL || enab->dten_next != NULL || 10790 dtrace_retained == enab) { 10791 ASSERT(enab->dten_vstate->dtvs_state != NULL); 10792 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 10793 enab->dten_vstate->dtvs_state->dts_nretained--; 10794 dtrace_retained_gen++; 10795 } 10796 10797 if (enab->dten_prev == NULL) { 10798 if (dtrace_retained == enab) { 10799 dtrace_retained = enab->dten_next; 10800 10801 if (dtrace_retained != NULL) 10802 dtrace_retained->dten_prev = NULL; 10803 } 10804 } else { 10805 ASSERT(enab != dtrace_retained); 10806 ASSERT(dtrace_retained != NULL); 10807 enab->dten_prev->dten_next = enab->dten_next; 10808 } 10809 10810 if (enab->dten_next != NULL) { 10811 ASSERT(dtrace_retained != NULL); 10812 enab->dten_next->dten_prev = enab->dten_prev; 10813 } 10814 10815 kmem_free(enab, sizeof (dtrace_enabling_t)); 10816 } 10817 10818 static int 10819 dtrace_enabling_retain(dtrace_enabling_t *enab) 10820 { 10821 dtrace_state_t *state; 10822 10823 ASSERT(MUTEX_HELD(&dtrace_lock)); 10824 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 10825 ASSERT(enab->dten_vstate != NULL); 10826 10827 state = enab->dten_vstate->dtvs_state; 10828 ASSERT(state != NULL); 10829 10830 /* 10831 * We only allow each state to retain dtrace_retain_max enablings. 
10832 */ 10833 if (state->dts_nretained >= dtrace_retain_max) 10834 return (ENOSPC); 10835 10836 state->dts_nretained++; 10837 dtrace_retained_gen++; 10838 10839 if (dtrace_retained == NULL) { 10840 dtrace_retained = enab; 10841 return (0); 10842 } 10843 10844 enab->dten_next = dtrace_retained; 10845 dtrace_retained->dten_prev = enab; 10846 dtrace_retained = enab; 10847 10848 return (0); 10849 } 10850 10851 static int 10852 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 10853 dtrace_probedesc_t *create) 10854 { 10855 dtrace_enabling_t *new, *enab; 10856 int found = 0, err = ENOENT; 10857 10858 ASSERT(MUTEX_HELD(&dtrace_lock)); 10859 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 10860 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 10861 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 10862 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 10863 10864 new = dtrace_enabling_create(&state->dts_vstate); 10865 10866 /* 10867 * Iterate over all retained enablings, looking for enablings that 10868 * match the specified state. 10869 */ 10870 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 10871 int i; 10872 10873 /* 10874 * dtvs_state can only be NULL for helper enablings -- and 10875 * helper enablings can't be retained. 10876 */ 10877 ASSERT(enab->dten_vstate->dtvs_state != NULL); 10878 10879 if (enab->dten_vstate->dtvs_state != state) 10880 continue; 10881 10882 /* 10883 * Now iterate over each probe description; we're looking for 10884 * an exact match to the specified probe description. 10885 */ 10886 for (i = 0; i < enab->dten_ndesc; i++) { 10887 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 10888 dtrace_probedesc_t *pd = &ep->dted_probe; 10889 10890 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 10891 continue; 10892 10893 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 10894 continue; 10895 10896 if (strcmp(pd->dtpd_func, match->dtpd_func)) 10897 continue; 10898 10899 if (strcmp(pd->dtpd_name, match->dtpd_name)) 10900 continue; 10901 10902 /* 10903 * We have a winning probe! Add it to our growing 10904 * enabling. 10905 */ 10906 found = 1; 10907 dtrace_enabling_addlike(new, ep, create); 10908 } 10909 } 10910 10911 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 10912 dtrace_enabling_destroy(new); 10913 return (err); 10914 } 10915 10916 return (0); 10917 } 10918 10919 static void 10920 dtrace_enabling_retract(dtrace_state_t *state) 10921 { 10922 dtrace_enabling_t *enab, *next; 10923 10924 ASSERT(MUTEX_HELD(&dtrace_lock)); 10925 10926 /* 10927 * Iterate over all retained enablings, destroy the enablings retained 10928 * for the specified state. 10929 */ 10930 for (enab = dtrace_retained; enab != NULL; enab = next) { 10931 next = enab->dten_next; 10932 10933 /* 10934 * dtvs_state can only be NULL for helper enablings -- and 10935 * helper enablings can't be retained. 
10936 */
10937 ASSERT(enab->dten_vstate->dtvs_state != NULL);
10938
10939 if (enab->dten_vstate->dtvs_state == state) {
10940 ASSERT(state->dts_nretained > 0);
10941 dtrace_enabling_destroy(enab);
10942 }
10943 }
10944
10945 ASSERT(state->dts_nretained == 0);
10946 }
10947
10948 static int
10949 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched)
10950 {
10951 int i = 0;
10952 int total_matched = 0, matched = 0;
10953
10954 ASSERT(MUTEX_HELD(&cpu_lock));
10955 ASSERT(MUTEX_HELD(&dtrace_lock));
10956
10957 for (i = 0; i < enab->dten_ndesc; i++) {
10958 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
10959
10960 enab->dten_current = ep;
10961 enab->dten_error = 0;
10962
10963 /*
10964 * If a provider failed to enable a probe then get out and
10965 * let the consumer know we failed.
10966 */
10967 if ((matched = dtrace_probe_enable(&ep->dted_probe, enab)) < 0)
10968 return (EBUSY);
10969
10970 total_matched += matched;
10971
10972 if (enab->dten_error != 0) {
10973 /*
10974 * If we get an error half-way through enabling the
10975 * probes, we kick out -- perhaps with some number of
10976 * them enabled. Leaving enabled probes enabled may
10977 * be slightly confusing for user-level, but we expect
10978 * that no one will attempt to actually drive on in
10979 * the face of such errors. If this is an anonymous
10980 * enabling (indicated with a NULL nmatched pointer),
10981 * we cmn_err() a message. We aren't expecting to
10982 * get such an error -- insofar as it can exist at all,
10983 * it would be the result of corrupted DOF in the driver
10984 * properties.
10985 */
10986 if (nmatched == NULL) {
10987 cmn_err(CE_WARN, "dtrace_enabling_match() "
10988 "error on %p: %d", (void *)ep,
10989 enab->dten_error);
10990 }
10991
10992 return (enab->dten_error);
10993 }
10994 }
10995
10996 enab->dten_probegen = dtrace_probegen;
10997 if (nmatched != NULL)
10998 *nmatched = total_matched;
10999
11000 return (0);
11001 }
11002
11003 static void
11004 dtrace_enabling_matchall(void)
11005 {
11006 dtrace_enabling_t *enab;
11007
11008 mutex_enter(&cpu_lock);
11009 mutex_enter(&dtrace_lock);
11010
11011 /*
11012 * Iterate over all retained enablings to see if any probes match
11013 * against them. We only perform this operation on enablings for which
11014 * we have sufficient permissions by virtue of being in the global zone
11015 * or in the same zone as the DTrace client. Because we can be called
11016 * after dtrace_detach() has been called, we cannot assert that there
11017 * are retained enablings. We can safely load from dtrace_retained,
11018 * however: the taskq_destroy() at the end of dtrace_detach() will
11019 * block pending our completion.
11020 */
11021 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11022 dtrace_cred_t *dcr = &enab->dten_vstate->dtvs_state->dts_cred;
11023 cred_t *cr = dcr->dcr_cred;
11024 zoneid_t zone = cr != NULL ? crgetzoneid(cr) : 0;
11025
11026 if ((dcr->dcr_visible & DTRACE_CRV_ALLZONE) || (cr != NULL &&
11027 (zone == GLOBAL_ZONEID || getzoneid() == zone)))
11028 (void) dtrace_enabling_match(enab, NULL);
11029 }
11030
11031 mutex_exit(&dtrace_lock);
11032 mutex_exit(&cpu_lock);
11033 }
11034
11035 /*
11036 * If an enabling is to be enabled without having matched probes (that is, if
11037 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
11038 * enabling must be _primed_ by creating an ECB for every ECB description.
11039 * This must be done to assure that we know the number of speculations, the
11040 * number of aggregations, the minimum buffer size needed, etc. before we
11041 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually
11042 * enabling any probes, we create ECBs for every ECB description, but with a
11043 * NULL probe -- which is exactly what this function does.
11044 */
11045 static void
11046 dtrace_enabling_prime(dtrace_state_t *state)
11047 {
11048 dtrace_enabling_t *enab;
11049 int i;
11050
11051 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11052 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11053
11054 if (enab->dten_vstate->dtvs_state != state)
11055 continue;
11056
11057 /*
11058 * We don't want to prime an enabling more than once, lest
11059 * we allow a malicious user to induce resource exhaustion.
11060 * (The ECBs that result from priming an enabling aren't
11061 * leaked -- but they also aren't deallocated until the
11062 * consumer state is destroyed.)
11063 */
11064 if (enab->dten_primed)
11065 continue;
11066
11067 for (i = 0; i < enab->dten_ndesc; i++) {
11068 enab->dten_current = enab->dten_desc[i];
11069 (void) dtrace_probe_enable(NULL, enab);
11070 }
11071
11072 enab->dten_primed = 1;
11073 }
11074 }
11075
11076 /*
11077 * Called to indicate that probes should be provided due to retained
11078 * enablings. This is implemented in terms of dtrace_probe_provide(), but it
11079 * must take an initial lap through the enabling calling the dtps_provide()
11080 * entry point explicitly to allow for autocreated probes.
11081 */
11082 static void
11083 dtrace_enabling_provide(dtrace_provider_t *prv)
11084 {
11085 int i, all = 0;
11086 dtrace_probedesc_t desc;
11087 dtrace_genid_t gen;
11088
11089 ASSERT(MUTEX_HELD(&dtrace_lock));
11090 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
11091
11092 if (prv == NULL) {
11093 all = 1;
11094 prv = dtrace_provider;
11095 }
11096
11097 do {
11098 dtrace_enabling_t *enab;
11099 void *parg = prv->dtpv_arg;
11100
11101 retry:
11102 gen = dtrace_retained_gen;
11103 for (enab = dtrace_retained; enab != NULL;
11104 enab = enab->dten_next) {
11105 for (i = 0; i < enab->dten_ndesc; i++) {
11106 desc = enab->dten_desc[i]->dted_probe;
11107 mutex_exit(&dtrace_lock);
11108 prv->dtpv_pops.dtps_provide(parg, &desc);
11109 mutex_enter(&dtrace_lock);
11110 /*
11111 * Process the retained enablings again if
11112 * they have changed while we weren't holding
11113 * dtrace_lock.
11114 */
11115 if (gen != dtrace_retained_gen)
11116 goto retry;
11117 }
11118 }
11119 } while (all && (prv = prv->dtpv_next) != NULL);
11120
11121 mutex_exit(&dtrace_lock);
11122 dtrace_probe_provide(NULL, all ? NULL : prv);
11123 mutex_enter(&dtrace_lock);
11124 }
11125
11126 /*
11127 * Called to reap ECBs that are attached to probes from defunct providers.
11128 */
11129 static void
11130 dtrace_enabling_reap(void)
11131 {
11132 dtrace_provider_t *prov;
11133 dtrace_probe_t *probe;
11134 dtrace_ecb_t *ecb;
11135 hrtime_t when;
11136 int i;
11137
11138 mutex_enter(&cpu_lock);
11139 mutex_enter(&dtrace_lock);
11140
11141 for (i = 0; i < dtrace_nprobes; i++) {
11142 if ((probe = dtrace_probes[i]) == NULL)
11143 continue;
11144
11145 if (probe->dtpr_ecb == NULL)
11146 continue;
11147
11148 prov = probe->dtpr_provider;
11149
11150 if ((when = prov->dtpv_defunct) == 0)
11151 continue;
11152
11153 /*
11154 * We have ECBs on a defunct provider: we want to reap these
11155 * ECBs to allow the provider to unregister.
The destruction 11156 * of these ECBs must be done carefully: if we destroy the ECB 11157 * and the consumer later wishes to consume an EPID that 11158 * corresponds to the destroyed ECB (and if the EPID metadata 11159 * has not been previously consumed), the consumer will abort 11160 * processing on the unknown EPID. To reduce (but not, sadly, 11161 * eliminate) the possibility of this, we will only destroy an 11162 * ECB for a defunct provider if, for the state that 11163 * corresponds to the ECB: 11164 * 11165 * (a) There is no speculative tracing (which can effectively 11166 * cache an EPID for an arbitrary amount of time). 11167 * 11168 * (b) The principal buffers have been switched twice since the 11169 * provider became defunct. 11170 * 11171 * (c) The aggregation buffers are of zero size or have been 11172 * switched twice since the provider became defunct. 11173 * 11174 * We use dts_speculates to determine (a) and call a function 11175 * (dtrace_buffer_consumed()) to determine (b) and (c). Note 11176 * that as soon as we've been unable to destroy one of the ECBs 11177 * associated with the probe, we quit trying -- reaping is only 11178 * fruitful in as much as we can destroy all ECBs associated 11179 * with the defunct provider's probes. 11180 */ 11181 while ((ecb = probe->dtpr_ecb) != NULL) { 11182 dtrace_state_t *state = ecb->dte_state; 11183 dtrace_buffer_t *buf = state->dts_buffer; 11184 dtrace_buffer_t *aggbuf = state->dts_aggbuffer; 11185 11186 if (state->dts_speculates) 11187 break; 11188 11189 if (!dtrace_buffer_consumed(buf, when)) 11190 break; 11191 11192 if (!dtrace_buffer_consumed(aggbuf, when)) 11193 break; 11194 11195 dtrace_ecb_disable(ecb); 11196 ASSERT(probe->dtpr_ecb != ecb); 11197 dtrace_ecb_destroy(ecb); 11198 } 11199 } 11200 11201 mutex_exit(&dtrace_lock); 11202 mutex_exit(&cpu_lock); 11203 } 11204 11205 /* 11206 * DTrace DOF Functions 11207 */ 11208 /*ARGSUSED*/ 11209 static void 11210 dtrace_dof_error(dof_hdr_t *dof, const char *str) 11211 { 11212 if (dtrace_err_verbose) 11213 cmn_err(CE_WARN, "failed to process DOF: %s", str); 11214 11215 #ifdef DTRACE_ERRDEBUG 11216 dtrace_errdebug(str); 11217 #endif 11218 } 11219 11220 /* 11221 * Create DOF out of a currently enabled state. Right now, we only create 11222 * DOF containing the run-time options -- but this could be expanded to create 11223 * complete DOF representing the enabled state. 
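 *
 * The generated DOF is laid out, in order, as: a dof_hdr_t; a single
 * dof_sec_t describing one DOF_SECT_OPTDESC section (padded out to an
 * eight-byte boundary); and DTRACEOPT_MAX dof_optdesc_t entries, one
 * per option, each carrying that option's current value from
 * dts_options.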
11224 */
11225 static dof_hdr_t *
11226 dtrace_dof_create(dtrace_state_t *state)
11227 {
11228 dof_hdr_t *dof;
11229 dof_sec_t *sec;
11230 dof_optdesc_t *opt;
11231 int i, len = sizeof (dof_hdr_t) +
11232 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
11233 sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
11234
11235 ASSERT(MUTEX_HELD(&dtrace_lock));
11236
11237 dof = kmem_zalloc(len, KM_SLEEP);
11238 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
11239 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1;
11240 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2;
11241 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3;
11242
11243 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE;
11244 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE;
11245 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION;
11246 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION;
11247 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS;
11248 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS;
11249
11250 dof->dofh_flags = 0;
11251 dof->dofh_hdrsize = sizeof (dof_hdr_t);
11252 dof->dofh_secsize = sizeof (dof_sec_t);
11253 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */
11254 dof->dofh_secoff = sizeof (dof_hdr_t);
11255 dof->dofh_loadsz = len;
11256 dof->dofh_filesz = len;
11257 dof->dofh_pad = 0;
11258
11259 /*
11260 * Fill in the option section header...
11261 */
11262 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t));
11263 sec->dofs_type = DOF_SECT_OPTDESC;
11264 sec->dofs_align = sizeof (uint64_t);
11265 sec->dofs_flags = DOF_SECF_LOAD;
11266 sec->dofs_entsize = sizeof (dof_optdesc_t);
11267
11268 opt = (dof_optdesc_t *)((uintptr_t)sec +
11269 roundup(sizeof (dof_sec_t), sizeof (uint64_t)));
11270
11271 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof;
11272 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
11273
11274 for (i = 0; i < DTRACEOPT_MAX; i++) {
11275 opt[i].dofo_option = i;
11276 opt[i].dofo_strtab = DOF_SECIDX_NONE;
11277 opt[i].dofo_value = state->dts_options[i];
11278 }
11279
11280 return (dof);
11281 }
11282
11283 static dof_hdr_t *
11284 dtrace_dof_copyin(uintptr_t uarg, int *errp)
11285 {
11286 dof_hdr_t hdr, *dof;
11287
11288 ASSERT(!MUTEX_HELD(&dtrace_lock));
11289
11290 /*
11291 * First, we're going to copyin() the sizeof (dof_hdr_t).
11292 */
11293 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) {
11294 dtrace_dof_error(NULL, "failed to copyin DOF header");
11295 *errp = EFAULT;
11296 return (NULL);
11297 }
11298
11299 /*
11300 * Now we'll allocate the entire DOF and copy it in -- provided
11301 * that the length isn't outrageous.
11302 */
11303 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
11304 dtrace_dof_error(&hdr, "load size exceeds maximum");
11305 *errp = E2BIG;
11306 return (NULL);
11307 }
11308
11309 if (hdr.dofh_loadsz < sizeof (hdr)) {
11310 dtrace_dof_error(&hdr, "invalid load size");
11311 *errp = EINVAL;
11312 return (NULL);
11313 }
11314
11315 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);
11316
11317 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 ||
11318 dof->dofh_loadsz != hdr.dofh_loadsz) {
11319 kmem_free(dof, hdr.dofh_loadsz);
11320 *errp = EFAULT;
11321 return (NULL);
11322 }
11323
11324 return (dof);
11325 }
11326
11327 static dof_hdr_t *
11328 dtrace_dof_property(const char *name)
11329 {
11330 uchar_t *buf;
11331 uint64_t loadsz;
11332 unsigned int len, i;
11333 dof_hdr_t *dof;
11334
11335 /*
11336 * Unfortunately, arrays of values in .conf files are always (and
11337 * only) interpreted to be integer arrays.
We must read our DOF 11338 * as an integer array, and then squeeze it into a byte array. 11339 */ 11340 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 11341 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 11342 return (NULL); 11343 11344 for (i = 0; i < len; i++) 11345 buf[i] = (uchar_t)(((int *)buf)[i]); 11346 11347 if (len < sizeof (dof_hdr_t)) { 11348 ddi_prop_free(buf); 11349 dtrace_dof_error(NULL, "truncated header"); 11350 return (NULL); 11351 } 11352 11353 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 11354 ddi_prop_free(buf); 11355 dtrace_dof_error(NULL, "truncated DOF"); 11356 return (NULL); 11357 } 11358 11359 if (loadsz >= dtrace_dof_maxsize) { 11360 ddi_prop_free(buf); 11361 dtrace_dof_error(NULL, "oversized DOF"); 11362 return (NULL); 11363 } 11364 11365 dof = kmem_alloc(loadsz, KM_SLEEP); 11366 bcopy(buf, dof, loadsz); 11367 ddi_prop_free(buf); 11368 11369 return (dof); 11370 } 11371 11372 static void 11373 dtrace_dof_destroy(dof_hdr_t *dof) 11374 { 11375 kmem_free(dof, dof->dofh_loadsz); 11376 } 11377 11378 /* 11379 * Return the dof_sec_t pointer corresponding to a given section index. If the 11380 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 11381 * a type other than DOF_SECT_NONE is specified, the header is checked against 11382 * this type and NULL is returned if the types do not match. 11383 */ 11384 static dof_sec_t * 11385 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 11386 { 11387 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 11388 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 11389 11390 if (i >= dof->dofh_secnum) { 11391 dtrace_dof_error(dof, "referenced section index is invalid"); 11392 return (NULL); 11393 } 11394 11395 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 11396 dtrace_dof_error(dof, "referenced section is not loadable"); 11397 return (NULL); 11398 } 11399 11400 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 11401 dtrace_dof_error(dof, "referenced section is the wrong type"); 11402 return (NULL); 11403 } 11404 11405 return (sec); 11406 } 11407 11408 static dtrace_probedesc_t * 11409 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 11410 { 11411 dof_probedesc_t *probe; 11412 dof_sec_t *strtab; 11413 uintptr_t daddr = (uintptr_t)dof; 11414 uintptr_t str; 11415 size_t size; 11416 11417 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 11418 dtrace_dof_error(dof, "invalid probe section"); 11419 return (NULL); 11420 } 11421 11422 if (sec->dofs_align != sizeof (dof_secidx_t)) { 11423 dtrace_dof_error(dof, "bad alignment in probe description"); 11424 return (NULL); 11425 } 11426 11427 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 11428 dtrace_dof_error(dof, "truncated probe description"); 11429 return (NULL); 11430 } 11431 11432 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 11433 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 11434 11435 if (strtab == NULL) 11436 return (NULL); 11437 11438 str = daddr + strtab->dofs_offset; 11439 size = strtab->dofs_size; 11440 11441 if (probe->dofp_provider >= strtab->dofs_size) { 11442 dtrace_dof_error(dof, "corrupt probe provider"); 11443 return (NULL); 11444 } 11445 11446 (void) strncpy(desc->dtpd_provider, 11447 (char *)(str + probe->dofp_provider), 11448 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 11449 11450 if (probe->dofp_mod >= strtab->dofs_size) { 11451 dtrace_dof_error(dof, "corrupt probe module"); 11452 return (NULL); 
11453 } 11454 11455 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 11456 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 11457 11458 if (probe->dofp_func >= strtab->dofs_size) { 11459 dtrace_dof_error(dof, "corrupt probe function"); 11460 return (NULL); 11461 } 11462 11463 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 11464 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 11465 11466 if (probe->dofp_name >= strtab->dofs_size) { 11467 dtrace_dof_error(dof, "corrupt probe name"); 11468 return (NULL); 11469 } 11470 11471 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 11472 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 11473 11474 return (desc); 11475 } 11476 11477 static dtrace_difo_t * 11478 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 11479 cred_t *cr) 11480 { 11481 dtrace_difo_t *dp; 11482 size_t ttl = 0; 11483 dof_difohdr_t *dofd; 11484 uintptr_t daddr = (uintptr_t)dof; 11485 size_t max = dtrace_difo_maxsize; 11486 int i, l, n; 11487 11488 static const struct { 11489 int section; 11490 int bufoffs; 11491 int lenoffs; 11492 int entsize; 11493 int align; 11494 const char *msg; 11495 } difo[] = { 11496 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 11497 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 11498 sizeof (dif_instr_t), "multiple DIF sections" }, 11499 11500 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 11501 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 11502 sizeof (uint64_t), "multiple integer tables" }, 11503 11504 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 11505 offsetof(dtrace_difo_t, dtdo_strlen), 0, 11506 sizeof (char), "multiple string tables" }, 11507 11508 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 11509 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 11510 sizeof (uint_t), "multiple variable tables" }, 11511 11512 { DOF_SECT_NONE, 0, 0, 0, NULL } 11513 }; 11514 11515 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 11516 dtrace_dof_error(dof, "invalid DIFO header section"); 11517 return (NULL); 11518 } 11519 11520 if (sec->dofs_align != sizeof (dof_secidx_t)) { 11521 dtrace_dof_error(dof, "bad alignment in DIFO header"); 11522 return (NULL); 11523 } 11524 11525 if (sec->dofs_size < sizeof (dof_difohdr_t) || 11526 sec->dofs_size % sizeof (dof_secidx_t)) { 11527 dtrace_dof_error(dof, "bad size in DIFO header"); 11528 return (NULL); 11529 } 11530 11531 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 11532 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 11533 11534 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 11535 dp->dtdo_rtype = dofd->dofd_rtype; 11536 11537 for (l = 0; l < n; l++) { 11538 dof_sec_t *subsec; 11539 void **bufp; 11540 uint32_t *lenp; 11541 11542 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 11543 dofd->dofd_links[l])) == NULL) 11544 goto err; /* invalid section link */ 11545 11546 if (ttl + subsec->dofs_size > max) { 11547 dtrace_dof_error(dof, "exceeds maximum size"); 11548 goto err; 11549 } 11550 11551 ttl += subsec->dofs_size; 11552 11553 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 11554 if (subsec->dofs_type != difo[i].section) 11555 continue; 11556 11557 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 11558 dtrace_dof_error(dof, "section not loaded"); 11559 goto err; 11560 } 11561 11562 if (subsec->dofs_align != difo[i].align) { 11563 dtrace_dof_error(dof, "bad alignment"); 11564 goto err; 11565 } 11566 11567 bufp = (void 
**)((uintptr_t)dp + difo[i].bufoffs);
11568 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs);
11569
11570 if (*bufp != NULL) {
11571 dtrace_dof_error(dof, difo[i].msg);
11572 goto err;
11573 }
11574
11575 if (difo[i].entsize != subsec->dofs_entsize) {
11576 dtrace_dof_error(dof, "entry size mismatch");
11577 goto err;
11578 }
11579
11580 if (subsec->dofs_entsize != 0 &&
11581 (subsec->dofs_size % subsec->dofs_entsize) != 0) {
11582 dtrace_dof_error(dof, "corrupt entry size");
11583 goto err;
11584 }
11585
11586 *lenp = subsec->dofs_size;
11587 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP);
11588 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset),
11589 *bufp, subsec->dofs_size);
11590
11591 if (subsec->dofs_entsize != 0)
11592 *lenp /= subsec->dofs_entsize;
11593
11594 break;
11595 }
11596
11597 /*
11598 * If we encounter a loadable DIFO sub-section that is not
11599 * known to us, assume this is a broken program and fail.
11600 */
11601 if (difo[i].section == DOF_SECT_NONE &&
11602 (subsec->dofs_flags & DOF_SECF_LOAD)) {
11603 dtrace_dof_error(dof, "unrecognized DIFO subsection");
11604 goto err;
11605 }
11606 }
11607
11608 if (dp->dtdo_buf == NULL) {
11609 /*
11610 * We can't have a DIF object without DIF text.
11611 */
11612 dtrace_dof_error(dof, "missing DIF text");
11613 goto err;
11614 }
11615
11616 /*
11617 * Before we validate the DIF object, run through the variable table
11618 * looking for the strings -- if any of their sizes are zero, we'll set
11619 * their size to be the system-wide default string size. Note that
11620 * this should _not_ happen if the "strsize" option has been set --
11621 * in this case, the compiler should have set the size to reflect the
11622 * setting of the option.
11623 */
11624 for (i = 0; i < dp->dtdo_varlen; i++) {
11625 dtrace_difv_t *v = &dp->dtdo_vartab[i];
11626 dtrace_diftype_t *t = &v->dtdv_type;
11627
11628 if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
11629 continue;
11630
11631 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
11632 t->dtdt_size = dtrace_strsize_default;
11633 }
11634
11635 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
11636 goto err;
11637
11638 dtrace_difo_init(dp, vstate);
11639 return (dp);
11640
11641 err:
11642 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
11643 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
11644 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
11645 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
11646
11647 kmem_free(dp, sizeof (dtrace_difo_t));
11648 return (NULL);
11649 }
11650
11651 static dtrace_predicate_t *
11652 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
11653 cred_t *cr)
11654 {
11655 dtrace_difo_t *dp;
11656
11657 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
11658 return (NULL);
11659
11660 return (dtrace_predicate_create(dp));
11661 }
11662
11663 static dtrace_actdesc_t *
11664 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
11665 cred_t *cr)
11666 {
11667 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
11668 dof_actdesc_t *desc;
11669 dof_sec_t *difosec;
11670 size_t offs;
11671 uintptr_t daddr = (uintptr_t)dof;
11672 uint64_t arg;
11673 dtrace_actkind_t kind;
11674
11675 if (sec->dofs_type != DOF_SECT_ACTDESC) {
11676 dtrace_dof_error(dof, "invalid action section");
11677 return (NULL);
11678 }
11679
11680 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
11681 dtrace_dof_error(dof, "truncated action
description"); 11682 return (NULL); 11683 } 11684 11685 if (sec->dofs_align != sizeof (uint64_t)) { 11686 dtrace_dof_error(dof, "bad alignment in action description"); 11687 return (NULL); 11688 } 11689 11690 if (sec->dofs_size < sec->dofs_entsize) { 11691 dtrace_dof_error(dof, "section entry size exceeds total size"); 11692 return (NULL); 11693 } 11694 11695 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 11696 dtrace_dof_error(dof, "bad entry size in action description"); 11697 return (NULL); 11698 } 11699 11700 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 11701 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 11702 return (NULL); 11703 } 11704 11705 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 11706 desc = (dof_actdesc_t *)(daddr + 11707 (uintptr_t)sec->dofs_offset + offs); 11708 kind = (dtrace_actkind_t)desc->dofa_kind; 11709 11710 if (DTRACEACT_ISPRINTFLIKE(kind) && 11711 (kind != DTRACEACT_PRINTA || 11712 desc->dofa_strtab != DOF_SECIDX_NONE)) { 11713 dof_sec_t *strtab; 11714 char *str, *fmt; 11715 uint64_t i; 11716 11717 /* 11718 * printf()-like actions must have a format string. 11719 */ 11720 if ((strtab = dtrace_dof_sect(dof, 11721 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 11722 goto err; 11723 11724 str = (char *)((uintptr_t)dof + 11725 (uintptr_t)strtab->dofs_offset); 11726 11727 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 11728 if (str[i] == '\0') 11729 break; 11730 } 11731 11732 if (i >= strtab->dofs_size) { 11733 dtrace_dof_error(dof, "bogus format string"); 11734 goto err; 11735 } 11736 11737 if (i == desc->dofa_arg) { 11738 dtrace_dof_error(dof, "empty format string"); 11739 goto err; 11740 } 11741 11742 i -= desc->dofa_arg; 11743 fmt = kmem_alloc(i + 1, KM_SLEEP); 11744 bcopy(&str[desc->dofa_arg], fmt, i + 1); 11745 arg = (uint64_t)(uintptr_t)fmt; 11746 } else { 11747 if (kind == DTRACEACT_PRINTA) { 11748 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 11749 arg = 0; 11750 } else { 11751 arg = desc->dofa_arg; 11752 } 11753 } 11754 11755 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 11756 desc->dofa_uarg, arg); 11757 11758 if (last != NULL) { 11759 last->dtad_next = act; 11760 } else { 11761 first = act; 11762 } 11763 11764 last = act; 11765 11766 if (desc->dofa_difo == DOF_SECIDX_NONE) 11767 continue; 11768 11769 if ((difosec = dtrace_dof_sect(dof, 11770 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 11771 goto err; 11772 11773 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 11774 11775 if (act->dtad_difo == NULL) 11776 goto err; 11777 } 11778 11779 ASSERT(first != NULL); 11780 return (first); 11781 11782 err: 11783 for (act = first; act != NULL; act = next) { 11784 next = act->dtad_next; 11785 dtrace_actdesc_release(act, vstate); 11786 } 11787 11788 return (NULL); 11789 } 11790 11791 static dtrace_ecbdesc_t * 11792 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 11793 cred_t *cr) 11794 { 11795 dtrace_ecbdesc_t *ep; 11796 dof_ecbdesc_t *ecb; 11797 dtrace_probedesc_t *desc; 11798 dtrace_predicate_t *pred = NULL; 11799 11800 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 11801 dtrace_dof_error(dof, "truncated ECB description"); 11802 return (NULL); 11803 } 11804 11805 if (sec->dofs_align != sizeof (uint64_t)) { 11806 dtrace_dof_error(dof, "bad alignment in ECB description"); 11807 return (NULL); 11808 } 11809 11810 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 11811 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 11812 
11813 if (sec == NULL) 11814 return (NULL); 11815 11816 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 11817 ep->dted_uarg = ecb->dofe_uarg; 11818 desc = &ep->dted_probe; 11819 11820 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 11821 goto err; 11822 11823 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 11824 if ((sec = dtrace_dof_sect(dof, 11825 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 11826 goto err; 11827 11828 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 11829 goto err; 11830 11831 ep->dted_pred.dtpdd_predicate = pred; 11832 } 11833 11834 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 11835 if ((sec = dtrace_dof_sect(dof, 11836 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 11837 goto err; 11838 11839 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 11840 11841 if (ep->dted_action == NULL) 11842 goto err; 11843 } 11844 11845 return (ep); 11846 11847 err: 11848 if (pred != NULL) 11849 dtrace_predicate_release(pred, vstate); 11850 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 11851 return (NULL); 11852 } 11853 11854 /* 11855 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 11856 * specified DOF. At present, this amounts to simply adding 'ubase' to the 11857 * site of any user SETX relocations to account for load object base address. 11858 * In the future, if we need other relocations, this function can be extended. 11859 */ 11860 static int 11861 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 11862 { 11863 uintptr_t daddr = (uintptr_t)dof; 11864 dof_relohdr_t *dofr = 11865 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 11866 dof_sec_t *ss, *rs, *ts; 11867 dof_relodesc_t *r; 11868 uint_t i, n; 11869 11870 if (sec->dofs_size < sizeof (dof_relohdr_t) || 11871 sec->dofs_align != sizeof (dof_secidx_t)) { 11872 dtrace_dof_error(dof, "invalid relocation header"); 11873 return (-1); 11874 } 11875 11876 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 11877 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 11878 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 11879 11880 if (ss == NULL || rs == NULL || ts == NULL) 11881 return (-1); /* dtrace_dof_error() has been called already */ 11882 11883 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 11884 rs->dofs_align != sizeof (uint64_t)) { 11885 dtrace_dof_error(dof, "invalid relocation section"); 11886 return (-1); 11887 } 11888 11889 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 11890 n = rs->dofs_size / rs->dofs_entsize; 11891 11892 for (i = 0; i < n; i++) { 11893 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 11894 11895 switch (r->dofr_type) { 11896 case DOF_RELO_NONE: 11897 break; 11898 case DOF_RELO_SETX: 11899 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 11900 sizeof (uint64_t) > ts->dofs_size) { 11901 dtrace_dof_error(dof, "bad relocation offset"); 11902 return (-1); 11903 } 11904 11905 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 11906 dtrace_dof_error(dof, "misaligned setx relo"); 11907 return (-1); 11908 } 11909 11910 *(uint64_t *)taddr += ubase; 11911 break; 11912 default: 11913 dtrace_dof_error(dof, "invalid relocation type"); 11914 return (-1); 11915 } 11916 11917 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 11918 } 11919 11920 return (0); 11921 } 11922 11923 /* 11924 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 11925 * header: it should be at the front of a memory region that is at least 11926 * sizeof (dof_hdr_t) in size -- 
and then at least dof_hdr.dofh_loadsz in 11927 * size. It need not be validated in any other way. 11928 */ 11929 static int 11930 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 11931 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 11932 { 11933 uint64_t len = dof->dofh_loadsz, seclen; 11934 uintptr_t daddr = (uintptr_t)dof; 11935 dtrace_ecbdesc_t *ep; 11936 dtrace_enabling_t *enab; 11937 uint_t i; 11938 11939 ASSERT(MUTEX_HELD(&dtrace_lock)); 11940 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 11941 11942 /* 11943 * Check the DOF header identification bytes. In addition to checking 11944 * valid settings, we also verify that unused bits/bytes are zeroed so 11945 * we can use them later without fear of regressing existing binaries. 11946 */ 11947 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 11948 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 11949 dtrace_dof_error(dof, "DOF magic string mismatch"); 11950 return (-1); 11951 } 11952 11953 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 11954 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 11955 dtrace_dof_error(dof, "DOF has invalid data model"); 11956 return (-1); 11957 } 11958 11959 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 11960 dtrace_dof_error(dof, "DOF encoding mismatch"); 11961 return (-1); 11962 } 11963 11964 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 11965 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 11966 dtrace_dof_error(dof, "DOF version mismatch"); 11967 return (-1); 11968 } 11969 11970 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 11971 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 11972 return (-1); 11973 } 11974 11975 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 11976 dtrace_dof_error(dof, "DOF uses too many integer registers"); 11977 return (-1); 11978 } 11979 11980 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 11981 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 11982 return (-1); 11983 } 11984 11985 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 11986 if (dof->dofh_ident[i] != 0) { 11987 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 11988 return (-1); 11989 } 11990 } 11991 11992 if (dof->dofh_flags & ~DOF_FL_VALID) { 11993 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 11994 return (-1); 11995 } 11996 11997 if (dof->dofh_secsize == 0) { 11998 dtrace_dof_error(dof, "zero section header size"); 11999 return (-1); 12000 } 12001 12002 /* 12003 * Check that the section headers don't exceed the amount of DOF 12004 * data. Note that we cast the section size and number of sections 12005 * to uint64_t's to prevent possible overflow in the multiplication. 12006 */ 12007 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 12008 12009 if (dof->dofh_secoff > len || seclen > len || 12010 dof->dofh_secoff + seclen > len) { 12011 dtrace_dof_error(dof, "truncated section headers"); 12012 return (-1); 12013 } 12014 12015 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 12016 dtrace_dof_error(dof, "misaligned section headers"); 12017 return (-1); 12018 } 12019 12020 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 12021 dtrace_dof_error(dof, "misaligned section size"); 12022 return (-1); 12023 } 12024 12025 /* 12026 * Take an initial pass through the section headers to be sure that 12027 * the headers don't have stray offsets. If the 'noprobes' flag is 12028 * set, do not permit sections relating to providers, probes, or args. 
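 *
 * For example, a section whose dofs_offset points past dofh_loadsz,
 * whose dofs_align is not a power of two, or whose string table does
 * not end in a NUL byte is rejected here, before any section body is
 * interpreted.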
12029 */ 12030 for (i = 0; i < dof->dofh_secnum; i++) { 12031 dof_sec_t *sec = (dof_sec_t *)(daddr + 12032 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12033 12034 if (noprobes) { 12035 switch (sec->dofs_type) { 12036 case DOF_SECT_PROVIDER: 12037 case DOF_SECT_PROBES: 12038 case DOF_SECT_PRARGS: 12039 case DOF_SECT_PROFFS: 12040 dtrace_dof_error(dof, "illegal sections " 12041 "for enabling"); 12042 return (-1); 12043 } 12044 } 12045 12046 if (DOF_SEC_ISLOADABLE(sec->dofs_type) && 12047 !(sec->dofs_flags & DOF_SECF_LOAD)) { 12048 dtrace_dof_error(dof, "loadable section with load " 12049 "flag unset"); 12050 return (-1); 12051 } 12052 12053 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12054 continue; /* just ignore non-loadable sections */ 12055 12056 if (sec->dofs_align & (sec->dofs_align - 1)) { 12057 dtrace_dof_error(dof, "bad section alignment"); 12058 return (-1); 12059 } 12060 12061 if (sec->dofs_offset & (sec->dofs_align - 1)) { 12062 dtrace_dof_error(dof, "misaligned section"); 12063 return (-1); 12064 } 12065 12066 if (sec->dofs_offset > len || sec->dofs_size > len || 12067 sec->dofs_offset + sec->dofs_size > len) { 12068 dtrace_dof_error(dof, "corrupt section header"); 12069 return (-1); 12070 } 12071 12072 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 12073 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 12074 dtrace_dof_error(dof, "non-terminating string table"); 12075 return (-1); 12076 } 12077 } 12078 12079 /* 12080 * Take a second pass through the sections and locate and perform any 12081 * relocations that are present. We do this after the first pass to 12082 * be sure that all sections have had their headers validated. 12083 */ 12084 for (i = 0; i < dof->dofh_secnum; i++) { 12085 dof_sec_t *sec = (dof_sec_t *)(daddr + 12086 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12087 12088 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12089 continue; /* skip sections that are not loadable */ 12090 12091 switch (sec->dofs_type) { 12092 case DOF_SECT_URELHDR: 12093 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 12094 return (-1); 12095 break; 12096 } 12097 } 12098 12099 if ((enab = *enabp) == NULL) 12100 enab = *enabp = dtrace_enabling_create(vstate); 12101 12102 for (i = 0; i < dof->dofh_secnum; i++) { 12103 dof_sec_t *sec = (dof_sec_t *)(daddr + 12104 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12105 12106 if (sec->dofs_type != DOF_SECT_ECBDESC) 12107 continue; 12108 12109 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 12110 dtrace_enabling_destroy(enab); 12111 *enabp = NULL; 12112 return (-1); 12113 } 12114 12115 dtrace_enabling_add(enab, ep); 12116 } 12117 12118 return (0); 12119 } 12120 12121 /* 12122 * Process DOF for any options. This routine assumes that the DOF has been 12123 * at least processed by dtrace_dof_slurp(). 
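 *
 * As a hypothetical example: a consumer that set "bufsize=4m" shows up
 * here as a dof_optdesc_t with dofo_option set to DTRACEOPT_BUFSIZE,
 * dofo_strtab set to DOF_SECIDX_NONE, and dofo_value set to 4194304;
 * the value is applied to the state via dtrace_state_option().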
12124 */ 12125 static int 12126 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 12127 { 12128 int i, rval; 12129 uint32_t entsize; 12130 size_t offs; 12131 dof_optdesc_t *desc; 12132 12133 for (i = 0; i < dof->dofh_secnum; i++) { 12134 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 12135 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12136 12137 if (sec->dofs_type != DOF_SECT_OPTDESC) 12138 continue; 12139 12140 if (sec->dofs_align != sizeof (uint64_t)) { 12141 dtrace_dof_error(dof, "bad alignment in " 12142 "option description"); 12143 return (EINVAL); 12144 } 12145 12146 if ((entsize = sec->dofs_entsize) == 0) { 12147 dtrace_dof_error(dof, "zeroed option entry size"); 12148 return (EINVAL); 12149 } 12150 12151 if (entsize < sizeof (dof_optdesc_t)) { 12152 dtrace_dof_error(dof, "bad option entry size"); 12153 return (EINVAL); 12154 } 12155 12156 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 12157 desc = (dof_optdesc_t *)((uintptr_t)dof + 12158 (uintptr_t)sec->dofs_offset + offs); 12159 12160 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 12161 dtrace_dof_error(dof, "non-zero option string"); 12162 return (EINVAL); 12163 } 12164 12165 if (desc->dofo_value == DTRACEOPT_UNSET) { 12166 dtrace_dof_error(dof, "unset option"); 12167 return (EINVAL); 12168 } 12169 12170 if ((rval = dtrace_state_option(state, 12171 desc->dofo_option, desc->dofo_value)) != 0) { 12172 dtrace_dof_error(dof, "rejected option"); 12173 return (rval); 12174 } 12175 } 12176 } 12177 12178 return (0); 12179 } 12180 12181 /* 12182 * DTrace Consumer State Functions 12183 */ 12184 int 12185 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 12186 { 12187 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 12188 void *base; 12189 uintptr_t limit; 12190 dtrace_dynvar_t *dvar, *next, *start; 12191 int i; 12192 12193 ASSERT(MUTEX_HELD(&dtrace_lock)); 12194 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 12195 12196 bzero(dstate, sizeof (dtrace_dstate_t)); 12197 12198 if ((dstate->dtds_chunksize = chunksize) == 0) 12199 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 12200 12201 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 12202 size = min; 12203 12204 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 12205 return (ENOMEM); 12206 12207 dstate->dtds_size = size; 12208 dstate->dtds_base = base; 12209 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 12210 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 12211 12212 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 12213 12214 if (hashsize != 1 && (hashsize & 1)) 12215 hashsize--; 12216 12217 dstate->dtds_hashsize = hashsize; 12218 dstate->dtds_hash = dstate->dtds_base; 12219 12220 /* 12221 * Set all of our hash buckets to point to the single sink, and (if 12222 * it hasn't already been set), set the sink's hash value to be the 12223 * sink sentinel value. The sink is needed for dynamic variable 12224 * lookups to know that they have iterated over an entire, valid hash 12225 * chain. 12226 */ 12227 for (i = 0; i < hashsize; i++) 12228 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 12229 12230 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 12231 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 12232 12233 /* 12234 * Determine number of active CPUs. Divide free list evenly among 12235 * active CPUs. 
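 *
 * Illustratively (with hypothetical numbers): if maxper works out to
 * four chunks per CPU on a two-CPU system, CPU 0's free list receives
 * the first four chunks and CPU 1, being the last CPU, receives
 * whatever remains; if maxper is zero, every chunk lands on CPU 0's
 * free list and the loop below terminates early.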
12236 */ 12237 start = (dtrace_dynvar_t *) 12238 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 12239 limit = (uintptr_t)base + size; 12240 12241 maxper = (limit - (uintptr_t)start) / NCPU; 12242 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 12243 12244 for (i = 0; i < NCPU; i++) { 12245 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 12246 12247 /* 12248 * If we don't even have enough chunks to make it once through 12249 * NCPUs, we're just going to allocate everything to the first 12250 * CPU. And if we're on the last CPU, we're going to allocate 12251 * whatever is left over. In either case, we set the limit to 12252 * be the limit of the dynamic variable space. 12253 */ 12254 if (maxper == 0 || i == NCPU - 1) { 12255 limit = (uintptr_t)base + size; 12256 start = NULL; 12257 } else { 12258 limit = (uintptr_t)start + maxper; 12259 start = (dtrace_dynvar_t *)limit; 12260 } 12261 12262 ASSERT(limit <= (uintptr_t)base + size); 12263 12264 for (;;) { 12265 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 12266 dstate->dtds_chunksize); 12267 12268 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 12269 break; 12270 12271 dvar->dtdv_next = next; 12272 dvar = next; 12273 } 12274 12275 if (maxper == 0) 12276 break; 12277 } 12278 12279 return (0); 12280 } 12281 12282 void 12283 dtrace_dstate_fini(dtrace_dstate_t *dstate) 12284 { 12285 ASSERT(MUTEX_HELD(&cpu_lock)); 12286 12287 if (dstate->dtds_base == NULL) 12288 return; 12289 12290 kmem_free(dstate->dtds_base, dstate->dtds_size); 12291 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 12292 } 12293 12294 static void 12295 dtrace_vstate_fini(dtrace_vstate_t *vstate) 12296 { 12297 /* 12298 * Logical XOR, where are you? 12299 */ 12300 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 12301 12302 if (vstate->dtvs_nglobals > 0) { 12303 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 12304 sizeof (dtrace_statvar_t *)); 12305 } 12306 12307 if (vstate->dtvs_ntlocals > 0) { 12308 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 12309 sizeof (dtrace_difv_t)); 12310 } 12311 12312 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 12313 12314 if (vstate->dtvs_nlocals > 0) { 12315 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 12316 sizeof (dtrace_statvar_t *)); 12317 } 12318 } 12319 12320 static void 12321 dtrace_state_clean(dtrace_state_t *state) 12322 { 12323 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 12324 return; 12325 12326 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 12327 dtrace_speculation_clean(state); 12328 } 12329 12330 static void 12331 dtrace_state_deadman(dtrace_state_t *state) 12332 { 12333 hrtime_t now; 12334 12335 dtrace_sync(); 12336 12337 now = dtrace_gethrtime(); 12338 12339 if (state != dtrace_anon.dta_state && 12340 now - state->dts_laststatus >= dtrace_deadman_user) 12341 return; 12342 12343 /* 12344 * We must be sure that dts_alive never appears to be less than the 12345 * value upon entry to dtrace_state_deadman(), and because we lack a 12346 * dtrace_cas64(), we cannot store to it atomically. We thus instead 12347 * store INT64_MAX to it, followed by a memory barrier, followed by 12348 * the new value. This assures that dts_alive never appears to be 12349 * less than its true value, regardless of the order in which the 12350 * stores to the underlying storage are issued. 
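 *
 * (On a 32-bit kernel, for instance, the 64-bit store may be issued as
 * two 32-bit stores; interleaving with a direct store of the new value
 * could expose a torn value below the old dts_alive. Staging through
 * INT64_MAX first ensures that every torn combination a reader can
 * observe is at least as large as the true value.)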
12351 */ 12352 state->dts_alive = INT64_MAX; 12353 dtrace_membar_producer(); 12354 state->dts_alive = now; 12355 } 12356 12357 dtrace_state_t * 12358 dtrace_state_create(dev_t *devp, cred_t *cr) 12359 { 12360 minor_t minor; 12361 major_t major; 12362 char c[30]; 12363 dtrace_state_t *state; 12364 dtrace_optval_t *opt; 12365 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 12366 12367 ASSERT(MUTEX_HELD(&dtrace_lock)); 12368 ASSERT(MUTEX_HELD(&cpu_lock)); 12369 12370 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 12371 VM_BESTFIT | VM_SLEEP); 12372 12373 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 12374 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 12375 return (NULL); 12376 } 12377 12378 state = ddi_get_soft_state(dtrace_softstate, minor); 12379 state->dts_epid = DTRACE_EPIDNONE + 1; 12380 12381 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor); 12382 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 12383 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 12384 12385 if (devp != NULL) { 12386 major = getemajor(*devp); 12387 } else { 12388 major = ddi_driver_major(dtrace_devi); 12389 } 12390 12391 state->dts_dev = makedevice(major, minor); 12392 12393 if (devp != NULL) 12394 *devp = state->dts_dev; 12395 12396 /* 12397 * We allocate NCPU buffers. On the one hand, this can be quite 12398 * a bit of memory per instance (nearly 36K on a Starcat). On the 12399 * other hand, it saves an additional memory reference in the probe 12400 * path. 12401 */ 12402 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 12403 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 12404 state->dts_cleaner = CYCLIC_NONE; 12405 state->dts_deadman = CYCLIC_NONE; 12406 state->dts_vstate.dtvs_state = state; 12407 12408 for (i = 0; i < DTRACEOPT_MAX; i++) 12409 state->dts_options[i] = DTRACEOPT_UNSET; 12410 12411 /* 12412 * Set the default options. 12413 */ 12414 opt = state->dts_options; 12415 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 12416 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 12417 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 12418 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 12419 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 12420 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 12421 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 12422 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 12423 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 12424 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 12425 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 12426 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 12427 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 12428 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 12429 12430 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 12431 12432 /* 12433 * Depending on the user credentials, we set flag bits which alter probe 12434 * visibility or the amount of destructiveness allowed. In the case of 12435 * actual anonymous tracing, or the possession of all privileges, all of 12436 * the normal checks are bypassed. 12437 */ 12438 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 12439 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 12440 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 12441 } else { 12442 /* 12443 * Set up the credentials for this instantiation. 
We take a 12444 * hold on the credential to prevent it from disappearing on 12445 * us; this in turn prevents the zone_t referenced by this 12446 * credential from disappearing. This means that we can 12447 * examine the credential and the zone from probe context. 12448 */ 12449 crhold(cr); 12450 state->dts_cred.dcr_cred = cr; 12451 12452 /* 12453 * CRA_PROC means "we have *some* privilege for dtrace" and 12454 * unlocks the use of variables like pid, zonename, etc. 12455 */ 12456 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 12457 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 12458 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 12459 } 12460 12461 /* 12462 * dtrace_user allows use of syscall and profile providers. 12463 * If the user also has proc_owner and/or proc_zone, we 12464 * extend the scope to include additional visibility and 12465 * destructive power. 12466 */ 12467 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 12468 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 12469 state->dts_cred.dcr_visible |= 12470 DTRACE_CRV_ALLPROC; 12471 12472 state->dts_cred.dcr_action |= 12473 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 12474 } 12475 12476 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 12477 state->dts_cred.dcr_visible |= 12478 DTRACE_CRV_ALLZONE; 12479 12480 state->dts_cred.dcr_action |= 12481 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 12482 } 12483 12484 /* 12485 * If we have all privs in whatever zone this is, 12486 * we can do destructive things to processes which 12487 * have altered credentials. 12488 */ 12489 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 12490 cr->cr_zone->zone_privset)) { 12491 state->dts_cred.dcr_action |= 12492 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 12493 } 12494 } 12495 12496 /* 12497 * Holding the dtrace_kernel privilege also implies that 12498 * the user has the dtrace_user privilege from a visibility 12499 * perspective. But without further privileges, some 12500 * destructive actions are not available. 12501 */ 12502 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 12503 /* 12504 * Make all probes in all zones visible. However, 12505 * this doesn't mean that all actions become available 12506 * to all zones. 12507 */ 12508 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 12509 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 12510 12511 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 12512 DTRACE_CRA_PROC; 12513 /* 12514 * Holding proc_owner means that destructive actions 12515 * for *this* zone are allowed. 12516 */ 12517 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 12518 state->dts_cred.dcr_action |= 12519 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 12520 12521 /* 12522 * Holding proc_zone means that destructive actions 12523 * for this user/group ID in all zones is allowed. 12524 */ 12525 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 12526 state->dts_cred.dcr_action |= 12527 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 12528 12529 /* 12530 * If we have all privs in whatever zone this is, 12531 * we can do destructive things to processes which 12532 * have altered credentials. 12533 */ 12534 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 12535 cr->cr_zone->zone_privset)) { 12536 state->dts_cred.dcr_action |= 12537 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 12538 } 12539 } 12540 12541 /* 12542 * Holding the dtrace_proc privilege gives control over fasttrap 12543 * and pid providers. We need to grant wider destructive 12544 * privileges in the event that the user has proc_owner and/or 12545 * proc_zone. 
12546 */ 12547 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 12548 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 12549 state->dts_cred.dcr_action |= 12550 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 12551 12552 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 12553 state->dts_cred.dcr_action |= 12554 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 12555 } 12556 } 12557 12558 return (state); 12559 } 12560 12561 static int 12562 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 12563 { 12564 dtrace_optval_t *opt = state->dts_options, size; 12565 processorid_t cpu; 12566 int flags = 0, rval; 12567 12568 ASSERT(MUTEX_HELD(&dtrace_lock)); 12569 ASSERT(MUTEX_HELD(&cpu_lock)); 12570 ASSERT(which < DTRACEOPT_MAX); 12571 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 12572 (state == dtrace_anon.dta_state && 12573 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 12574 12575 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 12576 return (0); 12577 12578 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 12579 cpu = opt[DTRACEOPT_CPU]; 12580 12581 if (which == DTRACEOPT_SPECSIZE) 12582 flags |= DTRACEBUF_NOSWITCH; 12583 12584 if (which == DTRACEOPT_BUFSIZE) { 12585 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 12586 flags |= DTRACEBUF_RING; 12587 12588 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 12589 flags |= DTRACEBUF_FILL; 12590 12591 if (state != dtrace_anon.dta_state || 12592 state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 12593 flags |= DTRACEBUF_INACTIVE; 12594 } 12595 12596 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) { 12597 /* 12598 * The size must be 8-byte aligned. If the size is not 8-byte 12599 * aligned, drop it down by the difference. 12600 */ 12601 if (size & (sizeof (uint64_t) - 1)) 12602 size -= size & (sizeof (uint64_t) - 1); 12603 12604 if (size < state->dts_reserve) { 12605 /* 12606 * Buffers always must be large enough to accommodate 12607 * their prereserved space. We return E2BIG instead 12608 * of ENOMEM in this case to allow for user-level 12609 * software to differentiate the cases. 12610 */ 12611 return (E2BIG); 12612 } 12613 12614 rval = dtrace_buffer_alloc(buf, size, flags, cpu); 12615 12616 if (rval != ENOMEM) { 12617 opt[which] = size; 12618 return (rval); 12619 } 12620 12621 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 12622 return (rval); 12623 } 12624 12625 return (ENOMEM); 12626 } 12627 12628 static int 12629 dtrace_state_buffers(dtrace_state_t *state) 12630 { 12631 dtrace_speculation_t *spec = state->dts_speculations; 12632 int rval, i; 12633 12634 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 12635 DTRACEOPT_BUFSIZE)) != 0) 12636 return (rval); 12637 12638 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 12639 DTRACEOPT_AGGSIZE)) != 0) 12640 return (rval); 12641 12642 for (i = 0; i < state->dts_nspeculations; i++) { 12643 if ((rval = dtrace_state_buffer(state, 12644 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 12645 return (rval); 12646 } 12647 12648 return (0); 12649 } 12650 12651 static void 12652 dtrace_state_prereserve(dtrace_state_t *state) 12653 { 12654 dtrace_ecb_t *ecb; 12655 dtrace_probe_t *probe; 12656 12657 state->dts_reserve = 0; 12658 12659 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 12660 return; 12661 12662 /* 12663 * If our buffer policy is a "fill" buffer policy, we need to set the 12664 * prereserved space to be the space required by the END probes. 
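* (Because a fill buffer is never switched, records that do not fit are simply dropped; reserving the END probes' space up front guarantees that their records will fit.)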
12665 */ 12666 probe = dtrace_probes[dtrace_probeid_end - 1]; 12667 ASSERT(probe != NULL); 12668 12669 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 12670 if (ecb->dte_state != state) 12671 continue; 12672 12673 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 12674 } 12675 } 12676 12677 static int 12678 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 12679 { 12680 dtrace_optval_t *opt = state->dts_options, sz, nspec; 12681 dtrace_speculation_t *spec; 12682 dtrace_buffer_t *buf; 12683 cyc_handler_t hdlr; 12684 cyc_time_t when; 12685 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 12686 dtrace_icookie_t cookie; 12687 12688 mutex_enter(&cpu_lock); 12689 mutex_enter(&dtrace_lock); 12690 12691 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 12692 rval = EBUSY; 12693 goto out; 12694 } 12695 12696 /* 12697 * Before we can perform any checks, we must prime all of the 12698 * retained enablings that correspond to this state. 12699 */ 12700 dtrace_enabling_prime(state); 12701 12702 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 12703 rval = EACCES; 12704 goto out; 12705 } 12706 12707 dtrace_state_prereserve(state); 12708 12709 /* 12710 * Now we want to try to allocate our speculations. 12711 * We do not automatically resize the number of speculations; if 12712 * this fails, we will fail the operation. 12713 */ 12714 nspec = opt[DTRACEOPT_NSPEC]; 12715 ASSERT(nspec != DTRACEOPT_UNSET); 12716 12717 if (nspec > INT_MAX) { 12718 rval = ENOMEM; 12719 goto out; 12720 } 12721 12722 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP); 12723 12724 if (spec == NULL) { 12725 rval = ENOMEM; 12726 goto out; 12727 } 12728 12729 state->dts_speculations = spec; 12730 state->dts_nspeculations = (int)nspec; 12731 12732 for (i = 0; i < nspec; i++) { 12733 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) { 12734 rval = ENOMEM; 12735 goto err; 12736 } 12737 12738 spec[i].dtsp_buffer = buf; 12739 } 12740 12741 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 12742 if (dtrace_anon.dta_state == NULL) { 12743 rval = ENOENT; 12744 goto out; 12745 } 12746 12747 if (state->dts_necbs != 0) { 12748 rval = EALREADY; 12749 goto out; 12750 } 12751 12752 state->dts_anon = dtrace_anon_grab(); 12753 ASSERT(state->dts_anon != NULL); 12754 state = state->dts_anon; 12755 12756 /* 12757 * We want "grabanon" to be set in the grabbed state, so we'll 12758 * copy that option value from the grabbing state into the 12759 * grabbed state. 12760 */ 12761 state->dts_options[DTRACEOPT_GRABANON] = 12762 opt[DTRACEOPT_GRABANON]; 12763 12764 *cpu = dtrace_anon.dta_beganon; 12765 12766 /* 12767 * If the anonymous state is active (as it almost certainly 12768 * is if the anonymous enabling ultimately matched anything), 12769 * we don't allow any further option processing -- but we 12770 * don't return failure. 12771 */ 12772 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 12773 goto out; 12774 } 12775 12776 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 12777 opt[DTRACEOPT_AGGSIZE] != 0) { 12778 if (state->dts_aggregations == NULL) { 12779 /* 12780 * We're not going to create an aggregation buffer 12781 * because we don't have any ECBs that contain 12782 * aggregations -- set this option to 0. 12783 */ 12784 opt[DTRACEOPT_AGGSIZE] = 0; 12785 } else { 12786 /* 12787 * If we have an aggregation buffer, we must also have 12788 * a buffer to use as scratch.
12789 */ 12790 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 12791 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 12792 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 12793 } 12794 } 12795 } 12796 12797 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 12798 opt[DTRACEOPT_SPECSIZE] != 0) { 12799 if (!state->dts_speculates) { 12800 /* 12801 * We're not going to create speculation buffers 12802 * because we don't have any ECBs that actually 12803 * speculate -- set the speculation size to 0. 12804 */ 12805 opt[DTRACEOPT_SPECSIZE] = 0; 12806 } 12807 } 12808 12809 /* 12810 * The bare minimum size for any buffer that we're actually going to 12811 * do anything to is sizeof (uint64_t). 12812 */ 12813 sz = sizeof (uint64_t); 12814 12815 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 12816 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 12817 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 12818 /* 12819 * A buffer size has been explicitly set to 0 (or to a size 12820 * that will be adjusted to 0) and we need the space -- we 12821 * need to return failure. We return ENOSPC to differentiate 12822 * it from failing to allocate a buffer due to failure to meet 12823 * the reserve (for which we return E2BIG). 12824 */ 12825 rval = ENOSPC; 12826 goto out; 12827 } 12828 12829 if ((rval = dtrace_state_buffers(state)) != 0) 12830 goto err; 12831 12832 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 12833 sz = dtrace_dstate_defsize; 12834 12835 do { 12836 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 12837 12838 if (rval == 0) 12839 break; 12840 12841 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 12842 goto err; 12843 } while (sz >>= 1); 12844 12845 opt[DTRACEOPT_DYNVARSIZE] = sz; 12846 12847 if (rval != 0) 12848 goto err; 12849 12850 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 12851 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 12852 12853 if (opt[DTRACEOPT_CLEANRATE] == 0) 12854 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 12855 12856 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 12857 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 12858 12859 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 12860 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 12861 12862 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 12863 hdlr.cyh_arg = state; 12864 hdlr.cyh_level = CY_LOW_LEVEL; 12865 12866 when.cyt_when = 0; 12867 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 12868 12869 state->dts_cleaner = cyclic_add(&hdlr, &when); 12870 12871 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 12872 hdlr.cyh_arg = state; 12873 hdlr.cyh_level = CY_LOW_LEVEL; 12874 12875 when.cyt_when = 0; 12876 when.cyt_interval = dtrace_deadman_interval; 12877 12878 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 12879 state->dts_deadman = cyclic_add(&hdlr, &when); 12880 12881 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 12882 12883 /* 12884 * Now it's time to actually fire the BEGIN probe. We need to disable 12885 * interrupts here both to record the CPU on which we fired the BEGIN 12886 * probe (the data from this CPU will be processed first at user 12887 * level) and to manually activate the buffer for this CPU. 
12888 */ 12889 cookie = dtrace_interrupt_disable(); 12890 *cpu = CPU->cpu_id; 12891 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 12892 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 12893 12894 dtrace_probe(dtrace_probeid_begin, 12895 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 12896 dtrace_interrupt_enable(cookie); 12897 /* 12898 * We may have had an exit action from a BEGIN probe; only change our 12899 * state to ACTIVE if we're still in WARMUP. 12900 */ 12901 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 12902 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 12903 12904 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 12905 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 12906 12907 /* 12908 * Regardless of whether we're now in ACTIVE or DRAINING, we 12909 * want each CPU to transition its principal buffer out of the 12910 * INACTIVE state. Doing this assures that no CPU will suddenly begin 12911 * processing an ECB halfway down a probe's ECB chain; all CPUs will 12912 * atomically transition from processing none of a state's ECBs to 12913 * processing all of them. 12914 */ 12915 dtrace_xcall(DTRACE_CPUALL, 12916 (dtrace_xcall_t)dtrace_buffer_activate, state); 12917 goto out; 12918 12919 err: 12920 dtrace_buffer_free(state->dts_buffer); 12921 dtrace_buffer_free(state->dts_aggbuffer); 12922 12923 if ((nspec = state->dts_nspeculations) == 0) { 12924 ASSERT(state->dts_speculations == NULL); 12925 goto out; 12926 } 12927 12928 spec = state->dts_speculations; 12929 ASSERT(spec != NULL); 12930 12931 for (i = 0; i < state->dts_nspeculations; i++) { 12932 if ((buf = spec[i].dtsp_buffer) == NULL) 12933 break; 12934 12935 dtrace_buffer_free(buf); 12936 kmem_free(buf, bufsize); 12937 } 12938 12939 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 12940 state->dts_nspeculations = 0; 12941 state->dts_speculations = NULL; 12942 12943 out: 12944 mutex_exit(&dtrace_lock); 12945 mutex_exit(&cpu_lock); 12946 12947 return (rval); 12948 } 12949 12950 static int 12951 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 12952 { 12953 dtrace_icookie_t cookie; 12954 12955 ASSERT(MUTEX_HELD(&dtrace_lock)); 12956 12957 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 12958 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 12959 return (EINVAL); 12960 12961 /* 12962 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 12963 * to be sure that every CPU has seen it. See below for the details 12964 * on why this is done. 12965 */ 12966 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 12967 dtrace_sync(); 12968 12969 /* 12970 * By this point, it is impossible for any CPU to be still processing 12971 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 12972 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 12973 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 12974 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 12975 * iff we're in the END probe. 12976 */ 12977 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 12978 dtrace_sync(); 12979 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 12980 12981 /* 12982 * Finally, we can release the reserve and call the END probe. We 12983 * disable interrupts across calling the END probe to allow us to 12984 * return the CPU on which we actually called the END probe. This 12985 * allows user-land to be sure that this CPU's principal buffer is 12986 * processed last.
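* After the END probe fires, we set the activity to STOPPED and issue one more dtrace_sync() so that no CPU is still processing this state's ECBs when we return.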
12987 */ 12988 state->dts_reserve = 0; 12989 12990 cookie = dtrace_interrupt_disable(); 12991 *cpu = CPU->cpu_id; 12992 dtrace_probe(dtrace_probeid_end, 12993 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 12994 dtrace_interrupt_enable(cookie); 12995 12996 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 12997 dtrace_sync(); 12998 12999 return (0); 13000 } 13001 13002 static int 13003 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 13004 dtrace_optval_t val) 13005 { 13006 ASSERT(MUTEX_HELD(&dtrace_lock)); 13007 13008 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 13009 return (EBUSY); 13010 13011 if (option >= DTRACEOPT_MAX) 13012 return (EINVAL); 13013 13014 if (option != DTRACEOPT_CPU && val < 0) 13015 return (EINVAL); 13016 13017 switch (option) { 13018 case DTRACEOPT_DESTRUCTIVE: 13019 if (dtrace_destructive_disallow) 13020 return (EACCES); 13021 13022 state->dts_cred.dcr_destructive = 1; 13023 break; 13024 13025 case DTRACEOPT_BUFSIZE: 13026 case DTRACEOPT_DYNVARSIZE: 13027 case DTRACEOPT_AGGSIZE: 13028 case DTRACEOPT_SPECSIZE: 13029 case DTRACEOPT_STRSIZE: 13030 if (val < 0) 13031 return (EINVAL); 13032 13033 if (val >= LONG_MAX) { 13034 /* 13035 * If this is an otherwise negative value, set it to 13036 * the highest multiple of 128m less than LONG_MAX. 13037 * Technically, we're adjusting the size without 13038 * regard to the buffer resizing policy, but in fact, 13039 * this has no effect -- if we set the buffer size to 13040 * ~LONG_MAX and the buffer policy is ultimately set to 13041 * be "manual", the buffer allocation is guaranteed to 13042 * fail, if only because the allocation requires two 13043 * buffers. (We set the size to the highest 13044 * multiple of 128m because it ensures that the size 13045 * will remain a multiple of a megabyte when 13046 * repeatedly halved -- all the way down to 15m.) 13047 */ 13048 val = LONG_MAX - (1 << 27) + 1; 13049 } 13050 } 13051 13052 state->dts_options[option] = val; 13053 13054 return (0); 13055 } 13056 13057 static void 13058 dtrace_state_destroy(dtrace_state_t *state) 13059 { 13060 dtrace_ecb_t *ecb; 13061 dtrace_vstate_t *vstate = &state->dts_vstate; 13062 minor_t minor = getminor(state->dts_dev); 13063 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 13064 dtrace_speculation_t *spec = state->dts_speculations; 13065 int nspec = state->dts_nspeculations; 13066 uint32_t match; 13067 13068 ASSERT(MUTEX_HELD(&dtrace_lock)); 13069 ASSERT(MUTEX_HELD(&cpu_lock)); 13070 13071 /* 13072 * First, retract any retained enablings for this state. 13073 */ 13074 dtrace_enabling_retract(state); 13075 ASSERT(state->dts_nretained == 0); 13076 13077 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || 13078 state->dts_activity == DTRACE_ACTIVITY_DRAINING) { 13079 /* 13080 * We have managed to come into dtrace_state_destroy() on a 13081 * hot enabling -- almost certainly because of a disorderly 13082 * shutdown of a consumer. (That is, a consumer that is 13083 * exiting without having called dtrace_stop().) In this case, 13084 * we're going to set our activity to be KILLED, and then 13085 * issue a sync to be sure that everyone is out of probe 13086 * context before we start blowing away ECBs. 13087 */ 13088 state->dts_activity = DTRACE_ACTIVITY_KILLED; 13089 dtrace_sync(); 13090 } 13091 13092 /* 13093 * Release the credential hold we took in dtrace_state_create().
13094 */ 13095 if (state->dts_cred.dcr_cred != NULL) 13096 crfree(state->dts_cred.dcr_cred); 13097 13098 /* 13099 * Now we can safely disable and destroy any enabled probes. Because 13100 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 13101 * (especially if they're all enabled), we take two passes through the 13102 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 13103 * in the second we disable whatever is left over. 13104 */ 13105 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 13106 for (i = 0; i < state->dts_necbs; i++) { 13107 if ((ecb = state->dts_ecbs[i]) == NULL) 13108 continue; 13109 13110 if (match && ecb->dte_probe != NULL) { 13111 dtrace_probe_t *probe = ecb->dte_probe; 13112 dtrace_provider_t *prov = probe->dtpr_provider; 13113 13114 if (!(prov->dtpv_priv.dtpp_flags & match)) 13115 continue; 13116 } 13117 13118 dtrace_ecb_disable(ecb); 13119 dtrace_ecb_destroy(ecb); 13120 } 13121 13122 if (!match) 13123 break; 13124 } 13125 13126 /* 13127 * Before we free the buffers, perform one more sync to assure that 13128 * every CPU is out of probe context. 13129 */ 13130 dtrace_sync(); 13131 13132 dtrace_buffer_free(state->dts_buffer); 13133 dtrace_buffer_free(state->dts_aggbuffer); 13134 13135 for (i = 0; i < nspec; i++) 13136 dtrace_buffer_free(spec[i].dtsp_buffer); 13137 13138 if (state->dts_cleaner != CYCLIC_NONE) 13139 cyclic_remove(state->dts_cleaner); 13140 13141 if (state->dts_deadman != CYCLIC_NONE) 13142 cyclic_remove(state->dts_deadman); 13143 13144 dtrace_dstate_fini(&vstate->dtvs_dynvars); 13145 dtrace_vstate_fini(vstate); 13146 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 13147 13148 if (state->dts_aggregations != NULL) { 13149 #ifdef DEBUG 13150 for (i = 0; i < state->dts_naggregations; i++) 13151 ASSERT(state->dts_aggregations[i] == NULL); 13152 #endif 13153 ASSERT(state->dts_naggregations > 0); 13154 kmem_free(state->dts_aggregations, 13155 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 13156 } 13157 13158 kmem_free(state->dts_buffer, bufsize); 13159 kmem_free(state->dts_aggbuffer, bufsize); 13160 13161 for (i = 0; i < nspec; i++) 13162 kmem_free(spec[i].dtsp_buffer, bufsize); 13163 13164 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13165 13166 dtrace_format_destroy(state); 13167 13168 vmem_destroy(state->dts_aggid_arena); 13169 ddi_soft_state_free(dtrace_softstate, minor); 13170 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 13171 } 13172 13173 /* 13174 * DTrace Anonymous Enabling Functions 13175 */ 13176 static dtrace_state_t * 13177 dtrace_anon_grab(void) 13178 { 13179 dtrace_state_t *state; 13180 13181 ASSERT(MUTEX_HELD(&dtrace_lock)); 13182 13183 if ((state = dtrace_anon.dta_state) == NULL) { 13184 ASSERT(dtrace_anon.dta_enabling == NULL); 13185 return (NULL); 13186 } 13187 13188 ASSERT(dtrace_anon.dta_enabling != NULL); 13189 ASSERT(dtrace_retained != NULL); 13190 13191 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 13192 dtrace_anon.dta_enabling = NULL; 13193 dtrace_anon.dta_state = NULL; 13194 13195 return (state); 13196 } 13197 13198 static void 13199 dtrace_anon_property(void) 13200 { 13201 int i, rv; 13202 dtrace_state_t *state; 13203 dof_hdr_t *dof; 13204 char c[32]; /* enough for "dof-data-" + digits */ 13205 13206 ASSERT(MUTEX_HELD(&dtrace_lock)); 13207 ASSERT(MUTEX_HELD(&cpu_lock)); 13208 13209 for (i = 0; ; i++) { 13210 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 13211 13212 dtrace_err_verbose = 1; 13213 13214 if ((dof = dtrace_dof_property(c)) 
== NULL) { 13215 dtrace_err_verbose = 0; 13216 break; 13217 } 13218 13219 /* 13220 * We want to create anonymous state, so we need to transition 13221 * the kernel debugger to indicate that DTrace is active. If 13222 * this fails (e.g. because the debugger has modified text in 13223 * some way), we won't continue with the processing. 13224 */ 13225 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 13226 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 13227 "enabling ignored."); 13228 dtrace_dof_destroy(dof); 13229 break; 13230 } 13231 13232 /* 13233 * If we haven't allocated an anonymous state, we'll do so now. 13234 */ 13235 if ((state = dtrace_anon.dta_state) == NULL) { 13236 state = dtrace_state_create(NULL, NULL); 13237 dtrace_anon.dta_state = state; 13238 13239 if (state == NULL) { 13240 /* 13241 * This basically shouldn't happen: the only 13242 * failure mode from dtrace_state_create() is a 13243 * failure of ddi_soft_state_zalloc() that 13244 * itself should never happen. Still, the 13245 * interface allows for a failure mode, and 13246 * we want to fail as gracefully as possible: 13247 * we'll emit an error message and cease 13248 * processing anonymous state in this case. 13249 */ 13250 cmn_err(CE_WARN, "failed to create " 13251 "anonymous state"); 13252 dtrace_dof_destroy(dof); 13253 break; 13254 } 13255 } 13256 13257 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 13258 &dtrace_anon.dta_enabling, 0, B_TRUE); 13259 13260 if (rv == 0) 13261 rv = dtrace_dof_options(dof, state); 13262 13263 dtrace_err_verbose = 0; 13264 dtrace_dof_destroy(dof); 13265 13266 if (rv != 0) { 13267 /* 13268 * This is malformed DOF; chuck any anonymous state 13269 * that we created. 13270 */ 13271 ASSERT(dtrace_anon.dta_enabling == NULL); 13272 dtrace_state_destroy(state); 13273 dtrace_anon.dta_state = NULL; 13274 break; 13275 } 13276 13277 ASSERT(dtrace_anon.dta_enabling != NULL); 13278 } 13279 13280 if (dtrace_anon.dta_enabling != NULL) { 13281 int rval; 13282 13283 /* 13284 * dtrace_enabling_retain() can only fail because we are 13285 * trying to retain more enablings than are allowed -- but 13286 * we only have one anonymous enabling, and we are guaranteed 13287 * to be allowed at least one retained enabling; we assert 13288 * that dtrace_enabling_retain() returns success. 13289 */ 13290 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 13291 ASSERT(rval == 0); 13292 13293 dtrace_enabling_dump(dtrace_anon.dta_enabling); 13294 } 13295 } 13296 13297 /* 13298 * DTrace Helper Functions 13299 */ 13300 static void 13301 dtrace_helper_trace(dtrace_helper_action_t *helper, 13302 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 13303 { 13304 uint32_t size, next, nnext, i; 13305 dtrace_helptrace_t *ent; 13306 uint16_t flags = cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 13307 13308 if (!dtrace_helptrace_enabled) 13309 return; 13310 13311 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 13312 13313 /* 13314 * What would a tracing framework be without its own tracing 13315 * framework? (Well, a hell of a lot simpler, for starters...) 13316 */ 13317 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 13318 sizeof (uint64_t) - sizeof (uint64_t); 13319 13320 /* 13321 * Iterate until we can allocate a slot in the trace buffer. 
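* The slot is claimed lock-free: we compute the next offset and commit it with a compare-and-swap, retrying if another CPU beat us to it; if the entry would run past the end of the buffer, we wrap back to the beginning.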
13322 */ 13323 do { 13324 next = dtrace_helptrace_next; 13325 13326 if (next + size < dtrace_helptrace_bufsize) { 13327 nnext = next + size; 13328 } else { 13329 nnext = size; 13330 } 13331 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 13332 13333 /* 13334 * We have our slot; fill it in. 13335 */ 13336 if (nnext == size) 13337 next = 0; 13338 13339 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 13340 ent->dtht_helper = helper; 13341 ent->dtht_where = where; 13342 ent->dtht_nlocals = vstate->dtvs_nlocals; 13343 13344 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 13345 mstate->dtms_fltoffs : -1; 13346 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 13347 ent->dtht_illval = cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 13348 13349 for (i = 0; i < vstate->dtvs_nlocals; i++) { 13350 dtrace_statvar_t *svar; 13351 13352 if ((svar = vstate->dtvs_locals[i]) == NULL) 13353 continue; 13354 13355 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 13356 ent->dtht_locals[i] = 13357 ((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id]; 13358 } 13359 } 13360 13361 static uint64_t 13362 dtrace_helper(int which, dtrace_mstate_t *mstate, 13363 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 13364 { 13365 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 13366 uint64_t sarg0 = mstate->dtms_arg[0]; 13367 uint64_t sarg1 = mstate->dtms_arg[1]; 13368 uint64_t rval; 13369 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 13370 dtrace_helper_action_t *helper; 13371 dtrace_vstate_t *vstate; 13372 dtrace_difo_t *pred; 13373 int i, trace = dtrace_helptrace_enabled; 13374 13375 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 13376 13377 if (helpers == NULL) 13378 return (0); 13379 13380 if ((helper = helpers->dthps_actions[which]) == NULL) 13381 return (0); 13382 13383 vstate = &helpers->dthps_vstate; 13384 mstate->dtms_arg[0] = arg0; 13385 mstate->dtms_arg[1] = arg1; 13386 13387 /* 13388 * Now iterate over each helper. If its predicate evaluates to 'true', 13389 * we'll call the corresponding actions. Note that the below calls 13390 * to dtrace_dif_emulate() may set faults in machine state. This is 13391 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 13392 * the stored DIF offset with its own (which is the desired behavior). 13393 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 13394 * from machine state; this is okay, too. 13395 */ 13396 for (; helper != NULL; helper = helper->dtha_next) { 13397 if ((pred = helper->dtha_predicate) != NULL) { 13398 if (trace) 13399 dtrace_helper_trace(helper, mstate, vstate, 0); 13400 13401 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 13402 goto next; 13403 13404 if (*flags & CPU_DTRACE_FAULT) 13405 goto err; 13406 } 13407 13408 for (i = 0; i < helper->dtha_nactions; i++) { 13409 if (trace) 13410 dtrace_helper_trace(helper, 13411 mstate, vstate, i + 1); 13412 13413 rval = dtrace_dif_emulate(helper->dtha_actions[i], 13414 mstate, vstate, state); 13415 13416 if (*flags & CPU_DTRACE_FAULT) 13417 goto err; 13418 } 13419 13420 next: 13421 if (trace) 13422 dtrace_helper_trace(helper, mstate, vstate, 13423 DTRACE_HELPTRACE_NEXT); 13424 } 13425 13426 if (trace) 13427 dtrace_helper_trace(helper, mstate, vstate, 13428 DTRACE_HELPTRACE_DONE); 13429 13430 /* 13431 * Restore the arg0 that we saved upon entry. 
13432 */ 13433 mstate->dtms_arg[0] = sarg0; 13434 mstate->dtms_arg[1] = sarg1; 13435 13436 return (rval); 13437 13438 err: 13439 if (trace) 13440 dtrace_helper_trace(helper, mstate, vstate, 13441 DTRACE_HELPTRACE_ERR); 13442 13443 /* 13444 * Restore the arg0 that we saved upon entry. 13445 */ 13446 mstate->dtms_arg[0] = sarg0; 13447 mstate->dtms_arg[1] = sarg1; 13448 13449 return (0); 13450 } 13451 13452 static void 13453 dtrace_helper_action_destroy(dtrace_helper_action_t *helper, 13454 dtrace_vstate_t *vstate) 13455 { 13456 int i; 13457 13458 if (helper->dtha_predicate != NULL) 13459 dtrace_difo_release(helper->dtha_predicate, vstate); 13460 13461 for (i = 0; i < helper->dtha_nactions; i++) { 13462 ASSERT(helper->dtha_actions[i] != NULL); 13463 dtrace_difo_release(helper->dtha_actions[i], vstate); 13464 } 13465 13466 kmem_free(helper->dtha_actions, 13467 helper->dtha_nactions * sizeof (dtrace_difo_t *)); 13468 kmem_free(helper, sizeof (dtrace_helper_action_t)); 13469 } 13470 13471 static int 13472 dtrace_helper_destroygen(int gen) 13473 { 13474 proc_t *p = curproc; 13475 dtrace_helpers_t *help = p->p_dtrace_helpers; 13476 dtrace_vstate_t *vstate; 13477 int i; 13478 13479 ASSERT(MUTEX_HELD(&dtrace_lock)); 13480 13481 if (help == NULL || gen > help->dthps_generation) 13482 return (EINVAL); 13483 13484 vstate = &help->dthps_vstate; 13485 13486 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 13487 dtrace_helper_action_t *last = NULL, *h, *next; 13488 13489 for (h = help->dthps_actions[i]; h != NULL; h = next) { 13490 next = h->dtha_next; 13491 13492 if (h->dtha_generation == gen) { 13493 if (last != NULL) { 13494 last->dtha_next = next; 13495 } else { 13496 help->dthps_actions[i] = next; 13497 } 13498 13499 dtrace_helper_action_destroy(h, vstate); 13500 } else { 13501 last = h; 13502 } 13503 } 13504 } 13505 13506 /* 13507 * Iterate until we've cleared out all helper providers with the 13508 * given generation number. 13509 */ 13510 for (;;) { 13511 dtrace_helper_provider_t *prov; 13512 13513 /* 13514 * Look for a helper provider with the right generation. We 13515 * have to start back at the beginning of the list each time 13516 * because we drop dtrace_lock. It's unlikely that we'll make 13517 * more than two passes. 13518 */ 13519 for (i = 0; i < help->dthps_nprovs; i++) { 13520 prov = help->dthps_provs[i]; 13521 13522 if (prov->dthp_generation == gen) 13523 break; 13524 } 13525 13526 /* 13527 * If there were no matches, we're done. 13528 */ 13529 if (i == help->dthps_nprovs) 13530 break; 13531 13532 /* 13533 * Move the last helper provider into this slot. 13534 */ 13535 help->dthps_nprovs--; 13536 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; 13537 help->dthps_provs[help->dthps_nprovs] = NULL; 13538 13539 mutex_exit(&dtrace_lock); 13540 13541 /* 13542 * If we have a meta provider, remove this helper provider.
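* (The provider structure itself is destroyed below, once dtrace_meta_lock has been dropped.)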
13543 */ 13544 mutex_enter(&dtrace_meta_lock); 13545 if (dtrace_meta_pid != NULL) { 13546 ASSERT(dtrace_deferred_pid == NULL); 13547 dtrace_helper_provider_remove(&prov->dthp_prov, 13548 p->p_pid); 13549 } 13550 mutex_exit(&dtrace_meta_lock); 13551 13552 dtrace_helper_provider_destroy(prov); 13553 13554 mutex_enter(&dtrace_lock); 13555 } 13556 13557 return (0); 13558 } 13559 13560 static int 13561 dtrace_helper_validate(dtrace_helper_action_t *helper) 13562 { 13563 int err = 0, i; 13564 dtrace_difo_t *dp; 13565 13566 if ((dp = helper->dtha_predicate) != NULL) 13567 err += dtrace_difo_validate_helper(dp); 13568 13569 for (i = 0; i < helper->dtha_nactions; i++) 13570 err += dtrace_difo_validate_helper(helper->dtha_actions[i]); 13571 13572 return (err == 0); 13573 } 13574 13575 static int 13576 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 13577 { 13578 dtrace_helpers_t *help; 13579 dtrace_helper_action_t *helper, *last; 13580 dtrace_actdesc_t *act; 13581 dtrace_vstate_t *vstate; 13582 dtrace_predicate_t *pred; 13583 int count = 0, nactions = 0, i; 13584 13585 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 13586 return (EINVAL); 13587 13588 help = curproc->p_dtrace_helpers; 13589 last = help->dthps_actions[which]; 13590 vstate = &help->dthps_vstate; 13591 13592 for (count = 0; last != NULL; last = last->dtha_next) { 13593 count++; 13594 if (last->dtha_next == NULL) 13595 break; 13596 } 13597 13598 /* 13599 * If we already have dtrace_helper_actions_max helper actions for this 13600 * helper action type, we'll refuse to add a new one. 13601 */ 13602 if (count >= dtrace_helper_actions_max) 13603 return (ENOSPC); 13604 13605 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 13606 helper->dtha_generation = help->dthps_generation; 13607 13608 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 13609 ASSERT(pred->dtp_difo != NULL); 13610 dtrace_difo_hold(pred->dtp_difo); 13611 helper->dtha_predicate = pred->dtp_difo; 13612 } 13613 13614 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 13615 if (act->dtad_kind != DTRACEACT_DIFEXPR) 13616 goto err; 13617 13618 if (act->dtad_difo == NULL) 13619 goto err; 13620 13621 nactions++; 13622 } 13623 13624 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 13625 (helper->dtha_nactions = nactions), KM_SLEEP); 13626 13627 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 13628 dtrace_difo_hold(act->dtad_difo); 13629 helper->dtha_actions[i++] = act->dtad_difo; 13630 } 13631 13632 if (!dtrace_helper_validate(helper)) 13633 goto err; 13634 13635 if (last == NULL) { 13636 help->dthps_actions[which] = helper; 13637 } else { 13638 last->dtha_next = helper; 13639 } 13640 13641 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 13642 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 13643 dtrace_helptrace_next = 0; 13644 } 13645 13646 return (0); 13647 err: 13648 dtrace_helper_action_destroy(helper, vstate); 13649 return (EINVAL); 13650 } 13651 13652 static void 13653 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 13654 dof_helper_t *dofhp) 13655 { 13656 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 13657 13658 mutex_enter(&dtrace_meta_lock); 13659 mutex_enter(&dtrace_lock); 13660 13661 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 13662 /* 13663 * If the dtrace module is loaded but not attached, or if 13664 * there isn't a meta provider registered to deal with 13665 * these provider descriptions, we need to postpone creating 13666 * the actual providers until
later. 13667 */ 13668 13669 if (help->dthps_next == NULL && help->dthps_prev == NULL && 13670 dtrace_deferred_pid != help) { 13671 help->dthps_deferred = 1; 13672 help->dthps_pid = p->p_pid; 13673 help->dthps_next = dtrace_deferred_pid; 13674 help->dthps_prev = NULL; 13675 if (dtrace_deferred_pid != NULL) 13676 dtrace_deferred_pid->dthps_prev = help; 13677 dtrace_deferred_pid = help; 13678 } 13679 13680 mutex_exit(&dtrace_lock); 13681 13682 } else if (dofhp != NULL) { 13683 /* 13684 * If the dtrace module is loaded and we have a particular 13685 * helper provider description, pass that off to the 13686 * meta provider. 13687 */ 13688 13689 mutex_exit(&dtrace_lock); 13690 13691 dtrace_helper_provide(dofhp, p->p_pid); 13692 13693 } else { 13694 /* 13695 * Otherwise, just pass all the helper provider descriptions 13696 * off to the meta provider. 13697 */ 13698 13699 int i; 13700 mutex_exit(&dtrace_lock); 13701 13702 for (i = 0; i < help->dthps_nprovs; i++) { 13703 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 13704 p->p_pid); 13705 } 13706 } 13707 13708 mutex_exit(&dtrace_meta_lock); 13709 } 13710 13711 static int 13712 dtrace_helper_provider_add(dof_helper_t *dofhp, int gen) 13713 { 13714 dtrace_helpers_t *help; 13715 dtrace_helper_provider_t *hprov, **tmp_provs; 13716 uint_t tmp_maxprovs, i; 13717 13718 ASSERT(MUTEX_HELD(&dtrace_lock)); 13719 13720 help = curproc->p_dtrace_helpers; 13721 ASSERT(help != NULL); 13722 13723 /* 13724 * If we already have dtrace_helper_providers_max helper providers, 13725 * we refuse to add a new one. 13726 */ 13727 if (help->dthps_nprovs >= dtrace_helper_providers_max) 13728 return (ENOSPC); 13729 13730 /* 13731 * Check to make sure this isn't a duplicate. 13732 */ 13733 for (i = 0; i < help->dthps_nprovs; i++) { 13734 if (dofhp->dofhp_addr == 13735 help->dthps_provs[i]->dthp_prov.dofhp_addr) 13736 return (EALREADY); 13737 } 13738 13739 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 13740 hprov->dthp_prov = *dofhp; 13741 hprov->dthp_ref = 1; 13742 hprov->dthp_generation = gen; 13743 13744 /* 13745 * Allocate a bigger table for helper providers if it's already full.
13746 */ 13747 if (help->dthps_maxprovs == help->dthps_nprovs) { 13748 tmp_maxprovs = help->dthps_maxprovs; 13749 tmp_provs = help->dthps_provs; 13750 13751 if (help->dthps_maxprovs == 0) 13752 help->dthps_maxprovs = 2; 13753 else 13754 help->dthps_maxprovs *= 2; 13755 if (help->dthps_maxprovs > dtrace_helper_providers_max) 13756 help->dthps_maxprovs = dtrace_helper_providers_max; 13757 13758 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 13759 13760 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 13761 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 13762 13763 if (tmp_provs != NULL) { 13764 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 13765 sizeof (dtrace_helper_provider_t *)); 13766 kmem_free(tmp_provs, tmp_maxprovs * 13767 sizeof (dtrace_helper_provider_t *)); 13768 } 13769 } 13770 13771 help->dthps_provs[help->dthps_nprovs] = hprov; 13772 help->dthps_nprovs++; 13773 13774 return (0); 13775 } 13776 13777 static void 13778 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 13779 { 13780 mutex_enter(&dtrace_lock); 13781 13782 if (--hprov->dthp_ref == 0) { 13783 dof_hdr_t *dof; 13784 mutex_exit(&dtrace_lock); 13785 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 13786 dtrace_dof_destroy(dof); 13787 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 13788 } else { 13789 mutex_exit(&dtrace_lock); 13790 } 13791 } 13792 13793 static int 13794 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 13795 { 13796 uintptr_t daddr = (uintptr_t)dof; 13797 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 13798 dof_provider_t *provider; 13799 dof_probe_t *probe; 13800 uint8_t *arg; 13801 char *strtab, *typestr; 13802 dof_stridx_t typeidx; 13803 size_t typesz; 13804 uint_t nprobes, j, k; 13805 13806 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 13807 13808 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 13809 dtrace_dof_error(dof, "misaligned section offset"); 13810 return (-1); 13811 } 13812 13813 /* 13814 * The section needs to be large enough to contain the DOF provider 13815 * structure appropriate for the given version. 13816 */ 13817 if (sec->dofs_size < 13818 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
13819 offsetof(dof_provider_t, dofpv_prenoffs) : 13820 sizeof (dof_provider_t))) { 13821 dtrace_dof_error(dof, "provider section too small"); 13822 return (-1); 13823 } 13824 13825 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 13826 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 13827 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 13828 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 13829 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 13830 13831 if (str_sec == NULL || prb_sec == NULL || 13832 arg_sec == NULL || off_sec == NULL) 13833 return (-1); 13834 13835 enoff_sec = NULL; 13836 13837 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 13838 provider->dofpv_prenoffs != DOF_SECT_NONE && 13839 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 13840 provider->dofpv_prenoffs)) == NULL) 13841 return (-1); 13842 13843 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 13844 13845 if (provider->dofpv_name >= str_sec->dofs_size || 13846 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 13847 dtrace_dof_error(dof, "invalid provider name"); 13848 return (-1); 13849 } 13850 13851 if (prb_sec->dofs_entsize == 0 || 13852 prb_sec->dofs_entsize > prb_sec->dofs_size) { 13853 dtrace_dof_error(dof, "invalid entry size"); 13854 return (-1); 13855 } 13856 13857 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 13858 dtrace_dof_error(dof, "misaligned entry size"); 13859 return (-1); 13860 } 13861 13862 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 13863 dtrace_dof_error(dof, "invalid entry size"); 13864 return (-1); 13865 } 13866 13867 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 13868 dtrace_dof_error(dof, "misaligned section offset"); 13869 return (-1); 13870 } 13871 13872 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 13873 dtrace_dof_error(dof, "invalid entry size"); 13874 return (-1); 13875 } 13876 13877 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 13878 13879 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 13880 13881 /* 13882 * Take a pass through the probes to check for errors. 13883 */ 13884 for (j = 0; j < nprobes; j++) { 13885 probe = (dof_probe_t *)(uintptr_t)(daddr + 13886 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 13887 13888 if (probe->dofpr_func >= str_sec->dofs_size) { 13889 dtrace_dof_error(dof, "invalid function name"); 13890 return (-1); 13891 } 13892 13893 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 13894 dtrace_dof_error(dof, "function name too long"); 13895 return (-1); 13896 } 13897 13898 if (probe->dofpr_name >= str_sec->dofs_size || 13899 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 13900 dtrace_dof_error(dof, "invalid probe name"); 13901 return (-1); 13902 } 13903 13904 /* 13905 * The offset count must not wrap the index, and the offsets 13906 * must also not overflow the section's data. 13907 */ 13908 if (probe->dofpr_offidx + probe->dofpr_noffs < 13909 probe->dofpr_offidx || 13910 (probe->dofpr_offidx + probe->dofpr_noffs) * 13911 off_sec->dofs_entsize > off_sec->dofs_size) { 13912 dtrace_dof_error(dof, "invalid probe offset"); 13913 return (-1); 13914 } 13915 13916 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 13917 /* 13918 * If there's no is-enabled offset section, make sure 13919 * there aren't any is-enabled offsets. Otherwise 13920 * perform the same checks as for probe offsets 13921 * (immediately above). 
13922 */ 13923 if (enoff_sec == NULL) { 13924 if (probe->dofpr_enoffidx != 0 || 13925 probe->dofpr_nenoffs != 0) { 13926 dtrace_dof_error(dof, "is-enabled " 13927 "offsets with null section"); 13928 return (-1); 13929 } 13930 } else if (probe->dofpr_enoffidx + 13931 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 13932 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 13933 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 13934 dtrace_dof_error(dof, "invalid is-enabled " 13935 "offset"); 13936 return (-1); 13937 } 13938 13939 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 13940 dtrace_dof_error(dof, "zero probe and " 13941 "is-enabled offsets"); 13942 return (-1); 13943 } 13944 } else if (probe->dofpr_noffs == 0) { 13945 dtrace_dof_error(dof, "zero probe offsets"); 13946 return (-1); 13947 } 13948 13949 if (probe->dofpr_argidx + probe->dofpr_xargc < 13950 probe->dofpr_argidx || 13951 (probe->dofpr_argidx + probe->dofpr_xargc) * 13952 arg_sec->dofs_entsize > arg_sec->dofs_size) { 13953 dtrace_dof_error(dof, "invalid args"); 13954 return (-1); 13955 } 13956 13957 typeidx = probe->dofpr_nargv; 13958 typestr = strtab + probe->dofpr_nargv; 13959 for (k = 0; k < probe->dofpr_nargc; k++) { 13960 if (typeidx >= str_sec->dofs_size) { 13961 dtrace_dof_error(dof, "bad " 13962 "native argument type"); 13963 return (-1); 13964 } 13965 13966 typesz = strlen(typestr) + 1; 13967 if (typesz > DTRACE_ARGTYPELEN) { 13968 dtrace_dof_error(dof, "native " 13969 "argument type too long"); 13970 return (-1); 13971 } 13972 typeidx += typesz; 13973 typestr += typesz; 13974 } 13975 13976 typeidx = probe->dofpr_xargv; 13977 typestr = strtab + probe->dofpr_xargv; 13978 for (k = 0; k < probe->dofpr_xargc; k++) { 13979 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 13980 dtrace_dof_error(dof, "bad " 13981 "native argument index"); 13982 return (-1); 13983 } 13984 13985 if (typeidx >= str_sec->dofs_size) { 13986 dtrace_dof_error(dof, "bad " 13987 "translated argument type"); 13988 return (-1); 13989 } 13990 13991 typesz = strlen(typestr) + 1; 13992 if (typesz > DTRACE_ARGTYPELEN) { 13993 dtrace_dof_error(dof, "translated argument " 13994 "type too long"); 13995 return (-1); 13996 } 13997 13998 typeidx += typesz; 13999 typestr += typesz; 14000 } 14001 } 14002 14003 return (0); 14004 } 14005 14006 static int 14007 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 14008 { 14009 dtrace_helpers_t *help; 14010 dtrace_vstate_t *vstate; 14011 dtrace_enabling_t *enab = NULL; 14012 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 14013 uintptr_t daddr = (uintptr_t)dof; 14014 14015 ASSERT(MUTEX_HELD(&dtrace_lock)); 14016 14017 if ((help = curproc->p_dtrace_helpers) == NULL) 14018 help = dtrace_helpers_create(curproc); 14019 14020 vstate = &help->dthps_vstate; 14021 14022 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 14023 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 14024 dtrace_dof_destroy(dof); 14025 return (rv); 14026 } 14027 14028 /* 14029 * Look for helper providers and validate their descriptions. 
14030 */ 14031 if (dhp != NULL) { 14032 for (i = 0; i < dof->dofh_secnum; i++) { 14033 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 14034 dof->dofh_secoff + i * dof->dofh_secsize); 14035 14036 if (sec->dofs_type != DOF_SECT_PROVIDER) 14037 continue; 14038 14039 if (dtrace_helper_provider_validate(dof, sec) != 0) { 14040 dtrace_enabling_destroy(enab); 14041 dtrace_dof_destroy(dof); 14042 return (-1); 14043 } 14044 14045 nprovs++; 14046 } 14047 } 14048 14049 /* 14050 * Now we need to walk through the ECB descriptions in the enabling. 14051 */ 14052 for (i = 0; i < enab->dten_ndesc; i++) { 14053 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 14054 dtrace_probedesc_t *desc = &ep->dted_probe; 14055 14056 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 14057 continue; 14058 14059 if (strcmp(desc->dtpd_mod, "helper") != 0) 14060 continue; 14061 14062 if (strcmp(desc->dtpd_func, "ustack") != 0) 14063 continue; 14064 14065 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 14066 ep)) != 0) { 14067 /* 14068 * Adding this helper action failed -- we are now going 14069 * to rip out the entire generation and return failure. 14070 */ 14071 (void) dtrace_helper_destroygen(help->dthps_generation); 14072 dtrace_enabling_destroy(enab); 14073 dtrace_dof_destroy(dof); 14074 return (-1); 14075 } 14076 14077 nhelpers++; 14078 } 14079 14080 if (nhelpers < enab->dten_ndesc) 14081 dtrace_dof_error(dof, "unmatched helpers"); 14082 14083 gen = help->dthps_generation++; 14084 dtrace_enabling_destroy(enab); 14085 14086 if (dhp != NULL && nprovs > 0) { 14087 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 14088 if (dtrace_helper_provider_add(dhp, gen) == 0) { 14089 mutex_exit(&dtrace_lock); 14090 dtrace_helper_provider_register(curproc, help, dhp); 14091 mutex_enter(&dtrace_lock); 14092 14093 destroy = 0; 14094 } 14095 } 14096 14097 if (destroy) 14098 dtrace_dof_destroy(dof); 14099 14100 return (gen); 14101 } 14102 14103 static dtrace_helpers_t * 14104 dtrace_helpers_create(proc_t *p) 14105 { 14106 dtrace_helpers_t *help; 14107 14108 ASSERT(MUTEX_HELD(&dtrace_lock)); 14109 ASSERT(p->p_dtrace_helpers == NULL); 14110 14111 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 14112 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 14113 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 14114 14115 p->p_dtrace_helpers = help; 14116 dtrace_helpers++; 14117 14118 return (help); 14119 } 14120 14121 static void 14122 dtrace_helpers_destroy(void) 14123 { 14124 dtrace_helpers_t *help; 14125 dtrace_vstate_t *vstate; 14126 proc_t *p = curproc; 14127 int i; 14128 14129 mutex_enter(&dtrace_lock); 14130 14131 ASSERT(p->p_dtrace_helpers != NULL); 14132 ASSERT(dtrace_helpers > 0); 14133 14134 help = p->p_dtrace_helpers; 14135 vstate = &help->dthps_vstate; 14136 14137 /* 14138 * We're now going to lose the help from this process. 14139 */ 14140 p->p_dtrace_helpers = NULL; 14141 dtrace_sync(); 14142 14143 /* 14144 * Destroy the helper actions. 14145 */ 14146 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14147 dtrace_helper_action_t *h, *next; 14148 14149 for (h = help->dthps_actions[i]; h != NULL; h = next) { 14150 next = h->dtha_next; 14151 dtrace_helper_action_destroy(h, vstate); 14152 h = next; 14153 } 14154 } 14155 14156 mutex_exit(&dtrace_lock); 14157 14158 /* 14159 * Destroy the helper providers.
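* If no meta provider has registered, the help structure may instead be sitting on the deferred list and must be unlinked from it.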
14160 */ 14161 if (help->dthps_maxprovs > 0) { 14162 mutex_enter(&dtrace_meta_lock); 14163 if (dtrace_meta_pid != NULL) { 14164 ASSERT(dtrace_deferred_pid == NULL); 14165 14166 for (i = 0; i < help->dthps_nprovs; i++) { 14167 dtrace_helper_provider_remove( 14168 &help->dthps_provs[i]->dthp_prov, p->p_pid); 14169 } 14170 } else { 14171 mutex_enter(&dtrace_lock); 14172 ASSERT(help->dthps_deferred == 0 || 14173 help->dthps_next != NULL || 14174 help->dthps_prev != NULL || 14175 help == dtrace_deferred_pid); 14176 14177 /* 14178 * Remove the helper from the deferred list. 14179 */ 14180 if (help->dthps_next != NULL) 14181 help->dthps_next->dthps_prev = help->dthps_prev; 14182 if (help->dthps_prev != NULL) 14183 help->dthps_prev->dthps_next = help->dthps_next; 14184 if (dtrace_deferred_pid == help) { 14185 dtrace_deferred_pid = help->dthps_next; 14186 ASSERT(help->dthps_prev == NULL); 14187 } 14188 14189 mutex_exit(&dtrace_lock); 14190 } 14191 14192 mutex_exit(&dtrace_meta_lock); 14193 14194 for (i = 0; i < help->dthps_nprovs; i++) { 14195 dtrace_helper_provider_destroy(help->dthps_provs[i]); 14196 } 14197 14198 kmem_free(help->dthps_provs, help->dthps_maxprovs * 14199 sizeof (dtrace_helper_provider_t *)); 14200 } 14201 14202 mutex_enter(&dtrace_lock); 14203 14204 dtrace_vstate_fini(&help->dthps_vstate); 14205 kmem_free(help->dthps_actions, 14206 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 14207 kmem_free(help, sizeof (dtrace_helpers_t)); 14208 14209 --dtrace_helpers; 14210 mutex_exit(&dtrace_lock); 14211 } 14212 14213 static void 14214 dtrace_helpers_duplicate(proc_t *from, proc_t *to) 14215 { 14216 dtrace_helpers_t *help, *newhelp; 14217 dtrace_helper_action_t *helper, *new, *last; 14218 dtrace_difo_t *dp; 14219 dtrace_vstate_t *vstate; 14220 int i, j, sz, hasprovs = 0; 14221 14222 mutex_enter(&dtrace_lock); 14223 ASSERT(from->p_dtrace_helpers != NULL); 14224 ASSERT(dtrace_helpers > 0); 14225 14226 help = from->p_dtrace_helpers; 14227 newhelp = dtrace_helpers_create(to); 14228 ASSERT(to->p_dtrace_helpers != NULL); 14229 14230 newhelp->dthps_generation = help->dthps_generation; 14231 vstate = &newhelp->dthps_vstate; 14232 14233 /* 14234 * Duplicate the helper actions. 14235 */ 14236 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14237 if ((helper = help->dthps_actions[i]) == NULL) 14238 continue; 14239 14240 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 14241 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 14242 KM_SLEEP); 14243 new->dtha_generation = helper->dtha_generation; 14244 14245 if ((dp = helper->dtha_predicate) != NULL) { 14246 dp = dtrace_difo_duplicate(dp, vstate); 14247 new->dtha_predicate = dp; 14248 } 14249 14250 new->dtha_nactions = helper->dtha_nactions; 14251 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 14252 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 14253 14254 for (j = 0; j < new->dtha_nactions; j++) { 14255 dtrace_difo_t *dp = helper->dtha_actions[j]; 14256 14257 ASSERT(dp != NULL); 14258 dp = dtrace_difo_duplicate(dp, vstate); 14259 new->dtha_actions[j] = dp; 14260 } 14261 14262 if (last != NULL) { 14263 last->dtha_next = new; 14264 } else { 14265 newhelp->dthps_actions[i] = new; 14266 } 14267 14268 last = new; 14269 } 14270 } 14271 14272 /* 14273 * Duplicate the helper providers and register them with the 14274 * DTrace framework. 
14275 */
14276 if (help->dthps_nprovs > 0) {
14277 newhelp->dthps_nprovs = help->dthps_nprovs;
14278 newhelp->dthps_maxprovs = help->dthps_nprovs;
14279 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
14280 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
14281 for (i = 0; i < newhelp->dthps_nprovs; i++) {
14282 newhelp->dthps_provs[i] = help->dthps_provs[i];
14283 newhelp->dthps_provs[i]->dthp_ref++;
14284 }
14285
14286 hasprovs = 1;
14287 }
14288
14289 mutex_exit(&dtrace_lock);
14290
14291 if (hasprovs)
14292 dtrace_helper_provider_register(to, newhelp, NULL);
14293 }
14294
14295 /*
14296 * DTrace Hook Functions
14297 */
14298 static void
14299 dtrace_module_loaded(struct modctl *ctl)
14300 {
14301 dtrace_provider_t *prv;
14302
14303 mutex_enter(&dtrace_provider_lock);
14304 mutex_enter(&mod_lock);
14305
14306 ASSERT(ctl->mod_busy);
14307
14308 /*
14309 * We're going to call each provider's per-module provide operation
14310 * specifying only this module.
14311 */
14312 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
14313 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
14314
14315 mutex_exit(&mod_lock);
14316 mutex_exit(&dtrace_provider_lock);
14317
14318 /*
14319 * If we have any retained enablings, we need to match against them.
14320 * Enabling probes requires that cpu_lock be held, and we cannot hold
14321 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
14322 * module. (In particular, this happens when loading scheduling
14323 * classes.) So if we have any retained enablings, we need to dispatch
14324 * our task queue to do the match for us.
14325 */
14326 mutex_enter(&dtrace_lock);
14327
14328 if (dtrace_retained == NULL) {
14329 mutex_exit(&dtrace_lock);
14330 return;
14331 }
14332
14333 (void) taskq_dispatch(dtrace_taskq,
14334 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
14335
14336 mutex_exit(&dtrace_lock);
14337
14338 /*
14339 * And now, for a little heuristic sleaze: in general, we want to
14340 * match modules as soon as they load. However, we cannot guarantee
14341 * this, because it would lead us to the lock ordering violation
14342 * outlined above. The common case, of course, is that cpu_lock is
14343 * _not_ held -- so we delay here for a clock tick, hoping that that's
14344 * long enough for the task queue to do its work. If it's not, it's
14345 * not a serious problem -- it just means that the module that we
14346 * just loaded may not be immediately instrumentable.
14347 */
14348 delay(1);
14349 }
14350
14351 static void
14352 dtrace_module_unloaded(struct modctl *ctl)
14353 {
14354 dtrace_probe_t template, *probe, *first, *next;
14355 dtrace_provider_t *prov;
14356
14357 template.dtpr_mod = ctl->mod_modname;
14358
14359 mutex_enter(&dtrace_provider_lock);
14360 mutex_enter(&mod_lock);
14361 mutex_enter(&dtrace_lock);
14362
14363 if (dtrace_bymod == NULL) {
14364 /*
14365 * The DTrace module is loaded (obviously) but not attached;
14366 * we don't have any work to do.
14367 */
14368 mutex_exit(&dtrace_provider_lock);
14369 mutex_exit(&mod_lock);
14370 mutex_exit(&dtrace_lock);
14371 return;
14372 }
14373
14374 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template);
14375 probe != NULL; probe = probe->dtpr_nextmod) {
14376 if (probe->dtpr_ecb != NULL) {
14377 mutex_exit(&dtrace_provider_lock);
14378 mutex_exit(&mod_lock);
14379 mutex_exit(&dtrace_lock);
14380
14381 /*
14382 * This shouldn't _actually_ be possible -- we're
14383 * unloading a module that has an enabled probe in it.
14384 * (It's normally up to the provider to make sure that 14385 * this can't happen.) However, because dtps_enable() 14386 * doesn't have a failure mode, there can be an 14387 * enable/unload race. Upshot: we don't want to 14388 * assert, but we're not going to disable the 14389 * probe, either. 14390 */ 14391 if (dtrace_err_verbose) { 14392 cmn_err(CE_WARN, "unloaded module '%s' had " 14393 "enabled probes", ctl->mod_modname); 14394 } 14395 14396 return; 14397 } 14398 } 14399 14400 probe = first; 14401 14402 for (first = NULL; probe != NULL; probe = next) { 14403 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 14404 14405 dtrace_probes[probe->dtpr_id - 1] = NULL; 14406 14407 next = probe->dtpr_nextmod; 14408 dtrace_hash_remove(dtrace_bymod, probe); 14409 dtrace_hash_remove(dtrace_byfunc, probe); 14410 dtrace_hash_remove(dtrace_byname, probe); 14411 14412 if (first == NULL) { 14413 first = probe; 14414 probe->dtpr_nextmod = NULL; 14415 } else { 14416 probe->dtpr_nextmod = first; 14417 first = probe; 14418 } 14419 } 14420 14421 /* 14422 * We've removed all of the module's probes from the hash chains and 14423 * from the probe array. Now issue a dtrace_sync() to be sure that 14424 * everyone has cleared out from any probe array processing. 14425 */ 14426 dtrace_sync(); 14427 14428 for (probe = first; probe != NULL; probe = first) { 14429 first = probe->dtpr_nextmod; 14430 prov = probe->dtpr_provider; 14431 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 14432 probe->dtpr_arg); 14433 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 14434 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 14435 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 14436 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 14437 kmem_free(probe, sizeof (dtrace_probe_t)); 14438 } 14439 14440 mutex_exit(&dtrace_lock); 14441 mutex_exit(&mod_lock); 14442 mutex_exit(&dtrace_provider_lock); 14443 } 14444 14445 void 14446 dtrace_suspend(void) 14447 { 14448 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 14449 } 14450 14451 void 14452 dtrace_resume(void) 14453 { 14454 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 14455 } 14456 14457 static int 14458 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 14459 { 14460 ASSERT(MUTEX_HELD(&cpu_lock)); 14461 mutex_enter(&dtrace_lock); 14462 14463 switch (what) { 14464 case CPU_CONFIG: { 14465 dtrace_state_t *state; 14466 dtrace_optval_t *opt, rs, c; 14467 14468 /* 14469 * For now, we only allocate a new buffer for anonymous state. 14470 */ 14471 if ((state = dtrace_anon.dta_state) == NULL) 14472 break; 14473 14474 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 14475 break; 14476 14477 opt = state->dts_options; 14478 c = opt[DTRACEOPT_CPU]; 14479 14480 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 14481 break; 14482 14483 /* 14484 * Regardless of what the actual policy is, we're going to 14485 * temporarily set our resize policy to be manual. We're 14486 * also going to temporarily set our CPU option to denote 14487 * the newly configured CPU. 14488 */ 14489 rs = opt[DTRACEOPT_BUFRESIZE]; 14490 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 14491 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 14492 14493 (void) dtrace_state_buffers(state); 14494 14495 opt[DTRACEOPT_BUFRESIZE] = rs; 14496 opt[DTRACEOPT_CPU] = c; 14497 14498 break; 14499 } 14500 14501 case CPU_UNCONFIG: 14502 /* 14503 * We don't free the buffer in the CPU_UNCONFIG case. 
(The 14504 * buffer will be freed when the consumer exits.) 14505 */ 14506 break; 14507 14508 default: 14509 break; 14510 } 14511 14512 mutex_exit(&dtrace_lock); 14513 return (0); 14514 } 14515 14516 static void 14517 dtrace_cpu_setup_initial(processorid_t cpu) 14518 { 14519 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 14520 } 14521 14522 static void 14523 dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 14524 { 14525 if (dtrace_toxranges >= dtrace_toxranges_max) { 14526 int osize, nsize; 14527 dtrace_toxrange_t *range; 14528 14529 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 14530 14531 if (osize == 0) { 14532 ASSERT(dtrace_toxrange == NULL); 14533 ASSERT(dtrace_toxranges_max == 0); 14534 dtrace_toxranges_max = 1; 14535 } else { 14536 dtrace_toxranges_max <<= 1; 14537 } 14538 14539 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 14540 range = kmem_zalloc(nsize, KM_SLEEP); 14541 14542 if (dtrace_toxrange != NULL) { 14543 ASSERT(osize != 0); 14544 bcopy(dtrace_toxrange, range, osize); 14545 kmem_free(dtrace_toxrange, osize); 14546 } 14547 14548 dtrace_toxrange = range; 14549 } 14550 14551 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == NULL); 14552 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == NULL); 14553 14554 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 14555 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 14556 dtrace_toxranges++; 14557 } 14558 14559 /* 14560 * DTrace Driver Cookbook Functions 14561 */ 14562 /*ARGSUSED*/ 14563 static int 14564 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 14565 { 14566 dtrace_provider_id_t id; 14567 dtrace_state_t *state = NULL; 14568 dtrace_enabling_t *enab; 14569 14570 mutex_enter(&cpu_lock); 14571 mutex_enter(&dtrace_provider_lock); 14572 mutex_enter(&dtrace_lock); 14573 14574 if (ddi_soft_state_init(&dtrace_softstate, 14575 sizeof (dtrace_state_t), 0) != 0) { 14576 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 14577 mutex_exit(&cpu_lock); 14578 mutex_exit(&dtrace_provider_lock); 14579 mutex_exit(&dtrace_lock); 14580 return (DDI_FAILURE); 14581 } 14582 14583 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 14584 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 14585 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 14586 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 14587 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 14588 ddi_remove_minor_node(devi, NULL); 14589 ddi_soft_state_fini(&dtrace_softstate); 14590 mutex_exit(&cpu_lock); 14591 mutex_exit(&dtrace_provider_lock); 14592 mutex_exit(&dtrace_lock); 14593 return (DDI_FAILURE); 14594 } 14595 14596 ddi_report_dev(devi); 14597 dtrace_devi = devi; 14598 14599 dtrace_modload = dtrace_module_loaded; 14600 dtrace_modunload = dtrace_module_unloaded; 14601 dtrace_cpu_init = dtrace_cpu_setup_initial; 14602 dtrace_helpers_cleanup = dtrace_helpers_destroy; 14603 dtrace_helpers_fork = dtrace_helpers_duplicate; 14604 dtrace_cpustart_init = dtrace_suspend; 14605 dtrace_cpustart_fini = dtrace_resume; 14606 dtrace_debugger_init = dtrace_suspend; 14607 dtrace_debugger_fini = dtrace_resume; 14608 14609 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 14610 14611 ASSERT(MUTEX_HELD(&cpu_lock)); 14612 14613 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 14614 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 14615 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 14616 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 14617 VM_SLEEP | 
VMC_IDENTIFIER); 14618 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 14619 1, INT_MAX, 0); 14620 14621 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 14622 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 14623 NULL, NULL, NULL, NULL, NULL, 0); 14624 14625 ASSERT(MUTEX_HELD(&cpu_lock)); 14626 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 14627 offsetof(dtrace_probe_t, dtpr_nextmod), 14628 offsetof(dtrace_probe_t, dtpr_prevmod)); 14629 14630 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 14631 offsetof(dtrace_probe_t, dtpr_nextfunc), 14632 offsetof(dtrace_probe_t, dtpr_prevfunc)); 14633 14634 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 14635 offsetof(dtrace_probe_t, dtpr_nextname), 14636 offsetof(dtrace_probe_t, dtpr_prevname)); 14637 14638 if (dtrace_retain_max < 1) { 14639 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 14640 "setting to 1", dtrace_retain_max); 14641 dtrace_retain_max = 1; 14642 } 14643 14644 /* 14645 * Now discover our toxic ranges. 14646 */ 14647 dtrace_toxic_ranges(dtrace_toxrange_add); 14648 14649 /* 14650 * Before we register ourselves as a provider to our own framework, 14651 * we would like to assert that dtrace_provider is NULL -- but that's 14652 * not true if we were loaded as a dependency of a DTrace provider. 14653 * Once we've registered, we can assert that dtrace_provider is our 14654 * pseudo provider. 14655 */ 14656 (void) dtrace_register("dtrace", &dtrace_provider_attr, 14657 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 14658 14659 ASSERT(dtrace_provider != NULL); 14660 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 14661 14662 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 14663 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 14664 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 14665 dtrace_provider, NULL, NULL, "END", 0, NULL); 14666 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 14667 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 14668 14669 dtrace_anon_property(); 14670 mutex_exit(&cpu_lock); 14671 14672 /* 14673 * If DTrace helper tracing is enabled, we need to allocate the 14674 * trace buffer and initialize the values. 14675 */ 14676 if (dtrace_helptrace_enabled) { 14677 ASSERT(dtrace_helptrace_buffer == NULL); 14678 dtrace_helptrace_buffer = 14679 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 14680 dtrace_helptrace_next = 0; 14681 } 14682 14683 /* 14684 * If there are already providers, we must ask them to provide their 14685 * probes, and then match any anonymous enabling against them. Note 14686 * that there should be no other retained enablings at this time: 14687 * the only retained enablings at this time should be the anonymous 14688 * enabling. 14689 */ 14690 if (dtrace_anon.dta_enabling != NULL) { 14691 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 14692 14693 dtrace_enabling_provide(NULL); 14694 state = dtrace_anon.dta_state; 14695 14696 /* 14697 * We couldn't hold cpu_lock across the above call to 14698 * dtrace_enabling_provide(), but we must hold it to actually 14699 * enable the probes. We have to drop all of our locks, pick 14700 * up cpu_lock, and regain our locks before matching the 14701 * retained anonymous enabling. 
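 * (cpu_lock must be acquired before dtrace_provider_lock and
 * dtrace_lock -- the same order used at the top of this routine --
 * which is why both remaining locks are dropped and all three are
 * reacquired in that order below.)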
14702 */ 14703 mutex_exit(&dtrace_lock); 14704 mutex_exit(&dtrace_provider_lock); 14705 14706 mutex_enter(&cpu_lock); 14707 mutex_enter(&dtrace_provider_lock); 14708 mutex_enter(&dtrace_lock); 14709 14710 if ((enab = dtrace_anon.dta_enabling) != NULL) 14711 (void) dtrace_enabling_match(enab, NULL); 14712 14713 mutex_exit(&cpu_lock); 14714 } 14715 14716 mutex_exit(&dtrace_lock); 14717 mutex_exit(&dtrace_provider_lock); 14718 14719 if (state != NULL) { 14720 /* 14721 * If we created any anonymous state, set it going now. 14722 */ 14723 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 14724 } 14725 14726 return (DDI_SUCCESS); 14727 } 14728 14729 /*ARGSUSED*/ 14730 static int 14731 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 14732 { 14733 dtrace_state_t *state; 14734 uint32_t priv; 14735 uid_t uid; 14736 zoneid_t zoneid; 14737 14738 if (getminor(*devp) == DTRACEMNRN_HELPER) 14739 return (0); 14740 14741 /* 14742 * If this wasn't an open with the "helper" minor, then it must be 14743 * the "dtrace" minor. 14744 */ 14745 if (getminor(*devp) != DTRACEMNRN_DTRACE) 14746 return (ENXIO); 14747 14748 /* 14749 * If no DTRACE_PRIV_* bits are set in the credential, then the 14750 * caller lacks sufficient permission to do anything with DTrace. 14751 */ 14752 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 14753 if (priv == DTRACE_PRIV_NONE) 14754 return (EACCES); 14755 14756 /* 14757 * Ask all providers to provide all their probes. 14758 */ 14759 mutex_enter(&dtrace_provider_lock); 14760 dtrace_probe_provide(NULL, NULL); 14761 mutex_exit(&dtrace_provider_lock); 14762 14763 mutex_enter(&cpu_lock); 14764 mutex_enter(&dtrace_lock); 14765 dtrace_opens++; 14766 dtrace_membar_producer(); 14767 14768 /* 14769 * If the kernel debugger is active (that is, if the kernel debugger 14770 * modified text in some way), we won't allow the open. 14771 */ 14772 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 14773 dtrace_opens--; 14774 mutex_exit(&cpu_lock); 14775 mutex_exit(&dtrace_lock); 14776 return (EBUSY); 14777 } 14778 14779 state = dtrace_state_create(devp, cred_p); 14780 mutex_exit(&cpu_lock); 14781 14782 if (state == NULL) { 14783 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) 14784 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 14785 mutex_exit(&dtrace_lock); 14786 return (EAGAIN); 14787 } 14788 14789 mutex_exit(&dtrace_lock); 14790 14791 return (0); 14792 } 14793 14794 /*ARGSUSED*/ 14795 static int 14796 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 14797 { 14798 minor_t minor = getminor(dev); 14799 dtrace_state_t *state; 14800 14801 if (minor == DTRACEMNRN_HELPER) 14802 return (0); 14803 14804 state = ddi_get_soft_state(dtrace_softstate, minor); 14805 14806 mutex_enter(&cpu_lock); 14807 mutex_enter(&dtrace_lock); 14808 14809 if (state->dts_anon) { 14810 /* 14811 * There is anonymous state. Destroy that first. 14812 */ 14813 ASSERT(dtrace_anon.dta_state == NULL); 14814 dtrace_state_destroy(state->dts_anon); 14815 } 14816 14817 dtrace_state_destroy(state); 14818 ASSERT(dtrace_opens > 0); 14819 14820 /* 14821 * Only relinquish control of the kernel debugger interface when there 14822 * are no consumers and no anonymous enablings. 
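 * (The KDI_DTSET_DTRACE_DEACTIVATE call below is the counterpart of
 * the KDI_DTSET_DTRACE_ACTIVATE performed in dtrace_open().)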
14823 */ 14824 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) 14825 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 14826 14827 mutex_exit(&dtrace_lock); 14828 mutex_exit(&cpu_lock); 14829 14830 return (0); 14831 } 14832 14833 /*ARGSUSED*/ 14834 static int 14835 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 14836 { 14837 int rval; 14838 dof_helper_t help, *dhp = NULL; 14839 14840 switch (cmd) { 14841 case DTRACEHIOC_ADDDOF: 14842 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 14843 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 14844 return (EFAULT); 14845 } 14846 14847 dhp = &help; 14848 arg = (intptr_t)help.dofhp_dof; 14849 /*FALLTHROUGH*/ 14850 14851 case DTRACEHIOC_ADD: { 14852 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 14853 14854 if (dof == NULL) 14855 return (rval); 14856 14857 mutex_enter(&dtrace_lock); 14858 14859 /* 14860 * dtrace_helper_slurp() takes responsibility for the dof -- 14861 * it may free it now or it may save it and free it later. 14862 */ 14863 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 14864 *rv = rval; 14865 rval = 0; 14866 } else { 14867 rval = EINVAL; 14868 } 14869 14870 mutex_exit(&dtrace_lock); 14871 return (rval); 14872 } 14873 14874 case DTRACEHIOC_REMOVE: { 14875 mutex_enter(&dtrace_lock); 14876 rval = dtrace_helper_destroygen(arg); 14877 mutex_exit(&dtrace_lock); 14878 14879 return (rval); 14880 } 14881 14882 default: 14883 break; 14884 } 14885 14886 return (ENOTTY); 14887 } 14888 14889 /*ARGSUSED*/ 14890 static int 14891 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 14892 { 14893 minor_t minor = getminor(dev); 14894 dtrace_state_t *state; 14895 int rval; 14896 14897 if (minor == DTRACEMNRN_HELPER) 14898 return (dtrace_ioctl_helper(cmd, arg, rv)); 14899 14900 state = ddi_get_soft_state(dtrace_softstate, minor); 14901 14902 if (state->dts_anon) { 14903 ASSERT(dtrace_anon.dta_state == NULL); 14904 state = state->dts_anon; 14905 } 14906 14907 switch (cmd) { 14908 case DTRACEIOC_PROVIDER: { 14909 dtrace_providerdesc_t pvd; 14910 dtrace_provider_t *pvp; 14911 14912 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 14913 return (EFAULT); 14914 14915 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 14916 mutex_enter(&dtrace_provider_lock); 14917 14918 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 14919 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 14920 break; 14921 } 14922 14923 mutex_exit(&dtrace_provider_lock); 14924 14925 if (pvp == NULL) 14926 return (ESRCH); 14927 14928 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 14929 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 14930 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 14931 return (EFAULT); 14932 14933 return (0); 14934 } 14935 14936 case DTRACEIOC_EPROBE: { 14937 dtrace_eprobedesc_t epdesc; 14938 dtrace_ecb_t *ecb; 14939 dtrace_action_t *act; 14940 void *buf; 14941 size_t size; 14942 uintptr_t dest; 14943 int nrecs; 14944 14945 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 14946 return (EFAULT); 14947 14948 mutex_enter(&dtrace_lock); 14949 14950 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 14951 mutex_exit(&dtrace_lock); 14952 return (EINVAL); 14953 } 14954 14955 if (ecb->dte_probe == NULL) { 14956 mutex_exit(&dtrace_lock); 14957 return (EINVAL); 14958 } 14959 14960 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 14961 epdesc.dtepd_uarg = ecb->dte_uarg; 14962 epdesc.dtepd_size = ecb->dte_size; 14963 14964 nrecs = epdesc.dtepd_nrecs; 
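/*
 * Remember how many records the caller has room for; nrecs bounds the
 * number of record descriptions copied out below, while dtepd_nrecs is
 * recomputed from the ECB's actions.
 */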
14965 epdesc.dtepd_nrecs = 0; 14966 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 14967 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 14968 continue; 14969 14970 epdesc.dtepd_nrecs++; 14971 } 14972 14973 /* 14974 * Now that we have the size, we need to allocate a temporary 14975 * buffer in which to store the complete description. We need 14976 * the temporary buffer to be able to drop dtrace_lock() 14977 * across the copyout(), below. 14978 */ 14979 size = sizeof (dtrace_eprobedesc_t) + 14980 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 14981 14982 buf = kmem_alloc(size, KM_SLEEP); 14983 dest = (uintptr_t)buf; 14984 14985 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 14986 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 14987 14988 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 14989 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 14990 continue; 14991 14992 if (nrecs-- == 0) 14993 break; 14994 14995 bcopy(&act->dta_rec, (void *)dest, 14996 sizeof (dtrace_recdesc_t)); 14997 dest += sizeof (dtrace_recdesc_t); 14998 } 14999 15000 mutex_exit(&dtrace_lock); 15001 15002 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 15003 kmem_free(buf, size); 15004 return (EFAULT); 15005 } 15006 15007 kmem_free(buf, size); 15008 return (0); 15009 } 15010 15011 case DTRACEIOC_AGGDESC: { 15012 dtrace_aggdesc_t aggdesc; 15013 dtrace_action_t *act; 15014 dtrace_aggregation_t *agg; 15015 int nrecs; 15016 uint32_t offs; 15017 dtrace_recdesc_t *lrec; 15018 void *buf; 15019 size_t size; 15020 uintptr_t dest; 15021 15022 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 15023 return (EFAULT); 15024 15025 mutex_enter(&dtrace_lock); 15026 15027 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 15028 mutex_exit(&dtrace_lock); 15029 return (EINVAL); 15030 } 15031 15032 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 15033 15034 nrecs = aggdesc.dtagd_nrecs; 15035 aggdesc.dtagd_nrecs = 0; 15036 15037 offs = agg->dtag_base; 15038 lrec = &agg->dtag_action.dta_rec; 15039 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 15040 15041 for (act = agg->dtag_first; ; act = act->dta_next) { 15042 ASSERT(act->dta_intuple || 15043 DTRACEACT_ISAGG(act->dta_kind)); 15044 15045 /* 15046 * If this action has a record size of zero, it 15047 * denotes an argument to the aggregating action. 15048 * Because the presence of this record doesn't (or 15049 * shouldn't) affect the way the data is interpreted, 15050 * we don't copy it out to save user-level the 15051 * confusion of dealing with a zero-length record. 15052 */ 15053 if (act->dta_rec.dtrd_size == 0) { 15054 ASSERT(agg->dtag_hasarg); 15055 continue; 15056 } 15057 15058 aggdesc.dtagd_nrecs++; 15059 15060 if (act == &agg->dtag_action) 15061 break; 15062 } 15063 15064 /* 15065 * Now that we have the size, we need to allocate a temporary 15066 * buffer in which to store the complete description. We need 15067 * the temporary buffer to be able to drop dtrace_lock() 15068 * across the copyout(), below. 
15069 */ 15070 size = sizeof (dtrace_aggdesc_t) + 15071 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 15072 15073 buf = kmem_alloc(size, KM_SLEEP); 15074 dest = (uintptr_t)buf; 15075 15076 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 15077 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 15078 15079 for (act = agg->dtag_first; ; act = act->dta_next) { 15080 dtrace_recdesc_t rec = act->dta_rec; 15081 15082 /* 15083 * See the comment in the above loop for why we pass 15084 * over zero-length records. 15085 */ 15086 if (rec.dtrd_size == 0) { 15087 ASSERT(agg->dtag_hasarg); 15088 continue; 15089 } 15090 15091 if (nrecs-- == 0) 15092 break; 15093 15094 rec.dtrd_offset -= offs; 15095 bcopy(&rec, (void *)dest, sizeof (rec)); 15096 dest += sizeof (dtrace_recdesc_t); 15097 15098 if (act == &agg->dtag_action) 15099 break; 15100 } 15101 15102 mutex_exit(&dtrace_lock); 15103 15104 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 15105 kmem_free(buf, size); 15106 return (EFAULT); 15107 } 15108 15109 kmem_free(buf, size); 15110 return (0); 15111 } 15112 15113 case DTRACEIOC_ENABLE: { 15114 dof_hdr_t *dof; 15115 dtrace_enabling_t *enab = NULL; 15116 dtrace_vstate_t *vstate; 15117 int err = 0; 15118 15119 *rv = 0; 15120 15121 /* 15122 * If a NULL argument has been passed, we take this as our 15123 * cue to reevaluate our enablings. 15124 */ 15125 if (arg == NULL) { 15126 dtrace_enabling_matchall(); 15127 15128 return (0); 15129 } 15130 15131 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 15132 return (rval); 15133 15134 mutex_enter(&cpu_lock); 15135 mutex_enter(&dtrace_lock); 15136 vstate = &state->dts_vstate; 15137 15138 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 15139 mutex_exit(&dtrace_lock); 15140 mutex_exit(&cpu_lock); 15141 dtrace_dof_destroy(dof); 15142 return (EBUSY); 15143 } 15144 15145 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 15146 mutex_exit(&dtrace_lock); 15147 mutex_exit(&cpu_lock); 15148 dtrace_dof_destroy(dof); 15149 return (EINVAL); 15150 } 15151 15152 if ((rval = dtrace_dof_options(dof, state)) != 0) { 15153 dtrace_enabling_destroy(enab); 15154 mutex_exit(&dtrace_lock); 15155 mutex_exit(&cpu_lock); 15156 dtrace_dof_destroy(dof); 15157 return (rval); 15158 } 15159 15160 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 15161 err = dtrace_enabling_retain(enab); 15162 } else { 15163 dtrace_enabling_destroy(enab); 15164 } 15165 15166 mutex_exit(&cpu_lock); 15167 mutex_exit(&dtrace_lock); 15168 dtrace_dof_destroy(dof); 15169 15170 return (err); 15171 } 15172 15173 case DTRACEIOC_REPLICATE: { 15174 dtrace_repldesc_t desc; 15175 dtrace_probedesc_t *match = &desc.dtrpd_match; 15176 dtrace_probedesc_t *create = &desc.dtrpd_create; 15177 int err; 15178 15179 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15180 return (EFAULT); 15181 15182 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15183 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15184 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15185 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15186 15187 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15188 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15189 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15190 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15191 15192 mutex_enter(&dtrace_lock); 15193 err = dtrace_enabling_replicate(state, match, create); 15194 mutex_exit(&dtrace_lock); 15195 15196 return (err); 15197 } 15198 15199 case DTRACEIOC_PROBEMATCH: 15200 case DTRACEIOC_PROBES: { 15201 dtrace_probe_t *probe = 
NULL; 15202 dtrace_probedesc_t desc; 15203 dtrace_probekey_t pkey; 15204 dtrace_id_t i; 15205 int m = 0; 15206 uint32_t priv; 15207 uid_t uid; 15208 zoneid_t zoneid; 15209 15210 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15211 return (EFAULT); 15212 15213 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15214 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15215 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15216 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15217 15218 /* 15219 * Before we attempt to match this probe, we want to give 15220 * all providers the opportunity to provide it. 15221 */ 15222 if (desc.dtpd_id == DTRACE_IDNONE) { 15223 mutex_enter(&dtrace_provider_lock); 15224 dtrace_probe_provide(&desc, NULL); 15225 mutex_exit(&dtrace_provider_lock); 15226 desc.dtpd_id++; 15227 } 15228 15229 if (cmd == DTRACEIOC_PROBEMATCH) { 15230 dtrace_probekey(&desc, &pkey); 15231 pkey.dtpk_id = DTRACE_IDNONE; 15232 } 15233 15234 dtrace_cred2priv(cr, &priv, &uid, &zoneid); 15235 15236 mutex_enter(&dtrace_lock); 15237 15238 if (cmd == DTRACEIOC_PROBEMATCH) { 15239 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 15240 if ((probe = dtrace_probes[i - 1]) != NULL && 15241 (m = dtrace_match_probe(probe, &pkey, 15242 priv, uid, zoneid)) != 0) 15243 break; 15244 } 15245 15246 if (m < 0) { 15247 mutex_exit(&dtrace_lock); 15248 return (EINVAL); 15249 } 15250 15251 } else { 15252 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 15253 if ((probe = dtrace_probes[i - 1]) != NULL && 15254 dtrace_match_priv(probe, priv, uid, zoneid)) 15255 break; 15256 } 15257 } 15258 15259 if (probe == NULL) { 15260 mutex_exit(&dtrace_lock); 15261 return (ESRCH); 15262 } 15263 15264 dtrace_probe_description(probe, &desc); 15265 mutex_exit(&dtrace_lock); 15266 15267 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 15268 return (EFAULT); 15269 15270 return (0); 15271 } 15272 15273 case DTRACEIOC_PROBEARG: { 15274 dtrace_argdesc_t desc; 15275 dtrace_probe_t *probe; 15276 dtrace_provider_t *prov; 15277 15278 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15279 return (EFAULT); 15280 15281 if (desc.dtargd_id == DTRACE_IDNONE) 15282 return (EINVAL); 15283 15284 if (desc.dtargd_ndx == DTRACE_ARGNONE) 15285 return (EINVAL); 15286 15287 mutex_enter(&dtrace_provider_lock); 15288 mutex_enter(&mod_lock); 15289 mutex_enter(&dtrace_lock); 15290 15291 if (desc.dtargd_id > dtrace_nprobes) { 15292 mutex_exit(&dtrace_lock); 15293 mutex_exit(&mod_lock); 15294 mutex_exit(&dtrace_provider_lock); 15295 return (EINVAL); 15296 } 15297 15298 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 15299 mutex_exit(&dtrace_lock); 15300 mutex_exit(&mod_lock); 15301 mutex_exit(&dtrace_provider_lock); 15302 return (EINVAL); 15303 } 15304 15305 mutex_exit(&dtrace_lock); 15306 15307 prov = probe->dtpr_provider; 15308 15309 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 15310 /* 15311 * There isn't any typed information for this probe. 15312 * Set the argument number to DTRACE_ARGNONE. 
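 * (The ioctl still succeeds; the consumer simply learns that no
 * argument description is available for this probe.)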
15313 */ 15314 desc.dtargd_ndx = DTRACE_ARGNONE; 15315 } else { 15316 desc.dtargd_native[0] = '\0'; 15317 desc.dtargd_xlate[0] = '\0'; 15318 desc.dtargd_mapping = desc.dtargd_ndx; 15319 15320 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, 15321 probe->dtpr_id, probe->dtpr_arg, &desc); 15322 } 15323 15324 mutex_exit(&mod_lock); 15325 mutex_exit(&dtrace_provider_lock); 15326 15327 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 15328 return (EFAULT); 15329 15330 return (0); 15331 } 15332 15333 case DTRACEIOC_GO: { 15334 processorid_t cpuid; 15335 rval = dtrace_state_go(state, &cpuid); 15336 15337 if (rval != 0) 15338 return (rval); 15339 15340 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 15341 return (EFAULT); 15342 15343 return (0); 15344 } 15345 15346 case DTRACEIOC_STOP: { 15347 processorid_t cpuid; 15348 15349 mutex_enter(&dtrace_lock); 15350 rval = dtrace_state_stop(state, &cpuid); 15351 mutex_exit(&dtrace_lock); 15352 15353 if (rval != 0) 15354 return (rval); 15355 15356 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 15357 return (EFAULT); 15358 15359 return (0); 15360 } 15361 15362 case DTRACEIOC_DOFGET: { 15363 dof_hdr_t hdr, *dof; 15364 uint64_t len; 15365 15366 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) 15367 return (EFAULT); 15368 15369 mutex_enter(&dtrace_lock); 15370 dof = dtrace_dof_create(state); 15371 mutex_exit(&dtrace_lock); 15372 15373 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); 15374 rval = copyout(dof, (void *)arg, len); 15375 dtrace_dof_destroy(dof); 15376 15377 return (rval == 0 ? 0 : EFAULT); 15378 } 15379 15380 case DTRACEIOC_AGGSNAP: 15381 case DTRACEIOC_BUFSNAP: { 15382 dtrace_bufdesc_t desc; 15383 caddr_t cached; 15384 dtrace_buffer_t *buf; 15385 15386 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15387 return (EFAULT); 15388 15389 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) 15390 return (EINVAL); 15391 15392 mutex_enter(&dtrace_lock); 15393 15394 if (cmd == DTRACEIOC_BUFSNAP) { 15395 buf = &state->dts_buffer[desc.dtbd_cpu]; 15396 } else { 15397 buf = &state->dts_aggbuffer[desc.dtbd_cpu]; 15398 } 15399 15400 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { 15401 size_t sz = buf->dtb_offset; 15402 15403 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { 15404 mutex_exit(&dtrace_lock); 15405 return (EBUSY); 15406 } 15407 15408 /* 15409 * If this buffer has already been consumed, we're 15410 * going to indicate that there's nothing left here 15411 * to consume. 15412 */ 15413 if (buf->dtb_flags & DTRACEBUF_CONSUMED) { 15414 mutex_exit(&dtrace_lock); 15415 15416 desc.dtbd_size = 0; 15417 desc.dtbd_drops = 0; 15418 desc.dtbd_errors = 0; 15419 desc.dtbd_oldest = 0; 15420 sz = sizeof (desc); 15421 15422 if (copyout(&desc, (void *)arg, sz) != 0) 15423 return (EFAULT); 15424 15425 return (0); 15426 } 15427 15428 /* 15429 * If this is a ring buffer that has wrapped, we want 15430 * to copy the whole thing out. 
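 * (After dtrace_buffer_polish(), the entire dtb_size is copied out
 * rather than just dtb_offset, and dtbd_oldest is taken from
 * dtb_xamot_offset below.)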
15431 */ 15432 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 15433 dtrace_buffer_polish(buf); 15434 sz = buf->dtb_size; 15435 } 15436 15437 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { 15438 mutex_exit(&dtrace_lock); 15439 return (EFAULT); 15440 } 15441 15442 desc.dtbd_size = sz; 15443 desc.dtbd_drops = buf->dtb_drops; 15444 desc.dtbd_errors = buf->dtb_errors; 15445 desc.dtbd_oldest = buf->dtb_xamot_offset; 15446 15447 mutex_exit(&dtrace_lock); 15448 15449 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 15450 return (EFAULT); 15451 15452 buf->dtb_flags |= DTRACEBUF_CONSUMED; 15453 15454 return (0); 15455 } 15456 15457 if (buf->dtb_tomax == NULL) { 15458 ASSERT(buf->dtb_xamot == NULL); 15459 mutex_exit(&dtrace_lock); 15460 return (ENOENT); 15461 } 15462 15463 cached = buf->dtb_tomax; 15464 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 15465 15466 dtrace_xcall(desc.dtbd_cpu, 15467 (dtrace_xcall_t)dtrace_buffer_switch, buf); 15468 15469 state->dts_errors += buf->dtb_xamot_errors; 15470 15471 /* 15472 * If the buffers did not actually switch, then the cross call 15473 * did not take place -- presumably because the given CPU is 15474 * not in the ready set. If this is the case, we'll return 15475 * ENOENT. 15476 */ 15477 if (buf->dtb_tomax == cached) { 15478 ASSERT(buf->dtb_xamot != cached); 15479 mutex_exit(&dtrace_lock); 15480 return (ENOENT); 15481 } 15482 15483 ASSERT(cached == buf->dtb_xamot); 15484 15485 /* 15486 * We have our snapshot; now copy it out. 15487 */ 15488 if (copyout(buf->dtb_xamot, desc.dtbd_data, 15489 buf->dtb_xamot_offset) != 0) { 15490 mutex_exit(&dtrace_lock); 15491 return (EFAULT); 15492 } 15493 15494 desc.dtbd_size = buf->dtb_xamot_offset; 15495 desc.dtbd_drops = buf->dtb_xamot_drops; 15496 desc.dtbd_errors = buf->dtb_xamot_errors; 15497 desc.dtbd_oldest = 0; 15498 15499 mutex_exit(&dtrace_lock); 15500 15501 /* 15502 * Finally, copy out the buffer description. 15503 */ 15504 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 15505 return (EFAULT); 15506 15507 return (0); 15508 } 15509 15510 case DTRACEIOC_CONF: { 15511 dtrace_conf_t conf; 15512 15513 bzero(&conf, sizeof (conf)); 15514 conf.dtc_difversion = DIF_VERSION; 15515 conf.dtc_difintregs = DIF_DIR_NREGS; 15516 conf.dtc_diftupregs = DIF_DTR_NREGS; 15517 conf.dtc_ctfmodel = CTF_MODEL_NATIVE; 15518 15519 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) 15520 return (EFAULT); 15521 15522 return (0); 15523 } 15524 15525 case DTRACEIOC_STATUS: { 15526 dtrace_status_t stat; 15527 dtrace_dstate_t *dstate; 15528 int i, j; 15529 uint64_t nerrs; 15530 15531 /* 15532 * See the comment in dtrace_state_deadman() for the reason 15533 * for setting dts_laststatus to INT64_MAX before setting 15534 * it to the correct value. 
15535 */ 15536 state->dts_laststatus = INT64_MAX; 15537 dtrace_membar_producer(); 15538 state->dts_laststatus = dtrace_gethrtime(); 15539 15540 bzero(&stat, sizeof (stat)); 15541 15542 mutex_enter(&dtrace_lock); 15543 15544 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) { 15545 mutex_exit(&dtrace_lock); 15546 return (ENOENT); 15547 } 15548 15549 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING) 15550 stat.dtst_exiting = 1; 15551 15552 nerrs = state->dts_errors; 15553 dstate = &state->dts_vstate.dtvs_dynvars; 15554 15555 for (i = 0; i < NCPU; i++) { 15556 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i]; 15557 15558 stat.dtst_dyndrops += dcpu->dtdsc_drops; 15559 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops; 15560 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops; 15561 15562 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL) 15563 stat.dtst_filled++; 15564 15565 nerrs += state->dts_buffer[i].dtb_errors; 15566 15567 for (j = 0; j < state->dts_nspeculations; j++) { 15568 dtrace_speculation_t *spec; 15569 dtrace_buffer_t *buf; 15570 15571 spec = &state->dts_speculations[j]; 15572 buf = &spec->dtsp_buffer[i]; 15573 stat.dtst_specdrops += buf->dtb_xamot_drops; 15574 } 15575 } 15576 15577 stat.dtst_specdrops_busy = state->dts_speculations_busy; 15578 stat.dtst_specdrops_unavail = state->dts_speculations_unavail; 15579 stat.dtst_stkstroverflows = state->dts_stkstroverflows; 15580 stat.dtst_dblerrors = state->dts_dblerrors; 15581 stat.dtst_killed = 15582 (state->dts_activity == DTRACE_ACTIVITY_KILLED); 15583 stat.dtst_errors = nerrs; 15584 15585 mutex_exit(&dtrace_lock); 15586 15587 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0) 15588 return (EFAULT); 15589 15590 return (0); 15591 } 15592 15593 case DTRACEIOC_FORMAT: { 15594 dtrace_fmtdesc_t fmt; 15595 char *str; 15596 int len; 15597 15598 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0) 15599 return (EFAULT); 15600 15601 mutex_enter(&dtrace_lock); 15602 15603 if (fmt.dtfd_format == 0 || 15604 fmt.dtfd_format > state->dts_nformats) { 15605 mutex_exit(&dtrace_lock); 15606 return (EINVAL); 15607 } 15608 15609 /* 15610 * Format strings are allocated contiguously and they are 15611 * never freed; if a format index is less than the number 15612 * of formats, we can assert that the format map is non-NULL 15613 * and that the format for the specified index is non-NULL. 
15614 */ 15615 ASSERT(state->dts_formats != NULL); 15616 str = state->dts_formats[fmt.dtfd_format - 1]; 15617 ASSERT(str != NULL); 15618 15619 len = strlen(str) + 1; 15620 15621 if (len > fmt.dtfd_length) { 15622 fmt.dtfd_length = len; 15623 15624 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) { 15625 mutex_exit(&dtrace_lock); 15626 return (EINVAL); 15627 } 15628 } else { 15629 if (copyout(str, fmt.dtfd_string, len) != 0) { 15630 mutex_exit(&dtrace_lock); 15631 return (EINVAL); 15632 } 15633 } 15634 15635 mutex_exit(&dtrace_lock); 15636 return (0); 15637 } 15638 15639 default: 15640 break; 15641 } 15642 15643 return (ENOTTY); 15644 } 15645 15646 /*ARGSUSED*/ 15647 static int 15648 dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 15649 { 15650 dtrace_state_t *state; 15651 15652 switch (cmd) { 15653 case DDI_DETACH: 15654 break; 15655 15656 case DDI_SUSPEND: 15657 return (DDI_SUCCESS); 15658 15659 default: 15660 return (DDI_FAILURE); 15661 } 15662 15663 mutex_enter(&cpu_lock); 15664 mutex_enter(&dtrace_provider_lock); 15665 mutex_enter(&dtrace_lock); 15666 15667 ASSERT(dtrace_opens == 0); 15668 15669 if (dtrace_helpers > 0) { 15670 mutex_exit(&dtrace_provider_lock); 15671 mutex_exit(&dtrace_lock); 15672 mutex_exit(&cpu_lock); 15673 return (DDI_FAILURE); 15674 } 15675 15676 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) { 15677 mutex_exit(&dtrace_provider_lock); 15678 mutex_exit(&dtrace_lock); 15679 mutex_exit(&cpu_lock); 15680 return (DDI_FAILURE); 15681 } 15682 15683 dtrace_provider = NULL; 15684 15685 if ((state = dtrace_anon_grab()) != NULL) { 15686 /* 15687 * If there were ECBs on this state, the provider should 15688 * have not been allowed to detach; assert that there is 15689 * none. 15690 */ 15691 ASSERT(state->dts_necbs == 0); 15692 dtrace_state_destroy(state); 15693 15694 /* 15695 * If we're being detached with anonymous state, we need to 15696 * indicate to the kernel debugger that DTrace is now inactive. 
15697 */ 15698 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15699 } 15700 15701 bzero(&dtrace_anon, sizeof (dtrace_anon_t)); 15702 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 15703 dtrace_cpu_init = NULL; 15704 dtrace_helpers_cleanup = NULL; 15705 dtrace_helpers_fork = NULL; 15706 dtrace_cpustart_init = NULL; 15707 dtrace_cpustart_fini = NULL; 15708 dtrace_debugger_init = NULL; 15709 dtrace_debugger_fini = NULL; 15710 dtrace_modload = NULL; 15711 dtrace_modunload = NULL; 15712 15713 mutex_exit(&cpu_lock); 15714 15715 if (dtrace_helptrace_enabled) { 15716 kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize); 15717 dtrace_helptrace_buffer = NULL; 15718 } 15719 15720 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *)); 15721 dtrace_probes = NULL; 15722 dtrace_nprobes = 0; 15723 15724 dtrace_hash_destroy(dtrace_bymod); 15725 dtrace_hash_destroy(dtrace_byfunc); 15726 dtrace_hash_destroy(dtrace_byname); 15727 dtrace_bymod = NULL; 15728 dtrace_byfunc = NULL; 15729 dtrace_byname = NULL; 15730 15731 kmem_cache_destroy(dtrace_state_cache); 15732 vmem_destroy(dtrace_minor); 15733 vmem_destroy(dtrace_arena); 15734 15735 if (dtrace_toxrange != NULL) { 15736 kmem_free(dtrace_toxrange, 15737 dtrace_toxranges_max * sizeof (dtrace_toxrange_t)); 15738 dtrace_toxrange = NULL; 15739 dtrace_toxranges = 0; 15740 dtrace_toxranges_max = 0; 15741 } 15742 15743 ddi_remove_minor_node(dtrace_devi, NULL); 15744 dtrace_devi = NULL; 15745 15746 ddi_soft_state_fini(&dtrace_softstate); 15747 15748 ASSERT(dtrace_vtime_references == 0); 15749 ASSERT(dtrace_opens == 0); 15750 ASSERT(dtrace_retained == NULL); 15751 15752 mutex_exit(&dtrace_lock); 15753 mutex_exit(&dtrace_provider_lock); 15754 15755 /* 15756 * We don't destroy the task queue until after we have dropped our 15757 * locks (taskq_destroy() may block on running tasks). To prevent 15758 * attempting to do work after we have effectively detached but before 15759 * the task queue has been destroyed, all tasks dispatched via the 15760 * task queue must check that DTrace is still attached before 15761 * performing any operation. 
15762 */ 15763 taskq_destroy(dtrace_taskq); 15764 dtrace_taskq = NULL; 15765 15766 return (DDI_SUCCESS); 15767 } 15768 15769 /*ARGSUSED*/ 15770 static int 15771 dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 15772 { 15773 int error; 15774 15775 switch (infocmd) { 15776 case DDI_INFO_DEVT2DEVINFO: 15777 *result = (void *)dtrace_devi; 15778 error = DDI_SUCCESS; 15779 break; 15780 case DDI_INFO_DEVT2INSTANCE: 15781 *result = (void *)0; 15782 error = DDI_SUCCESS; 15783 break; 15784 default: 15785 error = DDI_FAILURE; 15786 } 15787 return (error); 15788 } 15789 15790 static struct cb_ops dtrace_cb_ops = { 15791 dtrace_open, /* open */ 15792 dtrace_close, /* close */ 15793 nulldev, /* strategy */ 15794 nulldev, /* print */ 15795 nodev, /* dump */ 15796 nodev, /* read */ 15797 nodev, /* write */ 15798 dtrace_ioctl, /* ioctl */ 15799 nodev, /* devmap */ 15800 nodev, /* mmap */ 15801 nodev, /* segmap */ 15802 nochpoll, /* poll */ 15803 ddi_prop_op, /* cb_prop_op */ 15804 0, /* streamtab */ 15805 D_NEW | D_MP /* Driver compatibility flag */ 15806 }; 15807 15808 static struct dev_ops dtrace_ops = { 15809 DEVO_REV, /* devo_rev */ 15810 0, /* refcnt */ 15811 dtrace_info, /* get_dev_info */ 15812 nulldev, /* identify */ 15813 nulldev, /* probe */ 15814 dtrace_attach, /* attach */ 15815 dtrace_detach, /* detach */ 15816 nodev, /* reset */ 15817 &dtrace_cb_ops, /* driver operations */ 15818 NULL, /* bus operations */ 15819 nodev, /* dev power */ 15820 ddi_quiesce_not_needed, /* quiesce */ 15821 }; 15822 15823 static struct modldrv modldrv = { 15824 &mod_driverops, /* module type (this is a pseudo driver) */ 15825 "Dynamic Tracing", /* name of module */ 15826 &dtrace_ops, /* driver ops */ 15827 }; 15828 15829 static struct modlinkage modlinkage = { 15830 MODREV_1, 15831 (void *)&modldrv, 15832 NULL 15833 }; 15834 15835 int 15836 _init(void) 15837 { 15838 return (mod_install(&modlinkage)); 15839 } 15840 15841 int 15842 _info(struct modinfo *modinfop) 15843 { 15844 return (mod_info(&modlinkage, modinfop)); 15845 } 15846 15847 int 15848 _fini(void) 15849 { 15850 return (mod_remove(&modlinkage)); 15851 } 15852