/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * DTrace - Dynamic Tracing for Solaris
 *
 * This is the implementation of the Solaris Dynamic Tracing framework
 * (DTrace).  The user-visible interface to DTrace is described at length in
 * the "Solaris Dynamic Tracing Guide".  The interfaces between the libdtrace
 * library, the in-kernel DTrace framework, and the DTrace providers are
 * described in the block comments in the <sys/dtrace.h> header file.  The
 * internal architecture of DTrace is described in the block comments in the
 * <sys/dtrace_impl.h> header file.  The comments contained within the DTrace
 * implementation very much assume mastery of all of these sources; if one has
 * an unanswered question about the implementation, one should consult them
 * first.
 *
 * The functions here are ordered roughly as follows:
 *
 *   - Probe context functions
 *   - Probe hashing functions
 *   - Non-probe context utility functions
 *   - Matching functions
 *   - Provider-to-Framework API functions
 *   - Probe management functions
 *   - DIF object functions
 *   - Format functions
 *   - Predicate functions
 *   - ECB functions
 *   - Buffer functions
 *   - Enabling functions
 *   - DOF functions
 *   - Anonymous enabling functions
 *   - Consumer state functions
 *   - Helper functions
 *   - Hook functions
 *   - Driver cookbook functions
 *
 * Each group of functions begins with a block comment labelled the "DTrace
 * [Group] Functions", allowing one to find each block by searching forward
 * on capital-f functions.
 */
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/strsubr.h>
#include <sys/sysmacros.h>
#include <sys/dtrace_impl.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#include <sys/ctf_api.h>
#include <sys/panic.h>
#include <sys/priv_impl.h>
#include <sys/policy.h>
#include <sys/cred_impl.h>
#include <sys/procfs_isa.h>
#include <sys/taskq.h>
#include <sys/mkdev.h>
#include <sys/kdi.h>
#include <sys/zone.h>

/*
 * DTrace Tunable Variables
 *
 * The following variables may be tuned by adding a line to /etc/system that
 * includes both the name of the DTrace module ("dtrace") and the name of the
 * variable.  For example:
 *
 *   set dtrace:dtrace_destructive_disallow = 1
 *
 * In general, the only variables that one should be tuning this way are those
 * that affect system-wide DTrace behavior, and for which the default behavior
 * is undesirable.  Most of these variables are tunable on a per-consumer
 * basis using DTrace options, and need not be tuned on a system-wide basis.
 * When tuning these variables, avoid pathological values; while some attempt
 * is made to verify the integrity of these variables, they are not considered
 * part of the supported interface to DTrace, and they are therefore not
 * checked comprehensively.  Further, these variables should not be tuned
 * dynamically via "mdb -kw" or other means; they should only be tuned via
 * /etc/system.
 */
int		dtrace_destructive_disallow = 0;
dtrace_optval_t	dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t		dtrace_difo_maxsize = (256 * 1024);
dtrace_optval_t	dtrace_dof_maxsize = (256 * 1024);
size_t		dtrace_global_maxsize = (16 * 1024);
size_t		dtrace_actions_max = (16 * 1024);
size_t		dtrace_retain_max = 1024;
dtrace_optval_t	dtrace_helper_actions_max = 32;
dtrace_optval_t	dtrace_helper_providers_max = 32;
dtrace_optval_t	dtrace_dstate_defsize = (1 * 1024 * 1024);
size_t		dtrace_strsize_default = 256;
dtrace_optval_t	dtrace_cleanrate_default = 9900990;		/* 101 hz */
dtrace_optval_t	dtrace_cleanrate_min = 200000;			/* 5000 hz */
dtrace_optval_t	dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;	/* 1/minute */
dtrace_optval_t	dtrace_aggrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_max = (hrtime_t)10 * NANOSEC;	/* 6/minute */
dtrace_optval_t	dtrace_switchrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_nspec_default = 1;
dtrace_optval_t	dtrace_specsize_default = 32 * 1024;
dtrace_optval_t	dtrace_stackframes_default = 20;
dtrace_optval_t	dtrace_ustackframes_default = 20;
dtrace_optval_t	dtrace_jstackframes_default = 50;
dtrace_optval_t	dtrace_jstackstrsize_default = 512;
int		dtrace_msgdsize_max = 128;
hrtime_t	dtrace_chill_max = 500 * (NANOSEC / MILLISEC);	/* 500 ms */
hrtime_t	dtrace_chill_interval = NANOSEC;		/* 1000 ms */
int		dtrace_devdepth_max = 32;
int		dtrace_err_verbose;
hrtime_t	dtrace_deadman_interval = NANOSEC;
hrtime_t	dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
hrtime_t	dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
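/*
 * Editorial note (illustrative, not from the original source): most of the
 * per-consumer equivalents of the *_default tunables above are DTrace
 * options of the same name, less the "dtrace_" prefix and "_default"
 * suffix.  For example, rather than tuning dtrace_strsize_default
 * system-wide, a consumer can raise its own string limit with
 * "#pragma D option strsize=1024" in a D script, or with dtrace(1M)'s
 * -x option (dtrace -x strsize=1024 ...).
 */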
/*
 * DTrace External Variables
 *
 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
 * available to DTrace consumers via the backtick (`) syntax.  One of these,
 * dtrace_zero, is made deliberately so: it is provided as a source of
 * well-known, zero-filled memory.  While this variable is not documented,
 * it is used by some translators as an implementation detail.
 */
const char	dtrace_zero[256] = { 0 };	/* zero-filled memory */

/*
 * DTrace Internal Variables
 */
static dev_info_t	*dtrace_devi;		/* device info */
static vmem_t		*dtrace_arena;		/* probe ID arena */
static vmem_t		*dtrace_minor;		/* minor number arena */
static taskq_t		*dtrace_taskq;		/* task queue */
static dtrace_probe_t	**dtrace_probes;	/* array of all probes */
static int		dtrace_nprobes;		/* number of probes */
static dtrace_provider_t *dtrace_provider;	/* provider list */
static dtrace_meta_t	*dtrace_meta_pid;	/* user-land meta provider */
static int		dtrace_opens;		/* number of opens */
static int		dtrace_helpers;		/* number of helpers */
static void		*dtrace_softstate;	/* softstate pointer */
static dtrace_hash_t	*dtrace_bymod;		/* probes hashed by module */
static dtrace_hash_t	*dtrace_byfunc;		/* probes hashed by function */
static dtrace_hash_t	*dtrace_byname;		/* probes hashed by name */
static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
static int		dtrace_toxranges;	/* number of toxic ranges */
static int		dtrace_toxranges_max;	/* size of toxic range array */
static dtrace_anon_t	dtrace_anon;		/* anonymous enabling */
static kmem_cache_t	*dtrace_state_cache;	/* cache for dynamic state */
static uint64_t		dtrace_vtime_references; /* number of vtimestamp refs */
static kthread_t	*dtrace_panicked;	/* panicking thread */
static dtrace_ecb_t	*dtrace_ecb_create_cache; /* cached created ECB */
static dtrace_genid_t	dtrace_probegen;	/* current probe generation */
static dtrace_helpers_t *dtrace_deferred_pid;	/* deferred helper list */
static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
static dtrace_dynvar_t	dtrace_dynhash_sink;	/* end of dynamic hash chains */

/*
 * DTrace Locking
 * DTrace is protected by three (relatively coarse-grained) locks:
 *
 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
 *     including enabling state, probes, ECBs, consumer state, helper state,
 *     etc.  Importantly, dtrace_lock is _not_ required when in probe context;
 *     probe context is lock-free -- synchronization is handled via the
 *     dtrace_sync() cross call mechanism.
 *
 * (2) dtrace_provider_lock is required when manipulating provider state, or
 *     when provider state must be held constant.
 *
 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
 *     when meta provider state must be held constant.
 *
 * The lock ordering between these three locks is dtrace_meta_lock before
 * dtrace_provider_lock before dtrace_lock.  (In particular, there are
 * several places where dtrace_provider_lock is held by the framework as it
 * calls into the providers -- which then call back into the framework,
 * grabbing dtrace_lock.)
 *
 * There are two other locks in the mix:  mod_lock and cpu_lock.  With respect
 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
 * role as a coarse-grained lock; it is acquired before both of these locks.
 * With respect to dtrace_meta_lock, its behavior is stranger:  cpu_lock must
 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
 * acquired _between_ dtrace_provider_lock and dtrace_lock.
 */
static kmutex_t		dtrace_lock;		/* probe state lock */
static kmutex_t		dtrace_provider_lock;	/* provider state lock */
static kmutex_t		dtrace_meta_lock;	/* meta-provider state lock */
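/*
 * The following is a minimal editorial sketch (not part of the original
 * source, and compiled out) of an acquisition sequence consistent with the
 * ordering described above; the function is hypothetical and exists only to
 * make the total order concrete.
 */
#ifdef DTRACE_LOCK_ORDER_EXAMPLE
static void
dtrace_lock_order_example(void)
{
	mutex_enter(&dtrace_meta_lock);		/* outermost DTrace lock */
	mutex_enter(&cpu_lock);			/* after meta, before the rest */
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&mod_lock);			/* between provider and dtrace */
	mutex_enter(&dtrace_lock);		/* innermost */

	/* ... manipulate framework, provider, and meta provider state ... */

	mutex_exit(&dtrace_lock);
	mutex_exit(&mod_lock);
	mutex_exit(&dtrace_provider_lock);
	mutex_exit(&cpu_lock);
	mutex_exit(&dtrace_meta_lock);
}
#endif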
/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t	dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static dtrace_pops_t	dtrace_provider_ops = {
	(void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop,
	(void (*)(void *, struct modctl *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	NULL,
	NULL,
	NULL,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 */
uint32_t dtrace_helptrace_next = 0;
uint32_t dtrace_helptrace_nlocals;
char	*dtrace_helptrace_buffer;
int	dtrace_helptrace_bufsize = 512 * 1024;

#ifdef DEBUG
int	dtrace_helptrace_enabled = 1;
#else
int	dtrace_helptrace_enabled = 0;
#endif

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table.  This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation.  The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#ifdef DEBUG
static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
static const char *dtrace_errlast;
static kthread_t *dtrace_errthread;
static kmutex_t dtrace_errlock;
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation.  There is no real structure to this cpp
 * mishmash -- but is there ever?
 */
#define	DTRACE_HASHSTR(hash, probe)	\
	dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))

#define	DTRACE_HASHNEXT(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)

#define	DTRACE_HASHPREV(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)

#define	DTRACE_HASHEQ(hash, lhs, rhs)	\
	(strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
	    *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)

#define	DTRACE_AGGHASHSIZE_SLEW		17

/*
 * The key for a thread-local variable consists of the lower 61 bits of the
 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
 * equal to a variable identifier.  This is necessary (but not sufficient) to
 * assure that global associative arrays never collide with thread-local
 * variables.  To guarantee that they cannot collide, we must also define the
 * order for keying dynamic variables.  That order is:
 *
 *   [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
 *
 * Because the variable-key and the tls-key are in orthogonal spaces, there
 * is no way for a global variable key signature to match a thread-local key
 * signature.
 */
#define	DTRACE_TLS_THRKEY(where) { \
	uint_t intr = 0; \
	uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}

#define	DT_BSWAP_8(x)	((x) & 0xff)
#define	DT_BSWAP_16(x)	((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
#define	DT_BSWAP_32(x)	((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
#define	DT_BSWAP_64(x)	((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))

#define	DTRACE_STORE(type, tomax, offset, what) \
	*((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);

#ifndef __i386
#define	DTRACE_ALIGNCHECK(addr, size, flags)				\
	if (addr & (size - 1)) {					\
		*flags |= CPU_DTRACE_BADALIGN;				\
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr;	\
		return (0);						\
	}
#else
#define	DTRACE_ALIGNCHECK(addr, size, flags)
#endif

/*
 * Test whether a range of memory starting at testaddr of size testsz falls
 * within the range of memory described by addr, sz.  We take care to avoid
 * problems with overflow and underflow of the unsigned quantities, and
 * disallow all negative sizes.  Ranges of size 0 are allowed.
 */
#define	DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
	((testaddr) - (baseaddr) < (basesz) && \
	(testaddr) + (testsz) - (baseaddr) <= (basesz) && \
	(testaddr) + (testsz) >= (testaddr))
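/*
 * Editorial illustration (not in the original source) of why the check above
 * is phrased with subtractions: with testaddr near the top of the address
 * space, say testaddr = (uintptr_t)-2 and testsz = 4, a naive check such as
 * (testaddr + testsz <= baseaddr + basesz) would wrap around to a small
 * value and falsely pass.  The third clause catches precisely this case,
 * because testaddr + testsz compares less than testaddr exactly when the
 * addition overflows.
 */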
/*
 * Test whether alloc_sz bytes will fit in the scratch region.  We isolate
 * alloc_sz on the righthand side of the comparison in order to avoid overflow
 * or underflow in the comparison with it.  This is simpler than the INRANGE
 * check above, because we know that the dtms_scratch_ptr is valid in the
 * range.  Allocations of size zero are allowed.
 */
#define	DTRACE_INSCRATCH(mstate, alloc_sz) \
	((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
	(mstate)->dtms_scratch_ptr >= (alloc_sz))

#define	DTRACE_LOADFUNC(bits)						\
/*CSTYLED*/								\
uint##bits##_t								\
dtrace_load##bits(uintptr_t addr)					\
{									\
	size_t size = bits / NBBY;					\
	/*CSTYLED*/							\
	uint##bits##_t rval;						\
	int i;								\
	volatile uint16_t *flags = (volatile uint16_t *)		\
	    &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;			\
									\
	DTRACE_ALIGNCHECK(addr, size, flags);				\
									\
	for (i = 0; i < dtrace_toxranges; i++) {			\
		if (addr >= dtrace_toxrange[i].dtt_limit)		\
			continue;					\
									\
		if (addr + size <= dtrace_toxrange[i].dtt_base)		\
			continue;					\
									\
		/*							\
		 * This address falls within a toxic region; return 0.	\
		 */							\
		*flags |= CPU_DTRACE_BADADDR;				\
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr;	\
		return (0);						\
	}								\
									\
	*flags |= CPU_DTRACE_NOFAULT;					\
	/*CSTYLED*/							\
	rval = *((volatile uint##bits##_t *)addr);			\
	*flags &= ~CPU_DTRACE_NOFAULT;					\
									\
	return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0);		\
}

#ifdef _LP64
#define	dtrace_loadptr	dtrace_load64
#else
#define	dtrace_loadptr	dtrace_load32
#endif

#define	DTRACE_DYNHASH_FREE	0
#define	DTRACE_DYNHASH_SINK	1
#define	DTRACE_DYNHASH_VALID	2

#define	DTRACE_MATCH_NEXT	0
#define	DTRACE_MATCH_DONE	1
#define	DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
#define	DTRACE_STATE_ALIGN	64

#define	DTRACE_FLAGS2FLT(flags)						\
	(((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR :		\
	((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP :		\
	((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO :		\
	((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV :		\
	((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV :		\
	((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW :		\
	((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN :		\
	((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH :	\
	((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK :		\
	DTRACEFLT_UNKNOWN)

#define	DTRACEACT_ISSTRING(act)						\
	((act)->dta_kind == DTRACEACT_DIFEXPR &&			\
	(act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)

static size_t dtrace_strlen(const char *, size_t);
static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
static void dtrace_enabling_provide(dtrace_provider_t *);
static int dtrace_enabling_match(dtrace_enabling_t *, int *);
static void dtrace_enabling_matchall(void);
static dtrace_state_t *dtrace_anon_grab(void);
static uint64_t dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
static void dtrace_buffer_drop(dtrace_buffer_t *);
static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context.  Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.)  If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note: not called from probe context."
 */
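/*
 * A minimal editorial sketch (not part of the original source, and compiled
 * out) of the pattern the comment above prescribes: an arbitrary load from
 * probe context goes through the safe dtrace_load*() variants, which honor
 * toxic ranges and set per-CPU fault flags, rather than a raw dereference.
 */
#ifdef DTRACE_SAFE_LOAD_EXAMPLE
static uint64_t
dtrace_safe_deref_example(uintptr_t addr)
{
	/*
	 * A raw "*(uint64_t *)addr" here could panic the system from
	 * probe context; dtrace_load64() instead returns 0 and flags
	 * the fault on the current CPU.
	 */
	return (dtrace_load64(addr));
}
#endif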
void
dtrace_panic(const char *format, ...)
{
	va_list alist;

	va_start(alist, format);
	dtrace_vpanic(format, alist);
	va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

	/*
	 * We just need something here that even the most clever compiler
	 * cannot optimize away.
	 */
	return (a[(uintptr_t)f]);
}

/*
 * Atomically increment a specified error counter from probe context.
 */
static void
dtrace_error(uint32_t *counter)
{
	/*
	 * Most counters stored to in probe context are per-CPU counters.
	 * However, there are some error conditions that are sufficiently
	 * arcane that they don't merit per-CPU storage.  If these counters
	 * are incremented concurrently on different CPUs, scalability will be
	 * adversely affected -- but we don't expect them to be white-hot in a
	 * correctly constructed enabling...
	 */
	uint32_t oval, nval;

	do {
		oval = *counter;

		if ((nval = oval + 1) == 0) {
			/*
			 * If the counter would wrap, set it to 1 -- assuring
			 * that the counter is never zero when we have seen
			 * errors.  (The counter must be 32-bits because we
			 * aren't guaranteed a 64-bit compare&swap operation.)
			 * To save this code both the infamy of being fingered
			 * by a priggish news story and the indignity of being
			 * the target of a neo-puritan witch trial, we're
			 * carefully avoiding any colorful description of the
			 * likelihood of this condition -- but suffice it to
			 * say that it is only slightly more likely than the
			 * overflow of predicate cache IDs, as discussed in
			 * dtrace_predicate_create().
			 */
			nval = 1;
		}
	} while (dtrace_cas32(counter, oval, nval) != oval);
}

/*
 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
 */
DTRACE_LOADFUNC(8)
DTRACE_LOADFUNC(16)
DTRACE_LOADFUNC(32)
DTRACE_LOADFUNC(64)

static int
dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
{
	if (dest < mstate->dtms_scratch_base)
		return (0);

	if (dest + size < dest)
		return (0);

	if (dest + size > mstate->dtms_scratch_ptr)
		return (0);

	return (1);
}

static int
dtrace_canstore_statvar(uint64_t addr, size_t sz,
    dtrace_statvar_t **svars, int nsvars)
{
	int i;

	for (i = 0; i < nsvars; i++) {
		dtrace_statvar_t *svar = svars[i];

		if (svar == NULL || svar->dtsv_size == 0)
			continue;

		if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
			return (1);
	}

	return (0);
}

/*
 * Check to see if the address is within a memory region to which a store may
 * be issued.  This includes the DTrace scratch areas, and any DTrace variable
 * region.  The caller of dtrace_canstore() is responsible for performing any
 * alignment checks that are needed before stores are actually executed.
 */
static int
dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	/*
	 * First, check to see if the address is in scratch space...
	 */
	if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
	    mstate->dtms_scratch_size))
		return (1);

	/*
	 * Now check to see if it's a dynamic variable.  This check will pick
	 * up both thread-local variables and any global dynamically-allocated
	 * variables.
	 */
	if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base,
	    vstate->dtvs_dynvars.dtds_size))
		return (1);

	/*
	 * Finally, check the static local and global variables.  These checks
	 * take the longest, so we perform them last.
	 */
	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_locals, vstate->dtvs_nlocals))
		return (1);

	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_globals, vstate->dtvs_nglobals))
		return (1);

	return (0);
}


/*
 * Convenience routine to check to see if the address is within a memory
 * region in which a load may be issued given the user's privilege level;
 * if not, it sets the appropriate error flags and loads 'addr' into the
 * illegal value slot.
 *
 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
 * appropriate memory access protection.
 */
static int
dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	/*
	 * You can obviously read that which you can store.
	 */
	if (dtrace_canstore(addr, sz, mstate, vstate))
		return (1);

	/*
	 * We're allowed to read from our own string table.
	 */
	if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab,
	    mstate->dtms_difo->dtdo_strlen))
		return (1);

	DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
	*illval = addr;
	return (0);
}

/*
 * Convenience routine to check to see if a given string is within a memory
 * region in which a load may be issued given the user's privilege level;
 * this exists so that we don't need to issue unnecessary dtrace_strlen()
 * calls in the event that the user has all privileges.
 */
static int
dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t strsz;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
	if (dtrace_canload(addr, strsz, mstate, vstate))
		return (1);

	return (0);
}

/*
 * Convenience routine to check to see if a given variable is within a memory
 * region in which a load may be issued given the user's privilege level.
 */
static int
dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t sz;
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	if (type->dtdt_kind == DIF_TYPE_STRING)
		sz = dtrace_strlen(src,
		    vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
	else
		sz = type->dtdt_size;

	return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
}

/*
 * Compare two strings using safe loads.
 */
static int
dtrace_strncmp(char *s1, char *s2, size_t limit)
{
	uint8_t c1, c2;
	volatile uint16_t *flags;

	if (s1 == s2 || limit == 0)
		return (0);

	flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;

	do {
		if (s1 == NULL) {
			c1 = '\0';
		} else {
			c1 = dtrace_load8((uintptr_t)s1++);
		}

		if (s2 == NULL) {
			c2 = '\0';
		} else {
			c2 = dtrace_load8((uintptr_t)s2++);
		}

		if (c1 != c2)
			return (c1 - c2);
	} while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));

	return (0);
}

/*
 * Compute strlen(s) for a string using safe memory accesses.  The additional
 * len parameter is used to specify a maximum length to ensure completion.
 */
static size_t
dtrace_strlen(const char *s, size_t lim)
{
	uint_t len;

	for (len = 0; len != lim; len++) {
		if (dtrace_load8((uintptr_t)s++) == '\0')
			break;
	}

	return (len);
}

/*
 * Check if an address falls within a toxic region.
 */
static int
dtrace_istoxic(uintptr_t kaddr, size_t size)
{
	uintptr_t taddr, tsize;
	int i;

	for (i = 0; i < dtrace_toxranges; i++) {
		taddr = dtrace_toxrange[i].dtt_base;
		tsize = dtrace_toxrange[i].dtt_limit - taddr;

		if (kaddr - taddr < tsize) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = kaddr;
			return (1);
		}

		if (taddr - kaddr < size) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = taddr;
			return (1);
		}
	}

	return (0);
}

/*
 * Copy src to dst using safe memory accesses.  The src is assumed to be
 * unsafe memory specified by the DIF program.  The dst is assumed to be safe
 * memory that we can store to directly because it is managed by DTrace.
 * As with standard bcopy, overlapping copies are handled properly.
 */
static void
dtrace_bcopy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst;
		const uint8_t *s2 = src;

		if (s1 <= s2) {
			do {
				*s1++ = dtrace_load8((uintptr_t)s2++);
			} while (--len != 0);
		} else {
			s2 += len;
			s1 += len;

			do {
				*--s1 = dtrace_load8((uintptr_t)--s2);
			} while (--len != 0);
		}
	}
}

/*
 * Copy src to dst using safe memory accesses, up to either the specified
 * length, or the point that a nul byte is encountered.  The src is assumed
 * to be unsafe memory specified by the DIF program.  The dst is assumed to
 * be safe memory that we can store to directly because it is managed by
 * DTrace.  Unlike dtrace_bcopy(), overlapping regions are not handled.
 */
static void
dtrace_strcpy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst, c;
		const uint8_t *s2 = src;

		do {
			*s1++ = c = dtrace_load8((uintptr_t)s2++);
		} while (--len != 0 && c != '\0');
	}
}

/*
 * Copy src to dst, deriving the size and type from the specified (BYREF)
 * variable type.  The src is assumed to be unsafe memory specified by the DIF
 * program.  The dst is assumed to be DTrace variable memory that is of the
 * specified type; we assume that we can store to directly.
 */
static void
dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
{
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	if (type->dtdt_kind == DIF_TYPE_STRING) {
		dtrace_strcpy(src, dst, type->dtdt_size);
	} else {
		dtrace_bcopy(src, dst, type->dtdt_size);
	}
}

/*
 * Compare s1 to s2 using safe memory accesses.  The s1 data is assumed to be
 * unsafe memory specified by the DIF program.  The s2 data is assumed to be
 * safe memory that we can access directly because it is managed by DTrace.
 */
static int
dtrace_bcmp(const void *s1, const void *s2, size_t len)
{
	volatile uint16_t *flags;

	flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;

	if (s1 == s2)
		return (0);

	if (s1 == NULL || s2 == NULL)
		return (1);

	if (s1 != s2 && len != 0) {
		const uint8_t *ps1 = s1;
		const uint8_t *ps2 = s2;

		do {
			if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
				return (1);
		} while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
	}
	return (0);
}

/*
 * Zero the specified region using a simple byte-by-byte loop.  Note that this
 * is for safe DTrace-managed memory only.
 */
static void
dtrace_bzero(void *dst, size_t len)
{
	uchar_t *cp;

	for (cp = dst; len != 0; len--)
		*cp++ = 0;
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the user credentials of the process that enabled the
 * invoking ECB match the target credentials.
 */
static int
dtrace_priv_proc_common_user(dtrace_state_t *state)
{
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_uid == cr->cr_uid &&
	    s_cr->cr_uid == cr->cr_ruid &&
	    s_cr->cr_uid == cr->cr_suid &&
	    s_cr->cr_gid == cr->cr_gid &&
	    s_cr->cr_gid == cr->cr_rgid &&
	    s_cr->cr_gid == cr->cr_sgid)
		return (1);

	return (0);
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the zone of the process that enabled the invoking ECB
 * matches the target credentials.
 */
static int
dtrace_priv_proc_common_zone(dtrace_state_t *state)
{
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_zone == cr->cr_zone)
		return (1);

	return (0);
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the process has not setuid or changed credentials.
 */
static int
dtrace_priv_proc_common_nocd()
{
	proc_t *proc;

	if ((proc = ttoproc(curthread)) != NULL &&
	    !(proc->p_flag & SNOCD))
		return (1);

	return (0);
}

static int
dtrace_priv_proc_destructive(dtrace_state_t *state)
{
	int action = state->dts_cred.dcr_action;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
	    dtrace_priv_proc_common_zone(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
	    dtrace_priv_proc_common_user(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
	    dtrace_priv_proc_common_nocd() == 0)
		goto bad;

	return (1);

bad:
	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc_control(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
		return (1);

	if (dtrace_priv_proc_common_zone(state) &&
	    dtrace_priv_proc_common_user(state) &&
	    dtrace_priv_proc_common_nocd())
		return (1);

	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
		return (1);

	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_kernel(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
		return (1);

	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

static int
dtrace_priv_kernel_destructive(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
		return (1);

	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

/*
 * Note:  not called from probe context.  This function is called
 * asynchronously (and at a regular interval) from outside of probe context to
 * clean the dirty dynamic variable lists on all CPUs.  Dynamic variable
 * cleaning is explained in detail in <sys/dtrace_impl.h>.
 */
void
dtrace_dynvar_clean(dtrace_dstate_t *dstate)
{
	dtrace_dynvar_t *dirty;
	dtrace_dstate_percpu_t *dcpu;
	int i, work = 0;

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		ASSERT(dcpu->dtdsc_rinsing == NULL);

		/*
		 * If the dirty list is NULL, there is no dirty work to do.
		 */
		if (dcpu->dtdsc_dirty == NULL)
			continue;

		/*
		 * If the clean list is non-NULL, then we're not going to do
		 * any work for this CPU -- it means that there has not been
		 * a dtrace_dynvar() allocation on this CPU (or from this CPU)
		 * since the last time we cleaned house.
		 */
		if (dcpu->dtdsc_clean != NULL)
			continue;

		work = 1;

		/*
		 * Atomically move the dirty list aside.
		 */
		do {
			dirty = dcpu->dtdsc_dirty;

			/*
			 * Before we zap the dirty list, set the rinsing list.
			 * (This allows for a potential assertion in
			 * dtrace_dynvar():  if a free dynamic variable appears
			 * on a hash chain, either the dirty list or the
			 * rinsing list for some CPU must be non-NULL.)
			 */
			dcpu->dtdsc_rinsing = dirty;
			dtrace_membar_producer();
		} while (dtrace_casptr(&dcpu->dtdsc_dirty,
		    dirty, NULL) != dirty);
	}

	if (!work) {
		/*
		 * We have no work to do; we can simply return.
		 */
		return;
	}

	dtrace_sync();

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		if (dcpu->dtdsc_rinsing == NULL)
			continue;

		/*
		 * We are now guaranteed that no hash chain contains a pointer
		 * into this dirty list; we can make it clean.
		 */
		ASSERT(dcpu->dtdsc_clean == NULL);
		dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
		dcpu->dtdsc_rinsing = NULL;
	}

	/*
	 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
	 * sure that all CPUs have seen all of the dtdsc_clean pointers.
	 * This prevents a race whereby a CPU incorrectly decides that
	 * the state should be something other than DTRACE_DSTATE_CLEAN
	 * after dtrace_dynvar_clean() has completed.
	 */
	dtrace_sync();

	dstate->dtds_state = DTRACE_DSTATE_CLEAN;
}

/*
 * Depending on the value of the op parameter, this function looks up,
 * allocates, or deallocates an arbitrarily-keyed dynamic variable.  If an
 * allocation is requested, this function will return a pointer to a
 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
 * variable can be allocated.  If NULL is returned, the appropriate counter
 * will be incremented.
 */
dtrace_dynvar_t *
dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
    dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
	uint64_t hashval = DTRACE_DYNHASH_VALID;
	dtrace_dynhash_t *hash = dstate->dtds_hash;
	dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
	processorid_t me = CPU->cpu_id, cpu = me;
	dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
	size_t bucket, ksize;
	size_t chunksize = dstate->dtds_chunksize;
	uintptr_t kdata, lock, nstate;
	uint_t i;

	ASSERT(nkeys != 0);

	/*
	 * Hash the key.  As with aggregations, we use Jenkins' "One-at-a-time"
	 * algorithm.  For the by-value portions, we perform the algorithm in
	 * 16-bit chunks (as opposed to 8-bit chunks).  This speeds things up a
	 * bit, and seems to have only a minute effect on distribution.  For
	 * the by-reference data, we perform "One-at-a-time" iterating (safely)
	 * over each referenced byte.  It's painful to do this, but it's much
	 * better than pathological hash distribution.  The efficacy of the
	 * hashing algorithm (and a comparison with other algorithms) may be
	 * found by running the ::dtrace_dynstat MDB dcmd.
	 */
	for (i = 0; i < nkeys; i++) {
		if (key[i].dttk_size == 0) {
			uint64_t val = key[i].dttk_value;

			hashval += (val >> 48) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 32) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 16) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += val & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);
		} else {
			/*
			 * This is incredibly painful, but it beats the hell
			 * out of the alternative.
			 */
			uint64_t j, size = key[i].dttk_size;
			uintptr_t base = (uintptr_t)key[i].dttk_value;

			if (!dtrace_canload(base, size, mstate, vstate))
				break;

			for (j = 0; j < size; j++) {
				hashval += dtrace_load8(base + j);
				hashval += (hashval << 10);
				hashval ^= (hashval >> 6);
			}
		}
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (NULL);

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	/*
	 * There is a remote chance (ideally, 1 in 2^31) that our hashval
	 * comes out to be one of our two sentinel hash values.  If this
	 * actually happens, we set the hashval to be a value known to be a
	 * non-sentinel value.
	 */
	if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
		hashval = DTRACE_DYNHASH_VALID;

	/*
	 * Yes, it's painful to do a divide here.  If the cycle count becomes
	 * important here, tricks can be pulled to reduce it.  (However, it's
	 * critical that hash collisions be kept to an absolute minimum;
	 * they're much more painful than a divide.)  It's better to have a
	 * solution that generates few collisions and still keeps things
	 * relatively simple.
	 */
	bucket = hashval % dstate->dtds_hashsize;

	if (op == DTRACE_DYNVAR_DEALLOC) {
		volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;

		for (;;) {
			while ((lock = *lockp) & 1)
				continue;

			if (dtrace_casptr((void *)lockp,
			    (void *)lock, (void *)(lock + 1)) == (void *)lock)
				break;
		}

		dtrace_membar_producer();
	}

top:
	prev = NULL;
	lock = hash[bucket].dtdh_lock;

	dtrace_membar_consumer();

	start = hash[bucket].dtdh_chain;
	ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
	    start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
	    op != DTRACE_DYNVAR_DEALLOC));

	for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
		dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
		dtrace_key_t *dkey = &dtuple->dtt_key[0];

		if (dvar->dtdv_hashval != hashval) {
			if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
				/*
				 * We've reached the sink, and therefore the
				 * end of the hash chain; we can kick out of
				 * the loop knowing that we have seen a valid
				 * snapshot of state.
				 */
				ASSERT(dvar->dtdv_next == NULL);
				ASSERT(dvar == &dtrace_dynhash_sink);
				break;
			}

			if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
				/*
				 * We've gone off the rails:  somewhere along
				 * the line, one of the members of this hash
				 * chain was deleted.  Note that we could also
				 * detect this by simply letting this loop run
				 * to completion, as we would eventually hit
				 * the end of the dirty list.  However, we
				 * want to avoid running the length of the
				 * dirty list unnecessarily (it might be quite
				 * long), so we catch this as early as
				 * possible by detecting the hash marker.  In
				 * this case, we simply set dvar to NULL and
				 * break; the conditional after the loop will
				 * send us back to top.
				 */
				dvar = NULL;
				break;
			}

			goto next;
		}

		if (dtuple->dtt_nkeys != nkeys)
			goto next;

		for (i = 0; i < nkeys; i++, dkey++) {
			if (dkey->dttk_size != key[i].dttk_size)
				goto next;	/* size or type mismatch */

			if (dkey->dttk_size != 0) {
				if (dtrace_bcmp(
				    (void *)(uintptr_t)key[i].dttk_value,
				    (void *)(uintptr_t)dkey->dttk_value,
				    dkey->dttk_size))
					goto next;
			} else {
				if (dkey->dttk_value != key[i].dttk_value)
					goto next;
			}
		}

		if (op != DTRACE_DYNVAR_DEALLOC)
			return (dvar);

		ASSERT(dvar->dtdv_next == NULL ||
		    dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);

		if (prev != NULL) {
			ASSERT(hash[bucket].dtdh_chain != dvar);
			ASSERT(start != dvar);
			ASSERT(prev->dtdv_next == dvar);
			prev->dtdv_next = dvar->dtdv_next;
		} else {
			if (dtrace_casptr(&hash[bucket].dtdh_chain,
			    start, dvar->dtdv_next) != start) {
				/*
				 * We have failed to atomically swing the
				 * hash table head pointer, presumably because
				 * of a conflicting allocation on another CPU.
				 * We need to reread the hash chain and try
				 * again.
				 */
				goto top;
			}
		}

		dtrace_membar_producer();

		/*
		 * Now set the hash value to indicate that it's free.
		 */
		ASSERT(hash[bucket].dtdh_chain != dvar);
		dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;

		dtrace_membar_producer();

		/*
		 * Set the next pointer to point at the dirty list, and
		 * atomically swing the dirty pointer to the newly freed dvar.
		 */
		do {
			next = dcpu->dtdsc_dirty;
			dvar->dtdv_next = next;
		} while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);

		/*
		 * Finally, unlock this hash bucket.
		 */
		ASSERT(hash[bucket].dtdh_lock == lock);
		ASSERT(lock & 1);
		hash[bucket].dtdh_lock++;

		return (NULL);
next:
		prev = dvar;
		continue;
	}

	if (dvar == NULL) {
		/*
		 * If dvar is NULL, it is because we went off the rails:
		 * one of the elements that we traversed in the hash chain
		 * was deleted while we were traversing it.  In this case,
		 * we assert that we aren't doing a dealloc (deallocs lock
		 * the hash bucket to prevent themselves from racing with
		 * one another), and retry the hash chain traversal.
		 */
		ASSERT(op != DTRACE_DYNVAR_DEALLOC);
		goto top;
	}

	if (op != DTRACE_DYNVAR_ALLOC) {
		/*
		 * If we are not to allocate a new variable, we want to
		 * return NULL now.  Before we return, check that the value
		 * of the lock word hasn't changed.  If it has, we may have
		 * seen an inconsistent snapshot.
		 */
		if (op == DTRACE_DYNVAR_NOALLOC) {
			if (hash[bucket].dtdh_lock != lock)
				goto top;
		} else {
			ASSERT(op == DTRACE_DYNVAR_DEALLOC);
			ASSERT(hash[bucket].dtdh_lock == lock);
			ASSERT(lock & 1);
			hash[bucket].dtdh_lock++;
		}

		return (NULL);
	}

	/*
	 * We need to allocate a new dynamic variable.  The size we need is the
	 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
	 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
	 * the size of any referred-to data (dsize).  We then round the final
	 * size up to the chunksize for allocation.
	 */
	for (ksize = 0, i = 0; i < nkeys; i++)
		ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));

	/*
	 * This should be pretty much impossible, but could happen if, say,
	 * strange DIF specified the tuple.  Ideally, this should be an
	 * assertion and not an error condition -- but that requires that the
	 * chunksize calculation in dtrace_difo_chunksize() be absolutely
	 * bullet-proof.  (That is, it must not be able to be fooled by
	 * malicious DIF.)  Given the lack of backwards branches in DIF,
	 * solving this would presumably not amount to solving the Halting
	 * Problem -- but it still seems awfully hard.
	 */
	if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
	    ksize + dsize > chunksize) {
		dcpu->dtdsc_drops++;
		return (NULL);
	}

	nstate = DTRACE_DSTATE_EMPTY;

	do {
retry:
		free = dcpu->dtdsc_free;

		if (free == NULL) {
			dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
			void *rval;

			if (clean == NULL) {
				/*
				 * We're out of dynamic variable space on
				 * this CPU.  Unless we have tried all CPUs,
				 * we'll try to allocate from a different
				 * CPU.
				 */
				switch (dstate->dtds_state) {
				case DTRACE_DSTATE_CLEAN: {
					void *sp = &dstate->dtds_state;

					if (++cpu >= NCPU)
						cpu = 0;

					if (dcpu->dtdsc_dirty != NULL &&
					    nstate == DTRACE_DSTATE_EMPTY)
						nstate = DTRACE_DSTATE_DIRTY;

					if (dcpu->dtdsc_rinsing != NULL)
						nstate = DTRACE_DSTATE_RINSING;

					dcpu = &dstate->dtds_percpu[cpu];

					if (cpu != me)
						goto retry;

					(void) dtrace_cas32(sp,
					    DTRACE_DSTATE_CLEAN, nstate);

					/*
					 * To increment the correct bean
					 * counter, take another lap.
					 */
					goto retry;
				}

				case DTRACE_DSTATE_DIRTY:
					dcpu->dtdsc_dirty_drops++;
					break;

				case DTRACE_DSTATE_RINSING:
					dcpu->dtdsc_rinsing_drops++;
					break;

				case DTRACE_DSTATE_EMPTY:
					dcpu->dtdsc_drops++;
					break;
				}

				DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
				return (NULL);
			}

			/*
			 * The clean list appears to be non-empty.  We want to
			 * move the clean list to the free list; we start by
			 * moving the clean pointer aside.
			 */
			if (dtrace_casptr(&dcpu->dtdsc_clean,
			    clean, NULL) != clean) {
				/*
				 * We are in one of two situations:
				 *
				 *  (a)	The clean list was switched to the
				 *	free list by another CPU.
				 *
				 *  (b)	The clean list was added to by the
				 *	cleansing cyclic.
				 *
				 * In either of these situations, we can
				 * just reattempt the free list allocation.
				 */
				goto retry;
			}

			ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);

			/*
			 * Now we'll move the clean list to the free list.
			 * It's impossible for this to fail:  the only way
			 * the free list can be updated is through this
			 * code path, and only one CPU can own the clean list.
			 * Thus, it would only be possible for this to fail if
			 * this code were racing with dtrace_dynvar_clean().
			 * (That is, if dtrace_dynvar_clean() updated the clean
			 * list, and we ended up racing to update the free
			 * list.)  This race is prevented by the dtrace_sync()
			 * in dtrace_dynvar_clean() -- which flushes the
			 * owners of the clean lists out before resetting
			 * the clean lists.
			 */
			rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
			ASSERT(rval == NULL);
			goto retry;
		}

		dvar = free;
		new_free = dvar->dtdv_next;
	} while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);

	/*
	 * We have now allocated a new chunk.  We copy the tuple keys into the
	 * tuple array and copy any referenced key data into the data space
	 * following the tuple array.  As we do this, we relocate dttk_value
	 * in the final tuple to point to the key data address in the chunk.
	 */
	kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
	dvar->dtdv_data = (void *)(kdata + ksize);
	dvar->dtdv_tuple.dtt_nkeys = nkeys;

	for (i = 0; i < nkeys; i++) {
		dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
		size_t kesize = key[i].dttk_size;

		if (kesize != 0) {
			dtrace_bcopy(
			    (const void *)(uintptr_t)key[i].dttk_value,
			    (void *)kdata, kesize);
			dkey->dttk_value = kdata;
			kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
		} else {
			dkey->dttk_value = key[i].dttk_value;
		}

		dkey->dttk_size = kesize;
	}

	ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
	dvar->dtdv_hashval = hashval;
	dvar->dtdv_next = start;

	if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
		return (dvar);

	/*
	 * The cas has failed.  Either another CPU is adding an element to
	 * this hash chain, or another CPU is deleting an element from this
	 * hash chain.  The simplest way to deal with both of these cases
	 * (though not necessarily the most efficient) is to free our
	 * allocated block and tail-call ourselves.  Note that the free is
	 * to the dirty list and _not_ to the free list.  This is to prevent
	 * races with allocators, above.
	 */
	dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;

	dtrace_membar_producer();

	do {
		free = dcpu->dtdsc_dirty;
		dvar->dtdv_next = free;
	} while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);

	return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate));
}

/*ARGSUSED*/
static void
dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
{
	if (nval < *oval)
		*oval = nval;
}

/*ARGSUSED*/
static void
dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
{
	if (nval > *oval)
		*oval = nval;
}

static void
dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
{
	int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
	int64_t val = (int64_t)nval;

	if (val < 0) {
		for (i = 0; i < zero; i++) {
			if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
				quanta[i] += incr;
				return;
			}
		}
	} else {
		for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
			if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
				quanta[i - 1] += incr;
				return;
			}
		}

		quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
		return;
	}

	ASSERT(0);
}

static void
dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
{
	uint64_t arg = *lquanta++;
	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
	int32_t val = (int32_t)nval, level;

	ASSERT(step != 0);
	ASSERT(levels != 0);

	if (val < base) {
		/*
		 * This is an underflow.
		 */
		lquanta[0] += incr;
		return;
	}

	level = (val - base) / step;

	if (level < levels) {
		lquanta[level + 1] += incr;
		return;
	}

	/*
	 * This is an overflow.
	 */
	lquanta[levels + 1] += incr;
}
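/*
 * Editorial worked example (not in the original source): for the D action
 * lquantize(x, 0, 100, 10), the encoded argument carries base = 0,
 * step = 10, and levels = 10, so the value storage comprises
 * levels + 2 = 12 buckets.  lquanta[0] counts underflows (x < 0),
 * lquanta[1] through lquanta[10] count the ranges [0, 10), [10, 20), ...
 * [90, 100), and lquanta[11] counts overflows (x >= 100).  A value of 42
 * thus yields level = (42 - 0) / 10 = 4 and increments lquanta[5].
 */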
/*ARGSUSED*/
static void
dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
{
	data[0]++;
	data[1] += nval;
}

/*ARGSUSED*/
static void
dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
{
	*oval = *oval + 1;
}

/*ARGSUSED*/
static void
dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
{
	*oval += nval;
}

/*
 * Aggregate given the tuple in the principal data buffer, and the aggregating
 * action denoted by the specified dtrace_aggregation_t.  The aggregation
 * buffer is specified as the buf parameter.  This routine does not return
 * failure; if there is no space in the aggregation buffer, the data will be
 * dropped, and a corresponding counter incremented.
 */
static void
dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
    intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
{
	dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
	uint32_t i, ndx, size, fsize;
	uint32_t align = sizeof (uint64_t) - 1;
	dtrace_aggbuffer_t *agb;
	dtrace_aggkey_t *key;
	uint32_t hashval = 0, limit, isstr;
	caddr_t tomax, data, kdata;
	dtrace_actkind_t action;
	dtrace_action_t *act;
	uintptr_t offs;

	if (buf == NULL)
		return;

	if (!agg->dtag_hasarg) {
		/*
		 * Currently, only quantize() and lquantize() take additional
		 * arguments, and they have the same semantics:  an increment
		 * value that defaults to 1 when not present.  If additional
		 * aggregating actions take arguments, the setting of the
		 * default argument value will presumably have to become more
		 * sophisticated...
		 */
		arg = 1;
	}

	action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
	size = rec->dtrd_offset - agg->dtag_base;
	fsize = size + rec->dtrd_size;

	ASSERT(dbuf->dtb_tomax != NULL);
	data = dbuf->dtb_tomax + offset + agg->dtag_base;

	if ((tomax = buf->dtb_tomax) == NULL) {
		dtrace_buffer_drop(buf);
		return;
	}

	/*
	 * The metastructure is always at the bottom of the buffer.
	 */
	agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
	    sizeof (dtrace_aggbuffer_t));

	if (buf->dtb_offset == 0) {
		/*
		 * We just kludge up approximately 1/8th of the size to be
		 * buckets.  If this guess ends up being routinely
		 * off-the-mark, we may need to dynamically readjust this
		 * based on past performance.
		 */
		uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);

		if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
		    (uintptr_t)tomax || hashsize == 0) {
			/*
			 * We've been given a ludicrously small buffer;
			 * increment our drop count and leave.
			 */
			dtrace_buffer_drop(buf);
			return;
		}

		/*
		 * And now, a pathetic attempt to try to get an odd (or
		 * perchance, a prime) hash size for better hash distribution.
		 */
1780 */ 1781 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3)) 1782 hashsize -= DTRACE_AGGHASHSIZE_SLEW; 1783 1784 agb->dtagb_hashsize = hashsize; 1785 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb - 1786 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *)); 1787 agb->dtagb_free = (uintptr_t)agb->dtagb_hash; 1788 1789 for (i = 0; i < agb->dtagb_hashsize; i++) 1790 agb->dtagb_hash[i] = NULL; 1791 } 1792 1793 ASSERT(agg->dtag_first != NULL); 1794 ASSERT(agg->dtag_first->dta_intuple); 1795 1796 /* 1797 * Calculate the hash value based on the key. Note that we _don't_ 1798 * include the aggid in the hashing (but we will store it as part of 1799 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time" 1800 * algorithm: a simple, quick algorithm that has no known funnels, and 1801 * gets good distribution in practice. The efficacy of the hashing 1802 * algorithm (and a comparison with other algorithms) may be found by 1803 * running the ::dtrace_aggstat MDB dcmd. 1804 */ 1805 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 1806 i = act->dta_rec.dtrd_offset - agg->dtag_base; 1807 limit = i + act->dta_rec.dtrd_size; 1808 ASSERT(limit <= size); 1809 isstr = DTRACEACT_ISSTRING(act); 1810 1811 for (; i < limit; i++) { 1812 hashval += data[i]; 1813 hashval += (hashval << 10); 1814 hashval ^= (hashval >> 6); 1815 1816 if (isstr && data[i] == '\0') 1817 break; 1818 } 1819 } 1820 1821 hashval += (hashval << 3); 1822 hashval ^= (hashval >> 11); 1823 hashval += (hashval << 15); 1824 1825 /* 1826 * Yes, the divide here is expensive -- but it's generally the least 1827 * of the performance issues given the amount of data that we iterate 1828 * over to compute hash values, compare data, etc. 1829 */ 1830 ndx = hashval % agb->dtagb_hashsize; 1831 1832 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 1833 ASSERT((caddr_t)key >= tomax); 1834 ASSERT((caddr_t)key < tomax + buf->dtb_size); 1835 1836 if (hashval != key->dtak_hashval || key->dtak_size != size) 1837 continue; 1838 1839 kdata = key->dtak_data; 1840 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 1841 1842 for (act = agg->dtag_first; act->dta_intuple; 1843 act = act->dta_next) { 1844 i = act->dta_rec.dtrd_offset - agg->dtag_base; 1845 limit = i + act->dta_rec.dtrd_size; 1846 ASSERT(limit <= size); 1847 isstr = DTRACEACT_ISSTRING(act); 1848 1849 for (; i < limit; i++) { 1850 if (kdata[i] != data[i]) 1851 goto next; 1852 1853 if (isstr && data[i] == '\0') 1854 break; 1855 } 1856 } 1857 1858 if (action != key->dtak_action) { 1859 /* 1860 * We are aggregating on the same value in the same 1861 * aggregation with two different aggregating actions. 1862 * (This should have been picked up in the compiler, 1863 * so we may be dealing with errant or devious DIF.) 1864 * This is an error condition; we indicate as much, 1865 * and return. 1866 */ 1867 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 1868 return; 1869 } 1870 1871 /* 1872 * This is a hit: we need to apply the aggregator to 1873 * the value at this key. 1874 */ 1875 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 1876 return; 1877 next: 1878 continue; 1879 } 1880 1881 /* 1882 * We didn't find it. We need to allocate some zero-filled space, 1883 * link it into the hash table appropriately, and apply the aggregator 1884 * to the (zero-filled) value. 
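 *
 * (An aside on the hashing above: in isolation, the "One-at-a-time"
 * algorithm is the following routine -- a self-contained, user-level
 * sketch for reference, not code from this file:
 *
 *	static uint32_t
 *	one_at_a_time(const uint8_t *key, size_t len)
 *	{
 *		uint32_t hash = 0;
 *		size_t i;
 *
 *		for (i = 0; i < len; i++) {
 *			hash += key[i];
 *			hash += (hash << 10);
 *			hash ^= (hash >> 6);
 *		}
 *
 *		hash += (hash << 3);
 *		hash ^= (hash >> 11);
 *		hash += (hash << 15);
 *
 *		return (hash);
 *	}
 *
 * The loop above differs only in that it runs the mixing step over each
 * intuple member, stopping early at the NUL for string members.)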
1885 */ 1886 offs = buf->dtb_offset; 1887 while (offs & (align - 1)) 1888 offs += sizeof (uint32_t); 1889 1890 /* 1891 * If we don't have enough room to both allocate a new key _and_ 1892 * its associated data, increment the drop count and return. 1893 */ 1894 if ((uintptr_t)tomax + offs + fsize > 1895 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 1896 dtrace_buffer_drop(buf); 1897 return; 1898 } 1899 1900 /*CONSTCOND*/ 1901 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 1902 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 1903 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 1904 1905 key->dtak_data = kdata = tomax + offs; 1906 buf->dtb_offset = offs + fsize; 1907 1908 /* 1909 * Now copy the data across. 1910 */ 1911 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 1912 1913 for (i = sizeof (dtrace_aggid_t); i < size; i++) 1914 kdata[i] = data[i]; 1915 1916 /* 1917 * Because strings are not zeroed out by default, we need to iterate 1918 * looking for actions that store strings, and we need to explicitly 1919 * pad these strings out with zeroes. 1920 */ 1921 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 1922 int nul; 1923 1924 if (!DTRACEACT_ISSTRING(act)) 1925 continue; 1926 1927 i = act->dta_rec.dtrd_offset - agg->dtag_base; 1928 limit = i + act->dta_rec.dtrd_size; 1929 ASSERT(limit <= size); 1930 1931 for (nul = 0; i < limit; i++) { 1932 if (nul) { 1933 kdata[i] = '\0'; 1934 continue; 1935 } 1936 1937 if (data[i] != '\0') 1938 continue; 1939 1940 nul = 1; 1941 } 1942 } 1943 1944 for (i = size; i < fsize; i++) 1945 kdata[i] = 0; 1946 1947 key->dtak_hashval = hashval; 1948 key->dtak_size = size; 1949 key->dtak_action = action; 1950 key->dtak_next = agb->dtagb_hash[ndx]; 1951 agb->dtagb_hash[ndx] = key; 1952 1953 /* 1954 * Finally, apply the aggregator. 1955 */ 1956 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 1957 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 1958 } 1959 1960 /* 1961 * Given consumer state, this routine finds a speculation in the INACTIVE 1962 * state and transitions it into the ACTIVE state. If there is no speculation 1963 * in the INACTIVE state, 0 is returned. In this case, no error counter is 1964 * incremented -- it is up to the caller to take appropriate action. 1965 */ 1966 static int 1967 dtrace_speculation(dtrace_state_t *state) 1968 { 1969 int i = 0; 1970 dtrace_speculation_state_t current; 1971 uint32_t *stat = &state->dts_speculations_unavail, count; 1972 1973 while (i < state->dts_nspeculations) { 1974 dtrace_speculation_t *spec = &state->dts_speculations[i]; 1975 1976 current = spec->dtsp_state; 1977 1978 if (current != DTRACESPEC_INACTIVE) { 1979 if (current == DTRACESPEC_COMMITTINGMANY || 1980 current == DTRACESPEC_COMMITTING || 1981 current == DTRACESPEC_DISCARDING) 1982 stat = &state->dts_speculations_busy; 1983 i++; 1984 continue; 1985 } 1986 1987 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 1988 current, DTRACESPEC_ACTIVE) == current) 1989 return (i + 1); 1990 } 1991 1992 /* 1993 * We couldn't find a speculation. If we found as much as a single 1994 * busy speculation buffer, we'll attribute this failure as "busy" 1995 * instead of "unavail". 1996 */ 1997 do { 1998 count = *stat; 1999 } while (dtrace_cas32(stat, count, count + 1) != count); 2000 2001 return (0); 2002 } 2003 2004 /* 2005 * This routine commits an active speculation. If the specified speculation 2006 * is not in a valid state to perform a commit(), this routine will silently do 2007 * nothing. 
The state of the specified speculation is transitioned according 2008 * to the state transition diagram outlined in <sys/dtrace_impl.h>. 2009 */ 2010 static void 2011 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, 2012 dtrace_specid_t which) 2013 { 2014 dtrace_speculation_t *spec; 2015 dtrace_buffer_t *src, *dest; 2016 uintptr_t daddr, saddr, dlimit; 2017 dtrace_speculation_state_t current, new; 2018 intptr_t offs; 2019 2020 if (which == 0) 2021 return; 2022 2023 if (which > state->dts_nspeculations) { 2024 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2025 return; 2026 } 2027 2028 spec = &state->dts_speculations[which - 1]; 2029 src = &spec->dtsp_buffer[cpu]; 2030 dest = &state->dts_buffer[cpu]; 2031 2032 do { 2033 current = spec->dtsp_state; 2034 2035 if (current == DTRACESPEC_COMMITTINGMANY) 2036 break; 2037 2038 switch (current) { 2039 case DTRACESPEC_INACTIVE: 2040 case DTRACESPEC_DISCARDING: 2041 return; 2042 2043 case DTRACESPEC_COMMITTING: 2044 /* 2045 * This is only possible if we are (a) commit()'ing 2046 * without having done a prior speculate() on this CPU 2047 * and (b) racing with another commit() on a different 2048 * CPU. There's nothing to do -- we just assert that 2049 * our offset is 0. 2050 */ 2051 ASSERT(src->dtb_offset == 0); 2052 return; 2053 2054 case DTRACESPEC_ACTIVE: 2055 new = DTRACESPEC_COMMITTING; 2056 break; 2057 2058 case DTRACESPEC_ACTIVEONE: 2059 /* 2060 * This speculation is active on one CPU. If our 2061 * buffer offset is non-zero, we know that the one CPU 2062 * must be us. Otherwise, we are committing on a 2063 * different CPU from the speculate(), and we must 2064 * rely on being asynchronously cleaned. 2065 */ 2066 if (src->dtb_offset != 0) { 2067 new = DTRACESPEC_COMMITTING; 2068 break; 2069 } 2070 /*FALLTHROUGH*/ 2071 2072 case DTRACESPEC_ACTIVEMANY: 2073 new = DTRACESPEC_COMMITTINGMANY; 2074 break; 2075 2076 default: 2077 ASSERT(0); 2078 } 2079 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2080 current, new) != current); 2081 2082 /* 2083 * We have set the state to indicate that we are committing this 2084 * speculation. Now reserve the necessary space in the destination 2085 * buffer. 2086 */ 2087 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset, 2088 sizeof (uint64_t), state, NULL)) < 0) { 2089 dtrace_buffer_drop(dest); 2090 goto out; 2091 } 2092 2093 /* 2094 * We have the space; copy the buffer across. (Note that this is a 2095 * highly suboptimal bcopy(); in the unlikely event that this becomes 2096 * a serious performance issue, a high-performance DTrace-specific 2097 * bcopy() should obviously be invented.) 2098 */ 2099 daddr = (uintptr_t)dest->dtb_tomax + offs; 2100 dlimit = daddr + src->dtb_offset; 2101 saddr = (uintptr_t)src->dtb_tomax; 2102 2103 /* 2104 * First, the aligned portion. 2105 */ 2106 while (dlimit - daddr >= sizeof (uint64_t)) { 2107 *((uint64_t *)daddr) = *((uint64_t *)saddr); 2108 2109 daddr += sizeof (uint64_t); 2110 saddr += sizeof (uint64_t); 2111 } 2112 2113 /* 2114 * Now any left-over bit... 2115 */ 2116 while (dlimit - daddr) 2117 *((uint8_t *)daddr++) = *((uint8_t *)saddr++); 2118 2119 /* 2120 * Finally, commit the reserved space in the destination buffer. 2121 */ 2122 dest->dtb_offset = offs + src->dtb_offset; 2123 2124 out: 2125 /* 2126 * If we're lucky enough to be the only active CPU on this speculation 2127 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
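 *
 * To summarize the commit()-side transitions effected above:
 *
 *	ACTIVE				-> COMMITTING
 *	ACTIVEONE (our CPU's buffer)	-> COMMITTING
 *	ACTIVEONE (another CPU's)	-> COMMITTINGMANY
 *	ACTIVEMANY			-> COMMITTINGMANY
 *
 * COMMITTING is retired to INACTIVE here; COMMITTINGMANY must instead
 * wait to be retired by dtrace_speculation_clean().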
2128 */ 2129 if (current == DTRACESPEC_ACTIVE || 2130 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2131 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2132 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2133 2134 ASSERT(rval == DTRACESPEC_COMMITTING); 2135 } 2136 2137 src->dtb_offset = 0; 2138 src->dtb_xamot_drops += src->dtb_drops; 2139 src->dtb_drops = 0; 2140 } 2141 2142 /* 2143 * This routine discards an active speculation. If the specified speculation 2144 * is not in a valid state to perform a discard(), this routine will silently 2145 * do nothing. The state of the specified speculation is transitioned 2146 * according to the state transition diagram outlined in <sys/dtrace_impl.h>. 2147 */ 2148 static void 2149 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2150 dtrace_specid_t which) 2151 { 2152 dtrace_speculation_t *spec; 2153 dtrace_speculation_state_t current, new; 2154 dtrace_buffer_t *buf; 2155 2156 if (which == 0) 2157 return; 2158 2159 if (which > state->dts_nspeculations) { 2160 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2161 return; 2162 } 2163 2164 spec = &state->dts_speculations[which - 1]; 2165 buf = &spec->dtsp_buffer[cpu]; 2166 2167 do { 2168 current = spec->dtsp_state; 2169 2170 switch (current) { 2171 case DTRACESPEC_INACTIVE: 2172 case DTRACESPEC_COMMITTINGMANY: 2173 case DTRACESPEC_COMMITTING: 2174 case DTRACESPEC_DISCARDING: 2175 return; 2176 2177 case DTRACESPEC_ACTIVE: 2178 case DTRACESPEC_ACTIVEMANY: 2179 new = DTRACESPEC_DISCARDING; 2180 break; 2181 2182 case DTRACESPEC_ACTIVEONE: 2183 if (buf->dtb_offset != 0) { 2184 new = DTRACESPEC_INACTIVE; 2185 } else { 2186 new = DTRACESPEC_DISCARDING; 2187 } 2188 break; 2189 2190 default: 2191 ASSERT(0); 2192 } 2193 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2194 current, new) != current); 2195 2196 buf->dtb_offset = 0; 2197 buf->dtb_drops = 0; 2198 } 2199 2200 /* 2201 * Note: not called from probe context. This function is called 2202 * asynchronously from cross call context to clean any speculations that are 2203 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 2204 * transitioned back to the INACTIVE state until all CPUs have cleaned the 2205 * speculation. 2206 */ 2207 static void 2208 dtrace_speculation_clean_here(dtrace_state_t *state) 2209 { 2210 dtrace_icookie_t cookie; 2211 processorid_t cpu = CPU->cpu_id; 2212 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 2213 dtrace_specid_t i; 2214 2215 cookie = dtrace_interrupt_disable(); 2216 2217 if (dest->dtb_tomax == NULL) { 2218 dtrace_interrupt_enable(cookie); 2219 return; 2220 } 2221 2222 for (i = 0; i < state->dts_nspeculations; i++) { 2223 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2224 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 2225 2226 if (src->dtb_tomax == NULL) 2227 continue; 2228 2229 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 2230 src->dtb_offset = 0; 2231 continue; 2232 } 2233 2234 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2235 continue; 2236 2237 if (src->dtb_offset == 0) 2238 continue; 2239 2240 dtrace_speculation_commit(state, cpu, i + 1); 2241 } 2242 2243 dtrace_interrupt_enable(cookie); 2244 } 2245 2246 /* 2247 * Note: not called from probe context. This function is called 2248 * asynchronously (and at a regular interval) to clean any speculations that 2249 * are in the COMMITTINGMANY or DISCARDING states.
If it discovers that there 2250 * is work to be done, it cross calls all CPUs to perform that work; 2251 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to 2252 * the INACTIVE state until they have been cleaned by all CPUs. 2253 */ 2254 static void 2255 dtrace_speculation_clean(dtrace_state_t *state) 2256 { 2257 int work = 0, rv; 2258 dtrace_specid_t i; 2259 2260 for (i = 0; i < state->dts_nspeculations; i++) { 2261 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2262 2263 ASSERT(!spec->dtsp_cleaning); 2264 2265 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 2266 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2267 continue; 2268 2269 work++; 2270 spec->dtsp_cleaning = 1; 2271 } 2272 2273 if (!work) 2274 return; 2275 2276 dtrace_xcall(DTRACE_CPUALL, 2277 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 2278 2279 /* 2280 * We now know that all CPUs have committed or discarded their 2281 * speculation buffers, as appropriate. We can now set the state 2282 * to inactive. 2283 */ 2284 for (i = 0; i < state->dts_nspeculations; i++) { 2285 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2286 dtrace_speculation_state_t current, new; 2287 2288 if (!spec->dtsp_cleaning) 2289 continue; 2290 2291 current = spec->dtsp_state; 2292 ASSERT(current == DTRACESPEC_DISCARDING || 2293 current == DTRACESPEC_COMMITTINGMANY); 2294 2295 new = DTRACESPEC_INACTIVE; 2296 2297 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 2298 ASSERT(rv == current); 2299 spec->dtsp_cleaning = 0; 2300 } 2301 } 2302 2303 /* 2304 * Called as part of a speculate() to get the speculative buffer associated 2305 * with a given speculation. Returns NULL if the specified speculation is not 2306 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 2307 * the active CPU is not the specified CPU -- the speculation will be 2308 * atomically transitioned into the ACTIVEMANY state. 2309 */ 2310 static dtrace_buffer_t * 2311 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 2312 dtrace_specid_t which) 2313 { 2314 dtrace_speculation_t *spec; 2315 dtrace_speculation_state_t current, new; 2316 dtrace_buffer_t *buf; 2317 2318 if (which == 0) 2319 return (NULL); 2320 2321 if (which > state->dts_nspeculations) { 2322 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2323 return (NULL); 2324 } 2325 2326 spec = &state->dts_speculations[which - 1]; 2327 buf = &spec->dtsp_buffer[cpuid]; 2328 2329 do { 2330 current = spec->dtsp_state; 2331 2332 switch (current) { 2333 case DTRACESPEC_INACTIVE: 2334 case DTRACESPEC_COMMITTINGMANY: 2335 case DTRACESPEC_DISCARDING: 2336 return (NULL); 2337 2338 case DTRACESPEC_COMMITTING: 2339 ASSERT(buf->dtb_offset == 0); 2340 return (NULL); 2341 2342 case DTRACESPEC_ACTIVEONE: 2343 /* 2344 * This speculation is currently active on one CPU. 2345 * Check the offset in the buffer; if it's non-zero, 2346 * that CPU must be us (and we leave the state alone). 2347 * If it's zero, assume that we're starting on a new 2348 * CPU -- and change the state to indicate that the 2349 * speculation is active on more than one CPU.
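 *
 * (These transitions are the speculate()-side mirror image of
 * those in dtrace_speculation_commit(): ACTIVE -> ACTIVEONE for
 * the first CPU to speculate, and ACTIVEONE -> ACTIVEMANY when a
 * second CPU joins in.)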
2350 */ 2351 if (buf->dtb_offset != 0) 2352 return (buf); 2353 2354 new = DTRACESPEC_ACTIVEMANY; 2355 break; 2356 2357 case DTRACESPEC_ACTIVEMANY: 2358 return (buf); 2359 2360 case DTRACESPEC_ACTIVE: 2361 new = DTRACESPEC_ACTIVEONE; 2362 break; 2363 2364 default: 2365 ASSERT(0); 2366 } 2367 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2368 current, new) != current); 2369 2370 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 2371 return (buf); 2372 } 2373 2374 /* 2375 * Return a string. In the event that the user lacks the privilege to access 2376 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2377 * don't fail access checking. 2378 * 2379 * dtrace_dif_variable() uses this routine as a helper for various 2380 * builtin values such as 'execname' and 'probefunc.' 2381 */ 2382 uintptr_t 2383 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state, 2384 dtrace_mstate_t *mstate) 2385 { 2386 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2387 uintptr_t ret; 2388 size_t strsz; 2389 2390 /* 2391 * The easy case: this probe is allowed to read all of memory, so 2392 * we can just return this as a vanilla pointer. 2393 */ 2394 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 2395 return (addr); 2396 2397 /* 2398 * This is the tougher case: we copy the string in question from 2399 * kernel memory into scratch memory and return it that way: this 2400 * ensures that we won't trip up when access checking tests the 2401 * BYREF return value. 2402 */ 2403 strsz = dtrace_strlen((char *)addr, size) + 1; 2404 2405 if (mstate->dtms_scratch_ptr + strsz > 2406 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2407 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2408 return (NULL); 2409 } 2410 2411 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2412 strsz); 2413 ret = mstate->dtms_scratch_ptr; 2414 mstate->dtms_scratch_ptr += strsz; 2415 return (ret); 2416 } 2417 2418 /* 2419 * This function implements the DIF emulator's variable lookups. The emulator 2420 * passes a reserved variable identifier and optional built-in array index. 2421 */ 2422 static uint64_t 2423 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 2424 uint64_t ndx) 2425 { 2426 /* 2427 * If we're accessing one of the uncached arguments, we'll turn this 2428 * into a reference in the args array. 2429 */ 2430 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 2431 ndx = v - DIF_VAR_ARG0; 2432 v = DIF_VAR_ARGS; 2433 } 2434 2435 switch (v) { 2436 case DIF_VAR_ARGS: 2437 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 2438 if (ndx >= sizeof (mstate->dtms_arg) / 2439 sizeof (mstate->dtms_arg[0])) { 2440 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2441 dtrace_provider_t *pv; 2442 uint64_t val; 2443 2444 pv = mstate->dtms_probe->dtpr_provider; 2445 if (pv->dtpv_pops.dtps_getargval != NULL) 2446 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 2447 mstate->dtms_probe->dtpr_id, 2448 mstate->dtms_probe->dtpr_arg, ndx, aframes); 2449 else 2450 val = dtrace_getarg(ndx, aframes); 2451 2452 /* 2453 * This is regrettably required to keep the compiler 2454 * from tail-optimizing the call to dtrace_getarg(). 2455 * The condition always evaluates to true, but the 2456 * compiler has no way of figuring that out a priori. 2457 * (None of this would be necessary if the compiler 2458 * could be relied upon to _always_ tail-optimize 2459 * the call to dtrace_getarg() -- but it can't.) 
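 *
 * (Presumably the hazard is that a tail-call would elide this
 * function's own stack frame, invalidating the artificial frame
 * count that was baked into aframes above.)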
2460 */ 2461 if (mstate->dtms_probe != NULL) 2462 return (val); 2463 2464 ASSERT(0); 2465 } 2466 2467 return (mstate->dtms_arg[ndx]); 2468 2469 case DIF_VAR_UREGS: { 2470 klwp_t *lwp; 2471 2472 if (!dtrace_priv_proc(state)) 2473 return (0); 2474 2475 if ((lwp = curthread->t_lwp) == NULL) { 2476 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2477 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = NULL; 2478 return (0); 2479 } 2480 2481 return (dtrace_getreg(lwp->lwp_regs, ndx)); 2482 } 2483 2484 case DIF_VAR_CURTHREAD: 2485 if (!dtrace_priv_kernel(state)) 2486 return (0); 2487 return ((uint64_t)(uintptr_t)curthread); 2488 2489 case DIF_VAR_TIMESTAMP: 2490 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 2491 mstate->dtms_timestamp = dtrace_gethrtime(); 2492 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 2493 } 2494 return (mstate->dtms_timestamp); 2495 2496 case DIF_VAR_VTIMESTAMP: 2497 ASSERT(dtrace_vtime_references != 0); 2498 return (curthread->t_dtrace_vtime); 2499 2500 case DIF_VAR_WALLTIMESTAMP: 2501 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 2502 mstate->dtms_walltimestamp = dtrace_gethrestime(); 2503 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 2504 } 2505 return (mstate->dtms_walltimestamp); 2506 2507 case DIF_VAR_IPL: 2508 if (!dtrace_priv_kernel(state)) 2509 return (0); 2510 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 2511 mstate->dtms_ipl = dtrace_getipl(); 2512 mstate->dtms_present |= DTRACE_MSTATE_IPL; 2513 } 2514 return (mstate->dtms_ipl); 2515 2516 case DIF_VAR_EPID: 2517 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 2518 return (mstate->dtms_epid); 2519 2520 case DIF_VAR_ID: 2521 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2522 return (mstate->dtms_probe->dtpr_id); 2523 2524 case DIF_VAR_STACKDEPTH: 2525 if (!dtrace_priv_kernel(state)) 2526 return (0); 2527 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 2528 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2529 2530 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 2531 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 2532 } 2533 return (mstate->dtms_stackdepth); 2534 2535 case DIF_VAR_USTACKDEPTH: 2536 if (!dtrace_priv_proc(state)) 2537 return (0); 2538 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 2539 /* 2540 * See comment in DIF_VAR_PID. 2541 */ 2542 if (DTRACE_ANCHORED(mstate->dtms_probe) && 2543 CPU_ON_INTR(CPU)) { 2544 mstate->dtms_ustackdepth = 0; 2545 } else { 2546 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2547 mstate->dtms_ustackdepth = 2548 dtrace_getustackdepth(); 2549 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2550 } 2551 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 2552 } 2553 return (mstate->dtms_ustackdepth); 2554 2555 case DIF_VAR_CALLER: 2556 if (!dtrace_priv_kernel(state)) 2557 return (0); 2558 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 2559 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2560 2561 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 2562 /* 2563 * If this is an unanchored probe, we are 2564 * required to go through the slow path: 2565 * dtrace_caller() only guarantees correct 2566 * results for anchored probes. 2567 */ 2568 pc_t caller[2]; 2569 2570 dtrace_getpcstack(caller, 2, aframes, 2571 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 2572 mstate->dtms_caller = caller[1]; 2573 } else if ((mstate->dtms_caller = 2574 dtrace_caller(aframes)) == -1) { 2575 /* 2576 * We have failed to do this the quick way; 2577 * we must resort to the slower approach of 2578 * calling dtrace_getpcstack(). 
2579 */ 2580 pc_t caller; 2581 2582 dtrace_getpcstack(&caller, 1, aframes, NULL); 2583 mstate->dtms_caller = caller; 2584 } 2585 2586 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 2587 } 2588 return (mstate->dtms_caller); 2589 2590 case DIF_VAR_UCALLER: 2591 if (!dtrace_priv_proc(state)) 2592 return (0); 2593 2594 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 2595 uint64_t ustack[3]; 2596 2597 /* 2598 * dtrace_getupcstack() fills in the first uint64_t 2599 * with the current PID. The second uint64_t will 2600 * be the program counter at user-level. The third 2601 * uint64_t will contain the caller, which is what 2602 * we're after. 2603 */ 2604 ustack[2] = NULL; 2605 dtrace_getupcstack(ustack, 3); 2606 mstate->dtms_ucaller = ustack[2]; 2607 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 2608 } 2609 2610 return (mstate->dtms_ucaller); 2611 2612 case DIF_VAR_PROBEPROV: 2613 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2614 return (dtrace_dif_varstr( 2615 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 2616 state, mstate)); 2617 2618 case DIF_VAR_PROBEMOD: 2619 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2620 return (dtrace_dif_varstr( 2621 (uintptr_t)mstate->dtms_probe->dtpr_mod, 2622 state, mstate)); 2623 2624 case DIF_VAR_PROBEFUNC: 2625 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2626 return (dtrace_dif_varstr( 2627 (uintptr_t)mstate->dtms_probe->dtpr_func, 2628 state, mstate)); 2629 2630 case DIF_VAR_PROBENAME: 2631 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2632 return (dtrace_dif_varstr( 2633 (uintptr_t)mstate->dtms_probe->dtpr_name, 2634 state, mstate)); 2635 2636 case DIF_VAR_PID: 2637 if (!dtrace_priv_proc(state)) 2638 return (0); 2639 2640 /* 2641 * Note that we are assuming that an unanchored probe is 2642 * always due to a high-level interrupt. (And we're assuming 2643 * that there is only a single high-level interrupt.) 2644 */ 2645 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2646 return (pid0.pid_id); 2647 2648 /* 2649 * It is always safe to dereference one's own t_procp pointer: 2650 * it always points to a valid, allocated proc structure. 2651 * Further, it is always safe to dereference the p_pidp member 2652 * of one's own proc structure. (These are truisms because 2653 * threads and processes don't clean up their own state -- 2654 * they leave that task to whomever reaps them.) 2655 */ 2656 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 2657 2658 case DIF_VAR_PPID: 2659 if (!dtrace_priv_proc(state)) 2660 return (0); 2661 2662 /* 2663 * See comment in DIF_VAR_PID. 2664 */ 2665 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2666 return (pid0.pid_id); 2667 2668 /* 2669 * It is always safe to dereference one's own t_procp pointer: 2670 * it always points to a valid, allocated proc structure. 2671 * (This is true because threads don't clean up their own 2672 * state -- they leave that task to whomever reaps them.) 2673 */ 2674 return ((uint64_t)curthread->t_procp->p_ppid); 2675 2676 case DIF_VAR_TID: 2677 /* 2678 * See comment in DIF_VAR_PID. 2679 */ 2680 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2681 return (0); 2682 2683 return ((uint64_t)curthread->t_tid); 2684 2685 case DIF_VAR_EXECNAME: 2686 if (!dtrace_priv_proc(state)) 2687 return (0); 2688 2689 /* 2690 * See comment in DIF_VAR_PID.
2691 */ 2692 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2693 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 2694 2695 /* 2696 * It is always safe to dereference one's own t_procp pointer: 2697 * it always points to a valid, allocated proc structure. 2698 * (This is true because threads don't clean up their own 2699 * state -- they leave that task to whomever reaps them.) 2700 */ 2701 return (dtrace_dif_varstr( 2702 (uintptr_t)curthread->t_procp->p_user.u_comm, 2703 state, mstate)); 2704 2705 case DIF_VAR_ZONENAME: 2706 if (!dtrace_priv_proc(state)) 2707 return (0); 2708 2709 /* 2710 * See comment in DIF_VAR_PID. 2711 */ 2712 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2713 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 2714 2715 /* 2716 * It is always safe to dereference one's own t_procp pointer: 2717 * it always points to a valid, allocated proc structure. 2718 * (This is true because threads don't clean up their own 2719 * state -- they leave that task to whomever reaps them.) 2720 */ 2721 return (dtrace_dif_varstr( 2722 (uintptr_t)curthread->t_procp->p_zone->zone_name, 2723 state, mstate)); 2724 2725 case DIF_VAR_UID: 2726 if (!dtrace_priv_proc(state)) 2727 return (0); 2728 2729 /* 2730 * See comment in DIF_VAR_PID. 2731 */ 2732 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2733 return ((uint64_t)p0.p_cred->cr_uid); 2734 2735 /* 2736 * It is always safe to dereference one's own t_procp pointer: 2737 * it always points to a valid, allocated proc structure. 2738 * (This is true because threads don't clean up their own 2739 * state -- they leave that task to whomever reaps them.) 2740 * 2741 * Additionally, it is safe to dereference one's own process 2742 * credential, since this is never NULL after process birth. 2743 */ 2744 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 2745 2746 case DIF_VAR_GID: 2747 if (!dtrace_priv_proc(state)) 2748 return (0); 2749 2750 /* 2751 * See comment in DIF_VAR_PID. 2752 */ 2753 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2754 return ((uint64_t)p0.p_cred->cr_gid); 2755 2756 /* 2757 * It is always safe to dereference one's own t_procp pointer: 2758 * it always points to a valid, allocated proc structure. 2759 * (This is true because threads don't clean up their own 2760 * state -- they leave that task to whomever reaps them.) 2761 * 2762 * Additionally, it is safe to dereference one's own process 2763 * credential, since this is never NULL after process birth. 2764 */ 2765 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 2766 2767 case DIF_VAR_ERRNO: { 2768 klwp_t *lwp; 2769 if (!dtrace_priv_proc(state)) 2770 return (0); 2771 2772 /* 2773 * See comment in DIF_VAR_PID. 2774 */ 2775 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 2776 return (0); 2777 2778 /* 2779 * It is always safe to dereference one's own t_lwp pointer in 2780 * the event that this pointer is non-NULL. (This is true 2781 * because threads and lwps don't clean up their own state -- 2782 * they leave that task to whomever reaps them.) 2783 */ 2784 if ((lwp = curthread->t_lwp) == NULL) 2785 return (0); 2786 2787 return ((uint64_t)lwp->lwp_errno); 2788 } 2789 default: 2790 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2791 return (0); 2792 } 2793 } 2794 2795 /* 2796 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 2797 * Notice that we don't bother validating the proper number of arguments or 2798 * their types in the tuple stack. 
This isn't needed because all argument 2799 * interpretation is safe thanks to our load safety -- the worst that can 2800 * happen is that a bogus program can obtain bogus results. 2801 */ 2802 static void 2803 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 2804 dtrace_key_t *tupregs, int nargs, 2805 dtrace_mstate_t *mstate, dtrace_state_t *state) 2806 { 2807 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 2808 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 2809 dtrace_vstate_t *vstate = &state->dts_vstate; 2810 2811 union { 2812 mutex_impl_t mi; 2813 uint64_t mx; 2814 } m; 2815 2816 union { 2817 krwlock_t ri; 2818 uintptr_t rw; 2819 } r; 2820 2821 switch (subr) { 2822 case DIF_SUBR_RAND: 2823 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 2824 break; 2825 2826 case DIF_SUBR_MUTEX_OWNED: 2827 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 2828 mstate, vstate)) { 2829 regs[rd] = NULL; 2830 break; 2831 } 2832 2833 m.mx = dtrace_load64(tupregs[0].dttk_value); 2834 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 2835 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 2836 else 2837 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 2838 break; 2839 2840 case DIF_SUBR_MUTEX_OWNER: 2841 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 2842 mstate, vstate)) { 2843 regs[rd] = NULL; 2844 break; 2845 } 2846 2847 m.mx = dtrace_load64(tupregs[0].dttk_value); 2848 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 2849 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 2850 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 2851 else 2852 regs[rd] = 0; 2853 break; 2854 2855 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 2856 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 2857 mstate, vstate)) { 2858 regs[rd] = NULL; 2859 break; 2860 } 2861 2862 m.mx = dtrace_load64(tupregs[0].dttk_value); 2863 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 2864 break; 2865 2866 case DIF_SUBR_MUTEX_TYPE_SPIN: 2867 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 2868 mstate, vstate)) { 2869 regs[rd] = NULL; 2870 break; 2871 } 2872 2873 m.mx = dtrace_load64(tupregs[0].dttk_value); 2874 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 2875 break; 2876 2877 case DIF_SUBR_RW_READ_HELD: { 2878 uintptr_t tmp; 2879 2880 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 2881 mstate, vstate)) { 2882 regs[rd] = NULL; 2883 break; 2884 } 2885 2886 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2887 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 2888 break; 2889 } 2890 2891 case DIF_SUBR_RW_WRITE_HELD: 2892 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 2893 mstate, vstate)) { 2894 regs[rd] = NULL; 2895 break; 2896 } 2897 2898 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2899 regs[rd] = _RW_WRITE_HELD(&r.ri); 2900 break; 2901 2902 case DIF_SUBR_RW_ISWRITER: 2903 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 2904 mstate, vstate)) { 2905 regs[rd] = NULL; 2906 break; 2907 } 2908 2909 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 2910 regs[rd] = _RW_ISWRITER(&r.ri); 2911 break; 2912 2913 case DIF_SUBR_BCOPY: { 2914 /* 2915 * We need to be sure that the destination is in the scratch 2916 * region -- no other region is allowed.
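 *
 * (In D, this means the destination is ordinarily memory obtained
 * from alloca(), which -- as implemented below for DIF_SUBR_ALLOCA --
 * hands out space from exactly this scratch region.)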
2917 */ 2918 uintptr_t src = tupregs[0].dttk_value; 2919 uintptr_t dest = tupregs[1].dttk_value; 2920 size_t size = tupregs[2].dttk_value; 2921 2922 if (!dtrace_inscratch(dest, size, mstate)) { 2923 *flags |= CPU_DTRACE_BADADDR; 2924 *illval = regs[rd]; 2925 break; 2926 } 2927 2928 if (!dtrace_canload(src, size, mstate, vstate)) { 2929 regs[rd] = NULL; 2930 break; 2931 } 2932 2933 dtrace_bcopy((void *)src, (void *)dest, size); 2934 break; 2935 } 2936 2937 case DIF_SUBR_ALLOCA: 2938 case DIF_SUBR_COPYIN: { 2939 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 2940 uint64_t size = 2941 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 2942 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 2943 2944 /* 2945 * This action doesn't require any credential checks since 2946 * probes will not activate in user contexts to which the 2947 * enabling user does not have permissions. 2948 */ 2949 2950 /* 2951 * Rounding up the user allocation size could have overflowed 2952 * a large, bogus allocation (like -1ULL) to 0. 2953 */ 2954 if (scratch_size < size || 2955 !DTRACE_INSCRATCH(mstate, scratch_size)) { 2956 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2957 regs[rd] = NULL; 2958 break; 2959 } 2960 2961 if (subr == DIF_SUBR_COPYIN) { 2962 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2963 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 2964 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2965 } 2966 2967 mstate->dtms_scratch_ptr += scratch_size; 2968 regs[rd] = dest; 2969 break; 2970 } 2971 2972 case DIF_SUBR_COPYINTO: { 2973 uint64_t size = tupregs[1].dttk_value; 2974 uintptr_t dest = tupregs[2].dttk_value; 2975 2976 /* 2977 * This action doesn't require any credential checks since 2978 * probes will not activate in user contexts to which the 2979 * enabling user does not have permissions. 2980 */ 2981 if (!dtrace_inscratch(dest, size, mstate)) { 2982 *flags |= CPU_DTRACE_BADADDR; 2983 *illval = regs[rd]; 2984 break; 2985 } 2986 2987 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2988 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 2989 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2990 break; 2991 } 2992 2993 case DIF_SUBR_COPYINSTR: { 2994 uintptr_t dest = mstate->dtms_scratch_ptr; 2995 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2996 2997 if (nargs > 1 && tupregs[1].dttk_value < size) 2998 size = tupregs[1].dttk_value + 1; 2999 3000 /* 3001 * This action doesn't require any credential checks since 3002 * probes will not activate in user contexts to which the 3003 * enabling user does not have permissions. 
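 *
 * (Note the sizing rule implemented below: an explicit second argument
 * n smaller than the "strsize" option shrinks the copy to n + 1 bytes,
 * and dest[size - 1] is unconditionally overwritten with '\0', so the
 * result is always NUL-terminated -- truncated if need be.)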
3004 */ 3005 if (!DTRACE_INSCRATCH(mstate, size)) { 3006 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3007 regs[rd] = NULL; 3008 break; 3009 } 3010 3011 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3012 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); 3013 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3014 3015 ((char *)dest)[size - 1] = '\0'; 3016 mstate->dtms_scratch_ptr += size; 3017 regs[rd] = dest; 3018 break; 3019 } 3020 3021 case DIF_SUBR_MSGSIZE: 3022 case DIF_SUBR_MSGDSIZE: { 3023 uintptr_t baddr = tupregs[0].dttk_value, daddr; 3024 uintptr_t wptr, rptr; 3025 size_t count = 0; 3026 int cont = 0; 3027 3028 while (baddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 3029 3030 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, 3031 vstate)) { 3032 regs[rd] = NULL; 3033 break; 3034 } 3035 3036 wptr = dtrace_loadptr(baddr + 3037 offsetof(mblk_t, b_wptr)); 3038 3039 rptr = dtrace_loadptr(baddr + 3040 offsetof(mblk_t, b_rptr)); 3041 3042 if (wptr < rptr) { 3043 *flags |= CPU_DTRACE_BADADDR; 3044 *illval = tupregs[0].dttk_value; 3045 break; 3046 } 3047 3048 daddr = dtrace_loadptr(baddr + 3049 offsetof(mblk_t, b_datap)); 3050 3051 baddr = dtrace_loadptr(baddr + 3052 offsetof(mblk_t, b_cont)); 3053 3054 /* 3055 * We want to guard against denial-of-service here, 3056 * so we're only going to search the list for 3057 * dtrace_msgdsize_max mblks. 3058 */ 3059 if (cont++ > dtrace_msgdsize_max) { 3060 *flags |= CPU_DTRACE_ILLOP; 3061 break; 3062 } 3063 3064 if (subr == DIF_SUBR_MSGDSIZE) { 3065 if (dtrace_load8(daddr + 3066 offsetof(dblk_t, db_type)) != M_DATA) 3067 continue; 3068 } 3069 3070 count += wptr - rptr; 3071 } 3072 3073 if (!(*flags & CPU_DTRACE_FAULT)) 3074 regs[rd] = count; 3075 3076 break; 3077 } 3078 3079 case DIF_SUBR_PROGENYOF: { 3080 pid_t pid = tupregs[0].dttk_value; 3081 proc_t *p; 3082 int rval = 0; 3083 3084 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3085 3086 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 3087 if (p->p_pidp->pid_id == pid) { 3088 rval = 1; 3089 break; 3090 } 3091 } 3092 3093 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3094 3095 regs[rd] = rval; 3096 break; 3097 } 3098 3099 case DIF_SUBR_SPECULATION: 3100 regs[rd] = dtrace_speculation(state); 3101 break; 3102 3103 case DIF_SUBR_COPYOUT: { 3104 uintptr_t kaddr = tupregs[0].dttk_value; 3105 uintptr_t uaddr = tupregs[1].dttk_value; 3106 uint64_t size = tupregs[2].dttk_value; 3107 3108 if (!dtrace_destructive_disallow && 3109 dtrace_priv_proc_control(state) && 3110 !dtrace_istoxic(kaddr, size)) { 3111 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3112 dtrace_copyout(kaddr, uaddr, size, flags); 3113 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3114 } 3115 break; 3116 } 3117 3118 case DIF_SUBR_COPYOUTSTR: { 3119 uintptr_t kaddr = tupregs[0].dttk_value; 3120 uintptr_t uaddr = tupregs[1].dttk_value; 3121 uint64_t size = tupregs[2].dttk_value; 3122 3123 if (!dtrace_destructive_disallow && 3124 dtrace_priv_proc_control(state) && 3125 !dtrace_istoxic(kaddr, size)) { 3126 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3127 dtrace_copyoutstr(kaddr, uaddr, size, flags); 3128 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3129 } 3130 break; 3131 } 3132 3133 case DIF_SUBR_STRLEN: { 3134 size_t sz; 3135 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 3136 sz = dtrace_strlen((char *)addr, 3137 state->dts_options[DTRACEOPT_STRSIZE]); 3138 3139 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) { 3140 regs[rd] = NULL; 3141 break; 3142 } 3143 3144 regs[rd] = sz; 3145 3146 break; 3147 } 3148 3149 case DIF_SUBR_STRCHR: 3150 case
DIF_SUBR_STRRCHR: { 3151 /* 3152 * We're going to iterate over the string looking for the 3153 * specified character. We will iterate until we have reached 3154 * the string length or we have found the character. If this 3155 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 3156 * of the specified character instead of the first. 3157 */ 3158 uintptr_t saddr = tupregs[0].dttk_value; 3159 uintptr_t addr = tupregs[0].dttk_value; 3160 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 3161 char c, target = (char)tupregs[1].dttk_value; 3162 3163 for (regs[rd] = NULL; addr < limit; addr++) { 3164 if ((c = dtrace_load8(addr)) == target) { 3165 regs[rd] = addr; 3166 3167 if (subr == DIF_SUBR_STRCHR) 3168 break; 3169 } 3170 3171 if (c == '\0') 3172 break; 3173 } 3174 3175 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) { 3176 regs[rd] = NULL; 3177 break; 3178 } 3179 3180 break; 3181 } 3182 3183 case DIF_SUBR_STRSTR: 3184 case DIF_SUBR_INDEX: 3185 case DIF_SUBR_RINDEX: { 3186 /* 3187 * We're going to iterate over the string looking for the 3188 * specified string. We will iterate until we have reached 3189 * the string length or we have found the string. (Yes, this 3190 * is done in the most naive way possible -- but considering 3191 * that the string we're searching for is likely to be 3192 * relatively short, the complexity of Rabin-Karp or similar 3193 * hardly seems merited.) 3194 */ 3195 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 3196 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 3197 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3198 size_t len = dtrace_strlen(addr, size); 3199 size_t sublen = dtrace_strlen(substr, size); 3200 char *limit = addr + len, *orig = addr; 3201 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 3202 int inc = 1; 3203 3204 regs[rd] = notfound; 3205 3206 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 3207 regs[rd] = NULL; 3208 break; 3209 } 3210 3211 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 3212 vstate)) { 3213 regs[rd] = NULL; 3214 break; 3215 } 3216 3217 /* 3218 * strstr() and index()/rindex() have similar semantics if 3219 * both strings are the empty string: strstr() returns a 3220 * pointer to the (empty) string, and index() and rindex() 3221 * both return index 0 (regardless of any position argument). 3222 */ 3223 if (sublen == 0 && len == 0) { 3224 if (subr == DIF_SUBR_STRSTR) 3225 regs[rd] = (uintptr_t)addr; 3226 else 3227 regs[rd] = 0; 3228 break; 3229 } 3230 3231 if (subr != DIF_SUBR_STRSTR) { 3232 if (subr == DIF_SUBR_RINDEX) { 3233 limit = orig - 1; 3234 addr += len; 3235 inc = -1; 3236 } 3237 3238 /* 3239 * Both index() and rindex() take an optional position 3240 * argument that denotes the starting position. 3241 */ 3242 if (nargs == 3) { 3243 int64_t pos = (int64_t)tupregs[2].dttk_value; 3244 3245 /* 3246 * If the position argument to index() is 3247 * negative, Perl implicitly clamps it at 3248 * zero. This semantic is a little surprising 3249 * given the special meaning of negative 3250 * positions to similar Perl functions like 3251 * substr(), but it appears to reflect a 3252 * notion that index() can start from a 3253 * negative index and increment its way up to 3254 * the string. Given this notion, Perl's 3255 * rindex() is at least self-consistent in 3256 * that it implicitly clamps positions greater 3257 * than the string length to be the string 3258 * length. 
Where Perl completely loses 3259 * coherence, however, is when the specified 3260 * substring is the empty string (""). In 3261 * this case, even if the position is 3262 * negative, rindex() returns 0 -- and even if 3263 * the position is greater than the length, 3264 * index() returns the string length. These 3265 * semantics violate the notion that index() 3266 * should never return a value less than the 3267 * specified position and that rindex() should 3268 * never return a value greater than the 3269 * specified position. (One assumes that 3270 * these semantics are artifacts of Perl's 3271 * implementation and not the results of 3272 * deliberate design -- it beggars belief that 3273 * even Larry Wall could desire such oddness.) 3274 * While in the abstract one would wish for 3275 * consistent position semantics across 3276 * substr(), index() and rindex() -- or at the 3277 * very least self-consistent position 3278 * semantics for index() and rindex() -- we 3279 * instead opt to keep with the extant Perl 3280 * semantics, in all their broken glory. (Do 3281 * we have more desire to maintain Perl's 3282 * semantics than Perl does? Probably.) 3283 */ 3284 if (subr == DIF_SUBR_RINDEX) { 3285 if (pos < 0) { 3286 if (sublen == 0) 3287 regs[rd] = 0; 3288 break; 3289 } 3290 3291 if (pos > len) 3292 pos = len; 3293 } else { 3294 if (pos < 0) 3295 pos = 0; 3296 3297 if (pos >= len) { 3298 if (sublen == 0) 3299 regs[rd] = len; 3300 break; 3301 } 3302 } 3303 3304 addr = orig + pos; 3305 } 3306 } 3307 3308 for (regs[rd] = notfound; addr != limit; addr += inc) { 3309 if (dtrace_strncmp(addr, substr, sublen) == 0) { 3310 if (subr != DIF_SUBR_STRSTR) { 3311 /* 3312 * As D index() and rindex() are 3313 * modeled on Perl (and not on awk), 3314 * we return a zero-based (and not a 3315 * one-based) index. (For you Perl 3316 * weenies: no, we're not going to add 3317 * $[ -- and shouldn't you be at a con 3318 * or something?) 3319 */ 3320 regs[rd] = (uintptr_t)(addr - orig); 3321 break; 3322 } 3323 3324 ASSERT(subr == DIF_SUBR_STRSTR); 3325 regs[rd] = (uintptr_t)addr; 3326 break; 3327 } 3328 } 3329 3330 break; 3331 } 3332 3333 case DIF_SUBR_STRTOK: { 3334 uintptr_t addr = tupregs[0].dttk_value; 3335 uintptr_t tokaddr = tupregs[1].dttk_value; 3336 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3337 uintptr_t limit, toklimit = tokaddr + size; 3338 uint8_t c, tokmap[32]; /* 256 / 8 */ 3339 char *dest = (char *)mstate->dtms_scratch_ptr; 3340 int i; 3341 3342 /* 3343 * Check both the token buffer and (later) the input buffer, 3344 * since both could be non-scratch addresses. 3345 */ 3346 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) { 3347 regs[rd] = NULL; 3348 break; 3349 } 3350 3351 if (!DTRACE_INSCRATCH(mstate, size)) { 3352 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3353 regs[rd] = NULL; 3354 break; 3355 } 3356 3357 if (addr == NULL) { 3358 /* 3359 * If the address specified is NULL, we use our saved 3360 * strtok pointer from the mstate. Note that this 3361 * means that the saved strtok pointer is _only_ 3362 * valid within multiple enablings of the same probe -- 3363 * it behaves like an implicit clause-local variable. 3364 */ 3365 addr = mstate->dtms_strtok; 3366 } else { 3367 /* 3368 * If the user-specified address is non-NULL we must 3369 * access check it. 
This is the only time we have 3370 * a chance to do so, since this address may reside 3371 * in the string table of this clause -- future calls 3372 * (when we fetch addr from mstate->dtms_strtok) 3373 * would fail this access check. 3374 */ 3375 if (!dtrace_strcanload(addr, size, mstate, vstate)) { 3376 regs[rd] = NULL; 3377 break; 3378 } 3379 } 3380 3381 /* 3382 * First, zero the token map, and then process the token 3383 * string -- setting a bit in the map for every character 3384 * found in the token string. 3385 */ 3386 for (i = 0; i < sizeof (tokmap); i++) 3387 tokmap[i] = 0; 3388 3389 for (; tokaddr < toklimit; tokaddr++) { 3390 if ((c = dtrace_load8(tokaddr)) == '\0') 3391 break; 3392 3393 ASSERT((c >> 3) < sizeof (tokmap)); 3394 tokmap[c >> 3] |= (1 << (c & 0x7)); 3395 } 3396 3397 for (limit = addr + size; addr < limit; addr++) { 3398 /* 3399 * We're looking for a character that is _not_ contained 3400 * in the token string. 3401 */ 3402 if ((c = dtrace_load8(addr)) == '\0') 3403 break; 3404 3405 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 3406 break; 3407 } 3408 3409 if (c == '\0') { 3410 /* 3411 * We reached the end of the string without finding 3412 * any character that was not in the token string. 3413 * We return NULL in this case, and we set the saved 3414 * address to NULL as well. 3415 */ 3416 regs[rd] = NULL; 3417 mstate->dtms_strtok = NULL; 3418 break; 3419 } 3420 3421 /* 3422 * From here on, we're copying into the destination string. 3423 */ 3424 for (i = 0; addr < limit && i < size - 1; addr++) { 3425 if ((c = dtrace_load8(addr)) == '\0') 3426 break; 3427 3428 if (tokmap[c >> 3] & (1 << (c & 0x7))) 3429 break; 3430 3431 ASSERT(i < size); 3432 dest[i++] = c; 3433 } 3434 3435 ASSERT(i < size); 3436 dest[i] = '\0'; 3437 regs[rd] = (uintptr_t)dest; 3438 mstate->dtms_scratch_ptr += size; 3439 mstate->dtms_strtok = addr; 3440 break; 3441 } 3442 3443 case DIF_SUBR_SUBSTR: { 3444 uintptr_t s = tupregs[0].dttk_value; 3445 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3446 char *d = (char *)mstate->dtms_scratch_ptr; 3447 int64_t index = (int64_t)tupregs[1].dttk_value; 3448 int64_t remaining = (int64_t)tupregs[2].dttk_value; 3449 size_t len = dtrace_strlen((char *)s, size); 3450 int64_t i = 0; 3451 3452 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 3453 regs[rd] = NULL; 3454 break; 3455 } 3456 3457 if (nargs <= 2) 3458 remaining = (int64_t)size; 3459 3460 if (!DTRACE_INSCRATCH(mstate, size)) { 3461 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3462 regs[rd] = NULL; 3463 break; 3464 } 3465 3466 if (index < 0) { 3467 index += len; 3468 3469 if (index < 0 && index + remaining > 0) { 3470 remaining += index; 3471 index = 0; 3472 } 3473 } 3474 3475 if (index >= len || index < 0) 3476 index = len; 3477 3478 for (d[0] = '\0'; remaining > 0; remaining--) { 3479 if ((d[i++] = dtrace_load8(s++ + index)) == '\0') 3480 break; 3481 3482 if (i == size) { 3483 d[i - 1] = '\0'; 3484 break; 3485 } 3486 } 3487 3488 mstate->dtms_scratch_ptr += size; 3489 regs[rd] = (uintptr_t)d; 3490 break; 3491 } 3492 3493 case DIF_SUBR_GETMAJOR: 3494 #ifdef _LP64 3495 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 3496 #else 3497 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 3498 #endif 3499 break; 3500 3501 case DIF_SUBR_GETMINOR: 3502 #ifdef _LP64 3503 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 3504 #else 3505 regs[rd] = tupregs[0].dttk_value & MAXMIN; 3506 #endif 3507 break; 3508 3509 case DIF_SUBR_DDI_PATHNAME: { 3510 /* 3511 * This one is a galactic mess.
We are going to roughly 3512 * emulate ddi_pathname(), but it's made more complicated 3513 * by the fact that we (a) want to include the minor name and 3514 * (b) must proceed iteratively instead of recursively. 3515 */ 3516 uintptr_t dest = mstate->dtms_scratch_ptr; 3517 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3518 char *start = (char *)dest, *end = start + size - 1; 3519 uintptr_t daddr = tupregs[0].dttk_value; 3520 int64_t minor = (int64_t)tupregs[1].dttk_value; 3521 char *s; 3522 int i, len, depth = 0; 3523 3524 /* 3525 * Due to all the pointer jumping we do and context we must 3526 * rely upon, we just mandate that the user must have kernel 3527 * read privileges to use this routine. 3528 */ 3529 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 3530 *flags |= CPU_DTRACE_KPRIV; 3531 *illval = daddr; 3532 regs[rd] = NULL; 3533 } 3534 3535 if (!DTRACE_INSCRATCH(mstate, size)) { 3536 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3537 regs[rd] = NULL; 3538 break; 3539 } 3540 3541 *end = '\0'; 3542 3543 /* 3544 * We want to have a name for the minor. In order to do this, 3545 * we need to walk the minor list from the devinfo. We want 3546 * to be sure that we don't infinitely walk a circular list, 3547 * so we check for circularity by sending a scout pointer 3548 * ahead two elements for every element that we iterate over; 3549 * if the list is circular, these will ultimately point to the 3550 * same element. You may recognize this little trick as the 3551 * answer to a stupid interview question -- one that always 3552 * seems to be asked by those who had to have it laboriously 3553 * explained to them, and who can't even concisely describe 3554 * the conditions under which one would be forced to resort to 3555 * this technique. Needless to say, those conditions are 3556 * found here -- and probably only here. Is this the only 3557 * use of this infamous trick in shipping, production code? 3558 * If it isn't, it probably should be... 3559 */ 3560 if (minor != -1) { 3561 uintptr_t maddr = dtrace_loadptr(daddr + 3562 offsetof(struct dev_info, devi_minor)); 3563 3564 uintptr_t next = offsetof(struct ddi_minor_data, next); 3565 uintptr_t name = offsetof(struct ddi_minor_data, 3566 d_minor) + offsetof(struct ddi_minor, name); 3567 uintptr_t dev = offsetof(struct ddi_minor_data, 3568 d_minor) + offsetof(struct ddi_minor, dev); 3569 uintptr_t scout; 3570 3571 if (maddr != NULL) 3572 scout = dtrace_loadptr(maddr + next); 3573 3574 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 3575 uint64_t m; 3576 #ifdef _LP64 3577 m = dtrace_load64(maddr + dev) & MAXMIN64; 3578 #else 3579 m = dtrace_load32(maddr + dev) & MAXMIN; 3580 #endif 3581 if (m != minor) { 3582 maddr = dtrace_loadptr(maddr + next); 3583 3584 if (scout == NULL) 3585 continue; 3586 3587 scout = dtrace_loadptr(scout + next); 3588 3589 if (scout == NULL) 3590 continue; 3591 3592 scout = dtrace_loadptr(scout + next); 3593 3594 if (scout == NULL) 3595 continue; 3596 3597 if (scout == maddr) { 3598 *flags |= CPU_DTRACE_ILLOP; 3599 break; 3600 } 3601 3602 continue; 3603 } 3604 3605 /* 3606 * We have the minor data. Now we need to 3607 * copy the minor's name into the end of the 3608 * pathname.
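 *
 * (For reference, the scout-based circularity check above is the
 * classic two-speed pointer chase; distilled to a user-level sketch
 * over a hypothetical node type, it is:
 *
 *	struct node { struct node *next; };
 *
 *	static int
 *	is_circular(struct node *slow)
 *	{
 *		struct node *fast = slow;
 *
 *		while (fast != NULL && fast->next != NULL) {
 *			slow = slow->next;
 *			fast = fast->next->next;
 *
 *			if (fast == slow)
 *				return (1);
 *		}
 *
 *		return (0);
 *	}
 *
 * The loop above differs only in that it advances via dtrace_loadptr()
 * and reports a detected cycle by setting CPU_DTRACE_ILLOP.)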
3609 */ 3610 s = (char *)dtrace_loadptr(maddr + name); 3611 len = dtrace_strlen(s, size); 3612 3613 if (*flags & CPU_DTRACE_FAULT) 3614 break; 3615 3616 if (len != 0) { 3617 if ((end -= (len + 1)) < start) 3618 break; 3619 3620 *end = ':'; 3621 } 3622 3623 for (i = 1; i <= len; i++) 3624 end[i] = dtrace_load8((uintptr_t)s++); 3625 break; 3626 } 3627 } 3628 3629 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 3630 ddi_node_state_t devi_state; 3631 3632 devi_state = dtrace_load32(daddr + 3633 offsetof(struct dev_info, devi_node_state)); 3634 3635 if (*flags & CPU_DTRACE_FAULT) 3636 break; 3637 3638 if (devi_state >= DS_INITIALIZED) { 3639 s = (char *)dtrace_loadptr(daddr + 3640 offsetof(struct dev_info, devi_addr)); 3641 len = dtrace_strlen(s, size); 3642 3643 if (*flags & CPU_DTRACE_FAULT) 3644 break; 3645 3646 if (len != 0) { 3647 if ((end -= (len + 1)) < start) 3648 break; 3649 3650 *end = '@'; 3651 } 3652 3653 for (i = 1; i <= len; i++) 3654 end[i] = dtrace_load8((uintptr_t)s++); 3655 } 3656 3657 /* 3658 * Now for the node name... 3659 */ 3660 s = (char *)dtrace_loadptr(daddr + 3661 offsetof(struct dev_info, devi_node_name)); 3662 3663 daddr = dtrace_loadptr(daddr + 3664 offsetof(struct dev_info, devi_parent)); 3665 3666 /* 3667 * If our parent is NULL (that is, if we're the root 3668 * node), we're going to use the special path 3669 * "devices". 3670 */ 3671 if (daddr == NULL) 3672 s = "devices"; 3673 3674 len = dtrace_strlen(s, size); 3675 if (*flags & CPU_DTRACE_FAULT) 3676 break; 3677 3678 if ((end -= (len + 1)) < start) 3679 break; 3680 3681 for (i = 1; i <= len; i++) 3682 end[i] = dtrace_load8((uintptr_t)s++); 3683 *end = '/'; 3684 3685 if (depth++ > dtrace_devdepth_max) { 3686 *flags |= CPU_DTRACE_ILLOP; 3687 break; 3688 } 3689 } 3690 3691 if (end < start) 3692 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3693 3694 if (daddr == NULL) { 3695 regs[rd] = (uintptr_t)end; 3696 mstate->dtms_scratch_ptr += size; 3697 } 3698 3699 break; 3700 } 3701 3702 case DIF_SUBR_STRJOIN: { 3703 char *d = (char *)mstate->dtms_scratch_ptr; 3704 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3705 uintptr_t s1 = tupregs[0].dttk_value; 3706 uintptr_t s2 = tupregs[1].dttk_value; 3707 int i = 0; 3708 3709 if (!dtrace_strcanload(s1, size, mstate, vstate) || 3710 !dtrace_strcanload(s2, size, mstate, vstate)) { 3711 regs[rd] = NULL; 3712 break; 3713 } 3714 3715 if (!DTRACE_INSCRATCH(mstate, size)) { 3716 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3717 regs[rd] = NULL; 3718 break; 3719 } 3720 3721 for (;;) { 3722 if (i >= size) { 3723 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3724 regs[rd] = NULL; 3725 break; 3726 } 3727 3728 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 3729 i--; 3730 break; 3731 } 3732 } 3733 3734 for (;;) { 3735 if (i >= size) { 3736 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3737 regs[rd] = NULL; 3738 break; 3739 } 3740 3741 if ((d[i++] = dtrace_load8(s2++)) == '\0') 3742 break; 3743 } 3744 3745 if (i < size) { 3746 mstate->dtms_scratch_ptr += i; 3747 regs[rd] = (uintptr_t)d; 3748 } 3749 3750 break; 3751 } 3752 3753 case DIF_SUBR_LLTOSTR: { 3754 int64_t i = (int64_t)tupregs[0].dttk_value; 3755 int64_t val = i < 0 ? 
i * -1 : i; 3756 uint64_t size = 22; /* enough room for 2^64 in decimal */ 3757 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 3758 3759 if (!DTRACE_INSCRATCH(mstate, size)) { 3760 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3761 regs[rd] = NULL; 3762 break; 3763 } 3764 3765 for (*end-- = '\0'; val; val /= 10) 3766 *end-- = '0' + (val % 10); 3767 3768 if (i == 0) 3769 *end-- = '0'; 3770 3771 if (i < 0) 3772 *end-- = '-'; 3773 3774 regs[rd] = (uintptr_t)end + 1; 3775 mstate->dtms_scratch_ptr += size; 3776 break; 3777 } 3778 3779 case DIF_SUBR_HTONS: 3780 case DIF_SUBR_NTOHS: 3781 #ifdef _BIG_ENDIAN 3782 regs[rd] = (uint16_t)tupregs[0].dttk_value; 3783 #else 3784 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 3785 #endif 3786 break; 3787 3788 3789 case DIF_SUBR_HTONL: 3790 case DIF_SUBR_NTOHL: 3791 #ifdef _BIG_ENDIAN 3792 regs[rd] = (uint32_t)tupregs[0].dttk_value; 3793 #else 3794 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 3795 #endif 3796 break; 3797 3798 3799 case DIF_SUBR_HTONLL: 3800 case DIF_SUBR_NTOHLL: 3801 #ifdef _BIG_ENDIAN 3802 regs[rd] = (uint64_t)tupregs[0].dttk_value; 3803 #else 3804 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 3805 #endif 3806 break; 3807 3808 3809 case DIF_SUBR_DIRNAME: 3810 case DIF_SUBR_BASENAME: { 3811 char *dest = (char *)mstate->dtms_scratch_ptr; 3812 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3813 uintptr_t src = tupregs[0].dttk_value; 3814 int i, j, len = dtrace_strlen((char *)src, size); 3815 int lastbase = -1, firstbase = -1, lastdir = -1; 3816 int start, end; 3817 3818 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 3819 regs[rd] = NULL; 3820 break; 3821 } 3822 3823 if (!DTRACE_INSCRATCH(mstate, size)) { 3824 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3825 regs[rd] = NULL; 3826 break; 3827 } 3828 3829 /* 3830 * The basename and dirname for a zero-length string are 3831 * defined to be ".". 3832 */ 3833 if (len == 0) { 3834 len = 1; 3835 src = (uintptr_t)"."; 3836 } 3837 3838 /* 3839 * Start from the back of the string, moving back toward the 3840 * front until we see a character that isn't a slash. That 3841 * character is the last character in the basename. 3842 */ 3843 for (i = len - 1; i >= 0; i--) { 3844 if (dtrace_load8(src + i) != '/') 3845 break; 3846 } 3847 3848 if (i >= 0) 3849 lastbase = i; 3850 3851 /* 3852 * Starting from the last character in the basename, move 3853 * towards the front until we find a slash. The character 3854 * that we processed immediately before that is the first 3855 * character in the basename. 3856 */ 3857 for (; i >= 0; i--) { 3858 if (dtrace_load8(src + i) == '/') 3859 break; 3860 } 3861 3862 if (i >= 0) 3863 firstbase = i + 1; 3864 3865 /* 3866 * Now keep going until we find a non-slash character. That 3867 * character is the last character in the dirname. 3868 */ 3869 for (; i >= 0; i--) { 3870 if (dtrace_load8(src + i) != '/') 3871 break; 3872 } 3873 3874 if (i >= 0) 3875 lastdir = i; 3876 3877 ASSERT(!(lastbase == -1 && firstbase != -1)); 3878 ASSERT(!(firstbase == -1 && lastdir != -1)); 3879 3880 if (lastbase == -1) { 3881 /* 3882 * We didn't find a non-slash character. We know that 3883 * the length is non-zero, so the whole string must be 3884 * slashes. In either the dirname or the basename 3885 * case, we return '/'. 3886 */ 3887 ASSERT(firstbase == -1); 3888 firstbase = lastbase = lastdir = 0; 3889 } 3890 3891 if (firstbase == -1) { 3892 /* 3893 * The entire string consists only of a basename 3894 * component.
If we're looking for dirname, we need 3895 * to change our string to be just "."; if we're 3896 * looking for a basename, we'll just set the first 3897 * character of the basename to be 0. 3898 */ 3899 if (subr == DIF_SUBR_DIRNAME) { 3900 ASSERT(lastdir == -1); 3901 src = (uintptr_t)"."; 3902 lastdir = 0; 3903 } else { 3904 firstbase = 0; 3905 } 3906 } 3907 3908 if (subr == DIF_SUBR_DIRNAME) { 3909 if (lastdir == -1) { 3910 /* 3911 * We know that we have a slash in the name -- 3912 * or lastdir would be set to 0, above. And 3913 * because lastdir is -1, we know that this 3914 * slash must be the first character. (That 3915 * is, the full string must be of the form 3916 * "/basename".) In this case, the last 3917 * character of the directory name is 0. 3918 */ 3919 lastdir = 0; 3920 } 3921 3922 start = 0; 3923 end = lastdir; 3924 } else { 3925 ASSERT(subr == DIF_SUBR_BASENAME); 3926 ASSERT(firstbase != -1 && lastbase != -1); 3927 start = firstbase; 3928 end = lastbase; 3929 } 3930 3931 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 3932 dest[j] = dtrace_load8(src + i); 3933 3934 dest[j] = '\0'; 3935 regs[rd] = (uintptr_t)dest; 3936 mstate->dtms_scratch_ptr += size; 3937 break; 3938 } 3939 3940 case DIF_SUBR_CLEANPATH: { 3941 char *dest = (char *)mstate->dtms_scratch_ptr, c; 3942 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3943 uintptr_t src = tupregs[0].dttk_value; 3944 int i = 0, j = 0; 3945 3946 if (!dtrace_strcanload(src, size, mstate, vstate)) { 3947 regs[rd] = NULL; 3948 break; 3949 } 3950 3951 if (!DTRACE_INSCRATCH(mstate, size)) { 3952 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3953 regs[rd] = NULL; 3954 break; 3955 } 3956 3957 /* 3958 * Move forward, loading each character. 3959 */ 3960 do { 3961 c = dtrace_load8(src + i++); 3962 next: 3963 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 3964 break; 3965 3966 if (c != '/') { 3967 dest[j++] = c; 3968 continue; 3969 } 3970 3971 c = dtrace_load8(src + i++); 3972 3973 if (c == '/') { 3974 /* 3975 * We have two slashes -- we can just advance 3976 * to the next character. 3977 */ 3978 goto next; 3979 } 3980 3981 if (c != '.') { 3982 /* 3983 * This is not "." and it's not ".." -- we can 3984 * just store the "/" and this character and 3985 * drive on. 3986 */ 3987 dest[j++] = '/'; 3988 dest[j++] = c; 3989 continue; 3990 } 3991 3992 c = dtrace_load8(src + i++); 3993 3994 if (c == '/') { 3995 /* 3996 * This is a "/./" component. We're not going 3997 * to store anything in the destination buffer; 3998 * we're just going to go to the next component. 3999 */ 4000 goto next; 4001 } 4002 4003 if (c != '.') { 4004 /* 4005 * This is not ".." -- we can just store the 4006 * "/." and this character and continue 4007 * processing. 4008 */ 4009 dest[j++] = '/'; 4010 dest[j++] = '.'; 4011 dest[j++] = c; 4012 continue; 4013 } 4014 4015 c = dtrace_load8(src + i++); 4016 4017 if (c != '/' && c != '\0') { 4018 /* 4019 * This is not ".." -- it's "..[mumble]". 4020 * We'll store the "/.." and this character 4021 * and continue processing. 4022 */ 4023 dest[j++] = '/'; 4024 dest[j++] = '.'; 4025 dest[j++] = '.'; 4026 dest[j++] = c; 4027 continue; 4028 } 4029 4030 /* 4031 * This is "/../" or "/..\0". We need to back up 4032 * our destination pointer until we find a "/". 
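 *
 * (Illustrative aside, not part of the original comment: taken
 * together, the cases above yield transformations such as
 *
 *	"/foo//bar"        =>  "/foo/bar"
 *	"/foo/./bar"       =>  "/foo/bar"
 *	"/foo/baz/../bar"  =>  "/foo/bar"
 *	"/foo/bar/.."      =>  "/foo/"
 *
 * while components like "..foo" are copied through unchanged.)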
4033 */ 4034 i--; 4035 while (j != 0 && dest[--j] != '/') 4036 continue; 4037 4038 if (c == '\0') 4039 dest[++j] = '/'; 4040 } while (c != '\0'); 4041 4042 dest[j] = '\0'; 4043 regs[rd] = (uintptr_t)dest; 4044 mstate->dtms_scratch_ptr += size; 4045 break; 4046 } 4047 } 4048 } 4049 4050 /* 4051 * Emulate the execution of DTrace IR instructions specified by the given 4052 * DIF object. This function is deliberately void of assertions as all of 4053 * the necessary checks are handled by a call to dtrace_difo_validate(). 4054 */ 4055 static uint64_t 4056 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 4057 dtrace_vstate_t *vstate, dtrace_state_t *state) 4058 { 4059 const dif_instr_t *text = difo->dtdo_buf; 4060 const uint_t textlen = difo->dtdo_len; 4061 const char *strtab = difo->dtdo_strtab; 4062 const uint64_t *inttab = difo->dtdo_inttab; 4063 4064 uint64_t rval = 0; 4065 dtrace_statvar_t *svar; 4066 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 4067 dtrace_difv_t *v; 4068 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 4069 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 4070 4071 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 4072 uint64_t regs[DIF_DIR_NREGS]; 4073 uint64_t *tmp; 4074 4075 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 4076 int64_t cc_r; 4077 uint_t pc = 0, id, opc; 4078 uint8_t ttop = 0; 4079 dif_instr_t instr; 4080 uint_t r1, r2, rd; 4081 4082 /* 4083 * We stash the current DIF object into the machine state: we need it 4084 * for subsequent access checking. 4085 */ 4086 mstate->dtms_difo = difo; 4087 4088 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 4089 4090 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 4091 opc = pc; 4092 4093 instr = text[pc++]; 4094 r1 = DIF_INSTR_R1(instr); 4095 r2 = DIF_INSTR_R2(instr); 4096 rd = DIF_INSTR_RD(instr); 4097 4098 switch (DIF_INSTR_OP(instr)) { 4099 case DIF_OP_OR: 4100 regs[rd] = regs[r1] | regs[r2]; 4101 break; 4102 case DIF_OP_XOR: 4103 regs[rd] = regs[r1] ^ regs[r2]; 4104 break; 4105 case DIF_OP_AND: 4106 regs[rd] = regs[r1] & regs[r2]; 4107 break; 4108 case DIF_OP_SLL: 4109 regs[rd] = regs[r1] << regs[r2]; 4110 break; 4111 case DIF_OP_SRL: 4112 regs[rd] = regs[r1] >> regs[r2]; 4113 break; 4114 case DIF_OP_SUB: 4115 regs[rd] = regs[r1] - regs[r2]; 4116 break; 4117 case DIF_OP_ADD: 4118 regs[rd] = regs[r1] + regs[r2]; 4119 break; 4120 case DIF_OP_MUL: 4121 regs[rd] = regs[r1] * regs[r2]; 4122 break; 4123 case DIF_OP_SDIV: 4124 if (regs[r2] == 0) { 4125 regs[rd] = 0; 4126 *flags |= CPU_DTRACE_DIVZERO; 4127 } else { 4128 regs[rd] = (int64_t)regs[r1] / 4129 (int64_t)regs[r2]; 4130 } 4131 break; 4132 4133 case DIF_OP_UDIV: 4134 if (regs[r2] == 0) { 4135 regs[rd] = 0; 4136 *flags |= CPU_DTRACE_DIVZERO; 4137 } else { 4138 regs[rd] = regs[r1] / regs[r2]; 4139 } 4140 break; 4141 4142 case DIF_OP_SREM: 4143 if (regs[r2] == 0) { 4144 regs[rd] = 0; 4145 *flags |= CPU_DTRACE_DIVZERO; 4146 } else { 4147 regs[rd] = (int64_t)regs[r1] % 4148 (int64_t)regs[r2]; 4149 } 4150 break; 4151 4152 case DIF_OP_UREM: 4153 if (regs[r2] == 0) { 4154 regs[rd] = 0; 4155 *flags |= CPU_DTRACE_DIVZERO; 4156 } else { 4157 regs[rd] = regs[r1] % regs[r2]; 4158 } 4159 break; 4160 4161 case DIF_OP_NOT: 4162 regs[rd] = ~regs[r1]; 4163 break; 4164 case DIF_OP_MOV: 4165 regs[rd] = regs[r1]; 4166 break; 4167 case DIF_OP_CMP: 4168 cc_r = regs[r1] - regs[r2]; 4169 cc_n = cc_r < 0; 4170 cc_z = cc_r == 0; 4171 cc_v = 0; 4172 cc_c = regs[r1] < regs[r2]; 4173 break; 4174 case 
DIF_OP_TST: 4175 cc_n = cc_v = cc_c = 0; 4176 cc_z = regs[r1] == 0; 4177 break; 4178 case DIF_OP_BA: 4179 pc = DIF_INSTR_LABEL(instr); 4180 break; 4181 case DIF_OP_BE: 4182 if (cc_z) 4183 pc = DIF_INSTR_LABEL(instr); 4184 break; 4185 case DIF_OP_BNE: 4186 if (cc_z == 0) 4187 pc = DIF_INSTR_LABEL(instr); 4188 break; 4189 case DIF_OP_BG: 4190 if ((cc_z | (cc_n ^ cc_v)) == 0) 4191 pc = DIF_INSTR_LABEL(instr); 4192 break; 4193 case DIF_OP_BGU: 4194 if ((cc_c | cc_z) == 0) 4195 pc = DIF_INSTR_LABEL(instr); 4196 break; 4197 case DIF_OP_BGE: 4198 if ((cc_n ^ cc_v) == 0) 4199 pc = DIF_INSTR_LABEL(instr); 4200 break; 4201 case DIF_OP_BGEU: 4202 if (cc_c == 0) 4203 pc = DIF_INSTR_LABEL(instr); 4204 break; 4205 case DIF_OP_BL: 4206 if (cc_n ^ cc_v) 4207 pc = DIF_INSTR_LABEL(instr); 4208 break; 4209 case DIF_OP_BLU: 4210 if (cc_c) 4211 pc = DIF_INSTR_LABEL(instr); 4212 break; 4213 case DIF_OP_BLE: 4214 if (cc_z | (cc_n ^ cc_v)) 4215 pc = DIF_INSTR_LABEL(instr); 4216 break; 4217 case DIF_OP_BLEU: 4218 if (cc_c | cc_z) 4219 pc = DIF_INSTR_LABEL(instr); 4220 break; 4221 case DIF_OP_RLDSB: 4222 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 4223 *flags |= CPU_DTRACE_KPRIV; 4224 *illval = regs[r1]; 4225 break; 4226 } 4227 /*FALLTHROUGH*/ 4228 case DIF_OP_LDSB: 4229 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 4230 break; 4231 case DIF_OP_RLDSH: 4232 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 4233 *flags |= CPU_DTRACE_KPRIV; 4234 *illval = regs[r1]; 4235 break; 4236 } 4237 /*FALLTHROUGH*/ 4238 case DIF_OP_LDSH: 4239 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 4240 break; 4241 case DIF_OP_RLDSW: 4242 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 4243 *flags |= CPU_DTRACE_KPRIV; 4244 *illval = regs[r1]; 4245 break; 4246 } 4247 /*FALLTHROUGH*/ 4248 case DIF_OP_LDSW: 4249 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 4250 break; 4251 case DIF_OP_RLDUB: 4252 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 4253 *flags |= CPU_DTRACE_KPRIV; 4254 *illval = regs[r1]; 4255 break; 4256 } 4257 /*FALLTHROUGH*/ 4258 case DIF_OP_LDUB: 4259 regs[rd] = dtrace_load8(regs[r1]); 4260 break; 4261 case DIF_OP_RLDUH: 4262 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 4263 *flags |= CPU_DTRACE_KPRIV; 4264 *illval = regs[r1]; 4265 break; 4266 } 4267 /*FALLTHROUGH*/ 4268 case DIF_OP_LDUH: 4269 regs[rd] = dtrace_load16(regs[r1]); 4270 break; 4271 case DIF_OP_RLDUW: 4272 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 4273 *flags |= CPU_DTRACE_KPRIV; 4274 *illval = regs[r1]; 4275 break; 4276 } 4277 /*FALLTHROUGH*/ 4278 case DIF_OP_LDUW: 4279 regs[rd] = dtrace_load32(regs[r1]); 4280 break; 4281 case DIF_OP_RLDX: 4282 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) { 4283 *flags |= CPU_DTRACE_KPRIV; 4284 *illval = regs[r1]; 4285 break; 4286 } 4287 /*FALLTHROUGH*/ 4288 case DIF_OP_LDX: 4289 regs[rd] = dtrace_load64(regs[r1]); 4290 break; 4291 case DIF_OP_ULDSB: 4292 regs[rd] = (int8_t) 4293 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 4294 break; 4295 case DIF_OP_ULDSH: 4296 regs[rd] = (int16_t) 4297 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 4298 break; 4299 case DIF_OP_ULDSW: 4300 regs[rd] = (int32_t) 4301 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 4302 break; 4303 case DIF_OP_ULDUB: 4304 regs[rd] = 4305 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 4306 break; 4307 case DIF_OP_ULDUH: 4308 regs[rd] = 4309 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 4310 break; 4311 case DIF_OP_ULDUW: 4312 regs[rd] = 4313 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 4314 break; 4315 case DIF_OP_ULDX: 4316 
regs[rd] = 4317 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 4318 break; 4319 case DIF_OP_RET: 4320 rval = regs[rd]; 4321 break; 4322 case DIF_OP_NOP: 4323 break; 4324 case DIF_OP_SETX: 4325 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 4326 break; 4327 case DIF_OP_SETS: 4328 regs[rd] = (uint64_t)(uintptr_t) 4329 (strtab + DIF_INSTR_STRING(instr)); 4330 break; 4331 case DIF_OP_SCMP: { 4332 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 4333 uintptr_t s1 = regs[r1]; 4334 uintptr_t s2 = regs[r2]; 4335 4336 if (s1 != NULL && 4337 !dtrace_strcanload(s1, sz, mstate, vstate)) 4338 break; 4339 if (s2 != NULL && 4340 !dtrace_strcanload(s2, sz, mstate, vstate)) 4341 break; 4342 4343 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz); 4344 4345 cc_n = cc_r < 0; 4346 cc_z = cc_r == 0; 4347 cc_v = cc_c = 0; 4348 break; 4349 } 4350 case DIF_OP_LDGA: 4351 regs[rd] = dtrace_dif_variable(mstate, state, 4352 r1, regs[r2]); 4353 break; 4354 case DIF_OP_LDGS: 4355 id = DIF_INSTR_VAR(instr); 4356 4357 if (id >= DIF_VAR_OTHER_UBASE) { 4358 uintptr_t a; 4359 4360 id -= DIF_VAR_OTHER_UBASE; 4361 svar = vstate->dtvs_globals[id]; 4362 ASSERT(svar != NULL); 4363 v = &svar->dtsv_var; 4364 4365 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 4366 regs[rd] = svar->dtsv_data; 4367 break; 4368 } 4369 4370 a = (uintptr_t)svar->dtsv_data; 4371 4372 if (*(uint8_t *)a == UINT8_MAX) { 4373 /* 4374 * If the 0th byte is set to UINT8_MAX 4375 * then this is to be treated as a 4376 * reference to a NULL variable. 4377 */ 4378 regs[rd] = NULL; 4379 } else { 4380 regs[rd] = a + sizeof (uint64_t); 4381 } 4382 4383 break; 4384 } 4385 4386 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 4387 break; 4388 4389 case DIF_OP_STGS: 4390 id = DIF_INSTR_VAR(instr); 4391 4392 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4393 id -= DIF_VAR_OTHER_UBASE; 4394 4395 svar = vstate->dtvs_globals[id]; 4396 ASSERT(svar != NULL); 4397 v = &svar->dtsv_var; 4398 4399 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4400 uintptr_t a = (uintptr_t)svar->dtsv_data; 4401 4402 ASSERT(a != NULL); 4403 ASSERT(svar->dtsv_size != 0); 4404 4405 if (regs[rd] == NULL) { 4406 *(uint8_t *)a = UINT8_MAX; 4407 break; 4408 } else { 4409 *(uint8_t *)a = 0; 4410 a += sizeof (uint64_t); 4411 } 4412 if (!dtrace_vcanload( 4413 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 4414 mstate, vstate)) 4415 break; 4416 4417 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4418 (void *)a, &v->dtdv_type); 4419 break; 4420 } 4421 4422 svar->dtsv_data = regs[rd]; 4423 break; 4424 4425 case DIF_OP_LDTA: 4426 /* 4427 * There are no DTrace built-in thread-local arrays at 4428 * present. This opcode is saved for future work. 4429 */ 4430 *flags |= CPU_DTRACE_ILLOP; 4431 regs[rd] = 0; 4432 break; 4433 4434 case DIF_OP_LDLS: 4435 id = DIF_INSTR_VAR(instr); 4436 4437 if (id < DIF_VAR_OTHER_UBASE) { 4438 /* 4439 * For now, this has no meaning. 
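 *
 * (Aside, not part of the original comment: unlike the
 * DIF_OP_LDGS case above, where identifiers below
 * DIF_VAR_OTHER_UBASE name built-in variables, there are no
 * built-in local variables, so such an identifier simply
 * evaluates to zero here.)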
4440 */ 4441 regs[rd] = 0; 4442 break; 4443 } 4444 4445 id -= DIF_VAR_OTHER_UBASE; 4446 4447 ASSERT(id < vstate->dtvs_nlocals); 4448 ASSERT(vstate->dtvs_locals != NULL); 4449 4450 svar = vstate->dtvs_locals[id]; 4451 ASSERT(svar != NULL); 4452 v = &svar->dtsv_var; 4453 4454 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4455 uintptr_t a = (uintptr_t)svar->dtsv_data; 4456 size_t sz = v->dtdv_type.dtdt_size; 4457 4458 sz += sizeof (uint64_t); 4459 ASSERT(svar->dtsv_size == NCPU * sz); 4460 a += CPU->cpu_id * sz; 4461 4462 if (*(uint8_t *)a == UINT8_MAX) { 4463 /* 4464 * If the 0th byte is set to UINT8_MAX 4465 * then this is to be treated as a 4466 * reference to a NULL variable. 4467 */ 4468 regs[rd] = NULL; 4469 } else { 4470 regs[rd] = a + sizeof (uint64_t); 4471 } 4472 4473 break; 4474 } 4475 4476 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 4477 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 4478 regs[rd] = tmp[CPU->cpu_id]; 4479 break; 4480 4481 case DIF_OP_STLS: 4482 id = DIF_INSTR_VAR(instr); 4483 4484 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4485 id -= DIF_VAR_OTHER_UBASE; 4486 ASSERT(id < vstate->dtvs_nlocals); 4487 4488 ASSERT(vstate->dtvs_locals != NULL); 4489 svar = vstate->dtvs_locals[id]; 4490 ASSERT(svar != NULL); 4491 v = &svar->dtsv_var; 4492 4493 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4494 uintptr_t a = (uintptr_t)svar->dtsv_data; 4495 size_t sz = v->dtdv_type.dtdt_size; 4496 4497 sz += sizeof (uint64_t); 4498 ASSERT(svar->dtsv_size == NCPU * sz); 4499 a += CPU->cpu_id * sz; 4500 4501 if (regs[rd] == NULL) { 4502 *(uint8_t *)a = UINT8_MAX; 4503 break; 4504 } else { 4505 *(uint8_t *)a = 0; 4506 a += sizeof (uint64_t); 4507 } 4508 4509 if (!dtrace_vcanload( 4510 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 4511 mstate, vstate)) 4512 break; 4513 4514 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4515 (void *)a, &v->dtdv_type); 4516 break; 4517 } 4518 4519 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 4520 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 4521 tmp[CPU->cpu_id] = regs[rd]; 4522 break; 4523 4524 case DIF_OP_LDTS: { 4525 dtrace_dynvar_t *dvar; 4526 dtrace_key_t *key; 4527 4528 id = DIF_INSTR_VAR(instr); 4529 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4530 id -= DIF_VAR_OTHER_UBASE; 4531 v = &vstate->dtvs_tlocals[id]; 4532 4533 key = &tupregs[DIF_DTR_NREGS]; 4534 key[0].dttk_value = (uint64_t)id; 4535 key[0].dttk_size = 0; 4536 DTRACE_TLS_THRKEY(key[1].dttk_value); 4537 key[1].dttk_size = 0; 4538 4539 dvar = dtrace_dynvar(dstate, 2, key, 4540 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 4541 mstate, vstate); 4542 4543 if (dvar == NULL) { 4544 regs[rd] = 0; 4545 break; 4546 } 4547 4548 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4549 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 4550 } else { 4551 regs[rd] = *((uint64_t *)dvar->dtdv_data); 4552 } 4553 4554 break; 4555 } 4556 4557 case DIF_OP_STTS: { 4558 dtrace_dynvar_t *dvar; 4559 dtrace_key_t *key; 4560 4561 id = DIF_INSTR_VAR(instr); 4562 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4563 id -= DIF_VAR_OTHER_UBASE; 4564 4565 key = &tupregs[DIF_DTR_NREGS]; 4566 key[0].dttk_value = (uint64_t)id; 4567 key[0].dttk_size = 0; 4568 DTRACE_TLS_THRKEY(key[1].dttk_value); 4569 key[1].dttk_size = 0; 4570 v = &vstate->dtvs_tlocals[id]; 4571 4572 dvar = dtrace_dynvar(dstate, 2, key, 4573 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 4574 v->dtdv_type.dtdt_size : sizeof (uint64_t), 4575 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 4576 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 4577 4578 /* 4579 * Given that we're storing to thread-local data, 4580 * we need to flush our predicate cache. 4581 */ 4582 curthread->t_predcache = NULL; 4583 4584 if (dvar == NULL) 4585 break; 4586 4587 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4588 if (!dtrace_vcanload( 4589 (void *)(uintptr_t)regs[rd], 4590 &v->dtdv_type, mstate, vstate)) 4591 break; 4592 4593 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4594 dvar->dtdv_data, &v->dtdv_type); 4595 } else { 4596 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 4597 } 4598 4599 break; 4600 } 4601 4602 case DIF_OP_SRA: 4603 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 4604 break; 4605 4606 case DIF_OP_CALL: 4607 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 4608 regs, tupregs, ttop, mstate, state); 4609 break; 4610 4611 case DIF_OP_PUSHTR: 4612 if (ttop == DIF_DTR_NREGS) { 4613 *flags |= CPU_DTRACE_TUPOFLOW; 4614 break; 4615 } 4616 4617 if (r1 == DIF_TYPE_STRING) { 4618 /* 4619 * If this is a string type and the size is 0, 4620 * we'll use the system-wide default string 4621 * size. Note that we are _not_ looking at 4622 * the value of the DTRACEOPT_STRSIZE option; 4623 * had this been set, we would expect to have 4624 * a non-zero size value in the "pushtr". 4625 */ 4626 tupregs[ttop].dttk_size = 4627 dtrace_strlen((char *)(uintptr_t)regs[rd], 4628 regs[r2] ? regs[r2] : 4629 dtrace_strsize_default) + 1; 4630 } else { 4631 tupregs[ttop].dttk_size = regs[r2]; 4632 } 4633 4634 tupregs[ttop++].dttk_value = regs[rd]; 4635 break; 4636 4637 case DIF_OP_PUSHTV: 4638 if (ttop == DIF_DTR_NREGS) { 4639 *flags |= CPU_DTRACE_TUPOFLOW; 4640 break; 4641 } 4642 4643 tupregs[ttop].dttk_value = regs[rd]; 4644 tupregs[ttop++].dttk_size = 0; 4645 break; 4646 4647 case DIF_OP_POPTS: 4648 if (ttop != 0) 4649 ttop--; 4650 break; 4651 4652 case DIF_OP_FLUSHTS: 4653 ttop = 0; 4654 break; 4655 4656 case DIF_OP_LDGAA: 4657 case DIF_OP_LDTAA: { 4658 dtrace_dynvar_t *dvar; 4659 dtrace_key_t *key = tupregs; 4660 uint_t nkeys = ttop; 4661 4662 id = DIF_INSTR_VAR(instr); 4663 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4664 id -= DIF_VAR_OTHER_UBASE; 4665 4666 key[nkeys].dttk_value = (uint64_t)id; 4667 key[nkeys++].dttk_size = 0; 4668 4669 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 4670 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 4671 key[nkeys++].dttk_size = 0; 4672 v = &vstate->dtvs_tlocals[id]; 4673 } else { 4674 v = &vstate->dtvs_globals[id]->dtsv_var; 4675 } 4676 4677 dvar = dtrace_dynvar(dstate, nkeys, key, 4678 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 
4679 v->dtdv_type.dtdt_size : sizeof (uint64_t), 4680 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 4681 4682 if (dvar == NULL) { 4683 regs[rd] = 0; 4684 break; 4685 } 4686 4687 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4688 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 4689 } else { 4690 regs[rd] = *((uint64_t *)dvar->dtdv_data); 4691 } 4692 4693 break; 4694 } 4695 4696 case DIF_OP_STGAA: 4697 case DIF_OP_STTAA: { 4698 dtrace_dynvar_t *dvar; 4699 dtrace_key_t *key = tupregs; 4700 uint_t nkeys = ttop; 4701 4702 id = DIF_INSTR_VAR(instr); 4703 ASSERT(id >= DIF_VAR_OTHER_UBASE); 4704 id -= DIF_VAR_OTHER_UBASE; 4705 4706 key[nkeys].dttk_value = (uint64_t)id; 4707 key[nkeys++].dttk_size = 0; 4708 4709 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 4710 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 4711 key[nkeys++].dttk_size = 0; 4712 v = &vstate->dtvs_tlocals[id]; 4713 } else { 4714 v = &vstate->dtvs_globals[id]->dtsv_var; 4715 } 4716 4717 dvar = dtrace_dynvar(dstate, nkeys, key, 4718 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 4719 v->dtdv_type.dtdt_size : sizeof (uint64_t), 4720 regs[rd] ? DTRACE_DYNVAR_ALLOC : 4721 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 4722 4723 if (dvar == NULL) 4724 break; 4725 4726 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 4727 if (!dtrace_vcanload( 4728 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 4729 mstate, vstate)) 4730 break; 4731 4732 dtrace_vcopy((void *)(uintptr_t)regs[rd], 4733 dvar->dtdv_data, &v->dtdv_type); 4734 } else { 4735 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 4736 } 4737 4738 break; 4739 } 4740 4741 case DIF_OP_ALLOCS: { 4742 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 4743 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 4744 4745 /* 4746 * Rounding up the user allocation size could have 4747 * overflowed large, bogus allocations (like -1ULL) to 4748 * 0. 
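 *
 * (Worked example, not part of the original comment: with a
 * scratch pointer ending in ...04 and regs[r1] equal to
 * 0xffffffffffffffff, P2ROUNDUP() yields a ptr ending in ...08,
 * and the addition wraps "size" around to a tiny value; the
 * "size < regs[r1]" test below catches exactly this wrap.)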
4749 */ 4750 if (size < regs[r1] || 4751 !DTRACE_INSCRATCH(mstate, size)) { 4752 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4753 regs[rd] = NULL; 4754 break; 4755 } 4756 4757 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 4758 mstate->dtms_scratch_ptr += size; 4759 regs[rd] = ptr; 4760 break; 4761 } 4762 4763 case DIF_OP_COPYS: 4764 if (!dtrace_canstore(regs[rd], regs[r2], 4765 mstate, vstate)) { 4766 *flags |= CPU_DTRACE_BADADDR; 4767 *illval = regs[rd]; 4768 break; 4769 } 4770 4771 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 4772 break; 4773 4774 dtrace_bcopy((void *)(uintptr_t)regs[r1], 4775 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 4776 break; 4777 4778 case DIF_OP_STB: 4779 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 4780 *flags |= CPU_DTRACE_BADADDR; 4781 *illval = regs[rd]; 4782 break; 4783 } 4784 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 4785 break; 4786 4787 case DIF_OP_STH: 4788 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 4789 *flags |= CPU_DTRACE_BADADDR; 4790 *illval = regs[rd]; 4791 break; 4792 } 4793 if (regs[rd] & 1) { 4794 *flags |= CPU_DTRACE_BADALIGN; 4795 *illval = regs[rd]; 4796 break; 4797 } 4798 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 4799 break; 4800 4801 case DIF_OP_STW: 4802 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 4803 *flags |= CPU_DTRACE_BADADDR; 4804 *illval = regs[rd]; 4805 break; 4806 } 4807 if (regs[rd] & 3) { 4808 *flags |= CPU_DTRACE_BADALIGN; 4809 *illval = regs[rd]; 4810 break; 4811 } 4812 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 4813 break; 4814 4815 case DIF_OP_STX: 4816 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 4817 *flags |= CPU_DTRACE_BADADDR; 4818 *illval = regs[rd]; 4819 break; 4820 } 4821 if (regs[rd] & 7) { 4822 *flags |= CPU_DTRACE_BADALIGN; 4823 *illval = regs[rd]; 4824 break; 4825 } 4826 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 4827 break; 4828 } 4829 } 4830 4831 if (!(*flags & CPU_DTRACE_FAULT)) 4832 return (rval); 4833 4834 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 4835 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 4836 4837 return (0); 4838 } 4839 4840 static void 4841 dtrace_action_breakpoint(dtrace_ecb_t *ecb) 4842 { 4843 dtrace_probe_t *probe = ecb->dte_probe; 4844 dtrace_provider_t *prov = probe->dtpr_provider; 4845 char c[DTRACE_FULLNAMELEN + 80], *str; 4846 char *msg = "dtrace: breakpoint action at probe "; 4847 char *ecbmsg = " (ecb "; 4848 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 4849 uintptr_t val = (uintptr_t)ecb; 4850 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 4851 4852 if (dtrace_destructive_disallow) 4853 return; 4854 4855 /* 4856 * It's impossible to be taking action on the NULL probe. 4857 */ 4858 ASSERT(probe != NULL); 4859 4860 /* 4861 * This is a poor man's (destitute man's?) sprintf(): we want to 4862 * print the provider name, module name, function name and name of 4863 * the probe, along with the hex address of the ECB with the breakpoint 4864 * action -- all of which we must place in the character buffer by 4865 * hand. 
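 *
 * (Illustrative aside, not part of the original comment: the
 * finished string handed to debug_enter() looks something like
 *
 *	dtrace: breakpoint action at probe fbt:genunix:kmem_alloc:entry (ecb ffffff01d4a2b3c8)
 *
 * where the probe tuple and ECB address are made-up examples.
 * The loop over "shift" below emits the ECB address one nibble
 * at a time, high nibble first, suppressing leading zeroes.)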
4866 */ 4867 while (*msg != '\0') 4868 c[i++] = *msg++; 4869 4870 for (str = prov->dtpv_name; *str != '\0'; str++) 4871 c[i++] = *str; 4872 c[i++] = ':'; 4873 4874 for (str = probe->dtpr_mod; *str != '\0'; str++) 4875 c[i++] = *str; 4876 c[i++] = ':'; 4877 4878 for (str = probe->dtpr_func; *str != '\0'; str++) 4879 c[i++] = *str; 4880 c[i++] = ':'; 4881 4882 for (str = probe->dtpr_name; *str != '\0'; str++) 4883 c[i++] = *str; 4884 4885 while (*ecbmsg != '\0') 4886 c[i++] = *ecbmsg++; 4887 4888 while (shift >= 0) { 4889 mask = (uintptr_t)0xf << shift; 4890 4891 if (val >= ((uintptr_t)1 << shift)) 4892 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 4893 shift -= 4; 4894 } 4895 4896 c[i++] = ')'; 4897 c[i] = '\0'; 4898 4899 debug_enter(c); 4900 } 4901 4902 static void 4903 dtrace_action_panic(dtrace_ecb_t *ecb) 4904 { 4905 dtrace_probe_t *probe = ecb->dte_probe; 4906 4907 /* 4908 * It's impossible to be taking action on the NULL probe. 4909 */ 4910 ASSERT(probe != NULL); 4911 4912 if (dtrace_destructive_disallow) 4913 return; 4914 4915 if (dtrace_panicked != NULL) 4916 return; 4917 4918 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 4919 return; 4920 4921 /* 4922 * We won the right to panic. (We want to be sure that only one 4923 * thread calls panic() from dtrace_probe(), and that panic() is 4924 * called exactly once.) 4925 */ 4926 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 4927 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 4928 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 4929 } 4930 4931 static void 4932 dtrace_action_raise(uint64_t sig) 4933 { 4934 if (dtrace_destructive_disallow) 4935 return; 4936 4937 if (sig >= NSIG) { 4938 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4939 return; 4940 } 4941 4942 /* 4943 * raise() has a queue depth of 1 -- we ignore all subsequent 4944 * invocations of the raise() action. 4945 */ 4946 if (curthread->t_dtrace_sig == 0) 4947 curthread->t_dtrace_sig = (uint8_t)sig; 4948 4949 curthread->t_sig_check = 1; 4950 aston(curthread); 4951 } 4952 4953 static void 4954 dtrace_action_stop(void) 4955 { 4956 if (dtrace_destructive_disallow) 4957 return; 4958 4959 if (!curthread->t_dtrace_stop) { 4960 curthread->t_dtrace_stop = 1; 4961 curthread->t_sig_check = 1; 4962 aston(curthread); 4963 } 4964 } 4965 4966 static void 4967 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 4968 { 4969 hrtime_t now; 4970 volatile uint16_t *flags; 4971 cpu_t *cpu = CPU; 4972 4973 if (dtrace_destructive_disallow) 4974 return; 4975 4976 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 4977 4978 now = dtrace_gethrtime(); 4979 4980 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 4981 /* 4982 * We need to advance the mark to the current time. 4983 */ 4984 cpu->cpu_dtrace_chillmark = now; 4985 cpu->cpu_dtrace_chilled = 0; 4986 } 4987 4988 /* 4989 * Now check to see if the requested chill time would take us over 4990 * the maximum amount of time allowed in the chill interval. (Or 4991 * worse, if the calculation itself induces overflow.) 4992 */ 4993 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 4994 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 4995 *flags |= CPU_DTRACE_ILLOP; 4996 return; 4997 } 4998 4999 while (dtrace_gethrtime() - now < val) 5000 continue; 5001 5002 /* 5003 * Normally, we assure that the value of the variable "timestamp" does 5004 * not change within an ECB. The presence of chill() represents an 5005 * exception to this rule, however. 
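 *
 * (Aside, not part of the original comment: having just spun for
 * up to dtrace_chill_max nanoseconds, any previously-computed
 * value of "timestamp" would be badly stale, so the
 * DTRACE_MSTATE_TIMESTAMP bit is cleared below to force it to be
 * recomputed on its next use.  The cpu_dtrace_chilled accounting
 * above bounds the total chill time to dtrace_chill_max per
 * dtrace_chill_interval on each CPU -- by default, half a second
 * of chilling in any one-second interval.)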
5006 */ 5007 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 5008 cpu->cpu_dtrace_chilled += val; 5009 } 5010 5011 static void 5012 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 5013 uint64_t *buf, uint64_t arg) 5014 { 5015 int nframes = DTRACE_USTACK_NFRAMES(arg); 5016 int strsize = DTRACE_USTACK_STRSIZE(arg); 5017 uint64_t *pcs = &buf[1], *fps; 5018 char *str = (char *)&pcs[nframes]; 5019 int size, offs = 0, i, j; 5020 uintptr_t old = mstate->dtms_scratch_ptr, saved; 5021 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 5022 char *sym; 5023 5024 /* 5025 * Should be taking a faster path if string space has not been 5026 * allocated. 5027 */ 5028 ASSERT(strsize != 0); 5029 5030 /* 5031 * We will first allocate some temporary space for the frame pointers. 5032 */ 5033 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5034 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 5035 (nframes * sizeof (uint64_t)); 5036 5037 if (!DTRACE_INSCRATCH(mstate, size)) { 5038 /* 5039 * Not enough room for our frame pointers -- need to indicate 5040 * that we ran out of scratch space. 5041 */ 5042 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5043 return; 5044 } 5045 5046 mstate->dtms_scratch_ptr += size; 5047 saved = mstate->dtms_scratch_ptr; 5048 5049 /* 5050 * Now get a stack with both program counters and frame pointers. 5051 */ 5052 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5053 dtrace_getufpstack(buf, fps, nframes + 1); 5054 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5055 5056 /* 5057 * If that faulted, we're cooked. 5058 */ 5059 if (*flags & CPU_DTRACE_FAULT) 5060 goto out; 5061 5062 /* 5063 * Now we want to walk up the stack, calling the USTACK helper. For 5064 * each iteration, we restore the scratch pointer. 5065 */ 5066 for (i = 0; i < nframes; i++) { 5067 mstate->dtms_scratch_ptr = saved; 5068 5069 if (offs >= strsize) 5070 break; 5071 5072 sym = (char *)(uintptr_t)dtrace_helper( 5073 DTRACE_HELPER_ACTION_USTACK, 5074 mstate, state, pcs[i], fps[i]); 5075 5076 /* 5077 * If we faulted while running the helper, we're going to 5078 * clear the fault and null out the corresponding string. 5079 */ 5080 if (*flags & CPU_DTRACE_FAULT) { 5081 *flags &= ~CPU_DTRACE_FAULT; 5082 str[offs++] = '\0'; 5083 continue; 5084 } 5085 5086 if (sym == NULL) { 5087 str[offs++] = '\0'; 5088 continue; 5089 } 5090 5091 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5092 5093 /* 5094 * Now copy in the string that the helper returned to us. 5095 */ 5096 for (j = 0; offs + j < strsize; j++) { 5097 if ((str[offs + j] = sym[j]) == '\0') 5098 break; 5099 } 5100 5101 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5102 5103 offs += j + 1; 5104 } 5105 5106 if (offs >= strsize) { 5107 /* 5108 * If we didn't have room for all of the strings, we don't 5109 * abort processing -- this needn't be a fatal error -- but we 5110 * still want to increment a counter (dts_stkstroverflows) to 5111 * allow this condition to be warned about. (If this is from 5112 * a jstack() action, it is easily tuned via jstackstrsize.) 5113 */ 5114 dtrace_error(&state->dts_stkstroverflows); 5115 } 5116 5117 while (offs < strsize) 5118 str[offs++] = '\0'; 5119 5120 out: 5121 mstate->dtms_scratch_ptr = old; 5122 } 5123 5124 /* 5125 * If you're looking for the epicenter of DTrace, you just found it. This 5126 * is the function called by the provider to fire a probe -- from which all 5127 * subsequent probe-context DTrace activity emanates. 
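 *
 * (Illustrative aside, not part of the original comment: a
 * provider fires a probe by calling this function with the
 * identifier it was handed at dtrace_probe_create() time, plus
 * up to five probe arguments.  A hypothetical driver provider
 * might fire an I/O probe as
 *
 *	dtrace_probe(mydrv_strategy_id, (uintptr_t)bp,
 *	    (uintptr_t)bp->b_bcount, 0, 0, 0);
 *
 * where "mydrv_strategy_id" and "bp" are stand-ins, not names
 * from this file.)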
5128 */ 5129 void 5130 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 5131 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 5132 { 5133 processorid_t cpuid; 5134 dtrace_icookie_t cookie; 5135 dtrace_probe_t *probe; 5136 dtrace_mstate_t mstate; 5137 dtrace_ecb_t *ecb; 5138 dtrace_action_t *act; 5139 intptr_t offs; 5140 size_t size; 5141 int vtime, onintr; 5142 volatile uint16_t *flags; 5143 hrtime_t now; 5144 5145 /* 5146 * Kick out immediately if this CPU is still being born (in which case 5147 * curthread will be set to -1) 5148 */ 5149 if ((uintptr_t)curthread & 1) 5150 return; 5151 5152 cookie = dtrace_interrupt_disable(); 5153 probe = dtrace_probes[id - 1]; 5154 cpuid = CPU->cpu_id; 5155 onintr = CPU_ON_INTR(CPU); 5156 5157 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 5158 probe->dtpr_predcache == curthread->t_predcache) { 5159 /* 5160 * We have hit in the predicate cache; we know that 5161 * this predicate would evaluate to be false. 5162 */ 5163 dtrace_interrupt_enable(cookie); 5164 return; 5165 } 5166 5167 if (panic_quiesce) { 5168 /* 5169 * We don't trace anything if we're panicking. 5170 */ 5171 dtrace_interrupt_enable(cookie); 5172 return; 5173 } 5174 5175 now = dtrace_gethrtime(); 5176 vtime = dtrace_vtime_references != 0; 5177 5178 if (vtime && curthread->t_dtrace_start) 5179 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 5180 5181 mstate.dtms_difo = NULL; 5182 mstate.dtms_probe = probe; 5183 mstate.dtms_strtok = NULL; 5184 mstate.dtms_arg[0] = arg0; 5185 mstate.dtms_arg[1] = arg1; 5186 mstate.dtms_arg[2] = arg2; 5187 mstate.dtms_arg[3] = arg3; 5188 mstate.dtms_arg[4] = arg4; 5189 5190 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 5191 5192 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 5193 dtrace_predicate_t *pred = ecb->dte_predicate; 5194 dtrace_state_t *state = ecb->dte_state; 5195 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 5196 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 5197 dtrace_vstate_t *vstate = &state->dts_vstate; 5198 dtrace_provider_t *prov = probe->dtpr_provider; 5199 int committed = 0; 5200 caddr_t tomax; 5201 5202 /* 5203 * A little subtlety with the following (seemingly innocuous) 5204 * declaration of the automatic 'val': by looking at the 5205 * code, you might think that it could be declared in the 5206 * action processing loop, below. (That is, it's only used in 5207 * the action processing loop.) However, it must be declared 5208 * out of that scope because in the case of DIF expression 5209 * arguments to aggregating actions, one iteration of the 5210 * action loop will use the last iteration's value. 5211 */ 5212 #ifdef lint 5213 uint64_t val = 0; 5214 #else 5215 uint64_t val; 5216 #endif 5217 5218 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 5219 *flags &= ~CPU_DTRACE_ERROR; 5220 5221 if (prov == dtrace_provider) { 5222 /* 5223 * If dtrace itself is the provider of this probe, 5224 * we're only going to continue processing the ECB if 5225 * arg0 (the dtrace_state_t) is equal to the ECB's 5226 * creating state. (This prevents disjoint consumers 5227 * from seeing one another's metaprobes.) 5228 */ 5229 if (arg0 != (uint64_t)(uintptr_t)state) 5230 continue; 5231 } 5232 5233 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 5234 /* 5235 * We're not currently active. If our provider isn't 5236 * the dtrace pseudo provider, we're not interested. 
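 *
 * (Aside, not part of the original comment: a consumer's
 * activity normally advances WARMUP (while BEGIN is processed)
 * to ACTIVE to DRAINING (once exit() has been called) to
 * COOLDOWN (while END is processed) to STOPPED; the checks here
 * and below rely on that ordering.)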
5237 */ 5238 if (prov != dtrace_provider) 5239 continue; 5240 5241 /* 5242 * Now we must further check if we are in the BEGIN 5243 * probe. If we are, we will only continue processing 5244 * if we're still in WARMUP -- if one BEGIN enabling 5245 * has invoked the exit() action, we don't want to 5246 * evaluate subsequent BEGIN enablings. 5247 */ 5248 if (probe->dtpr_id == dtrace_probeid_begin && 5249 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 5250 ASSERT(state->dts_activity == 5251 DTRACE_ACTIVITY_DRAINING); 5252 continue; 5253 } 5254 } 5255 5256 if (ecb->dte_cond) { 5257 /* 5258 * If the dte_cond bits indicate that this 5259 * consumer is only allowed to see user-mode firings 5260 * of this probe, call the provider's dtps_usermode() 5261 * entry point to check that the probe was fired 5262 * while in a user context. Skip this ECB if that's 5263 * not the case. 5264 */ 5265 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 5266 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 5267 probe->dtpr_id, probe->dtpr_arg) == 0) 5268 continue; 5269 5270 /* 5271 * This is more subtle than it looks. We have to be 5272 * absolutely certain that CRED() isn't going to 5273 * change out from under us so it's only legit to 5274 * examine that structure if we're in constrained 5275 * situations. Currently, the only time we'll perform 5276 * this check is when a non-super-user has enabled the 5277 * profile or syscall providers -- providers that 5278 * allow visibility of all processes. For the 5279 * profile case, the check above will ensure that 5280 * we're examining a user context. 5281 */ 5282 if (ecb->dte_cond & DTRACE_COND_OWNER) { 5283 cred_t *cr; 5284 cred_t *s_cr = 5285 ecb->dte_state->dts_cred.dcr_cred; 5286 proc_t *proc; 5287 5288 ASSERT(s_cr != NULL); 5289 5290 if ((cr = CRED()) == NULL || 5291 s_cr->cr_uid != cr->cr_uid || 5292 s_cr->cr_uid != cr->cr_ruid || 5293 s_cr->cr_uid != cr->cr_suid || 5294 s_cr->cr_gid != cr->cr_gid || 5295 s_cr->cr_gid != cr->cr_rgid || 5296 s_cr->cr_gid != cr->cr_sgid || 5297 (proc = ttoproc(curthread)) == NULL || 5298 (proc->p_flag & SNOCD)) 5299 continue; 5300 } 5301 5302 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 5303 cred_t *cr; 5304 cred_t *s_cr = 5305 ecb->dte_state->dts_cred.dcr_cred; 5306 5307 ASSERT(s_cr != NULL); 5308 5309 if ((cr = CRED()) == NULL || 5310 s_cr->cr_zone->zone_id != 5311 cr->cr_zone->zone_id) 5312 continue; 5313 } 5314 } 5315 5316 if (now - state->dts_alive > dtrace_deadman_timeout) { 5317 /* 5318 * We seem to be dead. Unless we (a) have kernel 5319 * destructive permissions, (b) have explicitly enabled 5320 * destructive actions, and (c) destructive actions have 5321 * not been disabled, we're going to transition into 5322 * the KILLED state, from which no further processing 5323 * on this state will be performed.
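 *
 * (Aside, not part of the original comment: the compare-and-swap
 * loop below is used rather than a plain store because other
 * CPUs may concurrently be transitioning dts_activity; the loop
 * retries until KILLED has been installed over whatever state
 * was current.)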
5324 */ 5325 if (!dtrace_priv_kernel_destructive(state) || 5326 !state->dts_cred.dcr_destructive || 5327 dtrace_destructive_disallow) { 5328 void *activity = &state->dts_activity; 5329 dtrace_activity_t current; 5330 5331 do { 5332 current = state->dts_activity; 5333 } while (dtrace_cas32(activity, current, 5334 DTRACE_ACTIVITY_KILLED) != current); 5335 5336 continue; 5337 } 5338 } 5339 5340 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 5341 ecb->dte_alignment, state, &mstate)) < 0) 5342 continue; 5343 5344 tomax = buf->dtb_tomax; 5345 ASSERT(tomax != NULL); 5346 5347 if (ecb->dte_size != 0) 5348 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid); 5349 5350 mstate.dtms_epid = ecb->dte_epid; 5351 mstate.dtms_present |= DTRACE_MSTATE_EPID; 5352 5353 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) 5354 mstate.dtms_access = DTRACE_ACCESS_KERNEL; 5355 else 5356 mstate.dtms_access = 0; 5357 5358 if (pred != NULL) { 5359 dtrace_difo_t *dp = pred->dtp_difo; 5360 int rval; 5361 5362 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 5363 5364 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 5365 dtrace_cacheid_t cid = probe->dtpr_predcache; 5366 5367 if (cid != DTRACE_CACHEIDNONE && !onintr) { 5368 /* 5369 * Update the predicate cache... 5370 */ 5371 ASSERT(cid == pred->dtp_cacheid); 5372 curthread->t_predcache = cid; 5373 } 5374 5375 continue; 5376 } 5377 } 5378 5379 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 5380 act != NULL; act = act->dta_next) { 5381 size_t valoffs; 5382 dtrace_difo_t *dp; 5383 dtrace_recdesc_t *rec = &act->dta_rec; 5384 5385 size = rec->dtrd_size; 5386 valoffs = offs + rec->dtrd_offset; 5387 5388 if (DTRACEACT_ISAGG(act->dta_kind)) { 5389 uint64_t v = 0xbad; 5390 dtrace_aggregation_t *agg; 5391 5392 agg = (dtrace_aggregation_t *)act; 5393 5394 if ((dp = act->dta_difo) != NULL) 5395 v = dtrace_dif_emulate(dp, 5396 &mstate, vstate, state); 5397 5398 if (*flags & CPU_DTRACE_ERROR) 5399 continue; 5400 5401 /* 5402 * Note that we always pass the expression 5403 * value from the previous iteration of the 5404 * action loop. This value will only be used 5405 * if there is an expression argument to the 5406 * aggregating action, denoted by the 5407 * dtag_hasarg field. 5408 */ 5409 dtrace_aggregate(agg, buf, 5410 offs, aggbuf, v, val); 5411 continue; 5412 } 5413 5414 switch (act->dta_kind) { 5415 case DTRACEACT_STOP: 5416 if (dtrace_priv_proc_destructive(state)) 5417 dtrace_action_stop(); 5418 continue; 5419 5420 case DTRACEACT_BREAKPOINT: 5421 if (dtrace_priv_kernel_destructive(state)) 5422 dtrace_action_breakpoint(ecb); 5423 continue; 5424 5425 case DTRACEACT_PANIC: 5426 if (dtrace_priv_kernel_destructive(state)) 5427 dtrace_action_panic(ecb); 5428 continue; 5429 5430 case DTRACEACT_STACK: 5431 if (!dtrace_priv_kernel(state)) 5432 continue; 5433 5434 dtrace_getpcstack((pc_t *)(tomax + valoffs), 5435 size / sizeof (pc_t), probe->dtpr_aframes, 5436 DTRACE_ANCHORED(probe) ? NULL : 5437 (uint32_t *)arg0); 5438 5439 continue; 5440 5441 case DTRACEACT_JSTACK: 5442 case DTRACEACT_USTACK: 5443 if (!dtrace_priv_proc(state)) 5444 continue; 5445 5446 /* 5447 * See comment in DIF_VAR_PID. 
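 *
 * (Paraphrasing that comment, which appears earlier in this
 * file: when an anchored probe fires in interrupt context, the
 * interrupted thread's user stack bears no relation to the
 * probe, so rather than record a misleading ustack() we
 * zero-fill the record below.)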
5448 */ 5449 if (DTRACE_ANCHORED(mstate.dtms_probe) && 5450 CPU_ON_INTR(CPU)) { 5451 int depth = DTRACE_USTACK_NFRAMES( 5452 rec->dtrd_arg) + 1; 5453 5454 dtrace_bzero((void *)(tomax + valoffs), 5455 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 5456 + depth * sizeof (uint64_t)); 5457 5458 continue; 5459 } 5460 5461 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 5462 curproc->p_dtrace_helpers != NULL) { 5463 /* 5464 * This is the slow path -- we have 5465 * allocated string space, and we're 5466 * getting the stack of a process that 5467 * has helpers. Call into a separate 5468 * routine to perform this processing. 5469 */ 5470 dtrace_action_ustack(&mstate, state, 5471 (uint64_t *)(tomax + valoffs), 5472 rec->dtrd_arg); 5473 continue; 5474 } 5475 5476 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5477 dtrace_getupcstack((uint64_t *) 5478 (tomax + valoffs), 5479 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 5480 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5481 continue; 5482 5483 default: 5484 break; 5485 } 5486 5487 dp = act->dta_difo; 5488 ASSERT(dp != NULL); 5489 5490 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 5491 5492 if (*flags & CPU_DTRACE_ERROR) 5493 continue; 5494 5495 switch (act->dta_kind) { 5496 case DTRACEACT_SPECULATE: 5497 ASSERT(buf == &state->dts_buffer[cpuid]); 5498 buf = dtrace_speculation_buffer(state, 5499 cpuid, val); 5500 5501 if (buf == NULL) { 5502 *flags |= CPU_DTRACE_DROP; 5503 continue; 5504 } 5505 5506 offs = dtrace_buffer_reserve(buf, 5507 ecb->dte_needed, ecb->dte_alignment, 5508 state, NULL); 5509 5510 if (offs < 0) { 5511 *flags |= CPU_DTRACE_DROP; 5512 continue; 5513 } 5514 5515 tomax = buf->dtb_tomax; 5516 ASSERT(tomax != NULL); 5517 5518 if (ecb->dte_size != 0) 5519 DTRACE_STORE(uint32_t, tomax, offs, 5520 ecb->dte_epid); 5521 continue; 5522 5523 case DTRACEACT_CHILL: 5524 if (dtrace_priv_kernel_destructive(state)) 5525 dtrace_action_chill(&mstate, val); 5526 continue; 5527 5528 case DTRACEACT_RAISE: 5529 if (dtrace_priv_proc_destructive(state)) 5530 dtrace_action_raise(val); 5531 continue; 5532 5533 case DTRACEACT_COMMIT: 5534 ASSERT(!committed); 5535 5536 /* 5537 * We need to commit our buffer state. 5538 */ 5539 if (ecb->dte_size) 5540 buf->dtb_offset = offs + ecb->dte_size; 5541 buf = &state->dts_buffer[cpuid]; 5542 dtrace_speculation_commit(state, cpuid, val); 5543 committed = 1; 5544 continue; 5545 5546 case DTRACEACT_DISCARD: 5547 dtrace_speculation_discard(state, cpuid, val); 5548 continue; 5549 5550 case DTRACEACT_DIFEXPR: 5551 case DTRACEACT_LIBACT: 5552 case DTRACEACT_PRINTF: 5553 case DTRACEACT_PRINTA: 5554 case DTRACEACT_SYSTEM: 5555 case DTRACEACT_FREOPEN: 5556 break; 5557 5558 case DTRACEACT_SYM: 5559 case DTRACEACT_MOD: 5560 if (!dtrace_priv_kernel(state)) 5561 continue; 5562 break; 5563 5564 case DTRACEACT_USYM: 5565 case DTRACEACT_UMOD: 5566 case DTRACEACT_UADDR: { 5567 struct pid *pid = curthread->t_procp->p_pidp; 5568 5569 if (!dtrace_priv_proc(state)) 5570 continue; 5571 5572 DTRACE_STORE(uint64_t, tomax, 5573 valoffs, (uint64_t)pid->pid_id); 5574 DTRACE_STORE(uint64_t, tomax, 5575 valoffs + sizeof (uint64_t), val); 5576 5577 continue; 5578 } 5579 5580 case DTRACEACT_EXIT: { 5581 /* 5582 * For the exit action, we are going to attempt 5583 * to atomically set our activity to be 5584 * draining. If this fails (either because 5585 * another CPU has beat us to the exit action, 5586 * or because our current activity is something 5587 * other than ACTIVE or WARMUP), we will 5588 * continue. 
This assures that the exit action 5589 * can be successfully recorded at most once 5590 * when we're in the ACTIVE state. If we're 5591 * encountering the exit() action while in 5592 * COOLDOWN, however, we want to honor the new 5593 * status code. (We know that we're the only 5594 * thread in COOLDOWN, so there is no race.) 5595 */ 5596 void *activity = &state->dts_activity; 5597 dtrace_activity_t current = state->dts_activity; 5598 5599 if (current == DTRACE_ACTIVITY_COOLDOWN) 5600 break; 5601 5602 if (current != DTRACE_ACTIVITY_WARMUP) 5603 current = DTRACE_ACTIVITY_ACTIVE; 5604 5605 if (dtrace_cas32(activity, current, 5606 DTRACE_ACTIVITY_DRAINING) != current) { 5607 *flags |= CPU_DTRACE_DROP; 5608 continue; 5609 } 5610 5611 break; 5612 } 5613 5614 default: 5615 ASSERT(0); 5616 } 5617 5618 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 5619 uintptr_t end = valoffs + size; 5620 5621 if (!dtrace_vcanload((void *)(uintptr_t)val, 5622 &dp->dtdo_rtype, &mstate, vstate)) 5623 continue; 5624 5625 /* 5626 * If this is a string, we're going to only 5627 * load until we find the zero byte -- after 5628 * which we'll store zero bytes. 5629 */ 5630 if (dp->dtdo_rtype.dtdt_kind == 5631 DIF_TYPE_STRING) { 5632 char c = '\0' + 1; 5633 int intuple = act->dta_intuple; 5634 size_t s; 5635 5636 for (s = 0; s < size; s++) { 5637 if (c != '\0') 5638 c = dtrace_load8(val++); 5639 5640 DTRACE_STORE(uint8_t, tomax, 5641 valoffs++, c); 5642 5643 if (c == '\0' && intuple) 5644 break; 5645 } 5646 5647 continue; 5648 } 5649 5650 while (valoffs < end) { 5651 DTRACE_STORE(uint8_t, tomax, valoffs++, 5652 dtrace_load8(val++)); 5653 } 5654 5655 continue; 5656 } 5657 5658 switch (size) { 5659 case 0: 5660 break; 5661 5662 case sizeof (uint8_t): 5663 DTRACE_STORE(uint8_t, tomax, valoffs, val); 5664 break; 5665 case sizeof (uint16_t): 5666 DTRACE_STORE(uint16_t, tomax, valoffs, val); 5667 break; 5668 case sizeof (uint32_t): 5669 DTRACE_STORE(uint32_t, tomax, valoffs, val); 5670 break; 5671 case sizeof (uint64_t): 5672 DTRACE_STORE(uint64_t, tomax, valoffs, val); 5673 break; 5674 default: 5675 /* 5676 * Any other size should have been returned by 5677 * reference, not by value. 5678 */ 5679 ASSERT(0); 5680 break; 5681 } 5682 } 5683 5684 if (*flags & CPU_DTRACE_DROP) 5685 continue; 5686 5687 if (*flags & CPU_DTRACE_FAULT) { 5688 int ndx; 5689 dtrace_action_t *err; 5690 5691 buf->dtb_errors++; 5692 5693 if (probe->dtpr_id == dtrace_probeid_error) { 5694 /* 5695 * There's nothing we can do -- we had an 5696 * error on the error probe. We bump an 5697 * error counter to at least indicate that 5698 * this condition happened. 5699 */ 5700 dtrace_error(&state->dts_dblerrors); 5701 continue; 5702 } 5703 5704 if (vtime) { 5705 /* 5706 * Before recursing on dtrace_probe(), we 5707 * need to explicitly clear out our start 5708 * time to prevent it from being accumulated 5709 * into t_dtrace_vtime. 5710 */ 5711 curthread->t_dtrace_start = 0; 5712 } 5713 5714 /* 5715 * Iterate over the actions to figure out which action 5716 * we were processing when we experienced the error. 5717 * Note that act points _past_ the faulting action; if 5718 * act is ecb->dte_action, the fault was in the 5719 * predicate, if it's ecb->dte_action->dta_next it's 5720 * in action #1, and so on. 5721 */ 5722 for (err = ecb->dte_action, ndx = 0; 5723 err != act; err = err->dta_next, ndx++) 5724 continue; 5725 5726 dtrace_probe_error(state, ecb->dte_epid, ndx, 5727 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 
5728 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 5729 cpu_core[cpuid].cpuc_dtrace_illval); 5730 5731 continue; 5732 } 5733 5734 if (!committed) 5735 buf->dtb_offset = offs + ecb->dte_size; 5736 } 5737 5738 if (vtime) 5739 curthread->t_dtrace_start = dtrace_gethrtime(); 5740 5741 dtrace_interrupt_enable(cookie); 5742 } 5743 5744 /* 5745 * DTrace Probe Hashing Functions 5746 * 5747 * The functions in this section (and indeed, the functions in the remaining 5748 * sections) are not _called_ from probe context. (Any exceptions to this are 5749 * marked with a "Note:".) Rather, they are called from elsewhere in the 5750 * DTrace framework to look up probes in, add probes to, and remove probes from 5751 * the DTrace probe hashes. (Each probe is hashed by each element of the 5752 * probe tuple -- allowing for fast lookups, regardless of what was 5753 * specified.) 5754 */ 5755 static uint_t 5756 dtrace_hash_str(char *p) 5757 { 5758 unsigned int g; 5759 uint_t hval = 0; 5760 5761 while (*p) { 5762 hval = (hval << 4) + *p++; 5763 if ((g = (hval & 0xf0000000)) != 0) 5764 hval ^= g >> 24; 5765 hval &= ~g; 5766 } 5767 return (hval); 5768 } 5769 5770 static dtrace_hash_t * 5771 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 5772 { 5773 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 5774 5775 hash->dth_stroffs = stroffs; 5776 hash->dth_nextoffs = nextoffs; 5777 hash->dth_prevoffs = prevoffs; 5778 5779 hash->dth_size = 1; 5780 hash->dth_mask = hash->dth_size - 1; 5781 5782 hash->dth_tab = kmem_zalloc(hash->dth_size * 5783 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 5784 5785 return (hash); 5786 } 5787 5788 static void 5789 dtrace_hash_destroy(dtrace_hash_t *hash) 5790 { 5791 #ifdef DEBUG 5792 int i; 5793 5794 for (i = 0; i < hash->dth_size; i++) 5795 ASSERT(hash->dth_tab[i] == NULL); 5796 #endif 5797 5798 kmem_free(hash->dth_tab, 5799 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 5800 kmem_free(hash, sizeof (dtrace_hash_t)); 5801 } 5802 5803 static void 5804 dtrace_hash_resize(dtrace_hash_t *hash) 5805 { 5806 int size = hash->dth_size, i, ndx; 5807 int new_size = hash->dth_size << 1; 5808 int new_mask = new_size - 1; 5809 dtrace_hashbucket_t **new_tab, *bucket, *next; 5810 5811 ASSERT((new_size & new_mask) == 0); 5812 5813 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 5814 5815 for (i = 0; i < size; i++) { 5816 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 5817 dtrace_probe_t *probe = bucket->dthb_chain; 5818 5819 ASSERT(probe != NULL); 5820 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 5821 5822 next = bucket->dthb_next; 5823 bucket->dthb_next = new_tab[ndx]; 5824 new_tab[ndx] = bucket; 5825 } 5826 } 5827 5828 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 5829 hash->dth_tab = new_tab; 5830 hash->dth_size = new_size; 5831 hash->dth_mask = new_mask; 5832 } 5833 5834 static void 5835 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 5836 { 5837 int hashval = DTRACE_HASHSTR(hash, new); 5838 int ndx = hashval & hash->dth_mask; 5839 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5840 dtrace_probe_t **nextp, **prevp; 5841 5842 for (; bucket != NULL; bucket = bucket->dthb_next) { 5843 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 5844 goto add; 5845 } 5846 5847 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 5848 dtrace_hash_resize(hash); 5849 dtrace_hash_add(hash, new); 5850 return; 5851 } 5852 5853 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 5854 bucket->dthb_next =
hash->dth_tab[ndx]; 5855 hash->dth_tab[ndx] = bucket; 5856 hash->dth_nbuckets++; 5857 5858 add: 5859 nextp = DTRACE_HASHNEXT(hash, new); 5860 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 5861 *nextp = bucket->dthb_chain; 5862 5863 if (bucket->dthb_chain != NULL) { 5864 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 5865 ASSERT(*prevp == NULL); 5866 *prevp = new; 5867 } 5868 5869 bucket->dthb_chain = new; 5870 bucket->dthb_len++; 5871 } 5872 5873 static dtrace_probe_t * 5874 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 5875 { 5876 int hashval = DTRACE_HASHSTR(hash, template); 5877 int ndx = hashval & hash->dth_mask; 5878 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5879 5880 for (; bucket != NULL; bucket = bucket->dthb_next) { 5881 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 5882 return (bucket->dthb_chain); 5883 } 5884 5885 return (NULL); 5886 } 5887 5888 static int 5889 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 5890 { 5891 int hashval = DTRACE_HASHSTR(hash, template); 5892 int ndx = hashval & hash->dth_mask; 5893 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5894 5895 for (; bucket != NULL; bucket = bucket->dthb_next) { 5896 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 5897 return (bucket->dthb_len); 5898 } 5899 5900 return (0); 5901 } 5902 5903 static void 5904 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 5905 { 5906 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 5907 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 5908 5909 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 5910 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 5911 5912 /* 5913 * Find the bucket that we're removing this probe from. 5914 */ 5915 for (; bucket != NULL; bucket = bucket->dthb_next) { 5916 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 5917 break; 5918 } 5919 5920 ASSERT(bucket != NULL); 5921 5922 if (*prevp == NULL) { 5923 if (*nextp == NULL) { 5924 /* 5925 * The removed probe was the only probe on this 5926 * bucket; we need to remove the bucket. 5927 */ 5928 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 5929 5930 ASSERT(bucket->dthb_chain == probe); 5931 ASSERT(b != NULL); 5932 5933 if (b == bucket) { 5934 hash->dth_tab[ndx] = bucket->dthb_next; 5935 } else { 5936 while (b->dthb_next != bucket) 5937 b = b->dthb_next; 5938 b->dthb_next = bucket->dthb_next; 5939 } 5940 5941 ASSERT(hash->dth_nbuckets > 0); 5942 hash->dth_nbuckets--; 5943 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 5944 return; 5945 } 5946 5947 bucket->dthb_chain = *nextp; 5948 } else { 5949 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 5950 } 5951 5952 if (*nextp != NULL) 5953 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 5954 } 5955 5956 /* 5957 * DTrace Utility Functions 5958 * 5959 * These are random utility functions that are _not_ called from probe context. 5960 */ 5961 static int 5962 dtrace_badattr(const dtrace_attribute_t *a) 5963 { 5964 return (a->dtat_name > DTRACE_STABILITY_MAX || 5965 a->dtat_data > DTRACE_STABILITY_MAX || 5966 a->dtat_class > DTRACE_CLASS_MAX); 5967 } 5968 5969 /* 5970 * Return a duplicate of a string. If the specified string is NULL, 5971 * this function returns a zero-length string. 5972 */ 5973 static char * 5974 dtrace_strdup(const char *str) 5975 { 5976 char *new = kmem_zalloc((str != NULL ?
strlen(str) : 0) + 1, KM_SLEEP); 5977 5978 if (str != NULL) 5979 (void) strcpy(new, str); 5980 5981 return (new); 5982 } 5983 5984 #define DTRACE_ISALPHA(c) \ 5985 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 5986 5987 static int 5988 dtrace_badname(const char *s) 5989 { 5990 char c; 5991 5992 if (s == NULL || (c = *s++) == '\0') 5993 return (0); 5994 5995 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 5996 return (1); 5997 5998 while ((c = *s++) != '\0') { 5999 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 6000 c != '-' && c != '_' && c != '.' && c != '`') 6001 return (1); 6002 } 6003 6004 return (0); 6005 } 6006 6007 static void 6008 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 6009 { 6010 uint32_t priv; 6011 6012 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 6013 /* 6014 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 6015 */ 6016 priv = DTRACE_PRIV_ALL; 6017 } else { 6018 *uidp = crgetuid(cr); 6019 *zoneidp = crgetzoneid(cr); 6020 6021 priv = 0; 6022 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 6023 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 6024 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 6025 priv |= DTRACE_PRIV_USER; 6026 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 6027 priv |= DTRACE_PRIV_PROC; 6028 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 6029 priv |= DTRACE_PRIV_OWNER; 6030 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 6031 priv |= DTRACE_PRIV_ZONEOWNER; 6032 } 6033 6034 *privp = priv; 6035 } 6036 6037 #ifdef DTRACE_ERRDEBUG 6038 static void 6039 dtrace_errdebug(const char *str) 6040 { 6041 int hval = dtrace_hash_str((char *)str) % DTRACE_ERRHASHSZ; 6042 int occupied = 0; 6043 6044 mutex_enter(&dtrace_errlock); 6045 dtrace_errlast = str; 6046 dtrace_errthread = curthread; 6047 6048 while (occupied++ < DTRACE_ERRHASHSZ) { 6049 if (dtrace_errhash[hval].dter_msg == str) { 6050 dtrace_errhash[hval].dter_count++; 6051 goto out; 6052 } 6053 6054 if (dtrace_errhash[hval].dter_msg != NULL) { 6055 hval = (hval + 1) % DTRACE_ERRHASHSZ; 6056 continue; 6057 } 6058 6059 dtrace_errhash[hval].dter_msg = str; 6060 dtrace_errhash[hval].dter_count = 1; 6061 goto out; 6062 } 6063 6064 panic("dtrace: undersized error hash"); 6065 out: 6066 mutex_exit(&dtrace_errlock); 6067 } 6068 #endif 6069 6070 /* 6071 * DTrace Matching Functions 6072 * 6073 * These functions are used to match groups of probes, given some elements of 6074 * a probe tuple, or some globbed expressions for elements of a probe tuple. 6075 */ 6076 static int 6077 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 6078 zoneid_t zoneid) 6079 { 6080 if (priv != DTRACE_PRIV_ALL) { 6081 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 6082 uint32_t match = priv & ppriv; 6083 6084 /* 6085 * No PRIV_DTRACE_* privileges... 6086 */ 6087 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 6088 DTRACE_PRIV_KERNEL)) == 0) 6089 return (0); 6090 6091 /* 6092 * No matching bits, but there were bits to match... 6093 */ 6094 if (match == 0 && ppriv != 0) 6095 return (0); 6096 6097 /* 6098 * Need to have permissions to the process, but don't... 6099 */ 6100 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 6101 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 6102 return (0); 6103 } 6104 6105 /* 6106 * Need to be in the same zone unless we possess the 6107 * privilege to examine all zones. 
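 *
 * (An illustrative example, not in the original source: if a probe's
 * provider was registered with DTRACE_PRIV_ZONEOWNER among its privilege
 * flags, a consumer whose credentials lack PRIV_PROC_ZONE -- and therefore
 * DTRACE_PRIV_ZONEOWNER -- can match the probe only when its zoneid equals
 * the provider's dtpp_zoneid; a consumer holding that privilege matches
 * from any zone.)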
6108 */ 6109 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 6110 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 6111 return (0); 6112 } 6113 } 6114 6115 return (1); 6116 } 6117 6118 /* 6119 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 6120 * consists of input pattern strings and an ops-vector to evaluate them. 6121 * This function returns >0 for match, 0 for no match, and <0 for error. 6122 */ 6123 static int 6124 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 6125 uint32_t priv, uid_t uid, zoneid_t zoneid) 6126 { 6127 dtrace_provider_t *pvp = prp->dtpr_provider; 6128 int rv; 6129 6130 if (pvp->dtpv_defunct) 6131 return (0); 6132 6133 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 6134 return (rv); 6135 6136 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 6137 return (rv); 6138 6139 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 6140 return (rv); 6141 6142 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 6143 return (rv); 6144 6145 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 6146 return (0); 6147 6148 return (rv); 6149 } 6150 6151 /* 6152 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 6153 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 6154 * libc's version, the kernel version only applies to 8-bit ASCII strings. 6155 * In addition, all of the recursion cases except for '*' matching have been 6156 * unwound. For '*', we still implement recursive evaluation, but a depth 6157 * counter is maintained and matching is aborted if we recurse too deep. 6158 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 6159 */ 6160 static int 6161 dtrace_match_glob(const char *s, const char *p, int depth) 6162 { 6163 const char *olds; 6164 char s1, c; 6165 int gs; 6166 6167 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 6168 return (-1); 6169 6170 if (s == NULL) 6171 s = ""; /* treat NULL as empty string */ 6172 6173 top: 6174 olds = s; 6175 s1 = *s++; 6176 6177 if (p == NULL) 6178 return (0); 6179 6180 if ((c = *p++) == '\0') 6181 return (s1 == '\0'); 6182 6183 switch (c) { 6184 case '[': { 6185 int ok = 0, notflag = 0; 6186 char lc = '\0'; 6187 6188 if (s1 == '\0') 6189 return (0); 6190 6191 if (*p == '!') { 6192 notflag = 1; 6193 p++; 6194 } 6195 6196 if ((c = *p++) == '\0') 6197 return (0); 6198 6199 do { 6200 if (c == '-' && lc != '\0' && *p != ']') { 6201 if ((c = *p++) == '\0') 6202 return (0); 6203 if (c == '\\' && (c = *p++) == '\0') 6204 return (0); 6205 6206 if (notflag) { 6207 if (s1 < lc || s1 > c) 6208 ok++; 6209 else 6210 return (0); 6211 } else if (lc <= s1 && s1 <= c) 6212 ok++; 6213 6214 } else if (c == '\\' && (c = *p++) == '\0') 6215 return (0); 6216 6217 lc = c; /* save left-hand 'c' for next iteration */ 6218 6219 if (notflag) { 6220 if (s1 != c) 6221 ok++; 6222 else 6223 return (0); 6224 } else if (s1 == c) 6225 ok++; 6226 6227 if ((c = *p++) == '\0') 6228 return (0); 6229 6230 } while (c != ']'); 6231 6232 if (ok) 6233 goto top; 6234 6235 return (0); 6236 } 6237 6238 case '\\': 6239 if ((c = *p++) == '\0') 6240 return (0); 6241 /*FALLTHRU*/ 6242 6243 default: 6244 if (c != s1) 6245 return (0); 6246 /*FALLTHRU*/ 6247 6248 case '?': 6249 if (s1 != '\0') 6250 goto top; 6251 return (0); 6252 6253 case '*': 6254 while (*p == '*') 6255 p++; /* consecutive *'s are identical to a single one */ 6256 6257 if (*p == '\0') 6258 return (1); 6259 6260 for (s = 
olds; *s != '\0'; s++) { 6261 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 6262 return (gs); 6263 } 6264 6265 return (0); 6266 } 6267 } 6268 6269 /*ARGSUSED*/ 6270 static int 6271 dtrace_match_string(const char *s, const char *p, int depth) 6272 { 6273 return (s != NULL && strcmp(s, p) == 0); 6274 } 6275 6276 /*ARGSUSED*/ 6277 static int 6278 dtrace_match_nul(const char *s, const char *p, int depth) 6279 { 6280 return (1); /* always match the empty pattern */ 6281 } 6282 6283 /*ARGSUSED*/ 6284 static int 6285 dtrace_match_nonzero(const char *s, const char *p, int depth) 6286 { 6287 return (s != NULL && s[0] != '\0'); 6288 } 6289 6290 static int 6291 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 6292 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 6293 { 6294 dtrace_probe_t template, *probe; 6295 dtrace_hash_t *hash = NULL; 6296 int len, best = INT_MAX, nmatched = 0; 6297 dtrace_id_t i; 6298 6299 ASSERT(MUTEX_HELD(&dtrace_lock)); 6300 6301 /* 6302 * If the probe ID is specified in the key, just lookup by ID and 6303 * invoke the match callback once if a matching probe is found. 6304 */ 6305 if (pkp->dtpk_id != DTRACE_IDNONE) { 6306 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 6307 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 6308 (void) (*matched)(probe, arg); 6309 nmatched++; 6310 } 6311 return (nmatched); 6312 } 6313 6314 template.dtpr_mod = (char *)pkp->dtpk_mod; 6315 template.dtpr_func = (char *)pkp->dtpk_func; 6316 template.dtpr_name = (char *)pkp->dtpk_name; 6317 6318 /* 6319 * We want to find the most distinct of the module name, function 6320 * name, and name. So for each one that is not a glob pattern or 6321 * empty string, we perform a lookup in the corresponding hash and 6322 * use the hash table with the fewest collisions to do our search. 6323 */ 6324 if (pkp->dtpk_mmatch == &dtrace_match_string && 6325 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 6326 best = len; 6327 hash = dtrace_bymod; 6328 } 6329 6330 if (pkp->dtpk_fmatch == &dtrace_match_string && 6331 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 6332 best = len; 6333 hash = dtrace_byfunc; 6334 } 6335 6336 if (pkp->dtpk_nmatch == &dtrace_match_string && 6337 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 6338 best = len; 6339 hash = dtrace_byname; 6340 } 6341 6342 /* 6343 * If we did not select a hash table, iterate over every probe and 6344 * invoke our callback for each one that matches our input probe key. 6345 */ 6346 if (hash == NULL) { 6347 for (i = 0; i < dtrace_nprobes; i++) { 6348 if ((probe = dtrace_probes[i]) == NULL || 6349 dtrace_match_probe(probe, pkp, priv, uid, 6350 zoneid) <= 0) 6351 continue; 6352 6353 nmatched++; 6354 6355 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 6356 break; 6357 } 6358 6359 return (nmatched); 6360 } 6361 6362 /* 6363 * If we selected a hash table, iterate over each probe of the same key 6364 * name and invoke the callback for every probe that matches the other 6365 * attributes of our input probe key. 
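 *
 * A minimal sketch of the callback protocol used here (illustrative;
 * count_probes is a hypothetical callback, not part of this file): a
 * callback returns DTRACE_MATCH_NEXT to continue the walk, and anything
 * else -- e.g. DTRACE_MATCH_DONE, as dtrace_probe_lookup_match() below
 * does -- to terminate it.
 *
 *	static int
 *	count_probes(dtrace_probe_t *probe, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (DTRACE_MATCH_NEXT);
 *	}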
6366 */ 6367 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 6368 probe = *(DTRACE_HASHNEXT(hash, probe))) { 6369 6370 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 6371 continue; 6372 6373 nmatched++; 6374 6375 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 6376 break; 6377 } 6378 6379 return (nmatched); 6380 } 6381 6382 /* 6383 * Return the function pointer dtrace_match_probe() should use to compare the 6384 * specified pattern with a string. For NULL or empty patterns, we select 6385 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 6386 * For non-empty non-glob strings, we use dtrace_match_string(). 6387 */ 6388 static dtrace_probekey_f * 6389 dtrace_probekey_func(const char *p) 6390 { 6391 char c; 6392 6393 if (p == NULL || *p == '\0') 6394 return (&dtrace_match_nul); 6395 6396 while ((c = *p++) != '\0') { 6397 if (c == '[' || c == '?' || c == '*' || c == '\\') 6398 return (&dtrace_match_glob); 6399 } 6400 6401 return (&dtrace_match_string); 6402 } 6403 6404 /* 6405 * Build a probe comparison key for use with dtrace_match_probe() from the 6406 * given probe description. By convention, a null key only matches anchored 6407 * probes: if each field is the empty string, reset dtpk_fmatch to 6408 * dtrace_match_nonzero(). 6409 */ 6410 static void 6411 dtrace_probekey(const dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 6412 { 6413 pkp->dtpk_prov = pdp->dtpd_provider; 6414 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 6415 6416 pkp->dtpk_mod = pdp->dtpd_mod; 6417 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 6418 6419 pkp->dtpk_func = pdp->dtpd_func; 6420 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 6421 6422 pkp->dtpk_name = pdp->dtpd_name; 6423 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 6424 6425 pkp->dtpk_id = pdp->dtpd_id; 6426 6427 if (pkp->dtpk_id == DTRACE_IDNONE && 6428 pkp->dtpk_pmatch == &dtrace_match_nul && 6429 pkp->dtpk_mmatch == &dtrace_match_nul && 6430 pkp->dtpk_fmatch == &dtrace_match_nul && 6431 pkp->dtpk_nmatch == &dtrace_match_nul) 6432 pkp->dtpk_fmatch = &dtrace_match_nonzero; 6433 } 6434 6435 /* 6436 * DTrace Provider-to-Framework API Functions 6437 * 6438 * These functions implement much of the Provider-to-Framework API, as 6439 * described in <sys/dtrace.h>. The parts of the API not in this section are 6440 * the functions in the API for probe management (found below), and 6441 * dtrace_probe() itself (found above). 6442 */ 6443 6444 /* 6445 * Register the calling provider with the DTrace framework. This should 6446 * generally be called by DTrace providers in their attach(9E) entry point. 6447 */ 6448 int 6449 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 6450 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 6451 { 6452 dtrace_provider_t *provider; 6453 6454 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 6455 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6456 "arguments", name ? 
name : "<NULL>"); 6457 return (EINVAL); 6458 } 6459 6460 if (name[0] == '\0' || dtrace_badname(name)) { 6461 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6462 "provider name", name); 6463 return (EINVAL); 6464 } 6465 6466 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 6467 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 6468 pops->dtps_destroy == NULL || 6469 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 6470 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6471 "provider ops", name); 6472 return (EINVAL); 6473 } 6474 6475 if (dtrace_badattr(&pap->dtpa_provider) || 6476 dtrace_badattr(&pap->dtpa_mod) || 6477 dtrace_badattr(&pap->dtpa_func) || 6478 dtrace_badattr(&pap->dtpa_name) || 6479 dtrace_badattr(&pap->dtpa_args)) { 6480 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6481 "provider attributes", name); 6482 return (EINVAL); 6483 } 6484 6485 if (priv & ~DTRACE_PRIV_ALL) { 6486 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 6487 "privilege attributes", name); 6488 return (EINVAL); 6489 } 6490 6491 if ((priv & DTRACE_PRIV_KERNEL) && 6492 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 6493 pops->dtps_usermode == NULL) { 6494 cmn_err(CE_WARN, "failed to register provider '%s': need " 6495 "dtps_usermode() op for given privilege attributes", name); 6496 return (EINVAL); 6497 } 6498 6499 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 6500 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 6501 (void) strcpy(provider->dtpv_name, name); 6502 6503 provider->dtpv_attr = *pap; 6504 provider->dtpv_priv.dtpp_flags = priv; 6505 if (cr != NULL) { 6506 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 6507 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 6508 } 6509 provider->dtpv_pops = *pops; 6510 6511 if (pops->dtps_provide == NULL) { 6512 ASSERT(pops->dtps_provide_module != NULL); 6513 provider->dtpv_pops.dtps_provide = 6514 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop; 6515 } 6516 6517 if (pops->dtps_provide_module == NULL) { 6518 ASSERT(pops->dtps_provide != NULL); 6519 provider->dtpv_pops.dtps_provide_module = 6520 (void (*)(void *, struct modctl *))dtrace_nullop; 6521 } 6522 6523 if (pops->dtps_suspend == NULL) { 6524 ASSERT(pops->dtps_resume == NULL); 6525 provider->dtpv_pops.dtps_suspend = 6526 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 6527 provider->dtpv_pops.dtps_resume = 6528 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 6529 } 6530 6531 provider->dtpv_arg = arg; 6532 *idp = (dtrace_provider_id_t)provider; 6533 6534 if (pops == &dtrace_provider_ops) { 6535 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 6536 ASSERT(MUTEX_HELD(&dtrace_lock)); 6537 ASSERT(dtrace_anon.dta_enabling == NULL); 6538 6539 /* 6540 * We make sure that the DTrace provider is at the head of 6541 * the provider chain. 6542 */ 6543 provider->dtpv_next = dtrace_provider; 6544 dtrace_provider = provider; 6545 return (0); 6546 } 6547 6548 mutex_enter(&dtrace_provider_lock); 6549 mutex_enter(&dtrace_lock); 6550 6551 /* 6552 * If there is at least one provider registered, we'll add this 6553 * provider after the first provider. 
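 *
 * (A sketch of the resulting chain, for illustration: with the dtrace
 * provider at the head and providers "A" and then "B" registering later,
 * the chain reads dtrace -> B -> A; each new provider lands in the
 * second slot, never displacing the dtrace provider from the head.)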
6554 */ 6555 if (dtrace_provider != NULL) { 6556 provider->dtpv_next = dtrace_provider->dtpv_next; 6557 dtrace_provider->dtpv_next = provider; 6558 } else { 6559 dtrace_provider = provider; 6560 } 6561 6562 if (dtrace_retained != NULL) { 6563 dtrace_enabling_provide(provider); 6564 6565 /* 6566 * Now we need to call dtrace_enabling_matchall() -- which 6567 * will acquire cpu_lock and dtrace_lock. We therefore need 6568 * to drop all of our locks before calling into it... 6569 */ 6570 mutex_exit(&dtrace_lock); 6571 mutex_exit(&dtrace_provider_lock); 6572 dtrace_enabling_matchall(); 6573 6574 return (0); 6575 } 6576 6577 mutex_exit(&dtrace_lock); 6578 mutex_exit(&dtrace_provider_lock); 6579 6580 return (0); 6581 } 6582 6583 /* 6584 * Unregister the specified provider from the DTrace framework. This should 6585 * generally be called by DTrace providers in their detach(9E) entry point. 6586 */ 6587 int 6588 dtrace_unregister(dtrace_provider_id_t id) 6589 { 6590 dtrace_provider_t *old = (dtrace_provider_t *)id; 6591 dtrace_provider_t *prev = NULL; 6592 int i, self = 0; 6593 dtrace_probe_t *probe, *first = NULL; 6594 6595 if (old->dtpv_pops.dtps_enable == 6596 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 6597 /* 6598 * If DTrace itself is the provider, we're called with locks 6599 * already held. 6600 */ 6601 ASSERT(old == dtrace_provider); 6602 ASSERT(dtrace_devi != NULL); 6603 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 6604 ASSERT(MUTEX_HELD(&dtrace_lock)); 6605 self = 1; 6606 6607 if (dtrace_provider->dtpv_next != NULL) { 6608 /* 6609 * There's another provider here; return failure. 6610 */ 6611 return (EBUSY); 6612 } 6613 } else { 6614 mutex_enter(&dtrace_provider_lock); 6615 mutex_enter(&mod_lock); 6616 mutex_enter(&dtrace_lock); 6617 } 6618 6619 /* 6620 * If anyone has /dev/dtrace open, or if there are anonymous enabled 6621 * probes, we refuse to let providers slither away, unless this 6622 * provider has already been explicitly invalidated. 6623 */ 6624 if (!old->dtpv_defunct && 6625 (dtrace_opens || (dtrace_anon.dta_state != NULL && 6626 dtrace_anon.dta_state->dts_necbs > 0))) { 6627 if (!self) { 6628 mutex_exit(&dtrace_lock); 6629 mutex_exit(&mod_lock); 6630 mutex_exit(&dtrace_provider_lock); 6631 } 6632 return (EBUSY); 6633 } 6634 6635 /* 6636 * Attempt to destroy the probes associated with this provider. 6637 */ 6638 for (i = 0; i < dtrace_nprobes; i++) { 6639 if ((probe = dtrace_probes[i]) == NULL) 6640 continue; 6641 6642 if (probe->dtpr_provider != old) 6643 continue; 6644 6645 if (probe->dtpr_ecb == NULL) 6646 continue; 6647 6648 /* 6649 * We have at least one ECB; we can't remove this provider. 6650 */ 6651 if (!self) { 6652 mutex_exit(&dtrace_lock); 6653 mutex_exit(&mod_lock); 6654 mutex_exit(&dtrace_provider_lock); 6655 } 6656 return (EBUSY); 6657 } 6658 6659 /* 6660 * All of the probes for this provider are disabled; we can safely 6661 * remove all of them from their hash chains and from the probe array. 
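 *
 * (The teardown below is two-phase, summarized here for illustration:
 * each probe is unhooked from the bymod/byfunc/byname hashes and the
 * probe array while being threaded onto a private list through its
 * dtpr_nextmod field; only after dtrace_sync() returns -- guaranteeing
 * that no CPU still references the old state -- is the provider's
 * dtps_destroy() entry point called for each probe and the memory freed.)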
6662 */ 6663 for (i = 0; i < dtrace_nprobes; i++) { 6664 if ((probe = dtrace_probes[i]) == NULL) 6665 continue; 6666 6667 if (probe->dtpr_provider != old) 6668 continue; 6669 6670 dtrace_probes[i] = NULL; 6671 6672 dtrace_hash_remove(dtrace_bymod, probe); 6673 dtrace_hash_remove(dtrace_byfunc, probe); 6674 dtrace_hash_remove(dtrace_byname, probe); 6675 6676 if (first == NULL) { 6677 first = probe; 6678 probe->dtpr_nextmod = NULL; 6679 } else { 6680 probe->dtpr_nextmod = first; 6681 first = probe; 6682 } 6683 } 6684 6685 /* 6686 * The provider's probes have been removed from the hash chains and 6687 * from the probe array. Now issue a dtrace_sync() to be sure that 6688 * everyone has cleared out from any probe array processing. 6689 */ 6690 dtrace_sync(); 6691 6692 for (probe = first; probe != NULL; probe = first) { 6693 first = probe->dtpr_nextmod; 6694 6695 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 6696 probe->dtpr_arg); 6697 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 6698 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 6699 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 6700 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 6701 kmem_free(probe, sizeof (dtrace_probe_t)); 6702 } 6703 6704 if ((prev = dtrace_provider) == old) { 6705 ASSERT(self || dtrace_devi == NULL); 6706 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 6707 dtrace_provider = old->dtpv_next; 6708 } else { 6709 while (prev != NULL && prev->dtpv_next != old) 6710 prev = prev->dtpv_next; 6711 6712 if (prev == NULL) { 6713 panic("attempt to unregister non-existent " 6714 "dtrace provider %p\n", (void *)id); 6715 } 6716 6717 prev->dtpv_next = old->dtpv_next; 6718 } 6719 6720 if (!self) { 6721 mutex_exit(&dtrace_lock); 6722 mutex_exit(&mod_lock); 6723 mutex_exit(&dtrace_provider_lock); 6724 } 6725 6726 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 6727 kmem_free(old, sizeof (dtrace_provider_t)); 6728 6729 return (0); 6730 } 6731 6732 /* 6733 * Invalidate the specified provider. All subsequent probe lookups for the 6734 * specified provider will fail, but its probes will not be removed. 6735 */ 6736 void 6737 dtrace_invalidate(dtrace_provider_id_t id) 6738 { 6739 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 6740 6741 ASSERT(pvp->dtpv_pops.dtps_enable != 6742 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 6743 6744 mutex_enter(&dtrace_provider_lock); 6745 mutex_enter(&dtrace_lock); 6746 6747 pvp->dtpv_defunct = 1; 6748 6749 mutex_exit(&dtrace_lock); 6750 mutex_exit(&dtrace_provider_lock); 6751 } 6752 6753 /* 6754 * Indicate whether or not DTrace has attached. 6755 */ 6756 int 6757 dtrace_attached(void) 6758 { 6759 /* 6760 * dtrace_provider will be non-NULL iff the DTrace driver has 6761 * attached. (It's non-NULL because DTrace is always itself a 6762 * provider.) 6763 */ 6764 return (dtrace_provider != NULL); 6765 } 6766 6767 /* 6768 * Remove all the unenabled probes for the given provider. This function is 6769 * not unlike dtrace_unregister(), except that it doesn't remove the provider 6770 * -- just as many of its associated probes as it can. 6771 */ 6772 int 6773 dtrace_condense(dtrace_provider_id_t id) 6774 { 6775 dtrace_provider_t *prov = (dtrace_provider_t *)id; 6776 int i; 6777 dtrace_probe_t *probe; 6778 6779 /* 6780 * Make sure this isn't the dtrace provider itself. 
6781 */ 6782 ASSERT(prov->dtpv_pops.dtps_enable != 6783 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 6784 6785 mutex_enter(&dtrace_provider_lock); 6786 mutex_enter(&dtrace_lock); 6787 6788 /* 6789 * Attempt to destroy the probes associated with this provider. 6790 */ 6791 for (i = 0; i < dtrace_nprobes; i++) { 6792 if ((probe = dtrace_probes[i]) == NULL) 6793 continue; 6794 6795 if (probe->dtpr_provider != prov) 6796 continue; 6797 6798 if (probe->dtpr_ecb != NULL) 6799 continue; 6800 6801 dtrace_probes[i] = NULL; 6802 6803 dtrace_hash_remove(dtrace_bymod, probe); 6804 dtrace_hash_remove(dtrace_byfunc, probe); 6805 dtrace_hash_remove(dtrace_byname, probe); 6806 6807 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 6808 probe->dtpr_arg); 6809 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 6810 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 6811 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 6812 kmem_free(probe, sizeof (dtrace_probe_t)); 6813 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 6814 } 6815 6816 mutex_exit(&dtrace_lock); 6817 mutex_exit(&dtrace_provider_lock); 6818 6819 return (0); 6820 } 6821 6822 /* 6823 * DTrace Probe Management Functions 6824 * 6825 * The functions in this section perform the DTrace probe management, 6826 * including functions to create probes, look-up probes, and call into the 6827 * providers to request that probes be provided. Some of these functions are 6828 * in the Provider-to-Framework API; these functions can be identified by the 6829 * fact that they are not declared "static". 6830 */ 6831 6832 /* 6833 * Create a probe with the specified module name, function name, and name. 6834 */ 6835 dtrace_id_t 6836 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 6837 const char *func, const char *name, int aframes, void *arg) 6838 { 6839 dtrace_probe_t *probe, **probes; 6840 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 6841 dtrace_id_t id; 6842 6843 if (provider == dtrace_provider) { 6844 ASSERT(MUTEX_HELD(&dtrace_lock)); 6845 } else { 6846 mutex_enter(&dtrace_lock); 6847 } 6848 6849 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 6850 VM_BESTFIT | VM_SLEEP); 6851 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 6852 6853 probe->dtpr_id = id; 6854 probe->dtpr_gen = dtrace_probegen++; 6855 probe->dtpr_mod = dtrace_strdup(mod); 6856 probe->dtpr_func = dtrace_strdup(func); 6857 probe->dtpr_name = dtrace_strdup(name); 6858 probe->dtpr_arg = arg; 6859 probe->dtpr_aframes = aframes; 6860 probe->dtpr_provider = provider; 6861 6862 dtrace_hash_add(dtrace_bymod, probe); 6863 dtrace_hash_add(dtrace_byfunc, probe); 6864 dtrace_hash_add(dtrace_byname, probe); 6865 6866 if (id - 1 >= dtrace_nprobes) { 6867 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 6868 size_t nsize = osize << 1; 6869 6870 if (nsize == 0) { 6871 ASSERT(osize == 0); 6872 ASSERT(dtrace_probes == NULL); 6873 nsize = sizeof (dtrace_probe_t *); 6874 } 6875 6876 probes = kmem_zalloc(nsize, KM_SLEEP); 6877 6878 if (dtrace_probes == NULL) { 6879 ASSERT(osize == 0); 6880 dtrace_probes = probes; 6881 dtrace_nprobes = 1; 6882 } else { 6883 dtrace_probe_t **oprobes = dtrace_probes; 6884 6885 bcopy(oprobes, probes, osize); 6886 dtrace_membar_producer(); 6887 dtrace_probes = probes; 6888 6889 dtrace_sync(); 6890 6891 /* 6892 * All CPUs are now seeing the new probes array; we can 6893 * safely free the old array. 
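 *
 * A condensed sketch (illustrative only) of the grow-and-publish
 * sequence used here, from the point of view of a lock-free reader in
 * probe context:
 *
 *	bcopy(oprobes, probes, osize);	-- fill the new array
 *	dtrace_membar_producer();	-- order the copy before the swap
 *	dtrace_probes = probes;		-- publish the new array
 *	dtrace_sync();			-- wait out all old readers
 *	kmem_free(oprobes, osize);	-- now safe to free the old one
 *
 * A reader thus sees either the old array or the fully-copied new one,
 * never a partially-initialized array.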
6894 */ 6895 kmem_free(oprobes, osize); 6896 dtrace_nprobes <<= 1; 6897 } 6898 6899 ASSERT(id - 1 < dtrace_nprobes); 6900 } 6901 6902 ASSERT(dtrace_probes[id - 1] == NULL); 6903 dtrace_probes[id - 1] = probe; 6904 6905 if (provider != dtrace_provider) 6906 mutex_exit(&dtrace_lock); 6907 6908 return (id); 6909 } 6910 6911 static dtrace_probe_t * 6912 dtrace_probe_lookup_id(dtrace_id_t id) 6913 { 6914 ASSERT(MUTEX_HELD(&dtrace_lock)); 6915 6916 if (id == 0 || id > dtrace_nprobes) 6917 return (NULL); 6918 6919 return (dtrace_probes[id - 1]); 6920 } 6921 6922 static int 6923 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 6924 { 6925 *((dtrace_id_t *)arg) = probe->dtpr_id; 6926 6927 return (DTRACE_MATCH_DONE); 6928 } 6929 6930 /* 6931 * Look up a probe based on provider and one or more of module name, function 6932 * name and probe name. 6933 */ 6934 dtrace_id_t 6935 dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod, 6936 const char *func, const char *name) 6937 { 6938 dtrace_probekey_t pkey; 6939 dtrace_id_t id; 6940 int match; 6941 6942 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 6943 pkey.dtpk_pmatch = &dtrace_match_string; 6944 pkey.dtpk_mod = mod; 6945 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 6946 pkey.dtpk_func = func; 6947 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 6948 pkey.dtpk_name = name; 6949 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul; 6950 pkey.dtpk_id = DTRACE_IDNONE; 6951 6952 mutex_enter(&dtrace_lock); 6953 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, 6954 dtrace_probe_lookup_match, &id); 6955 mutex_exit(&dtrace_lock); 6956 6957 ASSERT(match == 1 || match == 0); 6958 return (match ? id : 0); 6959 } 6960 6961 /* 6962 * Returns the probe argument associated with the specified probe. 6963 */ 6964 void * 6965 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) 6966 { 6967 dtrace_probe_t *probe; 6968 void *rval = NULL; 6969 6970 mutex_enter(&dtrace_lock); 6971 6972 if ((probe = dtrace_probe_lookup_id(pid)) != NULL && 6973 probe->dtpr_provider == (dtrace_provider_t *)id) 6974 rval = probe->dtpr_arg; 6975 6976 mutex_exit(&dtrace_lock); 6977 6978 return (rval); 6979 } 6980 6981 /* 6982 * Copy a probe into a probe description. 6983 */ 6984 static void 6985 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) 6986 { 6987 bzero(pdp, sizeof (dtrace_probedesc_t)); 6988 pdp->dtpd_id = prp->dtpr_id; 6989 6990 (void) strncpy(pdp->dtpd_provider, 6991 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); 6992 6993 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); 6994 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); 6995 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); 6996 } 6997 6998 /* 6999 * Called to indicate that a probe -- or probes -- should be provided by a 7000 * specified provider. If the specified description is NULL, the provider will 7001 * be told to provide all of its probes. (This is done whenever a new 7002 * consumer comes along, or whenever a retained enabling is to be matched.) If 7003 * the specified description is non-NULL, the provider is given the 7004 * opportunity to dynamically provide the specified probe, allowing providers 7005 * to support the creation of probes on-the-fly. (So-called _autocreated_ 7006 * probes.) 
If the provider is NULL, the operations will be applied to all 7007 * providers; if the provider is non-NULL the operations will only be applied 7008 * to the specified provider. The dtrace_provider_lock must be held, and the 7009 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 7010 * will need to grab the dtrace_lock when it reenters the framework through 7011 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 7012 */ 7013 static void 7014 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 7015 { 7016 struct modctl *ctl; 7017 int all = 0; 7018 7019 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7020 7021 if (prv == NULL) { 7022 all = 1; 7023 prv = dtrace_provider; 7024 } 7025 7026 do { 7027 /* 7028 * First, call the blanket provide operation. 7029 */ 7030 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 7031 7032 /* 7033 * Now call the per-module provide operation. We will grab 7034 * mod_lock to prevent the list from being modified. Note 7035 * that this also prevents the mod_busy bits from changing. 7036 * (mod_busy can only be changed with mod_lock held.) 7037 */ 7038 mutex_enter(&mod_lock); 7039 7040 ctl = &modules; 7041 do { 7042 if (ctl->mod_busy || ctl->mod_mp == NULL) 7043 continue; 7044 7045 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 7046 7047 } while ((ctl = ctl->mod_next) != &modules); 7048 7049 mutex_exit(&mod_lock); 7050 } while (all && (prv = prv->dtpv_next) != NULL); 7051 } 7052 7053 /* 7054 * Iterate over each probe, and call the Framework-to-Provider API function 7055 * denoted by offs. 7056 */ 7057 static void 7058 dtrace_probe_foreach(uintptr_t offs) 7059 { 7060 dtrace_provider_t *prov; 7061 void (*func)(void *, dtrace_id_t, void *); 7062 dtrace_probe_t *probe; 7063 dtrace_icookie_t cookie; 7064 int i; 7065 7066 /* 7067 * We disable interrupts to walk through the probe array. This is 7068 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 7069 * won't see stale data. 7070 */ 7071 cookie = dtrace_interrupt_disable(); 7072 7073 for (i = 0; i < dtrace_nprobes; i++) { 7074 if ((probe = dtrace_probes[i]) == NULL) 7075 continue; 7076 7077 if (probe->dtpr_ecb == NULL) { 7078 /* 7079 * This probe isn't enabled -- don't call the function. 7080 */ 7081 continue; 7082 } 7083 7084 prov = probe->dtpr_provider; 7085 func = *((void(**)(void *, dtrace_id_t, void *)) 7086 ((uintptr_t)&prov->dtpv_pops + offs)); 7087 7088 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 7089 } 7090 7091 dtrace_interrupt_enable(cookie); 7092 } 7093 7094 static int 7095 dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 7096 { 7097 dtrace_probekey_t pkey; 7098 uint32_t priv; 7099 uid_t uid; 7100 zoneid_t zoneid; 7101 7102 ASSERT(MUTEX_HELD(&dtrace_lock)); 7103 dtrace_ecb_create_cache = NULL; 7104 7105 if (desc == NULL) { 7106 /* 7107 * If we're passed a NULL description, we're being asked to 7108 * create an ECB with a NULL probe. 
7109 */ 7110 (void) dtrace_ecb_create_enable(NULL, enab); 7111 return (0); 7112 } 7113 7114 dtrace_probekey(desc, &pkey); 7115 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 7116 &priv, &uid, &zoneid); 7117 7118 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 7119 enab)); 7120 } 7121 7122 /* 7123 * DTrace Helper Provider Functions 7124 */ 7125 static void 7126 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 7127 { 7128 attr->dtat_name = DOF_ATTR_NAME(dofattr); 7129 attr->dtat_data = DOF_ATTR_DATA(dofattr); 7130 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 7131 } 7132 7133 static void 7134 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 7135 const dof_provider_t *dofprov, char *strtab) 7136 { 7137 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 7138 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 7139 dofprov->dofpv_provattr); 7140 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 7141 dofprov->dofpv_modattr); 7142 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 7143 dofprov->dofpv_funcattr); 7144 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 7145 dofprov->dofpv_nameattr); 7146 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 7147 dofprov->dofpv_argsattr); 7148 } 7149 7150 static void 7151 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 7152 { 7153 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 7154 dof_hdr_t *dof = (dof_hdr_t *)daddr; 7155 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 7156 dof_provider_t *provider; 7157 dof_probe_t *probe; 7158 uint32_t *off, *enoff; 7159 uint8_t *arg; 7160 char *strtab; 7161 uint_t i, nprobes; 7162 dtrace_helper_provdesc_t dhpv; 7163 dtrace_helper_probedesc_t dhpb; 7164 dtrace_meta_t *meta = dtrace_meta_pid; 7165 dtrace_mops_t *mops = &meta->dtm_mops; 7166 void *parg; 7167 7168 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 7169 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7170 provider->dofpv_strtab * dof->dofh_secsize); 7171 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7172 provider->dofpv_probes * dof->dofh_secsize); 7173 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7174 provider->dofpv_prargs * dof->dofh_secsize); 7175 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7176 provider->dofpv_proffs * dof->dofh_secsize); 7177 7178 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 7179 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 7180 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 7181 enoff = NULL; 7182 7183 /* 7184 * See dtrace_helper_provider_validate(). 7185 */ 7186 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 7187 provider->dofpv_prenoffs != DOF_SECT_NONE) { 7188 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7189 provider->dofpv_prenoffs * dof->dofh_secsize); 7190 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 7191 } 7192 7193 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 7194 7195 /* 7196 * Create the provider. 7197 */ 7198 dtrace_dofprov2hprov(&dhpv, provider, strtab); 7199 7200 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 7201 return; 7202 7203 meta->dtm_count++; 7204 7205 /* 7206 * Create the probes. 
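 *
 * (Each dof_probe_t in the loop below is translated field-for-field into
 * the dtrace_helper_probedesc_t handed to the meta provider's
 * dtms_create_probe() entry point: dofpr_func and dofpr_name index into
 * strtab, dofpr_offidx/dofpr_noffs select this probe's slice of the
 * offsets section, and dofpr_argidx selects its argument mappings.)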
7207 */ 7208 for (i = 0; i < nprobes; i++) { 7209 probe = (dof_probe_t *)(uintptr_t)(daddr + 7210 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 7211 7212 dhpb.dthpb_mod = dhp->dofhp_mod; 7213 dhpb.dthpb_func = strtab + probe->dofpr_func; 7214 dhpb.dthpb_name = strtab + probe->dofpr_name; 7215 dhpb.dthpb_base = probe->dofpr_addr; 7216 dhpb.dthpb_offs = off + probe->dofpr_offidx; 7217 dhpb.dthpb_noffs = probe->dofpr_noffs; 7218 if (enoff != NULL) { 7219 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 7220 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 7221 } else { 7222 dhpb.dthpb_enoffs = NULL; 7223 dhpb.dthpb_nenoffs = 0; 7224 } 7225 dhpb.dthpb_args = arg + probe->dofpr_argidx; 7226 dhpb.dthpb_nargc = probe->dofpr_nargc; 7227 dhpb.dthpb_xargc = probe->dofpr_xargc; 7228 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 7229 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 7230 7231 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 7232 } 7233 } 7234 7235 static void 7236 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 7237 { 7238 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 7239 dof_hdr_t *dof = (dof_hdr_t *)daddr; 7240 int i; 7241 7242 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 7243 7244 for (i = 0; i < dof->dofh_secnum; i++) { 7245 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 7246 dof->dofh_secoff + i * dof->dofh_secsize); 7247 7248 if (sec->dofs_type != DOF_SECT_PROVIDER) 7249 continue; 7250 7251 dtrace_helper_provide_one(dhp, sec, pid); 7252 } 7253 7254 /* 7255 * We may have just created probes, so we must now rematch against 7256 * any retained enablings. Note that this call will acquire both 7257 * cpu_lock and dtrace_lock; the fact that we are holding 7258 * dtrace_meta_lock now is what defines the ordering with respect to 7259 * these three locks. 7260 */ 7261 dtrace_enabling_matchall(); 7262 } 7263 7264 static void 7265 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 7266 { 7267 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 7268 dof_hdr_t *dof = (dof_hdr_t *)daddr; 7269 dof_sec_t *str_sec; 7270 dof_provider_t *provider; 7271 char *strtab; 7272 dtrace_helper_provdesc_t dhpv; 7273 dtrace_meta_t *meta = dtrace_meta_pid; 7274 dtrace_mops_t *mops = &meta->dtm_mops; 7275 7276 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 7277 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 7278 provider->dofpv_strtab * dof->dofh_secsize); 7279 7280 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 7281 7282 /* 7283 * Create the provider. 7284 */ 7285 dtrace_dofprov2hprov(&dhpv, provider, strtab); 7286 7287 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 7288 7289 meta->dtm_count--; 7290 } 7291 7292 static void 7293 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 7294 { 7295 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 7296 dof_hdr_t *dof = (dof_hdr_t *)daddr; 7297 int i; 7298 7299 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 7300 7301 for (i = 0; i < dof->dofh_secnum; i++) { 7302 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 7303 dof->dofh_secoff + i * dof->dofh_secsize); 7304 7305 if (sec->dofs_type != DOF_SECT_PROVIDER) 7306 continue; 7307 7308 dtrace_helper_provider_remove_one(dhp, sec, pid); 7309 } 7310 } 7311 7312 /* 7313 * DTrace Meta Provider-to-Framework API Functions 7314 * 7315 * These functions implement the Meta Provider-to-Framework API, as described 7316 * in <sys/dtrace.h>. 
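 *
 * A hypothetical registration sketch (all "example" names are invented;
 * the three ops shown are exactly those that dtrace_meta_register()
 * requires below):
 *
 *	static dtrace_mops_t example_mops = {
 *		.dtms_create_probe = example_create_probe,
 *		.dtms_provide_pid = example_provide_pid,
 *		.dtms_remove_pid = example_remove_pid
 *	};
 *	dtrace_meta_provider_id_t example_id;
 *
 *	(void) dtrace_meta_register("example", &example_mops, NULL,
 *	    &example_id);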
7317 */ 7318 int 7319 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 7320 dtrace_meta_provider_id_t *idp) 7321 { 7322 dtrace_meta_t *meta; 7323 dtrace_helpers_t *help, *next; 7324 int i; 7325 7326 *idp = DTRACE_METAPROVNONE; 7327 7328 /* 7329 * We strictly don't need the name, but we hold onto it for 7330 * debuggability. All hail error queues! 7331 */ 7332 if (name == NULL) { 7333 cmn_err(CE_WARN, "failed to register meta-provider: " 7334 "invalid name"); 7335 return (EINVAL); 7336 } 7337 7338 if (mops == NULL || 7339 mops->dtms_create_probe == NULL || 7340 mops->dtms_provide_pid == NULL || 7341 mops->dtms_remove_pid == NULL) { 7342 cmn_err(CE_WARN, "failed to register meta-provider %s: " 7343 "invalid ops", name); 7344 return (EINVAL); 7345 } 7346 7347 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 7348 meta->dtm_mops = *mops; 7349 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 7350 (void) strcpy(meta->dtm_name, name); 7351 meta->dtm_arg = arg; 7352 7353 mutex_enter(&dtrace_meta_lock); 7354 mutex_enter(&dtrace_lock); 7355 7356 if (dtrace_meta_pid != NULL) { 7357 mutex_exit(&dtrace_lock); 7358 mutex_exit(&dtrace_meta_lock); 7359 cmn_err(CE_WARN, "failed to register meta-provider %s: " 7360 "user-land meta-provider exists", name); 7361 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 7362 kmem_free(meta, sizeof (dtrace_meta_t)); 7363 return (EINVAL); 7364 } 7365 7366 dtrace_meta_pid = meta; 7367 *idp = (dtrace_meta_provider_id_t)meta; 7368 7369 /* 7370 * If there are providers and probes ready to go, pass them 7371 * off to the new meta provider now. 7372 */ 7373 7374 help = dtrace_deferred_pid; 7375 dtrace_deferred_pid = NULL; 7376 7377 mutex_exit(&dtrace_lock); 7378 7379 while (help != NULL) { 7380 for (i = 0; i < help->dthps_nprovs; i++) { 7381 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 7382 help->dthps_pid); 7383 } 7384 7385 next = help->dthps_next; 7386 help->dthps_next = NULL; 7387 help->dthps_prev = NULL; 7388 help->dthps_deferred = 0; 7389 help = next; 7390 } 7391 7392 mutex_exit(&dtrace_meta_lock); 7393 7394 return (0); 7395 } 7396 7397 int 7398 dtrace_meta_unregister(dtrace_meta_provider_id_t id) 7399 { 7400 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 7401 7402 mutex_enter(&dtrace_meta_lock); 7403 mutex_enter(&dtrace_lock); 7404 7405 if (old == dtrace_meta_pid) { 7406 pp = &dtrace_meta_pid; 7407 } else { 7408 panic("attempt to unregister non-existent " 7409 "dtrace meta-provider %p\n", (void *)old); 7410 } 7411 7412 if (old->dtm_count != 0) { 7413 mutex_exit(&dtrace_lock); 7414 mutex_exit(&dtrace_meta_lock); 7415 return (EBUSY); 7416 } 7417 7418 *pp = NULL; 7419 7420 mutex_exit(&dtrace_lock); 7421 mutex_exit(&dtrace_meta_lock); 7422 7423 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 7424 kmem_free(old, sizeof (dtrace_meta_t)); 7425 7426 return (0); 7427 } 7428 7429 7430 /* 7431 * DTrace DIF Object Functions 7432 */ 7433 static int 7434 dtrace_difo_err(uint_t pc, const char *format, ...) 7435 { 7436 if (dtrace_err_verbose) { 7437 va_list alist; 7438 7439 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 7440 va_start(alist, format); 7441 (void) vuprintf(format, alist); 7442 va_end(alist); 7443 } 7444 7445 #ifdef DTRACE_ERRDEBUG 7446 dtrace_errdebug(format); 7447 #endif 7448 return (1); 7449 } 7450 7451 /* 7452 * Validate a DTrace DIF object by checking the IR instructions. The following 7453 * rules are currently enforced by dtrace_difo_validate(): 7454 * 7455 * 1. 
Each instruction must have a valid opcode 7456 * 2. Each register, string, variable, or subroutine reference must be valid 7457 * 3. No instruction can modify register %r0 (must be zero) 7458 * 4. All instruction reserved bits must be set to zero 7459 * 5. The last instruction must be a "ret" instruction 7460 * 6. All branch targets must reference a valid instruction _after_ the branch 7461 */ 7462 static int 7463 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 7464 cred_t *cr) 7465 { 7466 int err = 0, i; 7467 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 7468 int kcheckload; 7469 uint_t pc; 7470 7471 kcheckload = cr == NULL || 7472 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 7473 7474 dp->dtdo_destructive = 0; 7475 7476 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 7477 dif_instr_t instr = dp->dtdo_buf[pc]; 7478 7479 uint_t r1 = DIF_INSTR_R1(instr); 7480 uint_t r2 = DIF_INSTR_R2(instr); 7481 uint_t rd = DIF_INSTR_RD(instr); 7482 uint_t rs = DIF_INSTR_RS(instr); 7483 uint_t label = DIF_INSTR_LABEL(instr); 7484 uint_t v = DIF_INSTR_VAR(instr); 7485 uint_t subr = DIF_INSTR_SUBR(instr); 7486 uint_t type = DIF_INSTR_TYPE(instr); 7487 uint_t op = DIF_INSTR_OP(instr); 7488 7489 switch (op) { 7490 case DIF_OP_OR: 7491 case DIF_OP_XOR: 7492 case DIF_OP_AND: 7493 case DIF_OP_SLL: 7494 case DIF_OP_SRL: 7495 case DIF_OP_SRA: 7496 case DIF_OP_SUB: 7497 case DIF_OP_ADD: 7498 case DIF_OP_MUL: 7499 case DIF_OP_SDIV: 7500 case DIF_OP_UDIV: 7501 case DIF_OP_SREM: 7502 case DIF_OP_UREM: 7503 case DIF_OP_COPYS: 7504 if (r1 >= nregs) 7505 err += efunc(pc, "invalid register %u\n", r1); 7506 if (r2 >= nregs) 7507 err += efunc(pc, "invalid register %u\n", r2); 7508 if (rd >= nregs) 7509 err += efunc(pc, "invalid register %u\n", rd); 7510 if (rd == 0) 7511 err += efunc(pc, "cannot write to %r0\n"); 7512 break; 7513 case DIF_OP_NOT: 7514 case DIF_OP_MOV: 7515 case DIF_OP_ALLOCS: 7516 if (r1 >= nregs) 7517 err += efunc(pc, "invalid register %u\n", r1); 7518 if (r2 != 0) 7519 err += efunc(pc, "non-zero reserved bits\n"); 7520 if (rd >= nregs) 7521 err += efunc(pc, "invalid register %u\n", rd); 7522 if (rd == 0) 7523 err += efunc(pc, "cannot write to %r0\n"); 7524 break; 7525 case DIF_OP_LDSB: 7526 case DIF_OP_LDSH: 7527 case DIF_OP_LDSW: 7528 case DIF_OP_LDUB: 7529 case DIF_OP_LDUH: 7530 case DIF_OP_LDUW: 7531 case DIF_OP_LDX: 7532 if (r1 >= nregs) 7533 err += efunc(pc, "invalid register %u\n", r1); 7534 if (r2 != 0) 7535 err += efunc(pc, "non-zero reserved bits\n"); 7536 if (rd >= nregs) 7537 err += efunc(pc, "invalid register %u\n", rd); 7538 if (rd == 0) 7539 err += efunc(pc, "cannot write to %r0\n"); 7540 if (kcheckload) 7541 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 7542 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 7543 break; 7544 case DIF_OP_RLDSB: 7545 case DIF_OP_RLDSH: 7546 case DIF_OP_RLDSW: 7547 case DIF_OP_RLDUB: 7548 case DIF_OP_RLDUH: 7549 case DIF_OP_RLDUW: 7550 case DIF_OP_RLDX: 7551 if (r1 >= nregs) 7552 err += efunc(pc, "invalid register %u\n", r1); 7553 if (r2 != 0) 7554 err += efunc(pc, "non-zero reserved bits\n"); 7555 if (rd >= nregs) 7556 err += efunc(pc, "invalid register %u\n", rd); 7557 if (rd == 0) 7558 err += efunc(pc, "cannot write to %r0\n"); 7559 break; 7560 case DIF_OP_ULDSB: 7561 case DIF_OP_ULDSH: 7562 case DIF_OP_ULDSW: 7563 case DIF_OP_ULDUB: 7564 case DIF_OP_ULDUH: 7565 case DIF_OP_ULDUW: 7566 case DIF_OP_ULDX: 7567 if (r1 >= nregs) 7568 err += efunc(pc, "invalid register %u\n", r1); 7569 if (r2 != 0) 7570 err += 
efunc(pc, "non-zero reserved bits\n"); 7571 if (rd >= nregs) 7572 err += efunc(pc, "invalid register %u\n", rd); 7573 if (rd == 0) 7574 err += efunc(pc, "cannot write to %r0\n"); 7575 break; 7576 case DIF_OP_STB: 7577 case DIF_OP_STH: 7578 case DIF_OP_STW: 7579 case DIF_OP_STX: 7580 if (r1 >= nregs) 7581 err += efunc(pc, "invalid register %u\n", r1); 7582 if (r2 != 0) 7583 err += efunc(pc, "non-zero reserved bits\n"); 7584 if (rd >= nregs) 7585 err += efunc(pc, "invalid register %u\n", rd); 7586 if (rd == 0) 7587 err += efunc(pc, "cannot write to 0 address\n"); 7588 break; 7589 case DIF_OP_CMP: 7590 case DIF_OP_SCMP: 7591 if (r1 >= nregs) 7592 err += efunc(pc, "invalid register %u\n", r1); 7593 if (r2 >= nregs) 7594 err += efunc(pc, "invalid register %u\n", r2); 7595 if (rd != 0) 7596 err += efunc(pc, "non-zero reserved bits\n"); 7597 break; 7598 case DIF_OP_TST: 7599 if (r1 >= nregs) 7600 err += efunc(pc, "invalid register %u\n", r1); 7601 if (r2 != 0 || rd != 0) 7602 err += efunc(pc, "non-zero reserved bits\n"); 7603 break; 7604 case DIF_OP_BA: 7605 case DIF_OP_BE: 7606 case DIF_OP_BNE: 7607 case DIF_OP_BG: 7608 case DIF_OP_BGU: 7609 case DIF_OP_BGE: 7610 case DIF_OP_BGEU: 7611 case DIF_OP_BL: 7612 case DIF_OP_BLU: 7613 case DIF_OP_BLE: 7614 case DIF_OP_BLEU: 7615 if (label >= dp->dtdo_len) { 7616 err += efunc(pc, "invalid branch target %u\n", 7617 label); 7618 } 7619 if (label <= pc) { 7620 err += efunc(pc, "backward branch to %u\n", 7621 label); 7622 } 7623 break; 7624 case DIF_OP_RET: 7625 if (r1 != 0 || r2 != 0) 7626 err += efunc(pc, "non-zero reserved bits\n"); 7627 if (rd >= nregs) 7628 err += efunc(pc, "invalid register %u\n", rd); 7629 break; 7630 case DIF_OP_NOP: 7631 case DIF_OP_POPTS: 7632 case DIF_OP_FLUSHTS: 7633 if (r1 != 0 || r2 != 0 || rd != 0) 7634 err += efunc(pc, "non-zero reserved bits\n"); 7635 break; 7636 case DIF_OP_SETX: 7637 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 7638 err += efunc(pc, "invalid integer ref %u\n", 7639 DIF_INSTR_INTEGER(instr)); 7640 } 7641 if (rd >= nregs) 7642 err += efunc(pc, "invalid register %u\n", rd); 7643 if (rd == 0) 7644 err += efunc(pc, "cannot write to %r0\n"); 7645 break; 7646 case DIF_OP_SETS: 7647 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 7648 err += efunc(pc, "invalid string ref %u\n", 7649 DIF_INSTR_STRING(instr)); 7650 } 7651 if (rd >= nregs) 7652 err += efunc(pc, "invalid register %u\n", rd); 7653 if (rd == 0) 7654 err += efunc(pc, "cannot write to %r0\n"); 7655 break; 7656 case DIF_OP_LDGA: 7657 case DIF_OP_LDTA: 7658 if (r1 > DIF_VAR_ARRAY_MAX) 7659 err += efunc(pc, "invalid array %u\n", r1); 7660 if (r2 >= nregs) 7661 err += efunc(pc, "invalid register %u\n", r2); 7662 if (rd >= nregs) 7663 err += efunc(pc, "invalid register %u\n", rd); 7664 if (rd == 0) 7665 err += efunc(pc, "cannot write to %r0\n"); 7666 break; 7667 case DIF_OP_LDGS: 7668 case DIF_OP_LDTS: 7669 case DIF_OP_LDLS: 7670 case DIF_OP_LDGAA: 7671 case DIF_OP_LDTAA: 7672 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 7673 err += efunc(pc, "invalid variable %u\n", v); 7674 if (rd >= nregs) 7675 err += efunc(pc, "invalid register %u\n", rd); 7676 if (rd == 0) 7677 err += efunc(pc, "cannot write to %r0\n"); 7678 break; 7679 case DIF_OP_STGS: 7680 case DIF_OP_STTS: 7681 case DIF_OP_STLS: 7682 case DIF_OP_STGAA: 7683 case DIF_OP_STTAA: 7684 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 7685 err += efunc(pc, "invalid variable %u\n", v); 7686 if (rs >= nregs) 7687 err += efunc(pc, "invalid register %u\n", rd); 7688 break; 7689 case 
DIF_OP_CALL: 7690 if (subr > DIF_SUBR_MAX) 7691 err += efunc(pc, "invalid subr %u\n", subr); 7692 if (rd >= nregs) 7693 err += efunc(pc, "invalid register %u\n", rd); 7694 if (rd == 0) 7695 err += efunc(pc, "cannot write to %r0\n"); 7696 7697 if (subr == DIF_SUBR_COPYOUT || 7698 subr == DIF_SUBR_COPYOUTSTR) { 7699 dp->dtdo_destructive = 1; 7700 } 7701 break; 7702 case DIF_OP_PUSHTR: 7703 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 7704 err += efunc(pc, "invalid ref type %u\n", type); 7705 if (r2 >= nregs) 7706 err += efunc(pc, "invalid register %u\n", r2); 7707 if (rs >= nregs) 7708 err += efunc(pc, "invalid register %u\n", rs); 7709 break; 7710 case DIF_OP_PUSHTV: 7711 if (type != DIF_TYPE_CTF) 7712 err += efunc(pc, "invalid val type %u\n", type); 7713 if (r2 >= nregs) 7714 err += efunc(pc, "invalid register %u\n", r2); 7715 if (rs >= nregs) 7716 err += efunc(pc, "invalid register %u\n", rs); 7717 break; 7718 default: 7719 err += efunc(pc, "invalid opcode %u\n", 7720 DIF_INSTR_OP(instr)); 7721 } 7722 } 7723 7724 if (dp->dtdo_len != 0 && 7725 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 7726 err += efunc(dp->dtdo_len - 1, 7727 "expected 'ret' as last DIF instruction\n"); 7728 } 7729 7730 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 7731 /* 7732 * If we're not returning by reference, the size must be either 7733 * 0 or the size of one of the base types. 7734 */ 7735 switch (dp->dtdo_rtype.dtdt_size) { 7736 case 0: 7737 case sizeof (uint8_t): 7738 case sizeof (uint16_t): 7739 case sizeof (uint32_t): 7740 case sizeof (uint64_t): 7741 break; 7742 7743 default: 7744 err += efunc(dp->dtdo_len - 1, "bad return size"); 7745 } 7746 } 7747 7748 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 7749 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 7750 dtrace_diftype_t *vt, *et; 7751 uint_t id, ndx; 7752 7753 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 7754 v->dtdv_scope != DIFV_SCOPE_THREAD && 7755 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 7756 err += efunc(i, "unrecognized variable scope %d\n", 7757 v->dtdv_scope); 7758 break; 7759 } 7760 7761 if (v->dtdv_kind != DIFV_KIND_ARRAY && 7762 v->dtdv_kind != DIFV_KIND_SCALAR) { 7763 err += efunc(i, "unrecognized variable type %d\n", 7764 v->dtdv_kind); 7765 break; 7766 } 7767 7768 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 7769 err += efunc(i, "%d exceeds variable id limit\n", id); 7770 break; 7771 } 7772 7773 if (id < DIF_VAR_OTHER_UBASE) 7774 continue; 7775 7776 /* 7777 * For user-defined variables, we need to check that this 7778 * definition is identical to any previous definition that we 7779 * encountered. 
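 *
 * ("Identical" is enforced concretely below: the variable's kind
 * (scalar versus array), its type flags, and -- when the new definition
 * specifies a non-zero size -- its type size must all agree with the
 * definition already recorded; e.g. a global first defined as an 8-byte
 * scalar may not reappear as a by-ref type of another size.)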
7780 */ 7781 ndx = id - DIF_VAR_OTHER_UBASE; 7782 7783 switch (v->dtdv_scope) { 7784 case DIFV_SCOPE_GLOBAL: 7785 if (ndx < vstate->dtvs_nglobals) { 7786 dtrace_statvar_t *svar; 7787 7788 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 7789 existing = &svar->dtsv_var; 7790 } 7791 7792 break; 7793 7794 case DIFV_SCOPE_THREAD: 7795 if (ndx < vstate->dtvs_ntlocals) 7796 existing = &vstate->dtvs_tlocals[ndx]; 7797 break; 7798 7799 case DIFV_SCOPE_LOCAL: 7800 if (ndx < vstate->dtvs_nlocals) { 7801 dtrace_statvar_t *svar; 7802 7803 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 7804 existing = &svar->dtsv_var; 7805 } 7806 7807 break; 7808 } 7809 7810 vt = &v->dtdv_type; 7811 7812 if (vt->dtdt_flags & DIF_TF_BYREF) { 7813 if (vt->dtdt_size == 0) { 7814 err += efunc(i, "zero-sized variable\n"); 7815 break; 7816 } 7817 7818 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL && 7819 vt->dtdt_size > dtrace_global_maxsize) { 7820 err += efunc(i, "oversized by-ref global\n"); 7821 break; 7822 } 7823 } 7824 7825 if (existing == NULL || existing->dtdv_id == 0) 7826 continue; 7827 7828 ASSERT(existing->dtdv_id == v->dtdv_id); 7829 ASSERT(existing->dtdv_scope == v->dtdv_scope); 7830 7831 if (existing->dtdv_kind != v->dtdv_kind) 7832 err += efunc(i, "%d changed variable kind\n", id); 7833 7834 et = &existing->dtdv_type; 7835 7836 if (vt->dtdt_flags != et->dtdt_flags) { 7837 err += efunc(i, "%d changed variable type flags\n", id); 7838 break; 7839 } 7840 7841 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 7842 err += efunc(i, "%d changed variable type size\n", id); 7843 break; 7844 } 7845 } 7846 7847 return (err); 7848 } 7849 7850 /* 7851 * Validate a DTrace DIF object that is to be used as a helper. Helpers 7852 * are much more constrained than normal DIFOs. Specifically, they may 7853 * not: 7854 * 7855 * 1. Make calls to subroutines other than copyin(), copyinstr() or 7856 * miscellaneous string routines 7857 * 2. Access DTrace variables other than the args[] array, and the 7858 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables. 7859 * 3. Have thread-local variables. 7860 * 4. Have dynamic variables. 7861 */ 7862 static int 7863 dtrace_difo_validate_helper(dtrace_difo_t *dp) 7864 { 7865 int (*efunc)(uint_t pc, const char *, ...) 
= dtrace_difo_err; 7866 int err = 0; 7867 uint_t pc; 7868 7869 for (pc = 0; pc < dp->dtdo_len; pc++) { 7870 dif_instr_t instr = dp->dtdo_buf[pc]; 7871 7872 uint_t v = DIF_INSTR_VAR(instr); 7873 uint_t subr = DIF_INSTR_SUBR(instr); 7874 uint_t op = DIF_INSTR_OP(instr); 7875 7876 switch (op) { 7877 case DIF_OP_OR: 7878 case DIF_OP_XOR: 7879 case DIF_OP_AND: 7880 case DIF_OP_SLL: 7881 case DIF_OP_SRL: 7882 case DIF_OP_SRA: 7883 case DIF_OP_SUB: 7884 case DIF_OP_ADD: 7885 case DIF_OP_MUL: 7886 case DIF_OP_SDIV: 7887 case DIF_OP_UDIV: 7888 case DIF_OP_SREM: 7889 case DIF_OP_UREM: 7890 case DIF_OP_COPYS: 7891 case DIF_OP_NOT: 7892 case DIF_OP_MOV: 7893 case DIF_OP_RLDSB: 7894 case DIF_OP_RLDSH: 7895 case DIF_OP_RLDSW: 7896 case DIF_OP_RLDUB: 7897 case DIF_OP_RLDUH: 7898 case DIF_OP_RLDUW: 7899 case DIF_OP_RLDX: 7900 case DIF_OP_ULDSB: 7901 case DIF_OP_ULDSH: 7902 case DIF_OP_ULDSW: 7903 case DIF_OP_ULDUB: 7904 case DIF_OP_ULDUH: 7905 case DIF_OP_ULDUW: 7906 case DIF_OP_ULDX: 7907 case DIF_OP_STB: 7908 case DIF_OP_STH: 7909 case DIF_OP_STW: 7910 case DIF_OP_STX: 7911 case DIF_OP_ALLOCS: 7912 case DIF_OP_CMP: 7913 case DIF_OP_SCMP: 7914 case DIF_OP_TST: 7915 case DIF_OP_BA: 7916 case DIF_OP_BE: 7917 case DIF_OP_BNE: 7918 case DIF_OP_BG: 7919 case DIF_OP_BGU: 7920 case DIF_OP_BGE: 7921 case DIF_OP_BGEU: 7922 case DIF_OP_BL: 7923 case DIF_OP_BLU: 7924 case DIF_OP_BLE: 7925 case DIF_OP_BLEU: 7926 case DIF_OP_RET: 7927 case DIF_OP_NOP: 7928 case DIF_OP_POPTS: 7929 case DIF_OP_FLUSHTS: 7930 case DIF_OP_SETX: 7931 case DIF_OP_SETS: 7932 case DIF_OP_LDGA: 7933 case DIF_OP_LDLS: 7934 case DIF_OP_STGS: 7935 case DIF_OP_STLS: 7936 case DIF_OP_PUSHTR: 7937 case DIF_OP_PUSHTV: 7938 break; 7939 7940 case DIF_OP_LDGS: 7941 if (v >= DIF_VAR_OTHER_UBASE) 7942 break; 7943 7944 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 7945 break; 7946 7947 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 7948 v == DIF_VAR_PPID || v == DIF_VAR_TID || 7949 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 7950 v == DIF_VAR_UID || v == DIF_VAR_GID) 7951 break; 7952 7953 err += efunc(pc, "illegal variable %u\n", v); 7954 break; 7955 7956 case DIF_OP_LDTA: 7957 case DIF_OP_LDTS: 7958 case DIF_OP_LDGAA: 7959 case DIF_OP_LDTAA: 7960 err += efunc(pc, "illegal dynamic variable load\n"); 7961 break; 7962 7963 case DIF_OP_STTS: 7964 case DIF_OP_STGAA: 7965 case DIF_OP_STTAA: 7966 err += efunc(pc, "illegal dynamic variable store\n"); 7967 break; 7968 7969 case DIF_OP_CALL: 7970 if (subr == DIF_SUBR_ALLOCA || 7971 subr == DIF_SUBR_BCOPY || 7972 subr == DIF_SUBR_COPYIN || 7973 subr == DIF_SUBR_COPYINTO || 7974 subr == DIF_SUBR_COPYINSTR || 7975 subr == DIF_SUBR_INDEX || 7976 subr == DIF_SUBR_LLTOSTR || 7977 subr == DIF_SUBR_RINDEX || 7978 subr == DIF_SUBR_STRCHR || 7979 subr == DIF_SUBR_STRJOIN || 7980 subr == DIF_SUBR_STRRCHR || 7981 subr == DIF_SUBR_STRSTR || 7982 subr == DIF_SUBR_HTONS || 7983 subr == DIF_SUBR_HTONL || 7984 subr == DIF_SUBR_HTONLL || 7985 subr == DIF_SUBR_NTOHS || 7986 subr == DIF_SUBR_NTOHL || 7987 subr == DIF_SUBR_NTOHLL) 7988 break; 7989 7990 err += efunc(pc, "invalid subr %u\n", subr); 7991 break; 7992 7993 default: 7994 err += efunc(pc, "invalid opcode %u\n", 7995 DIF_INSTR_OP(instr)); 7996 } 7997 } 7998 7999 return (err); 8000 } 8001 8002 /* 8003 * Returns 1 if the expression in the DIF object can be cached on a per-thread 8004 * basis; 0 if not. 
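 *
 * (For example: a DIFO whose only global variable references are to
 * DIF_VAR_CURTHREAD, DIF_VAR_PID, DIF_VAR_TID, DIF_VAR_EXECNAME or
 * DIF_VAR_ZONENAME may be cached on a per-thread basis, while a DIFO
 * containing any memory load, an array load such as DIF_OP_LDGA, or a
 * thread-local store (DIF_OP_STTS) may not -- exactly the checks made
 * below.)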
8005 */ 8006 static int 8007 dtrace_difo_cacheable(dtrace_difo_t *dp) 8008 { 8009 int i; 8010 8011 if (dp == NULL) 8012 return (0); 8013 8014 for (i = 0; i < dp->dtdo_varlen; i++) { 8015 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8016 8017 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 8018 continue; 8019 8020 switch (v->dtdv_id) { 8021 case DIF_VAR_CURTHREAD: 8022 case DIF_VAR_PID: 8023 case DIF_VAR_TID: 8024 case DIF_VAR_EXECNAME: 8025 case DIF_VAR_ZONENAME: 8026 break; 8027 8028 default: 8029 return (0); 8030 } 8031 } 8032 8033 /* 8034 * This DIF object may be cacheable. Now we need to look for any 8035 * array loading instructions, any memory loading instructions, or 8036 * any stores to thread-local variables. 8037 */ 8038 for (i = 0; i < dp->dtdo_len; i++) { 8039 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 8040 8041 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 8042 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 8043 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 8044 op == DIF_OP_LDGA || op == DIF_OP_STTS) 8045 return (0); 8046 } 8047 8048 return (1); 8049 } 8050 8051 static void 8052 dtrace_difo_hold(dtrace_difo_t *dp) 8053 { 8054 int i; 8055 8056 ASSERT(MUTEX_HELD(&dtrace_lock)); 8057 8058 dp->dtdo_refcnt++; 8059 ASSERT(dp->dtdo_refcnt != 0); 8060 8061 /* 8062 * We need to check this DIF object for references to the variable 8063 * DIF_VAR_VTIMESTAMP. 8064 */ 8065 for (i = 0; i < dp->dtdo_varlen; i++) { 8066 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8067 8068 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 8069 continue; 8070 8071 if (dtrace_vtime_references++ == 0) 8072 dtrace_vtime_enable(); 8073 } 8074 } 8075 8076 /* 8077 * This routine calculates the dynamic variable chunksize for a given DIF 8078 * object. The calculation is not fool-proof, and can probably be tricked by 8079 * malicious DIF -- but it works for all compiler-generated DIF. Because this 8080 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 8081 * if a dynamic variable size exceeds the chunksize. 
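 *
 * Roughly, for each dynamic variable store, the estimate computed below
 * is:
 *
 *	sizeof (dtrace_dynvar_t)
 *	  + sizeof (dtrace_key_t) * (nkeys - 1)
 *	  + the key data, each key rounded up to an 8-byte multiple
 *	  + the size of the stored value
 *
 * with the total itself rounded up to an 8-byte multiple.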
8082 */ 8083 static void 8084 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8085 { 8086 uint64_t sval; 8087 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 8088 const dif_instr_t *text = dp->dtdo_buf; 8089 uint_t pc, srd = 0; 8090 uint_t ttop = 0; 8091 size_t size, ksize; 8092 uint_t id, i; 8093 8094 for (pc = 0; pc < dp->dtdo_len; pc++) { 8095 dif_instr_t instr = text[pc]; 8096 uint_t op = DIF_INSTR_OP(instr); 8097 uint_t rd = DIF_INSTR_RD(instr); 8098 uint_t r1 = DIF_INSTR_R1(instr); 8099 uint_t nkeys = 0; 8100 uchar_t scope; 8101 8102 dtrace_key_t *key = tupregs; 8103 8104 switch (op) { 8105 case DIF_OP_SETX: 8106 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 8107 srd = rd; 8108 continue; 8109 8110 case DIF_OP_STTS: 8111 key = &tupregs[DIF_DTR_NREGS]; 8112 key[0].dttk_size = 0; 8113 key[1].dttk_size = 0; 8114 nkeys = 2; 8115 scope = DIFV_SCOPE_THREAD; 8116 break; 8117 8118 case DIF_OP_STGAA: 8119 case DIF_OP_STTAA: 8120 nkeys = ttop; 8121 8122 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 8123 key[nkeys++].dttk_size = 0; 8124 8125 key[nkeys++].dttk_size = 0; 8126 8127 if (op == DIF_OP_STTAA) { 8128 scope = DIFV_SCOPE_THREAD; 8129 } else { 8130 scope = DIFV_SCOPE_GLOBAL; 8131 } 8132 8133 break; 8134 8135 case DIF_OP_PUSHTR: 8136 if (ttop == DIF_DTR_NREGS) 8137 return; 8138 8139 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 8140 /* 8141 * If the register for the size of the "pushtr" 8142 * is %r0 (or the value is 0) and the type is 8143 * a string, we'll use the system-wide default 8144 * string size. 8145 */ 8146 tupregs[ttop++].dttk_size = 8147 dtrace_strsize_default; 8148 } else { 8149 if (srd == 0) 8150 return; 8151 8152 tupregs[ttop++].dttk_size = sval; 8153 } 8154 8155 break; 8156 8157 case DIF_OP_PUSHTV: 8158 if (ttop == DIF_DTR_NREGS) 8159 return; 8160 8161 tupregs[ttop++].dttk_size = 0; 8162 break; 8163 8164 case DIF_OP_FLUSHTS: 8165 ttop = 0; 8166 break; 8167 8168 case DIF_OP_POPTS: 8169 if (ttop != 0) 8170 ttop--; 8171 break; 8172 } 8173 8174 sval = 0; 8175 srd = 0; 8176 8177 if (nkeys == 0) 8178 continue; 8179 8180 /* 8181 * We have a dynamic variable allocation; calculate its size. 8182 */ 8183 for (ksize = 0, i = 0; i < nkeys; i++) 8184 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 8185 8186 size = sizeof (dtrace_dynvar_t); 8187 size += sizeof (dtrace_key_t) * (nkeys - 1); 8188 size += ksize; 8189 8190 /* 8191 * Now we need to determine the size of the stored data. 8192 */ 8193 id = DIF_INSTR_VAR(instr); 8194 8195 for (i = 0; i < dp->dtdo_varlen; i++) { 8196 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8197 8198 if (v->dtdv_id == id && v->dtdv_scope == scope) { 8199 size += v->dtdv_type.dtdt_size; 8200 break; 8201 } 8202 } 8203 8204 if (i == dp->dtdo_varlen) 8205 return; 8206 8207 /* 8208 * We have the size. If this is larger than the chunk size 8209 * for our dynamic variable state, reset the chunk size. 
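		 * (P2ROUNDUP() rounds up to the next multiple of eight
		 * bytes; a 52-byte total, for example, yields a 56-byte
		 * chunk.)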
8210 */ 8211 size = P2ROUNDUP(size, sizeof (uint64_t)); 8212 8213 if (size > vstate->dtvs_dynvars.dtds_chunksize) 8214 vstate->dtvs_dynvars.dtds_chunksize = size; 8215 } 8216 } 8217 8218 static void 8219 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8220 { 8221 int i, oldsvars, osz, nsz, otlocals, ntlocals; 8222 uint_t id; 8223 8224 ASSERT(MUTEX_HELD(&dtrace_lock)); 8225 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 8226 8227 for (i = 0; i < dp->dtdo_varlen; i++) { 8228 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8229 dtrace_statvar_t *svar, ***svarp; 8230 size_t dsize = 0; 8231 uint8_t scope = v->dtdv_scope; 8232 int *np; 8233 8234 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 8235 continue; 8236 8237 id -= DIF_VAR_OTHER_UBASE; 8238 8239 switch (scope) { 8240 case DIFV_SCOPE_THREAD: 8241 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 8242 dtrace_difv_t *tlocals; 8243 8244 if ((ntlocals = (otlocals << 1)) == 0) 8245 ntlocals = 1; 8246 8247 osz = otlocals * sizeof (dtrace_difv_t); 8248 nsz = ntlocals * sizeof (dtrace_difv_t); 8249 8250 tlocals = kmem_zalloc(nsz, KM_SLEEP); 8251 8252 if (osz != 0) { 8253 bcopy(vstate->dtvs_tlocals, 8254 tlocals, osz); 8255 kmem_free(vstate->dtvs_tlocals, osz); 8256 } 8257 8258 vstate->dtvs_tlocals = tlocals; 8259 vstate->dtvs_ntlocals = ntlocals; 8260 } 8261 8262 vstate->dtvs_tlocals[id] = *v; 8263 continue; 8264 8265 case DIFV_SCOPE_LOCAL: 8266 np = &vstate->dtvs_nlocals; 8267 svarp = &vstate->dtvs_locals; 8268 8269 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 8270 dsize = NCPU * (v->dtdv_type.dtdt_size + 8271 sizeof (uint64_t)); 8272 else 8273 dsize = NCPU * sizeof (uint64_t); 8274 8275 break; 8276 8277 case DIFV_SCOPE_GLOBAL: 8278 np = &vstate->dtvs_nglobals; 8279 svarp = &vstate->dtvs_globals; 8280 8281 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 8282 dsize = v->dtdv_type.dtdt_size + 8283 sizeof (uint64_t); 8284 8285 break; 8286 8287 default: 8288 ASSERT(0); 8289 } 8290 8291 while (id >= (oldsvars = *np)) { 8292 dtrace_statvar_t **statics; 8293 int newsvars, oldsize, newsize; 8294 8295 if ((newsvars = (oldsvars << 1)) == 0) 8296 newsvars = 1; 8297 8298 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 8299 newsize = newsvars * sizeof (dtrace_statvar_t *); 8300 8301 statics = kmem_zalloc(newsize, KM_SLEEP); 8302 8303 if (oldsize != 0) { 8304 bcopy(*svarp, statics, oldsize); 8305 kmem_free(*svarp, oldsize); 8306 } 8307 8308 *svarp = statics; 8309 *np = newsvars; 8310 } 8311 8312 if ((svar = (*svarp)[id]) == NULL) { 8313 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 8314 svar->dtsv_var = *v; 8315 8316 if ((svar->dtsv_size = dsize) != 0) { 8317 svar->dtsv_data = (uint64_t)(uintptr_t) 8318 kmem_zalloc(dsize, KM_SLEEP); 8319 } 8320 8321 (*svarp)[id] = svar; 8322 } 8323 8324 svar->dtsv_refcnt++; 8325 } 8326 8327 dtrace_difo_chunksize(dp, vstate); 8328 dtrace_difo_hold(dp); 8329 } 8330 8331 static dtrace_difo_t * 8332 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8333 { 8334 dtrace_difo_t *new; 8335 size_t sz; 8336 8337 ASSERT(dp->dtdo_buf != NULL); 8338 ASSERT(dp->dtdo_refcnt != 0); 8339 8340 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 8341 8342 ASSERT(dp->dtdo_buf != NULL); 8343 sz = dp->dtdo_len * sizeof (dif_instr_t); 8344 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 8345 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 8346 new->dtdo_len = dp->dtdo_len; 8347 8348 if (dp->dtdo_strtab != NULL) { 8349 ASSERT(dp->dtdo_strlen != 0); 8350 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 8351 
bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 8352 new->dtdo_strlen = dp->dtdo_strlen; 8353 } 8354 8355 if (dp->dtdo_inttab != NULL) { 8356 ASSERT(dp->dtdo_intlen != 0); 8357 sz = dp->dtdo_intlen * sizeof (uint64_t); 8358 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 8359 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 8360 new->dtdo_intlen = dp->dtdo_intlen; 8361 } 8362 8363 if (dp->dtdo_vartab != NULL) { 8364 ASSERT(dp->dtdo_varlen != 0); 8365 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 8366 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 8367 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 8368 new->dtdo_varlen = dp->dtdo_varlen; 8369 } 8370 8371 dtrace_difo_init(new, vstate); 8372 return (new); 8373 } 8374 8375 static void 8376 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8377 { 8378 int i; 8379 8380 ASSERT(dp->dtdo_refcnt == 0); 8381 8382 for (i = 0; i < dp->dtdo_varlen; i++) { 8383 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8384 dtrace_statvar_t *svar, **svarp; 8385 uint_t id; 8386 uint8_t scope = v->dtdv_scope; 8387 int *np; 8388 8389 switch (scope) { 8390 case DIFV_SCOPE_THREAD: 8391 continue; 8392 8393 case DIFV_SCOPE_LOCAL: 8394 np = &vstate->dtvs_nlocals; 8395 svarp = vstate->dtvs_locals; 8396 break; 8397 8398 case DIFV_SCOPE_GLOBAL: 8399 np = &vstate->dtvs_nglobals; 8400 svarp = vstate->dtvs_globals; 8401 break; 8402 8403 default: 8404 ASSERT(0); 8405 } 8406 8407 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 8408 continue; 8409 8410 id -= DIF_VAR_OTHER_UBASE; 8411 ASSERT(id < *np); 8412 8413 svar = svarp[id]; 8414 ASSERT(svar != NULL); 8415 ASSERT(svar->dtsv_refcnt > 0); 8416 8417 if (--svar->dtsv_refcnt > 0) 8418 continue; 8419 8420 if (svar->dtsv_size != 0) { 8421 ASSERT(svar->dtsv_data != NULL); 8422 kmem_free((void *)(uintptr_t)svar->dtsv_data, 8423 svar->dtsv_size); 8424 } 8425 8426 kmem_free(svar, sizeof (dtrace_statvar_t)); 8427 svarp[id] = NULL; 8428 } 8429 8430 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 8431 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 8432 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 8433 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 8434 8435 kmem_free(dp, sizeof (dtrace_difo_t)); 8436 } 8437 8438 static void 8439 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 8440 { 8441 int i; 8442 8443 ASSERT(MUTEX_HELD(&dtrace_lock)); 8444 ASSERT(dp->dtdo_refcnt != 0); 8445 8446 for (i = 0; i < dp->dtdo_varlen; i++) { 8447 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 8448 8449 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 8450 continue; 8451 8452 ASSERT(dtrace_vtime_references > 0); 8453 if (--dtrace_vtime_references == 0) 8454 dtrace_vtime_disable(); 8455 } 8456 8457 if (--dp->dtdo_refcnt == 0) 8458 dtrace_difo_destroy(dp, vstate); 8459 } 8460 8461 /* 8462 * DTrace Format Functions 8463 */ 8464 static uint16_t 8465 dtrace_format_add(dtrace_state_t *state, char *str) 8466 { 8467 char *fmt, **new; 8468 uint16_t ndx, len = strlen(str) + 1; 8469 8470 fmt = kmem_zalloc(len, KM_SLEEP); 8471 bcopy(str, fmt, len); 8472 8473 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 8474 if (state->dts_formats[ndx] == NULL) { 8475 state->dts_formats[ndx] = fmt; 8476 return (ndx + 1); 8477 } 8478 } 8479 8480 if (state->dts_nformats == USHRT_MAX) { 8481 /* 8482 * This is only likely if a denial-of-service attack is being 8483 * attempted. As such, it's okay to fail silently here. 
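		 * (Zero is never a valid format index -- the indices
		 * handed out below are 1-based -- so callers can take a
		 * return value of 0 to mean "no format".)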
8484 */ 8485 kmem_free(fmt, len); 8486 return (0); 8487 } 8488 8489 /* 8490 * For simplicity, we always resize the formats array to be exactly the 8491 * number of formats. 8492 */ 8493 ndx = state->dts_nformats++; 8494 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 8495 8496 if (state->dts_formats != NULL) { 8497 ASSERT(ndx != 0); 8498 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 8499 kmem_free(state->dts_formats, ndx * sizeof (char *)); 8500 } 8501 8502 state->dts_formats = new; 8503 state->dts_formats[ndx] = fmt; 8504 8505 return (ndx + 1); 8506 } 8507 8508 static void 8509 dtrace_format_remove(dtrace_state_t *state, uint16_t format) 8510 { 8511 char *fmt; 8512 8513 ASSERT(state->dts_formats != NULL); 8514 ASSERT(format <= state->dts_nformats); 8515 ASSERT(state->dts_formats[format - 1] != NULL); 8516 8517 fmt = state->dts_formats[format - 1]; 8518 kmem_free(fmt, strlen(fmt) + 1); 8519 state->dts_formats[format - 1] = NULL; 8520 } 8521 8522 static void 8523 dtrace_format_destroy(dtrace_state_t *state) 8524 { 8525 int i; 8526 8527 if (state->dts_nformats == 0) { 8528 ASSERT(state->dts_formats == NULL); 8529 return; 8530 } 8531 8532 ASSERT(state->dts_formats != NULL); 8533 8534 for (i = 0; i < state->dts_nformats; i++) { 8535 char *fmt = state->dts_formats[i]; 8536 8537 if (fmt == NULL) 8538 continue; 8539 8540 kmem_free(fmt, strlen(fmt) + 1); 8541 } 8542 8543 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 8544 state->dts_nformats = 0; 8545 state->dts_formats = NULL; 8546 } 8547 8548 /* 8549 * DTrace Predicate Functions 8550 */ 8551 static dtrace_predicate_t * 8552 dtrace_predicate_create(dtrace_difo_t *dp) 8553 { 8554 dtrace_predicate_t *pred; 8555 8556 ASSERT(MUTEX_HELD(&dtrace_lock)); 8557 ASSERT(dp->dtdo_refcnt != 0); 8558 8559 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 8560 pred->dtp_difo = dp; 8561 pred->dtp_refcnt = 1; 8562 8563 if (!dtrace_difo_cacheable(dp)) 8564 return (pred); 8565 8566 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 8567 /* 8568 * This is only theoretically possible -- we have had 2^32 8569 * cacheable predicates on this machine. We cannot allow any 8570 * more predicates to become cacheable: as unlikely as it is, 8571 * there may be a thread caching a (now stale) predicate cache 8572 * ID. 
(N.B.: the temptation is being successfully resisted to 8573 * have this cmn_err() "Holy shit -- we executed this code!") 8574 */ 8575 return (pred); 8576 } 8577 8578 pred->dtp_cacheid = dtrace_predcache_id++; 8579 8580 return (pred); 8581 } 8582 8583 static void 8584 dtrace_predicate_hold(dtrace_predicate_t *pred) 8585 { 8586 ASSERT(MUTEX_HELD(&dtrace_lock)); 8587 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 8588 ASSERT(pred->dtp_refcnt > 0); 8589 8590 pred->dtp_refcnt++; 8591 } 8592 8593 static void 8594 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 8595 { 8596 dtrace_difo_t *dp = pred->dtp_difo; 8597 8598 ASSERT(MUTEX_HELD(&dtrace_lock)); 8599 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 8600 ASSERT(pred->dtp_refcnt > 0); 8601 8602 if (--pred->dtp_refcnt == 0) { 8603 dtrace_difo_release(pred->dtp_difo, vstate); 8604 kmem_free(pred, sizeof (dtrace_predicate_t)); 8605 } 8606 } 8607 8608 /* 8609 * DTrace Action Description Functions 8610 */ 8611 static dtrace_actdesc_t * 8612 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 8613 uint64_t uarg, uint64_t arg) 8614 { 8615 dtrace_actdesc_t *act; 8616 8617 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 8618 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 8619 8620 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 8621 act->dtad_kind = kind; 8622 act->dtad_ntuple = ntuple; 8623 act->dtad_uarg = uarg; 8624 act->dtad_arg = arg; 8625 act->dtad_refcnt = 1; 8626 8627 return (act); 8628 } 8629 8630 static void 8631 dtrace_actdesc_hold(dtrace_actdesc_t *act) 8632 { 8633 ASSERT(act->dtad_refcnt >= 1); 8634 act->dtad_refcnt++; 8635 } 8636 8637 static void 8638 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 8639 { 8640 dtrace_actkind_t kind = act->dtad_kind; 8641 dtrace_difo_t *dp; 8642 8643 ASSERT(act->dtad_refcnt >= 1); 8644 8645 if (--act->dtad_refcnt != 0) 8646 return; 8647 8648 if ((dp = act->dtad_difo) != NULL) 8649 dtrace_difo_release(dp, vstate); 8650 8651 if (DTRACEACT_ISPRINTFLIKE(kind)) { 8652 char *str = (char *)(uintptr_t)act->dtad_arg; 8653 8654 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 8655 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 8656 8657 if (str != NULL) 8658 kmem_free(str, strlen(str) + 1); 8659 } 8660 8661 kmem_free(act, sizeof (dtrace_actdesc_t)); 8662 } 8663 8664 /* 8665 * DTrace ECB Functions 8666 */ 8667 static dtrace_ecb_t * 8668 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 8669 { 8670 dtrace_ecb_t *ecb; 8671 dtrace_epid_t epid; 8672 8673 ASSERT(MUTEX_HELD(&dtrace_lock)); 8674 8675 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 8676 ecb->dte_predicate = NULL; 8677 ecb->dte_probe = probe; 8678 8679 /* 8680 * The default size is the size of the default action: recording 8681 * the epid. 
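	 * (A dtrace_epid_t is a 32-bit quantity, so an ECB with no
	 * actions consumes just four bytes of buffer space per firing.)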
8682 */ 8683 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 8684 ecb->dte_alignment = sizeof (dtrace_epid_t); 8685 8686 epid = state->dts_epid++; 8687 8688 if (epid - 1 >= state->dts_necbs) { 8689 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 8690 int necbs = state->dts_necbs << 1; 8691 8692 ASSERT(epid == state->dts_necbs + 1); 8693 8694 if (necbs == 0) { 8695 ASSERT(oecbs == NULL); 8696 necbs = 1; 8697 } 8698 8699 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 8700 8701 if (oecbs != NULL) 8702 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 8703 8704 dtrace_membar_producer(); 8705 state->dts_ecbs = ecbs; 8706 8707 if (oecbs != NULL) { 8708 /* 8709 * If this state is active, we must dtrace_sync() 8710 * before we can free the old dts_ecbs array: we're 8711 * coming in hot, and there may be active ring 8712 * buffer processing (which indexes into the dts_ecbs 8713 * array) on another CPU. 8714 */ 8715 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 8716 dtrace_sync(); 8717 8718 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 8719 } 8720 8721 dtrace_membar_producer(); 8722 state->dts_necbs = necbs; 8723 } 8724 8725 ecb->dte_state = state; 8726 8727 ASSERT(state->dts_ecbs[epid - 1] == NULL); 8728 dtrace_membar_producer(); 8729 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 8730 8731 return (ecb); 8732 } 8733 8734 static void 8735 dtrace_ecb_enable(dtrace_ecb_t *ecb) 8736 { 8737 dtrace_probe_t *probe = ecb->dte_probe; 8738 8739 ASSERT(MUTEX_HELD(&cpu_lock)); 8740 ASSERT(MUTEX_HELD(&dtrace_lock)); 8741 ASSERT(ecb->dte_next == NULL); 8742 8743 if (probe == NULL) { 8744 /* 8745 * This is the NULL probe -- there's nothing to do. 8746 */ 8747 return; 8748 } 8749 8750 if (probe->dtpr_ecb == NULL) { 8751 dtrace_provider_t *prov = probe->dtpr_provider; 8752 8753 /* 8754 * We're the first ECB on this probe. 8755 */ 8756 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 8757 8758 if (ecb->dte_predicate != NULL) 8759 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 8760 8761 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 8762 probe->dtpr_id, probe->dtpr_arg); 8763 } else { 8764 /* 8765 * This probe is already active. Swing the last pointer to 8766 * point to the new ECB, and issue a dtrace_sync() to assure 8767 * that all CPUs have seen the change. 8768 */ 8769 ASSERT(probe->dtpr_ecb_last != NULL); 8770 probe->dtpr_ecb_last->dte_next = ecb; 8771 probe->dtpr_ecb_last = ecb; 8772 probe->dtpr_predcache = 0; 8773 8774 dtrace_sync(); 8775 } 8776 } 8777 8778 static void 8779 dtrace_ecb_resize(dtrace_ecb_t *ecb) 8780 { 8781 uint32_t maxalign = sizeof (dtrace_epid_t); 8782 uint32_t align = sizeof (uint8_t), offs, diff; 8783 dtrace_action_t *act; 8784 int wastuple = 0; 8785 uint32_t aggbase = UINT32_MAX; 8786 dtrace_state_t *state = ecb->dte_state; 8787 8788 /* 8789 * If we record anything, we always record the epid. (And we always 8790 * record it first.) 8791 */ 8792 offs = sizeof (dtrace_epid_t); 8793 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 8794 8795 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 8796 dtrace_recdesc_t *rec = &act->dta_rec; 8797 8798 if ((align = rec->dtrd_alignment) > maxalign) 8799 maxalign = align; 8800 8801 if (!wastuple && act->dta_intuple) { 8802 /* 8803 * This is the first record in a tuple. Align the 8804 * offset to be at offset 4 in an 8-byte aligned 8805 * block. 
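			 * For example, with offs at 8, diff is
			 * (8 + 4) & 7 = 4; four bytes of padding advance
			 * offs to 12, the 32-bit aggregation ID occupies
			 * bytes 8 through 11, and the tuple data begins
			 * at offset 4 within its 8-byte aligned block.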
8806 */ 8807 diff = offs + sizeof (dtrace_aggid_t); 8808 8809 if (diff = (diff & (sizeof (uint64_t) - 1))) 8810 offs += sizeof (uint64_t) - diff; 8811 8812 aggbase = offs - sizeof (dtrace_aggid_t); 8813 ASSERT(!(aggbase & (sizeof (uint64_t) - 1))); 8814 } 8815 8816 /*LINTED*/ 8817 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) { 8818 /* 8819 * The current offset is not properly aligned; align it. 8820 */ 8821 offs += align - diff; 8822 } 8823 8824 rec->dtrd_offset = offs; 8825 8826 if (offs + rec->dtrd_size > ecb->dte_needed) { 8827 ecb->dte_needed = offs + rec->dtrd_size; 8828 8829 if (ecb->dte_needed > state->dts_needed) 8830 state->dts_needed = ecb->dte_needed; 8831 } 8832 8833 if (DTRACEACT_ISAGG(act->dta_kind)) { 8834 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 8835 dtrace_action_t *first = agg->dtag_first, *prev; 8836 8837 ASSERT(rec->dtrd_size != 0 && first != NULL); 8838 ASSERT(wastuple); 8839 ASSERT(aggbase != UINT32_MAX); 8840 8841 agg->dtag_base = aggbase; 8842 8843 while ((prev = first->dta_prev) != NULL && 8844 DTRACEACT_ISAGG(prev->dta_kind)) { 8845 agg = (dtrace_aggregation_t *)prev; 8846 first = agg->dtag_first; 8847 } 8848 8849 if (prev != NULL) { 8850 offs = prev->dta_rec.dtrd_offset + 8851 prev->dta_rec.dtrd_size; 8852 } else { 8853 offs = sizeof (dtrace_epid_t); 8854 } 8855 wastuple = 0; 8856 } else { 8857 if (!act->dta_intuple) 8858 ecb->dte_size = offs + rec->dtrd_size; 8859 8860 offs += rec->dtrd_size; 8861 } 8862 8863 wastuple = act->dta_intuple; 8864 } 8865 8866 if ((act = ecb->dte_action) != NULL && 8867 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 8868 ecb->dte_size == sizeof (dtrace_epid_t)) { 8869 /* 8870 * If the size is still sizeof (dtrace_epid_t), then all 8871 * actions store no data; set the size to 0. 8872 */ 8873 ecb->dte_alignment = maxalign; 8874 ecb->dte_size = 0; 8875 8876 /* 8877 * If the needed space is still sizeof (dtrace_epid_t), then 8878 * all actions need no additional space; set the needed 8879 * size to 0. 8880 */ 8881 if (ecb->dte_needed == sizeof (dtrace_epid_t)) 8882 ecb->dte_needed = 0; 8883 8884 return; 8885 } 8886 8887 /* 8888 * Set our alignment, and make sure that the dte_size and dte_needed 8889 * are aligned to the size of an EPID. 
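	 * (For instance, a dte_size of 13 is rounded up to 16 by the
	 * masking below.)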
8890 */ 8891 ecb->dte_alignment = maxalign; 8892 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) & 8893 ~(sizeof (dtrace_epid_t) - 1); 8894 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) & 8895 ~(sizeof (dtrace_epid_t) - 1); 8896 ASSERT(ecb->dte_size <= ecb->dte_needed); 8897 } 8898 8899 static dtrace_action_t * 8900 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 8901 { 8902 dtrace_aggregation_t *agg; 8903 size_t size = sizeof (uint64_t); 8904 int ntuple = desc->dtad_ntuple; 8905 dtrace_action_t *act; 8906 dtrace_recdesc_t *frec; 8907 dtrace_aggid_t aggid; 8908 dtrace_state_t *state = ecb->dte_state; 8909 8910 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 8911 agg->dtag_ecb = ecb; 8912 8913 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 8914 8915 switch (desc->dtad_kind) { 8916 case DTRACEAGG_MIN: 8917 agg->dtag_initial = UINT64_MAX; 8918 agg->dtag_aggregate = dtrace_aggregate_min; 8919 break; 8920 8921 case DTRACEAGG_MAX: 8922 agg->dtag_aggregate = dtrace_aggregate_max; 8923 break; 8924 8925 case DTRACEAGG_COUNT: 8926 agg->dtag_aggregate = dtrace_aggregate_count; 8927 break; 8928 8929 case DTRACEAGG_QUANTIZE: 8930 agg->dtag_aggregate = dtrace_aggregate_quantize; 8931 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 8932 sizeof (uint64_t); 8933 break; 8934 8935 case DTRACEAGG_LQUANTIZE: { 8936 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 8937 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 8938 8939 agg->dtag_initial = desc->dtad_arg; 8940 agg->dtag_aggregate = dtrace_aggregate_lquantize; 8941 8942 if (step == 0 || levels == 0) 8943 goto err; 8944 8945 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 8946 break; 8947 } 8948 8949 case DTRACEAGG_AVG: 8950 agg->dtag_aggregate = dtrace_aggregate_avg; 8951 size = sizeof (uint64_t) * 2; 8952 break; 8953 8954 case DTRACEAGG_SUM: 8955 agg->dtag_aggregate = dtrace_aggregate_sum; 8956 break; 8957 8958 default: 8959 goto err; 8960 } 8961 8962 agg->dtag_action.dta_rec.dtrd_size = size; 8963 8964 if (ntuple == 0) 8965 goto err; 8966 8967 /* 8968 * We must make sure that we have enough actions for the n-tuple. 8969 */ 8970 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 8971 if (DTRACEACT_ISAGG(act->dta_kind)) 8972 break; 8973 8974 if (--ntuple == 0) { 8975 /* 8976 * This is the action with which our n-tuple begins. 8977 */ 8978 agg->dtag_first = act; 8979 goto success; 8980 } 8981 } 8982 8983 /* 8984 * This n-tuple is short by ntuple elements. Return failure. 8985 */ 8986 ASSERT(ntuple != 0); 8987 err: 8988 kmem_free(agg, sizeof (dtrace_aggregation_t)); 8989 return (NULL); 8990 8991 success: 8992 /* 8993 * If the last action in the tuple has a size of zero, it's actually 8994 * an expression argument for the aggregating action. 8995 */ 8996 ASSERT(ecb->dte_action_last != NULL); 8997 act = ecb->dte_action_last; 8998 8999 if (act->dta_kind == DTRACEACT_DIFEXPR) { 9000 ASSERT(act->dta_difo != NULL); 9001 9002 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 9003 agg->dtag_hasarg = 1; 9004 } 9005 9006 /* 9007 * We need to allocate an id for this aggregation. 
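	 * (IDs are drawn from a vmem arena so that they can be recycled
	 * as aggregations are destroyed; as with EPIDs, they are 1-based,
	 * with 0 reserved to mean "no aggregation".)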
9008 */ 9009 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 9010 VM_BESTFIT | VM_SLEEP); 9011 9012 if (aggid - 1 >= state->dts_naggregations) { 9013 dtrace_aggregation_t **oaggs = state->dts_aggregations; 9014 dtrace_aggregation_t **aggs; 9015 int naggs = state->dts_naggregations << 1; 9016 int onaggs = state->dts_naggregations; 9017 9018 ASSERT(aggid == state->dts_naggregations + 1); 9019 9020 if (naggs == 0) { 9021 ASSERT(oaggs == NULL); 9022 naggs = 1; 9023 } 9024 9025 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 9026 9027 if (oaggs != NULL) { 9028 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 9029 kmem_free(oaggs, onaggs * sizeof (*aggs)); 9030 } 9031 9032 state->dts_aggregations = aggs; 9033 state->dts_naggregations = naggs; 9034 } 9035 9036 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 9037 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 9038 9039 frec = &agg->dtag_first->dta_rec; 9040 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 9041 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 9042 9043 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 9044 ASSERT(!act->dta_intuple); 9045 act->dta_intuple = 1; 9046 } 9047 9048 return (&agg->dtag_action); 9049 } 9050 9051 static void 9052 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 9053 { 9054 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9055 dtrace_state_t *state = ecb->dte_state; 9056 dtrace_aggid_t aggid = agg->dtag_id; 9057 9058 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 9059 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 9060 9061 ASSERT(state->dts_aggregations[aggid - 1] == agg); 9062 state->dts_aggregations[aggid - 1] = NULL; 9063 9064 kmem_free(agg, sizeof (dtrace_aggregation_t)); 9065 } 9066 9067 static int 9068 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9069 { 9070 dtrace_action_t *action, *last; 9071 dtrace_difo_t *dp = desc->dtad_difo; 9072 uint32_t size = 0, align = sizeof (uint8_t), mask; 9073 uint16_t format = 0; 9074 dtrace_recdesc_t *rec; 9075 dtrace_state_t *state = ecb->dte_state; 9076 dtrace_optval_t *opt = state->dts_options, nframes, strsize; 9077 uint64_t arg = desc->dtad_arg; 9078 9079 ASSERT(MUTEX_HELD(&dtrace_lock)); 9080 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 9081 9082 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 9083 /* 9084 * If this is an aggregating action, there must be neither 9085 * a speculate nor a commit on the action chain. 9086 */ 9087 dtrace_action_t *act; 9088 9089 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9090 if (act->dta_kind == DTRACEACT_COMMIT) 9091 return (EINVAL); 9092 9093 if (act->dta_kind == DTRACEACT_SPECULATE) 9094 return (EINVAL); 9095 } 9096 9097 action = dtrace_ecb_aggregation_create(ecb, desc); 9098 9099 if (action == NULL) 9100 return (EINVAL); 9101 } else { 9102 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 9103 (desc->dtad_kind == DTRACEACT_DIFEXPR && 9104 dp != NULL && dp->dtdo_destructive)) { 9105 state->dts_destructive = 1; 9106 } 9107 9108 switch (desc->dtad_kind) { 9109 case DTRACEACT_PRINTF: 9110 case DTRACEACT_PRINTA: 9111 case DTRACEACT_SYSTEM: 9112 case DTRACEACT_FREOPEN: 9113 /* 9114 * We know that our arg is a string -- turn it into a 9115 * format. 
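			 * For example, the D action:
			 *
			 *	printf("%d ns\n", timestamp);
			 *
			 * arrives here with dtad_arg pointing at the
			 * string "%d ns\n"; the string is registered with
			 * the state and referred to thereafter by its
			 * 1-based format index.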
9116 */ 9117 if (arg == NULL) { 9118 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA); 9119 format = 0; 9120 } else { 9121 ASSERT(arg != NULL); 9122 ASSERT(arg > KERNELBASE); 9123 format = dtrace_format_add(state, 9124 (char *)(uintptr_t)arg); 9125 } 9126 9127 /*FALLTHROUGH*/ 9128 case DTRACEACT_LIBACT: 9129 case DTRACEACT_DIFEXPR: 9130 if (dp == NULL) 9131 return (EINVAL); 9132 9133 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 9134 break; 9135 9136 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 9137 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 9138 return (EINVAL); 9139 9140 size = opt[DTRACEOPT_STRSIZE]; 9141 } 9142 9143 break; 9144 9145 case DTRACEACT_STACK: 9146 if ((nframes = arg) == 0) { 9147 nframes = opt[DTRACEOPT_STACKFRAMES]; 9148 ASSERT(nframes > 0); 9149 arg = nframes; 9150 } 9151 9152 size = nframes * sizeof (pc_t); 9153 break; 9154 9155 case DTRACEACT_JSTACK: 9156 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 9157 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 9158 9159 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 9160 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 9161 9162 arg = DTRACE_USTACK_ARG(nframes, strsize); 9163 9164 /*FALLTHROUGH*/ 9165 case DTRACEACT_USTACK: 9166 if (desc->dtad_kind != DTRACEACT_JSTACK && 9167 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 9168 strsize = DTRACE_USTACK_STRSIZE(arg); 9169 nframes = opt[DTRACEOPT_USTACKFRAMES]; 9170 ASSERT(nframes > 0); 9171 arg = DTRACE_USTACK_ARG(nframes, strsize); 9172 } 9173 9174 /* 9175 * Save a slot for the pid. 9176 */ 9177 size = (nframes + 1) * sizeof (uint64_t); 9178 size += DTRACE_USTACK_STRSIZE(arg); 9179 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 9180 9181 break; 9182 9183 case DTRACEACT_SYM: 9184 case DTRACEACT_MOD: 9185 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 9186 sizeof (uint64_t)) || 9187 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 9188 return (EINVAL); 9189 break; 9190 9191 case DTRACEACT_USYM: 9192 case DTRACEACT_UMOD: 9193 case DTRACEACT_UADDR: 9194 if (dp == NULL || 9195 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 9196 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 9197 return (EINVAL); 9198 9199 /* 9200 * We have a slot for the pid, plus a slot for the 9201 * argument. To keep things simple (aligned with 9202 * bitness-neutral sizing), we store each as a 64-bit 9203 * quantity. 
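			 * (This spares consumers from having to know
			 * whether the traced process is 32-bit or 64-bit
			 * in order to walk the record.)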
9204 */ 9205 size = 2 * sizeof (uint64_t); 9206 break; 9207 9208 case DTRACEACT_STOP: 9209 case DTRACEACT_BREAKPOINT: 9210 case DTRACEACT_PANIC: 9211 break; 9212 9213 case DTRACEACT_CHILL: 9214 case DTRACEACT_DISCARD: 9215 case DTRACEACT_RAISE: 9216 if (dp == NULL) 9217 return (EINVAL); 9218 break; 9219 9220 case DTRACEACT_EXIT: 9221 if (dp == NULL || 9222 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 9223 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 9224 return (EINVAL); 9225 break; 9226 9227 case DTRACEACT_SPECULATE: 9228 if (ecb->dte_size > sizeof (dtrace_epid_t)) 9229 return (EINVAL); 9230 9231 if (dp == NULL) 9232 return (EINVAL); 9233 9234 state->dts_speculates = 1; 9235 break; 9236 9237 case DTRACEACT_COMMIT: { 9238 dtrace_action_t *act = ecb->dte_action; 9239 9240 for (; act != NULL; act = act->dta_next) { 9241 if (act->dta_kind == DTRACEACT_COMMIT) 9242 return (EINVAL); 9243 } 9244 9245 if (dp == NULL) 9246 return (EINVAL); 9247 break; 9248 } 9249 9250 default: 9251 return (EINVAL); 9252 } 9253 9254 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 9255 /* 9256 * If this is a data-storing action or a speculate, 9257 * we must be sure that there isn't a commit on the 9258 * action chain. 9259 */ 9260 dtrace_action_t *act = ecb->dte_action; 9261 9262 for (; act != NULL; act = act->dta_next) { 9263 if (act->dta_kind == DTRACEACT_COMMIT) 9264 return (EINVAL); 9265 } 9266 } 9267 9268 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 9269 action->dta_rec.dtrd_size = size; 9270 } 9271 9272 action->dta_refcnt = 1; 9273 rec = &action->dta_rec; 9274 size = rec->dtrd_size; 9275 9276 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 9277 if (!(size & mask)) { 9278 align = mask + 1; 9279 break; 9280 } 9281 } 9282 9283 action->dta_kind = desc->dtad_kind; 9284 9285 if ((action->dta_difo = dp) != NULL) 9286 dtrace_difo_hold(dp); 9287 9288 rec->dtrd_action = action->dta_kind; 9289 rec->dtrd_arg = arg; 9290 rec->dtrd_uarg = desc->dtad_uarg; 9291 rec->dtrd_alignment = (uint16_t)align; 9292 rec->dtrd_format = format; 9293 9294 if ((last = ecb->dte_action_last) != NULL) { 9295 ASSERT(ecb->dte_action != NULL); 9296 action->dta_prev = last; 9297 last->dta_next = action; 9298 } else { 9299 ASSERT(ecb->dte_action == NULL); 9300 ecb->dte_action = action; 9301 } 9302 9303 ecb->dte_action_last = action; 9304 9305 return (0); 9306 } 9307 9308 static void 9309 dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 9310 { 9311 dtrace_action_t *act = ecb->dte_action, *next; 9312 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 9313 dtrace_difo_t *dp; 9314 uint16_t format; 9315 9316 if (act != NULL && act->dta_refcnt > 1) { 9317 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 9318 act->dta_refcnt--; 9319 } else { 9320 for (; act != NULL; act = next) { 9321 next = act->dta_next; 9322 ASSERT(next != NULL || act == ecb->dte_action_last); 9323 ASSERT(act->dta_refcnt == 1); 9324 9325 if ((format = act->dta_rec.dtrd_format) != 0) 9326 dtrace_format_remove(ecb->dte_state, format); 9327 9328 if ((dp = act->dta_difo) != NULL) 9329 dtrace_difo_release(dp, vstate); 9330 9331 if (DTRACEACT_ISAGG(act->dta_kind)) { 9332 dtrace_ecb_aggregation_destroy(ecb, act); 9333 } else { 9334 kmem_free(act, sizeof (dtrace_action_t)); 9335 } 9336 } 9337 } 9338 9339 ecb->dte_action = NULL; 9340 ecb->dte_action_last = NULL; 9341 ecb->dte_size = sizeof (dtrace_epid_t); 9342 } 9343 9344 static void 9345 dtrace_ecb_disable(dtrace_ecb_t *ecb) 9346 { 9347 /* 9348 * We disable the ECB by 
removing it from its probe. 9349 */ 9350 dtrace_ecb_t *pecb, *prev = NULL; 9351 dtrace_probe_t *probe = ecb->dte_probe; 9352 9353 ASSERT(MUTEX_HELD(&dtrace_lock)); 9354 9355 if (probe == NULL) { 9356 /* 9357 * This is the NULL probe; there is nothing to disable. 9358 */ 9359 return; 9360 } 9361 9362 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 9363 if (pecb == ecb) 9364 break; 9365 prev = pecb; 9366 } 9367 9368 ASSERT(pecb != NULL); 9369 9370 if (prev == NULL) { 9371 probe->dtpr_ecb = ecb->dte_next; 9372 } else { 9373 prev->dte_next = ecb->dte_next; 9374 } 9375 9376 if (ecb == probe->dtpr_ecb_last) { 9377 ASSERT(ecb->dte_next == NULL); 9378 probe->dtpr_ecb_last = prev; 9379 } 9380 9381 /* 9382 * The ECB has been disconnected from the probe; now sync to assure 9383 * that all CPUs have seen the change before returning. 9384 */ 9385 dtrace_sync(); 9386 9387 if (probe->dtpr_ecb == NULL) { 9388 /* 9389 * That was the last ECB on the probe; clear the predicate 9390 * cache ID for the probe, disable it and sync one more time 9391 * to assure that we'll never hit it again. 9392 */ 9393 dtrace_provider_t *prov = probe->dtpr_provider; 9394 9395 ASSERT(ecb->dte_next == NULL); 9396 ASSERT(probe->dtpr_ecb_last == NULL); 9397 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 9398 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 9399 probe->dtpr_id, probe->dtpr_arg); 9400 dtrace_sync(); 9401 } else { 9402 /* 9403 * There is at least one ECB remaining on the probe. If there 9404 * is _exactly_ one, set the probe's predicate cache ID to be 9405 * the predicate cache ID of the remaining ECB. 9406 */ 9407 ASSERT(probe->dtpr_ecb_last != NULL); 9408 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 9409 9410 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 9411 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 9412 9413 ASSERT(probe->dtpr_ecb->dte_next == NULL); 9414 9415 if (p != NULL) 9416 probe->dtpr_predcache = p->dtp_cacheid; 9417 } 9418 9419 ecb->dte_next = NULL; 9420 } 9421 } 9422 9423 static void 9424 dtrace_ecb_destroy(dtrace_ecb_t *ecb) 9425 { 9426 dtrace_state_t *state = ecb->dte_state; 9427 dtrace_vstate_t *vstate = &state->dts_vstate; 9428 dtrace_predicate_t *pred; 9429 dtrace_epid_t epid = ecb->dte_epid; 9430 9431 ASSERT(MUTEX_HELD(&dtrace_lock)); 9432 ASSERT(ecb->dte_next == NULL); 9433 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 9434 9435 if ((pred = ecb->dte_predicate) != NULL) 9436 dtrace_predicate_release(pred, vstate); 9437 9438 dtrace_ecb_action_remove(ecb); 9439 9440 ASSERT(state->dts_ecbs[epid - 1] == ecb); 9441 state->dts_ecbs[epid - 1] = NULL; 9442 9443 kmem_free(ecb, sizeof (dtrace_ecb_t)); 9444 } 9445 9446 static dtrace_ecb_t * 9447 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 9448 dtrace_enabling_t *enab) 9449 { 9450 dtrace_ecb_t *ecb; 9451 dtrace_predicate_t *pred; 9452 dtrace_actdesc_t *act; 9453 dtrace_provider_t *prov; 9454 dtrace_ecbdesc_t *desc = enab->dten_current; 9455 9456 ASSERT(MUTEX_HELD(&dtrace_lock)); 9457 ASSERT(state != NULL); 9458 9459 ecb = dtrace_ecb_add(state, probe); 9460 ecb->dte_uarg = desc->dted_uarg; 9461 9462 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 9463 dtrace_predicate_hold(pred); 9464 ecb->dte_predicate = pred; 9465 } 9466 9467 if (probe != NULL) { 9468 /* 9469 * If the provider shows more leg than the consumer is old 9470 * enough to see, we need to enable the appropriate implicit 9471 * predicate bits to prevent the ecb from activating at 9472 * revealing times. 
9473 * 9474 * Providers specifying DTRACE_PRIV_USER at register time 9475 * are stating that they need the /proc-style privilege 9476 * model to be enforced, and this is what DTRACE_COND_OWNER 9477 * and DTRACE_COND_ZONEOWNER will then do at probe time. 9478 */ 9479 prov = probe->dtpr_provider; 9480 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 9481 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 9482 ecb->dte_cond |= DTRACE_COND_OWNER; 9483 9484 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 9485 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 9486 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 9487 9488 /* 9489 * If the provider shows us kernel innards and the user 9490 * is lacking sufficient privilege, enable the 9491 * DTRACE_COND_USERMODE implicit predicate. 9492 */ 9493 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 9494 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 9495 ecb->dte_cond |= DTRACE_COND_USERMODE; 9496 } 9497 9498 if (dtrace_ecb_create_cache != NULL) { 9499 /* 9500 * If we have a cached ecb, we'll use its action list instead 9501 * of creating our own (saving both time and space). 9502 */ 9503 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 9504 dtrace_action_t *act = cached->dte_action; 9505 9506 if (act != NULL) { 9507 ASSERT(act->dta_refcnt > 0); 9508 act->dta_refcnt++; 9509 ecb->dte_action = act; 9510 ecb->dte_action_last = cached->dte_action_last; 9511 ecb->dte_needed = cached->dte_needed; 9512 ecb->dte_size = cached->dte_size; 9513 ecb->dte_alignment = cached->dte_alignment; 9514 } 9515 9516 return (ecb); 9517 } 9518 9519 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 9520 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 9521 dtrace_ecb_destroy(ecb); 9522 return (NULL); 9523 } 9524 } 9525 9526 dtrace_ecb_resize(ecb); 9527 9528 return (dtrace_ecb_create_cache = ecb); 9529 } 9530 9531 static int 9532 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 9533 { 9534 dtrace_ecb_t *ecb; 9535 dtrace_enabling_t *enab = arg; 9536 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 9537 9538 ASSERT(state != NULL); 9539 9540 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 9541 /* 9542 * This probe was created in a generation for which this 9543 * enabling has previously created ECBs; we don't want to 9544 * enable it again, so just kick out. 
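		 * (dten_probegen records the probe-creation generation as
		 * of the enabling's last match; only probes created since
		 * then require new ECBs.)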
9545 		 */
9546 		return (DTRACE_MATCH_NEXT);
9547 	}
9548
9549 	if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL)
9550 		return (DTRACE_MATCH_DONE);
9551
9552 	dtrace_ecb_enable(ecb);
9553 	return (DTRACE_MATCH_NEXT);
9554 }
9555
9556 static dtrace_ecb_t *
9557 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id)
9558 {
9559 	dtrace_ecb_t *ecb;
9560
9561 	ASSERT(MUTEX_HELD(&dtrace_lock));
9562
9563 	if (id == 0 || id > state->dts_necbs)
9564 		return (NULL);
9565
9566 	ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL);
9567 	ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id);
9568
9569 	return (state->dts_ecbs[id - 1]);
9570 }
9571
9572 static dtrace_aggregation_t *
9573 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id)
9574 {
9575 	dtrace_aggregation_t *agg;
9576
9577 	ASSERT(MUTEX_HELD(&dtrace_lock));
9578
9579 	if (id == 0 || id > state->dts_naggregations)
9580 		return (NULL);
9581
9582 	ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL);
9583 	ASSERT((agg = state->dts_aggregations[id - 1]) == NULL ||
9584 	    agg->dtag_id == id);
9585
9586 	return (state->dts_aggregations[id - 1]);
9587 }
9588
9589 /*
9590  * DTrace Buffer Functions
9591  *
9592  * The following functions manipulate DTrace buffers.  Most of these functions
9593  * are called in the context of establishing or processing consumer state;
9594  * exceptions are explicitly noted.
9595  */
9596
9597 /*
9598  * Note: called from cross call context.  This function switches the two
9599  * buffers on a given CPU.  The atomicity of this operation is assured by
9600  * disabling interrupts while the actual switch takes place; the disabling of
9601  * interrupts serializes the execution with any execution of dtrace_probe() on
9602  * the same CPU.
9603  */
9604 static void
9605 dtrace_buffer_switch(dtrace_buffer_t *buf)
9606 {
9607 	caddr_t tomax = buf->dtb_tomax;
9608 	caddr_t xamot = buf->dtb_xamot;
9609 	dtrace_icookie_t cookie;
9610
9611 	ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
9612 	ASSERT(!(buf->dtb_flags & DTRACEBUF_RING));
9613
9614 	cookie = dtrace_interrupt_disable();
9615 	buf->dtb_tomax = xamot;
9616 	buf->dtb_xamot = tomax;
9617 	buf->dtb_xamot_drops = buf->dtb_drops;
9618 	buf->dtb_xamot_offset = buf->dtb_offset;
9619 	buf->dtb_xamot_errors = buf->dtb_errors;
9620 	buf->dtb_xamot_flags = buf->dtb_flags;
9621 	buf->dtb_offset = 0;
9622 	buf->dtb_drops = 0;
9623 	buf->dtb_errors = 0;
9624 	buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED);
9625 	dtrace_interrupt_enable(cookie);
9626 }
9627
9628 /*
9629  * Note: called from cross call context.  This function activates a buffer
9630  * on a CPU.  As with dtrace_buffer_switch(), the atomicity of the operation
9631  * is guaranteed by the disabling of interrupts.
9632  */
9633 static void
9634 dtrace_buffer_activate(dtrace_state_t *state)
9635 {
9636 	dtrace_buffer_t *buf;
9637 	dtrace_icookie_t cookie = dtrace_interrupt_disable();
9638
9639 	buf = &state->dts_buffer[CPU->cpu_id];
9640
9641 	if (buf->dtb_tomax != NULL) {
9642 		/*
9643 		 * We might like to assert that the buffer is marked inactive,
9644 		 * but this isn't necessarily true: the CPU that processes
9645 		 * the BEGIN probe has its buffer activated
9646 		 * manually.  In this case, we take the (harmless) action
9647 		 * of re-clearing the INACTIVE bit.
9648 */ 9649 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 9650 } 9651 9652 dtrace_interrupt_enable(cookie); 9653 } 9654 9655 static int 9656 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 9657 processorid_t cpu) 9658 { 9659 cpu_t *cp; 9660 dtrace_buffer_t *buf; 9661 9662 ASSERT(MUTEX_HELD(&cpu_lock)); 9663 ASSERT(MUTEX_HELD(&dtrace_lock)); 9664 9665 if (size > dtrace_nonroot_maxsize && 9666 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 9667 return (EFBIG); 9668 9669 cp = cpu_list; 9670 9671 do { 9672 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 9673 continue; 9674 9675 buf = &bufs[cp->cpu_id]; 9676 9677 /* 9678 * If there is already a buffer allocated for this CPU, it 9679 * is only possible that this is a DR event. In this case, 9680 * the buffer size must match our specified size. 9681 */ 9682 if (buf->dtb_tomax != NULL) { 9683 ASSERT(buf->dtb_size == size); 9684 continue; 9685 } 9686 9687 ASSERT(buf->dtb_xamot == NULL); 9688 9689 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 9690 goto err; 9691 9692 buf->dtb_size = size; 9693 buf->dtb_flags = flags; 9694 buf->dtb_offset = 0; 9695 buf->dtb_drops = 0; 9696 9697 if (flags & DTRACEBUF_NOSWITCH) 9698 continue; 9699 9700 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 9701 goto err; 9702 } while ((cp = cp->cpu_next) != cpu_list); 9703 9704 return (0); 9705 9706 err: 9707 cp = cpu_list; 9708 9709 do { 9710 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 9711 continue; 9712 9713 buf = &bufs[cp->cpu_id]; 9714 9715 if (buf->dtb_xamot != NULL) { 9716 ASSERT(buf->dtb_tomax != NULL); 9717 ASSERT(buf->dtb_size == size); 9718 kmem_free(buf->dtb_xamot, size); 9719 } 9720 9721 if (buf->dtb_tomax != NULL) { 9722 ASSERT(buf->dtb_size == size); 9723 kmem_free(buf->dtb_tomax, size); 9724 } 9725 9726 buf->dtb_tomax = NULL; 9727 buf->dtb_xamot = NULL; 9728 buf->dtb_size = 0; 9729 } while ((cp = cp->cpu_next) != cpu_list); 9730 9731 return (ENOMEM); 9732 } 9733 9734 /* 9735 * Note: called from probe context. This function just increments the drop 9736 * count on a buffer. It has been made a function to allow for the 9737 * possibility of understanding the source of mysterious drop counts. (A 9738 * problem for which one may be particularly disappointed that DTrace cannot 9739 * be used to understand DTrace.) 9740 */ 9741 static void 9742 dtrace_buffer_drop(dtrace_buffer_t *buf) 9743 { 9744 buf->dtb_drops++; 9745 } 9746 9747 /* 9748 * Note: called from probe context. This function is called to reserve space 9749 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 9750 * mstate. Returns the new offset in the buffer, or a negative value if an 9751 * error has occurred. 9752 */ 9753 static intptr_t 9754 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 9755 dtrace_state_t *state, dtrace_mstate_t *mstate) 9756 { 9757 intptr_t offs = buf->dtb_offset, soffs; 9758 intptr_t woffs; 9759 caddr_t tomax; 9760 size_t total; 9761 9762 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 9763 return (-1); 9764 9765 if ((tomax = buf->dtb_tomax) == NULL) { 9766 dtrace_buffer_drop(buf); 9767 return (-1); 9768 } 9769 9770 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 9771 while (offs & (align - 1)) { 9772 /* 9773 * Assert that our alignment is off by a number which 9774 * is itself sizeof (uint32_t) aligned. 
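			 * For example, with offs at 4 and an 8-byte
			 * alignment requirement, a single DTRACE_EPIDNONE
			 * word is stored at offset 4 and offs advances to
			 * 8, at which point the reservation is aligned.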
9775 */ 9776 ASSERT(!((align - (offs & (align - 1))) & 9777 (sizeof (uint32_t) - 1))); 9778 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 9779 offs += sizeof (uint32_t); 9780 } 9781 9782 if ((soffs = offs + needed) > buf->dtb_size) { 9783 dtrace_buffer_drop(buf); 9784 return (-1); 9785 } 9786 9787 if (mstate == NULL) 9788 return (offs); 9789 9790 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 9791 mstate->dtms_scratch_size = buf->dtb_size - soffs; 9792 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 9793 9794 return (offs); 9795 } 9796 9797 if (buf->dtb_flags & DTRACEBUF_FILL) { 9798 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 9799 (buf->dtb_flags & DTRACEBUF_FULL)) 9800 return (-1); 9801 goto out; 9802 } 9803 9804 total = needed + (offs & (align - 1)); 9805 9806 /* 9807 * For a ring buffer, life is quite a bit more complicated. Before 9808 * we can store any padding, we need to adjust our wrapping offset. 9809 * (If we've never before wrapped or we're not about to, no adjustment 9810 * is required.) 9811 */ 9812 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 9813 offs + total > buf->dtb_size) { 9814 woffs = buf->dtb_xamot_offset; 9815 9816 if (offs + total > buf->dtb_size) { 9817 /* 9818 * We can't fit in the end of the buffer. First, a 9819 * sanity check that we can fit in the buffer at all. 9820 */ 9821 if (total > buf->dtb_size) { 9822 dtrace_buffer_drop(buf); 9823 return (-1); 9824 } 9825 9826 /* 9827 * We're going to be storing at the top of the buffer, 9828 * so now we need to deal with the wrapped offset. We 9829 * only reset our wrapped offset to 0 if it is 9830 * currently greater than the current offset. If it 9831 * is less than the current offset, it is because a 9832 * previous allocation induced a wrap -- but the 9833 * allocation didn't subsequently take the space due 9834 * to an error or false predicate evaluation. In this 9835 * case, we'll just leave the wrapped offset alone: if 9836 * the wrapped offset hasn't been advanced far enough 9837 * for this allocation, it will be adjusted in the 9838 * lower loop. 9839 */ 9840 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 9841 if (woffs >= offs) 9842 woffs = 0; 9843 } else { 9844 woffs = 0; 9845 } 9846 9847 /* 9848 * Now we know that we're going to be storing to the 9849 * top of the buffer and that there is room for us 9850 * there. We need to clear the buffer from the current 9851 * offset to the end (there may be old gunk there). 9852 */ 9853 while (offs < buf->dtb_size) 9854 tomax[offs++] = 0; 9855 9856 /* 9857 * We need to set our offset to zero. And because we 9858 * are wrapping, we need to set the bit indicating as 9859 * much. We can also adjust our needed space back 9860 * down to the space required by the ECB -- we know 9861 * that the top of the buffer is aligned. 9862 */ 9863 offs = 0; 9864 total = needed; 9865 buf->dtb_flags |= DTRACEBUF_WRAPPED; 9866 } else { 9867 /* 9868 * There is room for us in the buffer, so we simply 9869 * need to check the wrapped offset. 9870 */ 9871 if (woffs < offs) { 9872 /* 9873 * The wrapped offset is less than the offset. 9874 * This can happen if we allocated buffer space 9875 * that induced a wrap, but then we didn't 9876 * subsequently take the space due to an error 9877 * or false predicate evaluation. This is 9878 * okay; we know that _this_ allocation isn't 9879 * going to induce a wrap. We still can't 9880 * reset the wrapped offset to be zero, 9881 * however: the space may have been trashed in 9882 * the previous failed probe attempt. 
But at 9883 * least the wrapped offset doesn't need to 9884 * be adjusted at all... 9885 */ 9886 goto out; 9887 } 9888 } 9889 9890 while (offs + total > woffs) { 9891 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 9892 size_t size; 9893 9894 if (epid == DTRACE_EPIDNONE) { 9895 size = sizeof (uint32_t); 9896 } else { 9897 ASSERT(epid <= state->dts_necbs); 9898 ASSERT(state->dts_ecbs[epid - 1] != NULL); 9899 9900 size = state->dts_ecbs[epid - 1]->dte_size; 9901 } 9902 9903 ASSERT(woffs + size <= buf->dtb_size); 9904 ASSERT(size != 0); 9905 9906 if (woffs + size == buf->dtb_size) { 9907 /* 9908 * We've reached the end of the buffer; we want 9909 * to set the wrapped offset to 0 and break 9910 * out. However, if the offs is 0, then we're 9911 * in a strange edge-condition: the amount of 9912 * space that we want to reserve plus the size 9913 * of the record that we're overwriting is 9914 * greater than the size of the buffer. This 9915 * is problematic because if we reserve the 9916 * space but subsequently don't consume it (due 9917 * to a failed predicate or error) the wrapped 9918 * offset will be 0 -- yet the EPID at offset 0 9919 * will not be committed. This situation is 9920 * relatively easy to deal with: if we're in 9921 * this case, the buffer is indistinguishable 9922 * from one that hasn't wrapped; we need only 9923 * finish the job by clearing the wrapped bit, 9924 * explicitly setting the offset to be 0, and 9925 * zero'ing out the old data in the buffer. 9926 */ 9927 if (offs == 0) { 9928 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 9929 buf->dtb_offset = 0; 9930 woffs = total; 9931 9932 while (woffs < buf->dtb_size) 9933 tomax[woffs++] = 0; 9934 } 9935 9936 woffs = 0; 9937 break; 9938 } 9939 9940 woffs += size; 9941 } 9942 9943 /* 9944 * We have a wrapped offset. It may be that the wrapped offset 9945 * has become zero -- that's okay. 9946 */ 9947 buf->dtb_xamot_offset = woffs; 9948 } 9949 9950 out: 9951 /* 9952 * Now we can plow the buffer with any necessary padding. 9953 */ 9954 while (offs & (align - 1)) { 9955 /* 9956 * Assert that our alignment is off by a number which 9957 * is itself sizeof (uint32_t) aligned. 9958 */ 9959 ASSERT(!((align - (offs & (align - 1))) & 9960 (sizeof (uint32_t) - 1))); 9961 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 9962 offs += sizeof (uint32_t); 9963 } 9964 9965 if (buf->dtb_flags & DTRACEBUF_FILL) { 9966 if (offs + needed > buf->dtb_size - state->dts_reserve) { 9967 buf->dtb_flags |= DTRACEBUF_FULL; 9968 return (-1); 9969 } 9970 } 9971 9972 if (mstate == NULL) 9973 return (offs); 9974 9975 /* 9976 * For ring buffers and fill buffers, the scratch space is always 9977 * the inactive buffer. 9978 */ 9979 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 9980 mstate->dtms_scratch_size = buf->dtb_size; 9981 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 9982 9983 return (offs); 9984 } 9985 9986 static void 9987 dtrace_buffer_polish(dtrace_buffer_t *buf) 9988 { 9989 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 9990 ASSERT(MUTEX_HELD(&dtrace_lock)); 9991 9992 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 9993 return; 9994 9995 /* 9996 * We need to polish the ring buffer. There are three cases: 9997 * 9998 * - The first (and presumably most common) is that there is no gap 9999 * between the buffer offset and the wrapped offset. In this case, 10000 * there is nothing in the buffer that isn't valid data; we can 10001 * mark the buffer as polished and return. 
10002 * 10003 * - The second (less common than the first but still more common 10004 * than the third) is that there is a gap between the buffer offset 10005 * and the wrapped offset, and the wrapped offset is larger than the 10006 * buffer offset. This can happen because of an alignment issue, or 10007 * can happen because of a call to dtrace_buffer_reserve() that 10008 * didn't subsequently consume the buffer space. In this case, 10009 * we need to zero the data from the buffer offset to the wrapped 10010 * offset. 10011 * 10012 * - The third (and least common) is that there is a gap between the 10013 * buffer offset and the wrapped offset, but the wrapped offset is 10014 * _less_ than the buffer offset. This can only happen because a 10015 * call to dtrace_buffer_reserve() induced a wrap, but the space 10016 * was not subsequently consumed. In this case, we need to zero the 10017 * space from the offset to the end of the buffer _and_ from the 10018 * top of the buffer to the wrapped offset. 10019 */ 10020 if (buf->dtb_offset < buf->dtb_xamot_offset) { 10021 bzero(buf->dtb_tomax + buf->dtb_offset, 10022 buf->dtb_xamot_offset - buf->dtb_offset); 10023 } 10024 10025 if (buf->dtb_offset > buf->dtb_xamot_offset) { 10026 bzero(buf->dtb_tomax + buf->dtb_offset, 10027 buf->dtb_size - buf->dtb_offset); 10028 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 10029 } 10030 } 10031 10032 static void 10033 dtrace_buffer_free(dtrace_buffer_t *bufs) 10034 { 10035 int i; 10036 10037 for (i = 0; i < NCPU; i++) { 10038 dtrace_buffer_t *buf = &bufs[i]; 10039 10040 if (buf->dtb_tomax == NULL) { 10041 ASSERT(buf->dtb_xamot == NULL); 10042 ASSERT(buf->dtb_size == 0); 10043 continue; 10044 } 10045 10046 if (buf->dtb_xamot != NULL) { 10047 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 10048 kmem_free(buf->dtb_xamot, buf->dtb_size); 10049 } 10050 10051 kmem_free(buf->dtb_tomax, buf->dtb_size); 10052 buf->dtb_size = 0; 10053 buf->dtb_tomax = NULL; 10054 buf->dtb_xamot = NULL; 10055 } 10056 } 10057 10058 /* 10059 * DTrace Enabling Functions 10060 */ 10061 static dtrace_enabling_t * 10062 dtrace_enabling_create(dtrace_vstate_t *vstate) 10063 { 10064 dtrace_enabling_t *enab; 10065 10066 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 10067 enab->dten_vstate = vstate; 10068 10069 return (enab); 10070 } 10071 10072 static void 10073 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 10074 { 10075 dtrace_ecbdesc_t **ndesc; 10076 size_t osize, nsize; 10077 10078 /* 10079 * We can't add to enablings after we've enabled them, or after we've 10080 * retained them. 
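	 * (Both conditions are asserted below: a nonzero dten_probegen
	 * indicates that the enabling has been matched, and non-NULL
	 * dten_prev/dten_next pointers indicate that it has been
	 * retained.)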
10081 */ 10082 ASSERT(enab->dten_probegen == 0); 10083 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 10084 10085 if (enab->dten_ndesc < enab->dten_maxdesc) { 10086 enab->dten_desc[enab->dten_ndesc++] = ecb; 10087 return; 10088 } 10089 10090 osize = enab->dten_maxdesc * sizeof (dtrace_ecbdesc_t *); 10091 10092 if (enab->dten_maxdesc == 0) { 10093 enab->dten_maxdesc = 1; 10094 } else { 10095 enab->dten_maxdesc <<= 1; 10096 } 10097 10098 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 10099 10100 nsize = enab->dten_maxdesc * sizeof (dtrace_ecbdesc_t *); 10101 ndesc = kmem_zalloc(nsize, KM_SLEEP); 10102 bcopy(enab->dten_desc, ndesc, osize); 10103 kmem_free(enab->dten_desc, osize); 10104 10105 enab->dten_desc = ndesc; 10106 enab->dten_desc[enab->dten_ndesc++] = ecb; 10107 } 10108 10109 static void 10110 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 10111 dtrace_probedesc_t *pd) 10112 { 10113 dtrace_ecbdesc_t *new; 10114 dtrace_predicate_t *pred; 10115 dtrace_actdesc_t *act; 10116 10117 /* 10118 * We're going to create a new ECB description that matches the 10119 * specified ECB in every way, but has the specified probe description. 10120 */ 10121 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 10122 10123 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 10124 dtrace_predicate_hold(pred); 10125 10126 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 10127 dtrace_actdesc_hold(act); 10128 10129 new->dted_action = ecb->dted_action; 10130 new->dted_pred = ecb->dted_pred; 10131 new->dted_probe = *pd; 10132 new->dted_uarg = ecb->dted_uarg; 10133 10134 dtrace_enabling_add(enab, new); 10135 } 10136 10137 static void 10138 dtrace_enabling_dump(dtrace_enabling_t *enab) 10139 { 10140 int i; 10141 10142 for (i = 0; i < enab->dten_ndesc; i++) { 10143 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 10144 10145 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 10146 desc->dtpd_provider, desc->dtpd_mod, 10147 desc->dtpd_func, desc->dtpd_name); 10148 } 10149 } 10150 10151 static void 10152 dtrace_enabling_destroy(dtrace_enabling_t *enab) 10153 { 10154 int i; 10155 dtrace_ecbdesc_t *ep; 10156 dtrace_vstate_t *vstate = enab->dten_vstate; 10157 10158 ASSERT(MUTEX_HELD(&dtrace_lock)); 10159 10160 for (i = 0; i < enab->dten_ndesc; i++) { 10161 dtrace_actdesc_t *act, *next; 10162 dtrace_predicate_t *pred; 10163 10164 ep = enab->dten_desc[i]; 10165 10166 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 10167 dtrace_predicate_release(pred, vstate); 10168 10169 for (act = ep->dted_action; act != NULL; act = next) { 10170 next = act->dtad_next; 10171 dtrace_actdesc_release(act, vstate); 10172 } 10173 10174 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 10175 } 10176 10177 kmem_free(enab->dten_desc, 10178 enab->dten_maxdesc * sizeof (dtrace_ecbdesc_t *)); 10179 10180 /* 10181 * If this was a retained enabling, decrement the dts_nretained count 10182 * and take it off of the dtrace_retained list.
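 *
 * The removal below is a conventional doubly-linked list unlink
 * against an explicit head pointer; schematically (illustrative
 * names, not the dten_* fields themselves):
 *
 *	if (node->prev == NULL) {
 *		if (head == node)
 *			head = node->next;
 *	} else {
 *		node->prev->next = node->next;
 *	}
 *
 *	if (node->next != NULL)
 *		node->next->prev = node->prev;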
10183 */ 10184 if (enab->dten_prev != NULL || enab->dten_next != NULL || 10185 dtrace_retained == enab) { 10186 ASSERT(enab->dten_vstate->dtvs_state != NULL); 10187 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 10188 enab->dten_vstate->dtvs_state->dts_nretained--; 10189 } 10190 10191 if (enab->dten_prev == NULL) { 10192 if (dtrace_retained == enab) { 10193 dtrace_retained = enab->dten_next; 10194 10195 if (dtrace_retained != NULL) 10196 dtrace_retained->dten_prev = NULL; 10197 } 10198 } else { 10199 ASSERT(enab != dtrace_retained); 10200 ASSERT(dtrace_retained != NULL); 10201 enab->dten_prev->dten_next = enab->dten_next; 10202 } 10203 10204 if (enab->dten_next != NULL) { 10205 ASSERT(dtrace_retained != NULL); 10206 enab->dten_next->dten_prev = enab->dten_prev; 10207 } 10208 10209 kmem_free(enab, sizeof (dtrace_enabling_t)); 10210 } 10211 10212 static int 10213 dtrace_enabling_retain(dtrace_enabling_t *enab) 10214 { 10215 dtrace_state_t *state; 10216 10217 ASSERT(MUTEX_HELD(&dtrace_lock)); 10218 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 10219 ASSERT(enab->dten_vstate != NULL); 10220 10221 state = enab->dten_vstate->dtvs_state; 10222 ASSERT(state != NULL); 10223 10224 /* 10225 * We only allow each state to retain dtrace_retain_max enablings. 10226 */ 10227 if (state->dts_nretained >= dtrace_retain_max) 10228 return (ENOSPC); 10229 10230 state->dts_nretained++; 10231 10232 if (dtrace_retained == NULL) { 10233 dtrace_retained = enab; 10234 return (0); 10235 } 10236 10237 enab->dten_next = dtrace_retained; 10238 dtrace_retained->dten_prev = enab; 10239 dtrace_retained = enab; 10240 10241 return (0); 10242 } 10243 10244 static int 10245 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 10246 dtrace_probedesc_t *create) 10247 { 10248 dtrace_enabling_t *new, *enab; 10249 int found = 0, err = ENOENT; 10250 10251 ASSERT(MUTEX_HELD(&dtrace_lock)); 10252 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 10253 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 10254 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 10255 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 10256 10257 new = dtrace_enabling_create(&state->dts_vstate); 10258 10259 /* 10260 * Iterate over all retained enablings, looking for enablings that 10261 * match the specified state. 10262 */ 10263 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 10264 int i; 10265 10266 /* 10267 * dtvs_state can only be NULL for helper enablings -- and 10268 * helper enablings can't be retained. 10269 */ 10270 ASSERT(enab->dten_vstate->dtvs_state != NULL); 10271 10272 if (enab->dten_vstate->dtvs_state != state) 10273 continue; 10274 10275 /* 10276 * Now iterate over each probe description; we're looking for 10277 * an exact match to the specified probe description. 10278 */ 10279 for (i = 0; i < enab->dten_ndesc; i++) { 10280 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 10281 dtrace_probedesc_t *pd = &ep->dted_probe; 10282 10283 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 10284 continue; 10285 10286 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 10287 continue; 10288 10289 if (strcmp(pd->dtpd_func, match->dtpd_func)) 10290 continue; 10291 10292 if (strcmp(pd->dtpd_name, match->dtpd_name)) 10293 continue; 10294 10295 /* 10296 * We have a winning probe! Add it to our growing 10297 * enabling. 
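 *
 * (To make the replication concrete: were 'match' to describe
 * syscall:::entry and 'create' to describe syscall:::return, every
 * retained ECB description for this state matching the former would
 * be duplicated under the latter.  These probe descriptions are
 * illustrative only, not drawn from the surrounding code.)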
10298 */ 10299 found = 1; 10300 dtrace_enabling_addlike(new, ep, create); 10301 } 10302 } 10303 10304 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 10305 dtrace_enabling_destroy(new); 10306 return (err); 10307 } 10308 10309 return (0); 10310 } 10311 10312 static void 10313 dtrace_enabling_retract(dtrace_state_t *state) 10314 { 10315 dtrace_enabling_t *enab, *next; 10316 10317 ASSERT(MUTEX_HELD(&dtrace_lock)); 10318 10319 /* 10320 * Iterate over all retained enablings, destroying the enablings 10321 * retained for the specified state. 10322 */ 10323 for (enab = dtrace_retained; enab != NULL; enab = next) { 10324 next = enab->dten_next; 10325 10326 /* 10327 * dtvs_state can only be NULL for helper enablings -- and 10328 * helper enablings can't be retained. 10329 */ 10330 ASSERT(enab->dten_vstate->dtvs_state != NULL); 10331 10332 if (enab->dten_vstate->dtvs_state == state) { 10333 ASSERT(state->dts_nretained > 0); 10334 dtrace_enabling_destroy(enab); 10335 } 10336 } 10337 10338 ASSERT(state->dts_nretained == 0); 10339 } 10340 10341 static int 10342 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 10343 { 10344 int i = 0; 10345 int matched = 0; 10346 10347 ASSERT(MUTEX_HELD(&cpu_lock)); 10348 ASSERT(MUTEX_HELD(&dtrace_lock)); 10349 10350 for (i = 0; i < enab->dten_ndesc; i++) { 10351 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 10352 10353 enab->dten_current = ep; 10354 enab->dten_error = 0; 10355 10356 matched += dtrace_probe_enable(&ep->dted_probe, enab); 10357 10358 if (enab->dten_error != 0) { 10359 /* 10360 * If we get an error half-way through enabling the 10361 * probes, we kick out -- perhaps with some number of 10362 * them enabled. Leaving enabled probes enabled may 10363 * be slightly confusing for user-level, but we expect 10364 * that no one will attempt to actually drive on in 10365 * the face of such errors. If this is an anonymous 10366 * enabling (indicated with a NULL nmatched pointer), 10367 * we cmn_err() a message. We aren't expecting to 10368 * get such an error -- insofar as it can exist at all, 10369 * it would be a result of corrupted DOF in the driver 10370 * properties. 10371 */ 10372 if (nmatched == NULL) { 10373 cmn_err(CE_WARN, "dtrace_enabling_match() " 10374 "error on %p: %d", (void *)ep, 10375 enab->dten_error); 10376 } 10377 10378 return (enab->dten_error); 10379 } 10380 } 10381 10382 enab->dten_probegen = dtrace_probegen; 10383 if (nmatched != NULL) 10384 *nmatched = matched; 10385 10386 return (0); 10387 } 10388 10389 static void 10390 dtrace_enabling_matchall(void) 10391 { 10392 dtrace_enabling_t *enab; 10393 10394 mutex_enter(&cpu_lock); 10395 mutex_enter(&dtrace_lock); 10396 10397 /* 10398 * Because we can be called after dtrace_detach() has been called, we 10399 * cannot assert that there are retained enablings. We can safely 10400 * load from dtrace_retained, however: the taskq_destroy() at the 10401 * end of dtrace_detach() will block pending our completion.
10402 */ 10403 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) 10404 (void) dtrace_enabling_match(enab, NULL); 10405 10406 mutex_exit(&dtrace_lock); 10407 mutex_exit(&cpu_lock); 10408 } 10409 10410 static int 10411 dtrace_enabling_matchstate(dtrace_state_t *state, int *nmatched) 10412 { 10413 dtrace_enabling_t *enab; 10414 int matched, total = 0, err; 10415 10416 ASSERT(MUTEX_HELD(&cpu_lock)); 10417 ASSERT(MUTEX_HELD(&dtrace_lock)); 10418 10419 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 10420 ASSERT(enab->dten_vstate->dtvs_state != NULL); 10421 10422 if (enab->dten_vstate->dtvs_state != state) 10423 continue; 10424 10425 if ((err = dtrace_enabling_match(enab, &matched)) != 0) 10426 return (err); 10427 10428 total += matched; 10429 } 10430 10431 if (nmatched != NULL) 10432 *nmatched = total; 10433 10434 return (0); 10435 } 10436 10437 /* 10438 * If an enabling is to be enabled without having matched probes (that is, if 10439 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 10440 * enabling must be _primed_ by creating an ECB for every ECB description. 10441 * This must be done to assure that we know the number of speculations, the 10442 * number of aggregations, the minimum buffer size needed, etc. before we 10443 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually 10444 * enabling any probes, we create ECBs for every ECB description, but with a 10445 * NULL probe -- which is exactly what this function does. 10446 */ 10447 static void 10448 dtrace_enabling_prime(dtrace_state_t *state) 10449 { 10450 dtrace_enabling_t *enab; 10451 int i; 10452 10453 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 10454 ASSERT(enab->dten_vstate->dtvs_state != NULL); 10455 10456 if (enab->dten_vstate->dtvs_state != state) 10457 continue; 10458 10459 /* 10460 * We don't want to prime an enabling more than once, lest 10461 * we allow a malicious user to induce resource exhaustion. 10462 * (The ECBs that result from priming an enabling aren't 10463 * leaked -- but they also aren't deallocated until the 10464 * consumer state is destroyed.) 10465 */ 10466 if (enab->dten_primed) 10467 continue; 10468 10469 for (i = 0; i < enab->dten_ndesc; i++) { 10470 enab->dten_current = enab->dten_desc[i]; 10471 (void) dtrace_probe_enable(NULL, enab); 10472 } 10473 10474 enab->dten_primed = 1; 10475 } 10476 } 10477 10478 /* 10479 * Called to indicate that probes should be provided due to retained 10480 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 10481 * must take an initial lap through the enabling calling the dtps_provide() 10482 * entry point explicitly to allow for autocreated probes.
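 *
 * For orientation, a provider's dtps_provide() entry point is expected
 * to create any of its probes that could match the description.  A
 * skeletal (purely hypothetical) provider might implement it as:
 *
 *	static void
 *	xx_provide(void *arg, const dtrace_probedesc_t *desc)
 *	{
 *		if (desc == NULL)
 *			return;
 *
 *		if (dtrace_probe_lookup(xx_id, desc->dtpd_mod,
 *		    desc->dtpd_func, desc->dtpd_name) != DTRACE_IDNONE)
 *			return;
 *
 *		(void) dtrace_probe_create(xx_id, desc->dtpd_mod,
 *		    desc->dtpd_func, desc->dtpd_name, 0, NULL);
 *	}
 *
 * (Here xx_id is an assumed provider identifier; the entry point and
 * the lookup/create interfaces are described in <sys/dtrace.h>.)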
10483 */ 10484 static void 10485 dtrace_enabling_provide(dtrace_provider_t *prv) 10486 { 10487 int i, all = 0; 10488 dtrace_probedesc_t desc; 10489 10490 ASSERT(MUTEX_HELD(&dtrace_lock)); 10491 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 10492 10493 if (prv == NULL) { 10494 all = 1; 10495 prv = dtrace_provider; 10496 } 10497 10498 do { 10499 dtrace_enabling_t *enab = dtrace_retained; 10500 void *parg = prv->dtpv_arg; 10501 10502 for (; enab != NULL; enab = enab->dten_next) { 10503 for (i = 0; i < enab->dten_ndesc; i++) { 10504 desc = enab->dten_desc[i]->dted_probe; 10505 mutex_exit(&dtrace_lock); 10506 prv->dtpv_pops.dtps_provide(parg, &desc); 10507 mutex_enter(&dtrace_lock); 10508 } 10509 } 10510 } while (all && (prv = prv->dtpv_next) != NULL); 10511 10512 mutex_exit(&dtrace_lock); 10513 dtrace_probe_provide(NULL, all ? NULL : prv); 10514 mutex_enter(&dtrace_lock); 10515 } 10516 10517 /* 10518 * DTrace DOF Functions 10519 */ 10520 /*ARGSUSED*/ 10521 static void 10522 dtrace_dof_error(dof_hdr_t *dof, const char *str) 10523 { 10524 if (dtrace_err_verbose) 10525 cmn_err(CE_WARN, "failed to process DOF: %s", str); 10526 10527 #ifdef DTRACE_ERRDEBUG 10528 dtrace_errdebug(str); 10529 #endif 10530 } 10531 10532 /* 10533 * Create DOF out of a currently enabled state. Right now, we only create 10534 * DOF containing the run-time options -- but this could be expanded to create 10535 * complete DOF representing the enabled state. 10536 */ 10537 static dof_hdr_t * 10538 dtrace_dof_create(dtrace_state_t *state) 10539 { 10540 dof_hdr_t *dof; 10541 dof_sec_t *sec; 10542 dof_optdesc_t *opt; 10543 int i, len = sizeof (dof_hdr_t) + 10544 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 10545 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 10546 10547 ASSERT(MUTEX_HELD(&dtrace_lock)); 10548 10549 dof = kmem_zalloc(len, KM_SLEEP); 10550 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 10551 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 10552 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 10553 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 10554 10555 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 10556 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 10557 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 10558 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 10559 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 10560 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 10561 10562 dof->dofh_flags = 0; 10563 dof->dofh_hdrsize = sizeof (dof_hdr_t); 10564 dof->dofh_secsize = sizeof (dof_sec_t); 10565 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 10566 dof->dofh_secoff = sizeof (dof_hdr_t); 10567 dof->dofh_loadsz = len; 10568 dof->dofh_filesz = len; 10569 dof->dofh_pad = 0; 10570 10571 /* 10572 * Fill in the option section header... 
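 *
 * The finished DOF therefore has the layout implied by the 'len'
 * computation above (shown schematically):
 *
 *	+------------------------+  offset 0
 *	| dof_hdr_t              |
 *	+------------------------+  sizeof (dof_hdr_t)
 *	| dof_sec_t (OPTDESC)    |
 *	+------------------------+  rounded up to sizeof (uint64_t)
 *	| dof_optdesc_t array,   |
 *	| DTRACEOPT_MAX entries  |
 *	+------------------------+  = dofh_loadsz = dofh_filesz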
10573 */ 10574 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 10575 sec->dofs_type = DOF_SECT_OPTDESC; 10576 sec->dofs_align = sizeof (uint64_t); 10577 sec->dofs_flags = DOF_SECF_LOAD; 10578 sec->dofs_entsize = sizeof (dof_optdesc_t); 10579 10580 opt = (dof_optdesc_t *)((uintptr_t)sec + 10581 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 10582 10583 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 10584 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 10585 10586 for (i = 0; i < DTRACEOPT_MAX; i++) { 10587 opt[i].dofo_option = i; 10588 opt[i].dofo_strtab = DOF_SECIDX_NONE; 10589 opt[i].dofo_value = state->dts_options[i]; 10590 } 10591 10592 return (dof); 10593 } 10594 10595 static dof_hdr_t * 10596 dtrace_dof_copyin(uintptr_t uarg, int *errp) 10597 { 10598 dof_hdr_t hdr, *dof; 10599 10600 ASSERT(!MUTEX_HELD(&dtrace_lock)); 10601 10602 /* 10603 * First, we're going to copyin() the sizeof (dof_hdr_t). 10604 */ 10605 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 10606 dtrace_dof_error(NULL, "failed to copyin DOF header"); 10607 *errp = EFAULT; 10608 return (NULL); 10609 } 10610 10611 /* 10612 * Now we'll allocate the entire DOF and copy it in -- provided 10613 * that the length isn't outrageous. 10614 */ 10615 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 10616 dtrace_dof_error(&hdr, "load size exceeds maximum"); 10617 *errp = E2BIG; 10618 return (NULL); 10619 } 10620 10621 if (hdr.dofh_loadsz < sizeof (hdr)) { 10622 dtrace_dof_error(&hdr, "invalid load size"); 10623 *errp = EINVAL; 10624 return (NULL); 10625 } 10626 10627 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 10628 10629 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) { 10630 kmem_free(dof, hdr.dofh_loadsz); 10631 *errp = EFAULT; 10632 return (NULL); 10633 } 10634 10635 return (dof); 10636 } 10637 10638 static dof_hdr_t * 10639 dtrace_dof_property(const char *name) 10640 { 10641 uchar_t *buf; 10642 uint64_t loadsz; 10643 unsigned int len, i; 10644 dof_hdr_t *dof; 10645 10646 /* 10647 * Unfortunately, arrays of values in .conf files are always (and 10648 * only) interpreted to be integer arrays. We must read our DOF 10649 * as an integer array, and then squeeze it into a byte array. 10650 */ 10651 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 10652 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 10653 return (NULL); 10654 10655 for (i = 0; i < len; i++) 10656 buf[i] = (uchar_t)(((int *)buf)[i]); 10657 10658 if (len < sizeof (dof_hdr_t)) { 10659 ddi_prop_free(buf); 10660 dtrace_dof_error(NULL, "truncated header"); 10661 return (NULL); 10662 } 10663 10664 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 10665 ddi_prop_free(buf); 10666 dtrace_dof_error(NULL, "truncated DOF"); 10667 return (NULL); 10668 } 10669 10670 if (loadsz >= dtrace_dof_maxsize) { 10671 ddi_prop_free(buf); 10672 dtrace_dof_error(NULL, "oversized DOF"); 10673 return (NULL); 10674 } 10675 10676 dof = kmem_alloc(loadsz, KM_SLEEP); 10677 bcopy(buf, dof, loadsz); 10678 ddi_prop_free(buf); 10679 10680 return (dof); 10681 } 10682 10683 static void 10684 dtrace_dof_destroy(dof_hdr_t *dof) 10685 { 10686 kmem_free(dof, dof->dofh_loadsz); 10687 } 10688 10689 /* 10690 * Return the dof_sec_t pointer corresponding to a given section index. If the 10691 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 10692 * a type other than DOF_SECT_NONE is specified, the header is checked against 10693 * this type and NULL is returned if the types do not match.
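 *
 * Callers are expected to test the return value and bail immediately,
 * as the error has already been reported; the typical shape (as seen
 * in the functions that follow) is:
 *
 *	if ((strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB,
 *	    probe->dofp_strtab)) == NULL)
 *		return (NULL);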
10694 */ 10695 static dof_sec_t * 10696 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 10697 { 10698 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 10699 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 10700 10701 if (i >= dof->dofh_secnum) { 10702 dtrace_dof_error(dof, "referenced section index is invalid"); 10703 return (NULL); 10704 } 10705 10706 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 10707 dtrace_dof_error(dof, "referenced section is not loadable"); 10708 return (NULL); 10709 } 10710 10711 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 10712 dtrace_dof_error(dof, "referenced section is the wrong type"); 10713 return (NULL); 10714 } 10715 10716 return (sec); 10717 } 10718 10719 static dtrace_probedesc_t * 10720 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 10721 { 10722 dof_probedesc_t *probe; 10723 dof_sec_t *strtab; 10724 uintptr_t daddr = (uintptr_t)dof; 10725 uintptr_t str; 10726 size_t size; 10727 10728 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 10729 dtrace_dof_error(dof, "invalid probe section"); 10730 return (NULL); 10731 } 10732 10733 if (sec->dofs_align != sizeof (dof_secidx_t)) { 10734 dtrace_dof_error(dof, "bad alignment in probe description"); 10735 return (NULL); 10736 } 10737 10738 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 10739 dtrace_dof_error(dof, "truncated probe description"); 10740 return (NULL); 10741 } 10742 10743 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 10744 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 10745 10746 if (strtab == NULL) 10747 return (NULL); 10748 10749 str = daddr + strtab->dofs_offset; 10750 size = strtab->dofs_size; 10751 10752 if (probe->dofp_provider >= strtab->dofs_size) { 10753 dtrace_dof_error(dof, "corrupt probe provider"); 10754 return (NULL); 10755 } 10756 10757 (void) strncpy(desc->dtpd_provider, 10758 (char *)(str + probe->dofp_provider), 10759 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 10760 10761 if (probe->dofp_mod >= strtab->dofs_size) { 10762 dtrace_dof_error(dof, "corrupt probe module"); 10763 return (NULL); 10764 } 10765 10766 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 10767 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 10768 10769 if (probe->dofp_func >= strtab->dofs_size) { 10770 dtrace_dof_error(dof, "corrupt probe function"); 10771 return (NULL); 10772 } 10773 10774 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 10775 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 10776 10777 if (probe->dofp_name >= strtab->dofs_size) { 10778 dtrace_dof_error(dof, "corrupt probe name"); 10779 return (NULL); 10780 } 10781 10782 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 10783 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 10784 10785 return (desc); 10786 } 10787 10788 static dtrace_difo_t * 10789 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 10790 cred_t *cr) 10791 { 10792 dtrace_difo_t *dp; 10793 size_t ttl = 0; 10794 dof_difohdr_t *dofd; 10795 uintptr_t daddr = (uintptr_t)dof; 10796 size_t max = dtrace_difo_maxsize; 10797 int i, l, n; 10798 10799 static const struct { 10800 int section; 10801 int bufoffs; 10802 int lenoffs; 10803 int entsize; 10804 int align; 10805 const char *msg; 10806 } difo[] = { 10807 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 10808 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 10809 sizeof (dif_instr_t), "multiple DIF sections" }, 
10810 10811 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 10812 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 10813 sizeof (uint64_t), "multiple integer tables" }, 10814 10815 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 10816 offsetof(dtrace_difo_t, dtdo_strlen), 0, 10817 sizeof (char), "multiple string tables" }, 10818 10819 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 10820 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 10821 sizeof (uint_t), "multiple variable tables" }, 10822 10823 { DOF_SECT_NONE, 0, 0, 0, 0, NULL } 10824 }; 10825 10826 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 10827 dtrace_dof_error(dof, "invalid DIFO header section"); 10828 return (NULL); 10829 } 10830 10831 if (sec->dofs_align != sizeof (dof_secidx_t)) { 10832 dtrace_dof_error(dof, "bad alignment in DIFO header"); 10833 return (NULL); 10834 } 10835 10836 if (sec->dofs_size < sizeof (dof_difohdr_t) || 10837 sec->dofs_size % sizeof (dof_secidx_t)) { 10838 dtrace_dof_error(dof, "bad size in DIFO header"); 10839 return (NULL); 10840 } 10841 10842 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 10843 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 10844 10845 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 10846 dp->dtdo_rtype = dofd->dofd_rtype; 10847 10848 for (l = 0; l < n; l++) { 10849 dof_sec_t *subsec; 10850 void **bufp; 10851 uint32_t *lenp; 10852 10853 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 10854 dofd->dofd_links[l])) == NULL) 10855 goto err; /* invalid section link */ 10856 10857 if (ttl + subsec->dofs_size > max) { 10858 dtrace_dof_error(dof, "exceeds maximum size"); 10859 goto err; 10860 } 10861 10862 ttl += subsec->dofs_size; 10863 10864 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 10865 if (subsec->dofs_type != difo[i].section) 10866 continue; 10867 10868 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 10869 dtrace_dof_error(dof, "section not loaded"); 10870 goto err; 10871 } 10872 10873 if (subsec->dofs_align != difo[i].align) { 10874 dtrace_dof_error(dof, "bad alignment"); 10875 goto err; 10876 } 10877 10878 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 10879 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 10880 10881 if (*bufp != NULL) { 10882 dtrace_dof_error(dof, difo[i].msg); 10883 goto err; 10884 } 10885 10886 if (difo[i].entsize != subsec->dofs_entsize) { 10887 dtrace_dof_error(dof, "entry size mismatch"); 10888 goto err; 10889 } 10890 10891 if (subsec->dofs_entsize != 0 && 10892 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 10893 dtrace_dof_error(dof, "corrupt entry size"); 10894 goto err; 10895 } 10896 10897 *lenp = subsec->dofs_size; 10898 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 10899 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 10900 *bufp, subsec->dofs_size); 10901 10902 if (subsec->dofs_entsize != 0) 10903 *lenp /= subsec->dofs_entsize; 10904 10905 break; 10906 } 10907 10908 /* 10909 * If we encounter a loadable DIFO sub-section that is not 10910 * known to us, assume this is a broken program and fail. 10911 */ 10912 if (difo[i].section == DOF_SECT_NONE && 10913 (subsec->dofs_flags & DOF_SECF_LOAD)) { 10914 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 10915 goto err; 10916 } 10917 } 10918 10919 if (dp->dtdo_buf == NULL) { 10920 /* 10921 * We can't have a DIF object without DIF text.
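 *
 * (A note on the mechanism above: each difo[] row names its
 * destination buffer and length fields by offsetof(), so a single
 * loop can service the DIF, integer table, string table and variable
 * table sections alike:
 *
 *	bufp = (void **)((uintptr_t)dp + difo[i].bufoffs);
 *	lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs);
 *
 * This is also why a missing DIF section is only detectable here,
 * after the loop, by dtdo_buf still being NULL.)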
10922 */ 10923 dtrace_dof_error(dof, "missing DIF text"); 10924 goto err; 10925 } 10926 10927 /* 10928 * Before we validate the DIF object, run through the variable table 10929 * looking for the strings -- if any of their sizes are zero, we'll set 10930 * them to the system-wide default string size. Note that 10931 * this should _not_ happen if the "strsize" option has been set -- 10932 * in this case, the compiler should have set the size to reflect the 10933 * setting of the option. 10934 */ 10935 for (i = 0; i < dp->dtdo_varlen; i++) { 10936 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 10937 dtrace_diftype_t *t = &v->dtdv_type; 10938 10939 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 10940 continue; 10941 10942 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 10943 t->dtdt_size = dtrace_strsize_default; 10944 } 10945 10946 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 10947 goto err; 10948 10949 dtrace_difo_init(dp, vstate); 10950 return (dp); 10951 10952 err: 10953 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 10954 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 10955 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 10956 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 10957 10958 kmem_free(dp, sizeof (dtrace_difo_t)); 10959 return (NULL); 10960 } 10961 10962 static dtrace_predicate_t * 10963 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 10964 cred_t *cr) 10965 { 10966 dtrace_difo_t *dp; 10967 10968 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 10969 return (NULL); 10970 10971 return (dtrace_predicate_create(dp)); 10972 } 10973 10974 static dtrace_actdesc_t * 10975 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 10976 cred_t *cr) 10977 { 10978 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 10979 dof_actdesc_t *desc; 10980 dof_sec_t *difosec; 10981 size_t offs; 10982 uintptr_t daddr = (uintptr_t)dof; 10983 uint64_t arg; 10984 dtrace_actkind_t kind; 10985 10986 if (sec->dofs_type != DOF_SECT_ACTDESC) { 10987 dtrace_dof_error(dof, "invalid action section"); 10988 return (NULL); 10989 } 10990 10991 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 10992 dtrace_dof_error(dof, "truncated action description"); 10993 return (NULL); 10994 } 10995 10996 if (sec->dofs_align != sizeof (uint64_t)) { 10997 dtrace_dof_error(dof, "bad alignment in action description"); 10998 return (NULL); 10999 } 11000 11001 if (sec->dofs_size < sec->dofs_entsize) { 11002 dtrace_dof_error(dof, "section entry size exceeds total size"); 11003 return (NULL); 11004 } 11005 11006 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 11007 dtrace_dof_error(dof, "bad entry size in action description"); 11008 return (NULL); 11009 } 11010 11011 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 11012 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 11013 return (NULL); 11014 } 11015 11016 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 11017 desc = (dof_actdesc_t *)(daddr + 11018 (uintptr_t)sec->dofs_offset + offs); 11019 kind = (dtrace_actkind_t)desc->dofa_kind; 11020 11021 if (DTRACEACT_ISPRINTFLIKE(kind) && 11022 (kind != DTRACEACT_PRINTA || 11023 desc->dofa_strtab != DOF_SECIDX_NONE)) { 11024 dof_sec_t *strtab; 11025 char *str, *fmt; 11026 uint64_t i; 11027 11028 /* 11029 * printf()-like actions must have a format string.
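 *
 * The validation below is a bounded scan for the terminating
 * NUL, starting at dofa_arg and walking to the end of the
 * string table; schematically:
 *
 *	for (i = start; i < tabsize; i++) {
 *		if (tab[i] == '\0')
 *			break;
 *	}
 *
 * Running off the end (i >= tabsize) means the format string
 * is unterminated; i == start means it is empty.  Both are
 * rejected.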
11030 */ 11031 if ((strtab = dtrace_dof_sect(dof, 11032 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 11033 goto err; 11034 11035 str = (char *)((uintptr_t)dof + 11036 (uintptr_t)strtab->dofs_offset); 11037 11038 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 11039 if (str[i] == '\0') 11040 break; 11041 } 11042 11043 if (i >= strtab->dofs_size) { 11044 dtrace_dof_error(dof, "bogus format string"); 11045 goto err; 11046 } 11047 11048 if (i == desc->dofa_arg) { 11049 dtrace_dof_error(dof, "empty format string"); 11050 goto err; 11051 } 11052 11053 i -= desc->dofa_arg; 11054 fmt = kmem_alloc(i + 1, KM_SLEEP); 11055 bcopy(&str[desc->dofa_arg], fmt, i + 1); 11056 arg = (uint64_t)(uintptr_t)fmt; 11057 } else { 11058 if (kind == DTRACEACT_PRINTA) { 11059 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 11060 arg = 0; 11061 } else { 11062 arg = desc->dofa_arg; 11063 } 11064 } 11065 11066 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 11067 desc->dofa_uarg, arg); 11068 11069 if (last != NULL) { 11070 last->dtad_next = act; 11071 } else { 11072 first = act; 11073 } 11074 11075 last = act; 11076 11077 if (desc->dofa_difo == DOF_SECIDX_NONE) 11078 continue; 11079 11080 if ((difosec = dtrace_dof_sect(dof, 11081 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 11082 goto err; 11083 11084 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 11085 11086 if (act->dtad_difo == NULL) 11087 goto err; 11088 } 11089 11090 ASSERT(first != NULL); 11091 return (first); 11092 11093 err: 11094 for (act = first; act != NULL; act = next) { 11095 next = act->dtad_next; 11096 dtrace_actdesc_release(act, vstate); 11097 } 11098 11099 return (NULL); 11100 } 11101 11102 static dtrace_ecbdesc_t * 11103 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 11104 cred_t *cr) 11105 { 11106 dtrace_ecbdesc_t *ep; 11107 dof_ecbdesc_t *ecb; 11108 dtrace_probedesc_t *desc; 11109 dtrace_predicate_t *pred = NULL; 11110 11111 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 11112 dtrace_dof_error(dof, "truncated ECB description"); 11113 return (NULL); 11114 } 11115 11116 if (sec->dofs_align != sizeof (uint64_t)) { 11117 dtrace_dof_error(dof, "bad alignment in ECB description"); 11118 return (NULL); 11119 } 11120 11121 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 11122 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 11123 11124 if (sec == NULL) 11125 return (NULL); 11126 11127 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 11128 ep->dted_uarg = ecb->dofe_uarg; 11129 desc = &ep->dted_probe; 11130 11131 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 11132 goto err; 11133 11134 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 11135 if ((sec = dtrace_dof_sect(dof, 11136 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 11137 goto err; 11138 11139 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 11140 goto err; 11141 11142 ep->dted_pred.dtpdd_predicate = pred; 11143 } 11144 11145 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 11146 if ((sec = dtrace_dof_sect(dof, 11147 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 11148 goto err; 11149 11150 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 11151 11152 if (ep->dted_action == NULL) 11153 goto err; 11154 } 11155 11156 return (ep); 11157 11158 err: 11159 if (pred != NULL) 11160 dtrace_predicate_release(pred, vstate); 11161 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 11162 return (NULL); 11163 } 11164 11165 /* 11166 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 11167 * 
specified DOF. At present, this amounts to simply adding 'ubase' to the 11168 * site of any user SETX relocations to account for load object base address. 11169 * In the future, if we need other relocations, this function can be extended. 11170 */ 11171 static int 11172 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 11173 { 11174 uintptr_t daddr = (uintptr_t)dof; 11175 dof_relohdr_t *dofr = 11176 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 11177 dof_sec_t *ss, *rs, *ts; 11178 dof_relodesc_t *r; 11179 uint_t i, n; 11180 11181 if (sec->dofs_size < sizeof (dof_relohdr_t) || 11182 sec->dofs_align != sizeof (dof_secidx_t)) { 11183 dtrace_dof_error(dof, "invalid relocation header"); 11184 return (-1); 11185 } 11186 11187 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 11188 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 11189 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 11190 11191 if (ss == NULL || rs == NULL || ts == NULL) 11192 return (-1); /* dtrace_dof_error() has been called already */ 11193 11194 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 11195 rs->dofs_align != sizeof (uint64_t)) { 11196 dtrace_dof_error(dof, "invalid relocation section"); 11197 return (-1); 11198 } 11199 11200 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 11201 n = rs->dofs_size / rs->dofs_entsize; 11202 11203 for (i = 0; i < n; i++) { 11204 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 11205 11206 switch (r->dofr_type) { 11207 case DOF_RELO_NONE: 11208 break; 11209 case DOF_RELO_SETX: 11210 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 11211 sizeof (uint64_t) > ts->dofs_size) { 11212 dtrace_dof_error(dof, "bad relocation offset"); 11213 return (-1); 11214 } 11215 11216 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 11217 dtrace_dof_error(dof, "misaligned setx relo"); 11218 return (-1); 11219 } 11220 11221 *(uint64_t *)taddr += ubase; 11222 break; 11223 default: 11224 dtrace_dof_error(dof, "invalid relocation type"); 11225 return (-1); 11226 } 11227 11228 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 11229 } 11230 11231 return (0); 11232 } 11233 11234 /* 11235 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 11236 * header: it should be at the front of a memory region that is at least 11237 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 11238 * size. It need not be validated in any other way. 11239 */ 11240 static int 11241 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 11242 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 11243 { 11244 uint64_t len = dof->dofh_loadsz, seclen; 11245 uintptr_t daddr = (uintptr_t)dof; 11246 dtrace_ecbdesc_t *ep; 11247 dtrace_enabling_t *enab; 11248 uint_t i; 11249 11250 ASSERT(MUTEX_HELD(&dtrace_lock)); 11251 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 11252 11253 /* 11254 * Check the DOF header identification bytes. In addition to checking 11255 * valid settings, we also verify that unused bits/bytes are zeroed so 11256 * we can use them later without fear of regressing existing binaries. 
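 *
 * For reference, the checks below cover, in order: the magic bytes
 * DOF_ID_MAG0 through DOF_ID_MAG3, then DOF_ID_MODEL, DOF_ID_ENCODING,
 * DOF_ID_VERSION, DOF_ID_DIFVERS, DOF_ID_DIFIREG and DOF_ID_DIFTREG,
 * and finally the pad bytes from DOF_ID_PAD up to DOF_ID_SIZE, which
 * must all be zero.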
11257 */ 11258 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 11259 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 11260 dtrace_dof_error(dof, "DOF magic string mismatch"); 11261 return (-1); 11262 } 11263 11264 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 11265 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 11266 dtrace_dof_error(dof, "DOF has invalid data model"); 11267 return (-1); 11268 } 11269 11270 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 11271 dtrace_dof_error(dof, "DOF encoding mismatch"); 11272 return (-1); 11273 } 11274 11275 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 11276 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 11277 dtrace_dof_error(dof, "DOF version mismatch"); 11278 return (-1); 11279 } 11280 11281 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 11282 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 11283 return (-1); 11284 } 11285 11286 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 11287 dtrace_dof_error(dof, "DOF uses too many integer registers"); 11288 return (-1); 11289 } 11290 11291 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 11292 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 11293 return (-1); 11294 } 11295 11296 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 11297 if (dof->dofh_ident[i] != 0) { 11298 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 11299 return (-1); 11300 } 11301 } 11302 11303 if (dof->dofh_flags & ~DOF_FL_VALID) { 11304 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 11305 return (-1); 11306 } 11307 11308 if (dof->dofh_secsize == 0) { 11309 dtrace_dof_error(dof, "zero section header size"); 11310 return (-1); 11311 } 11312 11313 /* 11314 * Check that the section headers don't exceed the amount of DOF 11315 * data. Note that we cast the section size and number of sections 11316 * to uint64_t's to prevent possible overflow in the multiplication. 11317 */ 11318 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 11319 11320 if (dof->dofh_secoff > len || seclen > len || 11321 dof->dofh_secoff + seclen > len) { 11322 dtrace_dof_error(dof, "truncated section headers"); 11323 return (-1); 11324 } 11325 11326 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 11327 dtrace_dof_error(dof, "misaligned section headers"); 11328 return (-1); 11329 } 11330 11331 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 11332 dtrace_dof_error(dof, "misaligned section size"); 11333 return (-1); 11334 } 11335 11336 /* 11337 * Take an initial pass through the section headers to be sure that 11338 * the headers don't have stray offsets. If the 'noprobes' flag is 11339 * set, do not permit sections relating to providers, probes, or args. 
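 *
 * Note the shape of the bounds checks below: an offset and a size are
 * each compared against 'len' individually before their sum is.  With
 * both operands known to be no greater than 'len', the addition cannot
 * wrap unless 'len' itself exceeds half the range of the type, so the
 * combined test is overflow-safe:
 *
 *	if (sec->dofs_offset > len || sec->dofs_size > len ||
 *	    sec->dofs_offset + sec->dofs_size > len)
 *		(corrupt section header)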
11340 */ 11341 for (i = 0; i < dof->dofh_secnum; i++) { 11342 dof_sec_t *sec = (dof_sec_t *)(daddr + 11343 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 11344 11345 if (noprobes) { 11346 switch (sec->dofs_type) { 11347 case DOF_SECT_PROVIDER: 11348 case DOF_SECT_PROBES: 11349 case DOF_SECT_PRARGS: 11350 case DOF_SECT_PROFFS: 11351 dtrace_dof_error(dof, "illegal sections " 11352 "for enabling"); 11353 return (-1); 11354 } 11355 } 11356 11357 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 11358 continue; /* just ignore non-loadable sections */ 11359 11360 if (sec->dofs_align & (sec->dofs_align - 1)) { 11361 dtrace_dof_error(dof, "bad section alignment"); 11362 return (-1); 11363 } 11364 11365 if (sec->dofs_offset & (sec->dofs_align - 1)) { 11366 dtrace_dof_error(dof, "misaligned section"); 11367 return (-1); 11368 } 11369 11370 if (sec->dofs_offset > len || sec->dofs_size > len || 11371 sec->dofs_offset + sec->dofs_size > len) { 11372 dtrace_dof_error(dof, "corrupt section header"); 11373 return (-1); 11374 } 11375 11376 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 11377 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 11378 dtrace_dof_error(dof, "non-terminating string table"); 11379 return (-1); 11380 } 11381 } 11382 11383 /* 11384 * Take a second pass through the sections and locate and perform any 11385 * relocations that are present. We do this after the first pass to 11386 * be sure that all sections have had their headers validated. 11387 */ 11388 for (i = 0; i < dof->dofh_secnum; i++) { 11389 dof_sec_t *sec = (dof_sec_t *)(daddr + 11390 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 11391 11392 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 11393 continue; /* skip sections that are not loadable */ 11394 11395 switch (sec->dofs_type) { 11396 case DOF_SECT_URELHDR: 11397 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 11398 return (-1); 11399 break; 11400 } 11401 } 11402 11403 if ((enab = *enabp) == NULL) 11404 enab = *enabp = dtrace_enabling_create(vstate); 11405 11406 for (i = 0; i < dof->dofh_secnum; i++) { 11407 dof_sec_t *sec = (dof_sec_t *)(daddr + 11408 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 11409 11410 if (sec->dofs_type != DOF_SECT_ECBDESC) 11411 continue; 11412 11413 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 11414 dtrace_enabling_destroy(enab); 11415 *enabp = NULL; 11416 return (-1); 11417 } 11418 11419 dtrace_enabling_add(enab, ep); 11420 } 11421 11422 return (0); 11423 } 11424 11425 /* 11426 * Process DOF for any options. This routine assumes that the DOF has been 11427 * at least processed by dtrace_dof_slurp(). 
11428 */ 11429 static int 11430 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 11431 { 11432 int i, rval; 11433 uint32_t entsize; 11434 size_t offs; 11435 dof_optdesc_t *desc; 11436 11437 for (i = 0; i < dof->dofh_secnum; i++) { 11438 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 11439 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 11440 11441 if (sec->dofs_type != DOF_SECT_OPTDESC) 11442 continue; 11443 11444 if (sec->dofs_align != sizeof (uint64_t)) { 11445 dtrace_dof_error(dof, "bad alignment in " 11446 "option description"); 11447 return (EINVAL); 11448 } 11449 11450 if ((entsize = sec->dofs_entsize) == 0) { 11451 dtrace_dof_error(dof, "zeroed option entry size"); 11452 return (EINVAL); 11453 } 11454 11455 if (entsize < sizeof (dof_optdesc_t)) { 11456 dtrace_dof_error(dof, "bad option entry size"); 11457 return (EINVAL); 11458 } 11459 11460 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 11461 desc = (dof_optdesc_t *)((uintptr_t)dof + 11462 (uintptr_t)sec->dofs_offset + offs); 11463 11464 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 11465 dtrace_dof_error(dof, "non-zero option string"); 11466 return (EINVAL); 11467 } 11468 11469 if (desc->dofo_value == DTRACEOPT_UNSET) { 11470 dtrace_dof_error(dof, "unset option"); 11471 return (EINVAL); 11472 } 11473 11474 if ((rval = dtrace_state_option(state, 11475 desc->dofo_option, desc->dofo_value)) != 0) { 11476 dtrace_dof_error(dof, "rejected option"); 11477 return (rval); 11478 } 11479 } 11480 } 11481 11482 return (0); 11483 } 11484 11485 /* 11486 * DTrace Consumer State Functions 11487 */ 11488 int 11489 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 11490 { 11491 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 11492 void *base; 11493 uintptr_t limit; 11494 dtrace_dynvar_t *dvar, *next, *start; 11495 int i; 11496 11497 ASSERT(MUTEX_HELD(&dtrace_lock)); 11498 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 11499 11500 bzero(dstate, sizeof (dtrace_dstate_t)); 11501 11502 if ((dstate->dtds_chunksize = chunksize) == 0) 11503 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 11504 11505 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 11506 size = min; 11507 11508 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 11509 return (ENOMEM); 11510 11511 dstate->dtds_size = size; 11512 dstate->dtds_base = base; 11513 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 11514 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 11515 11516 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 11517 11518 if (hashsize != 1 && (hashsize & 1)) 11519 hashsize--; 11520 11521 dstate->dtds_hashsize = hashsize; 11522 dstate->dtds_hash = dstate->dtds_base; 11523 11524 /* 11525 * Set all of our hash buckets to point to the single sink, and (if 11526 * it hasn't already been set), set the sink's hash value to be the 11527 * sink sentinel value. The sink is needed for dynamic variable 11528 * lookups to know that they have iterated over an entire, valid hash 11529 * chain. 11530 */ 11531 for (i = 0; i < hashsize; i++) 11532 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 11533 11534 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 11535 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 11536 11537 /* 11538 * Determine number of active CPUs. Divide free list evenly among 11539 * active CPUs. 
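 *
 * To make the arithmetic concrete (with illustrative numbers, not
 * values taken from this file): given size = 1MB, a chunksize of 256
 * and an 8-byte dtrace_dynhash_t, hashsize is 1048576 / 264 = 3971,
 * trimmed to an even 3970 by the adjustment above.  The free list
 * then begins beyond the 3970 * 8 bytes of hash buckets, and maxper
 * is the remainder divided by NCPU and truncated down to a multiple
 * of the chunksize.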
11540 */ 11541 start = (dtrace_dynvar_t *) 11542 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 11543 limit = (uintptr_t)base + size; 11544 11545 maxper = (limit - (uintptr_t)start) / NCPU; 11546 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 11547 11548 for (i = 0; i < NCPU; i++) { 11549 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 11550 11551 /* 11552 * If we don't even have enough chunks to make it once through 11553 * NCPUs, we're just going to allocate everything to the first 11554 * CPU. And if we're on the last CPU, we're going to allocate 11555 * whatever is left over. In either case, we set the limit to 11556 * be the limit of the dynamic variable space. 11557 */ 11558 if (maxper == 0 || i == NCPU - 1) { 11559 limit = (uintptr_t)base + size; 11560 start = NULL; 11561 } else { 11562 limit = (uintptr_t)start + maxper; 11563 start = (dtrace_dynvar_t *)limit; 11564 } 11565 11566 ASSERT(limit <= (uintptr_t)base + size); 11567 11568 for (;;) { 11569 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 11570 dstate->dtds_chunksize); 11571 11572 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 11573 break; 11574 11575 dvar->dtdv_next = next; 11576 dvar = next; 11577 } 11578 11579 if (maxper == 0) 11580 break; 11581 } 11582 11583 return (0); 11584 } 11585 11586 void 11587 dtrace_dstate_fini(dtrace_dstate_t *dstate) 11588 { 11589 ASSERT(MUTEX_HELD(&cpu_lock)); 11590 11591 if (dstate->dtds_base == NULL) 11592 return; 11593 11594 kmem_free(dstate->dtds_base, dstate->dtds_size); 11595 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 11596 } 11597 11598 static void 11599 dtrace_vstate_fini(dtrace_vstate_t *vstate) 11600 { 11601 /* 11602 * Logical XOR, where are you? 11603 */ 11604 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 11605 11606 if (vstate->dtvs_nglobals > 0) { 11607 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 11608 sizeof (dtrace_statvar_t *)); 11609 } 11610 11611 if (vstate->dtvs_ntlocals > 0) { 11612 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 11613 sizeof (dtrace_difv_t)); 11614 } 11615 11616 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 11617 11618 if (vstate->dtvs_nlocals > 0) { 11619 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 11620 sizeof (dtrace_statvar_t *)); 11621 } 11622 } 11623 11624 static void 11625 dtrace_state_clean(dtrace_state_t *state) 11626 { 11627 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 11628 return; 11629 11630 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 11631 dtrace_speculation_clean(state); 11632 } 11633 11634 static void 11635 dtrace_state_deadman(dtrace_state_t *state) 11636 { 11637 hrtime_t now; 11638 11639 dtrace_sync(); 11640 11641 now = dtrace_gethrtime(); 11642 11643 if (state != dtrace_anon.dta_state && 11644 now - state->dts_laststatus >= dtrace_deadman_user) 11645 return; 11646 11647 /* 11648 * We must be sure that dts_alive never appears to be less than the 11649 * value upon entry to dtrace_state_deadman(), and because we lack a 11650 * dtrace_cas64(), we cannot store to it atomically. We thus instead 11651 * store INT64_MAX to it, followed by a memory barrier, followed by 11652 * the new value. This assures that dts_alive never appears to be 11653 * less than its true value, regardless of the order in which the 11654 * stores to the underlying storage are issued. 
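 *
 * Schematically, the idiom is:
 *
 *	dts_alive = INT64_MAX;		(park at a maximal value)
 *	dtrace_membar_producer();	(order the two stores)
 *	dts_alive = now;		(publish the real value)
 *
 * An observer racing with this sequence may see the old value,
 * INT64_MAX, or the new value -- none of which is below the value
 * upon entry.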
11655 */ 11656 state->dts_alive = INT64_MAX; 11657 dtrace_membar_producer(); 11658 state->dts_alive = now; 11659 } 11660 11661 dtrace_state_t * 11662 dtrace_state_create(dev_t *devp, cred_t *cr) 11663 { 11664 minor_t minor; 11665 major_t major; 11666 char c[30]; 11667 dtrace_state_t *state; 11668 dtrace_optval_t *opt; 11669 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 11670 11671 ASSERT(MUTEX_HELD(&dtrace_lock)); 11672 ASSERT(MUTEX_HELD(&cpu_lock)); 11673 11674 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 11675 VM_BESTFIT | VM_SLEEP); 11676 11677 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 11678 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 11679 return (NULL); 11680 } 11681 11682 state = ddi_get_soft_state(dtrace_softstate, minor); 11683 state->dts_epid = DTRACE_EPIDNONE + 1; 11684 11685 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor); 11686 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 11687 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 11688 11689 if (devp != NULL) { 11690 major = getemajor(*devp); 11691 } else { 11692 major = ddi_driver_major(dtrace_devi); 11693 } 11694 11695 state->dts_dev = makedevice(major, minor); 11696 11697 if (devp != NULL) 11698 *devp = state->dts_dev; 11699 11700 /* 11701 * We allocate NCPU buffers. On the one hand, this can be quite 11702 * a bit of memory per instance (nearly 36K on a Starcat). On the 11703 * other hand, it saves an additional memory reference in the probe 11704 * path. 11705 */ 11706 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 11707 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 11708 state->dts_cleaner = CYCLIC_NONE; 11709 state->dts_deadman = CYCLIC_NONE; 11710 state->dts_vstate.dtvs_state = state; 11711 11712 for (i = 0; i < DTRACEOPT_MAX; i++) 11713 state->dts_options[i] = DTRACEOPT_UNSET; 11714 11715 /* 11716 * Set the default options. 11717 */ 11718 opt = state->dts_options; 11719 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 11720 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 11721 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 11722 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 11723 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 11724 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 11725 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 11726 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 11727 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 11728 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 11729 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 11730 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 11731 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 11732 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 11733 11734 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 11735 11736 /* 11737 * Depending on the user credentials, we set flag bits which alter probe 11738 * visibility or the amount of destructiveness allowed. In the case of 11739 * actual anonymous tracing, or the possession of all privileges, all of 11740 * the normal checks are bypassed. 11741 */ 11742 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 11743 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 11744 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 11745 } else { 11746 /* 11747 * Set up the credentials for this instantiation. 
We take a 11748 * hold on the credential to prevent it from disappearing on 11749 * us; this in turn prevents the zone_t referenced by this 11750 * credential from disappearing. This means that we can 11751 * examine the credential and the zone from probe context. 11752 */ 11753 crhold(cr); 11754 state->dts_cred.dcr_cred = cr; 11755 11756 /* 11757 * CRA_PROC means "we have *some* privilege for dtrace" and 11758 * unlocks the use of variables like pid, zonename, etc. 11759 */ 11760 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 11761 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 11762 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 11763 } 11764 11765 /* 11766 * dtrace_user allows use of syscall and profile providers. 11767 * If the user also has proc_owner and/or proc_zone, we 11768 * extend the scope to include additional visibility and 11769 * destructive power. 11770 */ 11771 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 11772 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 11773 state->dts_cred.dcr_visible |= 11774 DTRACE_CRV_ALLPROC; 11775 11776 state->dts_cred.dcr_action |= 11777 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 11778 } 11779 11780 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 11781 state->dts_cred.dcr_visible |= 11782 DTRACE_CRV_ALLZONE; 11783 11784 state->dts_cred.dcr_action |= 11785 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 11786 } 11787 11788 /* 11789 * If we have all privs in whatever zone this is, 11790 * we can do destructive things to processes which 11791 * have altered credentials. 11792 */ 11793 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 11794 cr->cr_zone->zone_privset)) { 11795 state->dts_cred.dcr_action |= 11796 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 11797 } 11798 } 11799 11800 /* 11801 * Holding the dtrace_kernel privilege also implies that 11802 * the user has the dtrace_user privilege from a visibility 11803 * perspective. But without further privileges, some 11804 * destructive actions are not available. 11805 */ 11806 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 11807 /* 11808 * Make all probes in all zones visible. However, 11809 * this doesn't mean that all actions become available 11810 * to all zones. 11811 */ 11812 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 11813 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 11814 11815 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 11816 DTRACE_CRA_PROC; 11817 /* 11818 * Holding proc_owner means that destructive actions 11819 * for *this* zone are allowed. 11820 */ 11821 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 11822 state->dts_cred.dcr_action |= 11823 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 11824 11825 /* 11826 * Holding proc_zone means that destructive actions 11827 * for this user/group ID in all zones are allowed. 11828 */ 11829 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 11830 state->dts_cred.dcr_action |= 11831 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 11832 11833 /* 11834 * If we have all privs in whatever zone this is, 11835 * we can do destructive things to processes which 11836 * have altered credentials. 11837 */ 11838 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 11839 cr->cr_zone->zone_privset)) { 11840 state->dts_cred.dcr_action |= 11841 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 11842 } 11843 } 11844 11845 /* 11846 * Holding the dtrace_proc privilege gives control over fasttrap 11847 * and pid providers. We need to grant wider destructive 11848 * privileges in the event that the user has proc_owner and/or 11849 * proc_zone.
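 *
 * Summarizing this clause (the mapping is taken directly from
 * the code below):
 *
 *	dtrace_proc + proc_owner -> DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER
 *	dtrace_proc + proc_zone  -> DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE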
11850 */ 11851 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 11852 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 11853 state->dts_cred.dcr_action |= 11854 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 11855 11856 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 11857 state->dts_cred.dcr_action |= 11858 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 11859 } 11860 } 11861 11862 return (state); 11863 } 11864 11865 static int 11866 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 11867 { 11868 dtrace_optval_t *opt = state->dts_options, size; 11869 processorid_t cpu; 11870 int flags = 0, rval; 11871 11872 ASSERT(MUTEX_HELD(&dtrace_lock)); 11873 ASSERT(MUTEX_HELD(&cpu_lock)); 11874 ASSERT(which < DTRACEOPT_MAX); 11875 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 11876 (state == dtrace_anon.dta_state && 11877 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 11878 11879 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 11880 return (0); 11881 11882 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 11883 cpu = opt[DTRACEOPT_CPU]; 11884 11885 if (which == DTRACEOPT_SPECSIZE) 11886 flags |= DTRACEBUF_NOSWITCH; 11887 11888 if (which == DTRACEOPT_BUFSIZE) { 11889 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 11890 flags |= DTRACEBUF_RING; 11891 11892 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 11893 flags |= DTRACEBUF_FILL; 11894 11895 if (state != dtrace_anon.dta_state || 11896 state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 11897 flags |= DTRACEBUF_INACTIVE; 11898 } 11899 11900 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) { 11901 /* 11902 * The size must be 8-byte aligned. If the size is not 8-byte 11903 * aligned, drop it down by the difference. 11904 */ 11905 if (size & (sizeof (uint64_t) - 1)) 11906 size -= size & (sizeof (uint64_t) - 1); 11907 11908 if (size < state->dts_reserve) { 11909 /* 11910 * Buffers always must be large enough to accommodate 11911 * their prereserved space. We return E2BIG instead 11912 * of ENOMEM in this case to allow for user-level 11913 * software to differentiate the cases. 11914 */ 11915 return (E2BIG); 11916 } 11917 11918 rval = dtrace_buffer_alloc(buf, size, flags, cpu); 11919 11920 if (rval != ENOMEM) { 11921 opt[which] = size; 11922 return (rval); 11923 } 11924 11925 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 11926 return (rval); 11927 } 11928 11929 return (ENOMEM); 11930 } 11931 11932 static int 11933 dtrace_state_buffers(dtrace_state_t *state) 11934 { 11935 dtrace_speculation_t *spec = state->dts_speculations; 11936 int rval, i; 11937 11938 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 11939 DTRACEOPT_BUFSIZE)) != 0) 11940 return (rval); 11941 11942 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 11943 DTRACEOPT_AGGSIZE)) != 0) 11944 return (rval); 11945 11946 for (i = 0; i < state->dts_nspeculations; i++) { 11947 if ((rval = dtrace_state_buffer(state, 11948 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 11949 return (rval); 11950 } 11951 11952 return (0); 11953 } 11954 11955 static void 11956 dtrace_state_prereserve(dtrace_state_t *state) 11957 { 11958 dtrace_ecb_t *ecb; 11959 dtrace_probe_t *probe; 11960 11961 state->dts_reserve = 0; 11962 11963 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 11964 return; 11965 11966 /* 11967 * If our buffer policy is a "fill" buffer policy, we need to set the 11968 * prereserved space to be the space required by the END probes. 
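 *
 * For example (sizes illustrative only): if this state has two ECBs
 * on the END probe needing 64 and 32 bytes with 8-byte alignments,
 * dts_reserve becomes (64 + 8) + (32 + 8) = 112 bytes, and any fill
 * buffer smaller than that will be refused with E2BIG by
 * dtrace_state_buffer().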
11969 */ 11970 probe = dtrace_probes[dtrace_probeid_end - 1]; 11971 ASSERT(probe != NULL); 11972 11973 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 11974 if (ecb->dte_state != state) 11975 continue; 11976 11977 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 11978 } 11979 } 11980 11981 static int 11982 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 11983 { 11984 dtrace_optval_t *opt = state->dts_options, sz, nspec; 11985 dtrace_speculation_t *spec; 11986 dtrace_buffer_t *buf; 11987 cyc_handler_t hdlr; 11988 cyc_time_t when; 11989 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 11990 dtrace_icookie_t cookie; 11991 11992 mutex_enter(&cpu_lock); 11993 mutex_enter(&dtrace_lock); 11994 11995 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 11996 rval = EBUSY; 11997 goto out; 11998 } 11999 12000 /* 12001 * Before we can perform any checks, we must prime all of the 12002 * retained enablings that correspond to this state. 12003 */ 12004 dtrace_enabling_prime(state); 12005 12006 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 12007 rval = EACCES; 12008 goto out; 12009 } 12010 12011 dtrace_state_prereserve(state); 12012 12013 /* 12014 * Now we want to do is try to allocate our speculations. 12015 * We do not automatically resize the number of speculations; if 12016 * this fails, we will fail the operation. 12017 */ 12018 nspec = opt[DTRACEOPT_NSPEC]; 12019 ASSERT(nspec != DTRACEOPT_UNSET); 12020 12021 if (nspec > INT_MAX) { 12022 rval = ENOMEM; 12023 goto out; 12024 } 12025 12026 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP); 12027 12028 if (spec == NULL) { 12029 rval = ENOMEM; 12030 goto out; 12031 } 12032 12033 state->dts_speculations = spec; 12034 state->dts_nspeculations = (int)nspec; 12035 12036 for (i = 0; i < nspec; i++) { 12037 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) { 12038 rval = ENOMEM; 12039 goto err; 12040 } 12041 12042 spec[i].dtsp_buffer = buf; 12043 } 12044 12045 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 12046 if (dtrace_anon.dta_state == NULL) { 12047 rval = ENOENT; 12048 goto out; 12049 } 12050 12051 if (state->dts_necbs != 0) { 12052 rval = EALREADY; 12053 goto out; 12054 } 12055 12056 state->dts_anon = dtrace_anon_grab(); 12057 ASSERT(state->dts_anon != NULL); 12058 state = state->dts_anon; 12059 12060 /* 12061 * We want "grabanon" to be set in the grabbed state, so we'll 12062 * copy that option value from the grabbing state into the 12063 * grabbed state. 12064 */ 12065 state->dts_options[DTRACEOPT_GRABANON] = 12066 opt[DTRACEOPT_GRABANON]; 12067 12068 *cpu = dtrace_anon.dta_beganon; 12069 12070 /* 12071 * If the anonymous state is active (as it almost certainly 12072 * is if the anonymous enabling ultimately matched anything), 12073 * we don't allow any further option processing -- but we 12074 * don't return failure. 12075 */ 12076 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 12077 goto out; 12078 } 12079 12080 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 12081 opt[DTRACEOPT_AGGSIZE] != 0) { 12082 if (state->dts_aggregations == NULL) { 12083 /* 12084 * We're not going to create an aggregation buffer 12085 * because we don't have any ECBs that contain 12086 * aggregations -- set this option to 0. 12087 */ 12088 opt[DTRACEOPT_AGGSIZE] = 0; 12089 } else { 12090 /* 12091 * If we have an aggregation buffer, we must also have 12092 * a buffer to use as scratch. 
12093 */ 12094 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 12095 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 12096 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 12097 } 12098 } 12099 } 12100 12101 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 12102 opt[DTRACEOPT_SPECSIZE] != 0) { 12103 if (!state->dts_speculates) { 12104 /* 12105 * We're not going to create speculation buffers 12106 * because we don't have any ECBs that actually 12107 * speculate -- set the speculation size to 0. 12108 */ 12109 opt[DTRACEOPT_SPECSIZE] = 0; 12110 } 12111 } 12112 12113 /* 12114 * The bare minimum size for any buffer that we're actually going to 12115 * do anything to is sizeof (uint64_t). 12116 */ 12117 sz = sizeof (uint64_t); 12118 12119 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 12120 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 12121 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 12122 /* 12123 * A buffer size has been explicitly set to 0 (or to a size 12124 * that will be adjusted to 0) and we need the space -- we 12125 * need to return failure. We return ENOSPC to differentiate 12126 * it from failing to allocate a buffer due to failure to meet 12127 * the reserve (for which we return E2BIG). 12128 */ 12129 rval = ENOSPC; 12130 goto out; 12131 } 12132 12133 if ((rval = dtrace_state_buffers(state)) != 0) 12134 goto err; 12135 12136 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 12137 sz = dtrace_dstate_defsize; 12138 12139 do { 12140 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 12141 12142 if (rval == 0) 12143 break; 12144 12145 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 12146 goto err; 12147 } while (sz >>= 1); 12148 12149 opt[DTRACEOPT_DYNVARSIZE] = sz; 12150 12151 if (rval != 0) 12152 goto err; 12153 12154 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 12155 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 12156 12157 if (opt[DTRACEOPT_CLEANRATE] == 0) 12158 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 12159 12160 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 12161 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 12162 12163 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 12164 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 12165 12166 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 12167 hdlr.cyh_arg = state; 12168 hdlr.cyh_level = CY_LOW_LEVEL; 12169 12170 when.cyt_when = 0; 12171 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 12172 12173 state->dts_cleaner = cyclic_add(&hdlr, &when); 12174 12175 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 12176 hdlr.cyh_arg = state; 12177 hdlr.cyh_level = CY_LOW_LEVEL; 12178 12179 when.cyt_when = 0; 12180 when.cyt_interval = dtrace_deadman_interval; 12181 12182 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 12183 state->dts_deadman = cyclic_add(&hdlr, &when); 12184 12185 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 12186 12187 /* 12188 * Now it's time to actually fire the BEGIN probe. We need to disable 12189 * interrupts here both to record the CPU on which we fired the BEGIN 12190 * probe (the data from this CPU will be processed first at user 12191 * level) and to manually activate the buffer for this CPU. 
12192 */ 12193 cookie = dtrace_interrupt_disable(); 12194 *cpu = CPU->cpu_id; 12195 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 12196 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 12197 12198 dtrace_probe(dtrace_probeid_begin, 12199 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 12200 dtrace_interrupt_enable(cookie); 12201 /* 12202 * We may have had an exit action from a BEGIN probe; only change our 12203 * state to ACTIVE if we're still in WARMUP. 12204 */ 12205 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 12206 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 12207 12208 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 12209 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 12210 12211 /* 12212 * Regardless of whether or not now we're in ACTIVE or DRAINING, we 12213 * want each CPU to transition its principal buffer out of the 12214 * INACTIVE state. Doing this assures that no CPU will suddenly begin 12215 * processing an ECB halfway down a probe's ECB chain; all CPUs will 12216 * atomically transition from processing none of a state's ECBs to 12217 * processing all of them. 12218 */ 12219 dtrace_xcall(DTRACE_CPUALL, 12220 (dtrace_xcall_t)dtrace_buffer_activate, state); 12221 goto out; 12222 12223 err: 12224 dtrace_buffer_free(state->dts_buffer); 12225 dtrace_buffer_free(state->dts_aggbuffer); 12226 12227 if ((nspec = state->dts_nspeculations) == 0) { 12228 ASSERT(state->dts_speculations == NULL); 12229 goto out; 12230 } 12231 12232 spec = state->dts_speculations; 12233 ASSERT(spec != NULL); 12234 12235 for (i = 0; i < state->dts_nspeculations; i++) { 12236 if ((buf = spec[i].dtsp_buffer) == NULL) 12237 break; 12238 12239 dtrace_buffer_free(buf); 12240 kmem_free(buf, bufsize); 12241 } 12242 12243 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 12244 state->dts_nspeculations = 0; 12245 state->dts_speculations = NULL; 12246 12247 out: 12248 mutex_exit(&dtrace_lock); 12249 mutex_exit(&cpu_lock); 12250 12251 return (rval); 12252 } 12253 12254 static int 12255 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 12256 { 12257 dtrace_icookie_t cookie; 12258 12259 ASSERT(MUTEX_HELD(&dtrace_lock)); 12260 12261 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 12262 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 12263 return (EINVAL); 12264 12265 /* 12266 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 12267 * to be sure that every CPU has seen it. See below for the details 12268 * on why this is done. 12269 */ 12270 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 12271 dtrace_sync(); 12272 12273 /* 12274 * By this point, it is impossible for any CPU to be still processing 12275 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 12276 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 12277 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 12278 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 12279 * iff we're in the END probe. 12280 */ 12281 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 12282 dtrace_sync(); 12283 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 12284 12285 /* 12286 * Finally, we can release the reserve and call the END probe. We 12287 * disable interrupts across calling the END probe to allow us to 12288 * return the CPU on which we actually called the END probe. This 12289 * allows user-land to be sure that this CPU's principal buffer is 12290 * processed last. 
12291 */ 12292 state->dts_reserve = 0; 12293 12294 cookie = dtrace_interrupt_disable(); 12295 *cpu = CPU->cpu_id; 12296 dtrace_probe(dtrace_probeid_end, 12297 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 12298 dtrace_interrupt_enable(cookie); 12299 12300 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 12301 dtrace_sync(); 12302 12303 return (0); 12304 } 12305 12306 static int 12307 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 12308 dtrace_optval_t val) 12309 { 12310 ASSERT(MUTEX_HELD(&dtrace_lock)); 12311 12312 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 12313 return (EBUSY); 12314 12315 if (option >= DTRACEOPT_MAX) 12316 return (EINVAL); 12317 12318 if (option != DTRACEOPT_CPU && val < 0) 12319 return (EINVAL); 12320 12321 switch (option) { 12322 case DTRACEOPT_DESTRUCTIVE: 12323 if (dtrace_destructive_disallow) 12324 return (EACCES); 12325 12326 state->dts_cred.dcr_destructive = 1; 12327 break; 12328 12329 case DTRACEOPT_BUFSIZE: 12330 case DTRACEOPT_DYNVARSIZE: 12331 case DTRACEOPT_AGGSIZE: 12332 case DTRACEOPT_SPECSIZE: 12333 case DTRACEOPT_STRSIZE: 12334 if (val < 0) 12335 return (EINVAL); 12336 12337 if (val >= LONG_MAX) { 12338 /* 12339 * If this is an otherwise negative value, set it to 12340 * the highest multiple of 128m less than LONG_MAX. 12341 * Technically, we're adjusting the size without 12342 * regard to the buffer resizing policy, but in fact, 12343 * this has no effect -- if we set the buffer size to 12344 * ~LONG_MAX and the buffer policy is ultimately set to 12345 * be "manual", the buffer allocation is guaranteed to 12346 * fail, if only because the allocation requires two 12347 * buffers. (We set the the size to the highest 12348 * multiple of 128m because it ensures that the size 12349 * will remain a multiple of a megabyte when 12350 * repeatedly halved -- all the way down to 15m.) 12351 */ 12352 val = LONG_MAX - (1 << 27) + 1; 12353 } 12354 } 12355 12356 state->dts_options[option] = val; 12357 12358 return (0); 12359 } 12360 12361 static void 12362 dtrace_state_destroy(dtrace_state_t *state) 12363 { 12364 dtrace_ecb_t *ecb; 12365 dtrace_vstate_t *vstate = &state->dts_vstate; 12366 minor_t minor = getminor(state->dts_dev); 12367 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 12368 dtrace_speculation_t *spec = state->dts_speculations; 12369 int nspec = state->dts_nspeculations; 12370 uint32_t match; 12371 12372 ASSERT(MUTEX_HELD(&dtrace_lock)); 12373 ASSERT(MUTEX_HELD(&cpu_lock)); 12374 12375 /* 12376 * First, retract any retained enablings for this state. 12377 */ 12378 dtrace_enabling_retract(state); 12379 ASSERT(state->dts_nretained == 0); 12380 12381 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || 12382 state->dts_activity == DTRACE_ACTIVITY_DRAINING) { 12383 /* 12384 * We have managed to come into dtrace_state_destroy() on a 12385 * hot enabling -- almost certainly because of a disorderly 12386 * shutdown of a consumer. (That is, a consumer that is 12387 * exiting without having called dtrace_stop().) In this case, 12388 * we're going to set our activity to be KILLED, and then 12389 * issue a sync to be sure that everyone is out of probe 12390 * context before we start blowing away ECBs. 12391 */ 12392 state->dts_activity = DTRACE_ACTIVITY_KILLED; 12393 dtrace_sync(); 12394 } 12395 12396 /* 12397 * Release the credential hold we took in dtrace_state_create(). 
12398 */ 12399 if (state->dts_cred.dcr_cred != NULL) 12400 crfree(state->dts_cred.dcr_cred); 12401 12402 /* 12403 * Now we can safely disable and destroy any enabled probes. Because 12404 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 12405 * (especially if they're all enabled), we take two passes through the 12406 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 12407 * in the second we disable whatever is left over. 12408 */ 12409 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 12410 for (i = 0; i < state->dts_necbs; i++) { 12411 if ((ecb = state->dts_ecbs[i]) == NULL) 12412 continue; 12413 12414 if (match && ecb->dte_probe != NULL) { 12415 dtrace_probe_t *probe = ecb->dte_probe; 12416 dtrace_provider_t *prov = probe->dtpr_provider; 12417 12418 if (!(prov->dtpv_priv.dtpp_flags & match)) 12419 continue; 12420 } 12421 12422 dtrace_ecb_disable(ecb); 12423 dtrace_ecb_destroy(ecb); 12424 } 12425 12426 if (!match) 12427 break; 12428 } 12429 12430 /* 12431 * Before we free the buffers, perform one more sync to assure that 12432 * every CPU is out of probe context. 12433 */ 12434 dtrace_sync(); 12435 12436 dtrace_buffer_free(state->dts_buffer); 12437 dtrace_buffer_free(state->dts_aggbuffer); 12438 12439 for (i = 0; i < nspec; i++) 12440 dtrace_buffer_free(spec[i].dtsp_buffer); 12441 12442 if (state->dts_cleaner != CYCLIC_NONE) 12443 cyclic_remove(state->dts_cleaner); 12444 12445 if (state->dts_deadman != CYCLIC_NONE) 12446 cyclic_remove(state->dts_deadman); 12447 12448 dtrace_dstate_fini(&vstate->dtvs_dynvars); 12449 dtrace_vstate_fini(vstate); 12450 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 12451 12452 if (state->dts_aggregations != NULL) { 12453 #ifdef DEBUG 12454 for (i = 0; i < state->dts_naggregations; i++) 12455 ASSERT(state->dts_aggregations[i] == NULL); 12456 #endif 12457 ASSERT(state->dts_naggregations > 0); 12458 kmem_free(state->dts_aggregations, 12459 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 12460 } 12461 12462 kmem_free(state->dts_buffer, bufsize); 12463 kmem_free(state->dts_aggbuffer, bufsize); 12464 12465 for (i = 0; i < nspec; i++) 12466 kmem_free(spec[i].dtsp_buffer, bufsize); 12467 12468 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 12469 12470 dtrace_format_destroy(state); 12471 12472 vmem_destroy(state->dts_aggid_arena); 12473 ddi_soft_state_free(dtrace_softstate, minor); 12474 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 12475 } 12476 12477 /* 12478 * DTrace Anonymous Enabling Functions 12479 */ 12480 static dtrace_state_t * 12481 dtrace_anon_grab(void) 12482 { 12483 dtrace_state_t *state; 12484 12485 ASSERT(MUTEX_HELD(&dtrace_lock)); 12486 12487 if ((state = dtrace_anon.dta_state) == NULL) { 12488 ASSERT(dtrace_anon.dta_enabling == NULL); 12489 return (NULL); 12490 } 12491 12492 ASSERT(dtrace_anon.dta_enabling != NULL); 12493 ASSERT(dtrace_retained != NULL); 12494 12495 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 12496 dtrace_anon.dta_enabling = NULL; 12497 dtrace_anon.dta_state = NULL; 12498 12499 return (state); 12500 } 12501 12502 static void 12503 dtrace_anon_property(void) 12504 { 12505 int i, rv; 12506 dtrace_state_t *state; 12507 dof_hdr_t *dof; 12508 char c[32]; /* enough for "dof-data-" + digits */ 12509 12510 ASSERT(MUTEX_HELD(&dtrace_lock)); 12511 ASSERT(MUTEX_HELD(&cpu_lock)); 12512 12513 for (i = 0; ; i++) { 12514 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 12515 12516 dtrace_err_verbose = 1; 12517 12518 if ((dof = dtrace_dof_property(c)) 
== NULL) { 12519 dtrace_err_verbose = 0; 12520 break; 12521 } 12522 12523 /* 12524 * We want to create anonymous state, so we need to transition 12525 * the kernel debugger to indicate that DTrace is active. If 12526 * this fails (e.g. because the debugger has modified text in 12527 * some way), we won't continue with the processing. 12528 */ 12529 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 12530 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 12531 "enabling ignored."); 12532 dtrace_dof_destroy(dof); 12533 break; 12534 } 12535 12536 /* 12537 * If we haven't allocated an anonymous state, we'll do so now. 12538 */ 12539 if ((state = dtrace_anon.dta_state) == NULL) { 12540 state = dtrace_state_create(NULL, NULL); 12541 dtrace_anon.dta_state = state; 12542 12543 if (state == NULL) { 12544 /* 12545 * This basically shouldn't happen: the only 12546 * failure mode from dtrace_state_create() is a 12547 * failure of ddi_soft_state_zalloc() that 12548 * itself should never happen. Still, the 12549 * interface allows for a failure mode, and 12550 * we want to fail as gracefully as possible: 12551 * we'll emit an error message and cease 12552 * processing anonymous state in this case. 12553 */ 12554 cmn_err(CE_WARN, "failed to create " 12555 "anonymous state"); 12556 dtrace_dof_destroy(dof); 12557 break; 12558 } 12559 } 12560 12561 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 12562 &dtrace_anon.dta_enabling, 0, B_TRUE); 12563 12564 if (rv == 0) 12565 rv = dtrace_dof_options(dof, state); 12566 12567 dtrace_err_verbose = 0; 12568 dtrace_dof_destroy(dof); 12569 12570 if (rv != 0) { 12571 /* 12572 * This is malformed DOF; chuck any anonymous state 12573 * that we created. 12574 */ 12575 ASSERT(dtrace_anon.dta_enabling == NULL); 12576 dtrace_state_destroy(state); 12577 dtrace_anon.dta_state = NULL; 12578 break; 12579 } 12580 12581 ASSERT(dtrace_anon.dta_enabling != NULL); 12582 } 12583 12584 if (dtrace_anon.dta_enabling != NULL) { 12585 int rval; 12586 12587 /* 12588 * dtrace_enabling_retain() can only fail because we are 12589 * trying to retain more enablings than are allowed -- but 12590 * we only have one anonymous enabling, and we are guaranteed 12591 * to be allowed at least one retained enabling; we assert 12592 * that dtrace_enabling_retain() returns success. 12593 */ 12594 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 12595 ASSERT(rval == 0); 12596 12597 dtrace_enabling_dump(dtrace_anon.dta_enabling); 12598 } 12599 } 12600 12601 /* 12602 * DTrace Helper Functions 12603 */ 12604 static void 12605 dtrace_helper_trace(dtrace_helper_action_t *helper, 12606 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 12607 { 12608 uint32_t size, next, nnext, i; 12609 dtrace_helptrace_t *ent; 12610 uint16_t flags = cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 12611 12612 if (!dtrace_helptrace_enabled) 12613 return; 12614 12615 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 12616 12617 /* 12618 * What would a tracing framework be without its own tracing 12619 * framework? (Well, a hell of a lot simpler, for starters...) 12620 */ 12621 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 12622 sizeof (uint64_t) - sizeof (uint64_t); 12623 12624 /* 12625 * Iterate until we can allocate a slot in the trace buffer. 
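* To make the wrap concrete (sizes hypothetical): with a 64K buffer and a 112-byte record, a reader of dtrace_helptrace_next at 65480 finds that 65480 + 112 no longer fits, so nnext becomes just the record size; below, nnext == size is the telltale that this entry wrapped and actually lives at offset 0.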
12626 */ 12627 do { 12628 next = dtrace_helptrace_next; 12629 12630 if (next + size < dtrace_helptrace_bufsize) { 12631 nnext = next + size; 12632 } else { 12633 nnext = size; 12634 } 12635 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 12636 12637 /* 12638 * We have our slot; fill it in. 12639 */ 12640 if (nnext == size) 12641 next = 0; 12642 12643 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 12644 ent->dtht_helper = helper; 12645 ent->dtht_where = where; 12646 ent->dtht_nlocals = vstate->dtvs_nlocals; 12647 12648 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 12649 mstate->dtms_fltoffs : -1; 12650 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 12651 ent->dtht_illval = cpu_core[CPU->cpu_id].cpuc_dtrace_illval; 12652 12653 for (i = 0; i < vstate->dtvs_nlocals; i++) { 12654 dtrace_statvar_t *svar; 12655 12656 if ((svar = vstate->dtvs_locals[i]) == NULL) 12657 continue; 12658 12659 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 12660 ent->dtht_locals[i] = 12661 ((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id]; 12662 } 12663 } 12664 12665 static uint64_t 12666 dtrace_helper(int which, dtrace_mstate_t *mstate, 12667 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 12668 { 12669 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 12670 uint64_t sarg0 = mstate->dtms_arg[0]; 12671 uint64_t sarg1 = mstate->dtms_arg[1]; 12672 uint64_t rval; 12673 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 12674 dtrace_helper_action_t *helper; 12675 dtrace_vstate_t *vstate; 12676 dtrace_difo_t *pred; 12677 int i, trace = dtrace_helptrace_enabled; 12678 12679 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 12680 12681 if (helpers == NULL) 12682 return (0); 12683 12684 if ((helper = helpers->dthps_actions[which]) == NULL) 12685 return (0); 12686 12687 vstate = &helpers->dthps_vstate; 12688 mstate->dtms_arg[0] = arg0; 12689 mstate->dtms_arg[1] = arg1; 12690 12691 /* 12692 * Now iterate over each helper. If its predicate evaluates to 'true', 12693 * we'll call the corresponding actions. Note that the below calls 12694 * to dtrace_dif_emulate() may set faults in machine state. This is 12695 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 12696 * the stored DIF offset with its own (which is the desired behavior). 12697 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 12698 * from machine state; this is okay, too. 12699 */ 12700 for (; helper != NULL; helper = helper->dtha_next) { 12701 if ((pred = helper->dtha_predicate) != NULL) { 12702 if (trace) 12703 dtrace_helper_trace(helper, mstate, vstate, 0); 12704 12705 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 12706 goto next; 12707 12708 if (*flags & CPU_DTRACE_FAULT) 12709 goto err; 12710 } 12711 12712 for (i = 0; i < helper->dtha_nactions; i++) { 12713 if (trace) 12714 dtrace_helper_trace(helper, 12715 mstate, vstate, i + 1); 12716 12717 rval = dtrace_dif_emulate(helper->dtha_actions[i], 12718 mstate, vstate, state); 12719 12720 if (*flags & CPU_DTRACE_FAULT) 12721 goto err; 12722 } 12723 12724 next: 12725 if (trace) 12726 dtrace_helper_trace(helper, mstate, vstate, 12727 DTRACE_HELPTRACE_NEXT); 12728 } 12729 12730 if (trace) 12731 dtrace_helper_trace(helper, mstate, vstate, 12732 DTRACE_HELPTRACE_DONE); 12733 12734 /* 12735 * Restore the arg0 that we saved upon entry. 
12736 */ 12737 mstate->dtms_arg[0] = sarg0; 12738 mstate->dtms_arg[1] = sarg1; 12739 12740 return (rval); 12741 12742 err: 12743 if (trace) 12744 dtrace_helper_trace(helper, mstate, vstate, 12745 DTRACE_HELPTRACE_ERR); 12746 12747 /* 12748 * Restore the arg0 that we saved upon entry. 12749 */ 12750 mstate->dtms_arg[0] = sarg0; 12751 mstate->dtms_arg[1] = sarg1; 12752 12753 return (NULL); 12754 } 12755 12756 static void 12757 dtrace_helper_action_destroy(dtrace_helper_action_t *helper, 12758 dtrace_vstate_t *vstate) 12759 { 12760 int i; 12761 12762 if (helper->dtha_predicate != NULL) 12763 dtrace_difo_release(helper->dtha_predicate, vstate); 12764 12765 for (i = 0; i < helper->dtha_nactions; i++) { 12766 ASSERT(helper->dtha_actions[i] != NULL); 12767 dtrace_difo_release(helper->dtha_actions[i], vstate); 12768 } 12769 12770 kmem_free(helper->dtha_actions, 12771 helper->dtha_nactions * sizeof (dtrace_difo_t *)); 12772 kmem_free(helper, sizeof (dtrace_helper_action_t)); 12773 } 12774 12775 static int 12776 dtrace_helper_destroygen(int gen) 12777 { 12778 proc_t *p = curproc; 12779 dtrace_helpers_t *help = p->p_dtrace_helpers; 12780 dtrace_vstate_t *vstate; 12781 int i; 12782 12783 ASSERT(MUTEX_HELD(&dtrace_lock)); 12784 12785 if (help == NULL || gen > help->dthps_generation) 12786 return (EINVAL); 12787 12788 vstate = &help->dthps_vstate; 12789 12790 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 12791 dtrace_helper_action_t *last = NULL, *h, *next; 12792 12793 for (h = help->dthps_actions[i]; h != NULL; h = next) { 12794 next = h->dtha_next; 12795 12796 if (h->dtha_generation == gen) { 12797 if (last != NULL) { 12798 last->dtha_next = next; 12799 } else { 12800 help->dthps_actions[i] = next; 12801 } 12802 12803 dtrace_helper_action_destroy(h, vstate); 12804 } else { 12805 last = h; 12806 } 12807 } 12808 } 12809 12810 /* 12811 * Interate until we've cleared out all helper providers with the 12812 * given generation number. 12813 */ 12814 for (;;) { 12815 dtrace_helper_provider_t *prov; 12816 12817 /* 12818 * Look for a helper provider with the right generation. We 12819 * have to start back at the beginning of the list each time 12820 * because we drop dtrace_lock. It's unlikely that we'll make 12821 * more than two passes. 12822 */ 12823 for (i = 0; i < help->dthps_nprovs; i++) { 12824 prov = help->dthps_provs[i]; 12825 12826 if (prov->dthp_generation == gen) 12827 break; 12828 } 12829 12830 /* 12831 * If there were no matches, we're done. 12832 */ 12833 if (i == help->dthps_nprovs) 12834 break; 12835 12836 /* 12837 * Move the last helper provider into this slot. 12838 */ 12839 help->dthps_nprovs--; 12840 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; 12841 help->dthps_provs[help->dthps_nprovs] = NULL; 12842 12843 mutex_exit(&dtrace_lock); 12844 12845 /* 12846 * If we have a meta provider, remove this helper provider. 
12847 */ 12848 mutex_enter(&dtrace_meta_lock); 12849 if (dtrace_meta_pid != NULL) { 12850 ASSERT(dtrace_deferred_pid == NULL); 12851 dtrace_helper_provider_remove(&prov->dthp_prov, 12852 p->p_pid); 12853 } 12854 mutex_exit(&dtrace_meta_lock); 12855 12856 dtrace_helper_provider_destroy(prov); 12857 12858 mutex_enter(&dtrace_lock); 12859 } 12860 12861 return (0); 12862 } 12863 12864 static int 12865 dtrace_helper_validate(dtrace_helper_action_t *helper) 12866 { 12867 int err = 0, i; 12868 dtrace_difo_t *dp; 12869 12870 if ((dp = helper->dtha_predicate) != NULL) 12871 err += dtrace_difo_validate_helper(dp); 12872 12873 for (i = 0; i < helper->dtha_nactions; i++) 12874 err += dtrace_difo_validate_helper(helper->dtha_actions[i]); 12875 12876 return (err == 0); 12877 } 12878 12879 static int 12880 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 12881 { 12882 dtrace_helpers_t *help; 12883 dtrace_helper_action_t *helper, *last; 12884 dtrace_actdesc_t *act; 12885 dtrace_vstate_t *vstate; 12886 dtrace_predicate_t *pred; 12887 int count = 0, nactions = 0, i; 12888 12889 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 12890 return (EINVAL); 12891 12892 help = curproc->p_dtrace_helpers; 12893 last = help->dthps_actions[which]; 12894 vstate = &help->dthps_vstate; 12895 12896 for (count = 0; last != NULL; last = last->dtha_next) { 12897 count++; 12898 if (last->dtha_next == NULL) 12899 break; 12900 } 12901 12902 /* 12903 * If we already have dtrace_helper_actions_max helper actions for this 12904 * helper action type, we'll refuse to add a new one. 12905 */ 12906 if (count >= dtrace_helper_actions_max) 12907 return (ENOSPC); 12908 12909 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 12910 helper->dtha_generation = help->dthps_generation; 12911 12912 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 12913 ASSERT(pred->dtp_difo != NULL); 12914 dtrace_difo_hold(pred->dtp_difo); 12915 helper->dtha_predicate = pred->dtp_difo; 12916 } 12917 12918 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 12919 if (act->dtad_kind != DTRACEACT_DIFEXPR) 12920 goto err; 12921 12922 if (act->dtad_difo == NULL) 12923 goto err; 12924 12925 nactions++; 12926 } 12927 12928 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 12929 (helper->dtha_nactions = nactions), KM_SLEEP); 12930 12931 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 12932 dtrace_difo_hold(act->dtad_difo); 12933 helper->dtha_actions[i++] = act->dtad_difo; 12934 } 12935 12936 if (!dtrace_helper_validate(helper)) 12937 goto err; 12938 12939 if (last == NULL) { 12940 help->dthps_actions[which] = helper; 12941 } else { 12942 last->dtha_next = helper; 12943 } 12944 12945 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 12946 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 12947 dtrace_helptrace_next = 0; 12948 } 12949 12950 return (0); 12951 err: 12952 dtrace_helper_action_destroy(helper, vstate); 12953 return (EINVAL); 12954 } 12955 12956 static void 12957 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 12958 dof_helper_t *dofhp) 12959 { 12960 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 12961 12962 mutex_enter(&dtrace_meta_lock); 12963 mutex_enter(&dtrace_lock); 12964 12965 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 12966 /* 12967 * If the dtrace module is loaded but not attached, or if 12968 * there aren't isn't a meta provider registered to deal with 12969 * these provider descriptions, we need to postpone creating 12970 * the actual providers until 
later. 12971 */ 12972 12973 if (help->dthps_next == NULL && help->dthps_prev == NULL && 12974 dtrace_deferred_pid != help) { 12975 help->dthps_deferred = 1; 12976 help->dthps_pid = p->p_pid; 12977 help->dthps_next = dtrace_deferred_pid; 12978 help->dthps_prev = NULL; 12979 if (dtrace_deferred_pid != NULL) 12980 dtrace_deferred_pid->dthps_prev = help; 12981 dtrace_deferred_pid = help; 12982 } 12983 12984 mutex_exit(&dtrace_lock); 12985 12986 } else if (dofhp != NULL) { 12987 /* 12988 * If the dtrace module is loaded and we have a particular 12989 * helper provider description, pass that off to the 12990 * meta provider. 12991 */ 12992 12993 mutex_exit(&dtrace_lock); 12994 12995 dtrace_helper_provide(dofhp, p->p_pid); 12996 12997 } else { 12998 /* 12999 * Otherwise, just pass all the helper provider descriptions 13000 * off to the meta provider. 13001 */ 13002 13003 int i; 13004 mutex_exit(&dtrace_lock); 13005 13006 for (i = 0; i < help->dthps_nprovs; i++) { 13007 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 13008 p->p_pid); 13009 } 13010 } 13011 13012 mutex_exit(&dtrace_meta_lock); 13013 } 13014 13015 static int 13016 dtrace_helper_provider_add(dof_helper_t *dofhp, int gen) 13017 { 13018 dtrace_helpers_t *help; 13019 dtrace_helper_provider_t *hprov, **tmp_provs; 13020 uint_t tmp_maxprovs, i; 13021 13022 ASSERT(MUTEX_HELD(&dtrace_lock)); 13023 13024 help = curproc->p_dtrace_helpers; 13025 ASSERT(help != NULL); 13026 13027 /* 13028 * If we already have dtrace_helper_providers_max helper providers, 13029 * we'll refuse to add a new one. 13030 */ 13031 if (help->dthps_nprovs >= dtrace_helper_providers_max) 13032 return (ENOSPC); 13033 13034 /* 13035 * Check to make sure this isn't a duplicate. 13036 */ 13037 for (i = 0; i < help->dthps_nprovs; i++) { 13038 if (dofhp->dofhp_addr == 13039 help->dthps_provs[i]->dthp_prov.dofhp_addr) 13040 return (EALREADY); 13041 } 13042 13043 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 13044 hprov->dthp_prov = *dofhp; 13045 hprov->dthp_ref = 1; 13046 hprov->dthp_generation = gen; 13047 13048 /* 13049 * Allocate a bigger table for helper providers if it's already full.
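* The table starts at two entries and doubles on each subsequent growth (2, 4, 8, ...), saturating at dtrace_helper_providers_max; the existing entries are then copied into the new table and the old one is freed.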
13050 */ 13051 if (help->dthps_maxprovs == help->dthps_nprovs) { 13052 tmp_maxprovs = help->dthps_maxprovs; 13053 tmp_provs = help->dthps_provs; 13054 13055 if (help->dthps_maxprovs == 0) 13056 help->dthps_maxprovs = 2; 13057 else 13058 help->dthps_maxprovs *= 2; 13059 if (help->dthps_maxprovs > dtrace_helper_providers_max) 13060 help->dthps_maxprovs = dtrace_helper_providers_max; 13061 13062 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 13063 13064 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 13065 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 13066 13067 if (tmp_provs != NULL) { 13068 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 13069 sizeof (dtrace_helper_provider_t *)); 13070 kmem_free(tmp_provs, tmp_maxprovs * 13071 sizeof (dtrace_helper_provider_t *)); 13072 } 13073 } 13074 13075 help->dthps_provs[help->dthps_nprovs] = hprov; 13076 help->dthps_nprovs++; 13077 13078 return (0); 13079 } 13080 13081 static void 13082 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 13083 { 13084 mutex_enter(&dtrace_lock); 13085 13086 if (--hprov->dthp_ref == 0) { 13087 dof_hdr_t *dof; 13088 mutex_exit(&dtrace_lock); 13089 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 13090 dtrace_dof_destroy(dof); 13091 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 13092 } else { 13093 mutex_exit(&dtrace_lock); 13094 } 13095 } 13096 13097 static int 13098 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 13099 { 13100 uintptr_t daddr = (uintptr_t)dof; 13101 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 13102 dof_provider_t *provider; 13103 dof_probe_t *probe; 13104 uint8_t *arg; 13105 char *strtab, *typestr; 13106 dof_stridx_t typeidx; 13107 size_t typesz; 13108 uint_t nprobes, j, k; 13109 13110 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 13111 13112 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 13113 dtrace_dof_error(dof, "misaligned section offset"); 13114 return (-1); 13115 } 13116 13117 /* 13118 * The section needs to be large enough to contain the DOF provider 13119 * structure appropriate for the given version. 13120 */ 13121 if (sec->dofs_size < 13122 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
13123 offsetof(dof_provider_t, dofpv_prenoffs) : 13124 sizeof (dof_provider_t))) { 13125 dtrace_dof_error(dof, "provider section too small"); 13126 return (-1); 13127 } 13128 13129 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 13130 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 13131 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 13132 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 13133 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 13134 13135 if (str_sec == NULL || prb_sec == NULL || 13136 arg_sec == NULL || off_sec == NULL) 13137 return (-1); 13138 13139 enoff_sec = NULL; 13140 13141 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 13142 provider->dofpv_prenoffs != DOF_SECT_NONE && 13143 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 13144 provider->dofpv_prenoffs)) == NULL) 13145 return (-1); 13146 13147 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 13148 13149 if (provider->dofpv_name >= str_sec->dofs_size || 13150 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 13151 dtrace_dof_error(dof, "invalid provider name"); 13152 return (-1); 13153 } 13154 13155 if (prb_sec->dofs_entsize == 0 || 13156 prb_sec->dofs_entsize > prb_sec->dofs_size) { 13157 dtrace_dof_error(dof, "invalid entry size"); 13158 return (-1); 13159 } 13160 13161 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 13162 dtrace_dof_error(dof, "misaligned entry size"); 13163 return (-1); 13164 } 13165 13166 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 13167 dtrace_dof_error(dof, "invalid entry size"); 13168 return (-1); 13169 } 13170 13171 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 13172 dtrace_dof_error(dof, "misaligned section offset"); 13173 return (-1); 13174 } 13175 13176 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 13177 dtrace_dof_error(dof, "invalid entry size"); 13178 return (-1); 13179 } 13180 13181 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 13182 13183 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 13184 13185 /* 13186 * Take a pass through the probes to check for errors. 13187 */ 13188 for (j = 0; j < nprobes; j++) { 13189 probe = (dof_probe_t *)(uintptr_t)(daddr + 13190 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 13191 13192 if (probe->dofpr_func >= str_sec->dofs_size) { 13193 dtrace_dof_error(dof, "invalid function name"); 13194 return (-1); 13195 } 13196 13197 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 13198 dtrace_dof_error(dof, "function name too long"); 13199 return (-1); 13200 } 13201 13202 if (probe->dofpr_name >= str_sec->dofs_size || 13203 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 13204 dtrace_dof_error(dof, "invalid probe name"); 13205 return (-1); 13206 } 13207 13208 /* 13209 * The offset count must not wrap the index, and the offsets 13210 * must also not overflow the section's data. 13211 */ 13212 if (probe->dofpr_offidx + probe->dofpr_noffs < 13213 probe->dofpr_offidx || 13214 (probe->dofpr_offidx + probe->dofpr_noffs) * 13215 off_sec->dofs_entsize > off_sec->dofs_size) { 13216 dtrace_dof_error(dof, "invalid probe offset"); 13217 return (-1); 13218 } 13219 13220 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 13221 /* 13222 * If there's no is-enabled offset section, make sure 13223 * there aren't any is-enabled offsets. Otherwise 13224 * perform the same checks as for probe offsets 13225 * (immediately above). 
13226 */ 13227 if (enoff_sec == NULL) { 13228 if (probe->dofpr_enoffidx != 0 || 13229 probe->dofpr_nenoffs != 0) { 13230 dtrace_dof_error(dof, "is-enabled " 13231 "offsets with null section"); 13232 return (-1); 13233 } 13234 } else if (probe->dofpr_enoffidx + 13235 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 13236 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 13237 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 13238 dtrace_dof_error(dof, "invalid is-enabled " 13239 "offset"); 13240 return (-1); 13241 } 13242 13243 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 13244 dtrace_dof_error(dof, "zero probe and " 13245 "is-enabled offsets"); 13246 return (-1); 13247 } 13248 } else if (probe->dofpr_noffs == 0) { 13249 dtrace_dof_error(dof, "zero probe offsets"); 13250 return (-1); 13251 } 13252 13253 if (probe->dofpr_argidx + probe->dofpr_xargc < 13254 probe->dofpr_argidx || 13255 (probe->dofpr_argidx + probe->dofpr_xargc) * 13256 arg_sec->dofs_entsize > arg_sec->dofs_size) { 13257 dtrace_dof_error(dof, "invalid args"); 13258 return (-1); 13259 } 13260 13261 typeidx = probe->dofpr_nargv; 13262 typestr = strtab + probe->dofpr_nargv; 13263 for (k = 0; k < probe->dofpr_nargc; k++) { 13264 if (typeidx >= str_sec->dofs_size) { 13265 dtrace_dof_error(dof, "bad " 13266 "native argument type"); 13267 return (-1); 13268 } 13269 13270 typesz = strlen(typestr) + 1; 13271 if (typesz > DTRACE_ARGTYPELEN) { 13272 dtrace_dof_error(dof, "native " 13273 "argument type too long"); 13274 return (-1); 13275 } 13276 typeidx += typesz; 13277 typestr += typesz; 13278 } 13279 13280 typeidx = probe->dofpr_xargv; 13281 typestr = strtab + probe->dofpr_xargv; 13282 for (k = 0; k < probe->dofpr_xargc; k++) { 13283 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 13284 dtrace_dof_error(dof, "bad " 13285 "native argument index"); 13286 return (-1); 13287 } 13288 13289 if (typeidx >= str_sec->dofs_size) { 13290 dtrace_dof_error(dof, "bad " 13291 "translated argument type"); 13292 return (-1); 13293 } 13294 13295 typesz = strlen(typestr) + 1; 13296 if (typesz > DTRACE_ARGTYPELEN) { 13297 dtrace_dof_error(dof, "translated argument " 13298 "type too long"); 13299 return (-1); 13300 } 13301 13302 typeidx += typesz; 13303 typestr += typesz; 13304 } 13305 } 13306 13307 return (0); 13308 } 13309 13310 static int 13311 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 13312 { 13313 dtrace_helpers_t *help; 13314 dtrace_vstate_t *vstate; 13315 dtrace_enabling_t *enab = NULL; 13316 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 13317 uintptr_t daddr = (uintptr_t)dof; 13318 13319 ASSERT(MUTEX_HELD(&dtrace_lock)); 13320 13321 if ((help = curproc->p_dtrace_helpers) == NULL) 13322 help = dtrace_helpers_create(curproc); 13323 13324 vstate = &help->dthps_vstate; 13325 13326 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 13327 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 13328 dtrace_dof_destroy(dof); 13329 return (rv); 13330 } 13331 13332 /* 13333 * Look for helper providers and validate their descriptions. 
13334 */ 13335 if (dhp != NULL) { 13336 for (i = 0; i < dof->dofh_secnum; i++) { 13337 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 13338 dof->dofh_secoff + i * dof->dofh_secsize); 13339 13340 if (sec->dofs_type != DOF_SECT_PROVIDER) 13341 continue; 13342 13343 if (dtrace_helper_provider_validate(dof, sec) != 0) { 13344 dtrace_enabling_destroy(enab); 13345 dtrace_dof_destroy(dof); 13346 return (-1); 13347 } 13348 13349 nprovs++; 13350 } 13351 } 13352 13353 /* 13354 * Now we need to walk through the ECB descriptions in the enabling. 13355 */ 13356 for (i = 0; i < enab->dten_ndesc; i++) { 13357 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 13358 dtrace_probedesc_t *desc = &ep->dted_probe; 13359 13360 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 13361 continue; 13362 13363 if (strcmp(desc->dtpd_mod, "helper") != 0) 13364 continue; 13365 13366 if (strcmp(desc->dtpd_func, "ustack") != 0) 13367 continue; 13368 13369 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 13370 ep)) != 0) { 13371 /* 13372 * Adding this helper action failed -- we are now going 13373 * to rip out the entire generation and return failure. 13374 */ 13375 (void) dtrace_helper_destroygen(help->dthps_generation); 13376 dtrace_enabling_destroy(enab); 13377 dtrace_dof_destroy(dof); 13378 return (-1); 13379 } 13380 13381 nhelpers++; 13382 } 13383 13384 if (nhelpers < enab->dten_ndesc) 13385 dtrace_dof_error(dof, "unmatched helpers"); 13386 13387 gen = help->dthps_generation++; 13388 dtrace_enabling_destroy(enab); 13389 13390 if (dhp != NULL && nprovs > 0) { 13391 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 13392 if (dtrace_helper_provider_add(dhp, gen) == 0) { 13393 mutex_exit(&dtrace_lock); 13394 dtrace_helper_provider_register(curproc, help, dhp); 13395 mutex_enter(&dtrace_lock); 13396 13397 destroy = 0; 13398 } 13399 } 13400 13401 if (destroy) 13402 dtrace_dof_destroy(dof); 13403 13404 return (gen); 13405 } 13406 13407 static dtrace_helpers_t * 13408 dtrace_helpers_create(proc_t *p) 13409 { 13410 dtrace_helpers_t *help; 13411 13412 ASSERT(MUTEX_HELD(&dtrace_lock)); 13413 ASSERT(p->p_dtrace_helpers == NULL); 13414 13415 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 13416 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 13417 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 13418 13419 p->p_dtrace_helpers = help; 13420 dtrace_helpers++; 13421 13422 return (help); 13423 } 13424 13425 static void 13426 dtrace_helpers_destroy(void) 13427 { 13428 dtrace_helpers_t *help; 13429 dtrace_vstate_t *vstate; 13430 proc_t *p = curproc; 13431 int i; 13432 13433 mutex_enter(&dtrace_lock); 13434 13435 ASSERT(p->p_dtrace_helpers != NULL); 13436 ASSERT(dtrace_helpers > 0); 13437 13438 help = p->p_dtrace_helpers; 13439 vstate = &help->dthps_vstate; 13440 13441 /* 13442 * We're now going to lose the help from this process. 13443 */ 13444 p->p_dtrace_helpers = NULL; 13445 dtrace_sync(); 13446 13447 /* 13448 * Destory the helper actions. 13449 */ 13450 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 13451 dtrace_helper_action_t *h, *next; 13452 13453 for (h = help->dthps_actions[i]; h != NULL; h = next) { 13454 next = h->dtha_next; 13455 dtrace_helper_action_destroy(h, vstate); 13456 h = next; 13457 } 13458 } 13459 13460 mutex_exit(&dtrace_lock); 13461 13462 /* 13463 * Destroy the helper providers. 
13464 */ 13465 if (help->dthps_maxprovs > 0) { 13466 mutex_enter(&dtrace_meta_lock); 13467 if (dtrace_meta_pid != NULL) { 13468 ASSERT(dtrace_deferred_pid == NULL); 13469 13470 for (i = 0; i < help->dthps_nprovs; i++) { 13471 dtrace_helper_provider_remove( 13472 &help->dthps_provs[i]->dthp_prov, p->p_pid); 13473 } 13474 } else { 13475 mutex_enter(&dtrace_lock); 13476 ASSERT(help->dthps_deferred == 0 || 13477 help->dthps_next != NULL || 13478 help->dthps_prev != NULL || 13479 help == dtrace_deferred_pid); 13480 13481 /* 13482 * Remove the helper from the deferred list. 13483 */ 13484 if (help->dthps_next != NULL) 13485 help->dthps_next->dthps_prev = help->dthps_prev; 13486 if (help->dthps_prev != NULL) 13487 help->dthps_prev->dthps_next = help->dthps_next; 13488 if (dtrace_deferred_pid == help) { 13489 dtrace_deferred_pid = help->dthps_next; 13490 ASSERT(help->dthps_prev == NULL); 13491 } 13492 13493 mutex_exit(&dtrace_lock); 13494 } 13495 13496 mutex_exit(&dtrace_meta_lock); 13497 13498 for (i = 0; i < help->dthps_nprovs; i++) { 13499 dtrace_helper_provider_destroy(help->dthps_provs[i]); 13500 } 13501 13502 kmem_free(help->dthps_provs, help->dthps_maxprovs * 13503 sizeof (dtrace_helper_provider_t *)); 13504 } 13505 13506 mutex_enter(&dtrace_lock); 13507 13508 dtrace_vstate_fini(&help->dthps_vstate); 13509 kmem_free(help->dthps_actions, 13510 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 13511 kmem_free(help, sizeof (dtrace_helpers_t)); 13512 13513 --dtrace_helpers; 13514 mutex_exit(&dtrace_lock); 13515 } 13516 13517 static void 13518 dtrace_helpers_duplicate(proc_t *from, proc_t *to) 13519 { 13520 dtrace_helpers_t *help, *newhelp; 13521 dtrace_helper_action_t *helper, *new, *last; 13522 dtrace_difo_t *dp; 13523 dtrace_vstate_t *vstate; 13524 int i, j, sz, hasprovs = 0; 13525 13526 mutex_enter(&dtrace_lock); 13527 ASSERT(from->p_dtrace_helpers != NULL); 13528 ASSERT(dtrace_helpers > 0); 13529 13530 help = from->p_dtrace_helpers; 13531 newhelp = dtrace_helpers_create(to); 13532 ASSERT(to->p_dtrace_helpers != NULL); 13533 13534 newhelp->dthps_generation = help->dthps_generation; 13535 vstate = &newhelp->dthps_vstate; 13536 13537 /* 13538 * Duplicate the helper actions. 13539 */ 13540 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 13541 if ((helper = help->dthps_actions[i]) == NULL) 13542 continue; 13543 13544 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 13545 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 13546 KM_SLEEP); 13547 new->dtha_generation = helper->dtha_generation; 13548 13549 if ((dp = helper->dtha_predicate) != NULL) { 13550 dp = dtrace_difo_duplicate(dp, vstate); 13551 new->dtha_predicate = dp; 13552 } 13553 13554 new->dtha_nactions = helper->dtha_nactions; 13555 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 13556 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 13557 13558 for (j = 0; j < new->dtha_nactions; j++) { 13559 dtrace_difo_t *dp = helper->dtha_actions[j]; 13560 13561 ASSERT(dp != NULL); 13562 dp = dtrace_difo_duplicate(dp, vstate); 13563 new->dtha_actions[j] = dp; 13564 } 13565 13566 if (last != NULL) { 13567 last->dtha_next = new; 13568 } else { 13569 newhelp->dthps_actions[i] = new; 13570 } 13571 13572 last = new; 13573 } 13574 } 13575 13576 /* 13577 * Duplicate the helper providers and register them with the 13578 * DTrace framework. 
13579 */ 13580 if (help->dthps_nprovs > 0) { 13581 newhelp->dthps_nprovs = help->dthps_nprovs; 13582 newhelp->dthps_maxprovs = help->dthps_nprovs; 13583 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs * 13584 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 13585 for (i = 0; i < newhelp->dthps_nprovs; i++) { 13586 newhelp->dthps_provs[i] = help->dthps_provs[i]; 13587 newhelp->dthps_provs[i]->dthp_ref++; 13588 } 13589 13590 hasprovs = 1; 13591 } 13592 13593 mutex_exit(&dtrace_lock); 13594 13595 if (hasprovs) 13596 dtrace_helper_provider_register(to, newhelp, NULL); 13597 } 13598 13599 /* 13600 * DTrace Hook Functions 13601 */ 13602 static void 13603 dtrace_module_loaded(struct modctl *ctl) 13604 { 13605 dtrace_provider_t *prv; 13606 13607 mutex_enter(&dtrace_provider_lock); 13608 mutex_enter(&mod_lock); 13609 13610 ASSERT(ctl->mod_busy); 13611 13612 /* 13613 * We're going to call each providers per-module provide operation 13614 * specifying only this module. 13615 */ 13616 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) 13617 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 13618 13619 mutex_exit(&mod_lock); 13620 mutex_exit(&dtrace_provider_lock); 13621 13622 /* 13623 * If we have any retained enablings, we need to match against them. 13624 * Enabling probes requires that cpu_lock be held, and we cannot hold 13625 * cpu_lock here -- it is legal for cpu_lock to be held when loading a 13626 * module. (In particular, this happens when loading scheduling 13627 * classes.) So if we have any retained enablings, we need to dispatch 13628 * our task queue to do the match for us. 13629 */ 13630 mutex_enter(&dtrace_lock); 13631 13632 if (dtrace_retained == NULL) { 13633 mutex_exit(&dtrace_lock); 13634 return; 13635 } 13636 13637 (void) taskq_dispatch(dtrace_taskq, 13638 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); 13639 13640 mutex_exit(&dtrace_lock); 13641 13642 /* 13643 * And now, for a little heuristic sleaze: in general, we want to 13644 * match modules as soon as they load. However, we cannot guarantee 13645 * this, because it would lead us to the lock ordering violation 13646 * outlined above. The common case, of course, is that cpu_lock is 13647 * _not_ held -- so we delay here for a clock tick, hoping that that's 13648 * long enough for the task queue to do its work. If it's not, it's 13649 * not a serious problem -- it just means that the module that we 13650 * just loaded may not be immediately instrumentable. 13651 */ 13652 delay(1); 13653 } 13654 13655 static void 13656 dtrace_module_unloaded(struct modctl *ctl) 13657 { 13658 dtrace_probe_t template, *probe, *first, *next; 13659 dtrace_provider_t *prov; 13660 13661 template.dtpr_mod = ctl->mod_modname; 13662 13663 mutex_enter(&dtrace_provider_lock); 13664 mutex_enter(&mod_lock); 13665 mutex_enter(&dtrace_lock); 13666 13667 if (dtrace_bymod == NULL) { 13668 /* 13669 * The DTrace module is loaded (obviously) but not attached; 13670 * we don't have any work to do. 13671 */ 13672 mutex_exit(&dtrace_provider_lock); 13673 mutex_exit(&mod_lock); 13674 mutex_exit(&dtrace_lock); 13675 return; 13676 } 13677 13678 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 13679 probe != NULL; probe = probe->dtpr_nextmod) { 13680 if (probe->dtpr_ecb != NULL) { 13681 mutex_exit(&dtrace_provider_lock); 13682 mutex_exit(&mod_lock); 13683 mutex_exit(&dtrace_lock); 13684 13685 /* 13686 * This shouldn't _actually_ be possible -- we're 13687 * unloading a module that has an enabled probe in it. 
13688 * (It's normally up to the provider to make sure that 13689 * this can't happen.) However, because dtps_enable() 13690 * doesn't have a failure mode, there can be an 13691 * enable/unload race. Upshot: we don't want to 13692 * assert, but we're not going to disable the 13693 * probe, either. 13694 */ 13695 if (dtrace_err_verbose) { 13696 cmn_err(CE_WARN, "unloaded module '%s' had " 13697 "enabled probes", ctl->mod_modname); 13698 } 13699 13700 return; 13701 } 13702 } 13703 13704 probe = first; 13705 13706 for (first = NULL; probe != NULL; probe = next) { 13707 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 13708 13709 dtrace_probes[probe->dtpr_id - 1] = NULL; 13710 13711 next = probe->dtpr_nextmod; 13712 dtrace_hash_remove(dtrace_bymod, probe); 13713 dtrace_hash_remove(dtrace_byfunc, probe); 13714 dtrace_hash_remove(dtrace_byname, probe); 13715 13716 if (first == NULL) { 13717 first = probe; 13718 probe->dtpr_nextmod = NULL; 13719 } else { 13720 probe->dtpr_nextmod = first; 13721 first = probe; 13722 } 13723 } 13724 13725 /* 13726 * We've removed all of the module's probes from the hash chains and 13727 * from the probe array. Now issue a dtrace_sync() to be sure that 13728 * everyone has cleared out from any probe array processing. 13729 */ 13730 dtrace_sync(); 13731 13732 for (probe = first; probe != NULL; probe = first) { 13733 first = probe->dtpr_nextmod; 13734 prov = probe->dtpr_provider; 13735 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 13736 probe->dtpr_arg); 13737 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 13738 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 13739 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 13740 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 13741 kmem_free(probe, sizeof (dtrace_probe_t)); 13742 } 13743 13744 mutex_exit(&dtrace_lock); 13745 mutex_exit(&mod_lock); 13746 mutex_exit(&dtrace_provider_lock); 13747 } 13748 13749 void 13750 dtrace_suspend(void) 13751 { 13752 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 13753 } 13754 13755 void 13756 dtrace_resume(void) 13757 { 13758 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 13759 } 13760 13761 static int 13762 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 13763 { 13764 ASSERT(MUTEX_HELD(&cpu_lock)); 13765 mutex_enter(&dtrace_lock); 13766 13767 switch (what) { 13768 case CPU_CONFIG: { 13769 dtrace_state_t *state; 13770 dtrace_optval_t *opt, rs, c; 13771 13772 /* 13773 * For now, we only allocate a new buffer for anonymous state. 13774 */ 13775 if ((state = dtrace_anon.dta_state) == NULL) 13776 break; 13777 13778 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 13779 break; 13780 13781 opt = state->dts_options; 13782 c = opt[DTRACEOPT_CPU]; 13783 13784 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 13785 break; 13786 13787 /* 13788 * Regardless of what the actual policy is, we're going to 13789 * temporarily set our resize policy to be manual. We're 13790 * also going to temporarily set our CPU option to denote 13791 * the newly configured CPU. 13792 */ 13793 rs = opt[DTRACEOPT_BUFRESIZE]; 13794 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 13795 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 13796 13797 (void) dtrace_state_buffers(state); 13798 13799 opt[DTRACEOPT_BUFRESIZE] = rs; 13800 opt[DTRACEOPT_CPU] = c; 13801 13802 break; 13803 } 13804 13805 case CPU_UNCONFIG: 13806 /* 13807 * We don't free the buffer in the CPU_UNCONFIG case. 
(The 13808 * buffer will be freed when the consumer exits.) 13809 */ 13810 break; 13811 13812 default: 13813 break; 13814 } 13815 13816 mutex_exit(&dtrace_lock); 13817 return (0); 13818 } 13819 13820 static void 13821 dtrace_cpu_setup_initial(processorid_t cpu) 13822 { 13823 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 13824 } 13825 13826 static void 13827 dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 13828 { 13829 if (dtrace_toxranges >= dtrace_toxranges_max) { 13830 int osize, nsize; 13831 dtrace_toxrange_t *range; 13832 13833 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 13834 13835 if (osize == 0) { 13836 ASSERT(dtrace_toxrange == NULL); 13837 ASSERT(dtrace_toxranges_max == 0); 13838 dtrace_toxranges_max = 1; 13839 } else { 13840 dtrace_toxranges_max <<= 1; 13841 } 13842 13843 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 13844 range = kmem_zalloc(nsize, KM_SLEEP); 13845 13846 if (dtrace_toxrange != NULL) { 13847 ASSERT(osize != 0); 13848 bcopy(dtrace_toxrange, range, osize); 13849 kmem_free(dtrace_toxrange, osize); 13850 } 13851 13852 dtrace_toxrange = range; 13853 } 13854 13855 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == NULL); 13856 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == NULL); 13857 13858 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 13859 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 13860 dtrace_toxranges++; 13861 } 13862 13863 /* 13864 * DTrace Driver Cookbook Functions 13865 */ 13866 /*ARGSUSED*/ 13867 static int 13868 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 13869 { 13870 dtrace_provider_id_t id; 13871 dtrace_state_t *state = NULL; 13872 dtrace_enabling_t *enab; 13873 13874 mutex_enter(&cpu_lock); 13875 mutex_enter(&dtrace_provider_lock); 13876 mutex_enter(&dtrace_lock); 13877 13878 if (ddi_soft_state_init(&dtrace_softstate, 13879 sizeof (dtrace_state_t), 0) != 0) { 13880 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 13881 mutex_exit(&cpu_lock); 13882 mutex_exit(&dtrace_provider_lock); 13883 mutex_exit(&dtrace_lock); 13884 return (DDI_FAILURE); 13885 } 13886 13887 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 13888 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 13889 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 13890 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 13891 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 13892 ddi_remove_minor_node(devi, NULL); 13893 ddi_soft_state_fini(&dtrace_softstate); 13894 mutex_exit(&cpu_lock); 13895 mutex_exit(&dtrace_provider_lock); 13896 mutex_exit(&dtrace_lock); 13897 return (DDI_FAILURE); 13898 } 13899 13900 ddi_report_dev(devi); 13901 dtrace_devi = devi; 13902 13903 dtrace_modload = dtrace_module_loaded; 13904 dtrace_modunload = dtrace_module_unloaded; 13905 dtrace_cpu_init = dtrace_cpu_setup_initial; 13906 dtrace_helpers_cleanup = dtrace_helpers_destroy; 13907 dtrace_helpers_fork = dtrace_helpers_duplicate; 13908 dtrace_cpustart_init = dtrace_suspend; 13909 dtrace_cpustart_fini = dtrace_resume; 13910 dtrace_debugger_init = dtrace_suspend; 13911 dtrace_debugger_fini = dtrace_resume; 13912 dtrace_kreloc_init = dtrace_suspend; 13913 dtrace_kreloc_fini = dtrace_resume; 13914 13915 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 13916 13917 ASSERT(MUTEX_HELD(&cpu_lock)); 13918 13919 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 13920 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 13921 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 
13922 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 13923 VM_SLEEP | VMC_IDENTIFIER); 13924 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 13925 1, INT_MAX, 0); 13926 13927 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 13928 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 13929 NULL, NULL, NULL, NULL, NULL, 0); 13930 13931 ASSERT(MUTEX_HELD(&cpu_lock)); 13932 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 13933 offsetof(dtrace_probe_t, dtpr_nextmod), 13934 offsetof(dtrace_probe_t, dtpr_prevmod)); 13935 13936 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 13937 offsetof(dtrace_probe_t, dtpr_nextfunc), 13938 offsetof(dtrace_probe_t, dtpr_prevfunc)); 13939 13940 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 13941 offsetof(dtrace_probe_t, dtpr_nextname), 13942 offsetof(dtrace_probe_t, dtpr_prevname)); 13943 13944 if (dtrace_retain_max < 1) { 13945 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 13946 "setting to 1", dtrace_retain_max); 13947 dtrace_retain_max = 1; 13948 } 13949 13950 /* 13951 * Now discover our toxic ranges. 13952 */ 13953 dtrace_toxic_ranges(dtrace_toxrange_add); 13954 13955 /* 13956 * Before we register ourselves as a provider to our own framework, 13957 * we would like to assert that dtrace_provider is NULL -- but that's 13958 * not true if we were loaded as a dependency of a DTrace provider. 13959 * Once we've registered, we can assert that dtrace_provider is our 13960 * pseudo provider. 13961 */ 13962 (void) dtrace_register("dtrace", &dtrace_provider_attr, 13963 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 13964 13965 ASSERT(dtrace_provider != NULL); 13966 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 13967 13968 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 13969 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 13970 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 13971 dtrace_provider, NULL, NULL, "END", 0, NULL); 13972 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 13973 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 13974 13975 dtrace_anon_property(); 13976 mutex_exit(&cpu_lock); 13977 13978 /* 13979 * If DTrace helper tracing is enabled, we need to allocate the 13980 * trace buffer and initialize the values. 13981 */ 13982 if (dtrace_helptrace_enabled) { 13983 ASSERT(dtrace_helptrace_buffer == NULL); 13984 dtrace_helptrace_buffer = 13985 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 13986 dtrace_helptrace_next = 0; 13987 } 13988 13989 /* 13990 * If there are already providers, we must ask them to provide their 13991 * probes, and then match any anonymous enabling against them. Note 13992 * that there should be no other retained enablings at this time: 13993 * the only retained enablings at this time should be the anonymous 13994 * enabling. 13995 */ 13996 if (dtrace_anon.dta_enabling != NULL) { 13997 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 13998 13999 dtrace_enabling_provide(NULL); 14000 state = dtrace_anon.dta_state; 14001 14002 /* 14003 * We couldn't hold cpu_lock across the above call to 14004 * dtrace_enabling_provide(), but we must hold it to actually 14005 * enable the probes. We have to drop all of our locks, pick 14006 * up cpu_lock, and regain our locks before matching the 14007 * retained anonymous enabling. 
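 * The ordering mirrors the acquisitions at the top of this
 * function: cpu_lock first, then dtrace_provider_lock, then
 * dtrace_lock.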
14008 */ 14009 mutex_exit(&dtrace_lock); 14010 mutex_exit(&dtrace_provider_lock); 14011 14012 mutex_enter(&cpu_lock); 14013 mutex_enter(&dtrace_provider_lock); 14014 mutex_enter(&dtrace_lock); 14015 14016 if ((enab = dtrace_anon.dta_enabling) != NULL) 14017 (void) dtrace_enabling_match(enab, NULL); 14018 14019 mutex_exit(&cpu_lock); 14020 } 14021 14022 mutex_exit(&dtrace_lock); 14023 mutex_exit(&dtrace_provider_lock); 14024 14025 if (state != NULL) { 14026 /* 14027 * If we created any anonymous state, set it going now. 14028 */ 14029 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 14030 } 14031 14032 return (DDI_SUCCESS); 14033 } 14034 14035 /*ARGSUSED*/ 14036 static int 14037 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 14038 { 14039 dtrace_state_t *state; 14040 uint32_t priv; 14041 uid_t uid; 14042 zoneid_t zoneid; 14043 14044 if (getminor(*devp) == DTRACEMNRN_HELPER) 14045 return (0); 14046 14047 /* 14048 * If this wasn't an open with the "helper" minor, then it must be 14049 * the "dtrace" minor. 14050 */ 14051 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE); 14052 14053 /* 14054 * If no DTRACE_PRIV_* bits are set in the credential, then the 14055 * caller lacks sufficient permission to do anything with DTrace. 14056 */ 14057 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 14058 if (priv == DTRACE_PRIV_NONE) 14059 return (EACCES); 14060 14061 /* 14062 * Ask all providers to provide all their probes. 14063 */ 14064 mutex_enter(&dtrace_provider_lock); 14065 dtrace_probe_provide(NULL, NULL); 14066 mutex_exit(&dtrace_provider_lock); 14067 14068 mutex_enter(&cpu_lock); 14069 mutex_enter(&dtrace_lock); 14070 dtrace_opens++; 14071 dtrace_membar_producer(); 14072 14073 /* 14074 * If the kernel debugger is active (that is, if the kernel debugger 14075 * modified text in some way), we won't allow the open. 14076 */ 14077 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 14078 dtrace_opens--; 14079 mutex_exit(&cpu_lock); 14080 mutex_exit(&dtrace_lock); 14081 return (EBUSY); 14082 } 14083 14084 state = dtrace_state_create(devp, cred_p); 14085 mutex_exit(&cpu_lock); 14086 14087 if (state == NULL) { 14088 if (--dtrace_opens == 0) 14089 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 14090 mutex_exit(&dtrace_lock); 14091 return (EAGAIN); 14092 } 14093 14094 mutex_exit(&dtrace_lock); 14095 14096 return (0); 14097 } 14098 14099 /*ARGSUSED*/ 14100 static int 14101 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 14102 { 14103 minor_t minor = getminor(dev); 14104 dtrace_state_t *state; 14105 14106 if (minor == DTRACEMNRN_HELPER) 14107 return (0); 14108 14109 state = ddi_get_soft_state(dtrace_softstate, minor); 14110 14111 mutex_enter(&cpu_lock); 14112 mutex_enter(&dtrace_lock); 14113 14114 if (state->dts_anon) { 14115 /* 14116 * There is anonymous state. Destroy that first. 
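 * (Anonymous state can hang off a consumer's state only because
 * that consumer grabbed it -- which cleared dtrace_anon.dta_state,
 * hence the ASSERT below.)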
14117 */ 14118 ASSERT(dtrace_anon.dta_state == NULL); 14119 dtrace_state_destroy(state->dts_anon); 14120 } 14121 14122 dtrace_state_destroy(state); 14123 ASSERT(dtrace_opens > 0); 14124 if (--dtrace_opens == 0) 14125 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 14126 14127 mutex_exit(&dtrace_lock); 14128 mutex_exit(&cpu_lock); 14129 14130 return (0); 14131 } 14132 14133 /*ARGSUSED*/ 14134 static int 14135 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 14136 { 14137 int rval; 14138 dof_helper_t help, *dhp = NULL; 14139 14140 switch (cmd) { 14141 case DTRACEHIOC_ADDDOF: 14142 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 14143 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 14144 return (EFAULT); 14145 } 14146 14147 dhp = &help; 14148 arg = (intptr_t)help.dofhp_dof; 14149 /*FALLTHROUGH*/ 14150 14151 case DTRACEHIOC_ADD: { 14152 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 14153 14154 if (dof == NULL) 14155 return (rval); 14156 14157 mutex_enter(&dtrace_lock); 14158 14159 /* 14160 * dtrace_helper_slurp() takes responsibility for the dof -- 14161 * it may free it now or it may save it and free it later. 14162 */ 14163 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 14164 *rv = rval; 14165 rval = 0; 14166 } else { 14167 rval = EINVAL; 14168 } 14169 14170 mutex_exit(&dtrace_lock); 14171 return (rval); 14172 } 14173 14174 case DTRACEHIOC_REMOVE: { 14175 mutex_enter(&dtrace_lock); 14176 rval = dtrace_helper_destroygen(arg); 14177 mutex_exit(&dtrace_lock); 14178 14179 return (rval); 14180 } 14181 14182 default: 14183 break; 14184 } 14185 14186 return (ENOTTY); 14187 } 14188 14189 /*ARGSUSED*/ 14190 static int 14191 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 14192 { 14193 minor_t minor = getminor(dev); 14194 dtrace_state_t *state; 14195 int rval; 14196 14197 if (minor == DTRACEMNRN_HELPER) 14198 return (dtrace_ioctl_helper(cmd, arg, rv)); 14199 14200 state = ddi_get_soft_state(dtrace_softstate, minor); 14201 14202 if (state->dts_anon) { 14203 ASSERT(dtrace_anon.dta_state == NULL); 14204 state = state->dts_anon; 14205 } 14206 14207 switch (cmd) { 14208 case DTRACEIOC_PROVIDER: { 14209 dtrace_providerdesc_t pvd; 14210 dtrace_provider_t *pvp; 14211 14212 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 14213 return (EFAULT); 14214 14215 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 14216 mutex_enter(&dtrace_provider_lock); 14217 14218 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 14219 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 14220 break; 14221 } 14222 14223 mutex_exit(&dtrace_provider_lock); 14224 14225 if (pvp == NULL) 14226 return (ESRCH); 14227 14228 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 14229 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 14230 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 14231 return (EFAULT); 14232 14233 return (0); 14234 } 14235 14236 case DTRACEIOC_EPROBE: { 14237 dtrace_eprobedesc_t epdesc; 14238 dtrace_ecb_t *ecb; 14239 dtrace_action_t *act; 14240 void *buf; 14241 size_t size; 14242 uintptr_t dest; 14243 int nrecs; 14244 14245 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 14246 return (EFAULT); 14247 14248 mutex_enter(&dtrace_lock); 14249 14250 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 14251 mutex_exit(&dtrace_lock); 14252 return (EINVAL); 14253 } 14254 14255 if (ecb->dte_probe == NULL) { 14256 mutex_exit(&dtrace_lock); 14257 return (EINVAL); 14258 } 14259 14260 epdesc.dtepd_probeid = 
ecb->dte_probe->dtpr_id; 14261 epdesc.dtepd_uarg = ecb->dte_uarg; 14262 epdesc.dtepd_size = ecb->dte_size; 14263 14264 nrecs = epdesc.dtepd_nrecs; 14265 epdesc.dtepd_nrecs = 0; 14266 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 14267 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 14268 continue; 14269 14270 epdesc.dtepd_nrecs++; 14271 } 14272 14273 /* 14274 * Now that we have the size, we need to allocate a temporary 14275 * buffer in which to store the complete description. We need 14276 * the temporary buffer to be able to drop dtrace_lock() 14277 * across the copyout(), below. 14278 */ 14279 size = sizeof (dtrace_eprobedesc_t) + 14280 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 14281 14282 buf = kmem_alloc(size, KM_SLEEP); 14283 dest = (uintptr_t)buf; 14284 14285 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 14286 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 14287 14288 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 14289 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 14290 continue; 14291 14292 if (nrecs-- == 0) 14293 break; 14294 14295 bcopy(&act->dta_rec, (void *)dest, 14296 sizeof (dtrace_recdesc_t)); 14297 dest += sizeof (dtrace_recdesc_t); 14298 } 14299 14300 mutex_exit(&dtrace_lock); 14301 14302 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 14303 kmem_free(buf, size); 14304 return (EFAULT); 14305 } 14306 14307 kmem_free(buf, size); 14308 return (0); 14309 } 14310 14311 case DTRACEIOC_AGGDESC: { 14312 dtrace_aggdesc_t aggdesc; 14313 dtrace_action_t *act; 14314 dtrace_aggregation_t *agg; 14315 int nrecs; 14316 uint32_t offs; 14317 dtrace_recdesc_t *lrec; 14318 void *buf; 14319 size_t size; 14320 uintptr_t dest; 14321 14322 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 14323 return (EFAULT); 14324 14325 mutex_enter(&dtrace_lock); 14326 14327 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 14328 mutex_exit(&dtrace_lock); 14329 return (EINVAL); 14330 } 14331 14332 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 14333 14334 nrecs = aggdesc.dtagd_nrecs; 14335 aggdesc.dtagd_nrecs = 0; 14336 14337 offs = agg->dtag_base; 14338 lrec = &agg->dtag_action.dta_rec; 14339 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 14340 14341 for (act = agg->dtag_first; ; act = act->dta_next) { 14342 ASSERT(act->dta_intuple || 14343 DTRACEACT_ISAGG(act->dta_kind)); 14344 14345 /* 14346 * If this action has a record size of zero, it 14347 * denotes an argument to the aggregating action. 14348 * Because the presence of this record doesn't (or 14349 * shouldn't) affect the way the data is interpreted, 14350 * we don't copy it out to save user-level the 14351 * confusion of dealing with a zero-length record. 14352 */ 14353 if (act->dta_rec.dtrd_size == 0) { 14354 ASSERT(agg->dtag_hasarg); 14355 continue; 14356 } 14357 14358 aggdesc.dtagd_nrecs++; 14359 14360 if (act == &agg->dtag_action) 14361 break; 14362 } 14363 14364 /* 14365 * Now that we have the size, we need to allocate a temporary 14366 * buffer in which to store the complete description. We need 14367 * the temporary buffer to be able to drop dtrace_lock() 14368 * across the copyout(), below. 
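 * (copyout() can take a page fault and block; it therefore must
 * not be called with dtrace_lock held.)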
14369 */ 14370 size = sizeof (dtrace_aggdesc_t) + 14371 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 14372 14373 buf = kmem_alloc(size, KM_SLEEP); 14374 dest = (uintptr_t)buf; 14375 14376 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 14377 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 14378 14379 for (act = agg->dtag_first; ; act = act->dta_next) { 14380 dtrace_recdesc_t rec = act->dta_rec; 14381 14382 /* 14383 * See the comment in the above loop for why we pass 14384 * over zero-length records. 14385 */ 14386 if (rec.dtrd_size == 0) { 14387 ASSERT(agg->dtag_hasarg); 14388 continue; 14389 } 14390 14391 if (nrecs-- == 0) 14392 break; 14393 14394 rec.dtrd_offset -= offs; 14395 bcopy(&rec, (void *)dest, sizeof (rec)); 14396 dest += sizeof (dtrace_recdesc_t); 14397 14398 if (act == &agg->dtag_action) 14399 break; 14400 } 14401 14402 mutex_exit(&dtrace_lock); 14403 14404 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 14405 kmem_free(buf, size); 14406 return (EFAULT); 14407 } 14408 14409 kmem_free(buf, size); 14410 return (0); 14411 } 14412 14413 case DTRACEIOC_ENABLE: { 14414 dof_hdr_t *dof; 14415 dtrace_enabling_t *enab = NULL; 14416 dtrace_vstate_t *vstate; 14417 int err = 0; 14418 14419 *rv = 0; 14420 14421 /* 14422 * If a NULL argument has been passed, we take this as our 14423 * cue to reevaluate our enablings. 14424 */ 14425 if (arg == NULL) { 14426 mutex_enter(&cpu_lock); 14427 mutex_enter(&dtrace_lock); 14428 err = dtrace_enabling_matchstate(state, rv); 14429 mutex_exit(&dtrace_lock); 14430 mutex_exit(&cpu_lock); 14431 14432 return (err); 14433 } 14434 14435 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 14436 return (rval); 14437 14438 mutex_enter(&cpu_lock); 14439 mutex_enter(&dtrace_lock); 14440 vstate = &state->dts_vstate; 14441 14442 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 14443 mutex_exit(&dtrace_lock); 14444 mutex_exit(&cpu_lock); 14445 dtrace_dof_destroy(dof); 14446 return (EBUSY); 14447 } 14448 14449 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 14450 mutex_exit(&dtrace_lock); 14451 mutex_exit(&cpu_lock); 14452 dtrace_dof_destroy(dof); 14453 return (EINVAL); 14454 } 14455 14456 if ((rval = dtrace_dof_options(dof, state)) != 0) { 14457 dtrace_enabling_destroy(enab); 14458 mutex_exit(&dtrace_lock); 14459 mutex_exit(&cpu_lock); 14460 dtrace_dof_destroy(dof); 14461 return (rval); 14462 } 14463 14464 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 14465 err = dtrace_enabling_retain(enab); 14466 } else { 14467 dtrace_enabling_destroy(enab); 14468 } 14469 14470 mutex_exit(&cpu_lock); 14471 mutex_exit(&dtrace_lock); 14472 dtrace_dof_destroy(dof); 14473 14474 return (err); 14475 } 14476 14477 case DTRACEIOC_REPLICATE: { 14478 dtrace_repldesc_t desc; 14479 dtrace_probedesc_t *match = &desc.dtrpd_match; 14480 dtrace_probedesc_t *create = &desc.dtrpd_create; 14481 int err; 14482 14483 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 14484 return (EFAULT); 14485 14486 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 14487 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 14488 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 14489 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 14490 14491 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 14492 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 14493 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 14494 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 14495 14496 mutex_enter(&dtrace_lock); 14497 err = dtrace_enabling_replicate(state, match, create); 14498 
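		/*
		 * dtrace_enabling_replicate() duplicates each retained
		 * enabling of this state whose probe description matches
		 * "match", giving the copy the "create" description; err
		 * is zero on success or the errno to hand back.
		 */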
mutex_exit(&dtrace_lock); 14499 14500 return (err); 14501 } 14502 14503 case DTRACEIOC_PROBEMATCH: 14504 case DTRACEIOC_PROBES: { 14505 dtrace_probe_t *probe = NULL; 14506 dtrace_probedesc_t desc; 14507 dtrace_probekey_t pkey; 14508 dtrace_id_t i; 14509 int m = 0; 14510 uint32_t priv; 14511 uid_t uid; 14512 zoneid_t zoneid; 14513 14514 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 14515 return (EFAULT); 14516 14517 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 14518 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 14519 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 14520 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 14521 14522 /* 14523 * Before we attempt to match this probe, we want to give 14524 * all providers the opportunity to provide it. 14525 */ 14526 if (desc.dtpd_id == DTRACE_IDNONE) { 14527 mutex_enter(&dtrace_provider_lock); 14528 dtrace_probe_provide(&desc, NULL); 14529 mutex_exit(&dtrace_provider_lock); 14530 desc.dtpd_id++; 14531 } 14532 14533 if (cmd == DTRACEIOC_PROBEMATCH) { 14534 dtrace_probekey(&desc, &pkey); 14535 pkey.dtpk_id = DTRACE_IDNONE; 14536 } 14537 14538 dtrace_cred2priv(cr, &priv, &uid, &zoneid); 14539 14540 mutex_enter(&dtrace_lock); 14541 14542 if (cmd == DTRACEIOC_PROBEMATCH) { 14543 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 14544 if ((probe = dtrace_probes[i - 1]) != NULL && 14545 (m = dtrace_match_probe(probe, &pkey, 14546 priv, uid, zoneid)) != 0) 14547 break; 14548 } 14549 14550 if (m < 0) { 14551 mutex_exit(&dtrace_lock); 14552 return (EINVAL); 14553 } 14554 14555 } else { 14556 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 14557 if ((probe = dtrace_probes[i - 1]) != NULL && 14558 dtrace_match_priv(probe, priv, uid, zoneid)) 14559 break; 14560 } 14561 } 14562 14563 if (probe == NULL) { 14564 mutex_exit(&dtrace_lock); 14565 return (ESRCH); 14566 } 14567 14568 dtrace_probe_description(probe, &desc); 14569 mutex_exit(&dtrace_lock); 14570 14571 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 14572 return (EFAULT); 14573 14574 return (0); 14575 } 14576 14577 case DTRACEIOC_PROBEARG: { 14578 dtrace_argdesc_t desc; 14579 dtrace_probe_t *probe; 14580 dtrace_provider_t *prov; 14581 14582 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 14583 return (EFAULT); 14584 14585 if (desc.dtargd_id == DTRACE_IDNONE) 14586 return (EINVAL); 14587 14588 if (desc.dtargd_ndx == DTRACE_ARGNONE) 14589 return (EINVAL); 14590 14591 mutex_enter(&dtrace_provider_lock); 14592 mutex_enter(&mod_lock); 14593 mutex_enter(&dtrace_lock); 14594 14595 if (desc.dtargd_id > dtrace_nprobes) { 14596 mutex_exit(&dtrace_lock); 14597 mutex_exit(&mod_lock); 14598 mutex_exit(&dtrace_provider_lock); 14599 return (EINVAL); 14600 } 14601 14602 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 14603 mutex_exit(&dtrace_lock); 14604 mutex_exit(&mod_lock); 14605 mutex_exit(&dtrace_provider_lock); 14606 return (EINVAL); 14607 } 14608 14609 mutex_exit(&dtrace_lock); 14610 14611 prov = probe->dtpr_provider; 14612 14613 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 14614 /* 14615 * There isn't any typed information for this probe. 14616 * Set the argument number to DTRACE_ARGNONE. 
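 * A dtargd_ndx of DTRACE_ARGNONE indicates to the consumer that
 * no type information exists for the requested argument.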
14617 */ 14618 desc.dtargd_ndx = DTRACE_ARGNONE; 14619 } else { 14620 desc.dtargd_native[0] = '\0'; 14621 desc.dtargd_xlate[0] = '\0'; 14622 desc.dtargd_mapping = desc.dtargd_ndx; 14623 14624 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, 14625 probe->dtpr_id, probe->dtpr_arg, &desc); 14626 } 14627 14628 mutex_exit(&mod_lock); 14629 mutex_exit(&dtrace_provider_lock); 14630 14631 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 14632 return (EFAULT); 14633 14634 return (0); 14635 } 14636 14637 case DTRACEIOC_GO: { 14638 processorid_t cpuid; 14639 rval = dtrace_state_go(state, &cpuid); 14640 14641 if (rval != 0) 14642 return (rval); 14643 14644 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 14645 return (EFAULT); 14646 14647 return (0); 14648 } 14649 14650 case DTRACEIOC_STOP: { 14651 processorid_t cpuid; 14652 14653 mutex_enter(&dtrace_lock); 14654 rval = dtrace_state_stop(state, &cpuid); 14655 mutex_exit(&dtrace_lock); 14656 14657 if (rval != 0) 14658 return (rval); 14659 14660 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 14661 return (EFAULT); 14662 14663 return (0); 14664 } 14665 14666 case DTRACEIOC_DOFGET: { 14667 dof_hdr_t hdr, *dof; 14668 uint64_t len; 14669 14670 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) 14671 return (EFAULT); 14672 14673 mutex_enter(&dtrace_lock); 14674 dof = dtrace_dof_create(state); 14675 mutex_exit(&dtrace_lock); 14676 14677 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); 14678 rval = copyout(dof, (void *)arg, len); 14679 dtrace_dof_destroy(dof); 14680 14681 return (rval == 0 ? 0 : EFAULT); 14682 } 14683 14684 case DTRACEIOC_AGGSNAP: 14685 case DTRACEIOC_BUFSNAP: { 14686 dtrace_bufdesc_t desc; 14687 caddr_t cached; 14688 dtrace_buffer_t *buf; 14689 14690 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 14691 return (EFAULT); 14692 14693 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) 14694 return (EINVAL); 14695 14696 mutex_enter(&dtrace_lock); 14697 14698 if (cmd == DTRACEIOC_BUFSNAP) { 14699 buf = &state->dts_buffer[desc.dtbd_cpu]; 14700 } else { 14701 buf = &state->dts_aggbuffer[desc.dtbd_cpu]; 14702 } 14703 14704 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { 14705 size_t sz = buf->dtb_offset; 14706 14707 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { 14708 mutex_exit(&dtrace_lock); 14709 return (EBUSY); 14710 } 14711 14712 /* 14713 * If this buffer has already been consumed, we're 14714 * going to indicate that there's nothing left here 14715 * to consume. 14716 */ 14717 if (buf->dtb_flags & DTRACEBUF_CONSUMED) { 14718 mutex_exit(&dtrace_lock); 14719 14720 desc.dtbd_size = 0; 14721 desc.dtbd_drops = 0; 14722 desc.dtbd_errors = 0; 14723 desc.dtbd_oldest = 0; 14724 sz = sizeof (desc); 14725 14726 if (copyout(&desc, (void *)arg, sz) != 0) 14727 return (EFAULT); 14728 14729 return (0); 14730 } 14731 14732 /* 14733 * If this is a ring buffer that has wrapped, we want 14734 * to copy the whole thing out. 
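 * After dtrace_buffer_polish(), all dtb_size bytes are copied out,
 * and dtbd_oldest (set below to dtb_xamot_offset) tells the
 * consumer where the oldest record begins.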
14735 */ 14736 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 14737 dtrace_buffer_polish(buf); 14738 sz = buf->dtb_size; 14739 } 14740 14741 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { 14742 mutex_exit(&dtrace_lock); 14743 return (EFAULT); 14744 } 14745 14746 desc.dtbd_size = sz; 14747 desc.dtbd_drops = buf->dtb_drops; 14748 desc.dtbd_errors = buf->dtb_errors; 14749 desc.dtbd_oldest = buf->dtb_xamot_offset; 14750 14751 mutex_exit(&dtrace_lock); 14752 14753 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 14754 return (EFAULT); 14755 14756 buf->dtb_flags |= DTRACEBUF_CONSUMED; 14757 14758 return (0); 14759 } 14760 14761 if (buf->dtb_tomax == NULL) { 14762 ASSERT(buf->dtb_xamot == NULL); 14763 mutex_exit(&dtrace_lock); 14764 return (ENOENT); 14765 } 14766 14767 cached = buf->dtb_tomax; 14768 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 14769 14770 dtrace_xcall(desc.dtbd_cpu, 14771 (dtrace_xcall_t)dtrace_buffer_switch, buf); 14772 14773 state->dts_errors += buf->dtb_xamot_errors; 14774 14775 /* 14776 * If the buffers did not actually switch, then the cross call 14777 * did not take place -- presumably because the given CPU is 14778 * not in the ready set. If this is the case, we'll return 14779 * ENOENT. 14780 */ 14781 if (buf->dtb_tomax == cached) { 14782 ASSERT(buf->dtb_xamot != cached); 14783 mutex_exit(&dtrace_lock); 14784 return (ENOENT); 14785 } 14786 14787 ASSERT(cached == buf->dtb_xamot); 14788 14789 /* 14790 * We have our snapshot; now copy it out. 14791 */ 14792 if (copyout(buf->dtb_xamot, desc.dtbd_data, 14793 buf->dtb_xamot_offset) != 0) { 14794 mutex_exit(&dtrace_lock); 14795 return (EFAULT); 14796 } 14797 14798 desc.dtbd_size = buf->dtb_xamot_offset; 14799 desc.dtbd_drops = buf->dtb_xamot_drops; 14800 desc.dtbd_errors = buf->dtb_xamot_errors; 14801 desc.dtbd_oldest = 0; 14802 14803 mutex_exit(&dtrace_lock); 14804 14805 /* 14806 * Finally, copy out the buffer description. 14807 */ 14808 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 14809 return (EFAULT); 14810 14811 return (0); 14812 } 14813 14814 case DTRACEIOC_CONF: { 14815 dtrace_conf_t conf; 14816 14817 bzero(&conf, sizeof (conf)); 14818 conf.dtc_difversion = DIF_VERSION; 14819 conf.dtc_difintregs = DIF_DIR_NREGS; 14820 conf.dtc_diftupregs = DIF_DTR_NREGS; 14821 conf.dtc_ctfmodel = CTF_MODEL_NATIVE; 14822 14823 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) 14824 return (EFAULT); 14825 14826 return (0); 14827 } 14828 14829 case DTRACEIOC_STATUS: { 14830 dtrace_status_t stat; 14831 dtrace_dstate_t *dstate; 14832 int i, j; 14833 uint64_t nerrs; 14834 14835 /* 14836 * See the comment in dtrace_state_deadman() for the reason 14837 * for setting dts_laststatus to INT64_MAX before setting 14838 * it to the correct value. 
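 * (In short: a 64-bit store need not be atomic, so the deadman
 * could otherwise observe a torn or stale value; storing INT64_MAX
 * first, with a producer barrier, ensures that any value it sees
 * is never too small.)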
14839 */ 14840 state->dts_laststatus = INT64_MAX; 14841 dtrace_membar_producer(); 14842 state->dts_laststatus = dtrace_gethrtime(); 14843 14844 bzero(&stat, sizeof (stat)); 14845 14846 mutex_enter(&dtrace_lock); 14847 14848 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) { 14849 mutex_exit(&dtrace_lock); 14850 return (ENOENT); 14851 } 14852 14853 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING) 14854 stat.dtst_exiting = 1; 14855 14856 nerrs = state->dts_errors; 14857 dstate = &state->dts_vstate.dtvs_dynvars; 14858 14859 for (i = 0; i < NCPU; i++) { 14860 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i]; 14861 14862 stat.dtst_dyndrops += dcpu->dtdsc_drops; 14863 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops; 14864 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops; 14865 14866 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL) 14867 stat.dtst_filled++; 14868 14869 nerrs += state->dts_buffer[i].dtb_errors; 14870 14871 for (j = 0; j < state->dts_nspeculations; j++) { 14872 dtrace_speculation_t *spec; 14873 dtrace_buffer_t *buf; 14874 14875 spec = &state->dts_speculations[j]; 14876 buf = &spec->dtsp_buffer[i]; 14877 stat.dtst_specdrops += buf->dtb_xamot_drops; 14878 } 14879 } 14880 14881 stat.dtst_specdrops_busy = state->dts_speculations_busy; 14882 stat.dtst_specdrops_unavail = state->dts_speculations_unavail; 14883 stat.dtst_stkstroverflows = state->dts_stkstroverflows; 14884 stat.dtst_dblerrors = state->dts_dblerrors; 14885 stat.dtst_killed = 14886 (state->dts_activity == DTRACE_ACTIVITY_KILLED); 14887 stat.dtst_errors = nerrs; 14888 14889 mutex_exit(&dtrace_lock); 14890 14891 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0) 14892 return (EFAULT); 14893 14894 return (0); 14895 } 14896 14897 case DTRACEIOC_FORMAT: { 14898 dtrace_fmtdesc_t fmt; 14899 char *str; 14900 int len; 14901 14902 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0) 14903 return (EFAULT); 14904 14905 mutex_enter(&dtrace_lock); 14906 14907 if (fmt.dtfd_format == 0 || 14908 fmt.dtfd_format > state->dts_nformats) { 14909 mutex_exit(&dtrace_lock); 14910 return (EINVAL); 14911 } 14912 14913 /* 14914 * Format strings are allocated contiguously and they are 14915 * never freed; if a format index is less than the number 14916 * of formats, we can assert that the format map is non-NULL 14917 * and that the format for the specified index is non-NULL. 
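 * If the consumer's buffer is too small for the format string,
 * the required length is passed back in dtfd_length so that the
 * call can be retried with a larger buffer.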
14918 */ 14919 ASSERT(state->dts_formats != NULL); 14920 str = state->dts_formats[fmt.dtfd_format - 1]; 14921 ASSERT(str != NULL); 14922 14923 len = strlen(str) + 1; 14924 14925 if (len > fmt.dtfd_length) { 14926 fmt.dtfd_length = len; 14927 14928 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) { 14929 mutex_exit(&dtrace_lock); 14930 return (EINVAL); 14931 } 14932 } else { 14933 if (copyout(str, fmt.dtfd_string, len) != 0) { 14934 mutex_exit(&dtrace_lock); 14935 return (EINVAL); 14936 } 14937 } 14938 14939 mutex_exit(&dtrace_lock); 14940 return (0); 14941 } 14942 14943 default: 14944 break; 14945 } 14946 14947 return (ENOTTY); 14948 } 14949 14950 /*ARGSUSED*/ 14951 static int 14952 dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 14953 { 14954 dtrace_state_t *state; 14955 14956 switch (cmd) { 14957 case DDI_DETACH: 14958 break; 14959 14960 case DDI_SUSPEND: 14961 return (DDI_SUCCESS); 14962 14963 default: 14964 return (DDI_FAILURE); 14965 } 14966 14967 mutex_enter(&cpu_lock); 14968 mutex_enter(&dtrace_provider_lock); 14969 mutex_enter(&dtrace_lock); 14970 14971 ASSERT(dtrace_opens == 0); 14972 14973 if (dtrace_helpers > 0) { 14974 mutex_exit(&dtrace_provider_lock); 14975 mutex_exit(&dtrace_lock); 14976 mutex_exit(&cpu_lock); 14977 return (DDI_FAILURE); 14978 } 14979 14980 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) { 14981 mutex_exit(&dtrace_provider_lock); 14982 mutex_exit(&dtrace_lock); 14983 mutex_exit(&cpu_lock); 14984 return (DDI_FAILURE); 14985 } 14986 14987 dtrace_provider = NULL; 14988 14989 if ((state = dtrace_anon_grab()) != NULL) { 14990 /* 14991 * If there were ECBs on this state, the provider should 14992 * have not been allowed to detach; assert that there is 14993 * none. 14994 */ 14995 ASSERT(state->dts_necbs == 0); 14996 dtrace_state_destroy(state); 14997 14998 /* 14999 * If we're being detached with anonymous state, we need to 15000 * indicate to the kernel debugger that DTrace is now inactive. 
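 * (While DTrace is marked active, the debugger may not modify
 * kernel text; cf. the KDI_DTSET_DTRACE_ACTIVATE check in
 * dtrace_open().)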
15001 */ 15002 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15003 } 15004 15005 bzero(&dtrace_anon, sizeof (dtrace_anon_t)); 15006 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 15007 dtrace_cpu_init = NULL; 15008 dtrace_helpers_cleanup = NULL; 15009 dtrace_helpers_fork = NULL; 15010 dtrace_cpustart_init = NULL; 15011 dtrace_cpustart_fini = NULL; 15012 dtrace_debugger_init = NULL; 15013 dtrace_debugger_fini = NULL; 15014 dtrace_kreloc_init = NULL; 15015 dtrace_kreloc_fini = NULL; 15016 dtrace_modload = NULL; 15017 dtrace_modunload = NULL; 15018 15019 mutex_exit(&cpu_lock); 15020 15021 if (dtrace_helptrace_enabled) { 15022 kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize); 15023 dtrace_helptrace_buffer = NULL; 15024 } 15025 15026 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *)); 15027 dtrace_probes = NULL; 15028 dtrace_nprobes = 0; 15029 15030 dtrace_hash_destroy(dtrace_bymod); 15031 dtrace_hash_destroy(dtrace_byfunc); 15032 dtrace_hash_destroy(dtrace_byname); 15033 dtrace_bymod = NULL; 15034 dtrace_byfunc = NULL; 15035 dtrace_byname = NULL; 15036 15037 kmem_cache_destroy(dtrace_state_cache); 15038 vmem_destroy(dtrace_minor); 15039 vmem_destroy(dtrace_arena); 15040 15041 if (dtrace_toxrange != NULL) { 15042 kmem_free(dtrace_toxrange, 15043 dtrace_toxranges_max * sizeof (dtrace_toxrange_t)); 15044 dtrace_toxrange = NULL; 15045 dtrace_toxranges = 0; 15046 dtrace_toxranges_max = 0; 15047 } 15048 15049 ddi_remove_minor_node(dtrace_devi, NULL); 15050 dtrace_devi = NULL; 15051 15052 ddi_soft_state_fini(&dtrace_softstate); 15053 15054 ASSERT(dtrace_vtime_references == 0); 15055 ASSERT(dtrace_opens == 0); 15056 ASSERT(dtrace_retained == NULL); 15057 15058 mutex_exit(&dtrace_lock); 15059 mutex_exit(&dtrace_provider_lock); 15060 15061 /* 15062 * We don't destroy the task queue until after we have dropped our 15063 * locks (taskq_destroy() may block on running tasks). To prevent 15064 * attempting to do work after we have effectively detached but before 15065 * the task queue has been destroyed, all tasks dispatched via the 15066 * task queue must check that DTrace is still attached before 15067 * performing any operation. 
15068 */ 15069 taskq_destroy(dtrace_taskq); 15070 dtrace_taskq = NULL; 15071 15072 return (DDI_SUCCESS); 15073 } 15074 15075 /*ARGSUSED*/ 15076 static int 15077 dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 15078 { 15079 int error; 15080 15081 switch (infocmd) { 15082 case DDI_INFO_DEVT2DEVINFO: 15083 *result = (void *)dtrace_devi; 15084 error = DDI_SUCCESS; 15085 break; 15086 case DDI_INFO_DEVT2INSTANCE: 15087 *result = (void *)0; 15088 error = DDI_SUCCESS; 15089 break; 15090 default: 15091 error = DDI_FAILURE; 15092 } 15093 return (error); 15094 } 15095 15096 static struct cb_ops dtrace_cb_ops = { 15097 dtrace_open, /* open */ 15098 dtrace_close, /* close */ 15099 nulldev, /* strategy */ 15100 nulldev, /* print */ 15101 nodev, /* dump */ 15102 nodev, /* read */ 15103 nodev, /* write */ 15104 dtrace_ioctl, /* ioctl */ 15105 nodev, /* devmap */ 15106 nodev, /* mmap */ 15107 nodev, /* segmap */ 15108 nochpoll, /* poll */ 15109 ddi_prop_op, /* cb_prop_op */ 15110 0, /* streamtab */ 15111 D_NEW | D_MP /* Driver compatibility flag */ 15112 }; 15113 15114 static struct dev_ops dtrace_ops = { 15115 DEVO_REV, /* devo_rev */ 15116 0, /* refcnt */ 15117 dtrace_info, /* get_dev_info */ 15118 nulldev, /* identify */ 15119 nulldev, /* probe */ 15120 dtrace_attach, /* attach */ 15121 dtrace_detach, /* detach */ 15122 nodev, /* reset */ 15123 &dtrace_cb_ops, /* driver operations */ 15124 NULL, /* bus operations */ 15125 nodev /* dev power */ 15126 }; 15127 15128 static struct modldrv modldrv = { 15129 &mod_driverops, /* module type (this is a pseudo driver) */ 15130 "Dynamic Tracing", /* name of module */ 15131 &dtrace_ops, /* driver ops */ 15132 }; 15133 15134 static struct modlinkage modlinkage = { 15135 MODREV_1, 15136 (void *)&modldrv, 15137 NULL 15138 }; 15139 15140 int 15141 _init(void) 15142 { 15143 return (mod_install(&modlinkage)); 15144 } 15145 15146 int 15147 _info(struct modinfo *modinfop) 15148 { 15149 return (mod_info(&modlinkage, modinfop)); 15150 } 15151 15152 int 15153 _fini(void) 15154 { 15155 return (mod_remove(&modlinkage)); 15156 } 15157
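/*
 * For illustration only: the cb_ops above are reached from userland via
 * the dtrace minor node (conventionally linked as /dev/dtrace/dtrace).
 * The following sketch -- a hypothetical standalone consumer, not part
 * of this driver and no substitute for libdtrace -- exercises
 * dtrace_open(), dtrace_ioctl() with DTRACEIOC_CONF (the one ioctl that
 * requires no other state), and dtrace_close(). It assumes a Solaris
 * system with <sys/dtrace.h> installed and a caller holding at least
 * one of the dtrace privileges:
 *
 *	#include <sys/dtrace.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <stropts.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		dtrace_conf_t conf;
 *		int fd = open("/dev/dtrace/dtrace", O_RDWR);
 *
 *		if (fd == -1)
 *			return (1);
 *
 *		(void) memset(&conf, 0, sizeof (conf));
 *
 *		if (ioctl(fd, DTRACEIOC_CONF, &conf) != 0) {
 *			(void) close(fd);
 *			return (1);
 *		}
 *
 *		(void) printf("DIF version %d, %d integer registers, "
 *		    "%d tuple registers\n", (int)conf.dtc_difversion,
 *		    (int)conf.dtc_difintregs, (int)conf.dtc_diftupregs);
 *
 *		return (close(fd) != 0);
 *	}
 *
 * Real consumers go through libdtrace, which layers probe-description
 * parsing, DOF generation and buffer consumption atop these ioctls.
 */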